repo_name
string
path
string
copies
string
size
string
content
string
license
string
fileton/linux
sound/soc/codecs/wm8804-spi.c
617
1336
/*
 * wm8804-spi.c -- WM8804 S/PDIF transceiver driver - SPI
 *
 * Copyright 2015 Cirrus Logic Inc
 *
 * Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

#include "wm8804.h"

/*
 * SPI bus glue: create a managed SPI regmap for the chip and hand the
 * device over to the bus-agnostic WM8804 core.
 */
static int wm8804_spi_probe(struct spi_device *spi)
{
	struct regmap *map = devm_regmap_init_spi(spi, &wm8804_regmap_config);

	if (IS_ERR(map))
		return PTR_ERR(map);

	return wm8804_probe(&spi->dev, map);
}

/* Undo wm8804_probe(); the regmap itself is devm-managed. */
static int wm8804_spi_remove(struct spi_device *spi)
{
	wm8804_remove(&spi->dev);

	return 0;
}

static const struct of_device_id wm8804_of_match[] = {
	{ .compatible = "wlf,wm8804", },
	{ }
};
MODULE_DEVICE_TABLE(of, wm8804_of_match);

static struct spi_driver wm8804_spi_driver = {
	.driver = {
		.name = "wm8804",
		.owner = THIS_MODULE,
		.pm = &wm8804_pm,
		.of_match_table = wm8804_of_match,
	},
	.probe = wm8804_spi_probe,
	.remove = wm8804_spi_remove
};

module_spi_driver(wm8804_spi_driver);

MODULE_DESCRIPTION("ASoC WM8804 driver - SPI");
MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
gpl-2.0
Pingmin/linux
arch/arm/mm/cache-uniphier.c
617
14464
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * Outer (L2/L3) cache support for Socionext UniPhier SoCs.  The cache
 * levels are discovered from the device tree and chained through the
 * generic ARM outer_cache callbacks.
 */

#define pr_fmt(fmt)		"uniphier: " fmt

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC		0x0	/* Control Register */
#define    UNIPHIER_SSCC_BST			BIT(20)	/* UCWG burst read */
#define    UNIPHIER_SSCC_ACT			BIT(19)	/* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG			BIT(18)	/* WT gathering on */
#define    UNIPHIER_SSCC_PRD			BIT(17)	/* enable pre-fetch */
#define    UNIPHIER_SSCC_ON			BIT(0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x30	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x34	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x0	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x244	/* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH		0x2	/* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC		0x8	/* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x248	/* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_S_MASK		(0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE		(0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL		(0x1 << 17)
#define    UNIPHIER_SSCOQM_CE			BIT(15)	/* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH		0x2	/* flush */
#define UNIPHIER_SSCOQAD	0x24c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF	0x25c	/* Cache Operation Queue Set Complete*/
#define    UNIPHIER_SSCOPPQSEF_FE		BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE		BIT(0)
#define UNIPHIER_SSCOLPQS	0x260	/* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF			BIT(2)
#define    UNIPHIER_SSCOLPQS_EST		BIT(1)
#define    UNIPHIER_SSCOLPQS_QST		BIT(0)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)

/**
 * uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual base of per-CPU active way control registers
 * @way_mask: each bit specifies if the way is present
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
	void __iomem *ctrl_base;
	void __iomem *rev_base;
	void __iomem *op_base;
	void __iomem *way_ctrl_base;
	u32 way_mask;
	u32 nsets;
	u32 line_size;
	u32 range_op_max_size;
	struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
	/* This sequence need not be atomic.  Do not disable IRQ. */
	writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
		       data->op_base + UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
					  unsigned long start,
					  unsigned long size,
					  u32 operation)
{
	unsigned long flags;

	/*
	 * No spin lock is necessary here because:
	 *
	 * [1] This outer cache controller is able to accept maintenance
	 * operations from multiple CPUs at a time in an SMP system; if a
	 * maintenance operation is under way and another operation is issued,
	 * the new one is stored in the queue.  The controller performs one
	 * operation after another.  If the queue is full, the status register,
	 * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
	 * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
	 * different instances for each CPU, i.e. each CPU can track the status
	 * of the maintenance operations triggered by itself.
	 *
	 * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
	 * SSCOQWN}, are shared between multiple CPUs, but the hardware still
	 * guarantees the registration sequence is atomic; the write access to
	 * them are arbitrated by the hardware.  The first accessor to the
	 * register, UNIPHIER_SSCOQM, holds the access right and it is released
	 * by reading the status register, UNIPHIER_SSCOPPQSEF.  While one CPU
	 * is holding the access right, other CPUs fail to register operations.
	 * One CPU should not hold the access right for a long time, so local
	 * IRQs should be disabled while the following sequence.
	 */
	local_irq_save(flags);

	/* clear the complete notification flag */
	writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
			       data->op_base + UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
			writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
			writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
		}
		/* retry until the hardware accepts the queue registration */
	} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
		      UNIPHIER_SSCOLPQS_EF))
		cpu_relax();

	local_irq_restore(flags);
}

/* Run a whole-cache ("all") queue operation and drain the buffers. */
static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
				       u32 operation)
{
	__uniphier_cache_maint_common(data, 0, 0,
				      UNIPHIER_SSCOQM_S_ALL | operation);

	__uniphier_cache_sync(data);
}

/*
 * Run a range queue operation, aligning the range to whole cache lines,
 * splitting it into hardware-sized chunks, and falling back to an "all"
 * operation when the aligned size would overflow.
 */
static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
					 unsigned long start, unsigned long end,
					 u32 operation)
{
	unsigned long size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line
	 */
	start = start & ~(data->line_size - 1);

	size = end - start;

	if (unlikely(size >= (unsigned long)(-data->line_size))) {
		/* this means cache operation for all range */
		__uniphier_cache_maint_all(data, operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line
	 */
	size = ALIGN(size, data->line_size);

	while (size) {
		unsigned long chunk_size = min_t(unsigned long, size,
						 data->range_op_max_size);

		__uniphier_cache_maint_common(data, start, chunk_size,
					UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	__uniphier_cache_sync(data);
}

/* Turn a cache level on (with WT gathering and pre-fetch) or fully off. */
static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
	u32 val = 0;

	if (on)
		val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

	writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

/* Program the present-way mask into every CPU's active way control slot. */
static void __init __uniphier_cache_set_active_ways(
					struct uniphier_cache_data *data)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
}

/* Apply a range operation to every registered outer cache level. */
static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
				       u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_range(data, start, end, operation);
}

/* Apply an "all" operation to every registered outer cache level. */
static void uniphier_cache_maint_all(u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

/* Disable from the innermost level outwards, then flush everything. */
static void uniphier_cache_disable(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry_reverse(data, &uniphier_cache_list, list)
		__uniphier_cache_enable(data, false);

	uniphier_cache_flush_all();
}

/* Invalidate all levels, then enable them outermost-first with active ways. */
static void __init uniphier_cache_enable(void)
{
	struct uniphier_cache_data *data;

	uniphier_cache_inv_all();

	list_for_each_entry(data, &uniphier_cache_list, list) {
		__uniphier_cache_enable(data, true);
		__uniphier_cache_set_active_ways(data);
	}
}

static void uniphier_cache_sync(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_sync(data);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
	{ .compatible = "socionext,uniphier-system-cache" },
	{ /* sentinel */ }
};

/*
 * Parse one cache level's DT node, validate its geometry properties, map
 * its register regions, register it on uniphier_cache_list, and recurse
 * into the next outer level (if any).  On error for THIS level, all of
 * this level's resources are rolled back via the err label.
 */
static int __init __uniphier_cache_init(struct device_node *np,
					unsigned int *cache_level)
{
	struct uniphier_cache_data *data;
	u32 level, cache_size;
	struct device_node *next_np;
	int ret = 0;

	if (!of_match_node(uniphier_cache_match, np)) {
		pr_err("L%d: not compatible with uniphier cache\n",
		       *cache_level);
		return -EINVAL;
	}

	if (of_property_read_u32(np, "cache-level", &level)) {
		pr_err("L%d: cache-level is not specified\n", *cache_level);
		return -EINVAL;
	}

	if (level != *cache_level) {
		pr_err("L%d: cache-level is unexpected value %d\n",
		       *cache_level, level);
		return -EINVAL;
	}

	if (!of_property_read_bool(np, "cache-unified")) {
		pr_err("L%d: cache-unified is not specified\n", *cache_level);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* line size and set count must be powers of two */
	if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
	    !is_power_of_2(data->line_size)) {
		pr_err("L%d: cache-line-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
	    !is_power_of_2(data->nsets)) {
		pr_err("L%d: cache-sets is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-size", &cache_size) ||
	    cache_size == 0 ||
	    cache_size % (data->nsets * data->line_size)) {
		pr_err("L%d: cache-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	/* one mask bit per way: ways = size / sets / line_size */
	data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
				 0);

	data->ctrl_base = of_iomap(np, 0);
	if (!data->ctrl_base) {
		pr_err("L%d: failed to map control register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->rev_base = of_iomap(np, 1);
	if (!data->rev_base) {
		pr_err("L%d: failed to map revision register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->op_base = of_iomap(np, 2);
	if (!data->op_base) {
		pr_err("L%d: failed to map operation register\n",
		       *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->way_ctrl_base = data->ctrl_base + 0xc00;

	if (*cache_level == 2) {
		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
		/*
		 * The size of range operation is limited to (1 << 22) or less
		 * for PH-sLD8 or older SoCs.
		 */
		if (revision <= 0x16)
			data->range_op_max_size = (u32)1 << 22;

		/*
		 * Unfortunately, the offset address of active way control base
		 * varies from SoC to SoC.
		 */
		switch (revision) {
		case 0x11:	/* sLD3 */
			data->way_ctrl_base = data->ctrl_base + 0x870;
			break;
		case 0x12:	/* LD4 */
		case 0x16:	/* sld8 */
			data->way_ctrl_base = data->ctrl_base + 0x840;
			break;
		default:
			break;
		}
	}

	/*
	 * Reserve one line of headroom; when range_op_max_size was left at
	 * zero (no hardware limit) this wraps to a near-UINT_MAX limit,
	 * which the range-vs-all check above effectively treats as "no cap".
	 */
	data->range_op_max_size -= data->line_size;

	INIT_LIST_HEAD(&data->list);
	list_add_tail(&data->list, &uniphier_cache_list); /* no mutex */

	/*
	 * OK, this level has been successfully initialized.  Look for the next
	 * level cache.  Do not roll back even if the initialization of the
	 * next level cache fails because we want to continue with available
	 * cache levels.
	 */
	next_np = of_find_next_cache_node(np);
	if (next_np) {
		(*cache_level)++;
		ret = __uniphier_cache_init(next_np, cache_level);
	}
	of_node_put(next_np);

	return ret;
err:
	/* iounmap(NULL) is a no-op, so unmapping in reverse order is safe */
	iounmap(data->op_base);
	iounmap(data->rev_base);
	iounmap(data->ctrl_base);
	kfree(data);

	return ret;
}

/*
 * Entry point: locate the L2 node in the device tree, initialize the whole
 * hierarchy from it, install the outer_cache callbacks, and enable caching.
 * Returns 0 on success or a negative errno if L2 itself cannot be set up.
 */
int __init uniphier_cache_init(void)
{
	struct device_node *np = NULL;
	unsigned int cache_level;
	int ret = 0;

	/* look for level 2 cache */
	while ((np = of_find_matching_node(np, uniphier_cache_match)))
		if (!of_property_read_u32(np, "cache-level", &cache_level) &&
		    cache_level == 2)
			break;

	if (!np)
		return -ENODEV;

	ret = __uniphier_cache_init(np, &cache_level);
	of_node_put(np);

	if (ret) {
		/*
		 * Error out only if L2 initialization fails.  Continue with
		 * any error on L3 or outer because they are optional.
		 */
		if (cache_level == 2) {
			pr_err("failed to initialize L2 cache\n");
			return ret;
		}

		cache_level--;
		ret = 0;
	}

	outer_cache.inv_range = uniphier_cache_inv_range;
	outer_cache.clean_range = uniphier_cache_clean_range;
	outer_cache.flush_range = uniphier_cache_flush_range;
	outer_cache.flush_all = uniphier_cache_flush_all;
	outer_cache.disable = uniphier_cache_disable;
	outer_cache.sync = uniphier_cache_sync;

	uniphier_cache_enable();

	pr_info("enabled outer cache (cache level: %d)\n", cache_level);

	return ret;
}
gpl-2.0
archfan/xu4-linux
drivers/cpufreq/tegra20-cpufreq.c
617
5679
/* * Copyright (C) 2010 Google, Inc. * * Author: * Colin Cross <ccross@google.com> * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> static struct cpufreq_frequency_table freq_table[] = { { .frequency = 216000 }, { .frequency = 312000 }, { .frequency = 456000 }, { .frequency = 608000 }, { .frequency = 760000 }, { .frequency = 816000 }, { .frequency = 912000 }, { .frequency = 1000000 }, { .frequency = CPUFREQ_TABLE_END }, }; #define NUM_CPUS 2 static struct clk *cpu_clk; static struct clk *pll_x_clk; static struct clk *pll_p_clk; static struct clk *emc_clk; static bool pll_x_prepared; static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy, unsigned int index) { unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000; /* * Don't switch to intermediate freq if: * - we are already at it, i.e. policy->cur == ifreq * - index corresponds to ifreq */ if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq)) return 0; return ifreq; } static int tegra_target_intermediate(struct cpufreq_policy *policy, unsigned int index) { int ret; /* * Take an extra reference to the main pll so it doesn't turn * off when we move the cpu off of it as enabling it again while we * switch to it from tegra_target() would take additional time. 
* * When target-freq is equal to intermediate freq we don't need to * switch to an intermediate freq and so this routine isn't called. * Also, we wouldn't be using pll_x anymore and must not take extra * reference to it, as it can be disabled now to save some power. */ clk_prepare_enable(pll_x_clk); ret = clk_set_parent(cpu_clk, pll_p_clk); if (ret) clk_disable_unprepare(pll_x_clk); else pll_x_prepared = true; return ret; } static int tegra_target(struct cpufreq_policy *policy, unsigned int index) { unsigned long rate = freq_table[index].frequency; unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000; int ret = 0; /* * Vote on memory bus frequency based on cpu frequency * This sets the minimum frequency, display or avp may request higher */ if (rate >= 816000) clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ else if (rate >= 456000) clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */ else clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ /* * target freq == pll_p, don't need to take extra reference to pll_x_clk * as it isn't used anymore. */ if (rate == ifreq) return clk_set_parent(cpu_clk, pll_p_clk); ret = clk_set_rate(pll_x_clk, rate * 1000); /* Restore to earlier frequency on error, i.e. pll_x */ if (ret) pr_err("Failed to change pll_x to %lu\n", rate); ret = clk_set_parent(cpu_clk, pll_x_clk); /* This shouldn't fail while changing or restoring */ WARN_ON(ret); /* * Drop count to pll_x clock only if we switched to intermediate freq * earlier while transitioning to a target frequency. */ if (pll_x_prepared) { clk_disable_unprepare(pll_x_clk); pll_x_prepared = false; } return ret; } static int tegra_cpu_init(struct cpufreq_policy *policy) { int ret; if (policy->cpu >= NUM_CPUS) return -EINVAL; clk_prepare_enable(emc_clk); clk_prepare_enable(cpu_clk); /* FIXME: what's the actual transition time? 
*/ ret = cpufreq_generic_init(policy, freq_table, 300 * 1000); if (ret) { clk_disable_unprepare(cpu_clk); clk_disable_unprepare(emc_clk); return ret; } policy->clk = cpu_clk; policy->suspend_freq = freq_table[0].frequency; return 0; } static int tegra_cpu_exit(struct cpufreq_policy *policy) { clk_disable_unprepare(cpu_clk); clk_disable_unprepare(emc_clk); return 0; } static struct cpufreq_driver tegra_cpufreq_driver = { .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .get_intermediate = tegra_get_intermediate, .target_intermediate = tegra_target_intermediate, .target_index = tegra_target, .get = cpufreq_generic_get, .init = tegra_cpu_init, .exit = tegra_cpu_exit, .name = "tegra", .attr = cpufreq_generic_attr, .suspend = cpufreq_generic_suspend, }; static int __init tegra_cpufreq_init(void) { cpu_clk = clk_get_sys(NULL, "cclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); pll_x_clk = clk_get_sys(NULL, "pll_x"); if (IS_ERR(pll_x_clk)) return PTR_ERR(pll_x_clk); pll_p_clk = clk_get_sys(NULL, "pll_p"); if (IS_ERR(pll_p_clk)) return PTR_ERR(pll_p_clk); emc_clk = clk_get_sys("cpu", "emc"); if (IS_ERR(emc_clk)) { clk_put(cpu_clk); return PTR_ERR(emc_clk); } return cpufreq_register_driver(&tegra_cpufreq_driver); } static void __exit tegra_cpufreq_exit(void) { cpufreq_unregister_driver(&tegra_cpufreq_driver); clk_put(emc_clk); clk_put(cpu_clk); } MODULE_AUTHOR("Colin Cross <ccross@android.com>"); MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2"); MODULE_LICENSE("GPL"); module_init(tegra_cpufreq_init); module_exit(tegra_cpufreq_exit);
gpl-2.0
xingrz/android_kernel_nubia_msm8996
drivers/input/keyboard/qt1070.c
1385
6875
/*
 * Atmel AT42QT1070 QTouch Sensor Controller
 *
 *  Copyright (C) 2011 Atmel
 *
 *  Authors: Bo Shen <voice.shen@atmel.com>
 *
 *  Base on AT42QT2160 driver by:
 *  Raphael Derosso Pereira <raphaelpereira@gmail.com>
 *  Copyright (C) 2009
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

/* Address for each register */
#define CHIP_ID            0x00
#define QT1070_CHIP_ID     0x2E	/* expected value read back from CHIP_ID */

#define FW_VERSION         0x01
#define QT1070_FW_VERSION  0x15

#define DET_STATUS         0x02	/* reading this clears the interrupt */

#define KEY_STATUS         0x03	/* one bit per key, 1 = touched */

/* Calibrate */
#define CALIBRATE_CMD      0x38
#define QT1070_CAL_TIME    200	/* calibration settle time, ms */

/* Reset */
#define RESET              0x39
#define QT1070_RESET_TIME  255	/* reset settle time, ms */

/* AT42QT1070 support up to 7 keys */
static const unsigned short qt1070_key2code[] = {
	KEY_0, KEY_1, KEY_2, KEY_3,
	KEY_4, KEY_5, KEY_6,
};

/* Per-device state. */
struct qt1070_data {
	struct i2c_client *client;
	struct input_dev *input;
	unsigned int irq;
	/* keycode reported for each of the 7 hardware keys */
	unsigned short keycodes[ARRAY_SIZE(qt1070_key2code)];
	/* last KEY_STATUS snapshot, used to report only changed keys */
	u8 last_keys;
};

/* Read one register via SMBus; returns the value or a negative errno. */
static int qt1070_read(struct i2c_client *client, u8 reg)
{
	int ret;

	ret = i2c_smbus_read_byte_data(client, reg);
	if (ret < 0)
		dev_err(&client->dev,
			"can not read register, returned %d\n", ret);

	return ret;
}

/* Write one register via SMBus; returns 0 or a negative errno. */
static int qt1070_write(struct i2c_client *client, u8 reg, u8 data)
{
	int ret;

	ret = i2c_smbus_write_byte_data(client, reg, data);
	if (ret < 0)
		dev_err(&client->dev,
			"can not write register, returned %d\n", ret);

	return ret;
}

/* Verify the chip ID and log the firmware version; false if not a QT1070. */
static bool qt1070_identify(struct i2c_client *client)
{
	int id, ver;

	/* Read Chip ID */
	id = qt1070_read(client, CHIP_ID);
	if (id != QT1070_CHIP_ID) {
		dev_err(&client->dev, "ID %d not supported\n", id);
		return false;
	}

	/* Read firmware version */
	ver = qt1070_read(client, FW_VERSION);
	if (ver < 0) {
		dev_err(&client->dev, "could not read the firmware version\n");
		return false;
	}

	dev_info(&client->dev, "AT42QT1070 firmware version %x\n", ver);

	return true;
}

/*
 * Threaded IRQ handler: acknowledge the interrupt, read the key status,
 * and report only the keys whose state changed since the last pass.
 */
static irqreturn_t qt1070_interrupt(int irq, void *dev_id)
{
	struct qt1070_data *data = dev_id;
	struct i2c_client *client = data->client;
	struct input_dev *input = data->input;
	int i;
	u8 new_keys, keyval, mask = 0x01;

	/* Read the detected status register, thus clearing interrupt */
	qt1070_read(client, DET_STATUS);

	/* Read which key changed */
	new_keys = qt1070_read(client, KEY_STATUS);

	for (i = 0; i < ARRAY_SIZE(qt1070_key2code); i++) {
		keyval = new_keys & mask;
		if ((data->last_keys & mask) != keyval)
			input_report_key(input, data->keycodes[i], keyval);
		mask <<= 1;
	}
	input_sync(input);

	data->last_keys = new_keys;
	return IRQ_HANDLED;
}

/*
 * Probe: verify the chip, allocate state and the input device, run a
 * calibrate + soft reset cycle, then hook up the threaded interrupt and
 * register the input device.
 */
static int qt1070_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct qt1070_data *data;
	struct input_dev *input;
	int i;
	int err;

	/* nonzero means the adapter supports SMBus byte transfers */
	err = i2c_check_functionality(client->adapter,
			I2C_FUNC_SMBUS_BYTE);
	if (!err) {
		dev_err(&client->dev, "%s adapter not supported\n",
			dev_driver_string(&client->adapter->dev));
		return -ENODEV;
	}

	if (!client->irq) {
		dev_err(&client->dev, "please assign the irq to this device\n");
		return -EINVAL;
	}

	/* Identify the qt1070 chip */
	if (!qt1070_identify(client))
		return -ENODEV;

	data = kzalloc(sizeof(struct qt1070_data), GFP_KERNEL);
	input = input_allocate_device();
	if (!data || !input) {
		dev_err(&client->dev, "insufficient memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	data->client = client;
	data->input = input;
	data->irq = client->irq;

	input->name = "AT42QT1070 QTouch Sensor";
	input->dev.parent = &client->dev;
	input->id.bustype = BUS_I2C;

	/* Add the keycode */
	input->keycode = data->keycodes;
	input->keycodesize = sizeof(data->keycodes[0]);
	input->keycodemax = ARRAY_SIZE(qt1070_key2code);

	__set_bit(EV_KEY, input->evbit);

	for (i = 0; i < ARRAY_SIZE(qt1070_key2code); i++) {
		data->keycodes[i] = qt1070_key2code[i];
		__set_bit(qt1070_key2code[i], input->keybit);
	}

	/* Calibrate device */
	qt1070_write(client, CALIBRATE_CMD, 1);
	msleep(QT1070_CAL_TIME);

	/* Soft reset */
	qt1070_write(client, RESET, 1);
	msleep(QT1070_RESET_TIME);

	err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
				   IRQF_TRIGGER_NONE | IRQF_ONESHOT,
				   client->dev.driver->name, data);
	if (err) {
		dev_err(&client->dev, "fail to request irq\n");
		goto err_free_mem;
	}

	/* Register the input device */
	err = input_register_device(data->input);
	if (err) {
		dev_err(&client->dev, "Failed to register input device\n");
		goto err_free_irq;
	}

	i2c_set_clientdata(client, data);

	/* Read to clear the change line */
	qt1070_read(client, DET_STATUS);

	return 0;

err_free_irq:
	free_irq(client->irq, data);
err_free_mem:
	input_free_device(input);
	kfree(data);
	return err;
}

/* Remove: release the IRQ, unregister the input device, free state. */
static int qt1070_remove(struct i2c_client *client)
{
	struct qt1070_data *data = i2c_get_clientdata(client);

	/* Release IRQ */
	free_irq(client->irq, data);

	input_unregister_device(data->input);
	kfree(data);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* Arm the IRQ as a wakeup source if the device may wake the system. */
static int qt1070_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct qt1070_data *data = i2c_get_clientdata(client);

	if (device_may_wakeup(dev))
		enable_irq_wake(data->irq);

	return 0;
}

/* Disarm the wakeup IRQ on resume. */
static int qt1070_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct qt1070_data *data = i2c_get_clientdata(client);

	if (device_may_wakeup(dev))
		disable_irq_wake(data->irq);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(qt1070_pm_ops, qt1070_suspend, qt1070_resume);

static const struct i2c_device_id qt1070_id[] = {
	{ "qt1070", 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, qt1070_id);

static struct i2c_driver qt1070_driver = {
	.driver	= {
		.name	= "qt1070",
		.owner	= THIS_MODULE,
		.pm	= &qt1070_pm_ops,
	},
	.id_table	= qt1070_id,
	.probe		= qt1070_probe,
	.remove		= qt1070_remove,
};

module_i2c_driver(qt1070_driver);

MODULE_AUTHOR("Bo Shen <voice.shen@atmel.com>");
MODULE_DESCRIPTION("Driver for AT42QT1070 QTouch sensor");
MODULE_LICENSE("GPL");
gpl-2.0
SchulerControl/linux
drivers/media/radio/radio-shark.c
2153
11694
/* * Linux V4L2 radio driver for the Griffin radioSHARK USB radio receiver * * Note the radioSHARK offers the audio through a regular USB audio device, * this driver only handles the tuning. * * The info necessary to drive the shark was taken from the small userspace * shark.c program by Michael Rolig, which he kindly placed in the Public * Domain. * * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <media/v4l2-device.h> #include <sound/tea575x-tuner.h> #if defined(CONFIG_LEDS_CLASS) || \ (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE)) #define SHARK_USE_LEDS 1 #endif /* * Version Information */ MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Griffin radioSHARK, USB radio receiver driver"); MODULE_LICENSE("GPL"); #define SHARK_IN_EP 0x83 #define SHARK_OUT_EP 0x05 #define TEA575X_BIT_MONO (1<<22) /* 0 = stereo, 1 = mono */ #define TEA575X_BIT_BAND_MASK (3<<20) #define TEA575X_BIT_BAND_FM (0<<20) #define TB_LEN 6 #define DRV_NAME "radioshark" #define v4l2_dev_to_shark(d) container_of(d, struct shark_device, v4l2_dev) /* Note BLUE_IS_PULSE 
comes after NO_LEDS as it is a status bit, not a LED */ enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS, BLUE_IS_PULSE }; struct shark_device { struct usb_device *usbdev; struct v4l2_device v4l2_dev; struct snd_tea575x tea; #ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; char led_names[NO_LEDS][32]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; #endif u8 *transfer_buffer; u32 last_val; }; static atomic_t shark_instance = ATOMIC_INIT(0); static void shark_write_val(struct snd_tea575x *tea, u32 val) { struct shark_device *shark = tea->private_data; int i, res, actual_len; /* Avoid unnecessary (slow) USB transfers */ if (shark->last_val == val) return; memset(shark->transfer_buffer, 0, TB_LEN); shark->transfer_buffer[0] = 0xc0; /* Write shift register command */ for (i = 0; i < 4; i++) shark->transfer_buffer[i] |= (val >> (24 - i * 8)) & 0xff; res = usb_interrupt_msg(shark->usbdev, usb_sndintpipe(shark->usbdev, SHARK_OUT_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res >= 0) shark->last_val = val; else v4l2_err(&shark->v4l2_dev, "set-freq error: %d\n", res); } static u32 shark_read_val(struct snd_tea575x *tea) { struct shark_device *shark = tea->private_data; int i, res, actual_len; u32 val = 0; memset(shark->transfer_buffer, 0, TB_LEN); shark->transfer_buffer[0] = 0x80; res = usb_interrupt_msg(shark->usbdev, usb_sndintpipe(shark->usbdev, SHARK_OUT_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) { v4l2_err(&shark->v4l2_dev, "request-status error: %d\n", res); return shark->last_val; } res = usb_interrupt_msg(shark->usbdev, usb_rcvintpipe(shark->usbdev, SHARK_IN_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) { v4l2_err(&shark->v4l2_dev, "get-status error: %d\n", res); return shark->last_val; } for (i = 0; i < 4; i++) val |= shark->transfer_buffer[i] << (24 - i * 8); shark->last_val = val; /* * The shark does not allow actually reading the stereo / mono pin 
:( * So assume that when we're tuned to an FM station and mono has not * been requested, that we're receiving stereo. */ if (((val & TEA575X_BIT_BAND_MASK) == TEA575X_BIT_BAND_FM) && !(val & TEA575X_BIT_MONO)) shark->tea.stereo = true; else shark->tea.stereo = false; return val; } static struct snd_tea575x_ops shark_tea_ops = { .write_val = shark_write_val, .read_val = shark_read_val, }; #ifdef SHARK_USE_LEDS static void shark_led_work(struct work_struct *work) { struct shark_device *shark = container_of(work, struct shark_device, led_work); int i, res, brightness, actual_len; for (i = 0; i < 3; i++) { if (!test_and_clear_bit(i, &shark->brightness_new)) continue; brightness = atomic_read(&shark->brightness[i]); memset(shark->transfer_buffer, 0, TB_LEN); if (i != RED_LED) { shark->transfer_buffer[0] = 0xA0 + i; shark->transfer_buffer[1] = brightness; } else shark->transfer_buffer[0] = brightness ? 0xA9 : 0xA8; res = usb_interrupt_msg(shark->usbdev, usb_sndintpipe(shark->usbdev, 0x05), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", shark->led_names[i], res); } } static void shark_led_set_blue(struct led_classdev *led_cdev, enum led_brightness value) { struct shark_device *shark = container_of(led_cdev, struct shark_device, leds[BLUE_LED]); atomic_set(&shark->brightness[BLUE_LED], value); set_bit(BLUE_LED, &shark->brightness_new); clear_bit(BLUE_IS_PULSE, &shark->brightness_new); schedule_work(&shark->led_work); } static void shark_led_set_blue_pulse(struct led_classdev *led_cdev, enum led_brightness value) { struct shark_device *shark = container_of(led_cdev, struct shark_device, leds[BLUE_PULSE_LED]); atomic_set(&shark->brightness[BLUE_PULSE_LED], 256 - value); set_bit(BLUE_PULSE_LED, &shark->brightness_new); set_bit(BLUE_IS_PULSE, &shark->brightness_new); schedule_work(&shark->led_work); } static void shark_led_set_red(struct led_classdev *led_cdev, enum led_brightness value) { struct 
shark_device *shark = container_of(led_cdev, struct shark_device, leds[RED_LED]); atomic_set(&shark->brightness[RED_LED], value); set_bit(RED_LED, &shark->brightness_new); schedule_work(&shark->led_work); } static const struct led_classdev shark_led_templates[NO_LEDS] = { [BLUE_LED] = { .name = "%s:blue:", .brightness = LED_OFF, .max_brightness = 127, .brightness_set = shark_led_set_blue, }, [BLUE_PULSE_LED] = { .name = "%s:blue-pulse:", .brightness = LED_OFF, .max_brightness = 255, .brightness_set = shark_led_set_blue_pulse, }, [RED_LED] = { .name = "%s:red:", .brightness = LED_OFF, .max_brightness = 1, .brightness_set = shark_led_set_red, }, }; static int shark_register_leds(struct shark_device *shark, struct device *dev) { int i, retval; atomic_set(&shark->brightness[BLUE_LED], 127); INIT_WORK(&shark->led_work, shark_led_work); for (i = 0; i < NO_LEDS; i++) { shark->leds[i] = shark_led_templates[i]; snprintf(shark->led_names[i], sizeof(shark->led_names[0]), shark->leds[i].name, shark->v4l2_dev.name); shark->leds[i].name = shark->led_names[i]; retval = led_classdev_register(dev, &shark->leds[i]); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register led: %s\n", shark->led_names[i]); return retval; } } return 0; } static void shark_unregister_leds(struct shark_device *shark) { int i; for (i = 0; i < NO_LEDS; i++) led_classdev_unregister(&shark->leds[i]); cancel_work_sync(&shark->led_work); } static void shark_resume_leds(struct shark_device *shark) { if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) set_bit(BLUE_PULSE_LED, &shark->brightness_new); else set_bit(BLUE_LED, &shark->brightness_new); set_bit(RED_LED, &shark->brightness_new); schedule_work(&shark->led_work); } #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { v4l2_warn(&shark->v4l2_dev, "CONFIG_LEDS_CLASS not enabled, LED support disabled\n"); return 0; } static inline void shark_unregister_leds(struct shark_device *shark) { } static inline void 
shark_resume_leds(struct shark_device *shark) { } #endif static void usb_shark_disconnect(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); mutex_lock(&shark->tea.mutex); v4l2_device_disconnect(&shark->v4l2_dev); snd_tea575x_exit(&shark->tea); mutex_unlock(&shark->tea.mutex); shark_unregister_leds(shark); v4l2_device_put(&shark->v4l2_dev); } static void usb_shark_release(struct v4l2_device *v4l2_dev) { struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); v4l2_device_unregister(&shark->v4l2_dev); kfree(shark->transfer_buffer); kfree(shark); } static int usb_shark_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct shark_device *shark; int retval = -ENOMEM; shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); if (!shark) return retval; shark->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL); if (!shark->transfer_buffer) goto err_alloc_buffer; v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); retval = shark_register_leds(shark, &intf->dev); if (retval) goto err_reg_leds; shark->v4l2_dev.release = usb_shark_release; retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); goto err_reg_dev; } shark->usbdev = interface_to_usbdev(intf); shark->tea.v4l2_dev = &shark->v4l2_dev; shark->tea.private_data = shark; shark->tea.radio_nr = -1; shark->tea.ops = &shark_tea_ops; shark->tea.cannot_mute = true; shark->tea.has_am = true; strlcpy(shark->tea.card, "Griffin radioSHARK", sizeof(shark->tea.card)); usb_make_path(shark->usbdev, shark->tea.bus_info, sizeof(shark->tea.bus_info)); retval = snd_tea575x_init(&shark->tea, THIS_MODULE); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't init tea5757\n"); goto err_init_tea; } return 0; err_init_tea: v4l2_device_unregister(&shark->v4l2_dev); err_reg_dev: shark_unregister_leds(shark); err_reg_leds: 
kfree(shark->transfer_buffer); err_alloc_buffer: kfree(shark); return retval; } #ifdef CONFIG_PM static int usb_shark_suspend(struct usb_interface *intf, pm_message_t message) { return 0; } static int usb_shark_resume(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); mutex_lock(&shark->tea.mutex); snd_tea575x_set_freq(&shark->tea); mutex_unlock(&shark->tea.mutex); shark_resume_leds(shark); return 0; } #endif /* Specify the bcdDevice value, as the radioSHARK and radioSHARK2 share ids */ static struct usb_device_id usb_shark_device_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = 0x077d, .idProduct = 0x627a, .bcdDevice_lo = 0x0001, .bcdDevice_hi = 0x0001, .bInterfaceClass = 3, }, { } }; MODULE_DEVICE_TABLE(usb, usb_shark_device_table); static struct usb_driver usb_shark_driver = { .name = DRV_NAME, .probe = usb_shark_probe, .disconnect = usb_shark_disconnect, .id_table = usb_shark_device_table, #ifdef CONFIG_PM .suspend = usb_shark_suspend, .resume = usb_shark_resume, .reset_resume = usb_shark_resume, #endif }; module_usb_driver(usb_shark_driver);
gpl-2.0
wzhy90/Huawei_Watch_Kernel
fs/reiserfs/journal.c
2153
124107
/* ** Write ahead logging implementation copyright Chris Mason 2000 ** ** The background commits make this code very interrelated, and ** overly complex. I need to rethink things a bit....The major players: ** ** journal_begin -- call with the number of blocks you expect to log. ** If the current transaction is too ** old, it will block until the current transaction is ** finished, and then start a new one. ** Usually, your transaction will get joined in with ** previous ones for speed. ** ** journal_join -- same as journal_begin, but won't block on the current ** transaction regardless of age. Don't ever call ** this. Ever. There are only two places it should be ** called from, and they are both inside this file. ** ** journal_mark_dirty -- adds blocks into this transaction. clears any flags ** that might make them get sent to disk ** and then marks them BH_JDirty. Puts the buffer head ** into the current transaction hash. ** ** journal_end -- if the current transaction is batchable, it does nothing ** otherwise, it could do an async/synchronous commit, or ** a full flush of all log and real blocks in the ** transaction. ** ** flush_old_commits -- if the current transaction is too old, it is ended and ** commit blocks are sent to disk. Forces commit blocks ** to disk for all backgrounded commits that have been ** around too long. 
** -- Note, if you call this as an immediate flush from ** from within kupdate, it will ignore the immediate flag */ #include <linux/time.h> #include <linux/semaphore.h> #include <linux/vmalloc.h> #include "reiserfs.h" #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/workqueue.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/uaccess.h> #include <linux/slab.h> /* gets a struct reiserfs_journal_list * from a list head */ #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ j_list)) #define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ j_working_list)) /* the number of mounted filesystems. This is used to decide when to ** start and kill the commit workqueue */ static int reiserfs_mounted_fs_count; static struct workqueue_struct *commit_wq; #define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit structs at 4k */ #define BUFNR 64 /*read ahead */ /* cnode stat bits. Move these into reiserfs_fs.h */ #define BLOCK_FREED 2 /* this block was freed, and can't be written. 
*/ #define BLOCK_FREED_HOLDER 3 /* this block was freed during this transaction, and can't be written */ #define BLOCK_NEEDS_FLUSH 4 /* used in flush_journal_list */ #define BLOCK_DIRTIED 5 /* journal list state bits */ #define LIST_TOUCHED 1 #define LIST_DIRTY 2 #define LIST_COMMIT_PENDING 4 /* someone will commit this list */ /* flags for do_journal_end */ #define FLUSH_ALL 1 /* flush commit and real blocks */ #define COMMIT_NOW 2 /* end and commit this transaction */ #define WAIT 4 /* wait for the log blocks to hit the disk */ static int do_journal_end(struct reiserfs_transaction_handle *, struct super_block *, unsigned long nblocks, int flags); static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall); static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall); static int can_dirty(struct reiserfs_journal_cnode *cn); static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks); static void release_journal_dev(struct super_block *super, struct reiserfs_journal *journal); static int dirty_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl); static void flush_async_commits(struct work_struct *work); static void queue_log_writer(struct super_block *s); /* values for join in do_journal_begin_r */ enum { JBEGIN_REG = 0, /* regular journal begin */ JBEGIN_JOIN = 1, /* join the running transaction if at all possible */ JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */ }; static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks, int join); static void init_journal_hash(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)); } /* ** clears BH_Dirty and sticks the buffer on the clean list. 
Called because I can't allow refile_buffer to ** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for ** more details. */ static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) { if (bh) { clear_buffer_dirty(bh); clear_buffer_journal_test(bh); } return 0; } static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block *sb) { struct reiserfs_bitmap_node *bn; static int id; bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS); if (!bn) { return NULL; } bn->data = kzalloc(sb->s_blocksize, GFP_NOFS); if (!bn->data) { kfree(bn); return NULL; } bn->id = id++; INIT_LIST_HEAD(&bn->list); return bn; } static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; struct list_head *entry = journal->j_bitmap_nodes.next; journal->j_used_bitmap_nodes++; repeat: if (entry != &journal->j_bitmap_nodes) { bn = list_entry(entry, struct reiserfs_bitmap_node, list); list_del(entry); memset(bn->data, 0, sb->s_blocksize); journal->j_free_bitmap_nodes--; return bn; } bn = allocate_bitmap_node(sb); if (!bn) { yield(); goto repeat; } return bn; } static inline void free_bitmap_node(struct super_block *sb, struct reiserfs_bitmap_node *bn) { struct reiserfs_journal *journal = SB_JOURNAL(sb); journal->j_used_bitmap_nodes--; if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) { kfree(bn->data); kfree(bn); } else { list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; } } static void allocate_bitmap_nodes(struct super_block *sb) { int i; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) { bn = allocate_bitmap_node(sb); if (bn) { list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; } else { break; /* this is ok, we'll try again when more are needed */ } } } 
static int set_bit_in_list_bitmap(struct super_block *sb, b_blocknr_t block, struct reiserfs_list_bitmap *jb) { unsigned int bmap_nr = block / (sb->s_blocksize << 3); unsigned int bit_nr = block % (sb->s_blocksize << 3); if (!jb->bitmaps[bmap_nr]) { jb->bitmaps[bmap_nr] = get_bitmap_node(sb); } set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data); return 0; } static void cleanup_bitmap_list(struct super_block *sb, struct reiserfs_list_bitmap *jb) { int i; if (jb->bitmaps == NULL) return; for (i = 0; i < reiserfs_bmap_count(sb); i++) { if (jb->bitmaps[i]) { free_bitmap_node(sb, jb->bitmaps[i]); jb->bitmaps[i] = NULL; } } } /* ** only call this on FS unmount. */ static int free_list_bitmaps(struct super_block *sb, struct reiserfs_list_bitmap *jb_array) { int i; struct reiserfs_list_bitmap *jb; for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { jb = jb_array + i; jb->journal_list = NULL; cleanup_bitmap_list(sb, jb); vfree(jb->bitmaps); jb->bitmaps = NULL; } return 0; } static int free_bitmap_nodes(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct list_head *next = journal->j_bitmap_nodes.next; struct reiserfs_bitmap_node *bn; while (next != &journal->j_bitmap_nodes) { bn = list_entry(next, struct reiserfs_bitmap_node, list); list_del(next); kfree(bn->data); kfree(bn); next = journal->j_bitmap_nodes.next; journal->j_free_bitmap_nodes--; } return 0; } /* ** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps. ** jb_array is the array to be filled in. 
*/ int reiserfs_allocate_list_bitmaps(struct super_block *sb, struct reiserfs_list_bitmap *jb_array, unsigned int bmap_nr) { int i; int failed = 0; struct reiserfs_list_bitmap *jb; int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *); for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { jb = jb_array + i; jb->journal_list = NULL; jb->bitmaps = vzalloc(mem); if (!jb->bitmaps) { reiserfs_warning(sb, "clm-2000", "unable to " "allocate bitmaps for journal lists"); failed = 1; break; } } if (failed) { free_list_bitmaps(sb, jb_array); return -1; } return 0; } /* ** find an available list bitmap. If you can't find one, flush a commit list ** and try again */ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb, struct reiserfs_journal_list *jl) { int i, j; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_list_bitmap *jb = NULL; for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) { i = journal->j_list_bitmap_index; journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS; jb = journal->j_list_bitmap + i; if (journal->j_list_bitmap[i].journal_list) { flush_commit_list(sb, journal->j_list_bitmap[i]. journal_list, 1); if (!journal->j_list_bitmap[i].journal_list) { break; } } else { break; } } if (jb->journal_list) { /* double check to make sure if flushed correctly */ return NULL; } jb->journal_list = jl; return jb; } /* ** allocates a new chunk of X nodes, and links them all together as a list. 
** Uses the cnode->next and cnode->prev pointers ** returns NULL on failure */ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) { struct reiserfs_journal_cnode *head; int i; if (num_cnodes <= 0) { return NULL; } head = vzalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)); if (!head) { return NULL; } head[0].prev = NULL; head[0].next = head + 1; for (i = 1; i < num_cnodes; i++) { head[i].prev = head + (i - 1); head[i].next = head + (i + 1); /* if last one, overwrite it after the if */ } head[num_cnodes - 1].next = NULL; return head; } /* ** pulls a cnode off the free list, or returns NULL on failure */ static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb) { struct reiserfs_journal_cnode *cn; struct reiserfs_journal *journal = SB_JOURNAL(sb); reiserfs_check_lock_depth(sb, "get_cnode"); if (journal->j_cnode_free <= 0) { return NULL; } journal->j_cnode_used++; journal->j_cnode_free--; cn = journal->j_cnode_free_list; if (!cn) { return cn; } if (cn->next) { cn->next->prev = NULL; } journal->j_cnode_free_list = cn->next; memset(cn, 0, sizeof(struct reiserfs_journal_cnode)); return cn; } /* ** returns a cnode to the free list */ static void free_cnode(struct super_block *sb, struct reiserfs_journal_cnode *cn) { struct reiserfs_journal *journal = SB_JOURNAL(sb); reiserfs_check_lock_depth(sb, "free_cnode"); journal->j_cnode_used--; journal->j_cnode_free++; /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */ cn->next = journal->j_cnode_free_list; if (journal->j_cnode_free_list) { journal->j_cnode_free_list->prev = cn; } cn->prev = NULL; /* not needed with the memset, but I might kill the memset, and forget to do this */ journal->j_cnode_free_list = cn; } static void clear_prepared_bits(struct buffer_head *bh) { clear_buffer_journal_prepared(bh); clear_buffer_journal_restore_dirty(bh); } /* return a cnode with same dev, block number and size in table, or null if not found */ static inline struct reiserfs_journal_cnode 
*get_journal_hash_dev(struct super_block *sb, struct reiserfs_journal_cnode **table, long bl) { struct reiserfs_journal_cnode *cn; cn = journal_hash(table, sb, bl); while (cn) { if (cn->blocknr == bl && cn->sb == sb) return cn; cn = cn->hnext; } return (struct reiserfs_journal_cnode *)0; } /* ** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated ** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever ** being overwritten by a replay after crashing. ** ** If you don't set search_all, a block can only be allocated if it is not in the current transaction. Since deleting ** a block removes it from the current transaction, this case should never happen. If you don't set search_all, make ** sure you never write the block without logging it. ** ** next_zero_bit is a suggestion about the next block to try for find_forward. ** when bl is rejected because it is set in a journal list bitmap, we search ** for the next zero bit in the bitmap that rejected bl. Then, we return that ** through next_zero_bit for find_forward to try. ** ** Just because we return something in next_zero_bit does not mean we won't ** reject it on the next call to reiserfs_in_journal ** */ int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr, int bit_nr, int search_all, b_blocknr_t * next_zero_bit) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn; struct reiserfs_list_bitmap *jb; int i; unsigned long bl; *next_zero_bit = 0; /* always start this at zero. */ PROC_INFO_INC(sb, journal.in_journal); /* If we aren't doing a search_all, this is a metablock, and it will be logged before use. 
** if we crash before the transaction that freed it commits, this transaction won't ** have committed either, and the block will never be written */ if (search_all) { for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { PROC_INFO_INC(sb, journal.in_journal_bitmap); jb = journal->j_list_bitmap + i; if (jb->journal_list && jb->bitmaps[bmap_nr] && test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]-> data)) { *next_zero_bit = find_next_zero_bit((unsigned long *) (jb->bitmaps[bmap_nr]-> data), sb->s_blocksize << 3, bit_nr + 1); return 1; } } } bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr; /* is it in any old transactions? */ if (search_all && (cn = get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) { return 1; } /* is it in the current transaction. This should never happen */ if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) { BUG(); return 1; } PROC_INFO_INC(sb, journal.in_journal_reusable); /* safe for reuse */ return 0; } /* insert cn into table */ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) { struct reiserfs_journal_cnode *cn_orig; cn_orig = journal_hash(table, cn->sb, cn->blocknr); cn->hnext = cn_orig; cn->hprev = NULL; if (cn_orig) { cn_orig->hprev = cn; } journal_hash(table, cn->sb, cn->blocknr) = cn; } /* lock the current transaction */ static inline void lock_journal(struct super_block *sb) { PROC_INFO_INC(sb, journal.lock_journal); reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb); } /* unlock the current transaction */ static inline void unlock_journal(struct super_block *sb) { mutex_unlock(&SB_JOURNAL(sb)->j_mutex); } static inline void get_journal_list(struct reiserfs_journal_list *jl) { jl->j_refcount++; } static inline void put_journal_list(struct super_block *s, struct reiserfs_journal_list *jl) { if (jl->j_refcount < 1) { reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d", jl->j_trans_id, jl->j_refcount); } if (--jl->j_refcount == 0) kfree(jl); } /* ** 
this used to be much more involved, and I'm keeping it just in case things get ugly again. ** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a ** transaction. */ static void cleanup_freed_for_journal_list(struct super_block *sb, struct reiserfs_journal_list *jl) { struct reiserfs_list_bitmap *jb = jl->j_list_bitmap; if (jb) { cleanup_bitmap_list(sb, jb); } jl->j_list_bitmap->journal_list = NULL; jl->j_list_bitmap = NULL; } static int journal_list_still_alive(struct super_block *s, unsigned int trans_id) { struct reiserfs_journal *journal = SB_JOURNAL(s); struct list_head *entry = &journal->j_journal_list; struct reiserfs_journal_list *jl; if (!list_empty(entry)) { jl = JOURNAL_LIST_ENTRY(entry->next); if (jl->j_trans_id <= trans_id) { return 1; } } return 0; } /* * If page->mapping was null, we failed to truncate this page for * some reason. Most likely because it was truncated after being * logged via data=journal. * * This does a check to see if the buffer belongs to one of these * lost pages before doing the final put_bh. If page->mapping was * null, it tries to free buffers on the page, which should make the * final page_cache_release drop the page from the lru. 
*/ static void release_buffer_page(struct buffer_head *bh) { struct page *page = bh->b_page; if (!page->mapping && trylock_page(page)) { page_cache_get(page); put_bh(bh); if (!page->mapping) try_to_free_buffers(page); unlock_page(page); page_cache_release(page); } else { put_bh(bh); } } static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; if (buffer_journaled(bh)) { reiserfs_warning(NULL, "clm-2084", "pinned buffer %lu:%s sent to disk", bh->b_blocknr, bdevname(bh->b_bdev, b)); } if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); release_buffer_page(bh); } static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) { if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); put_bh(bh); } static void submit_logged_buffer(struct buffer_head *bh) { get_bh(bh); bh->b_end_io = reiserfs_end_buffer_io_sync; clear_buffer_journal_new(bh); clear_buffer_dirty(bh); if (!test_clear_buffer_journal_test(bh)) BUG(); if (!buffer_uptodate(bh)) BUG(); submit_bh(WRITE, bh); } static void submit_ordered_buffer(struct buffer_head *bh) { get_bh(bh); bh->b_end_io = reiserfs_end_ordered_io; clear_buffer_dirty(bh); if (!buffer_uptodate(bh)) BUG(); submit_bh(WRITE, bh); } #define CHUNK_SIZE 32 struct buffer_chunk { struct buffer_head *bh[CHUNK_SIZE]; int nr; }; static void write_chunk(struct buffer_chunk *chunk) { int i; for (i = 0; i < chunk->nr; i++) { submit_logged_buffer(chunk->bh[i]); } chunk->nr = 0; } static void write_ordered_chunk(struct buffer_chunk *chunk) { int i; for (i = 0; i < chunk->nr; i++) { submit_ordered_buffer(chunk->bh[i]); } chunk->nr = 0; } static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh, spinlock_t * lock, void (fn) (struct buffer_chunk *)) { int ret = 0; BUG_ON(chunk->nr >= CHUNK_SIZE); chunk->bh[chunk->nr++] = bh; if (chunk->nr >= CHUNK_SIZE) { ret = 1; if (lock) spin_unlock(lock); fn(chunk); if (lock) 
spin_lock(lock); } return ret; } static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0); static struct reiserfs_jh *alloc_jh(void) { struct reiserfs_jh *jh; while (1) { jh = kmalloc(sizeof(*jh), GFP_NOFS); if (jh) { atomic_inc(&nr_reiserfs_jh); return jh; } yield(); } } /* * we want to free the jh when the buffer has been written * and waited on */ void reiserfs_free_jh(struct buffer_head *bh) { struct reiserfs_jh *jh; jh = bh->b_private; if (jh) { bh->b_private = NULL; jh->bh = NULL; list_del_init(&jh->list); kfree(jh); if (atomic_read(&nr_reiserfs_jh) <= 0) BUG(); atomic_dec(&nr_reiserfs_jh); put_bh(bh); } } static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh, int tail) { struct reiserfs_jh *jh; if (bh->b_private) { spin_lock(&j->j_dirty_buffers_lock); if (!bh->b_private) { spin_unlock(&j->j_dirty_buffers_lock); goto no_jh; } jh = bh->b_private; list_del_init(&jh->list); } else { no_jh: get_bh(bh); jh = alloc_jh(); spin_lock(&j->j_dirty_buffers_lock); /* buffer must be locked for __add_jh, should be able to have * two adds at the same time */ BUG_ON(bh->b_private); jh->bh = bh; bh->b_private = jh; } jh->jl = j->j_current_jl; if (tail) list_add_tail(&jh->list, &jh->jl->j_tail_bh_list); else { list_add_tail(&jh->list, &jh->jl->j_bh_list); } spin_unlock(&j->j_dirty_buffers_lock); return 0; } int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) { return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1); } int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) { return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0); } #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list) static int write_ordered_buffers(spinlock_t * lock, struct reiserfs_journal *j, struct reiserfs_journal_list *jl, struct list_head *list) { struct buffer_head *bh; struct reiserfs_jh *jh; int ret = j->j_errno; struct buffer_chunk chunk; struct list_head tmp; INIT_LIST_HEAD(&tmp); chunk.nr = 0; spin_lock(lock); while (!list_empty(list)) { jh = 
JH_ENTRY(list->next); bh = jh->bh; get_bh(bh); if (!trylock_buffer(bh)) { if (!buffer_dirty(bh)) { list_move(&jh->list, &tmp); goto loop_next; } spin_unlock(lock); if (chunk.nr) write_ordered_chunk(&chunk); wait_on_buffer(bh); cond_resched(); spin_lock(lock); goto loop_next; } /* in theory, dirty non-uptodate buffers should never get here, * but the upper layer io error paths still have a few quirks. * Handle them here as gracefully as we can */ if (!buffer_uptodate(bh) && buffer_dirty(bh)) { clear_buffer_dirty(bh); ret = -EIO; } if (buffer_dirty(bh)) { list_move(&jh->list, &tmp); add_to_chunk(&chunk, bh, lock, write_ordered_chunk); } else { reiserfs_free_jh(bh); unlock_buffer(bh); } loop_next: put_bh(bh); cond_resched_lock(lock); } if (chunk.nr) { spin_unlock(lock); write_ordered_chunk(&chunk); spin_lock(lock); } while (!list_empty(&tmp)) { jh = JH_ENTRY(tmp.prev); bh = jh->bh; get_bh(bh); reiserfs_free_jh(bh); if (buffer_locked(bh)) { spin_unlock(lock); wait_on_buffer(bh); spin_lock(lock); } if (!buffer_uptodate(bh)) { ret = -EIO; } /* ugly interaction with invalidatepage here. * reiserfs_invalidate_page will pin any buffer that has a valid * journal head from an older transaction. If someone else sets * our buffer dirty after we write it in the first loop, and * then someone truncates the page away, nobody will ever write * the buffer. We're safe if we write the page one last time * after freeing the journal header. 
*/ if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) { spin_unlock(lock); ll_rw_block(WRITE, 1, &bh); spin_lock(lock); } put_bh(bh); cond_resched_lock(lock); } spin_unlock(lock); return ret; } static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) { struct reiserfs_journal *journal = SB_JOURNAL(s); struct reiserfs_journal_list *other_jl; struct reiserfs_journal_list *first_jl; struct list_head *entry; unsigned int trans_id = jl->j_trans_id; unsigned int other_trans_id; unsigned int first_trans_id; find_first: /* * first we walk backwards to find the oldest uncommitted transation */ first_jl = jl; entry = jl->j_list.prev; while (1) { other_jl = JOURNAL_LIST_ENTRY(entry); if (entry == &journal->j_journal_list || atomic_read(&other_jl->j_older_commits_done)) break; first_jl = other_jl; entry = other_jl->j_list.prev; } /* if we didn't find any older uncommitted transactions, return now */ if (first_jl == jl) { return 0; } first_trans_id = first_jl->j_trans_id; entry = &first_jl->j_list; while (1) { other_jl = JOURNAL_LIST_ENTRY(entry); other_trans_id = other_jl->j_trans_id; if (other_trans_id < trans_id) { if (atomic_read(&other_jl->j_commit_left) != 0) { flush_commit_list(s, other_jl, 0); /* list we were called with is gone, return */ if (!journal_list_still_alive(s, trans_id)) return 1; /* the one we just flushed is gone, this means all * older lists are also gone, so first_jl is no longer * valid either. Go back to the beginning. 
*/ if (!journal_list_still_alive (s, other_trans_id)) { goto find_first; } } entry = entry->next; if (entry == &journal->j_journal_list) return 0; } else { return 0; } } return 0; } static int reiserfs_async_progress_wait(struct super_block *s) { struct reiserfs_journal *j = SB_JOURNAL(s); if (atomic_read(&j->j_async_throttle)) { reiserfs_write_unlock(s); congestion_wait(BLK_RW_ASYNC, HZ / 10); reiserfs_write_lock(s); } return 0; } /* ** if this journal list still has commit blocks unflushed, send them to disk. ** ** log areas must be flushed in order (transaction 2 can't commit before transaction 1) ** Before the commit block can by written, every other log block must be safely on disk ** */ static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) { int i; b_blocknr_t bn; struct buffer_head *tbh = NULL; unsigned int trans_id = jl->j_trans_id; struct reiserfs_journal *journal = SB_JOURNAL(s); int retval = 0; int write_len; reiserfs_check_lock_depth(s, "flush_commit_list"); if (atomic_read(&jl->j_older_commits_done)) { return 0; } /* before we can put our commit blocks on disk, we have to make sure everyone older than ** us is on disk too */ BUG_ON(jl->j_len <= 0); BUG_ON(trans_id == journal->j_trans_id); get_journal_list(jl); if (flushall) { if (flush_older_commits(s, jl) == 1) { /* list disappeared during flush_older_commits. return */ goto put_jl; } } /* make sure nobody is trying to flush this one at the same time */ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s); if (!journal_list_still_alive(s, trans_id)) { mutex_unlock(&jl->j_commit_mutex); goto put_jl; } BUG_ON(jl->j_trans_id == 0); /* this commit is done, exit */ if (atomic_read(&(jl->j_commit_left)) <= 0) { if (flushall) { atomic_set(&(jl->j_older_commits_done), 1); } mutex_unlock(&jl->j_commit_mutex); goto put_jl; } if (!list_empty(&jl->j_bh_list)) { int ret; /* * We might sleep in numerous places inside * write_ordered_buffers. Relax the write lock. 
*/ reiserfs_write_unlock(s); ret = write_ordered_buffers(&journal->j_dirty_buffers_lock, journal, jl, &jl->j_bh_list); if (ret < 0 && retval == 0) retval = ret; reiserfs_write_lock(s); } BUG_ON(!list_empty(&jl->j_bh_list)); /* * for the description block and all the log blocks, submit any buffers * that haven't already reached the disk. Try to write at least 256 * log blocks. later on, we will only wait on blocks that correspond * to this transaction, but while we're unplugging we might as well * get a chunk of data on there. */ atomic_inc(&journal->j_async_throttle); write_len = jl->j_len + 1; if (write_len < 256) write_len = 256; for (i = 0 ; i < write_len ; i++) { bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s); tbh = journal_find_get_block(s, bn); if (tbh) { if (buffer_dirty(tbh)) { reiserfs_write_unlock(s); ll_rw_block(WRITE, 1, &tbh); reiserfs_write_lock(s); } put_bh(tbh) ; } } atomic_dec(&journal->j_async_throttle); for (i = 0; i < (jl->j_len + 1); i++) { bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s); tbh = journal_find_get_block(s, bn); reiserfs_write_unlock(s); wait_on_buffer(tbh); reiserfs_write_lock(s); // since we're using ll_rw_blk above, it might have skipped over // a locked buffer. 
Double check here // /* redundant, sync_dirty_buffer() checks */ if (buffer_dirty(tbh)) { reiserfs_write_unlock(s); sync_dirty_buffer(tbh); reiserfs_write_lock(s); } if (unlikely(!buffer_uptodate(tbh))) { #ifdef CONFIG_REISERFS_CHECK reiserfs_warning(s, "journal-601", "buffer write failed"); #endif retval = -EIO; } put_bh(tbh); /* once for journal_find_get_block */ put_bh(tbh); /* once due to original getblk in do_journal_end */ atomic_dec(&(jl->j_commit_left)); } BUG_ON(atomic_read(&(jl->j_commit_left)) != 1); /* If there was a write error in the journal - we can't commit * this transaction - it will be invalid and, if successful, * will just end up propagating the write error out to * the file system. */ if (likely(!retval && !reiserfs_is_journal_aborted (journal))) { if (buffer_dirty(jl->j_commit_bh)) BUG(); mark_buffer_dirty(jl->j_commit_bh) ; reiserfs_write_unlock(s); if (reiserfs_barrier_flush(s)) __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA); else sync_dirty_buffer(jl->j_commit_bh); reiserfs_write_lock(s); } /* If there was a write error in the journal - we can't commit this * transaction - it will be invalid and, if successful, will just end * up propagating the write error out to the filesystem. */ if (unlikely(!buffer_uptodate(jl->j_commit_bh))) { #ifdef CONFIG_REISERFS_CHECK reiserfs_warning(s, "journal-615", "buffer write failed"); #endif retval = -EIO; } bforget(jl->j_commit_bh); if (journal->j_last_commit_id != 0 && (jl->j_trans_id - journal->j_last_commit_id) != 1) { reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu", journal->j_last_commit_id, jl->j_trans_id); } journal->j_last_commit_id = jl->j_trans_id; /* now, every commit block is on the disk. It is safe to allow blocks freed during this transaction to be reallocated */ cleanup_freed_for_journal_list(s, jl); retval = retval ? 
	retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	mutex_unlock(&jl->j_commit_mutex);
      put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __func__);
	return retval;
}

/*
** flush_journal_list frequently needs to find a newer transaction for a
** given block.  This walks the hash chain backwards (hprev) and returns the
** journal list of the first older hash entry for the same (sb, blocknr)
** that is attached to a list, or NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
							  *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}

/*
** returns 0 if some newer transaction in the hprev chain still logs this
** (sb, blocknr) and has commits outstanding (j_commit_left != 0);
** 1 when no such newer, uncommitted transaction exists
*/
static int newer_jl_done(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
		    atomic_read(&cn->jlist->j_commit_left) != 0)
			return 0;
		cn = cn->hprev;
	}
	return 1;
}

static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);

/*
** once all the real blocks have been flushed, it is safe to remove them from
** the journal list for this transaction.  Aside from freeing the cnode, this
** also allows the block to be reallocated for data blocks if it had been
** deleted.
*/
static void remove_all_from_journal_list(struct super_block *sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	** to lock for each call to remove_journal_hash?
	*/
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(sb, "reiserfs-2201",
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ? 1 : 0,
						 cn->state);
			}
			cn->state = 0;
			remove_journal_hash(sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(sb, last);
	}
	jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the
** header block, write it to the header block.  once this is done, I can
** safely say the log area for this transaction won't ever be replayed, and
** I can start releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
** returns 0 on success, -EIO if the journal is aborted or a header write
** fails
*/
static int _update_journal_header_block(struct super_block *sb,
					unsigned long offset,
					unsigned int trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			/* drop the fs lock while sleeping on buffer I/O */
			reiserfs_write_unlock(sb);
			wait_on_buffer((journal->j_header_bh));
			reiserfs_write_lock(sb);
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(sb, "journal-699",
						 "buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		set_buffer_dirty(journal->j_header_bh);
		reiserfs_write_unlock(sb);

		/* use a FUA-flush write when barriers are enabled so the
		 * header hits stable storage before we proceed */
		if (reiserfs_barrier_flush(sb))
			__sync_dirty_buffer(journal->j_header_bh,
					    WRITE_FLUSH_FUA);
		else
			sync_dirty_buffer(journal->j_header_bh);

		reiserfs_write_lock(sb);
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(sb, "journal-837",
					 "IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}

static int update_journal_header_block(struct super_block *sb,
				       unsigned long offset,
				       unsigned int trans_id)
{
	return _update_journal_header_block(sb, offset, trans_id);
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned int trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
      restart:
	entry = journal->j_journal_list.next;
	/* Did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}

/* take jl off the journal's working list, if it is queued there */
static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.
That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s, "clm-2048", "called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}
	BUG_ON(jl->j_trans_id == 0);

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
	} else if (mutex_trylock(&journal->j_flush_mutex)) {
		/* trylock succeeding here means the caller lied about
		 * already holding j_flush_mutex */
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	** the commit lists of any olders transactions
	*/
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	** or wait on a more recent transaction, or just ignore it
	*/
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s, "journal-844", "journal list is flushing, "
			       "wcount is not 0");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit.  Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		** don't clear BH_JDirty_wait if we aren't the one writing this
		** block to disk
		*/
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer while
			** we are working with it
			*/
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		** sure they are committed, and don't try writing it to disk
		*/
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		** the block got freed in a future transaction
		*/
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this list
		** locked while it works, so we should never see a buffer here that
		** is not marked JDirty_wait
		*/
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s, "journal-813",
					 "BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer tranasction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s, "clm-2082",
					 "Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __func__);
		}
	      free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s, "journal-945",
						 "saved_bh->b_count < 0");
			}
		}
	}
	/* second pass: wait for the writes submitted above to complete */
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s, "journal-1011",
						       "cn->bh is NULL");
				}

				reiserfs_write_unlock(s);
				wait_on_buffer(cn->bh);
				reiserfs_write_lock(s);

				if (!cn->bh) {
					reiserfs_panic(s, "journal-1012",
						       "cn->bh is NULL");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s, "journal-949",
							 "buffer write failed");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit after the up to date
				** check, otherwise we race against our flushpage routine
				*/
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* drop one ref for us */
				put_bh(cn->bh);
				/* drop one ref for journal_mark_dirty */
				release_buffer_page(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __func__);
      flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	** real blocks from all older transactions to disk.  This is because
	** once the header block is updated, this transaction will not be
	** replayed after a crash
	*/
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	** transaction, we must make sure it can never be replayed
	**
	** since we are only called from do_journal_end, we know for sure there
	** are no allocations going on while we are flushing journal lists.  So,
	** we only need to update the journal header block for the last list
	** being flushed
	*/
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len +
						 2) % SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __func__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		mutex_unlock(&journal->j_flush_mutex);
	return err;
}

/*
** returns 1 when the list is trivially finished (empty or no nonzero
** blocks); otherwise walks the realblock list and returns 0 if any block is
** still logged in a newer, not fully committed transaction.
** NOTE(review): both the early-exit of the walk and the fall-through after
** a complete walk return 0 here, so a fully-walked list is still reported
** as "not done" -- confirm this is the intended semantic for callers.
*/
static int test_transaction(struct super_block *s,
			    struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;

	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
		return 1;

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		** skip it
		*/
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && !newer_jl_done(cn))
			return 0;
	      next:
		cn = cn->next;
		cond_resched();
	}
	return 0;
}

/*
** queue the dirty, writable buffers of one journal list into chunk for
** submission; returns the number of buffers added, or < 0 on error
*/
static int write_one_transaction(struct super_block *s, struct
				 reiserfs_journal_list *jl,
				 struct buffer_chunk *chunk)
{
	struct reiserfs_journal_cnode *cn;
	int ret = 0;

	jl->j_state |= LIST_TOUCHED;
	del_from_work_list(s, jl);
	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
		return 0;
	}

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		** skip it
		*/
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
			struct buffer_head *tmp_bh;
			/* we can race against journal_mark_freed when we try
			 * to lock_buffer(cn->bh), so we have to inc the buffer
			 * count, and recheck things after locking
			 */
			tmp_bh = cn->bh;
			get_bh(tmp_bh);
			lock_buffer(tmp_bh);
			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
				if (!buffer_journal_dirty(tmp_bh) ||
				    buffer_journal_prepared(tmp_bh))
					BUG();
				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
				ret++;
			} else {
				/* note, cn->bh might be null now */
				unlock_buffer(tmp_bh);
			}
			put_bh(tmp_bh);
		}
	      next:
		cn = cn->next;
		cond_resched();
	}
	return ret;
}

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/* look for a more recent transaction that logged this
		** buffer.  Only the most recent transaction with a buffer in
		** it is allowed to send that buffer to disk
		*/
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/* if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	/* NOTE(review): ret is never changed from 0 in this function */
	return ret;
}

/*
** write out up to num_blocks buffers (or num_trans transactions, whichever
** limit is non-zero) starting at jl, batching submissions through a
** buffer_chunk.  Returns the last write_one_transaction() result.
** NOTE(review): next_jl and next_trans_id are accepted but never written
** here -- confirm callers do not rely on them being updated.
*/
static int kupdate_transactions(struct super_block *s,
				struct reiserfs_journal_list *jl,
				struct reiserfs_journal_list **next_jl,
				unsigned int *next_trans_id,
				int num_blocks, int num_trans)
{
	int ret = 0;
	int written = 0;
	int transactions_flushed = 0;
	unsigned int orig_trans_id = jl->j_trans_id;
	struct buffer_chunk chunk;
	struct list_head *entry;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	chunk.nr = 0;

	reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
	if (!journal_list_still_alive(s, orig_trans_id)) {
		goto done;
	}

	/* we've got j_flush_mutex held, nobody is going to delete any
	 * of these lists out from underneath us
	 */
	while ((num_trans && transactions_flushed < num_trans) ||
	       (!num_trans && written < num_blocks)) {

		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
		    atomic_read(&jl->j_commit_left)
		    || !(jl->j_state & LIST_DIRTY)) {
			del_from_work_list(s, jl);
			break;
		}
		ret = write_one_transaction(s, jl, &chunk);

		if (ret < 0)
			goto done;
		transactions_flushed++;
		written += ret;
		entry = jl->j_list.next;

		/* did we wrap? */
		if (entry == &journal->j_journal_list) {
			break;
		}
		jl = JOURNAL_LIST_ENTRY(entry);

		/* don't bother with older transactions */
		if (jl->j_trans_id <= orig_trans_id)
			break;
	}
	if (chunk.nr) {
		write_chunk(&chunk);
	}

      done:
	mutex_unlock(&journal->j_flush_mutex);
	return ret;
}

/* for o_sync and fsync heavy applications, they tend to use
** all the journa list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	unsigned long len = 0;
	unsigned long cur_len;
	int ret;
	int i;
	int limit = 256;
	struct reiserfs_journal_list *tjl;
	struct reiserfs_journal_list *flush_jl;
	unsigned int trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);

	flush_jl = tjl = jl;

	/* in data logging mode, try harder to flush a lot of blocks */
	if (reiserfs_data_log(s))
		limit = 1024;
	/* flush for 256 transactions or limit blocks, whichever comes first */
	for (i = 0; i < 256 && len < limit; i++) {
		/* stop before a list with outstanding commits, or one older
		 * than the starting list */
		if (atomic_read(&tjl->j_commit_left) ||
		    tjl->j_trans_id < jl->j_trans_id) {
			break;
		}
		cur_len = atomic_read(&tjl->j_nonzerolen);
		if (cur_len > 0) {
			tjl->j_state &= ~LIST_TOUCHED;
		}
		len += cur_len;
		flush_jl = tjl;
		if (tjl->j_list.next == &journal->j_journal_list)
			break;
		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
	}
	/* try to find a group of blocks we can flush across all the
	** transactions, but only bother if we've actually spanned
	** across multiple lists
	*/
	if (flush_jl != jl) {
		/* NOTE(review): the return value of kupdate_transactions is
		 * stored in ret but never checked; a failed write pass still
		 * falls through to flush_journal_list below -- confirm this
		 * is intentional */
		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
	}
	flush_journal_list(s, flush_jl, 1);
	return 0;
}

/*
** removes any nodes in table with name block and dev as bh.
** only touchs the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
			 struct reiserfs_journal_cnode **table,
			 struct reiserfs_journal_list *jl,
			 unsigned long block, int remove_freed)
{
	struct reiserfs_journal_cnode *cur;
	struct reiserfs_journal_cnode **head;

	head = &(journal_hash(table, sb, block));
	if (!head) {
		return;
	}
	cur = *head;
	while (cur) {
		/* match on (block, sb); honor jl filter and the
		 * remove_freed request for BLOCK_FREED entries */
		if (cur->blocknr == block && cur->sb == sb
		    && (jl == NULL || jl == cur->jlist)
		    && (!test_bit(BLOCK_FREED, &cur->state)
			|| remove_freed)) {
			if (cur->hnext) {
				cur->hnext->hprev = cur->hprev;
			}
			if (cur->hprev) {
				cur->hprev->hnext = cur->hnext;
			} else {
				*head = cur->hnext;
			}
			cur->blocknr = 0;
			cur->sb = NULL;
			cur->state = 0;
			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
				atomic_dec(&(cur->jlist->j_nonzerolen));
			cur->bh = NULL;
			cur->jlist = NULL;
		}
		cur = cur->hnext;
	}
}

/* free all journal memory: cnodes, bitmaps, and the header buffer; finally
** releases the journal device and the journal struct itself */
static void free_journal_ram(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	kfree(journal->j_current_jl);
	journal->j_num_lists--;

	vfree(journal->j_cnode_free_orig);
	free_list_bitmaps(sb, journal->j_list_bitmap);
	free_bitmap_nodes(sb);	/* must be after free_list_bitmaps */
	if (journal->j_header_bh) {
		brelse(journal->j_header_bh);
	}
	/* j_header_bh is on the journal dev, make sure not to release the journal
	 * dev until we brelse j_header_bh
	 */
	release_journal_dev(sb, journal);
	vfree(journal);
}

/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, int error)
{
	struct reiserfs_transaction_handle myth;
	int flushed = 0;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	/* we only want to flush out transactions if we were called with error == 0 */
	if (!error && !(sb->s_flags & MS_RDONLY)) {
		/* end the current trans */
		BUG_ON(!th->t_trans_id);
		do_journal_end(th, sb, 10, FLUSH_ALL);

		/* make sure something gets logged to force our way into the flush code */
		if (!journal_join(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb, SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
			/* NOTE(review): flushed is set but never read
			 * afterwards in this function */
			flushed = 1;
		}
	}

	/* this also catches errors during the do_journal_end above */
	if (!error && reiserfs_is_journal_aborted(journal)) {
		memset(&myth, 0, sizeof(myth));
		if (!journal_join_abort(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb, SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
		}
	}

	reiserfs_mounted_fs_count--;
	/* wait for all commits to finish */
	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);

	/*
	 * We must release the write lock here because
	 * the workqueue job (flush_async_commit) needs this lock
	 */
	reiserfs_write_unlock(sb);

	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
	flush_workqueue(commit_wq);

	if (!reiserfs_mounted_fs_count) {
		destroy_workqueue(commit_wq);
		commit_wq = NULL;
	}

	free_journal_ram(sb);

	reiserfs_write_lock(sb);

	return 0;
}

/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th,
		    struct super_block *sb)
{
	return do_journal_release(th, sb, 0);
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th,
			  struct super_block *sb)
{
	return do_journal_release(th, sb, 1);
}

/* compares description block with commit block.
returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *sb,
				       struct reiserfs_journal_desc *desc,
				       struct reiserfs_journal_commit *commit)
{
	/* a commit is bogus if its trans id or length disagree with the
	 * description block, or its length is out of range */
	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
	    get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
	    get_commit_trans_len(commit) <= 0) {
		return 1;
	}
	return 0;
}

/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *sb,
					struct buffer_head *d_bh,
					unsigned int *oldest_invalid_trans_id,
					unsigned long *newest_mount_id)
{
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;
	unsigned long offset;

	if (!d_bh)
		return 0;

	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
	if (get_desc_trans_len(desc) > 0
	    && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
		if (oldest_invalid_trans_id && *oldest_invalid_trans_id
		    && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-986: transaction "
				       "is valid returning because trans_id %d is greater than "
				       "oldest_invalid %lu",
				       get_desc_trans_id(desc),
				       *oldest_invalid_trans_id);
			return 0;
		}
		if (newest_mount_id
		    && *newest_mount_id > get_desc_mount_id(desc)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-1087: transaction "
				       "is valid returning because mount_id %d is less than "
				       "newest_mount_id %lu",
				       get_desc_mount_id(desc),
				       *newest_mount_id);
			return -1;
		}
		if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
			reiserfs_warning(sb, "journal-2018",
					 "Bad transaction length %d "
					 "encountered, ignoring transaction",
					 get_desc_trans_len(desc));
			return -1;
		}
		offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);

		/* ok, we have a journal description block, lets see if the transaction was valid */
		c_bh =
		    journal_bread(sb,
				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				  ((offset + get_desc_trans_len(desc) +
				    1) % SB_ONDISK_JOURNAL_SIZE(sb)));
		if (!c_bh)
			return 0;
		commit = (struct reiserfs_journal_commit *)c_bh->b_data;
		if (journal_compare_desc_commit(sb, desc, commit)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal_transaction_is_valid, commit offset %ld had bad "
				       "time %d or length %d",
				       c_bh->b_blocknr -
				       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
				       get_commit_trans_id(commit),
				       get_commit_trans_len(commit));
			brelse(c_bh);
			if (oldest_invalid_trans_id) {
				*oldest_invalid_trans_id =
				    get_desc_trans_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1004: "
					       "transaction_is_valid setting oldest invalid trans_id "
					       "to %d",
					       get_desc_trans_id(desc));
			}
			return -1;
		}
		brelse(c_bh);
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1006: found valid "
			       "transaction start offset %llu, len %d id %d",
			       d_bh->b_blocknr -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_desc_trans_len(desc),
			       get_desc_trans_id(desc));
		return 1;
	} else {
		return 0;
	}
}

/* release the buffer-head references held in the first num slots of heads */
static void brelse_array(struct buffer_head **heads, int num)
{
	int i;
	for (i = 0; i < num; i++) {
		brelse(heads[i]);
	}
}

/*
** given the start, and values for the oldest acceptable transactions,
** this either reads in a replays a transaction, or returns because the
** transaction is invalid, or too old.
*/
static int journal_read_transaction(struct super_block *sb,
				    unsigned long cur_dblock,
				    unsigned long oldest_start,
				    unsigned int oldest_trans_id,
				    unsigned long newest_mount_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	unsigned int trans_id = 0;
	struct buffer_head *c_bh;
	struct buffer_head *d_bh;
	struct buffer_head **log_blocks = NULL;
	struct buffer_head **real_blocks = NULL;
	unsigned int trans_offset;
	int i;
	int trans_half;

	d_bh = journal_bread(sb, cur_dblock);
	if (!d_bh)
		return 1;
	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
	trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: "
		       "journal_read_transaction, offset %llu, len %d mount_id %d",
		       d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
		       get_desc_trans_len(desc), get_desc_mount_id(desc));
	if (get_desc_trans_id(desc) < oldest_trans_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: "
			       "journal_read_trans skipping because %lu is too old",
			       cur_dblock -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb));
		brelse(d_bh);
		return 1;
	}
	if (get_desc_mount_id(desc) != newest_mount_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: "
			       "journal_read_trans skipping because %d is != "
			       "newest_mount_id %lu", get_desc_mount_id(desc),
			       newest_mount_id);
		brelse(d_bh);
		return 1;
	}
	/* the commit block lives right after the logged blocks */
	c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			     ((trans_offset + get_desc_trans_len(desc) + 1) %
			      SB_ONDISK_JOURNAL_SIZE(sb)));
	if (!c_bh) {
		brelse(d_bh);
		return 1;
	}
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	if (journal_compare_desc_commit(sb, desc, commit)) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal_read_transaction, "
			       "commit offset %llu had bad time %d or length %d",
			       c_bh->b_blocknr -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_commit_trans_id(commit),
			       get_commit_trans_len(commit));
		brelse(c_bh);
		brelse(d_bh);
		return 1;
	}

	if (bdev_read_only(sb->s_bdev)) {
		reiserfs_warning(sb, "clm-2076",
				 "device is readonly, unable to replay log");
		brelse(c_bh);
		brelse(d_bh);
		return -EROFS;
	}

	trans_id = get_desc_trans_id(desc);
	/* now we know we've got a good transaction, and it was inside the valid time ranges */
	log_blocks = kmalloc(get_desc_trans_len(desc) *
			     sizeof(struct buffer_head *), GFP_NOFS);
	real_blocks = kmalloc(get_desc_trans_len(desc) *
			      sizeof(struct buffer_head *), GFP_NOFS);
	if (!log_blocks || !real_blocks) {
		brelse(c_bh);
		brelse(d_bh);
		kfree(log_blocks);
		kfree(real_blocks);
		reiserfs_warning(sb, "journal-1169",
				 "kmalloc failed, unable to mount FS");
		return -1;
	}
	/* get all the buffer heads */
	trans_half = journal_trans_half(sb->s_blocksize);
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		log_blocks[i] =
		    journal_getblk(sb,
				   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				   (trans_offset + 1 +
				    i) % SB_ONDISK_JOURNAL_SIZE(sb));
		/* real-block numbers for the first half come from the desc
		 * block, the rest from the commit block */
		if (i < trans_half) {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(desc->j_realblock[i]));
		} else {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(commit->
						  j_realblock[i - trans_half]));
		}
		if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
			reiserfs_warning(sb, "journal-1207",
					 "REPLAY FAILURE fsck required! "
					 "Block to replay is outside of "
					 "filesystem");
			goto abort_replay;
		}
		/* make sure we don't try to replay onto log or reserved area */
		if (is_block_in_log_or_reserved_area
		    (sb, real_blocks[i]->b_blocknr)) {
			reiserfs_warning(sb, "journal-1204",
					 "REPLAY FAILURE fsck required! "
					 "Trying to replay onto a log block");
		      abort_replay:
			brelse_array(log_blocks, i);
			brelse_array(real_blocks, i);
			brelse(c_bh);
			brelse(d_bh);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
	}
	/* read in the log blocks, memcpy to the corresponding real block */
	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
	for (i = 0; i < get_desc_trans_len(desc); i++) {

		reiserfs_write_unlock(sb);
		wait_on_buffer(log_blocks[i]);
		reiserfs_write_lock(sb);

		if (!buffer_uptodate(log_blocks[i])) {
			reiserfs_warning(sb, "journal-1212",
					 "REPLAY FAILURE fsck required! "
					 "buffer write failed");
			brelse_array(log_blocks + i,
				     get_desc_trans_len(desc) - i);
			brelse_array(real_blocks, get_desc_trans_len(desc));
			brelse(c_bh);
			brelse(d_bh);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
		memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
		       real_blocks[i]->b_size);
		set_buffer_uptodate(real_blocks[i]);
		brelse(log_blocks[i]);
	}
	/* flush out the real blocks */
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		set_buffer_dirty(real_blocks[i]);
		write_dirty_buffer(real_blocks[i], WRITE);
	}
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		wait_on_buffer(real_blocks[i]);
		if (!buffer_uptodate(real_blocks[i])) {
			reiserfs_warning(sb, "journal-1226",
					 "REPLAY FAILURE, fsck required! "
					 "buffer write failed");
			brelse_array(real_blocks + i,
				     get_desc_trans_len(desc) - i);
			brelse(c_bh);
			brelse(d_bh);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
		brelse(real_blocks[i]);
	}
	cur_dblock =
	    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
	    ((trans_offset + get_desc_trans_len(desc) +
	      2) % SB_ONDISK_JOURNAL_SIZE(sb));
	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
		       "journal-1095: setting journal " "start to offset %ld",
		       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));

	/* init starting values for the first transaction, in case this is the last transaction to be replayed. */
	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	journal->j_last_flush_trans_id = trans_id;
	journal->j_trans_id = trans_id + 1;
	/* check for trans_id overflow */
	if (journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	brelse(c_bh);
	brelse(d_bh);
	kfree(log_blocks);
	kfree(real_blocks);
	return 0;
}

/* This function reads blocks starting from block and to max_block of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
static struct buffer_head *reiserfs_breada(struct block_device *dev,
					   b_blocknr_t block, int bufsize,
					   b_blocknr_t max_block)
{
	struct buffer_head *bhlist[BUFNR];
	unsigned int blocks = BUFNR;
	struct buffer_head *bh;
	int i, j;

	bh = __getblk(dev, block, bufsize);
	if (buffer_uptodate(bh))
		return (bh);

	if (block + BUFNR > max_block) {
		blocks = max_block - block;
	}
	bhlist[0] = bh;
	j = 1;
	/* gather read-ahead buffers until one is already up to date */
	for (i = 1; i < blocks; i++) {
		bh = __getblk(dev, block + i, bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		} else
			bhlist[j++] = bh;
	}
	ll_rw_block(READ, j, bhlist);
	/* only the first buffer is returned; drop the read-ahead refs */
	for (i = 1; i < j; i++)
		brelse(bhlist[i]);
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}

/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will
** be to an invalid transaction.  This tests that before finding all the
** transactions in the log, which makes normal mount times fast.
**
** After a crash, this starts with the next unflushed transaction, and
** replays until it finds one too old, or invalid.
**
** On exit, it sets things up so the first transaction will work correctly.
 */
static int journal_read(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_desc *desc;
	unsigned int oldest_trans_id = 0;
	unsigned int oldest_invalid_trans_id = 0;
	time_t start;
	unsigned long oldest_start = 0;
	unsigned long cur_dblock = 0;
	unsigned long newest_mount_id = 9;
	struct buffer_head *d_bh;
	struct reiserfs_journal_header *jh;
	int valid_journal_header = 0;
	int replay_count = 0;
	int continue_replay = 1;
	int ret;
	char b[BDEVNAME_SIZE];

	cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	reiserfs_info(sb, "checking transaction log (%s)\n",
		      bdevname(journal->j_dev_bd, b));
	start = get_seconds();

	/* step 1, read in the journal header block.  Check the transaction it says
	 ** is the first unflushed, and if that transaction is not valid,
	 ** replay is done
	 */
	journal->j_header_bh = journal_bread(sb,
					     SB_ONDISK_JOURNAL_1st_BLOCK(sb)
					     + SB_ONDISK_JOURNAL_SIZE(sb));
	if (!journal->j_header_bh) {
		return 1;
	}
	jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
	if (le32_to_cpu(jh->j_first_unflushed_offset) <
	    SB_ONDISK_JOURNAL_SIZE(sb)
	    && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
		oldest_start =
		    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
		    le32_to_cpu(jh->j_first_unflushed_offset);
		oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		newest_mount_id = le32_to_cpu(jh->j_mount_id);
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1153: found in "
			       "header: first_unflushed_offset %d, last_flushed_trans_id "
			       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
			       le32_to_cpu(jh->j_last_flush_trans_id));
		valid_journal_header = 1;

		/* now, we try to read the first unflushed offset.  If it is not valid,
		 ** there is nothing more we can do, and it makes no sense to read
		 ** through the whole log.
		 */
		d_bh =
		    journal_bread(sb,
				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				  le32_to_cpu(jh->j_first_unflushed_offset));
		ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
		if (!ret) {
			continue_replay = 0;
		}
		brelse(d_bh);
		goto start_log_replay;
	}

	/* ok, there are transactions that need to be replayed.  start with the first log block, find
	 ** all the valid transactions, and pick out the oldest.
	 */
	while (continue_replay
	       && cur_dblock <
	       (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
		SB_ONDISK_JOURNAL_SIZE(sb))) {
		/* Note that it is required for blocksize of primary fs device and journal
		   device to be the same */
		d_bh =
		    reiserfs_breada(journal->j_dev_bd, cur_dblock,
				    sb->s_blocksize,
				    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				    SB_ONDISK_JOURNAL_SIZE(sb));
		ret =
		    journal_transaction_is_valid(sb, d_bh,
						 &oldest_invalid_trans_id,
						 &newest_mount_id);
		if (ret == 1) {
			desc = (struct reiserfs_journal_desc *)d_bh->b_data;
			if (oldest_start == 0) {	/* init all oldest_ values */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1179: Setting "
					       "oldest_start to offset %llu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (sb), oldest_trans_id);
			} else if (oldest_trans_id > get_desc_trans_id(desc)) {
				/* one we just read was older */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1180: Resetting "
					       "oldest_start to offset %lu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (sb), oldest_trans_id);
			}
			if (newest_mount_id < get_desc_mount_id(desc)) {
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1299: Setting "
					       "newest_mount_id to %d",
					       get_desc_mount_id(desc));
			}
			/* skip over the transaction: desc block + log blocks + commit block */
			cur_dblock += get_desc_trans_len(desc) + 2;
		} else {
			cur_dblock++;
		}
		brelse(d_bh);
	}

      start_log_replay:
	cur_dblock = oldest_start;
	if (oldest_trans_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1206: Starting replay "
			       "from offset %llu, trans_id %lu",
			       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       oldest_trans_id);

	}
	replay_count = 0;
	/* replay forward from the oldest transaction until one is too old/invalid */
	while (continue_replay && oldest_trans_id > 0) {
		ret =
		    journal_read_transaction(sb, cur_dblock, oldest_start,
					     oldest_trans_id, newest_mount_id);
		if (ret < 0) {
			return ret;
		} else if (ret != 0) {
			break;
		}
		cur_dblock =
		    SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
		replay_count++;
		/* wrapped all the way around without advancing: stop */
		if (cur_dblock == oldest_start)
			break;
	}

	if (oldest_trans_id == 0) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1225: No valid " "transactions found");
	}
	/* j_start does not get set correctly if we don't replay any transactions.
	 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
	 ** copy the trans_id from the header
	 */
	if (valid_journal_header && replay_count == 0) {
		journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
		journal->j_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		/* check for trans_id overflow */
		if (journal->j_trans_id == 0)
			journal->j_trans_id = 10;
		journal->j_last_flush_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id);
		journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
	} else {
		journal->j_mount_id = newest_mount_id + 1;
	}
	reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
		       "newest_mount_id to %lu", journal->j_mount_id);
	journal->j_first_unflushed_offset = journal->j_start;
	if (replay_count > 0) {
		reiserfs_info(sb,
			      "replayed %d transactions in %lu seconds\n",
			      replay_count, get_seconds() - start);
	}
	/* persist the new journal head unless the device is read-only */
	if (!bdev_read_only(sb->s_bdev) &&
	    _update_journal_header_block(sb, journal->j_start,
					 journal->j_last_flush_trans_id)) {
		/* replay failed, caller must call free_journal_ram and abort
		 ** the mount
		 */
		return -1;
	}
	return 0;
}

/* allocate and initialize an empty journal list; __GFP_NOFAIL means the
   kzalloc cannot return NULL here */
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
	struct reiserfs_journal_list *jl;
	jl = kzalloc(sizeof(struct reiserfs_journal_list),
		     GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&jl->j_list);
	INIT_LIST_HEAD(&jl->j_working_list);
	INIT_LIST_HEAD(&jl->j_tail_bh_list);
	INIT_LIST_HEAD(&jl->j_bh_list);
	mutex_init(&jl->j_commit_mutex);
	SB_JOURNAL(s)->j_num_lists++;
	get_journal_list(jl);
	return jl;
}

/* make a fresh journal list the current one for this super block */
static void journal_list_init(struct super_block *sb)
{
	SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
}

/* drop our reference on the (possibly separate) journal block device */
static void release_journal_dev(struct super_block *super,
				struct reiserfs_journal *journal)
{
	if (journal->j_dev_bd != NULL) {
		blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
		journal->j_dev_bd = NULL;
	}
}

/* open the journal block device: either the device recorded in the on-disk
   super block / the fs device itself (no "jdev" mount option), or the device
   named by @jdev_name.  Returns 0 on success, negative errno on failure. */
static int journal_init_dev(struct super_block *super,
			    struct reiserfs_journal *journal,
			    const char *jdev_name)
{
	int result;
	dev_t jdev;
	fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	char b[BDEVNAME_SIZE];

	result = 0;

	journal->j_dev_bd = NULL;
	jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
	    new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

	if (bdev_read_only(super->s_bdev))
		blkdev_mode = FMODE_READ;

	/* there is no "jdev" option and journal is on separate device */
	if ((!jdev_name || !jdev_name[0])) {
		/* don't claim exclusive access when journal shares the fs device */
		if (jdev == super->s_dev)
			blkdev_mode &= ~FMODE_EXCL;
		journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode,
						      journal);
		journal->j_dev_mode = blkdev_mode;
		if (IS_ERR(journal->j_dev_bd)) {
			result = PTR_ERR(journal->j_dev_bd);
			journal->j_dev_bd = NULL;
			reiserfs_warning(super, "sh-458",
					 "cannot init journal device '%s': %i",
					 __bdevname(jdev, b), result);
			return result;
		} else if (jdev != super->s_dev)
			set_blocksize(journal->j_dev_bd, super->s_blocksize);

		return 0;
	}

	journal->j_dev_mode = blkdev_mode;
	journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal);
	if (IS_ERR(journal->j_dev_bd)) {
		result = PTR_ERR(journal->j_dev_bd);
		journal->j_dev_bd = NULL;
		reiserfs_warning(super,
				 "journal_init_dev: Cannot open '%s': %i",
				 jdev_name, result);
		return result;
	}

	set_blocksize(journal->j_dev_bd, super->s_blocksize);
	reiserfs_info(super,
		      "journal_init_dev: journal device: %s\n",
		      bdevname(journal->j_dev_bd, b));
	return 0;
}

/**
 * When creating/tuning a file system user can assign some
 * journal params within boundaries which depend on the ratio
 * blocksize/standard_blocksize.
 *
 * For blocks >= standard_blocksize transaction size should
 * be not less then JOURNAL_TRANS_MIN_DEFAULT, and not more
 * then JOURNAL_TRANS_MAX_DEFAULT.
 *
 * For blocks < standard_blocksize these boundaries should be
 * decreased proportionally.
 */
#define REISERFS_STANDARD_BLKSIZE (4096)

/* validate (or, for zero-filled old-format superblocks, supply defaults for)
   j_trans_max / j_max_batch / j_max_commit_age; returns 1 on bad params */
static int check_advise_trans_params(struct super_block *sb,
				     struct reiserfs_journal *journal)
{
	if (journal->j_trans_max) {
		/* Non-default journal params.
		   Do sanity check for them. */
		int ratio = 1;
		if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
			ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;

		if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
		    journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
		    SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
		    JOURNAL_MIN_RATIO) {
			reiserfs_warning(sb, "sh-462",
					 "bad transaction max size (%u). "
					 "FSCK?", journal->j_trans_max);
			return 1;
		}
		if (journal->j_max_batch != (journal->j_trans_max) *
		    JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT) {
			reiserfs_warning(sb, "sh-463",
					 "bad transaction max batch (%u). "
					 "FSCK?", journal->j_max_batch);
			return 1;
		}
	} else {
		/* Default journal params.
		   The file system was created by old version
		   of mkreiserfs, so some fields contain zeros,
		   and we need to advise proper values for them */
		if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
			reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
					 sb->s_blocksize);
			return 1;
		}
		journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
		journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
		journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
	}
	return 0;
}

/*
** must be called once on fs mount.
calls journal_read for you */ int journal_init(struct super_block *sb, const char *j_dev_name, int old_format, unsigned int commit_max_age) { int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2; struct buffer_head *bhjh; struct reiserfs_super_block *rs; struct reiserfs_journal_header *jh; struct reiserfs_journal *journal; struct reiserfs_journal_list *jl; char b[BDEVNAME_SIZE]; int ret; journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal)); if (!journal) { reiserfs_warning(sb, "journal-1256", "unable to get memory for journal structure"); return 1; } INIT_LIST_HEAD(&journal->j_bitmap_nodes); INIT_LIST_HEAD(&journal->j_prealloc_list); INIT_LIST_HEAD(&journal->j_working_list); INIT_LIST_HEAD(&journal->j_journal_list); journal->j_persistent_trans = 0; if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap, reiserfs_bmap_count(sb))) goto free_and_return; allocate_bitmap_nodes(sb); /* reserved for journal area support */ SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ? REISERFS_OLD_DISK_OFFSET_IN_BYTES / sb->s_blocksize + reiserfs_bmap_count(sb) + 1 : REISERFS_DISK_OFFSET_IN_BYTES / sb->s_blocksize + 2); /* Sanity check to see is the standard journal fitting within first bitmap (actual for small blocksizes) */ if (!SB_ONDISK_JOURNAL_DEVICE(sb) && (SB_JOURNAL_1st_RESERVED_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) { reiserfs_warning(sb, "journal-1393", "journal does not fit for area addressed " "by first of bitmap blocks. It starts at " "%u and its size is %u. 
Block size %ld", SB_JOURNAL_1st_RESERVED_BLOCK(sb), SB_ONDISK_JOURNAL_SIZE(sb), sb->s_blocksize); goto free_and_return; } if (journal_init_dev(sb, journal, j_dev_name) != 0) { reiserfs_warning(sb, "sh-462", "unable to initialize jornal device"); goto free_and_return; } rs = SB_DISK_SUPER_BLOCK(sb); /* read journal header */ bhjh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb)); if (!bhjh) { reiserfs_warning(sb, "sh-459", "unable to read journal header"); goto free_and_return; } jh = (struct reiserfs_journal_header *)(bhjh->b_data); /* make sure that journal matches to the super block */ if (is_reiserfs_jr(rs) && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != sb_jp_journal_magic(rs))) { reiserfs_warning(sb, "sh-460", "journal header magic %x (device %s) does " "not match to magic found in super block %x", jh->jh_journal.jp_journal_magic, bdevname(journal->j_dev_bd, b), sb_jp_journal_magic(rs)); brelse(bhjh); goto free_and_return; } journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max); journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch); journal->j_max_commit_age = le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age); journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE; if (check_advise_trans_params(sb, journal) != 0) goto free_and_return; journal->j_default_max_commit_age = journal->j_max_commit_age; if (commit_max_age != 0) { journal->j_max_commit_age = commit_max_age; journal->j_max_trans_age = commit_max_age; } reiserfs_info(sb, "journal params: device %s, size %u, " "journal first block %u, max trans len %u, max batch %u, " "max commit age %u, max trans age %u\n", bdevname(journal->j_dev_bd, b), SB_ONDISK_JOURNAL_SIZE(sb), SB_ONDISK_JOURNAL_1st_BLOCK(sb), journal->j_trans_max, journal->j_max_batch, journal->j_max_commit_age, journal->j_max_trans_age); brelse(bhjh); journal->j_list_bitmap_index = 0; journal_list_init(sb); memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct 
reiserfs_journal_cnode *)); INIT_LIST_HEAD(&journal->j_dirty_buffers); spin_lock_init(&journal->j_dirty_buffers_lock); journal->j_start = 0; journal->j_len = 0; journal->j_len_alloc = 0; atomic_set(&(journal->j_wcount), 0); atomic_set(&(journal->j_async_throttle), 0); journal->j_bcount = 0; journal->j_trans_start_time = 0; journal->j_last = NULL; journal->j_first = NULL; init_waitqueue_head(&(journal->j_join_wait)); mutex_init(&journal->j_mutex); mutex_init(&journal->j_flush_mutex); journal->j_trans_id = 10; journal->j_mount_id = 10; journal->j_state = 0; atomic_set(&(journal->j_jlock), 0); journal->j_cnode_free_list = allocate_cnodes(num_cnodes); journal->j_cnode_free_orig = journal->j_cnode_free_list; journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0; journal->j_cnode_used = 0; journal->j_must_wait = 0; if (journal->j_cnode_free == 0) { reiserfs_warning(sb, "journal-2004", "Journal cnode memory " "allocation failed (%ld bytes). Journal is " "too large for available memory. Usually " "this is due to a journal that is too large.", sizeof (struct reiserfs_journal_cnode) * num_cnodes); goto free_and_return; } init_journal_hash(sb); jl = journal->j_current_jl; /* * get_list_bitmap() may call flush_commit_list() which * requires the lock. Calling flush_commit_list() shouldn't happen * this early but I like to be paranoid. */ reiserfs_write_lock(sb); jl->j_list_bitmap = get_list_bitmap(sb, jl); reiserfs_write_unlock(sb); if (!jl->j_list_bitmap) { reiserfs_warning(sb, "journal-2005", "get_list_bitmap failed for journal list 0"); goto free_and_return; } /* * Journal_read needs to be inspected in order to push down * the lock further inside (or even remove it). 
*/ reiserfs_write_lock(sb); ret = journal_read(sb); reiserfs_write_unlock(sb); if (ret < 0) { reiserfs_warning(sb, "reiserfs-2006", "Replay Failure, unable to mount"); goto free_and_return; } reiserfs_mounted_fs_count++; if (reiserfs_mounted_fs_count <= 1) commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0); INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); journal->j_work_sb = sb; return 0; free_and_return: free_journal_ram(sb); return 1; } /* ** test for a polite end of the current transaction. Used by file_write, and should ** be used by delete to make sure they don't write more than can fit inside a single ** transaction */ int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) { struct reiserfs_journal *journal = SB_JOURNAL(th->t_super); time_t now = get_seconds(); /* cannot restart while nested */ BUG_ON(!th->t_trans_id); if (th->t_refcount > 1) return 0; if (journal->j_must_wait > 0 || (journal->j_len_alloc + new_alloc) >= journal->j_max_batch || atomic_read(&(journal->j_jlock)) || (now - journal->j_trans_start_time) > journal->j_max_trans_age || journal->j_cnode_free < (journal->j_trans_max * 3)) { return 1; } journal->j_len_alloc += new_alloc; th->t_blocks_allocated += new_alloc ; return 0; } /* this must be called inside a transaction */ void reiserfs_block_writes(struct reiserfs_transaction_handle *th) { struct reiserfs_journal *journal = SB_JOURNAL(th->t_super); BUG_ON(!th->t_trans_id); journal->j_must_wait = 1; set_bit(J_WRITERS_BLOCKED, &journal->j_state); return; } /* this must be called without a transaction started */ void reiserfs_allow_writes(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); clear_bit(J_WRITERS_BLOCKED, &journal->j_state); wake_up(&journal->j_join_wait); } /* this must be called without a transaction started */ void reiserfs_wait_on_write_block(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); wait_event(journal->j_join_wait, 
!test_bit(J_WRITERS_BLOCKED, &journal->j_state)); } static void queue_log_writer(struct super_block *s) { wait_queue_t wait; struct reiserfs_journal *journal = SB_JOURNAL(s); set_bit(J_WRITERS_QUEUED, &journal->j_state); /* * we don't want to use wait_event here because * we only want to wait once. */ init_waitqueue_entry(&wait, current); add_wait_queue(&journal->j_join_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) { reiserfs_write_unlock(s); schedule(); reiserfs_write_lock(s); } __set_current_state(TASK_RUNNING); remove_wait_queue(&journal->j_join_wait, &wait); } static void wake_queued_writers(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state)) wake_up(&journal->j_join_wait); } static void let_transaction_grow(struct super_block *sb, unsigned int trans_id) { struct reiserfs_journal *journal = SB_JOURNAL(sb); unsigned long bcount = journal->j_bcount; while (1) { reiserfs_write_unlock(sb); schedule_timeout_uninterruptible(1); reiserfs_write_lock(sb); journal->j_current_jl->j_state |= LIST_COMMIT_PENDING; while ((atomic_read(&journal->j_wcount) > 0 || atomic_read(&journal->j_jlock)) && journal->j_trans_id == trans_id) { queue_log_writer(sb); } if (journal->j_trans_id != trans_id) break; if (bcount == journal->j_bcount) break; bcount = journal->j_bcount; } } /* join == true if you must join an existing transaction. ** join == false if you can deal with waiting for others to finish ** ** this will block until the transaction is joinable. send the number of blocks you ** expect to use in nblocks. 
 */
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, unsigned long nblocks,
			      int join)
{
	time_t now = get_seconds();
	unsigned int old_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_transaction_handle myth;
	int sched_count = 0;
	int retval;

	reiserfs_check_lock_depth(sb, "journal_begin");
	BUG_ON(nblocks > journal->j_trans_max);

	PROC_INFO_INC(sb, journal.journal_being);
	/* set here for journal_join */
	th->t_refcount = 1;
	th->t_super = sb;

      relock:
	lock_journal(sb);
	if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
		unlock_journal(sb);
		retval = journal->j_errno;
		goto out_fail;
	}
	journal->j_bcount++;

	if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
		unlock_journal(sb);
		reiserfs_write_unlock(sb);
		reiserfs_wait_on_write_block(sb);
		reiserfs_write_lock(sb);
		PROC_INFO_INC(sb, journal.journal_relock_writers);
		goto relock;
	}
	now = get_seconds();

	/* if there is no room in the journal OR
	 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
	 ** we don't sleep if there aren't other writers
	 */

	if ((!join && journal->j_must_wait > 0) ||
	    (!join
	     && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
	    || (!join && atomic_read(&journal->j_wcount) > 0
		&& journal->j_trans_start_time > 0
		&& (now - journal->j_trans_start_time) >
		journal->j_max_trans_age) || (!join
					      && atomic_read(&journal->j_jlock))
	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

		old_trans_id = journal->j_trans_id;
		unlock_journal(sb);	/* allow others to finish this transaction */

		/* the batch is mostly unlogged allocation headroom: let active
		   writers drain rather than forcing an end ourselves */
		if (!join && (journal->j_len_alloc + nblocks + 2) >=
		    journal->j_max_batch &&
		    ((journal->j_len + nblocks + 2) * 100) <
		    (journal->j_len_alloc * 75)) {
			if (atomic_read(&journal->j_wcount) > 10) {
				sched_count++;
				queue_log_writer(sb);
				goto relock;
			}
		}
		/* don't mess with joining the transaction if all we have to do is
		 * wait for someone else to do a commit
		 */
		if (atomic_read(&journal->j_jlock)) {
			while (journal->j_trans_id == old_trans_id &&
			       atomic_read(&journal->j_jlock)) {
				queue_log_writer(sb);
			}
			goto relock;
		}
		/* join the old transaction with a throw-away handle just to
		   end it, then retry starting our own */
		retval = journal_join(&myth, sb, 1);
		if (retval)
			goto out_fail;

		/* someone might have ended the transaction while we joined */
		if (old_trans_id != journal->j_trans_id) {
			retval = do_journal_end(&myth, sb, 1, 0);
		} else {
			retval = do_journal_end(&myth, sb, 1, COMMIT_NOW);
		}

		if (retval)
			goto out_fail;

		PROC_INFO_INC(sb, journal.journal_relock_wcount);
		goto relock;
	}
	/* we are the first writer, set trans_id */
	if (journal->j_trans_start_time == 0) {
		journal->j_trans_start_time = get_seconds();
	}
	atomic_inc(&(journal->j_wcount));
	journal->j_len_alloc += nblocks;
	th->t_blocks_logged = 0;
	th->t_blocks_allocated = nblocks;
	th->t_trans_id = journal->j_trans_id;
	unlock_journal(sb);
	INIT_LIST_HEAD(&th->t_list);
	return 0;

      out_fail:
	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = sb;
	return retval;
}

/* start (or nest into) a transaction whose handle outlives the current call
   chain; the handle is heap-allocated and must be released with
   reiserfs_end_persistent_transaction().  Returns NULL on failure. */
struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
								    super_block
								    *s,
								    int nblocks)
{
	int ret;
	struct reiserfs_transaction_handle *th;

	/* if we're nesting into an existing transaction.  It will be
	 ** persistent on its own
	 */
	if (reiserfs_transaction_running(s)) {
		th = current->journal_info;
		th->t_refcount++;
		BUG_ON(th->t_refcount < 2);

		return th;
	}
	th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
	if (!th)
		return NULL;
	ret = journal_begin(th, s, nblocks);
	if (ret) {
		kfree(th);
		return NULL;
	}

	SB_JOURNAL(s)->j_persistent_trans++;
	return th;
}

/* counterpart of reiserfs_persistent_transaction(): ends the transaction and
   frees the handle once its refcount drops to zero */
int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	int ret = 0;
	if (th->t_trans_id)
		ret = journal_end(th, th->t_super, th->t_blocks_allocated);
	else
		ret = -EIO;
	if (th->t_refcount == 0) {
		SB_JOURNAL(s)->j_persistent_trans--;
		kfree(th);
	}
	return ret;
}

/* join the currently running transaction (never starts a new one) */
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the current->journal_info
	 ** pointer
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);
}

/* like journal_join(), but allowed even when the journal is aborted (used
   on the error/abort path) */
int journal_join_abort(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the current->journal_info
	 ** pointer
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);
}

/* public entry point for starting a transaction; handles nesting via
   current->journal_info and refcounting on the handle */
int journal_begin(struct reiserfs_transaction_handle *th,
		  struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;
	int ret;

	th->t_handle_save = NULL;
	if (cur_th) {
		/* we are nesting into the current transaction */
		if (cur_th->t_super == sb) {
			BUG_ON(!cur_th->t_refcount);
			cur_th->t_refcount++;
			memcpy(th, cur_th, sizeof(*th));
			if (th->t_refcount <= 1)
				reiserfs_warning(sb, "reiserfs-2005",
						 "BAD: refcount <= 1, but "
						 "journal_info != 0");
			return 0;
} else {
			/* we've ended up with a handle from a different filesystem.
			 ** save it and restore on journal_end.  This should never
			 ** really happen...
			 */
			reiserfs_warning(sb, "clm-2100",
					 "nesting info a different FS");
			th->t_handle_save = current->journal_info;
			current->journal_info = th;
		}
	} else {
		current->journal_info = th;
	}
	ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
	BUG_ON(current->journal_info != th);

	/* I guess this boils down to being the reciprocal of clm-2100 above.
	 * If do_journal_begin_r fails, we need to put it back, since journal_end
	 * won't be called to do it. */
	if (ret)
		current->journal_info = th->t_handle_save;
	else
		BUG_ON(!th->t_refcount);

	return ret;
}

/*
** puts bh into the current transaction.  If it was already there, reorders removes the
** old pointers from the hash, and puts new ones in (to make sure replay happen in the right order).
**
** if it was dirty, cleans and files onto the clean list.  I can't let it be dirty again until the
** transaction is committed.
**
** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn = NULL;
	int count_already_incd = 0;
	int prepared = 0;
	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(sb, journal.mark_dirty);
	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	prepared = test_clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
	/* already in this transaction, we are done */
	if (buffer_journaled(bh)) {
		PROC_INFO_INC(sb, journal.mark_dirty_already);
		return 0;
	}

	/* this must be turned into a panic instead of a warning.  We can't allow
	 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
	 ** could get to disk too early.  NOT GOOD.
	 */
	if (!prepared || buffer_dirty(bh)) {
		reiserfs_warning(sb, "journal-1777",
				 "buffer %llu bad state "
				 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
				 (unsigned long long)bh->b_blocknr,
				 prepared ? ' ' : '!',
				 buffer_locked(bh) ? ' ' : '!',
				 buffer_dirty(bh) ? ' ' : '!',
				 buffer_journal_dirty(bh) ? ' ' : '!');
	}

	if (atomic_read(&(journal->j_wcount)) <= 0) {
		reiserfs_warning(sb, "journal-1409",
				 "returning because j_wcount was %d",
				 atomic_read(&(journal->j_wcount)));
		return 1;
	}
	/* this error means I've screwed up, and we've overflowed the transaction.
	 ** Nothing can be done here, except make the FS readonly or panic.
	 */
	if (journal->j_len >= journal->j_trans_max) {
		reiserfs_panic(th->t_super, "journal-1413",
			       "j_len (%lu) is too big",
			       journal->j_len);
	}

	if (buffer_journal_dirty(bh)) {
		count_already_incd = 1;
		PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
		clear_buffer_journal_dirty(bh);
	}

	if (journal->j_len > journal->j_len_alloc) {
		journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
	}

	set_buffer_journaled(bh);

	/* now put this guy on the end */
	if (!cn) {
		cn = get_cnode(sb);
		if (!cn) {
			reiserfs_panic(sb, "journal-4", "get_cnode failed!");
		}

		if (th->t_blocks_logged == th->t_blocks_allocated) {
			th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
			journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
		}
		th->t_blocks_logged++;
		journal->j_len++;

		cn->bh = bh;
		cn->blocknr = bh->b_blocknr;
		cn->sb = sb;
		cn->jlist = NULL;
		insert_journal_hash(journal->j_hash_table, cn);
		if (!count_already_incd) {
			get_bh(bh);
		}
	}
	/* link the cnode at the tail of the running transaction's list */
	cn->next = NULL;
	cn->prev = journal->j_last;
	cn->bh = bh;
	if (journal->j_last) {
		journal->j_last->next = cn;
		journal->j_last = cn;
	} else {
		journal->j_first = cn;
		journal->j_last = cn;
	}
	reiserfs_schedule_old_flush(sb);
	return 0;
}

/* end a transaction handle; only actually ends the underlying transaction
   when the handle's refcount drops to zero (nested begins just decrement) */
int journal_end(struct reiserfs_transaction_handle *th,
		struct super_block *sb, unsigned long nblocks)
{
	if (!current->journal_info && th->t_refcount > 1)
		reiserfs_warning(sb, "REISER-NESTING",
				 "th NULL, refcount %d", th->t_refcount);

	if (!th->t_trans_id) {
		WARN_ON(1);
		return -EIO;
	}

	th->t_refcount--;
	if (th->t_refcount > 0) {
		struct reiserfs_transaction_handle *cur_th =
		    current->journal_info;

		/* we aren't allowed to close a nested transaction on a different
		 ** filesystem from the one in the task struct
		 */
		BUG_ON(cur_th->t_super != th->t_super);

		if (th != cur_th) {
			/* copy accounting back into the handle the task keeps */
			memcpy(current->journal_info, th, sizeof(*th));
			th->t_trans_id = 0;
		}
		return 0;
	} else {
		return do_journal_end(th, sb, nblocks, 0);
	}
}

/* removes from the current transaction, relsing and descrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and relsed the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *sb,
				   b_blocknr_t blocknr, int already_cleaned)
{
	struct buffer_head *bh;
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
	if (!cn || !cn->bh) {
		return ret;
	}
	bh = cn->bh;
	/* unlink cn from the transaction's doubly linked cnode list */
	if (cn->prev) {
		cn->prev->next = cn->next;
	}
	if (cn->next) {
		cn->next->prev = cn->prev;
	}
	if (cn == journal->j_first) {
		journal->j_first = cn->next;
	}
	if (cn == journal->j_last) {
		journal->j_last = cn->prev;
	}
	if (bh)
		remove_journal_hash(sb, journal->j_hash_table, NULL,
				    bh->b_blocknr, 0);
	clear_buffer_journaled(bh);	/* don't log this one */

	if (!already_cleaned) {
		clear_buffer_journal_dirty(bh);
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
		put_bh(bh);
		if (atomic_read(&(bh->b_count)) < 0) {
			reiserfs_warning(sb, "journal-1752",
					 "b_count < 0");
		}
		ret = 1;
	}
	journal->j_len--;
	journal->j_len_alloc--;
	free_cnode(sb, cn);
	return ret;
}

/*
** for any cnode in a journal list, it can only be dirtied of all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed to dirty,
** and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
** blocks for a given transaction on disk
**
*/
static int can_dirty(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;
	struct reiserfs_journal_cnode *cur = cn->hprev;
	int can_dirty = 1;

	/* first test hprev.   These are all newer than cn, so any node here
	 ** with the same block number and dev means this node can't be sent
	 ** to disk right now.
	 */
	while (cur && can_dirty) {
		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
		    cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hprev;
	}
	/* then test hnext.  These are all older than cn.  As long as they
	 ** are committed to the log, it is safe to write cn to disk
	 */
	cur = cn->hnext;
	while (cur && can_dirty) {
		if (cur->jlist && cur->jlist->j_len > 0 &&
		    atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
		    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hnext;
	}
	return can_dirty;
}

/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
		     struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);
	/* you can sync while nested, very, very bad */
	BUG_ON(th->t_refcount > 1);
	if (journal->j_len == 0) {
		/* empty transaction: log the super block buffer so there is
		   something to commit */
		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1);
		journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
	}
	return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
}

/*
** writeback the pending async commits to disk
** (delayed-work handler queued on commit_wq)
*/
static void flush_async_commits(struct work_struct *work)
{
	struct reiserfs_journal *journal =
	    container_of(work, struct reiserfs_journal, j_work.work);
	struct super_block *sb = journal->j_work_sb;
	struct reiserfs_journal_list *jl;
	struct list_head *entry;

	reiserfs_write_lock(sb);
	if (!list_empty(&journal->j_journal_list)) {
		/* last entry is the youngest, commit it and you get everything */
		entry = journal->j_journal_list.prev;
		jl = JOURNAL_LIST_ENTRY(entry);
		flush_commit_list(sb, jl, 1);
	}
	reiserfs_write_unlock(sb);
}

/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
void reiserfs_flush_old_commits(struct super_block *sb)
{
	time_t now;
	struct reiserfs_transaction_handle th;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	now = get_seconds();
	/* safety check so we don't flush while we are replaying the log during
	 * mount
	 */
	if (list_empty(&journal->j_journal_list))
		return;

	/* check the current transaction.  If there are no writers, and it is
	 * too old, finish it, and force the commit blocks to disk
	 */
	if (atomic_read(&journal->j_wcount) <= 0 &&
	    journal->j_trans_start_time > 0 && journal->j_len > 0 &&
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		if (!journal_join(&th, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));

			/* we're only being called from kreiserfsd, it makes no sense to do
			 ** an async commit so that kreiserfsd can do it later
			 */
			do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
		}
	}
}

/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done.  By the time it wakes up, the transaction it was called has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
 */
static int check_journal_end(struct reiserfs_transaction_handle *th,
			     struct super_block *sb, unsigned long nblocks,
			     int flags)
{

	time_t now;
	int flush = flags & FLUSH_ALL;
	int commit_now = flags & COMMIT_NOW;
	int wait_on_commit = flags & WAIT;
	struct reiserfs_journal_list *jl;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);

	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	/* give back the allocation headroom this handle reserved but never logged */
	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed.  unmounting might not call begin */
		atomic_dec(&(journal->j_wcount));
	}

	/* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
	 ** will be dealt with by next transaction that actually writes something, but should be taken
	 ** care of in this trans
	 */
	BUG_ON(journal->j_len == 0);

	/* if wcount > 0, and we are called to with flush or commit_now,
	 ** we wait on j_join_wait.  We will wake up when the last writer has
	 ** finished the transaction, and started it on its way to the disk.
	 ** Then, we flush the commit or journal list, and just return 0
	 ** because the rest of journal end was already done for this transaction.
	 */
	if (atomic_read(&(journal->j_wcount)) > 0) {
		if (flush || commit_now) {
			unsigned trans_id;

			jl = journal->j_current_jl;
			trans_id = jl->j_trans_id;
			if (wait_on_commit)
				jl->j_state |= LIST_COMMIT_PENDING;
			atomic_set(&(journal->j_jlock), 1);
			if (flush) {
				journal->j_next_full_flush = 1;
			}
			unlock_journal(sb);

			/* sleep while the current transaction is still j_jlocked */
			while (journal->j_trans_id == trans_id) {
				if (atomic_read(&journal->j_jlock)) {
					queue_log_writer(sb);
				} else {
					/* re-assert jlock if the transaction
					   still hasn't ended */
					lock_journal(sb);
					if (journal->j_trans_id == trans_id) {
						atomic_set(&(journal->j_jlock),
							   1);
					}
					unlock_journal(sb);
				}
			}
			BUG_ON(journal->j_trans_id == trans_id);

			if (commit_now
			    && journal_list_still_alive(sb, trans_id)
			    && wait_on_commit) {
				flush_commit_list(sb, jl, 1);
			}
			return 0;
		}
		unlock_journal(sb);
		return 0;
	}

	/* deal with old transactions where we are the last writers */
	now = get_seconds();
	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		commit_now = 1;
		journal->j_next_async_flush = 1;
	}
	/* don't batch when someone is waiting on j_join_wait */
	/* don't batch when syncing the commit or flushing the whole trans */
	if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
	    && journal->j_len_alloc < journal->j_max_batch
	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
		/* keep batching: leave the transaction open for more writers */
		journal->j_bcount++;
		unlock_journal(sb);
		return 0;
	}

	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
		reiserfs_panic(sb, "journal-003",
			       "j_start (%ld) is too high", journal->j_start);
	}
	return 1;
}

/*
** Does all the work that makes deleting blocks safe.
** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
** before this transaction has finished.
** ** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with ** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash, ** the block can't be reallocated yet. ** ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list. */ int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *sb, b_blocknr_t blocknr) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn = NULL; struct buffer_head *bh = NULL; struct reiserfs_list_bitmap *jb = NULL; int cleaned = 0; BUG_ON(!th->t_trans_id); cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr); if (cn && cn->bh) { bh = cn->bh; get_bh(bh); } /* if it is journal new, we just remove it from this transaction */ if (bh && buffer_journal_new(bh)) { clear_buffer_journal_new(bh); clear_prepared_bits(bh); reiserfs_clean_and_file_buffer(bh); cleaned = remove_from_transaction(sb, blocknr, cleaned); } else { /* set the bit for this block in the journal bitmap for this transaction */ jb = journal->j_current_jl->j_list_bitmap; if (!jb) { reiserfs_panic(sb, "journal-1702", "journal_list_bitmap is NULL"); } set_bit_in_list_bitmap(sb, blocknr, jb); /* Note, the entire while loop is not allowed to schedule. 
*/ if (bh) { clear_prepared_bits(bh); reiserfs_clean_and_file_buffer(bh); } cleaned = remove_from_transaction(sb, blocknr, cleaned); /* find all older transactions with this block, make sure they don't try to write it out */ cn = get_journal_hash_dev(sb, journal->j_list_hash_table, blocknr); while (cn) { if (sb == cn->sb && blocknr == cn->blocknr) { set_bit(BLOCK_FREED, &cn->state); if (cn->bh) { if (!cleaned) { /* remove_from_transaction will brelse the buffer if it was ** in the current trans */ clear_buffer_journal_dirty(cn-> bh); clear_buffer_dirty(cn->bh); clear_buffer_journal_test(cn-> bh); cleaned = 1; put_bh(cn->bh); if (atomic_read (&(cn->bh->b_count)) < 0) { reiserfs_warning(sb, "journal-2138", "cn->bh->b_count < 0"); } } if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */ atomic_dec(& (cn->jlist-> j_nonzerolen)); } cn->bh = NULL; } } cn = cn->hnext; } } if (bh) release_buffer_page(bh); /* get_hash grabs the buffer */ return 0; } void reiserfs_update_inode_transaction(struct inode *inode) { struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb); REISERFS_I(inode)->i_jl = journal->j_current_jl; REISERFS_I(inode)->i_trans_id = journal->j_trans_id; } /* * returns -1 on error, 0 if no commits/barriers were done and 1 * if a transaction was actually committed and the barrier was done */ static int __commit_trans_jl(struct inode *inode, unsigned long id, struct reiserfs_journal_list *jl) { struct reiserfs_transaction_handle th; struct super_block *sb = inode->i_sb; struct reiserfs_journal *journal = SB_JOURNAL(sb); int ret = 0; /* is it from the current transaction, or from an unknown transaction? 
*/ if (id == journal->j_trans_id) { jl = journal->j_current_jl; /* try to let other writers come in and grow this transaction */ let_transaction_grow(sb, id); if (journal->j_trans_id != id) { goto flush_commit_only; } ret = journal_begin(&th, sb, 1); if (ret) return ret; /* someone might have ended this transaction while we joined */ if (journal->j_trans_id != id) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb)); ret = journal_end(&th, sb, 1); goto flush_commit_only; } ret = journal_end_sync(&th, sb, 1); if (!ret) ret = 1; } else { /* this gets tricky, we have to make sure the journal list in * the inode still exists. We know the list is still around * if we've got a larger transaction id than the oldest list */ flush_commit_only: if (journal_list_still_alive(inode->i_sb, id)) { /* * we only set ret to 1 when we know for sure * the barrier hasn't been started yet on the commit * block. */ if (atomic_read(&jl->j_commit_left) > 1) ret = 1; flush_commit_list(sb, jl, 1); if (journal->j_errno) ret = journal->j_errno; } } /* otherwise the list is gone, and long since committed */ return ret; } int reiserfs_commit_for_inode(struct inode *inode) { unsigned int id = REISERFS_I(inode)->i_trans_id; struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl; /* for the whole inode, assume unset id means it was * changed in the current transaction. 
More conservative */ if (!id || !jl) { reiserfs_update_inode_transaction(inode); id = REISERFS_I(inode)->i_trans_id; /* jl will be updated in __commit_trans_jl */ } return __commit_trans_jl(inode, id, jl); } void reiserfs_restore_prepared_buffer(struct super_block *sb, struct buffer_head *bh) { struct reiserfs_journal *journal = SB_JOURNAL(sb); PROC_INFO_INC(sb, journal.restore_prepared); if (!bh) { return; } if (test_clear_buffer_journal_restore_dirty(bh) && buffer_journal_dirty(bh)) { struct reiserfs_journal_cnode *cn; cn = get_journal_hash_dev(sb, journal->j_list_hash_table, bh->b_blocknr); if (cn && can_dirty(cn)) { set_buffer_journal_test(bh); mark_buffer_dirty(bh); } } clear_buffer_journal_prepared(bh); } extern struct tree_balance *cur_tb; /* ** before we can change a metadata block, we have to make sure it won't ** be written to disk while we are altering it. So, we must: ** clean it ** wait on it. ** */ int reiserfs_prepare_for_journal(struct super_block *sb, struct buffer_head *bh, int wait) { PROC_INFO_INC(sb, journal.prepare); if (!trylock_buffer(bh)) { if (!wait) return 0; lock_buffer(bh); } set_buffer_journal_prepared(bh); if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) { clear_buffer_journal_test(bh); set_buffer_journal_restore_dirty(bh); } unlock_buffer(bh); return 1; } static void flush_old_journal_lists(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); struct reiserfs_journal_list *jl; struct list_head *entry; time_t now = get_seconds(); while (!list_empty(&journal->j_journal_list)) { entry = journal->j_journal_list.next; jl = JOURNAL_LIST_ENTRY(entry); /* this check should always be run, to send old lists to disk */ if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) && atomic_read(&jl->j_commit_left) == 0 && test_transaction(s, jl)) { flush_used_journal_lists(s, jl); } else { break; } } } /* ** long and ugly. 
If flush, will not return until all commit ** blocks and all real buffers in the trans are on disk. ** If no_async, won't return until all commit blocks are on disk. ** ** keep reading, there are comments as you go along ** ** If the journal is aborted, we just clean up. Things like flushing ** journal lists, etc just won't happen. */ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks, int flags) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn, *next, *jl_cn; struct reiserfs_journal_cnode *last_cn = NULL; struct reiserfs_journal_desc *desc; struct reiserfs_journal_commit *commit; struct buffer_head *c_bh; /* commit bh */ struct buffer_head *d_bh; /* desc bh */ int cur_write_start = 0; /* start index of current log write */ int old_start; int i; int flush; int wait_on_commit; struct reiserfs_journal_list *jl, *temp_jl; struct list_head *entry, *safe; unsigned long jindex; unsigned int commit_trans_id; int trans_half; BUG_ON(th->t_refcount > 1); BUG_ON(!th->t_trans_id); /* protect flush_older_commits from doing mistakes if the transaction ID counter gets overflowed. 
*/ if (th->t_trans_id == ~0U) flags |= FLUSH_ALL | COMMIT_NOW | WAIT; flush = flags & FLUSH_ALL; wait_on_commit = flags & WAIT; current->journal_info = th->t_handle_save; reiserfs_check_lock_depth(sb, "journal end"); if (journal->j_len == 0) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb)); } lock_journal(sb); if (journal->j_next_full_flush) { flags |= FLUSH_ALL; flush = 1; } if (journal->j_next_async_flush) { flags |= COMMIT_NOW | WAIT; wait_on_commit = 1; } /* check_journal_end locks the journal, and unlocks if it does not return 1 ** it tells us if we should continue with the journal_end, or just return */ if (!check_journal_end(th, sb, nblocks, flags)) { reiserfs_schedule_old_flush(sb); wake_queued_writers(sb); reiserfs_async_progress_wait(sb); goto out; } /* check_journal_end might set these, check again */ if (journal->j_next_full_flush) { flush = 1; } /* ** j must wait means we have to flush the log blocks, and the real blocks for ** this transaction */ if (journal->j_must_wait > 0) { flush = 1; } #ifdef REISERFS_PREALLOCATE /* quota ops might need to nest, setup the journal_info pointer for them * and raise the refcount so that it is > 0. */ current->journal_info = th; th->t_refcount++; reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into * the transaction */ th->t_refcount--; current->journal_info = th->t_handle_save; #endif /* setup description block */ d_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start); set_buffer_uptodate(d_bh); desc = (struct reiserfs_journal_desc *)(d_bh)->b_data; memset(d_bh->b_data, 0, d_bh->b_size); memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8); set_desc_trans_id(desc, journal->j_trans_id); /* setup commit block. 
Don't write (keep it clean too) this one until after everyone else is written */ c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((journal->j_start + journal->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(sb))); commit = (struct reiserfs_journal_commit *)c_bh->b_data; memset(c_bh->b_data, 0, c_bh->b_size); set_commit_trans_id(commit, journal->j_trans_id); set_buffer_uptodate(c_bh); /* init this journal list */ jl = journal->j_current_jl; /* we lock the commit before doing anything because * we want to make sure nobody tries to run flush_commit_list until * the new transaction is fully setup, and we've already flushed the * ordered bh list */ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb); /* save the transaction id in case we need to commit it later */ commit_trans_id = jl->j_trans_id; atomic_set(&jl->j_older_commits_done, 0); jl->j_trans_id = journal->j_trans_id; jl->j_timestamp = journal->j_trans_start_time; jl->j_commit_bh = c_bh; jl->j_start = journal->j_start; jl->j_len = journal->j_len; atomic_set(&jl->j_nonzerolen, journal->j_len); atomic_set(&jl->j_commit_left, journal->j_len + 2); jl->j_realblock = NULL; /* The ENTIRE FOR LOOP MUST not cause schedule to occur. 
** for each real block, add it to the journal list hash, ** copy into real block index array in the commit or desc block */ trans_half = journal_trans_half(sb->s_blocksize); for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) { if (buffer_journaled(cn->bh)) { jl_cn = get_cnode(sb); if (!jl_cn) { reiserfs_panic(sb, "journal-1676", "get_cnode returned NULL"); } if (i == 0) { jl->j_realblock = jl_cn; } jl_cn->prev = last_cn; jl_cn->next = NULL; if (last_cn) { last_cn->next = jl_cn; } last_cn = jl_cn; /* make sure the block we are trying to log is not a block of journal or reserved area */ if (is_block_in_log_or_reserved_area (sb, cn->bh->b_blocknr)) { reiserfs_panic(sb, "journal-2332", "Trying to log block %lu, " "which is a log block", cn->bh->b_blocknr); } jl_cn->blocknr = cn->bh->b_blocknr; jl_cn->state = 0; jl_cn->sb = sb; jl_cn->bh = cn->bh; jl_cn->jlist = jl; insert_journal_hash(journal->j_list_hash_table, jl_cn); if (i < trans_half) { desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr); } else { commit->j_realblock[i - trans_half] = cpu_to_le32(cn->bh->b_blocknr); } } else { i--; } } set_desc_trans_len(desc, journal->j_len); set_desc_mount_id(desc, journal->j_mount_id); set_desc_trans_id(desc, journal->j_trans_id); set_commit_trans_len(commit, journal->j_len); /* special check in case all buffers in the journal were marked for not logging */ BUG_ON(journal->j_len == 0); /* we're about to dirty all the log blocks, mark the description block * dirty now too. Don't mark the commit block dirty until all the * others are on disk */ mark_buffer_dirty(d_bh); /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */ cur_write_start = journal->j_start; cn = journal->j_first; jindex = 1; /* start at one so we don't get the desc again */ while (cn) { clear_buffer_journal_new(cn->bh); /* copy all the real blocks into log area. 
dirty log blocks */ if (buffer_journaled(cn->bh)) { struct buffer_head *tmp_bh; char *addr; struct page *page; tmp_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(sb))); set_buffer_uptodate(tmp_bh); page = cn->bh->b_page; addr = kmap(page); memcpy(tmp_bh->b_data, addr + offset_in_page(cn->bh->b_data), cn->bh->b_size); kunmap(page); mark_buffer_dirty(tmp_bh); jindex++; set_buffer_journal_dirty(cn->bh); clear_buffer_journaled(cn->bh); } else { /* JDirty cleared sometime during transaction. don't log this one */ reiserfs_warning(sb, "journal-2048", "BAD, buffer in journal hash, " "but not JDirty!"); brelse(cn->bh); } next = cn->next; free_cnode(sb, cn); cn = next; reiserfs_write_unlock(sb); cond_resched(); reiserfs_write_lock(sb); } /* we are done with both the c_bh and d_bh, but ** c_bh must be written after all other commit blocks, ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1. */ journal->j_current_jl = alloc_journal_list(sb); /* now it is safe to insert this transaction on the main list */ list_add_tail(&jl->j_list, &journal->j_journal_list); list_add_tail(&jl->j_working_list, &journal->j_working_list); journal->j_num_work_lists++; /* reset journal values for the next transaction */ old_start = journal->j_start; journal->j_start = (journal->j_start + journal->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(sb); atomic_set(&(journal->j_wcount), 0); journal->j_bcount = 0; journal->j_last = NULL; journal->j_first = NULL; journal->j_len = 0; journal->j_trans_start_time = 0; /* check for trans_id overflow */ if (++journal->j_trans_id == 0) journal->j_trans_id = 10; journal->j_current_jl->j_trans_id = journal->j_trans_id; journal->j_must_wait = 0; journal->j_len_alloc = 0; journal->j_next_full_flush = 0; journal->j_next_async_flush = 0; init_journal_hash(sb); // make sure reiserfs_add_jh sees the new current_jl before we // write out the tails smp_mb(); /* tail conversion targets have to hit the 
disk before we end the * transaction. Otherwise a later transaction might repack the tail * before this transaction commits, leaving the data block unflushed and * clean, if we crash before the later transaction commits, the data block * is lost. */ if (!list_empty(&jl->j_tail_bh_list)) { reiserfs_write_unlock(sb); write_ordered_buffers(&journal->j_dirty_buffers_lock, journal, jl, &jl->j_tail_bh_list); reiserfs_write_lock(sb); } BUG_ON(!list_empty(&jl->j_tail_bh_list)); mutex_unlock(&jl->j_commit_mutex); /* honor the flush wishes from the caller, simple commits can ** be done outside the journal lock, they are done below ** ** if we don't flush the commit list right now, we put it into ** the work queue so the people waiting on the async progress work ** queue don't wait for this proc to flush journal lists and such. */ if (flush) { flush_commit_list(sb, jl, 1); flush_journal_list(sb, jl, 1); } else if (!(jl->j_state & LIST_COMMIT_PENDING)) queue_delayed_work(commit_wq, &journal->j_work, HZ / 10); /* if the next transaction has any chance of wrapping, flush ** transactions that might get overwritten. If any journal lists are very ** old flush them as well. 
*/ first_jl: list_for_each_safe(entry, safe, &journal->j_journal_list) { temp_jl = JOURNAL_LIST_ENTRY(entry); if (journal->j_start <= temp_jl->j_start) { if ((journal->j_start + journal->j_trans_max + 1) >= temp_jl->j_start) { flush_used_journal_lists(sb, temp_jl); goto first_jl; } else if ((journal->j_start + journal->j_trans_max + 1) < SB_ONDISK_JOURNAL_SIZE(sb)) { /* if we don't cross into the next transaction and we don't * wrap, there is no way we can overlap any later transactions * break now */ break; } } else if ((journal->j_start + journal->j_trans_max + 1) > SB_ONDISK_JOURNAL_SIZE(sb)) { if (((journal->j_start + journal->j_trans_max + 1) % SB_ONDISK_JOURNAL_SIZE(sb)) >= temp_jl->j_start) { flush_used_journal_lists(sb, temp_jl); goto first_jl; } else { /* we don't overlap anything from out start to the end of the * log, and our wrapped portion doesn't overlap anything at * the start of the log. We can break */ break; } } } flush_old_journal_lists(sb); journal->j_current_jl->j_list_bitmap = get_list_bitmap(sb, journal->j_current_jl); if (!(journal->j_current_jl->j_list_bitmap)) { reiserfs_panic(sb, "journal-1996", "could not get a list bitmap"); } atomic_set(&(journal->j_jlock), 0); unlock_journal(sb); /* wake up any body waiting to join. */ clear_bit(J_WRITERS_QUEUED, &journal->j_state); wake_up(&(journal->j_join_wait)); if (!flush && wait_on_commit && journal_list_still_alive(sb, commit_trans_id)) { flush_commit_list(sb, jl, 1); } out: reiserfs_check_lock_depth(sb, "journal end2"); memset(th, 0, sizeof(*th)); /* Re-set th->t_super, so we can properly keep track of how many * persistent transactions there are. 
We need to do this so if this * call is part of a failed restart_transaction, we can free it later */ th->t_super = sb; return journal->j_errno; } /* Send the file system read only and refuse new transactions */ void reiserfs_abort_journal(struct super_block *sb, int errno) { struct reiserfs_journal *journal = SB_JOURNAL(sb); if (test_bit(J_ABORTED, &journal->j_state)) return; if (!journal->j_errno) journal->j_errno = errno; sb->s_flags |= MS_RDONLY; set_bit(J_ABORTED, &journal->j_state); #ifdef CONFIG_REISERFS_CHECK dump_stack(); #endif }
gpl-2.0
Desterly/android_kernel_motorola_msm8994
drivers/media/radio/radio-shark.c
2153
11694
/* * Linux V4L2 radio driver for the Griffin radioSHARK USB radio receiver * * Note the radioSHARK offers the audio through a regular USB audio device, * this driver only handles the tuning. * * The info necessary to drive the shark was taken from the small userspace * shark.c program by Michael Rolig, which he kindly placed in the Public * Domain. * * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <media/v4l2-device.h> #include <sound/tea575x-tuner.h> #if defined(CONFIG_LEDS_CLASS) || \ (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE)) #define SHARK_USE_LEDS 1 #endif /* * Version Information */ MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Griffin radioSHARK, USB radio receiver driver"); MODULE_LICENSE("GPL"); #define SHARK_IN_EP 0x83 #define SHARK_OUT_EP 0x05 #define TEA575X_BIT_MONO (1<<22) /* 0 = stereo, 1 = mono */ #define TEA575X_BIT_BAND_MASK (3<<20) #define TEA575X_BIT_BAND_FM (0<<20) #define TB_LEN 6 #define DRV_NAME "radioshark" #define v4l2_dev_to_shark(d) container_of(d, struct shark_device, v4l2_dev) /* Note BLUE_IS_PULSE 
comes after NO_LEDS as it is a status bit, not a LED */ enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS, BLUE_IS_PULSE }; struct shark_device { struct usb_device *usbdev; struct v4l2_device v4l2_dev; struct snd_tea575x tea; #ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; char led_names[NO_LEDS][32]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; #endif u8 *transfer_buffer; u32 last_val; }; static atomic_t shark_instance = ATOMIC_INIT(0); static void shark_write_val(struct snd_tea575x *tea, u32 val) { struct shark_device *shark = tea->private_data; int i, res, actual_len; /* Avoid unnecessary (slow) USB transfers */ if (shark->last_val == val) return; memset(shark->transfer_buffer, 0, TB_LEN); shark->transfer_buffer[0] = 0xc0; /* Write shift register command */ for (i = 0; i < 4; i++) shark->transfer_buffer[i] |= (val >> (24 - i * 8)) & 0xff; res = usb_interrupt_msg(shark->usbdev, usb_sndintpipe(shark->usbdev, SHARK_OUT_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res >= 0) shark->last_val = val; else v4l2_err(&shark->v4l2_dev, "set-freq error: %d\n", res); } static u32 shark_read_val(struct snd_tea575x *tea) { struct shark_device *shark = tea->private_data; int i, res, actual_len; u32 val = 0; memset(shark->transfer_buffer, 0, TB_LEN); shark->transfer_buffer[0] = 0x80; res = usb_interrupt_msg(shark->usbdev, usb_sndintpipe(shark->usbdev, SHARK_OUT_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) { v4l2_err(&shark->v4l2_dev, "request-status error: %d\n", res); return shark->last_val; } res = usb_interrupt_msg(shark->usbdev, usb_rcvintpipe(shark->usbdev, SHARK_IN_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) { v4l2_err(&shark->v4l2_dev, "get-status error: %d\n", res); return shark->last_val; } for (i = 0; i < 4; i++) val |= shark->transfer_buffer[i] << (24 - i * 8); shark->last_val = val; /* * The shark does not allow actually reading the stereo / mono pin 
:( * So assume that when we're tuned to an FM station and mono has not * been requested, that we're receiving stereo. */ if (((val & TEA575X_BIT_BAND_MASK) == TEA575X_BIT_BAND_FM) && !(val & TEA575X_BIT_MONO)) shark->tea.stereo = true; else shark->tea.stereo = false; return val; } static struct snd_tea575x_ops shark_tea_ops = { .write_val = shark_write_val, .read_val = shark_read_val, }; #ifdef SHARK_USE_LEDS static void shark_led_work(struct work_struct *work) { struct shark_device *shark = container_of(work, struct shark_device, led_work); int i, res, brightness, actual_len; for (i = 0; i < 3; i++) { if (!test_and_clear_bit(i, &shark->brightness_new)) continue; brightness = atomic_read(&shark->brightness[i]); memset(shark->transfer_buffer, 0, TB_LEN); if (i != RED_LED) { shark->transfer_buffer[0] = 0xA0 + i; shark->transfer_buffer[1] = brightness; } else shark->transfer_buffer[0] = brightness ? 0xA9 : 0xA8; res = usb_interrupt_msg(shark->usbdev, usb_sndintpipe(shark->usbdev, 0x05), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", shark->led_names[i], res); } } static void shark_led_set_blue(struct led_classdev *led_cdev, enum led_brightness value) { struct shark_device *shark = container_of(led_cdev, struct shark_device, leds[BLUE_LED]); atomic_set(&shark->brightness[BLUE_LED], value); set_bit(BLUE_LED, &shark->brightness_new); clear_bit(BLUE_IS_PULSE, &shark->brightness_new); schedule_work(&shark->led_work); } static void shark_led_set_blue_pulse(struct led_classdev *led_cdev, enum led_brightness value) { struct shark_device *shark = container_of(led_cdev, struct shark_device, leds[BLUE_PULSE_LED]); atomic_set(&shark->brightness[BLUE_PULSE_LED], 256 - value); set_bit(BLUE_PULSE_LED, &shark->brightness_new); set_bit(BLUE_IS_PULSE, &shark->brightness_new); schedule_work(&shark->led_work); } static void shark_led_set_red(struct led_classdev *led_cdev, enum led_brightness value) { struct 
shark_device *shark = container_of(led_cdev, struct shark_device, leds[RED_LED]); atomic_set(&shark->brightness[RED_LED], value); set_bit(RED_LED, &shark->brightness_new); schedule_work(&shark->led_work); } static const struct led_classdev shark_led_templates[NO_LEDS] = { [BLUE_LED] = { .name = "%s:blue:", .brightness = LED_OFF, .max_brightness = 127, .brightness_set = shark_led_set_blue, }, [BLUE_PULSE_LED] = { .name = "%s:blue-pulse:", .brightness = LED_OFF, .max_brightness = 255, .brightness_set = shark_led_set_blue_pulse, }, [RED_LED] = { .name = "%s:red:", .brightness = LED_OFF, .max_brightness = 1, .brightness_set = shark_led_set_red, }, }; static int shark_register_leds(struct shark_device *shark, struct device *dev) { int i, retval; atomic_set(&shark->brightness[BLUE_LED], 127); INIT_WORK(&shark->led_work, shark_led_work); for (i = 0; i < NO_LEDS; i++) { shark->leds[i] = shark_led_templates[i]; snprintf(shark->led_names[i], sizeof(shark->led_names[0]), shark->leds[i].name, shark->v4l2_dev.name); shark->leds[i].name = shark->led_names[i]; retval = led_classdev_register(dev, &shark->leds[i]); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register led: %s\n", shark->led_names[i]); return retval; } } return 0; } static void shark_unregister_leds(struct shark_device *shark) { int i; for (i = 0; i < NO_LEDS; i++) led_classdev_unregister(&shark->leds[i]); cancel_work_sync(&shark->led_work); } static void shark_resume_leds(struct shark_device *shark) { if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) set_bit(BLUE_PULSE_LED, &shark->brightness_new); else set_bit(BLUE_LED, &shark->brightness_new); set_bit(RED_LED, &shark->brightness_new); schedule_work(&shark->led_work); } #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { v4l2_warn(&shark->v4l2_dev, "CONFIG_LEDS_CLASS not enabled, LED support disabled\n"); return 0; } static inline void shark_unregister_leds(struct shark_device *shark) { } static inline void 
shark_resume_leds(struct shark_device *shark) { } #endif static void usb_shark_disconnect(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); mutex_lock(&shark->tea.mutex); v4l2_device_disconnect(&shark->v4l2_dev); snd_tea575x_exit(&shark->tea); mutex_unlock(&shark->tea.mutex); shark_unregister_leds(shark); v4l2_device_put(&shark->v4l2_dev); } static void usb_shark_release(struct v4l2_device *v4l2_dev) { struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); v4l2_device_unregister(&shark->v4l2_dev); kfree(shark->transfer_buffer); kfree(shark); } static int usb_shark_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct shark_device *shark; int retval = -ENOMEM; shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); if (!shark) return retval; shark->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL); if (!shark->transfer_buffer) goto err_alloc_buffer; v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); retval = shark_register_leds(shark, &intf->dev); if (retval) goto err_reg_leds; shark->v4l2_dev.release = usb_shark_release; retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); goto err_reg_dev; } shark->usbdev = interface_to_usbdev(intf); shark->tea.v4l2_dev = &shark->v4l2_dev; shark->tea.private_data = shark; shark->tea.radio_nr = -1; shark->tea.ops = &shark_tea_ops; shark->tea.cannot_mute = true; shark->tea.has_am = true; strlcpy(shark->tea.card, "Griffin radioSHARK", sizeof(shark->tea.card)); usb_make_path(shark->usbdev, shark->tea.bus_info, sizeof(shark->tea.bus_info)); retval = snd_tea575x_init(&shark->tea, THIS_MODULE); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't init tea5757\n"); goto err_init_tea; } return 0; err_init_tea: v4l2_device_unregister(&shark->v4l2_dev); err_reg_dev: shark_unregister_leds(shark); err_reg_leds: 
kfree(shark->transfer_buffer); err_alloc_buffer: kfree(shark); return retval; } #ifdef CONFIG_PM static int usb_shark_suspend(struct usb_interface *intf, pm_message_t message) { return 0; } static int usb_shark_resume(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); mutex_lock(&shark->tea.mutex); snd_tea575x_set_freq(&shark->tea); mutex_unlock(&shark->tea.mutex); shark_resume_leds(shark); return 0; } #endif /* Specify the bcdDevice value, as the radioSHARK and radioSHARK2 share ids */ static struct usb_device_id usb_shark_device_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = 0x077d, .idProduct = 0x627a, .bcdDevice_lo = 0x0001, .bcdDevice_hi = 0x0001, .bInterfaceClass = 3, }, { } }; MODULE_DEVICE_TABLE(usb, usb_shark_device_table); static struct usb_driver usb_shark_driver = { .name = DRV_NAME, .probe = usb_shark_probe, .disconnect = usb_shark_disconnect, .id_table = usb_shark_device_table, #ifdef CONFIG_PM .suspend = usb_shark_suspend, .resume = usb_shark_resume, .reset_resume = usb_shark_resume, #endif }; module_usb_driver(usb_shark_driver);
gpl-2.0
Frontier314/kernel_s702hf
drivers/hid/hid-wacom.c
2665
11785
/* * Bluetooth Wacom Tablet support * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com> * Copyright (c) 2006 Andrew Zabolotny <zap@homelink.ru> * Copyright (c) 2009 Bastien Nocera <hadess@hadess.net> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #ifdef CONFIG_HID_WACOM_POWER_SUPPLY #include <linux/power_supply.h> #endif #include "hid-ids.h" struct wacom_data { __u16 tool; unsigned char butstate; unsigned char high_speed; #ifdef CONFIG_HID_WACOM_POWER_SUPPLY int battery_capacity; struct power_supply battery; struct power_supply ac; #endif }; #ifdef CONFIG_HID_WACOM_POWER_SUPPLY /*percent of battery capacity, 0 means AC online*/ static unsigned short batcap[8] = { 1, 15, 25, 35, 50, 70, 100, 0 }; static enum power_supply_property wacom_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CAPACITY }; static enum power_supply_property wacom_ac_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE }; static int wacom_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct wacom_data *wdata = container_of(psy, struct wacom_data, battery); int power_state = batcap[wdata->battery_capacity]; int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_CAPACITY: /* show 100% battery capacity when charging */ if (power_state == 0) val->intval = 100; else 
val->intval = power_state; break; default: ret = -EINVAL; break; } return ret; } static int wacom_ac_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct wacom_data *wdata = container_of(psy, struct wacom_data, ac); int power_state = batcap[wdata->battery_capacity]; int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_PRESENT: /* fall through */ case POWER_SUPPLY_PROP_ONLINE: if (power_state == 0) val->intval = 1; else val->intval = 0; break; default: ret = -EINVAL; break; } return ret; } #endif static void wacom_poke(struct hid_device *hdev, u8 speed) { struct wacom_data *wdata = hid_get_drvdata(hdev); int limit, ret; char rep_data[2]; rep_data[0] = 0x03 ; rep_data[1] = 0x00; limit = 3; do { ret = hdev->hid_output_raw_report(hdev, rep_data, 2, HID_FEATURE_REPORT); } while (ret < 0 && limit-- > 0); if (ret >= 0) { if (speed == 0) rep_data[0] = 0x05; else rep_data[0] = 0x06; rep_data[1] = 0x00; limit = 3; do { ret = hdev->hid_output_raw_report(hdev, rep_data, 2, HID_FEATURE_REPORT); } while (ret < 0 && limit-- > 0); if (ret >= 0) { wdata->high_speed = speed; return; } } /* * Note that if the raw queries fail, it's not a hard failure and it * is safe to continue */ hid_warn(hdev, "failed to poke device, command %d, err %d\n", rep_data[0], ret); return; } static ssize_t wacom_show_speed(struct device *dev, struct device_attribute *attr, char *buf) { struct wacom_data *wdata = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%i\n", wdata->high_speed); } static ssize_t wacom_store_speed(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); int new_speed; if (sscanf(buf, "%1d", &new_speed ) != 1) return -EINVAL; if (new_speed == 0 || new_speed == 1) { wacom_poke(hdev, new_speed); return strnlen(buf, PAGE_SIZE); } else return -EINVAL; } static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP, wacom_show_speed, 
wacom_store_speed); static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *raw_data, int size) { struct wacom_data *wdata = hid_get_drvdata(hdev); struct hid_input *hidinput; struct input_dev *input; unsigned char *data = (unsigned char *) raw_data; int tool, x, y, rw; if (!(hdev->claimed & HID_CLAIMED_INPUT)) return 0; tool = 0; hidinput = list_entry(hdev->inputs.next, struct hid_input, list); input = hidinput->input; /* Check if this is a tablet report */ if (data[0] != 0x03) return 0; /* Get X & Y positions */ x = le16_to_cpu(*(__le16 *) &data[2]); y = le16_to_cpu(*(__le16 *) &data[4]); /* Get current tool identifier */ if (data[1] & 0x90) { /* If pen is in the in/active area */ switch ((data[1] >> 5) & 3) { case 0: /* Pen */ tool = BTN_TOOL_PEN; break; case 1: /* Rubber */ tool = BTN_TOOL_RUBBER; break; case 2: /* Mouse with wheel */ case 3: /* Mouse without wheel */ tool = BTN_TOOL_MOUSE; break; } /* Reset tool if out of active tablet area */ if (!(data[1] & 0x10)) tool = 0; } /* If tool changed, notify input subsystem */ if (wdata->tool != tool) { if (wdata->tool) { /* Completely reset old tool state */ if (wdata->tool == BTN_TOOL_MOUSE) { input_report_key(input, BTN_LEFT, 0); input_report_key(input, BTN_RIGHT, 0); input_report_key(input, BTN_MIDDLE, 0); input_report_abs(input, ABS_DISTANCE, input_abs_get_max(input, ABS_DISTANCE)); } else { input_report_key(input, BTN_TOUCH, 0); input_report_key(input, BTN_STYLUS, 0); input_report_key(input, BTN_STYLUS2, 0); input_report_abs(input, ABS_PRESSURE, 0); } input_report_key(input, wdata->tool, 0); input_sync(input); } wdata->tool = tool; if (tool) input_report_key(input, tool, 1); } if (tool) { input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); switch ((data[1] >> 5) & 3) { case 2: /* Mouse with wheel */ input_report_key(input, BTN_MIDDLE, data[1] & 0x04); rw = (data[6] & 0x01) ? -1 : (data[6] & 0x02) ? 
1 : 0; input_report_rel(input, REL_WHEEL, rw); /* fall through */ case 3: /* Mouse without wheel */ input_report_key(input, BTN_LEFT, data[1] & 0x01); input_report_key(input, BTN_RIGHT, data[1] & 0x02); /* Compute distance between mouse and tablet */ rw = 44 - (data[6] >> 2); if (rw < 0) rw = 0; else if (rw > 31) rw = 31; input_report_abs(input, ABS_DISTANCE, rw); break; default: input_report_abs(input, ABS_PRESSURE, data[6] | (((__u16) (data[1] & 0x08)) << 5)); input_report_key(input, BTN_TOUCH, data[1] & 0x01); input_report_key(input, BTN_STYLUS, data[1] & 0x02); input_report_key(input, BTN_STYLUS2, (tool == BTN_TOOL_PEN) && data[1] & 0x04); break; } input_sync(input); } /* Report the state of the two buttons at the top of the tablet * as two extra fingerpad keys (buttons 4 & 5). */ rw = data[7] & 0x03; if (rw != wdata->butstate) { wdata->butstate = rw; input_report_key(input, BTN_0, rw & 0x02); input_report_key(input, BTN_1, rw & 0x01); input_report_key(input, BTN_TOOL_FINGER, 0xf0); input_event(input, EV_MSC, MSC_SERIAL, 0xf0); input_sync(input); } #ifdef CONFIG_HID_WACOM_POWER_SUPPLY /* Store current battery capacity */ rw = (data[7] >> 2 & 0x07); if (rw != wdata->battery_capacity) wdata->battery_capacity = rw; #endif return 1; } static int wacom_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct hid_input *hidinput; struct input_dev *input; struct wacom_data *wdata; int ret; wdata = kzalloc(sizeof(*wdata), GFP_KERNEL); if (wdata == NULL) { hid_err(hdev, "can't alloc wacom descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, wdata); /* Parse the HID report now */ ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } ret = device_create_file(&hdev->dev, &dev_attr_speed); if (ret) hid_warn(hdev, "can't create sysfs speed attribute err: %d\n", ret); /* Set Wacom mode 2 with high reporting speed */ 
wacom_poke(hdev, 1); #ifdef CONFIG_HID_WACOM_POWER_SUPPLY wdata->battery.properties = wacom_battery_props; wdata->battery.num_properties = ARRAY_SIZE(wacom_battery_props); wdata->battery.get_property = wacom_battery_get_property; wdata->battery.name = "wacom_battery"; wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY; wdata->battery.use_for_apm = 0; ret = power_supply_register(&hdev->dev, &wdata->battery); if (ret) { hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n", ret); /* * battery attribute is not critical for the tablet, but if it * failed then there is no need to create ac attribute */ goto move_on; } wdata->ac.properties = wacom_ac_props; wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props); wdata->ac.get_property = wacom_ac_get_property; wdata->ac.name = "wacom_ac"; wdata->ac.type = POWER_SUPPLY_TYPE_MAINS; wdata->ac.use_for_apm = 0; ret = power_supply_register(&hdev->dev, &wdata->ac); if (ret) { hid_warn(hdev, "can't create ac battery attribute, err: %d\n", ret); /* * ac attribute is not critical for the tablet, but if it * failed then we don't want to battery attribute to exist */ power_supply_unregister(&wdata->battery); } move_on: #endif hidinput = list_entry(hdev->inputs.next, struct hid_input, list); input = hidinput->input; /* Basics */ input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); __set_bit(REL_WHEEL, input->relbit); __set_bit(BTN_TOOL_PEN, input->keybit); __set_bit(BTN_TOUCH, input->keybit); __set_bit(BTN_STYLUS, input->keybit); __set_bit(BTN_STYLUS2, input->keybit); __set_bit(BTN_LEFT, input->keybit); __set_bit(BTN_RIGHT, input->keybit); __set_bit(BTN_MIDDLE, input->keybit); /* Pad */ input->evbit[0] |= BIT(EV_MSC); __set_bit(MSC_SERIAL, input->mscbit); __set_bit(BTN_0, input->keybit); __set_bit(BTN_1, input->keybit); __set_bit(BTN_TOOL_FINGER, input->keybit); /* Distance, rubber and mouse */ __set_bit(BTN_TOOL_RUBBER, input->keybit); __set_bit(BTN_TOOL_MOUSE, input->keybit); input_set_abs_params(input, ABS_X, 0, 
16704, 4, 0); input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0); input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0); input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0); return 0; err_free: kfree(wdata); return ret; } static void wacom_remove(struct hid_device *hdev) { #ifdef CONFIG_HID_WACOM_POWER_SUPPLY struct wacom_data *wdata = hid_get_drvdata(hdev); #endif hid_hw_stop(hdev); #ifdef CONFIG_HID_WACOM_POWER_SUPPLY power_supply_unregister(&wdata->battery); power_supply_unregister(&wdata->ac); #endif kfree(hid_get_drvdata(hdev)); } static const struct hid_device_id wacom_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) }, { } }; MODULE_DEVICE_TABLE(hid, wacom_devices); static struct hid_driver wacom_driver = { .name = "wacom", .id_table = wacom_devices, .probe = wacom_probe, .remove = wacom_remove, .raw_event = wacom_raw_event, }; static int __init wacom_init(void) { int ret; ret = hid_register_driver(&wacom_driver); if (ret) pr_err("can't register wacom driver\n"); return ret; } static void __exit wacom_exit(void) { hid_unregister_driver(&wacom_driver); } module_init(wacom_init); module_exit(wacom_exit); MODULE_LICENSE("GPL");
gpl-2.0
PhilZ-cwm6/android_kernel_htc_vigor
drivers/atm/zatm.c
2665
44396
/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/atm_zatm.h> #include <linux/capability.h> #include <linux/bitops.h> #include <linux/wait.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <asm/system.h> #include <asm/string.h> #include <asm/io.h> #include <asm/atomic.h> #include <asm/uaccess.h> #include "uPD98401.h" #include "uPD98402.h" #include "zeprom.h" #include "zatm.h" /* * TODO: * * Minor features * - support 64 kB SDUs (will have to use multibuffer batches then :-( ) * - proper use of CDV, credit = max(1,CDVT*PCR) * - AAL0 * - better receive timestamps * - OAM */ #define ZATM_COPPER 1 #if 0 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) #else #define DPRINTK(format,args...) #endif #ifndef CONFIG_ATM_ZATM_DEBUG #define NULLCHECK(x) #define EVENT(s,a,b) static void event_dump(void) { } #else /* * NULL pointer checking */ #define NULLCHECK(x) \ if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x)) /* * Very extensive activity logging. Greatly improves bug detection speed but * costs a few Mbps if enabled. */ #define EV 64 static const char *ev[EV]; static unsigned long ev_a[EV],ev_b[EV]; static int ec = 0; static void EVENT(const char *s,unsigned long a,unsigned long b) { ev[ec] = s; ev_a[ec] = a; ev_b[ec] = b; ec = (ec+1) % EV; } static void event_dump(void) { int n,i; printk(KERN_NOTICE "----- event dump follows -----\n"); for (n = 0; n < EV; n++) { i = (ec+n) % EV; printk(KERN_NOTICE); printk(ev[i] ? 
ev[i] : "(null)",ev_a[i],ev_b[i]); } printk(KERN_NOTICE "----- event dump ends here -----\n"); } #endif /* CONFIG_ATM_ZATM_DEBUG */ #define RING_BUSY 1 /* indication from do_tx that PDU has to be backlogged */ static struct atm_dev *zatm_boards = NULL; static unsigned long dummy[2] = {0,0}; #define zin_n(r) inl(zatm_dev->base+r*4) #define zin(r) inl(zatm_dev->base+uPD98401_##r*4) #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4) #define zwait while (zin(CMR) & uPD98401_BUSY) /* RX0, RX1, TX0, TX1 */ static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 }; static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */ #define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i]) /*-------------------------------- utilities --------------------------------*/ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) { zwait; zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_BALL | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); } static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr) { zwait; zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); zwait; return zin(CER); } /*------------------------------- free lists --------------------------------*/ /* * Free buffer head structure: * [0] pointer to buffer (for SAR) * [1] buffer descr link pointer (for SAR) * [2] back pointer to skb (for poll_rx) * [3] data * ... */ struct rx_buffer_head { u32 buffer; /* pointer to buffer (for SAR) */ u32 link; /* buffer descriptor link pointer (for SAR) */ struct sk_buff *skb; /* back pointer to skb (for poll_rx) */ }; static void refill_pool(struct atm_dev *dev,int pool) { struct zatm_dev *zatm_dev; struct sk_buff *skb; struct rx_buffer_head *first; unsigned long flags; int align,offset,free,count,size; EVENT("refill_pool\n",0,0); zatm_dev = ZATM_DEV(dev); size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 
0 : pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head); if (size < PAGE_SIZE) { align = 32; /* for 32 byte alignment */ offset = sizeof(struct rx_buffer_head); } else { align = 4096; offset = zatm_dev->pool_info[pool].offset+ sizeof(struct rx_buffer_head); } size += align; spin_lock_irqsave(&zatm_dev->lock, flags); free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) & uPD98401_RXFP_REMAIN; spin_unlock_irqrestore(&zatm_dev->lock, flags); if (free >= zatm_dev->pool_info[pool].low_water) return; EVENT("starting ... POOL: 0x%x, 0x%x\n", zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); count = 0; first = NULL; while (free < zatm_dev->pool_info[pool].high_water) { struct rx_buffer_head *head; skb = alloc_skb(size,GFP_ATOMIC); if (!skb) { printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new " "skb (%d) with %d free\n",dev->number,size,free); break; } skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+ align+offset-1) & ~(unsigned long) (align-1))-offset)- skb->data); head = (struct rx_buffer_head *) skb->data; skb_reserve(skb,sizeof(struct rx_buffer_head)); if (!first) first = head; count++; head->buffer = virt_to_bus(skb->data); head->link = 0; head->skb = skb; EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb, (unsigned long) head); spin_lock_irqsave(&zatm_dev->lock, flags); if (zatm_dev->last_free[pool]) ((struct rx_buffer_head *) (zatm_dev->last_free[pool]-> data))[-1].link = virt_to_bus(head); zatm_dev->last_free[pool] = skb; skb_queue_tail(&zatm_dev->pool[pool],skb); spin_unlock_irqrestore(&zatm_dev->lock, flags); free++; } if (first) { spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(virt_to_bus(first),CER); zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); EVENT ("POOL: 0x%x, 0x%x\n", zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); 
EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); } } static void drain_free(struct atm_dev *dev,int pool) { skb_queue_purge(&ZATM_DEV(dev)->pool[pool]); } static int pool_index(int max_pdu) { int i; if (max_pdu % ATM_CELL_PAYLOAD) printk(KERN_ERR DEV_LABEL ": driver error in pool_index: " "max_pdu is %d\n",max_pdu); if (max_pdu > 65536) return -1; for (i = 0; (64 << i) < max_pdu; i++); return i+ZATM_AAL5_POOL_BASE; } /* use_pool isn't reentrant */ static void use_pool(struct atm_dev *dev,int pool) { struct zatm_dev *zatm_dev; unsigned long flags; int size; zatm_dev = ZATM_DEV(dev); if (!(zatm_dev->pool_info[pool].ref_count++)) { skb_queue_head_init(&zatm_dev->pool[pool]); size = pool-ZATM_AAL5_POOL_BASE; if (size < 0) size = 0; /* 64B... */ else if (size > 10) size = 10; /* ... 64kB */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) << uPD98401_RXFP_ALERT_SHIFT) | (1 << uPD98401_RXFP_BTSZ_SHIFT) | (size << uPD98401_RXFP_BFSZ_SHIFT), zatm_dev->pool_base+pool*2); zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+ pool*2+1); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->last_free[pool] = NULL; refill_pool(dev,pool); } DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count); } static void unuse_pool(struct atm_dev *dev,int pool) { if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count)) drain_free(dev,pool); } /*----------------------------------- RX ------------------------------------*/ #if 0 static void exception(struct atm_vcc *vcc) { static int count = 0; struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev); struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc); unsigned long *qrp; int i; if (count++ > 2) return; for (i = 0; i < 8; i++) printk("TX%d: 0x%08lx\n",i, zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i)); for (i = 0; i < 5; i++) printk("SH%d: 0x%08lx\n",i, zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i)); qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ 
uPD98401_TXVC_QRP); printk("qrp=0x%08lx\n",(unsigned long) qrp); for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]); } #endif static const char *err_txt[] = { "No error", "RX buf underflow", "RX FIFO overrun", "Maximum len violation", "CRC error", "User abort", "Length violation", "T1 error", "Deactivated", "???", "???", "???", "???", "???", "???", "???" }; static void poll_rx(struct atm_dev *dev,int mbx) { struct zatm_dev *zatm_dev; unsigned long pos; u32 x; int error; EVENT("poll_rx\n",0,0); zatm_dev = ZATM_DEV(dev); pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { u32 *here; struct sk_buff *skb; struct atm_vcc *vcc; int cells,size,chan; EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); here = (u32 *) pos; if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx]) pos = zatm_dev->mbx_start[mbx]; cells = here[0] & uPD98401_AAL5_SIZE; #if 0 printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]); { unsigned long *x; printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev, zatm_dev->pool_base), zpeekl(zatm_dev,zatm_dev->pool_base+1)); x = (unsigned long *) here[2]; printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n", x[0],x[1],x[2],x[3]); } #endif error = 0; if (here[3] & uPD98401_AAL5_ERR) { error = (here[3] & uPD98401_AAL5_ES) >> uPD98401_AAL5_ES_SHIFT; if (error == uPD98401_AAL5_ES_DEACT || error == uPD98401_AAL5_ES_FREE) continue; } EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >> uPD98401_AAL5_ES_SHIFT,error); skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb; __net_timestamp(skb); #if 0 printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3], ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1], ((unsigned *) skb->data)[0]); #endif EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb, (unsigned long) here); #if 0 printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); #endif size = error ? 
0 : ntohs(((__be16 *) skb->data)[cells* ATM_CELL_PAYLOAD/sizeof(u16)-3]); EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size); chan = (here[3] & uPD98401_AAL5_CHAN) >> uPD98401_AAL5_CHAN_SHIFT; if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { int pos; vcc = zatm_dev->rx_map[chan]; pos = ZATM_VCC(vcc)->pool; if (skb == zatm_dev->last_free[pos]) zatm_dev->last_free[pos] = NULL; skb_unlink(skb, zatm_dev->pool + pos); } else { printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " "for non-existing channel\n",dev->number); size = 0; vcc = NULL; event_dump(); } if (error) { static unsigned long silence = 0; static int last_error = 0; if (error != last_error || time_after(jiffies, silence) || silence == 0){ printk(KERN_WARNING DEV_LABEL "(itf %d): " "chan %d error %s\n",dev->number,chan, err_txt[error]); last_error = error; silence = (jiffies+2*HZ)|1; } size = 0; } if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER || size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) { printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d " "cells\n",dev->number,size,cells); size = 0; event_dump(); } if (size > ATM_MAX_AAL5_PDU) { printk(KERN_ERR DEV_LABEL "(itf %d): size too big " "(%d)\n",dev->number,size); size = 0; event_dump(); } if (!size) { dev_kfree_skb_irq(skb); if (vcc) atomic_inc(&vcc->stats->rx_err); continue; } if (!atm_charge(vcc,skb->truesize)) { dev_kfree_skb_irq(skb); continue; } skb->len = size; ATM_SKB(skb)->vcc = vcc; vcc->push(vcc,skb); atomic_inc(&vcc->stats->rx); } zout(pos & 0xffff,MTA(mbx)); #if 0 /* probably a stupid idea */ refill_pool(dev,zatm_vcc->pool); /* maybe this saves us a few interrupts */ #endif } static int open_rx_first(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; unsigned short chan; int cells; DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053)); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); zatm_vcc->rx_chan = 0; if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 
0; if (vcc->qos.aal == ATM_AAL5) { if (vcc->qos.rxtp.max_sdu > 65464) vcc->qos.rxtp.max_sdu = 65464; /* fix this - we may want to receive 64kB SDUs later */ cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD); zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD); } else { cells = 1; zatm_vcc->pool = ZATM_AAL0_POOL; } if (zatm_vcc->pool < 0) return -EMSGSIZE; spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_OPEN_CHAN,CMR); zwait; DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); DPRINTK("chan is %d\n",chan); if (!chan) return -EAGAIN; use_pool(vcc->dev,zatm_vcc->pool); DPRINTK("pool %d\n",zatm_vcc->pool); /* set up VC descriptor */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT, chan*VC_SIZE/4); zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ? uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1); zpokel(zatm_dev,0,chan*VC_SIZE/4+2); zatm_vcc->rx_chan = chan; zatm_dev->rx_map[chan] = vcc; spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } static int open_rx_second(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int pos,shift; DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053)); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); if (!zatm_vcc->rx_chan) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); /* should also handle VPI @@@ */ pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) | ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos); spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } static void close_rx(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int pos,shift; zatm_vcc = ZATM_VCC(vcc); zatm_dev = ZATM_DEV(vcc->dev); if (!zatm_vcc->rx_chan) return; 
DPRINTK("close_rx\n"); /* disable receiver */ if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) { spin_lock_irqsave(&zatm_dev->lock, flags); pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos); zwait; zout(uPD98401_NOP,CMR); zwait; zout(uPD98401_NOP,CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); } spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; udelay(10); /* why oh why ... ? */ zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel " "%d\n",vcc->dev->number,zatm_vcc->rx_chan); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL; zatm_vcc->rx_chan = 0; unuse_pool(vcc->dev,zatm_vcc->pool); } static int start_rx(struct atm_dev *dev) { struct zatm_dev *zatm_dev; int size,i; DPRINTK("start_rx\n"); zatm_dev = ZATM_DEV(dev); size = sizeof(struct atm_vcc *)*zatm_dev->chans; zatm_dev->rx_map = kzalloc(size,GFP_KERNEL); if (!zatm_dev->rx_map) return -ENOMEM; /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */ zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR); /* prepare free buffer pools */ for (i = 0; i <= ZATM_LAST_POOL; i++) { zatm_dev->pool_info[i].ref_count = 0; zatm_dev->pool_info[i].rqa_count = 0; zatm_dev->pool_info[i].rqu_count = 0; zatm_dev->pool_info[i].low_water = LOW_MARK; zatm_dev->pool_info[i].high_water = HIGH_MARK; zatm_dev->pool_info[i].offset = 0; zatm_dev->pool_info[i].next_off = 0; zatm_dev->pool_info[i].next_cnt = 0; zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES; } return 0; } /*----------------------------------- TX ------------------------------------*/ static int do_tx(struct sk_buff *skb) { struct atm_vcc *vcc; struct zatm_dev *zatm_dev; 
struct zatm_vcc *zatm_vcc; u32 *dsc; unsigned long flags; EVENT("do_tx\n",0,0); DPRINTK("sending skb %p\n",skb); vcc = ATM_SKB(skb)->vcc; zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0); spin_lock_irqsave(&zatm_dev->lock, flags); if (!skb_shinfo(skb)->nr_frags) { if (zatm_vcc->txing == RING_ENTRIES-1) { spin_unlock_irqrestore(&zatm_dev->lock, flags); return RING_BUSY; } zatm_vcc->txing++; dsc = zatm_vcc->ring+zatm_vcc->ring_curr; zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) & (RING_ENTRIES*RING_WORDS-1); dsc[1] = 0; dsc[2] = skb->len; dsc[3] = virt_to_bus(skb->data); mb(); dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? uPD98401_CLPM_1 : uPD98401_CLPM_0)); EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0); } else { printk("NONONONOO!!!!\n"); dsc = NULL; #if 0 u32 *put; int i; dsc = kmalloc(uPD98401_TXPD_SIZE * 2 + uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC); if (!dsc) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_irq(skb); return -EAGAIN; } /* @@@ should check alignment */ put = dsc+8; dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 
uPD98401_CLPM_1 : uPD98401_CLPM_0)); dsc[1] = 0; dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE; dsc[3] = virt_to_bus(put); for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) { *put++ = ((struct iovec *) skb->data)[i].iov_len; *put++ = virt_to_bus(((struct iovec *) skb->data)[i].iov_base); } put[-2] |= uPD98401_TXBD_LAST; #endif } ZATM_PRV_DSC(skb) = dsc; skb_queue_tail(&zatm_vcc->tx_queue,skb); DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ uPD98401_TXVC_QRP)); zwait; zout(uPD98401_TX_READY | (zatm_vcc->tx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); EVENT("done\n",0,0); return 0; } static inline void dequeue_tx(struct atm_vcc *vcc) { struct zatm_vcc *zatm_vcc; struct sk_buff *skb; EVENT("dequeue_tx\n",0,0); zatm_vcc = ZATM_VCC(vcc); skb = skb_dequeue(&zatm_vcc->tx_queue); if (!skb) { printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not " "txing\n",vcc->dev->number); return; } #if 0 /* @@@ would fail on CLP */ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! 
(%08x)\n", *ZATM_PRV_DSC(skb)); #endif *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */ zatm_vcc->txing--; if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb_irq(skb); while ((skb = skb_dequeue(&zatm_vcc->backlog))) if (do_tx(skb) == RING_BUSY) { skb_queue_head(&zatm_vcc->backlog,skb); break; } atomic_inc(&vcc->stats->tx); wake_up(&zatm_vcc->tx_wait); } static void poll_tx(struct atm_dev *dev,int mbx) { struct zatm_dev *zatm_dev; unsigned long pos; u32 x; EVENT("poll_tx\n",0,0); zatm_dev = ZATM_DEV(dev); pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { int chan; #if 1 u32 data,*addr; EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); addr = (u32 *) pos; data = *addr; chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr, data); EVENT("chan = %d\n",chan,0); #else NO ! chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; #endif if (chan < zatm_dev->chans && zatm_dev->tx_map[chan]) dequeue_tx(zatm_dev->tx_map[chan]); else { printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication " "for non-existing channel %d\n",dev->number,chan); event_dump(); } if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx]) pos = zatm_dev->mbx_start[mbx]; } zout(pos & 0xffff,MTA(mbx)); } /* * BUG BUG BUG: Doesn't handle "new-style" rate specification yet. 
*/ static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr) { struct zatm_dev *zatm_dev; unsigned long flags; unsigned long i,m,c; int shaper; DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max); zatm_dev = ZATM_DEV(dev); if (!zatm_dev->free_shapers) return -EAGAIN; for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++); zatm_dev->free_shapers &= ~1 << shaper; if (ubr) { c = 5; i = m = 1; zatm_dev->ubr_ref_cnt++; zatm_dev->ubr = shaper; *pcr = 0; } else { if (min) { if (min <= 255) { i = min; m = ATM_OC3_PCR; } else { i = 255; m = ATM_OC3_PCR*255/min; } } else { if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw; if (max <= 255) { i = max; m = ATM_OC3_PCR; } else { i = 255; m = DIV_ROUND_UP(ATM_OC3_PCR*255, max); } } if (i > m) { printk(KERN_CRIT DEV_LABEL "shaper algorithm botched " "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m); m = i; } *pcr = i*ATM_OC3_PCR/m; c = 20; /* @@@ should use max_cdv ! */ if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL; if (zatm_dev->tx_bw < *pcr) return -EAGAIN; zatm_dev->tx_bw -= *pcr; } spin_lock_irqsave(&zatm_dev->lock, flags); DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr); zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper)); zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper)); zpokel(zatm_dev,0,uPD98401_X(shaper)); zpokel(zatm_dev,0,uPD98401_Y(shaper)); zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper)); spin_unlock_irqrestore(&zatm_dev->lock, flags); return shaper; } static void dealloc_shaper(struct atm_dev *dev,int shaper) { struct zatm_dev *zatm_dev; unsigned long flags; zatm_dev = ZATM_DEV(dev); if (shaper == zatm_dev->ubr) { if (--zatm_dev->ubr_ref_cnt) return; zatm_dev->ubr = -1; } spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E, uPD98401_PS(shaper)); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->free_shapers |= 1 << shaper; } static void close_tx(struct atm_vcc *vcc) { struct 
zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int chan; zatm_vcc = ZATM_VCC(vcc); zatm_dev = ZATM_DEV(vcc->dev); chan = zatm_vcc->tx_chan; if (!chan) return; DPRINTK("close_tx\n"); if (skb_peek(&zatm_vcc->backlog)) { printk("waiting for backlog to drain ...\n"); event_dump(); wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog)); } if (skb_peek(&zatm_vcc->tx_queue)) { printk("waiting for TX queue to drain ...\n"); event_dump(); wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue)); } spin_lock_irqsave(&zatm_dev->lock, flags); #if 0 zwait; zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); #endif zwait; zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel " "%d\n",vcc->dev->number,chan); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_vcc->tx_chan = 0; zatm_dev->tx_map[chan] = NULL; if (zatm_vcc->shaper != zatm_dev->ubr) { zatm_dev->tx_bw += vcc->qos.txtp.min_pcr; dealloc_shaper(vcc->dev,zatm_vcc->shaper); } kfree(zatm_vcc->ring); } static int open_tx_first(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; u32 *loop; unsigned short chan; int unlimited; DPRINTK("open_tx_first\n"); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); zatm_vcc->tx_chan = 0; if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_OPEN_CHAN,CMR); zwait; DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); DPRINTK("chan is %d\n",chan); if (!chan) return -EAGAIN; unlimited = vcc->qos.txtp.traffic_class == ATM_UBR && (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR || vcc->qos.txtp.max_pcr >= ATM_OC3_PCR); if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr; else { 
int uninitialized_var(pcr); if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU; if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr, vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited)) < 0) { close_tx(vcc); return zatm_vcc->shaper; } if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR; vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr; } zatm_vcc->tx_chan = chan; skb_queue_head_init(&zatm_vcc->tx_queue); init_waitqueue_head(&zatm_vcc->tx_wait); /* initialize ring */ zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL); if (!zatm_vcc->ring) return -ENOMEM; loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS; loop[0] = uPD98401_TXPD_V; loop[1] = loop[2] = 0; loop[3] = virt_to_bus(zatm_vcc->ring); zatm_vcc->ring_curr = 0; zatm_vcc->txing = 0; skb_queue_head_init(&zatm_vcc->backlog); zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring), chan*VC_SIZE/4+uPD98401_TXVC_QRP); return 0; } static int open_tx_second(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; DPRINTK("open_tx_second\n"); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); if (!zatm_vcc->tx_chan) return 0; /* set up VC descriptor */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4); zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper << uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) | vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1); zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc; return 0; } static int start_tx(struct atm_dev *dev) { struct zatm_dev *zatm_dev; int i; DPRINTK("start_tx\n"); zatm_dev = ZATM_DEV(dev); zatm_dev->tx_map = kmalloc(sizeof(struct atm_vcc *)* zatm_dev->chans,GFP_KERNEL); if (!zatm_dev->tx_map) return -ENOMEM; zatm_dev->tx_bw = ATM_OC3_PCR; zatm_dev->free_shapers = (1 << NR_SHAPERS)-1; zatm_dev->ubr = -1; zatm_dev->ubr_ref_cnt = 0; /* initialize shapers */ for (i = 0; i < NR_SHAPERS; i++) 
zpokel(zatm_dev,0,uPD98401_PS(i)); return 0; } /*------------------------------- interrupts --------------------------------*/ static irqreturn_t zatm_int(int irq,void *dev_id) { struct atm_dev *dev; struct zatm_dev *zatm_dev; u32 reason; int handled = 0; dev = dev_id; zatm_dev = ZATM_DEV(dev); while ((reason = zin(GSR))) { handled = 1; EVENT("reason 0x%x\n",reason,0); if (reason & uPD98401_INT_PI) { EVENT("PHY int\n",0,0); dev->phy->interrupt(dev); } if (reason & uPD98401_INT_RQA) { unsigned long pools; int i; pools = zin(RQA); EVENT("RQA (0x%08x)\n",pools,0); for (i = 0; pools; i++) { if (pools & 1) { refill_pool(dev,i); zatm_dev->pool_info[i].rqa_count++; } pools >>= 1; } } if (reason & uPD98401_INT_RQU) { unsigned long pools; int i; pools = zin(RQU); printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n", dev->number,pools); event_dump(); for (i = 0; pools; i++) { if (pools & 1) { refill_pool(dev,i); zatm_dev->pool_info[i].rqu_count++; } pools >>= 1; } } /* don't handle RD */ if (reason & uPD98401_INT_SPE) printk(KERN_ALERT DEV_LABEL "(itf %d): system parity " "error at 0x%08x\n",dev->number,zin(ADDR)); if (reason & uPD98401_INT_CPE) printk(KERN_ALERT DEV_LABEL "(itf %d): control memory " "parity error at 0x%08x\n",dev->number,zin(ADDR)); if (reason & uPD98401_INT_SBE) { printk(KERN_ALERT DEV_LABEL "(itf %d): system bus " "error at 0x%08x\n",dev->number,zin(ADDR)); event_dump(); } /* don't handle IND */ if (reason & uPD98401_INT_MF) { printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full " "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF) >> uPD98401_INT_MF_SHIFT); event_dump(); /* @@@ should try to recover */ } if (reason & uPD98401_INT_MM) { if (reason & 1) poll_rx(dev,0); if (reason & 2) poll_rx(dev,1); if (reason & 4) poll_tx(dev,2); if (reason & 8) poll_tx(dev,3); } /* @@@ handle RCRn */ } return IRQ_RETVAL(handled); } /*----------------------------- (E)EPROM access -----------------------------*/ static void __devinit eprom_set(struct zatm_dev 
*zatm_dev,unsigned long value, unsigned short cmd) { int error; if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value))) printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n", error); } static unsigned long __devinit eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd) { unsigned int value; int error; if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value))) printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n", error); return value; } static void __devinit eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,int bits,unsigned short cmd) { unsigned long value; int i; for (i = bits-1; i >= 0; i--) { value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0); eprom_set(zatm_dev,value,cmd); eprom_set(zatm_dev,value | ZEPROM_SK,cmd); eprom_set(zatm_dev,value,cmd); } } static void __devinit eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,unsigned short cmd) { int i; *byte = 0; for (i = 8; i; i--) { eprom_set(zatm_dev,ZEPROM_CS,cmd); eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd); *byte <<= 1; if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1; eprom_set(zatm_dev,ZEPROM_CS,cmd); } } static unsigned char __devinit eprom_try_esi(struct atm_dev *dev, unsigned short cmd,int offset,int swap) { unsigned char buf[ZEPROM_SIZE]; struct zatm_dev *zatm_dev; int i; zatm_dev = ZATM_DEV(dev); for (i = 0; i < ZEPROM_SIZE; i += 2) { eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */ eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd); eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd); eprom_get_byte(zatm_dev,buf+i+swap,cmd); eprom_get_byte(zatm_dev,buf+i+1-swap,cmd); eprom_set(zatm_dev,0,cmd); /* deselect EPROM */ } memcpy(dev->esi,buf+offset,ESI_LEN); return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */ } static void __devinit eprom_get_esi(struct atm_dev *dev) { if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return; (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0); } 
/*--------------------------------- entries ---------------------------------*/ static int __devinit zatm_init(struct atm_dev *dev) { struct zatm_dev *zatm_dev; struct pci_dev *pci_dev; unsigned short command; int error,i,last; unsigned long t0,t1,t2; DPRINTK(">zatm_init\n"); zatm_dev = ZATM_DEV(dev); spin_lock_init(&zatm_dev->lock); pci_dev = zatm_dev->pci_dev; zatm_dev->base = pci_resource_start(pci_dev, 0); zatm_dev->irq = pci_dev->irq; if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) { printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n", dev->number,error); return -EINVAL; } if ((error = pci_write_config_word(pci_dev,PCI_COMMAND, command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)" "\n",dev->number,error); return -EIO; } eprom_get_esi(dev); printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,", dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq); /* reset uPD98401 */ zout(0,SWR); while (!(zin(GSR) & uPD98401_INT_IND)); zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR); last = MAX_CRAM_SIZE; for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) { zpokel(zatm_dev,0x55555555,i); if (zpeekl(zatm_dev,i) != 0x55555555) last = i; else { zpokel(zatm_dev,0xAAAAAAAA,i); if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i; else zpokel(zatm_dev,i,i); } } for (i = 0; i < last; i += RAM_INCREMENT) if (zpeekl(zatm_dev,i) != i) break; zatm_dev->mem = i << 2; while (i) zpokel(zatm_dev,0,--i); /* reset again to rebuild memory pointers */ zout(0,SWR); while (!(zin(GSR) & uPD98401_INT_IND)); zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 | uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR); /* TODO: should shrink allocation now */ printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" : "MMF"); for (i = 0; i < ESI_LEN; i++) printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? 
")\n" : "-"); do { unsigned long flags; spin_lock_irqsave(&zatm_dev->lock, flags); t0 = zpeekl(zatm_dev,uPD98401_TSR); udelay(10); t1 = zpeekl(zatm_dev,uPD98401_TSR); udelay(1010); t2 = zpeekl(zatm_dev,uPD98401_TSR); spin_unlock_irqrestore(&zatm_dev->lock, flags); } while (t0 > t1 || t1 > t2); /* loop if wrapping ... */ zatm_dev->khz = t2-2*t1+t0; printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d " "MHz\n",dev->number, (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT, zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000); return uPD98402_init(dev); } static int __devinit zatm_start(struct atm_dev *dev) { struct zatm_dev *zatm_dev = ZATM_DEV(dev); struct pci_dev *pdev = zatm_dev->pci_dev; unsigned long curr; int pools,vccs,rx; int error, i, ld; DPRINTK("zatm_start\n"); zatm_dev->rx_map = zatm_dev->tx_map = NULL; for (i = 0; i < NR_MBX; i++) zatm_dev->mbx_start[i] = 0; error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev); if (error < 0) { printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n", dev->number,zatm_dev->irq); goto done; } /* define memory regions */ pools = NR_POOLS; if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE) pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE; vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/ (2*VC_SIZE+RX_SIZE); ld = -1; for (rx = 1; rx < vccs; rx <<= 1) ld++; dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */ dev->ci_range.vci_bits = ld; dev->link_rate = ATM_OC3_PCR; zatm_dev->chans = vccs; /* ??? 
*/ curr = rx*RX_SIZE/4; DPRINTK("RX pool 0x%08lx\n",curr); zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */ zatm_dev->pool_base = curr; curr += pools*POOL_SIZE/4; DPRINTK("Shapers 0x%08lx\n",curr); zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */ curr += NR_SHAPERS*SHAPER_SIZE/4; DPRINTK("Free 0x%08lx\n",curr); zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */ printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, " "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx, (zatm_dev->mem-curr*4)/VC_SIZE); /* create mailboxes */ for (i = 0; i < NR_MBX; i++) { void *mbx; dma_addr_t mbx_dma; if (!mbx_entries[i]) continue; mbx = pci_alloc_consistent(pdev, 2*MBX_SIZE(i), &mbx_dma); if (!mbx) { error = -ENOMEM; goto out; } /* * Alignment provided by pci_alloc_consistent() isn't enough * for this device. */ if (((unsigned long)mbx ^ mbx_dma) & 0xffff) { printk(KERN_ERR DEV_LABEL "(itf %d): system " "bus incompatible with driver\n", dev->number); pci_free_consistent(pdev, 2*MBX_SIZE(i), mbx, mbx_dma); error = -ENODEV; goto out; } DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i)); zatm_dev->mbx_start[i] = (unsigned long)mbx; zatm_dev->mbx_dma[i] = mbx_dma; zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) & 0xffff; zout(mbx_dma >> 16, MSH(i)); zout(mbx_dma, MSL(i)); zout(zatm_dev->mbx_end[i], MBA(i)); zout((unsigned long)mbx & 0xffff, MTA(i)); zout((unsigned long)mbx & 0xffff, MWA(i)); } error = start_tx(dev); if (error) goto out; error = start_rx(dev); if (error) goto out_tx; error = dev->phy->start(dev); if (error) goto out_rx; zout(0xffffffff,IMR); /* enable interrupts */ /* enable TX & RX */ zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR); done: return error; out_rx: kfree(zatm_dev->rx_map); out_tx: kfree(zatm_dev->tx_map); out: while (i-- > 0) { pci_free_consistent(pdev, 2*MBX_SIZE(i), (void *)zatm_dev->mbx_start[i], zatm_dev->mbx_dma[i]); } free_irq(zatm_dev->irq, dev); goto done; } static void zatm_close(struct atm_vcc *vcc) { 
DPRINTK(">zatm_close\n"); if (!ZATM_VCC(vcc)) return; clear_bit(ATM_VF_READY,&vcc->flags); close_rx(vcc); EVENT("close_tx\n",0,0); close_tx(vcc); DPRINTK("zatm_close: done waiting\n"); /* deallocate memory */ kfree(ZATM_VCC(vcc)); vcc->dev_data = NULL; clear_bit(ATM_VF_ADDR,&vcc->flags); } static int zatm_open(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; short vpi = vcc->vpi; int vci = vcc->vci; int error; DPRINTK(">zatm_open\n"); zatm_dev = ZATM_DEV(vcc->dev); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) vcc->dev_data = NULL; if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC) set_bit(ATM_VF_ADDR,&vcc->flags); if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */ DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi, vcc->vci); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) { zatm_vcc = kmalloc(sizeof(struct zatm_vcc),GFP_KERNEL); if (!zatm_vcc) { clear_bit(ATM_VF_ADDR,&vcc->flags); return -ENOMEM; } vcc->dev_data = zatm_vcc; ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */ if ((error = open_rx_first(vcc))) { zatm_close(vcc); return error; } if ((error = open_tx_first(vcc))) { zatm_close(vcc); return error; } } if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0; if ((error = open_rx_second(vcc))) { zatm_close(vcc); return error; } if ((error = open_tx_second(vcc))) { zatm_close(vcc); return error; } set_bit(ATM_VF_READY,&vcc->flags); return 0; } static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags) { printk("Not yet implemented\n"); return -ENOSYS; /* @@@ */ } static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) { struct zatm_dev *zatm_dev; unsigned long flags; zatm_dev = ZATM_DEV(dev); switch (cmd) { case ZATM_GETPOOLZ: if (!capable(CAP_NET_ADMIN)) return -EPERM; /* fall through */ case ZATM_GETPOOL: { struct zatm_pool_info info; int pool; if (get_user(pool, &((struct zatm_pool_req __user *) arg)->pool_num)) return -EFAULT; if (pool < 0 || pool 
> ZATM_LAST_POOL) return -EINVAL; spin_lock_irqsave(&zatm_dev->lock, flags); info = zatm_dev->pool_info[pool]; if (cmd == ZATM_GETPOOLZ) { zatm_dev->pool_info[pool].rqa_count = 0; zatm_dev->pool_info[pool].rqu_count = 0; } spin_unlock_irqrestore(&zatm_dev->lock, flags); return copy_to_user( &((struct zatm_pool_req __user *) arg)->info, &info,sizeof(info)) ? -EFAULT : 0; } case ZATM_SETPOOL: { struct zatm_pool_info info; int pool; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (get_user(pool, &((struct zatm_pool_req __user *) arg)->pool_num)) return -EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return -EINVAL; if (copy_from_user(&info, &((struct zatm_pool_req __user *) arg)->info, sizeof(info))) return -EFAULT; if (!info.low_water) info.low_water = zatm_dev-> pool_info[pool].low_water; if (!info.high_water) info.high_water = zatm_dev-> pool_info[pool].high_water; if (!info.next_thres) info.next_thres = zatm_dev-> pool_info[pool].next_thres; if (info.low_water >= info.high_water || info.low_water < 0) return -EINVAL; spin_lock_irqsave(&zatm_dev->lock, flags); zatm_dev->pool_info[pool].low_water = info.low_water; zatm_dev->pool_info[pool].high_water = info.high_water; zatm_dev->pool_info[pool].next_thres = info.next_thres; spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } default: if (!dev->phy->ioctl) return -ENOIOCTLCMD; return dev->phy->ioctl(dev,cmd,arg); } } static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname, void __user *optval,int optlen) { return -EINVAL; } static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname, void __user *optval,unsigned int optlen) { return -EINVAL; } static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb) { int error; EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0); if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) { if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); return -EINVAL; } if (!skb) { printk(KERN_CRIT "!skb in zatm_send ?\n"); if (vcc->pop) 
vcc->pop(vcc,skb); return -EINVAL; } ATM_SKB(skb)->vcc = vcc; error = do_tx(skb); if (error != RING_BUSY) return error; skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb); return 0; } static void zatm_phy_put(struct atm_dev *dev,unsigned char value, unsigned long addr) { struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); zwait; zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_B0 | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); } static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr) { struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); zwait; zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); zwait; return zin(CER) & 0xff; } static const struct atmdev_ops ops = { .open = zatm_open, .close = zatm_close, .ioctl = zatm_ioctl, .getsockopt = zatm_getsockopt, .setsockopt = zatm_setsockopt, .send = zatm_send, .phy_put = zatm_phy_put, .phy_get = zatm_phy_get, .change_qos = zatm_change_qos, }; static int __devinit zatm_init_one(struct pci_dev *pci_dev, const struct pci_device_id *ent) { struct atm_dev *dev; struct zatm_dev *zatm_dev; int ret = -ENOMEM; zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL); if (!zatm_dev) { printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL); goto out; } dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); if (!dev) goto out_free; ret = pci_enable_device(pci_dev); if (ret < 0) goto out_deregister; ret = pci_request_regions(pci_dev, DEV_LABEL); if (ret < 0) goto out_disable; zatm_dev->pci_dev = pci_dev; dev->dev_data = zatm_dev; zatm_dev->copper = (int)ent->driver_data; if ((ret = zatm_init(dev)) || (ret = zatm_start(dev))) goto out_release; pci_set_drvdata(pci_dev, dev); zatm_dev->more = zatm_boards; zatm_boards = dev; ret = 0; out: return ret; out_release: pci_release_regions(pci_dev); out_disable: pci_disable_device(pci_dev); out_deregister: atm_dev_deregister(dev); out_free: kfree(zatm_dev); goto out; } MODULE_LICENSE("GPL"); 
static struct pci_device_id zatm_pci_tbl[] __devinitdata = { { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER }, { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, zatm_pci_tbl); static struct pci_driver zatm_driver = { .name = DEV_LABEL, .id_table = zatm_pci_tbl, .probe = zatm_init_one, }; static int __init zatm_init_module(void) { return pci_register_driver(&zatm_driver); } module_init(zatm_init_module); /* module_exit not defined so not unloadable */
gpl-2.0
ashishkrishnan/android_kernel_samsung_smdk4412
fs/cachefiles/namei.c
3177
25200
/* CacheFiles path walking and related routines * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/quotaops.h> #include <linux/xattr.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/slab.h> #include "internal.h" #define CACHEFILES_KEYBUF_SIZE 512 /* * dump debugging info about an object */ static noinline void __cachefiles_printk_object(struct cachefiles_object *object, const char *prefix, u8 *keybuf) { struct fscache_cookie *cookie; unsigned keylen, loop; printk(KERN_ERR "%sobject: OBJ%x\n", prefix, object->fscache.debug_id); printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n", prefix, fscache_object_states[object->fscache.state], object->fscache.flags, work_busy(&object->fscache.work), object->fscache.events, object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK); printk(KERN_ERR "%sops=%u inp=%u exc=%u\n", prefix, object->fscache.n_ops, object->fscache.n_in_progress, object->fscache.n_exclusive); printk(KERN_ERR "%sparent=%p\n", prefix, object->fscache.parent); spin_lock(&object->fscache.lock); cookie = object->fscache.cookie; if (cookie) { printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n", prefix, object->fscache.cookie, object->fscache.cookie->parent, object->fscache.cookie->netfs_data, object->fscache.cookie->flags); if (keybuf) keylen = cookie->def->get_key(cookie->netfs_data, keybuf, CACHEFILES_KEYBUF_SIZE); else keylen = 0; } else { printk(KERN_ERR "%scookie=NULL\n", prefix); keylen = 0; } spin_unlock(&object->fscache.lock); if (keylen) { 
printk(KERN_ERR "%skey=[%u] '", prefix, keylen); for (loop = 0; loop < keylen; loop++) printk("%02x", keybuf[loop]); printk("'\n"); } } /* * dump debugging info about a pair of objects */ static noinline void cachefiles_printk_object(struct cachefiles_object *object, struct cachefiles_object *xobject) { u8 *keybuf; keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO); if (object) __cachefiles_printk_object(object, "", keybuf); if (xobject) __cachefiles_printk_object(xobject, "x", keybuf); kfree(keybuf); } /* * mark the owner of a dentry, if there is one, to indicate that that dentry * has been preemptively deleted * - the caller must hold the i_mutex on the dentry's parent as required to * call vfs_unlink(), vfs_rmdir() or vfs_rename() */ static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, struct dentry *dentry) { struct cachefiles_object *object; struct rb_node *p; _enter(",'%*.*s'", dentry->d_name.len, dentry->d_name.len, dentry->d_name.name); write_lock(&cache->active_lock); p = cache->active_nodes.rb_node; while (p) { object = rb_entry(p, struct cachefiles_object, active_node); if (object->dentry > dentry) p = p->rb_left; else if (object->dentry < dentry) p = p->rb_right; else goto found_dentry; } write_unlock(&cache->active_lock); _leave(" [no owner]"); return; /* found the dentry for */ found_dentry: kdebug("preemptive burial: OBJ%x [%s] %p", object->fscache.debug_id, fscache_object_states[object->fscache.state], dentry); if (object->fscache.state < FSCACHE_OBJECT_DYING) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error:" " Can't preemptively bury live object\n"); cachefiles_printk_object(object, NULL); } else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { printk(KERN_ERR "CacheFiles: Error:" " Object already preemptively buried\n"); } write_unlock(&cache->active_lock); _leave(" [owner marked]"); } /* * record the fact that an object is now active */ static int cachefiles_mark_object_active(struct 
cachefiles_cache *cache, struct cachefiles_object *object) { struct cachefiles_object *xobject; struct rb_node **_p, *_parent = NULL; struct dentry *dentry; _enter(",%p", object); try_again: write_lock(&cache->active_lock); if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) { printk(KERN_ERR "CacheFiles: Error: Object already active\n"); cachefiles_printk_object(object, NULL); BUG(); } dentry = object->dentry; _p = &cache->active_nodes.rb_node; while (*_p) { _parent = *_p; xobject = rb_entry(_parent, struct cachefiles_object, active_node); ASSERT(xobject != object); if (xobject->dentry > dentry) _p = &(*_p)->rb_left; else if (xobject->dentry < dentry) _p = &(*_p)->rb_right; else goto wait_for_old_object; } rb_link_node(&object->active_node, _parent, _p); rb_insert_color(&object->active_node, &cache->active_nodes); write_unlock(&cache->active_lock); _leave(" = 0"); return 0; /* an old object from a previous incarnation is hogging the slot - we * need to wait for it to be destroyed */ wait_for_old_object: if (xobject->fscache.state < FSCACHE_OBJECT_DYING) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error:" " Unexpected object collision\n"); cachefiles_printk_object(object, xobject); BUG(); } atomic_inc(&xobject->usage); write_unlock(&cache->active_lock); if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) { wait_queue_head_t *wq; signed long timeout = 60 * HZ; wait_queue_t wait; bool requeue; /* if the object we're waiting for is queued for processing, * then just put ourselves on the queue behind it */ if (work_pending(&xobject->fscache.work)) { _debug("queue OBJ%x behind OBJ%x immediately", object->fscache.debug_id, xobject->fscache.debug_id); goto requeue; } /* otherwise we sleep until either the object we're waiting for * is done, or the fscache_object is congested */ wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE); init_wait(&wait); requeue = false; do { prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); if 
(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) break; requeue = fscache_object_sleep_till_congested(&timeout); } while (timeout > 0 && !requeue); finish_wait(wq, &wait); if (requeue && test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) { _debug("queue OBJ%x behind OBJ%x after wait", object->fscache.debug_id, xobject->fscache.debug_id); goto requeue; } if (timeout <= 0) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error: Overlong" " wait for old active object to go away\n"); cachefiles_printk_object(object, xobject); goto requeue; } } ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)); cache->cache.ops->put_object(&xobject->fscache); goto try_again; requeue: clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); cache->cache.ops->put_object(&xobject->fscache); _leave(" = -ETIMEDOUT"); return -ETIMEDOUT; } /* * delete an object representation from the cache * - file backed objects are unlinked * - directory backed objects are stuffed into the graveyard for userspace to * delete * - unlocks the directory mutex */ static int cachefiles_bury_object(struct cachefiles_cache *cache, struct dentry *dir, struct dentry *rep, bool preemptive) { struct dentry *grave, *trap; struct path path, path_to_graveyard; char nbuffer[8 + 8 + 1]; int ret; _enter(",'%*.*s','%*.*s'", dir->d_name.len, dir->d_name.len, dir->d_name.name, rep->d_name.len, rep->d_name.len, rep->d_name.name); _debug("remove %p from %p", rep, dir); /* non-directories can just be unlinked */ if (!S_ISDIR(rep->d_inode->i_mode)) { _debug("unlink stale object"); path.mnt = cache->mnt; path.dentry = dir; ret = security_path_unlink(&path, rep); if (ret < 0) { cachefiles_io_error(cache, "Unlink security error"); } else { ret = vfs_unlink(dir->d_inode, rep); if (preemptive) cachefiles_mark_object_buried(cache, rep); } mutex_unlock(&dir->d_inode->i_mutex); if (ret == -EIO) cachefiles_io_error(cache, "Unlink failed"); _leave(" = %d", ret); return ret; } /* directories have to be moved to the 
graveyard */ _debug("move stale object to graveyard"); mutex_unlock(&dir->d_inode->i_mutex); try_again: /* first step is to make up a grave dentry in the graveyard */ sprintf(nbuffer, "%08x%08x", (uint32_t) get_seconds(), (uint32_t) atomic_inc_return(&cache->gravecounter)); /* do the multiway lock magic */ trap = lock_rename(cache->graveyard, dir); /* do some checks before getting the grave dentry */ if (rep->d_parent != dir) { /* the entry was probably culled when we dropped the parent dir * lock */ unlock_rename(cache->graveyard, dir); _leave(" = 0 [culled?]"); return 0; } if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "Graveyard no longer a directory"); return -EIO; } if (trap == rep) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "May not make directory loop"); return -EIO; } if (d_mountpoint(rep)) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "Mountpoint in cache"); return -EIO; } grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer)); if (IS_ERR(grave)) { unlock_rename(cache->graveyard, dir); if (PTR_ERR(grave) == -ENOMEM) { _leave(" = -ENOMEM"); return -ENOMEM; } cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave)); return -EIO; } if (grave->d_inode) { unlock_rename(cache->graveyard, dir); dput(grave); grave = NULL; cond_resched(); goto try_again; } if (d_mountpoint(grave)) { unlock_rename(cache->graveyard, dir); dput(grave); cachefiles_io_error(cache, "Mountpoint in graveyard"); return -EIO; } /* target should not be an ancestor of source */ if (trap == grave) { unlock_rename(cache->graveyard, dir); dput(grave); cachefiles_io_error(cache, "May not make directory loop"); return -EIO; } /* attempt the rename */ path.mnt = cache->mnt; path.dentry = dir; path_to_graveyard.mnt = cache->mnt; path_to_graveyard.dentry = cache->graveyard; ret = security_path_rename(&path, rep, &path_to_graveyard, grave); if (ret < 0) { 
cachefiles_io_error(cache, "Rename security error %d", ret); } else { ret = vfs_rename(dir->d_inode, rep, cache->graveyard->d_inode, grave); if (ret != 0 && ret != -ENOMEM) cachefiles_io_error(cache, "Rename failed with error %d", ret); if (preemptive) cachefiles_mark_object_buried(cache, rep); } unlock_rename(cache->graveyard, dir); dput(grave); _leave(" = 0"); return 0; } /* * delete an object representation from the cache */ int cachefiles_delete_object(struct cachefiles_cache *cache, struct cachefiles_object *object) { struct dentry *dir; int ret; _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry); ASSERT(object->dentry); ASSERT(object->dentry->d_inode); ASSERT(object->dentry->d_parent); dir = dget_parent(object->dentry); mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { /* object allocation for the same key preemptively deleted this * object's file so that it could create its own file */ _debug("object preemptively buried"); mutex_unlock(&dir->d_inode->i_mutex); ret = 0; } else { /* we need to check that our parent is _still_ our parent - it * may have been renamed */ if (dir == object->dentry->d_parent) { ret = cachefiles_bury_object(cache, dir, object->dentry, false); } else { /* it got moved, presumably by cachefilesd culling it, * so it's no longer in the key path and we can ignore * it */ mutex_unlock(&dir->d_inode->i_mutex); ret = 0; } } dput(dir); _leave(" = %d", ret); return ret; } /* * walk from the parent object to the child object through the backing * filesystem, creating directories as we go */ int cachefiles_walk_to_object(struct cachefiles_object *parent, struct cachefiles_object *object, const char *key, struct cachefiles_xattr *auxdata) { struct cachefiles_cache *cache; struct dentry *dir, *next = NULL; struct path path; unsigned long start; const char *name; int ret, nlen; _enter("OBJ%x{%p},OBJ%x,%s,", parent->fscache.debug_id, parent->dentry, 
object->fscache.debug_id, key); cache = container_of(parent->fscache.cache, struct cachefiles_cache, cache); path.mnt = cache->mnt; ASSERT(parent->dentry); ASSERT(parent->dentry->d_inode); if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) { // TODO: convert file to dir _leave("looking up in none directory"); return -ENOBUFS; } dir = dget(parent->dentry); advance: /* attempt to transit the first directory component */ name = key; nlen = strlen(key); /* key ends in a double NUL */ key = key + nlen + 1; if (!*key) key = NULL; lookup_again: /* search the current directory for the element name */ _debug("lookup '%s'", name); mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); start = jiffies; next = lookup_one_len(name, dir, nlen); cachefiles_hist(cachefiles_lookup_histogram, start); if (IS_ERR(next)) goto lookup_error; _debug("next -> %p %s", next, next->d_inode ? "positive" : "negative"); if (!key) object->new = !next->d_inode; /* if this element of the path doesn't exist, then the lookup phase * failed, and we can release any readers in the certain knowledge that * there's nothing for them to actually read */ if (!next->d_inode) fscache_object_lookup_negative(&object->fscache); /* we need to create the object if it's negative */ if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) { /* index objects and intervening tree levels must be subdirs */ if (!next->d_inode) { ret = cachefiles_has_space(cache, 1, 0); if (ret < 0) goto create_error; path.dentry = dir; ret = security_path_mkdir(&path, next, 0); if (ret < 0) goto create_error; start = jiffies; ret = vfs_mkdir(dir->d_inode, next, 0); cachefiles_hist(cachefiles_mkdir_histogram, start); if (ret < 0) goto create_error; ASSERT(next->d_inode); _debug("mkdir -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); } else if (!S_ISDIR(next->d_inode->i_mode)) { kerror("inode %lu is not a directory", next->d_inode->i_ino); ret = -ENOBUFS; goto error; } } else { /* non-index objects start out life as files */ 
if (!next->d_inode) { ret = cachefiles_has_space(cache, 1, 0); if (ret < 0) goto create_error; path.dentry = dir; ret = security_path_mknod(&path, next, S_IFREG, 0); if (ret < 0) goto create_error; start = jiffies; ret = vfs_create(dir->d_inode, next, S_IFREG, NULL); cachefiles_hist(cachefiles_create_histogram, start); if (ret < 0) goto create_error; ASSERT(next->d_inode); _debug("create -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); } else if (!S_ISDIR(next->d_inode->i_mode) && !S_ISREG(next->d_inode->i_mode) ) { kerror("inode %lu is not a file or directory", next->d_inode->i_ino); ret = -ENOBUFS; goto error; } } /* process the next component */ if (key) { _debug("advance"); mutex_unlock(&dir->d_inode->i_mutex); dput(dir); dir = next; next = NULL; goto advance; } /* we've found the object we were looking for */ object->dentry = next; /* if we've found that the terminal object exists, then we need to * check its attributes and delete it if it's out of date */ if (!object->new) { _debug("validate '%*.*s'", next->d_name.len, next->d_name.len, next->d_name.name); ret = cachefiles_check_object_xattr(object, auxdata); if (ret == -ESTALE) { /* delete the object (the deleter drops the directory * mutex) */ object->dentry = NULL; ret = cachefiles_bury_object(cache, dir, next, true); dput(next); next = NULL; if (ret < 0) goto delete_error; _debug("redo lookup"); goto lookup_again; } } /* note that we're now using this object */ ret = cachefiles_mark_object_active(cache, object); mutex_unlock(&dir->d_inode->i_mutex); dput(dir); dir = NULL; if (ret == -ETIMEDOUT) goto mark_active_timed_out; _debug("=== OBTAINED_OBJECT ==="); if (object->new) { /* attach data to a newly constructed terminal object */ ret = cachefiles_set_object_xattr(object, auxdata); if (ret < 0) goto check_error; } else { /* always update the atime on an object we've just looked up * (this is used to keep track of culling, and atimes are only * updated by read, write and readdir but not 
lookup or * open) */ touch_atime(cache->mnt, next); } /* open a file interface onto a data file */ if (object->type != FSCACHE_COOKIE_TYPE_INDEX) { if (S_ISREG(object->dentry->d_inode->i_mode)) { const struct address_space_operations *aops; ret = -EPERM; aops = object->dentry->d_inode->i_mapping->a_ops; if (!aops->bmap) goto check_error; object->backer = object->dentry; } else { BUG(); // TODO: open file in data-class subdir } } object->new = 0; fscache_obtained_object(&object->fscache); _leave(" = 0 [%lu]", object->dentry->d_inode->i_ino); return 0; create_error: _debug("create error %d", ret); if (ret == -EIO) cachefiles_io_error(cache, "Create/mkdir failed"); goto error; mark_active_timed_out: _debug("mark active timed out"); goto release_dentry; check_error: _debug("check error %d", ret); write_lock(&cache->active_lock); rb_erase(&object->active_node, &cache->active_nodes); clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE); write_unlock(&cache->active_lock); release_dentry: dput(object->dentry); object->dentry = NULL; goto error_out; delete_error: _debug("delete error %d", ret); goto error_out2; lookup_error: _debug("lookup error %ld", PTR_ERR(next)); ret = PTR_ERR(next); if (ret == -EIO) cachefiles_io_error(cache, "Lookup failed"); next = NULL; error: mutex_unlock(&dir->d_inode->i_mutex); dput(next); error_out2: dput(dir); error_out: _leave(" = error %d", -ret); return ret; } /* * get a subdirectory */ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, struct dentry *dir, const char *dirname) { struct dentry *subdir; unsigned long start; struct path path; int ret; _enter(",,%s", dirname); /* search the current directory for the element name */ mutex_lock(&dir->d_inode->i_mutex); start = jiffies; subdir = lookup_one_len(dirname, dir, strlen(dirname)); cachefiles_hist(cachefiles_lookup_histogram, start); if (IS_ERR(subdir)) { if (PTR_ERR(subdir) == -ENOMEM) goto nomem_d_alloc; goto 
lookup_error; } _debug("subdir -> %p %s", subdir, subdir->d_inode ? "positive" : "negative"); /* we need to create the subdir if it doesn't exist yet */ if (!subdir->d_inode) { ret = cachefiles_has_space(cache, 1, 0); if (ret < 0) goto mkdir_error; _debug("attempt mkdir"); path.mnt = cache->mnt; path.dentry = dir; ret = security_path_mkdir(&path, subdir, 0700); if (ret < 0) goto mkdir_error; ret = vfs_mkdir(dir->d_inode, subdir, 0700); if (ret < 0) goto mkdir_error; ASSERT(subdir->d_inode); _debug("mkdir -> %p{%p{ino=%lu}}", subdir, subdir->d_inode, subdir->d_inode->i_ino); } mutex_unlock(&dir->d_inode->i_mutex); /* we need to make sure the subdir is a directory */ ASSERT(subdir->d_inode); if (!S_ISDIR(subdir->d_inode->i_mode)) { kerror("%s is not a directory", dirname); ret = -EIO; goto check_error; } ret = -EPERM; if (!subdir->d_inode->i_op || !subdir->d_inode->i_op->setxattr || !subdir->d_inode->i_op->getxattr || !subdir->d_inode->i_op->lookup || !subdir->d_inode->i_op->mkdir || !subdir->d_inode->i_op->create || !subdir->d_inode->i_op->rename || !subdir->d_inode->i_op->rmdir || !subdir->d_inode->i_op->unlink) goto check_error; _leave(" = [%lu]", subdir->d_inode->i_ino); return subdir; check_error: dput(subdir); _leave(" = %d [check]", ret); return ERR_PTR(ret); mkdir_error: mutex_unlock(&dir->d_inode->i_mutex); dput(subdir); kerror("mkdir %s failed with error %d", dirname, ret); return ERR_PTR(ret); lookup_error: mutex_unlock(&dir->d_inode->i_mutex); ret = PTR_ERR(subdir); kerror("Lookup %s failed with error %d", dirname, ret); return ERR_PTR(ret); nomem_d_alloc: mutex_unlock(&dir->d_inode->i_mutex); _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } /* * find out if an object is in use or not * - if finds object and it's not in use: * - returns a pointer to the object and a reference on it * - returns with the directory locked */ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, struct dentry *dir, char *filename) { struct 
cachefiles_object *object; struct rb_node *_n; struct dentry *victim; unsigned long start; int ret; //_enter(",%*.*s/,%s", // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename); /* look up the victim */ mutex_lock_nested(&dir->d_inode->i_mutex, 1); start = jiffies; victim = lookup_one_len(filename, dir, strlen(filename)); cachefiles_hist(cachefiles_lookup_histogram, start); if (IS_ERR(victim)) goto lookup_error; //_debug("victim -> %p %s", // victim, victim->d_inode ? "positive" : "negative"); /* if the object is no longer there then we probably retired the object * at the netfs's request whilst the cull was in progress */ if (!victim->d_inode) { mutex_unlock(&dir->d_inode->i_mutex); dput(victim); _leave(" = -ENOENT [absent]"); return ERR_PTR(-ENOENT); } /* check to see if we're using this object */ read_lock(&cache->active_lock); _n = cache->active_nodes.rb_node; while (_n) { object = rb_entry(_n, struct cachefiles_object, active_node); if (object->dentry > victim) _n = _n->rb_left; else if (object->dentry < victim) _n = _n->rb_right; else goto object_in_use; } read_unlock(&cache->active_lock); //_leave(" = %p", victim); return victim; object_in_use: read_unlock(&cache->active_lock); mutex_unlock(&dir->d_inode->i_mutex); dput(victim); //_leave(" = -EBUSY [in use]"); return ERR_PTR(-EBUSY); lookup_error: mutex_unlock(&dir->d_inode->i_mutex); ret = PTR_ERR(victim); if (ret == -ENOENT) { /* file or dir now absent - probably retired by netfs */ _leave(" = -ESTALE [absent]"); return ERR_PTR(-ESTALE); } if (ret == -EIO) { cachefiles_io_error(cache, "Lookup failed"); } else if (ret != -ENOMEM) { kerror("Internal error: %d", ret); ret = -EIO; } _leave(" = %d", ret); return ERR_PTR(ret); } /* * cull an object if it's not in use * - called only by cache manager daemon */ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, char *filename) { struct dentry *victim; int ret; _enter(",%*.*s/,%s", dir->d_name.len, dir->d_name.len, dir->d_name.name, 
filename); victim = cachefiles_check_active(cache, dir, filename); if (IS_ERR(victim)) return PTR_ERR(victim); _debug("victim -> %p %s", victim, victim->d_inode ? "positive" : "negative"); /* okay... the victim is not being used so we can cull it * - start by marking it as stale */ _debug("victim is cullable"); ret = cachefiles_remove_object_xattr(cache, victim); if (ret < 0) goto error_unlock; /* actually remove the victim (drops the dir mutex) */ _debug("bury"); ret = cachefiles_bury_object(cache, dir, victim, false); if (ret < 0) goto error; dput(victim); _leave(" = 0"); return 0; error_unlock: mutex_unlock(&dir->d_inode->i_mutex); error: dput(victim); if (ret == -ENOENT) { /* file or dir now absent - probably retired by netfs */ _leave(" = -ESTALE [absent]"); return -ESTALE; } if (ret != -ENOMEM) { kerror("Internal error: %d", ret); ret = -EIO; } _leave(" = %d", ret); return ret; } /* * find out if an object is in use or not * - called only by cache manager daemon * - returns -EBUSY or 0 to indicate whether an object is in use or not */ int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir, char *filename) { struct dentry *victim; //_enter(",%*.*s/,%s", // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename); victim = cachefiles_check_active(cache, dir, filename); if (IS_ERR(victim)) return PTR_ERR(victim); mutex_unlock(&dir->d_inode->i_mutex); dput(victim); //_leave(" = 0"); return 0; }
gpl-2.0
blade-vec-4g/android_kernel_zte_msm8226
net/sctp/associola.c
3433
47211
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * This module provides the abstraction for an SCTP association. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Hui Huang <hui.huang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> * Ryan Layer <rmlayer@us.ibm.com> * Kevin Gao <kevin.gao@intel.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/in.h> #include <net/ipv6.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Forward declarations for internal functions. */ static void sctp_assoc_bh_rcv(struct work_struct *work); static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc); /* Keep track of the new idr low so that we don't re-use association id * numbers too fast. It is protected by they idr spin lock is in the * range of 1 - INT_MAX. */ static u32 idr_low = 1; /* 1st Level Abstractions. */ /* Initialize a new association from provided memory. */ static struct sctp_association *sctp_association_init(struct sctp_association *asoc, const struct sctp_endpoint *ep, const struct sock *sk, sctp_scope_t scope, gfp_t gfp) { struct sctp_sock *sp; int i; sctp_paramhdr_t *p; int err; /* Retrieve the SCTP per socket area. */ sp = sctp_sk((struct sock *)sk); /* Discarding const is appropriate here. */ asoc->ep = (struct sctp_endpoint *)ep; sctp_endpoint_hold(asoc->ep); /* Hold the sock. */ asoc->base.sk = (struct sock *)sk; sock_hold(asoc->base.sk); /* Initialize the common base substructure. */ asoc->base.type = SCTP_EP_TYPE_ASSOCIATION; /* Initialize the object handling fields. */ atomic_set(&asoc->base.refcnt, 1); asoc->base.dead = 0; asoc->base.malloced = 0; /* Initialize the bind addr area. */ sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port); asoc->state = SCTP_STATE_CLOSED; /* Set these values from the socket values, a conversion between * millsecons to seconds/microseconds must also be done. 
*/ asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000; asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000) * 1000; asoc->frag_point = 0; asoc->user_frag = sp->user_frag; /* Set the association max_retrans and RTO values from the * socket values. */ asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt; asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial); asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max); asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min); asoc->overall_error_count = 0; /* Initialize the association's heartbeat interval based on the * sock configured value. */ asoc->hbinterval = msecs_to_jiffies(sp->hbinterval); /* Initialize path max retrans value. */ asoc->pathmaxrxt = sp->pathmaxrxt; /* Initialize default path MTU. */ asoc->pathmtu = sp->pathmtu; /* Set association default SACK delay */ asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); asoc->sackfreq = sp->sackfreq; /* Set the association default flags controlling * Heartbeat, SACK delay, and Path MTU Discovery. */ asoc->param_flags = sp->param_flags; /* Initialize the maximum mumber of new data packets that can be sent * in a burst. */ asoc->max_burst = sp->max_burst; /* initialize association timers */ asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0; asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial; asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial; asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial; asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0; asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0; /* sctpimpguide Section 2.12.2 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the * recommended value of 5 times 'RTO.Max'. 
*/ asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] = 5 * asoc->rto_max; asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ; /* Initializes the timers */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) setup_timer(&asoc->timers[i], sctp_timer_events[i], (unsigned long)asoc); /* Pull default initialization values from the sock options. * Note: This assumes that the values have already been * validated in the sock. */ asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams; asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams; asoc->max_init_attempts = sp->initmsg.sinit_max_attempts; asoc->max_init_timeo = msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo); /* Allocate storage for the ssnmap after the inbound and outbound * streams have been negotiated during Init. */ asoc->ssnmap = NULL; /* Set the local window size for receive. * This is also the rcvbuf space per association. * RFC 6 - A SCTP receiver MUST be able to receive a minimum of * 1500 bytes in one SCTP packet. */ if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW) asoc->rwnd = SCTP_DEFAULT_MINWINDOW; else asoc->rwnd = sk->sk_rcvbuf/2; asoc->a_rwnd = asoc->rwnd; asoc->rwnd_over = 0; asoc->rwnd_press = 0; /* Use my own max window until I learn something better. */ asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW; /* Set the sndbuf size for transmit. */ asoc->sndbuf_used = 0; /* Initialize the receive memory counter */ atomic_set(&asoc->rmem_alloc, 0); init_waitqueue_head(&asoc->wait); asoc->c.my_vtag = sctp_generate_tag(ep); asoc->peer.i.init_tag = 0; /* INIT needs a vtag of 0. 
*/ asoc->c.peer_vtag = 0; asoc->c.my_ttag = 0; asoc->c.peer_ttag = 0; asoc->c.my_port = ep->base.bind_addr.port; asoc->c.initial_tsn = sctp_generate_tsn(ep); asoc->next_tsn = asoc->c.initial_tsn; asoc->ctsn_ack_point = asoc->next_tsn - 1; asoc->adv_peer_ack_point = asoc->ctsn_ack_point; asoc->highest_sacked = asoc->ctsn_ack_point; asoc->last_cwr_tsn = asoc->ctsn_ack_point; asoc->unack_data = 0; /* ADDIP Section 4.1 Asconf Chunk Procedures * * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do the following: * ... * A2) a serial number should be assigned to the chunk. The serial * number SHOULD be a monotonically increasing number. The serial * numbers SHOULD be initialized at the start of the * association to the same value as the initial TSN. */ asoc->addip_serial = asoc->c.initial_tsn; INIT_LIST_HEAD(&asoc->addip_chunk_list); INIT_LIST_HEAD(&asoc->asconf_ack_list); /* Make an empty list of remote transport addresses. */ INIT_LIST_HEAD(&asoc->peer.transport_addr_list); asoc->peer.transport_count = 0; /* RFC 2960 5.1 Normal Establishment of an Association * * After the reception of the first data chunk in an * association the endpoint must immediately respond with a * sack to acknowledge the data chunk. Subsequent * acknowledgements should be done as described in Section * 6.2. * * [We implement this by telling a new association that it * already received one packet.] */ asoc->peer.sack_needed = 1; asoc->peer.sack_cnt = 0; /* Assume that the peer will tell us if he recognizes ASCONF * as part of INIT exchange. * The sctp_addip_noauth option is there for backward compatibilty * and will revert old behavior. */ asoc->peer.asconf_capable = 0; if (sctp_addip_noauth) asoc->peer.asconf_capable = 1; asoc->asconf_addr_del_pending = NULL; asoc->src_out_of_asoc_ok = 0; asoc->new_transport = NULL; /* Create an input queue. 
*/ sctp_inq_init(&asoc->base.inqueue); sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); /* Create an output queue. */ sctp_outq_init(asoc, &asoc->outqueue); if (!sctp_ulpq_init(&asoc->ulpq, asoc)) goto fail_init; memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap)); asoc->need_ecne = 0; asoc->assoc_id = 0; /* Assume that peer would support both address types unless we are * told otherwise. */ asoc->peer.ipv4_address = 1; if (asoc->base.sk->sk_family == PF_INET6) asoc->peer.ipv6_address = 1; INIT_LIST_HEAD(&asoc->asocs); asoc->autoclose = sp->autoclose; asoc->default_stream = sp->default_stream; asoc->default_ppid = sp->default_ppid; asoc->default_flags = sp->default_flags; asoc->default_context = sp->default_context; asoc->default_timetolive = sp->default_timetolive; asoc->default_rcv_context = sp->default_rcv_context; /* AUTH related initializations */ INIT_LIST_HEAD(&asoc->endpoint_shared_keys); err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); if (err) goto fail_init; asoc->active_key_id = ep->active_key_id; asoc->asoc_shared_key = NULL; asoc->default_hmac_id = 0; /* Save the hmacs and chunks list into this association */ if (ep->auth_hmacs_list) memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list, ntohs(ep->auth_hmacs_list->param_hdr.length)); if (ep->auth_chunk_list) memcpy(asoc->c.auth_chunks, ep->auth_chunk_list, ntohs(ep->auth_chunk_list->param_hdr.length)); /* Get the AUTH random number for this association */ p = (sctp_paramhdr_t *)asoc->c.auth_random; p->type = SCTP_PARAM_RANDOM; p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH); get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH); return asoc; fail_init: sctp_endpoint_put(asoc->ep); sock_put(asoc->base.sk); return NULL; } /* Allocate and initialize a new association */ struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, const struct sock *sk, sctp_scope_t scope, gfp_t gfp) { struct sctp_association *asoc; asoc = t_new(struct sctp_association, gfp); 
if (!asoc) goto fail; if (!sctp_association_init(asoc, ep, sk, scope, gfp)) goto fail_init; asoc->base.malloced = 1; SCTP_DBG_OBJCNT_INC(assoc); SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc); return asoc; fail_init: kfree(asoc); fail: return NULL; } /* Free this association if possible. There may still be users, so * the actual deallocation may be delayed. */ void sctp_association_free(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; struct sctp_transport *transport; struct list_head *pos, *temp; int i; /* Only real associations count against the endpoint, so * don't bother for if this is a temporary association. */ if (!asoc->temp) { list_del(&asoc->asocs); /* Decrement the backlog value for a TCP-style listening * socket. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) sk->sk_ack_backlog--; } /* Mark as dead, so other users can know this structure is * going away. */ asoc->base.dead = 1; /* Dispose of any data lying around in the outqueue. */ sctp_outq_free(&asoc->outqueue); /* Dispose of any pending messages for the upper layer. */ sctp_ulpq_free(&asoc->ulpq); /* Dispose of any pending chunks on the inqueue. */ sctp_inq_free(&asoc->base.inqueue); sctp_tsnmap_free(&asoc->peer.tsn_map); /* Free ssnmap storage. */ sctp_ssnmap_free(asoc->ssnmap); /* Clean up the bound address list. */ sctp_bind_addr_free(&asoc->base.bind_addr); /* Do we need to go through all of our timers and * delete them? To be safe we will try to delete all, but we * should be able to go through and make a guess based * on our state. */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { if (timer_pending(&asoc->timers[i]) && del_timer(&asoc->timers[i])) sctp_association_put(asoc); } /* Free peer's cached cookie. */ kfree(asoc->peer.cookie); kfree(asoc->peer.peer_random); kfree(asoc->peer.peer_chunks); kfree(asoc->peer.peer_hmacs); /* Release the transport structures. 
*/ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); list_del(pos); sctp_transport_free(transport); } asoc->peer.transport_count = 0; sctp_asconf_queue_teardown(asoc); /* Free pending address space being deleted */ if (asoc->asconf_addr_del_pending != NULL) kfree(asoc->asconf_addr_del_pending); /* AUTH - Free the endpoint shared keys */ sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); /* AUTH - Free the association shared key */ sctp_auth_key_put(asoc->asoc_shared_key); sctp_association_put(asoc); } /* Cleanup and free up an association. */ static void sctp_association_destroy(struct sctp_association *asoc) { SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return); sctp_endpoint_put(asoc->ep); sock_put(asoc->base.sk); if (asoc->assoc_id != 0) { spin_lock_bh(&sctp_assocs_id_lock); idr_remove(&sctp_assocs_id, asoc->assoc_id); spin_unlock_bh(&sctp_assocs_id_lock); } WARN_ON(atomic_read(&asoc->rmem_alloc)); if (asoc->base.malloced) { kfree(asoc); SCTP_DBG_OBJCNT_DEC(assoc); } } /* Change the primary destination address for the peer. */ void sctp_assoc_set_primary(struct sctp_association *asoc, struct sctp_transport *transport) { int changeover = 0; /* it's a changeover only if we already have a primary path * that we are changing */ if (asoc->peer.primary_path != NULL && asoc->peer.primary_path != transport) changeover = 1 ; asoc->peer.primary_path = transport; /* Set a default msg_name for events. */ memcpy(&asoc->peer.primary_addr, &transport->ipaddr, sizeof(union sctp_addr)); /* If the primary path is changing, assume that the * user wants to use this new path. 
*/ if ((transport->state == SCTP_ACTIVE) || (transport->state == SCTP_UNKNOWN)) asoc->peer.active_path = transport; /* * SFR-CACC algorithm: * Upon the receipt of a request to change the primary * destination address, on the data structure for the new * primary destination, the sender MUST do the following: * * 1) If CHANGEOVER_ACTIVE is set, then there was a switch * to this destination address earlier. The sender MUST set * CYCLING_CHANGEOVER to indicate that this switch is a * double switch to the same destination address. * * Really, only bother is we have data queued or outstanding on * the association. */ if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen) return; if (transport->cacc.changeover_active) transport->cacc.cycling_changeover = changeover; /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that * a changeover has occurred. */ transport->cacc.changeover_active = changeover; /* 3) The sender MUST store the next TSN to be sent in * next_tsn_at_change. */ transport->cacc.next_tsn_at_change = asoc->next_tsn; } /* Remove a transport from an association. */ void sctp_assoc_rm_peer(struct sctp_association *asoc, struct sctp_transport *peer) { struct list_head *pos; struct sctp_transport *transport; SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ", " port: %d\n", asoc, (&peer->ipaddr), ntohs(peer->ipaddr.v4.sin_port)); /* If we are to remove the current retran_path, update it * to the next peer before removing this peer from the list. */ if (asoc->peer.retran_path == peer) sctp_assoc_update_retran_path(asoc); /* Remove this peer from the list. */ list_del(&peer->transports); /* Get the first transport of asoc. */ pos = asoc->peer.transport_addr_list.next; transport = list_entry(pos, struct sctp_transport, transports); /* Update any entries that match the peer to be deleted. 
*/ if (asoc->peer.primary_path == peer) sctp_assoc_set_primary(asoc, transport); if (asoc->peer.active_path == peer) asoc->peer.active_path = transport; if (asoc->peer.retran_path == peer) asoc->peer.retran_path = transport; if (asoc->peer.last_data_from == peer) asoc->peer.last_data_from = transport; /* If we remove the transport an INIT was last sent to, set it to * NULL. Combined with the update of the retran path above, this * will cause the next INIT to be sent to the next available * transport, maintaining the cycle. */ if (asoc->init_last_sent_to == peer) asoc->init_last_sent_to = NULL; /* If we remove the transport an SHUTDOWN was last sent to, set it * to NULL. Combined with the update of the retran path above, this * will cause the next SHUTDOWN to be sent to the next available * transport, maintaining the cycle. */ if (asoc->shutdown_last_sent_to == peer) asoc->shutdown_last_sent_to = NULL; /* If we remove the transport an ASCONF was last sent to, set it to * NULL. */ if (asoc->addip_last_asconf && asoc->addip_last_asconf->transport == peer) asoc->addip_last_asconf->transport = NULL; /* If we have something on the transmitted list, we have to * save it off. The best place is the active path. */ if (!list_empty(&peer->transmitted)) { struct sctp_transport *active = asoc->peer.active_path; struct sctp_chunk *ch; /* Reset the transport of each chunk on this list */ list_for_each_entry(ch, &peer->transmitted, transmitted_list) { ch->transport = NULL; ch->rtt_in_progress = 0; } list_splice_tail_init(&peer->transmitted, &active->transmitted); /* Start a T3 timer here in case it wasn't running so * that these migrated packets have a chance to get * retrnasmitted. */ if (!timer_pending(&active->T3_rtx_timer)) if (!mod_timer(&active->T3_rtx_timer, jiffies + active->rto)) sctp_transport_hold(active); } asoc->peer.transport_count--; sctp_transport_free(peer); } /* Add a transport address to an association. 
*/ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, const union sctp_addr *addr, const gfp_t gfp, const int peer_state) { struct sctp_transport *peer; struct sctp_sock *sp; unsigned short port; sp = sctp_sk(asoc->base.sk); /* AF_INET and AF_INET6 share common port field. */ port = ntohs(addr->v4.sin_port); SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", " port: %d state:%d\n", asoc, addr, port, peer_state); /* Set the port if it has not been set yet. */ if (0 == asoc->peer.port) asoc->peer.port = port; /* Check to see if this is a duplicate. */ peer = sctp_assoc_lookup_paddr(asoc, addr); if (peer) { /* An UNKNOWN state is only set on transports added by * user in sctp_connectx() call. Such transports should be * considered CONFIRMED per RFC 4960, Section 5.4. */ if (peer->state == SCTP_UNKNOWN) { peer->state = SCTP_ACTIVE; } return peer; } peer = sctp_transport_new(addr, gfp); if (!peer) return NULL; sctp_transport_set_owner(peer, asoc); /* Initialize the peer's heartbeat interval based on the * association configured value. */ peer->hbinterval = asoc->hbinterval; /* Set the path max_retrans. */ peer->pathmaxrxt = asoc->pathmaxrxt; /* Initialize the peer's SACK delay timeout based on the * association configured value. */ peer->sackdelay = asoc->sackdelay; peer->sackfreq = asoc->sackfreq; /* Enable/disable heartbeat, SACK delay, and path MTU discovery * based on association setting. */ peer->param_flags = asoc->param_flags; sctp_transport_route(peer, NULL, sp); /* Initialize the pmtu of the transport. */ if (peer->param_flags & SPP_PMTUD_DISABLE) { if (asoc->pathmtu) peer->pathmtu = asoc->pathmtu; else peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } /* If this is the first transport addr on this association, * initialize the association PMTU to the peer's PMTU. * If not and the current association PMTU is higher than the new * peer's PMTU, reset the association PMTU to the new peer's PMTU. 
*/ if (asoc->pathmtu) asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu); else asoc->pathmtu = peer->pathmtu; SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to " "%d\n", asoc, asoc->pathmtu); peer->pmtu_pending = 0; asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); /* The asoc->peer.port might not be meaningful yet, but * initialize the packet structure anyway. */ sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port, asoc->peer.port); /* 7.2.1 Slow-Start * * o The initial cwnd before DATA transmission or after a sufficiently * long idle period MUST be set to * min(4*MTU, max(2*MTU, 4380 bytes)) * * o The initial value of ssthresh MAY be arbitrarily high * (for example, implementations MAY use the size of the * receiver advertised window). */ peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); /* At this point, we may not have the receiver's advertised window, * so initialize ssthresh to the default value and it will be set * later when we process the INIT. */ peer->ssthresh = SCTP_DEFAULT_MAXWINDOW; peer->partial_bytes_acked = 0; peer->flight_size = 0; peer->burst_limited = 0; /* Set the transport's RTO.initial value */ peer->rto = asoc->rto_initial; /* Set the peer's active state. */ peer->state = peer_state; /* Attach the remote transport to our asoc. */ list_add_tail(&peer->transports, &asoc->peer.transport_addr_list); asoc->peer.transport_count++; /* If we do not yet have a primary path, set one. */ if (!asoc->peer.primary_path) { sctp_assoc_set_primary(asoc, peer); asoc->peer.retran_path = peer; } if (asoc->peer.active_path == asoc->peer.retran_path && peer->state != SCTP_UNCONFIRMED) { asoc->peer.retran_path = peer; } return peer; } /* Delete a transport address from an association. 
*/ void sctp_assoc_del_peer(struct sctp_association *asoc, const union sctp_addr *addr) { struct list_head *pos; struct list_head *temp; struct sctp_transport *transport; list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) { /* Do book keeping for removing the peer and free it. */ sctp_assoc_rm_peer(asoc, transport); break; } } } /* Lookup a transport by address. */ struct sctp_transport *sctp_assoc_lookup_paddr( const struct sctp_association *asoc, const union sctp_addr *address) { struct sctp_transport *t; /* Cycle through all transports searching for a peer address. */ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (sctp_cmp_addr_exact(address, &t->ipaddr)) return t; } return NULL; } /* Remove all transports except a give one */ void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc, struct sctp_transport *primary) { struct sctp_transport *temp; struct sctp_transport *t; list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list, transports) { /* if the current transport is not the primary one, delete it */ if (t != primary) sctp_assoc_rm_peer(asoc, t); } } /* Engage in transport control operations. * Mark the transport up or down and send a notification to the user. * Select and update the new active and retran paths. */ void sctp_assoc_control_transport(struct sctp_association *asoc, struct sctp_transport *transport, sctp_transport_cmd_t command, sctp_sn_error_t error) { struct sctp_transport *t = NULL; struct sctp_transport *first; struct sctp_transport *second; struct sctp_ulpevent *event; struct sockaddr_storage addr; int spc_state = 0; /* Record the transition on the transport. */ switch (command) { case SCTP_TRANSPORT_UP: /* If we are moving from UNCONFIRMED state due * to heartbeat success, report the SCTP_ADDR_CONFIRMED * state to the user, otherwise report SCTP_ADDR_AVAILABLE. 
*/ if (SCTP_UNCONFIRMED == transport->state && SCTP_HEARTBEAT_SUCCESS == error) spc_state = SCTP_ADDR_CONFIRMED; else spc_state = SCTP_ADDR_AVAILABLE; transport->state = SCTP_ACTIVE; break; case SCTP_TRANSPORT_DOWN: /* If the transport was never confirmed, do not transition it * to inactive state. Also, release the cached route since * there may be a better route next time. */ if (transport->state != SCTP_UNCONFIRMED) transport->state = SCTP_INACTIVE; else { dst_release(transport->dst); transport->dst = NULL; } spc_state = SCTP_ADDR_UNREACHABLE; break; default: return; } /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the * user. */ memset(&addr, 0, sizeof(struct sockaddr_storage)); memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, 0, spc_state, error, GFP_ATOMIC); if (event) sctp_ulpq_tail_event(&asoc->ulpq, event); /* Select new active and retran paths. */ /* Look for the two most recently used active transports. * * This code produces the wrong ordering whenever jiffies * rolls over, but we still get usable transports, so we don't * worry about it. */ first = NULL; second = NULL; list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) continue; if (!first || t->last_time_heard > first->last_time_heard) { second = first; first = t; } if (!second || t->last_time_heard > second->last_time_heard) second = t; } /* RFC 2960 6.4 Multi-Homed SCTP Endpoints * * By default, an endpoint should always transmit to the * primary path, unless the SCTP user explicitly specifies the * destination transport address (and possibly source * transport address) to use. * * [If the primary is active but not most recent, bump the most * recently used transport.] 
*/ if (((asoc->peer.primary_path->state == SCTP_ACTIVE) || (asoc->peer.primary_path->state == SCTP_UNKNOWN)) && first != asoc->peer.primary_path) { second = first; first = asoc->peer.primary_path; } /* If we failed to find a usable transport, just camp on the * primary, even if it is inactive. */ if (!first) { first = asoc->peer.primary_path; second = asoc->peer.primary_path; } /* Set the active and retran transports. */ asoc->peer.active_path = first; asoc->peer.retran_path = second; } /* Hold a reference to an association. */ void sctp_association_hold(struct sctp_association *asoc) { atomic_inc(&asoc->base.refcnt); } /* Release a reference to an association and cleanup * if there are no more references. */ void sctp_association_put(struct sctp_association *asoc) { if (atomic_dec_and_test(&asoc->base.refcnt)) sctp_association_destroy(asoc); } /* Allocate the next TSN, Transmission Sequence Number, for the given * association. */ __u32 sctp_association_get_next_tsn(struct sctp_association *asoc) { /* From Section 1.6 Serial Number Arithmetic: * Transmission Sequence Numbers wrap around when they reach * 2**32 - 1. That is, the next TSN a DATA chunk MUST use * after transmitting TSN = 2*32 - 1 is TSN = 0. */ __u32 retval = asoc->next_tsn; asoc->next_tsn++; asoc->unack_data++; return retval; } /* Compare two addresses to see if they match. Wildcard addresses * only match themselves. */ int sctp_cmp_addr_exact(const union sctp_addr *ss1, const union sctp_addr *ss2) { struct sctp_af *af; af = sctp_get_af_specific(ss1->sa.sa_family); if (unlikely(!af)) return 0; return af->cmp_addr(ss1, ss2); } /* Return an ecne chunk to get prepended to a packet. * Note: We are sly and return a shared, prealloced chunk. FIXME: * No we don't, but we could/should. */ struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc) { struct sctp_chunk *chunk; /* Send ECNE if needed. * Not being able to allocate a chunk here is not deadly. 
*/ if (asoc->need_ecne) chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn); else chunk = NULL; return chunk; } /* * Find which transport this TSN was sent on. */ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, __u32 tsn) { struct sctp_transport *active; struct sctp_transport *match; struct sctp_transport *transport; struct sctp_chunk *chunk; __be32 key = htonl(tsn); match = NULL; /* * FIXME: In general, find a more efficient data structure for * searching. */ /* * The general strategy is to search each transport's transmitted * list. Return which transport this TSN lives on. * * Let's be hopeful and check the active_path first. * Another optimization would be to know if there is only one * outbound path and not have to look for the TSN at all. * */ active = asoc->peer.active_path; list_for_each_entry(chunk, &active->transmitted, transmitted_list) { if (key == chunk->subh.data_hdr->tsn) { match = active; goto out; } } /* If not found, go search all the other transports. */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { if (transport == active) break; list_for_each_entry(chunk, &transport->transmitted, transmitted_list) { if (key == chunk->subh.data_hdr->tsn) { match = transport; goto out; } } } out: return match; } /* Is this the association we are looking for? */ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc, const union sctp_addr *laddr, const union sctp_addr *paddr) { struct sctp_transport *transport; if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) && (htons(asoc->peer.port) == paddr->v4.sin_port)) { transport = sctp_assoc_lookup_paddr(asoc, paddr); if (!transport) goto out; if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr, sctp_sk(asoc->base.sk))) goto out; } transport = NULL; out: return transport; } /* Do delayed input processing. This is scheduled by sctp_rcv(). 
*/ static void sctp_assoc_bh_rcv(struct work_struct *work) { struct sctp_association *asoc = container_of(work, struct sctp_association, base.inqueue.immediate); struct sctp_endpoint *ep; struct sctp_chunk *chunk; struct sctp_inq *inqueue; int state; sctp_subtype_t subtype; int error = 0; /* The association should be held so we should be safe. */ ep = asoc->ep; inqueue = &asoc->base.inqueue; sctp_association_hold(asoc); while (NULL != (chunk = sctp_inq_pop(inqueue))) { state = asoc->state; subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); /* SCTP-AUTH, Section 6.3: * The receiver has a list of chunk types which it expects * to be received only after an AUTH-chunk. This list has * been sent to the peer during the association setup. It * MUST silently discard these chunks if they are not placed * after an AUTH chunk in the packet. */ if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) continue; /* Remember where the last DATA chunk came from so we * know where to send the SACK. */ if (sctp_chunk_is_data(chunk)) asoc->peer.last_data_from = chunk->transport; else SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS); if (chunk->transport) chunk->transport->last_time_heard = jiffies; /* Run through the state machine. */ error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state, ep, asoc, chunk, GFP_ATOMIC); /* Check to see if the association is freed in response to * the incoming chunk. If so, get out of the while loop. */ if (asoc->base.dead) break; /* If there is an error on chunk, discard this packet. */ if (error && chunk) chunk->pdiscard = 1; } sctp_association_put(asoc); } /* This routine moves an association from its old sk to a new sk. */ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk) { struct sctp_sock *newsp = sctp_sk(newsk); struct sock *oldsk = assoc->base.sk; /* Delete the association from the old endpoint's list of * associations. */ list_del_init(&assoc->asocs); /* Decrement the backlog value for a TCP-style socket. 
*/ if (sctp_style(oldsk, TCP)) oldsk->sk_ack_backlog--; /* Release references to the old endpoint and the sock. */ sctp_endpoint_put(assoc->ep); sock_put(assoc->base.sk); /* Get a reference to the new endpoint. */ assoc->ep = newsp->ep; sctp_endpoint_hold(assoc->ep); /* Get a reference to the new sock. */ assoc->base.sk = newsk; sock_hold(assoc->base.sk); /* Add the association to the new endpoint's list of associations. */ sctp_endpoint_add_asoc(newsp->ep, assoc); } /* Update an association (possibly from unexpected COOKIE-ECHO processing). */ void sctp_assoc_update(struct sctp_association *asoc, struct sctp_association *new) { struct sctp_transport *trans; struct list_head *pos, *temp; /* Copy in new parameters of peer. */ asoc->c = new->c; asoc->peer.rwnd = new->peer.rwnd; asoc->peer.sack_needed = new->peer.sack_needed; asoc->peer.i = new->peer.i; sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, asoc->peer.i.initial_tsn, GFP_ATOMIC); /* Remove any peer addresses not present in the new association. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { trans = list_entry(pos, struct sctp_transport, transports); if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) { sctp_assoc_rm_peer(asoc, trans); continue; } if (asoc->state >= SCTP_STATE_ESTABLISHED) sctp_transport_reset(trans); } /* If the case is A (association restart), use * initial_tsn as next_tsn. If the case is B, use * current next_tsn in case data sent to peer * has been discarded and needs retransmission. */ if (asoc->state >= SCTP_STATE_ESTABLISHED) { asoc->next_tsn = new->next_tsn; asoc->ctsn_ack_point = new->ctsn_ack_point; asoc->adv_peer_ack_point = new->adv_peer_ack_point; /* Reinitialize SSN for both local streams * and peer's streams. */ sctp_ssnmap_clear(asoc->ssnmap); /* Flush the ULP reassembly and ordered queue. * Any data there will now be stale and will * cause problems. 
*/ sctp_ulpq_flush(&asoc->ulpq); /* reset the overall association error count so * that the restarted association doesn't get torn * down on the next retransmission timer. */ asoc->overall_error_count = 0; } else { /* Add any peer addresses from the new association. */ list_for_each_entry(trans, &new->peer.transport_addr_list, transports) { if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) sctp_assoc_add_peer(asoc, &trans->ipaddr, GFP_ATOMIC, trans->state); } asoc->ctsn_ack_point = asoc->next_tsn - 1; asoc->adv_peer_ack_point = asoc->ctsn_ack_point; if (!asoc->ssnmap) { /* Move the ssnmap. */ asoc->ssnmap = new->ssnmap; new->ssnmap = NULL; } if (!asoc->assoc_id) { /* get a new association id since we don't have one * yet. */ sctp_assoc_set_id(asoc, GFP_ATOMIC); } } /* SCTP-AUTH: Save the peer parameters from the new assocaitions * and also move the association shared keys over */ kfree(asoc->peer.peer_random); asoc->peer.peer_random = new->peer.peer_random; new->peer.peer_random = NULL; kfree(asoc->peer.peer_chunks); asoc->peer.peer_chunks = new->peer.peer_chunks; new->peer.peer_chunks = NULL; kfree(asoc->peer.peer_hmacs); asoc->peer.peer_hmacs = new->peer.peer_hmacs; new->peer.peer_hmacs = NULL; sctp_auth_key_put(asoc->asoc_shared_key); sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); } /* Update the retran path for sending a retransmitted packet. * Round-robin through the active transports, else round-robin * through the inactive transports as this is the next best thing * we can try. */ void sctp_assoc_update_retran_path(struct sctp_association *asoc) { struct sctp_transport *t, *next; struct list_head *head = &asoc->peer.transport_addr_list; struct list_head *pos; if (asoc->peer.transport_count == 1) return; /* Find the next transport in a round-robin fashion. */ t = asoc->peer.retran_path; pos = &t->transports; next = NULL; while (1) { /* Skip the head. 
*/ if (pos->next == head) pos = head->next; else pos = pos->next; t = list_entry(pos, struct sctp_transport, transports); /* We have exhausted the list, but didn't find any * other active transports. If so, use the next * transport. */ if (t == asoc->peer.retran_path) { t = next; break; } /* Try to find an active transport. */ if ((t->state == SCTP_ACTIVE) || (t->state == SCTP_UNKNOWN)) { break; } else { /* Keep track of the next transport in case * we don't find any active transport. */ if (t->state != SCTP_UNCONFIRMED && !next) next = t; } } if (t) asoc->peer.retran_path = t; else t = asoc->peer.retran_path; SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" " %p addr: ", " port: %d\n", asoc, (&t->ipaddr), ntohs(t->ipaddr.v4.sin_port)); } /* Choose the transport for sending retransmit packet. */ struct sctp_transport *sctp_assoc_choose_alter_transport( struct sctp_association *asoc, struct sctp_transport *last_sent_to) { /* If this is the first time packet is sent, use the active path, * else use the retran path. If the last packet was sent over the * retran path, update the retran path and use it. */ if (!last_sent_to) return asoc->peer.active_path; else { if (last_sent_to == asoc->peer.retran_path) sctp_assoc_update_retran_path(asoc); return asoc->peer.retran_path; } } /* Update the association's pmtu and frag_point by going through all the * transports. This routine is called when a transport's PMTU has changed. */ void sctp_assoc_sync_pmtu(struct sctp_association *asoc) { struct sctp_transport *t; __u32 pmtu = 0; if (!asoc) return; /* Get the lowest pmtu of all the transports. 
*/ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (t->pmtu_pending && t->dst) { sctp_transport_update_pmtu(t, dst_mtu(t->dst)); t->pmtu_pending = 0; } if (!pmtu || (t->pathmtu < pmtu)) pmtu = t->pathmtu; } if (pmtu) { asoc->pathmtu = pmtu; asoc->frag_point = sctp_frag_point(asoc, pmtu); } SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc, asoc->pathmtu, asoc->frag_point); } /* Should we send a SACK to update our peer? */ static inline int sctp_peer_needs_update(struct sctp_association *asoc) { switch (asoc->state) { case SCTP_STATE_ESTABLISHED: case SCTP_STATE_SHUTDOWN_PENDING: case SCTP_STATE_SHUTDOWN_RECEIVED: case SCTP_STATE_SHUTDOWN_SENT: if ((asoc->rwnd > asoc->a_rwnd) && ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32, (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift), asoc->pathmtu))) return 1; break; default: break; } return 0; } /* Increase asoc's rwnd by len and send any window update SACK if needed. */ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) { struct sctp_chunk *sack; struct timer_list *timer; if (asoc->rwnd_over) { if (asoc->rwnd_over >= len) { asoc->rwnd_over -= len; } else { asoc->rwnd += (len - asoc->rwnd_over); asoc->rwnd_over = 0; } } else { asoc->rwnd += len; } /* If we had window pressure, start recovering it * once our rwnd had reached the accumulated pressure * threshold. The idea is to recover slowly, but up * to the initial advertised window. */ if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { int change = min(asoc->pathmtu, asoc->rwnd_press); asoc->rwnd += change; asoc->rwnd_press -= change; } SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) " "- %u\n", __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, asoc->a_rwnd); /* Send a window update SACK if the rwnd has increased by at least the * minimum of the association's PMTU and half of the receive buffer. * The algorithm used is similar to the one described in * Section 4.2.3.3 of RFC 1122. 
*/ if (sctp_peer_needs_update(asoc)) { asoc->a_rwnd = asoc->rwnd; SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p " "rwnd: %u a_rwnd: %u\n", __func__, asoc, asoc->rwnd, asoc->a_rwnd); sack = sctp_make_sack(asoc); if (!sack) return; asoc->peer.sack_needed = 0; sctp_outq_tail(&asoc->outqueue, sack); /* Stop the SACK timer. */ timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; if (timer_pending(timer) && del_timer(timer)) sctp_association_put(asoc); } } /* Decrease asoc's rwnd by len. */ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) { int rx_count; int over = 0; SCTP_ASSERT(asoc->rwnd, "rwnd zero", return); SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return); if (asoc->ep->rcvbuf_policy) rx_count = atomic_read(&asoc->rmem_alloc); else rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); /* If we've reached or overflowed our receive buffer, announce * a 0 rwnd if rwnd would still be positive. Store the * the pottential pressure overflow so that the window can be restored * back to original value. */ if (rx_count >= asoc->base.sk->sk_rcvbuf) over = 1; if (asoc->rwnd >= len) { asoc->rwnd -= len; if (over) { asoc->rwnd_press += asoc->rwnd; asoc->rwnd = 0; } } else { asoc->rwnd_over = len - asoc->rwnd; asoc->rwnd = 0; } SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n", __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, asoc->rwnd_press); } /* Build the bind address list for the association based on info from the * local endpoint and the remote peer. */ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, sctp_scope_t scope, gfp_t gfp) { int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ flags = (PF_INET6 == asoc->base.sk->sk_family) ? 
SCTP_ADDR6_ALLOWED : 0; if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; if (asoc->peer.ipv6_address) flags |= SCTP_ADDR6_PEERSUPP; return sctp_bind_addr_copy(&asoc->base.bind_addr, &asoc->ep->base.bind_addr, scope, gfp, flags); } /* Build the association's bind address list from the cookie. */ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, struct sctp_cookie *cookie, gfp_t gfp) { int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); int var_size3 = cookie->raw_addr_list_len; __u8 *raw = (__u8 *)cookie->peer_init + var_size2; return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3, asoc->ep->base.bind_addr.port, gfp); } /* Lookup laddr in the bind address list of an association. */ int sctp_assoc_lookup_laddr(struct sctp_association *asoc, const union sctp_addr *laddr) { int found = 0; if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) && sctp_bind_addr_match(&asoc->base.bind_addr, laddr, sctp_sk(asoc->base.sk))) found = 1; return found; } /* Set an association id for a given association */ int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) { int assoc_id; int error = 0; /* If the id is already assigned, keep it. 
*/ if (asoc->assoc_id) return error; retry: if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp))) return -ENOMEM; spin_lock_bh(&sctp_assocs_id_lock); error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, idr_low, &assoc_id); if (!error) { idr_low = assoc_id + 1; if (idr_low == INT_MAX) idr_low = 1; } spin_unlock_bh(&sctp_assocs_id_lock); if (error == -EAGAIN) goto retry; else if (error) return error; asoc->assoc_id = (sctp_assoc_t) assoc_id; return error; } /* Free the ASCONF queue */ static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc) { struct sctp_chunk *asconf; struct sctp_chunk *tmp; list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) { list_del_init(&asconf->list); sctp_chunk_free(asconf); } } /* Free asconf_ack cache */ static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc) { struct sctp_chunk *ack; struct sctp_chunk *tmp; list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, transmitted_list) { list_del_init(&ack->transmitted_list); sctp_chunk_free(ack); } } /* Clean up the ASCONF_ACK queue */ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc) { struct sctp_chunk *ack; struct sctp_chunk *tmp; /* We can remove all the entries from the queue up to * the "Peer-Sequence-Number". */ list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, transmitted_list) { if (ack->subh.addip_hdr->serial == htonl(asoc->peer.addip_serial)) break; list_del_init(&ack->transmitted_list); sctp_chunk_free(ack); } } /* Find the ASCONF_ACK whose serial number matches ASCONF */ struct sctp_chunk *sctp_assoc_lookup_asconf_ack( const struct sctp_association *asoc, __be32 serial) { struct sctp_chunk *ack; /* Walk through the list of cached ASCONF-ACKs and find the * ack chunk whose serial number matches that of the request. 
*/ list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) { if (ack->subh.addip_hdr->serial == serial) { sctp_chunk_hold(ack); return ack; } } return NULL; } void sctp_asconf_queue_teardown(struct sctp_association *asoc) { /* Free any cached ASCONF_ACK chunk. */ sctp_assoc_free_asconf_acks(asoc); /* Free the ASCONF queue. */ sctp_assoc_free_asconf_queue(asoc); /* Free any cached ASCONF chunk. */ if (asoc->addip_last_asconf) sctp_chunk_free(asoc->addip_last_asconf); }
gpl-2.0
sicknemesis/stock_kernel_hammerhead
arch/arm/plat-omap/clock.c
4713
11263
/* * linux/arch/arm/plat-omap/clock.c * * Copyright (C) 2004 - 2008 Nokia corporation * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> * * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/err.h> #include <linux/string.h> #include <linux/clk.h> #include <linux/mutex.h> #include <linux/cpufreq.h> #include <linux/io.h> #include <plat/clock.h> static LIST_HEAD(clocks); static DEFINE_MUTEX(clocks_mutex); static DEFINE_SPINLOCK(clockfw_lock); static struct clk_functions *arch_clock; /* * Standard clock functions defined in include/linux/clk.h */ int clk_enable(struct clk *clk) { unsigned long flags; int ret; if (clk == NULL || IS_ERR(clk)) return -EINVAL; if (!arch_clock || !arch_clock->clk_enable) return -EINVAL; spin_lock_irqsave(&clockfw_lock, flags); ret = arch_clock->clk_enable(clk); spin_unlock_irqrestore(&clockfw_lock, flags); return ret; } EXPORT_SYMBOL(clk_enable); void clk_disable(struct clk *clk) { unsigned long flags; if (clk == NULL || IS_ERR(clk)) return; if (!arch_clock || !arch_clock->clk_disable) return; spin_lock_irqsave(&clockfw_lock, flags); if (clk->usecount == 0) { pr_err("Trying disable clock %s with 0 usecount\n", clk->name); WARN_ON(1); goto out; } arch_clock->clk_disable(clk); out: spin_unlock_irqrestore(&clockfw_lock, flags); } EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { unsigned long flags; unsigned long ret; if (clk == NULL || IS_ERR(clk)) return 0; spin_lock_irqsave(&clockfw_lock, flags); ret = clk->rate; spin_unlock_irqrestore(&clockfw_lock, flags); return ret; } EXPORT_SYMBOL(clk_get_rate); /* * Optional clock functions defined in 
include/linux/clk.h */ long clk_round_rate(struct clk *clk, unsigned long rate) { unsigned long flags; long ret; if (clk == NULL || IS_ERR(clk)) return 0; if (!arch_clock || !arch_clock->clk_round_rate) return 0; spin_lock_irqsave(&clockfw_lock, flags); ret = arch_clock->clk_round_rate(clk, rate); spin_unlock_irqrestore(&clockfw_lock, flags); return ret; } EXPORT_SYMBOL(clk_round_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { unsigned long flags; int ret = -EINVAL; if (clk == NULL || IS_ERR(clk)) return ret; if (!arch_clock || !arch_clock->clk_set_rate) return ret; spin_lock_irqsave(&clockfw_lock, flags); ret = arch_clock->clk_set_rate(clk, rate); if (ret == 0) propagate_rate(clk); spin_unlock_irqrestore(&clockfw_lock, flags); return ret; } EXPORT_SYMBOL(clk_set_rate); int clk_set_parent(struct clk *clk, struct clk *parent) { unsigned long flags; int ret = -EINVAL; if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent)) return ret; if (!arch_clock || !arch_clock->clk_set_parent) return ret; spin_lock_irqsave(&clockfw_lock, flags); if (clk->usecount == 0) { ret = arch_clock->clk_set_parent(clk, parent); if (ret == 0) propagate_rate(clk); } else ret = -EBUSY; spin_unlock_irqrestore(&clockfw_lock, flags); return ret; } EXPORT_SYMBOL(clk_set_parent); struct clk *clk_get_parent(struct clk *clk) { return clk->parent; } EXPORT_SYMBOL(clk_get_parent); /* * OMAP specific clock functions shared between omap1 and omap2 */ int __initdata mpurate; /* * By default we use the rate set by the bootloader. * You can override this with mpurate= cmdline option. 
*/ static int __init omap_clk_setup(char *str) { get_option(&str, &mpurate); if (!mpurate) return 1; if (mpurate < 1000) mpurate *= 1000000; return 1; } __setup("mpurate=", omap_clk_setup); /* Used for clocks that always have same value as the parent clock */ unsigned long followparent_recalc(struct clk *clk) { return clk->parent->rate; } /* * Used for clocks that have the same value as the parent clock, * divided by some factor */ unsigned long omap_fixed_divisor_recalc(struct clk *clk) { WARN_ON(!clk->fixed_div); return clk->parent->rate / clk->fixed_div; } void clk_reparent(struct clk *child, struct clk *parent) { list_del_init(&child->sibling); if (parent) list_add(&child->sibling, &parent->children); child->parent = parent; /* now do the debugfs renaming to reattach the child to the proper parent */ } /* Propagate rate to children */ void propagate_rate(struct clk *tclk) { struct clk *clkp; list_for_each_entry(clkp, &tclk->children, sibling) { if (clkp->recalc) clkp->rate = clkp->recalc(clkp); propagate_rate(clkp); } } static LIST_HEAD(root_clks); /** * recalculate_root_clocks - recalculate and propagate all root clocks * * Recalculates all root clocks (clocks with no parent), which if the * clock's .recalc is set correctly, should also propagate their rates. * Called at init. */ void recalculate_root_clocks(void) { struct clk *clkp; list_for_each_entry(clkp, &root_clks, sibling) { if (clkp->recalc) clkp->rate = clkp->recalc(clkp); propagate_rate(clkp); } } /** * clk_preinit - initialize any fields in the struct clk before clk init * @clk: struct clk * to initialize * * Initialize any struct clk fields needed before normal clk initialization * can run. No return value. 
*/ void clk_preinit(struct clk *clk) { INIT_LIST_HEAD(&clk->children); } int clk_register(struct clk *clk) { if (clk == NULL || IS_ERR(clk)) return -EINVAL; /* * trap out already registered clocks */ if (clk->node.next || clk->node.prev) return 0; mutex_lock(&clocks_mutex); if (clk->parent) list_add(&clk->sibling, &clk->parent->children); else list_add(&clk->sibling, &root_clks); list_add(&clk->node, &clocks); if (clk->init) clk->init(clk); mutex_unlock(&clocks_mutex); return 0; } EXPORT_SYMBOL(clk_register); void clk_unregister(struct clk *clk) { if (clk == NULL || IS_ERR(clk)) return; mutex_lock(&clocks_mutex); list_del(&clk->sibling); list_del(&clk->node); mutex_unlock(&clocks_mutex); } EXPORT_SYMBOL(clk_unregister); void clk_enable_init_clocks(void) { struct clk *clkp; list_for_each_entry(clkp, &clocks, node) { if (clkp->flags & ENABLE_ON_INIT) clk_enable(clkp); } } /** * omap_clk_get_by_name - locate OMAP struct clk by its name * @name: name of the struct clk to locate * * Locate an OMAP struct clk by its name. Assumes that struct clk * names are unique. Returns NULL if not found or a pointer to the * struct clk if found. 
*/ struct clk *omap_clk_get_by_name(const char *name) { struct clk *c; struct clk *ret = NULL; mutex_lock(&clocks_mutex); list_for_each_entry(c, &clocks, node) { if (!strcmp(c->name, name)) { ret = c; break; } } mutex_unlock(&clocks_mutex); return ret; } int omap_clk_enable_autoidle_all(void) { struct clk *c; unsigned long flags; spin_lock_irqsave(&clockfw_lock, flags); list_for_each_entry(c, &clocks, node) if (c->ops->allow_idle) c->ops->allow_idle(c); spin_unlock_irqrestore(&clockfw_lock, flags); return 0; } int omap_clk_disable_autoidle_all(void) { struct clk *c; unsigned long flags; spin_lock_irqsave(&clockfw_lock, flags); list_for_each_entry(c, &clocks, node) if (c->ops->deny_idle) c->ops->deny_idle(c); spin_unlock_irqrestore(&clockfw_lock, flags); return 0; } /* * Low level helpers */ static int clkll_enable_null(struct clk *clk) { return 0; } static void clkll_disable_null(struct clk *clk) { } const struct clkops clkops_null = { .enable = clkll_enable_null, .disable = clkll_disable_null, }; /* * Dummy clock * * Used for clock aliases that are needed on some OMAPs, but not others */ struct clk dummy_ck = { .name = "dummy", .ops = &clkops_null, }; /* * */ #ifdef CONFIG_OMAP_RESET_CLOCKS /* * Disable any unused clocks left on by the bootloader */ static int __init clk_disable_unused(void) { struct clk *ck; unsigned long flags; if (!arch_clock || !arch_clock->clk_disable_unused) return 0; pr_info("clock: disabling unused clocks to save power\n"); spin_lock_irqsave(&clockfw_lock, flags); list_for_each_entry(ck, &clocks, node) { if (ck->ops == &clkops_null) continue; if (ck->usecount > 0 || !ck->enable_reg) continue; arch_clock->clk_disable_unused(ck); } spin_unlock_irqrestore(&clockfw_lock, flags); return 0; } late_initcall(clk_disable_unused); late_initcall(omap_clk_enable_autoidle_all); #endif int __init clk_init(struct clk_functions * custom_clocks) { if (!custom_clocks) { pr_err("No custom clock functions registered\n"); BUG(); } arch_clock = custom_clocks; 
return 0; } #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) /* * debugfs support to trace clock tree hierarchy and attributes */ #include <linux/debugfs.h> #include <linux/seq_file.h> static struct dentry *clk_debugfs_root; static int clk_dbg_show_summary(struct seq_file *s, void *unused) { struct clk *c; struct clk *pa; seq_printf(s, "%-30s %-30s %-10s %s\n", "clock-name", "parent-name", "rate", "use-count"); list_for_each_entry(c, &clocks, node) { pa = c->parent; seq_printf(s, "%-30s %-30s %-10lu %d\n", c->name, pa ? pa->name : "none", c->rate, c->usecount); } return 0; } static int clk_dbg_open(struct inode *inode, struct file *file) { return single_open(file, clk_dbg_show_summary, inode->i_private); } static const struct file_operations debug_clock_fops = { .open = clk_dbg_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int clk_debugfs_register_one(struct clk *c) { int err; struct dentry *d; struct clk *pa = c->parent; d = debugfs_create_dir(c->name, pa ? 
pa->dent : clk_debugfs_root); if (!d) return -ENOMEM; c->dent = d; d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); if (!d) { err = -ENOMEM; goto err_out; } d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); if (!d) { err = -ENOMEM; goto err_out; } d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); if (!d) { err = -ENOMEM; goto err_out; } return 0; err_out: debugfs_remove_recursive(c->dent); return err; } static int clk_debugfs_register(struct clk *c) { int err; struct clk *pa = c->parent; if (pa && !pa->dent) { err = clk_debugfs_register(pa); if (err) return err; } if (!c->dent) { err = clk_debugfs_register_one(c); if (err) return err; } return 0; } static int __init clk_debugfs_init(void) { struct clk *c; struct dentry *d; int err; d = debugfs_create_dir("clock", NULL); if (!d) return -ENOMEM; clk_debugfs_root = d; list_for_each_entry(c, &clocks, node) { err = clk_debugfs_register(c); if (err) goto err_out; } d = debugfs_create_file("summary", S_IRUGO, d, NULL, &debug_clock_fops); if (!d) return -ENOMEM; return 0; err_out: debugfs_remove_recursive(clk_debugfs_root); return err; } late_initcall(clk_debugfs_init); #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
gpl-2.0
StanTRC/lge-kernel-e430
drivers/staging/iio/iio_hwmon.c
4969
5667
/* Hwmon client for industrial I/O devices * * Copyright (c) 2011 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include "consumer.h" #include "types.h" /** * struct iio_hwmon_state - device instance state * @channels: filled with array of channels from iio * @num_channels: number of channels in channels (saves counting twice) * @hwmon_dev: associated hwmon device * @attr_group: the group of attributes * @attrs: null terminated array of attribute pointers. */ struct iio_hwmon_state { struct iio_channel *channels; int num_channels; struct device *hwmon_dev; struct attribute_group attr_group; struct attribute **attrs; }; /* * Assumes that IIO and hwmon operate in the same base units. * This is supposed to be true, but needs verification for * new channel types. */ static ssize_t iio_hwmon_read_val(struct device *dev, struct device_attribute *attr, char *buf) { long result; int val, ret, scaleint, scalepart; struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); struct iio_hwmon_state *state = dev_get_drvdata(dev); /* * No locking between this pair, so theoretically possible * the scale has changed. 
*/ ret = iio_st_read_channel_raw(&state->channels[sattr->index], &val); if (ret < 0) return ret; ret = iio_st_read_channel_scale(&state->channels[sattr->index], &scaleint, &scalepart); if (ret < 0) return ret; switch (ret) { case IIO_VAL_INT: result = val * scaleint; break; case IIO_VAL_INT_PLUS_MICRO: result = (s64)val * (s64)scaleint + div_s64((s64)val * (s64)scalepart, 1000000LL); break; case IIO_VAL_INT_PLUS_NANO: result = (s64)val * (s64)scaleint + div_s64((s64)val * (s64)scalepart, 1000000000LL); break; default: return -EINVAL; } return sprintf(buf, "%ld\n", result); } static void iio_hwmon_free_attrs(struct iio_hwmon_state *st) { int i; struct sensor_device_attribute *a; for (i = 0; i < st->num_channels; i++) if (st->attrs[i]) { a = to_sensor_dev_attr( container_of(st->attrs[i], struct device_attribute, attr)); kfree(a); } } static int __devinit iio_hwmon_probe(struct platform_device *pdev) { struct iio_hwmon_state *st; struct sensor_device_attribute *a; int ret, i; int in_i = 1, temp_i = 1, curr_i = 1; enum iio_chan_type type; st = kzalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) { ret = -ENOMEM; goto error_ret; } st->channels = iio_st_channel_get_all(dev_name(&pdev->dev)); if (IS_ERR(st->channels)) { ret = PTR_ERR(st->channels); goto error_free_state; } /* count how many attributes we have */ while (st->channels[st->num_channels].indio_dev) st->num_channels++; st->attrs = kzalloc(sizeof(st->attrs) * (st->num_channels + 1), GFP_KERNEL); if (st->attrs == NULL) { ret = -ENOMEM; goto error_release_channels; } for (i = 0; i < st->num_channels; i++) { a = kzalloc(sizeof(*a), GFP_KERNEL); if (a == NULL) { ret = -ENOMEM; goto error_free_attrs; } sysfs_attr_init(&a->dev_attr.attr); ret = iio_st_get_channel_type(&st->channels[i], &type); if (ret < 0) { kfree(a); goto error_free_attrs; } switch (type) { case IIO_VOLTAGE: a->dev_attr.attr.name = kasprintf(GFP_KERNEL, "in%d_input", in_i++); break; case IIO_TEMP: a->dev_attr.attr.name = kasprintf(GFP_KERNEL, 
"temp%d_input", temp_i++); break; case IIO_CURRENT: a->dev_attr.attr.name = kasprintf(GFP_KERNEL, "curr%d_input", curr_i++); break; default: ret = -EINVAL; kfree(a); goto error_free_attrs; } if (a->dev_attr.attr.name == NULL) { kfree(a); ret = -ENOMEM; goto error_free_attrs; } a->dev_attr.show = iio_hwmon_read_val; a->dev_attr.attr.mode = S_IRUGO; a->index = i; st->attrs[i] = &a->dev_attr.attr; } st->attr_group.attrs = st->attrs; platform_set_drvdata(pdev, st); ret = sysfs_create_group(&pdev->dev.kobj, &st->attr_group); if (ret < 0) goto error_free_attrs; st->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(st->hwmon_dev)) { ret = PTR_ERR(st->hwmon_dev); goto error_remove_group; } return 0; error_remove_group: sysfs_remove_group(&pdev->dev.kobj, &st->attr_group); error_free_attrs: iio_hwmon_free_attrs(st); kfree(st->attrs); error_release_channels: iio_st_channel_release_all(st->channels); error_free_state: kfree(st); error_ret: return ret; } static int __devexit iio_hwmon_remove(struct platform_device *pdev) { struct iio_hwmon_state *st = platform_get_drvdata(pdev); hwmon_device_unregister(st->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &st->attr_group); iio_hwmon_free_attrs(st); kfree(st->attrs); iio_st_channel_release_all(st->channels); return 0; } static struct platform_driver __refdata iio_hwmon_driver = { .driver = { .name = "iio_hwmon", .owner = THIS_MODULE, }, .probe = iio_hwmon_probe, .remove = __devexit_p(iio_hwmon_remove), }; static int iio_inkern_init(void) { return platform_driver_register(&iio_hwmon_driver); } module_init(iio_inkern_init); static void iio_inkern_exit(void) { platform_driver_unregister(&iio_hwmon_driver); } module_exit(iio_inkern_exit); MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>"); MODULE_DESCRIPTION("IIO to hwmon driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
zhaochengw/A810S_CAF_KERNEL_3.4
drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c
5225
5890
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * This is "two-way" interface, acting as the SHIM layer between driver * and PHY layer. The driver can optionally call this translation layer * to do some preprocessing, then reach PHY. On the PHY->driver direction, * all calls go through this layer since PHY doesn't have access to the * driver's brcms_hardware pointer. 
*/ #include <linux/slab.h> #include <net/mac80211.h> #include "main.h" #include "mac80211_if.h" #include "phy_shim.h" /* PHY SHIM module specific state */ struct phy_shim_info { struct brcms_hardware *wlc_hw; /* pointer to main wlc_hw structure */ struct brcms_c_info *wlc; /* pointer to main wlc structure */ struct brcms_info *wl; /* pointer to os-specific private state */ }; struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw, struct brcms_info *wl, struct brcms_c_info *wlc) { struct phy_shim_info *physhim = NULL; physhim = kzalloc(sizeof(struct phy_shim_info), GFP_ATOMIC); if (!physhim) return NULL; physhim->wlc_hw = wlc_hw; physhim->wlc = wlc; physhim->wl = wl; return physhim; } void wlc_phy_shim_detach(struct phy_shim_info *physhim) { kfree(physhim); } struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim, void (*fn)(struct brcms_phy *pi), void *arg, const char *name) { return (struct wlapi_timer *) brcms_init_timer(physhim->wl, (void (*)(void *))fn, arg, name); } void wlapi_free_timer(struct wlapi_timer *t) { brcms_free_timer((struct brcms_timer *)t); } void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic) { brcms_add_timer((struct brcms_timer *)t, ms, periodic); } bool wlapi_del_timer(struct wlapi_timer *t) { return brcms_del_timer((struct brcms_timer *)t); } void wlapi_intrson(struct phy_shim_info *physhim) { brcms_intrson(physhim->wl); } u32 wlapi_intrsoff(struct phy_shim_info *physhim) { return brcms_intrsoff(physhim->wl); } void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask) { brcms_intrsrestore(physhim->wl, macintmask); } void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v) { brcms_b_write_shm(physhim->wlc_hw, offset, v); } u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset) { return brcms_b_read_shm(physhim->wlc_hw, offset); } void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val, int bands) { brcms_b_mhf(physhim->wlc_hw, idx, 
mask, val, bands); } void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags) { brcms_b_corereset(physhim->wlc_hw, flags); } void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim) { brcms_c_suspend_mac_and_wait(physhim->wlc); } void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode) { brcms_b_switch_macfreq(physhim->wlc_hw, spurmode); } void wlapi_enable_mac(struct phy_shim_info *physhim) { brcms_c_enable_mac(physhim->wlc); } void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val) { brcms_b_mctrl(physhim->wlc_hw, mask, val); } void wlapi_bmac_phy_reset(struct phy_shim_info *physhim) { brcms_b_phy_reset(physhim->wlc_hw); } void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw) { brcms_b_bw_set(physhim->wlc_hw, bw); } u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim) { return brcms_b_get_txant(physhim->wlc_hw); } void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk) { brcms_b_phyclk_fgc(physhim->wlc_hw, clk); } void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk) { brcms_b_macphyclk_set(physhim->wlc_hw, clk); } void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on) { brcms_b_core_phypll_ctl(physhim->wlc_hw, on); } void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim) { brcms_b_core_phypll_reset(physhim->wlc_hw); } void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim) { brcms_c_ucode_wake_override_set(physhim->wlc_hw, BRCMS_WAKE_OVERRIDE_PHYREG); } void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim) { brcms_c_ucode_wake_override_clear(physhim->wlc_hw, BRCMS_WAKE_OVERRIDE_PHYREG); } void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int offset, int len, void *buf) { brcms_b_write_template_ram(physhim->wlc_hw, offset, len, buf); } u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate) { return brcms_b_rate_shm_offset(physhim->wlc_hw, rate); } void 
wlapi_ucode_sample_init(struct phy_shim_info *physhim) { } void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint offset, void *buf, int len, u32 sel) { brcms_b_copyfrom_objmem(physhim->wlc_hw, offset, buf, len, sel); } void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint offset, const void *buf, int l, u32 sel) { brcms_b_copyto_objmem(physhim->wlc_hw, offset, buf, l, sel); } char *wlapi_getvar(struct phy_shim_info *physhim, enum brcms_srom_id id) { return getvar(physhim->wlc_hw->sih, id); } int wlapi_getintvar(struct phy_shim_info *physhim, enum brcms_srom_id id) { return getintvar(physhim->wlc_hw->sih, id); }
gpl-2.0
picmaster/linux-imx6
fs/jfs/file.c
5481
4460
/* * Copyright (C) International Business Machines Corp., 2000-2002 * Portions Copyright (C) Christoph Hellwig, 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/quotaops.h> #include "jfs_incore.h" #include "jfs_inode.h" #include "jfs_dmap.h" #include "jfs_txnmgr.h" #include "jfs_xattr.h" #include "jfs_acl.h" #include "jfs_debug.h" int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; int rc = 0; rc = filemap_write_and_wait_range(inode->i_mapping, start, end); if (rc) return rc; mutex_lock(&inode->i_mutex); if (!(inode->i_state & I_DIRTY) || (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) { /* Make sure committed changes hit the disk */ jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1); mutex_unlock(&inode->i_mutex); return rc; } rc |= jfs_commit_inode(inode, 1); mutex_unlock(&inode->i_mutex); return rc ? -EIO : 0; } static int jfs_open(struct inode *inode, struct file *file) { int rc; if ((rc = dquot_file_open(inode, file))) return rc; /* * We attempt to allow only one "active" file open per aggregate * group. Otherwise, appending to files in parallel can cause * fragmentation within the files. * * If the file is empty, it was probably just created and going * to be written to. 
If it has a size, we'll hold off until the * file is actually grown. */ if (S_ISREG(inode->i_mode) && file->f_mode & FMODE_WRITE && (inode->i_size == 0)) { struct jfs_inode_info *ji = JFS_IP(inode); spin_lock_irq(&ji->ag_lock); if (ji->active_ag == -1) { struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb); ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb); atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]); } spin_unlock_irq(&ji->ag_lock); } return 0; } static int jfs_release(struct inode *inode, struct file *file) { struct jfs_inode_info *ji = JFS_IP(inode); spin_lock_irq(&ji->ag_lock); if (ji->active_ag != -1) { struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap; atomic_dec(&bmap->db_active[ji->active_ag]); ji->active_ag = -1; } spin_unlock_irq(&ji->ag_lock); return 0; } int jfs_setattr(struct dentry *dentry, struct iattr *iattr) { struct inode *inode = dentry->d_inode; int rc; rc = inode_change_ok(inode, iattr); if (rc) return rc; if (is_quota_modification(inode, iattr)) dquot_initialize(inode); if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { rc = dquot_transfer(inode, iattr); if (rc) return rc; } if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size != i_size_read(inode)) { inode_dio_wait(inode); rc = vmtruncate(inode, iattr->ia_size); if (rc) return rc; } setattr_copy(inode, iattr); mark_inode_dirty(inode); if (iattr->ia_valid & ATTR_MODE) rc = jfs_acl_chmod(inode); return rc; } const struct inode_operations jfs_file_inode_operations = { .truncate = jfs_truncate, .setxattr = jfs_setxattr, .getxattr = jfs_getxattr, .listxattr = jfs_listxattr, .removexattr = jfs_removexattr, .setattr = jfs_setattr, #ifdef CONFIG_JFS_POSIX_ACL .get_acl = jfs_get_acl, #endif }; const struct file_operations jfs_file_operations = { .open = jfs_open, .llseek = generic_file_llseek, .write = do_sync_write, .read = do_sync_read, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, 
.mmap = generic_file_mmap, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, .fsync = jfs_fsync, .release = jfs_release, .unlocked_ioctl = jfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = jfs_compat_ioctl, #endif };
gpl-2.0
Albinoman887/android_kernel_samsung_klte
arch/ia64/sn/kernel/setup.c
6505
20665
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/kdev_t.h> #include <linux/string.h> #include <linux/screen_info.h> #include <linux/console.h> #include <linux/timex.h> #include <linux/sched.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/serial.h> #include <linux/irq.h> #include <linux/bootmem.h> #include <linux/mmzone.h> #include <linux/interrupt.h> #include <linux/acpi.h> #include <linux/compiler.h> #include <linux/root_dev.h> #include <linux/nodemask.h> #include <linux/pm.h> #include <linux/efi.h> #include <asm/io.h> #include <asm/sal.h> #include <asm/machvec.h> #include <asm/processor.h> #include <asm/vga.h> #include <asm/setup.h> #include <asm/sn/arch.h> #include <asm/sn/addrs.h> #include <asm/sn/pda.h> #include <asm/sn/nodepda.h> #include <asm/sn/sn_cpuid.h> #include <asm/sn/simulator.h> #include <asm/sn/leds.h> #include <asm/sn/bte.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/clksupport.h> #include <asm/sn/sn_sal.h> #include <asm/sn/geo.h> #include <asm/sn/sn_feature_sets.h> #include "xtalk/xwidgetdev.h" #include "xtalk/hubdev.h" #include <asm/sn/klconfig.h> DEFINE_PER_CPU(struct pda_s, pda_percpu); #define MAX_PHYS_MEMORY (1UL << IA64_MAX_PHYS_BITS) /* Max physical address supported */ extern void bte_init_node(nodepda_t *, cnodeid_t); extern void sn_timer_init(void); extern unsigned long last_time_offset; extern void (*ia64_mark_idle) (int); extern void snidle(int); unsigned long sn_rtc_cycles_per_second; EXPORT_SYMBOL(sn_rtc_cycles_per_second); DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); EXPORT_PER_CPU_SYMBOL(__sn_hub_info); DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); 
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); EXPORT_PER_CPU_SYMBOL(__sn_nodepda); char sn_system_serial_number_string[128]; EXPORT_SYMBOL(sn_system_serial_number_string); u64 sn_partition_serial_number; EXPORT_SYMBOL(sn_partition_serial_number); u8 sn_partition_id; EXPORT_SYMBOL(sn_partition_id); u8 sn_system_size; EXPORT_SYMBOL(sn_system_size); u8 sn_sharing_domain_size; EXPORT_SYMBOL(sn_sharing_domain_size); u8 sn_coherency_id; EXPORT_SYMBOL(sn_coherency_id); u8 sn_region_size; EXPORT_SYMBOL(sn_region_size); int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */ short physical_node_map[MAX_NUMALINK_NODES]; static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS]; EXPORT_SYMBOL(physical_node_map); int num_cnodes; static void sn_init_pdas(char **); static void build_cnode_tables(void); static nodepda_t *nodepdaindr[MAX_COMPACT_NODES]; /* * The format of "screen_info" is strange, and due to early i386-setup * code. This is just enough to make the console code think we're on a * VGA color display. */ struct screen_info sn_screen_info = { .orig_x = 0, .orig_y = 0, .orig_video_mode = 3, .orig_video_cols = 80, .orig_video_ega_bx = 3, .orig_video_lines = 25, .orig_video_isVGA = 1, .orig_video_points = 16 }; /* * This routine can only be used during init, since * smp_boot_data is an init data structure. * We have to use smp_boot_data.cpu_phys_id to find * the physical id of the processor because the normal * cpu_physical_id() relies on data structures that * may not be initialized yet. */ static int __init pxm_to_nasid(int pxm) { int i; int nid; nid = pxm_to_node(pxm); for (i = 0; i < num_node_memblks; i++) { if (node_memblk[i].nid == nid) { return NASID_GET(node_memblk[i].start_paddr); } } return -1; } /** * early_sn_setup - early setup routine for SN platforms * * Sets up an initial console to aid debugging. Intended primarily * for bringup. See start_kernel() in init/main.c. 
*/ void __init early_sn_setup(void) { efi_system_table_t *efi_systab; efi_config_table_t *config_tables; struct ia64_sal_systab *sal_systab; struct ia64_sal_desc_entry_point *ep; char *p; int i, j; /* * Parse enough of the SAL tables to locate the SAL entry point. Since, console * IO on SN2 is done via SAL calls, early_printk won't work without this. * * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c. * Any changes to those file may have to be made here as well. */ efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab); config_tables = __va(efi_systab->tables); for (i = 0; i < efi_systab->nr_tables; i++) { if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) { sal_systab = __va(config_tables[i].table); p = (char *)(sal_systab + 1); for (j = 0; j < sal_systab->entry_count; j++) { if (*p == SAL_DESC_ENTRY_POINT) { ep = (struct ia64_sal_desc_entry_point *)p; ia64_sal_handler_init(__va (ep->sal_proc), __va(ep->gp)); return; } p += SAL_DESC_SIZE(*p); } } } /* Uh-oh, SAL not available?? */ printk(KERN_ERR "failed to find SAL entry point\n"); } extern int platform_intr_list[]; static int __cpuinitdata shub_1_1_found; /* * sn_check_for_wars * * Set flag for enabling shub specific wars */ static inline int __cpuinit is_shub_1_1(int nasid) { unsigned long id; int rev; if (is_shub2()) return 0; id = REMOTE_HUB_L(nasid, SH1_SHUB_ID); rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT; return rev <= 2; } static void __cpuinit sn_check_for_wars(void) { int cnode; if (is_shub2()) { /* none yet */ } else { for_each_online_node(cnode) { if (is_shub_1_1(cnodeid_to_nasid(cnode))) shub_1_1_found = 1; } } } /* * Scan the EFI PCDP table (if it exists) for an acceptable VGA console * output device. If one exists, pick it and set sn_legacy_{io,mem} to * reflect the bus offsets needed to address it. 
* * Since pcdp support in SN is not supported in the 2.4 kernel (or at least * the one lbs is based on) just declare the needed structs here. * * Reference spec http://www.dig64.org/specifications/DIG64_PCDPv20.pdf * * Returns 0 if no acceptable vga is found, !0 otherwise. * * Note: This stuff is duped here because Altix requires the PCDP to * locate a usable VGA device due to lack of proper ACPI support. Structures * could be used from drivers/firmware/pcdp.h, but it was decided that moving * this file to a more public location just for Altix use was undesirable. */ struct hcdp_uart_desc { u8 pad[45]; }; struct pcdp { u8 signature[4]; /* should be 'HCDP' */ u32 length; u8 rev; /* should be >=3 for pcdp, <3 for hcdp */ u8 sum; u8 oem_id[6]; u64 oem_tableid; u32 oem_rev; u32 creator_id; u32 creator_rev; u32 num_type0; struct hcdp_uart_desc uart[0]; /* num_type0 of these */ /* pcdp descriptors follow */ } __attribute__((packed)); struct pcdp_device_desc { u8 type; u8 primary; u16 length; u16 index; /* interconnect specific structure follows */ /* device specific structure follows that */ } __attribute__((packed)); struct pcdp_interface_pci { u8 type; /* 1 == pci */ u8 reserved; u16 length; u8 segment; u8 bus; u8 dev; u8 fun; u16 devid; u16 vendid; u32 acpi_interrupt; u64 mmio_tra; u64 ioport_tra; u8 flags; u8 translation; } __attribute__((packed)); struct pcdp_vga_device { u8 num_eas_desc; /* ACPI Extended Address Space Desc follows */ } __attribute__((packed)); /* from pcdp_device_desc.primary */ #define PCDP_PRIMARY_CONSOLE 0x01 /* from pcdp_device_desc.type */ #define PCDP_CONSOLE_INOUT 0x0 #define PCDP_CONSOLE_DEBUG 0x1 #define PCDP_CONSOLE_OUT 0x2 #define PCDP_CONSOLE_IN 0x3 #define PCDP_CONSOLE_TYPE_VGA 0x8 #define PCDP_CONSOLE_VGA (PCDP_CONSOLE_TYPE_VGA | PCDP_CONSOLE_OUT) /* from pcdp_interface_pci.type */ #define PCDP_IF_PCI 1 /* from pcdp_interface_pci.translation */ #define PCDP_PCI_TRANS_IOPORT 0x02 #define PCDP_PCI_TRANS_MMIO 0x01 #if defined(CONFIG_VT) 
&& defined(CONFIG_VGA_CONSOLE) static void sn_scan_pcdp(void) { u8 *bp; struct pcdp *pcdp; struct pcdp_device_desc device; struct pcdp_interface_pci if_pci; extern struct efi efi; if (efi.hcdp == EFI_INVALID_TABLE_ADDR) return; /* no hcdp/pcdp table */ pcdp = __va(efi.hcdp); if (pcdp->rev < 3) return; /* only support PCDP (rev >= 3) */ for (bp = (u8 *)&pcdp->uart[pcdp->num_type0]; bp < (u8 *)pcdp + pcdp->length; bp += device.length) { memcpy(&device, bp, sizeof(device)); if (! (device.primary & PCDP_PRIMARY_CONSOLE)) continue; /* not primary console */ if (device.type != PCDP_CONSOLE_VGA) continue; /* not VGA descriptor */ memcpy(&if_pci, bp+sizeof(device), sizeof(if_pci)); if (if_pci.type != PCDP_IF_PCI) continue; /* not PCI interconnect */ if (if_pci.translation & PCDP_PCI_TRANS_IOPORT) vga_console_iobase = if_pci.ioport_tra; if (if_pci.translation & PCDP_PCI_TRANS_MMIO) vga_console_membase = if_pci.mmio_tra | __IA64_UNCACHED_OFFSET; break; /* once we find the primary, we're done */ } } #endif static unsigned long sn2_rtc_initial; /** * sn_setup - SN platform setup routine * @cmdline_p: kernel command line * * Handles platform setup for SN machines. This includes determining * the RTC frequency (via a SAL call), initializing secondary CPUs, and * setting up per-node data areas. The console is also initialized here. */ void __init sn_setup(char **cmdline_p) { long status, ticks_per_sec, drift; u32 version = sn_sal_rev(); extern void sn_cpu_init(void); sn2_rtc_initial = rtc_time(); ia64_sn_plat_set_error_handling_features(); // obsolete ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV); ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES); /* * Note: The calls to notify the PROM of ACPI and PCI Segment * support must be done prior to acpi_load_tables(), as * an ACPI capable PROM will rebuild the DSDT as result * of the call. 
*/ ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE); ia64_sn_set_os_feature(OSF_ACPI_ENABLE); /* Load the new DSDT and SSDT tables into the global table list. */ acpi_table_init(); #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) /* * Handle SN vga console. * * SN systems do not have enough ACPI table information * being passed from prom to identify VGA adapters and the legacy * addresses to access them. Until that is done, SN systems rely * on the PCDP table to identify the primary VGA console if one * exists. * * However, kernel PCDP support is optional, and even if it is built * into the kernel, it will not be used if the boot cmdline contains * console= directives. * * So, to work around this mess, we duplicate some of the PCDP code * here so that the primary VGA console (as defined by PCDP) will * work on SN systems even if a different console (e.g. serial) is * selected on the boot line (or CONFIG_EFI_PCDP is off). */ if (! vga_console_membase) sn_scan_pcdp(); /* * Setup legacy IO space. * vga_console_iobase maps to PCI IO Space address 0 on the * bus containing the VGA console. */ if (vga_console_iobase) { io_space[0].mmio_base = (unsigned long) ioremap(vga_console_iobase, 0); io_space[0].sparse = 0; } if (vga_console_membase) { /* usable vga ... make tty0 the preferred default console */ if (!strstr(*cmdline_p, "console=")) add_preferred_console("tty", 0, NULL); } else { printk(KERN_DEBUG "SGI: Disabling VGA console\n"); if (!strstr(*cmdline_p, "console=")) add_preferred_console("ttySG", 0, NULL); #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #else conswitchp = NULL; #endif /* CONFIG_DUMMY_CONSOLE */ } #endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */ MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY; /* * Build the tables for managing cnodes. 
*/ build_cnode_tables(); status = ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, &drift); if (status != 0 || ticks_per_sec < 100000) { printk(KERN_WARNING "unable to determine platform RTC clock frequency, guessing.\n"); /* PROM gives wrong value for clock freq. so guess */ sn_rtc_cycles_per_second = 1000000000000UL / 30000UL; } else sn_rtc_cycles_per_second = ticks_per_sec; platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR; printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF); /* * we set the default root device to /dev/hda * to make simulation easy */ ROOT_DEV = Root_HDA1; /* * Create the PDAs and NODEPDAs for all the cpus. */ sn_init_pdas(cmdline_p); ia64_mark_idle = &snidle; /* * For the bootcpu, we do this here. All other cpus will make the * call as part of cpu_init in slave cpu initialization. */ sn_cpu_init(); #ifdef CONFIG_SMP init_smp_config(); #endif screen_info = sn_screen_info; sn_timer_init(); /* * set pm_power_off to a SAL call to allow * sn machines to power off. The SAL call can be replaced * by an ACPI interface call when ACPI is fully implemented * for sn. */ pm_power_off = ia64_sn_power_down; current->thread.flags |= IA64_THREAD_MIGRATION; } /** * sn_init_pdas - setup node data areas * * One time setup for Node Data Area. Called by sn_setup(). */ static void __init sn_init_pdas(char **cmdline_p) { cnodeid_t cnode; /* * Allocate & initialize the nodepda for each node. */ for_each_online_node(cnode) { nodepdaindr[cnode] = alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t)); memset(nodepdaindr[cnode]->phys_cpuid, -1, sizeof(nodepdaindr[cnode]->phys_cpuid)); spin_lock_init(&nodepdaindr[cnode]->ptc_lock); } /* * Allocate & initialize nodepda for TIOs. For now, put them on node 0. */ for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) nodepdaindr[cnode] = alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t)); /* * Now copy the array of nodepda pointers to each nodepda. 
*/ for (cnode = 0; cnode < num_cnodes; cnode++) memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr, sizeof(nodepdaindr)); /* * Set up IO related platform-dependent nodepda fields. * The following routine actually sets up the hubinfo struct * in nodepda. */ for_each_online_node(cnode) { bte_init_node(nodepdaindr[cnode], cnode); } /* * Initialize the per node hubdev. This includes IO Nodes and * headless/memless nodes. */ for (cnode = 0; cnode < num_cnodes; cnode++) { hubdev_init_node(nodepdaindr[cnode], cnode); } } /** * sn_cpu_init - initialize per-cpu data areas * @cpuid: cpuid of the caller * * Called during cpu initialization on each cpu as it starts. * Currently, initializes the per-cpu data area for SNIA. * Also sets up a few fields in the nodepda. Also known as * platform_cpu_init() by the ia64 machvec code. */ void __cpuinit sn_cpu_init(void) { int cpuid; int cpuphyid; int nasid; int subnode; int slice; int cnode; int i; static int wars_have_been_checked, set_cpu0_number; cpuid = smp_processor_id(); if (cpuid == 0 && IS_MEDUSA()) { if (ia64_sn_is_fake_prom()) sn_prom_type = 2; else sn_prom_type = 1; printk(KERN_INFO "Running on medusa with %s PROM\n", (sn_prom_type == 1) ? "real" : "fake"); } memset(pda, 0, sizeof(pda)); if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift, &sn_system_size, &sn_sharing_domain_size, &sn_partition_id, &sn_coherency_id, &sn_region_size)) BUG(); sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2; /* * Don't check status. The SAL call is not supported on all PROMs * but a failure is harmless. * Architecturally, cpu_init is always called twice on cpu 0. We * should set cpu_number on cpu 0 once. */ if (cpuid == 0) { if (!set_cpu0_number) { (void) ia64_sn_set_cpu_number(cpuid); set_cpu0_number = 1; } } else (void) ia64_sn_set_cpu_number(cpuid); /* * The boot cpu makes this call again after platform initialization is * complete. 
*/ if (nodepdaindr[0] == NULL) return; for (i = 0; i < MAX_PROM_FEATURE_SETS; i++) if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0) break; cpuphyid = get_sapicid(); if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice)) BUG(); for (i=0; i < MAX_NUMNODES; i++) { if (nodepdaindr[i]) { nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid; nodepdaindr[i]->phys_cpuid[cpuid].slice = slice; nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode; } } cnode = nasid_to_cnodeid(nasid); sn_nodepda = nodepdaindr[cnode]; pda->led_address = (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); pda->led_state = LED_ALWAYS_SET; pda->hb_count = HZ / 2; pda->hb_state = 0; pda->idle_flag = 0; if (cpuid != 0) { /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */ memcpy(sn_cnodeid_to_nasid, (&per_cpu(__sn_cnodeid_to_nasid, 0)), sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); } /* * Check for WARs. * Only needs to be done once, on BSP. * Has to be done after loop above, because it uses this cpu's * sn_cnodeid_to_nasid table which was just initialized if this * isn't cpu 0. * Has to be done before assignment below. */ if (!wars_have_been_checked) { sn_check_for_wars(); wars_have_been_checked = 1; } sn_hub_info->shub_1_1_found = shub_1_1_found; /* * Set up addresses of PIO/MEM write status registers. */ { u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0}; u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3}; u64 *pio; pio = is_shub1() ? pio1 : pio2; pda->pio_write_status_addr = (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]); pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0; } /* * WAR addresses for SHUB 1.x. */ if (local_node_data->active_cpu_count++ == 0 && is_shub1()) { int buddy_nasid; buddy_nasid = cnodeid_to_nasid(numa_node_id() == num_online_nodes() - 1 ? 
0 : numa_node_id() + 1); pda->pio_shub_war_cam_addr = (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, SH1_PI_CAM_CONTROL); } } /* * Build tables for converting between NASIDs and cnodes. */ static inline int __init board_needs_cnode(int type) { return (type == KLTYPE_SNIA || type == KLTYPE_TIO); } void __init build_cnode_tables(void) { int nasid; int node; lboard_t *brd; memset(physical_node_map, -1, sizeof(physical_node_map)); memset(sn_cnodeid_to_nasid, -1, sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); /* * First populate the tables with C/M bricks. This ensures that * cnode == node for all C & M bricks. */ for_each_online_node(node) { nasid = pxm_to_nasid(node_to_pxm(node)); sn_cnodeid_to_nasid[node] = nasid; physical_node_map[nasid] = node; } /* * num_cnodes is total number of C/M/TIO bricks. Because of the 256 node * limit on the number of nodes, we can't use the generic node numbers * for this. Note that num_cnodes is incremented below as TIOs or * headless/memoryless nodes are discovered. 
*/ num_cnodes = num_online_nodes(); /* fakeprom does not support klgraph */ if (IS_RUNNING_ON_FAKE_PROM()) return; /* Find TIOs & headless/memoryless nodes and add them to the tables */ for_each_online_node(node) { kl_config_hdr_t *klgraph_header; nasid = cnodeid_to_nasid(node); klgraph_header = ia64_sn_get_klconfig_addr(nasid); BUG_ON(klgraph_header == NULL); brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); while (brd) { if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid; physical_node_map[brd->brd_nasid] = num_cnodes++; } brd = find_lboard_next(brd); } } } int nasid_slice_to_cpuid(int nasid, int slice) { long cpu; for (cpu = 0; cpu < nr_cpu_ids; cpu++) if (cpuid_to_nasid(cpu) == nasid && cpuid_to_slice(cpu) == slice) return cpu; return -1; } int sn_prom_feature_available(int id) { if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS) return 0; return test_bit(id, sn_prom_features); } void sn_kernel_launch_event(void) { /* ignore status until we understand possible failure, if any*/ if (ia64_sn_kernel_launch_event()) printk(KERN_ERR "KEXEC is not supported in this PROM, Please update the PROM.\n"); } EXPORT_SYMBOL(sn_prom_feature_available);
gpl-2.0
Advael/fjord-kernel
arch/score/kernel/setup.c
8809
4135
/*
 * arch/score/kernel/setup.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>

#include <asm-generic/sections.h>
#include <asm/setup.h>

/* NOTE(review): defined for the generic console code but never filled in
 * by this file. */
struct screen_info screen_info;

/* Kernel stack pointer; not written here — presumably set from assembly
 * entry code. TODO confirm against head.S. */
unsigned long kernelsp;

/* Static storage for the boot command line; handed to the generic code
 * through *cmdline_p in setup_arch().  Never written in this file. */
static char command_line[COMMAND_LINE_SIZE];

/* /proc/iomem entries for the kernel text and data segments; the start/end
 * fields are filled in by resource_init(). */
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

/*
 * bootmem_init() - bring up the boot-time page allocator.
 *
 * Registers the [MEMORY_START, MEMORY_START + MEMORY_SIZE) range with the
 * bootmem allocator and memblock, frees everything above the kernel image
 * (_end) into it, reserves the allocator's own bitmap, and finally reserves
 * the initrd image (if the bootloader supplied one) so it is not handed out
 * before it can be unpacked.
 */
static void __init bootmem_init(void)
{
	unsigned long start_pfn, bootmap_size;
	/* Size of the initrd image in bytes; 0 when no initrd was passed. */
	unsigned long size = initrd_end - initrd_start;

	/* First allocatable PFN: memory below _end holds the kernel image. */
	start_pfn = PFN_UP(__pa(&_end));
	min_low_pfn = PFN_UP(MEMORY_START);
	max_low_pfn = PFN_UP(MEMORY_START + MEMORY_SIZE);
	max_mapnr = max_low_pfn - min_low_pfn;

	/* Initialize the boot-time allocator with low memory only. */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 min_low_pfn, max_low_pfn);
	memblock_add_node(PFN_PHYS(min_low_pfn),
			  PFN_PHYS(max_low_pfn - min_low_pfn), 0);
	free_bootmem(PFN_PHYS(start_pfn),
		     (max_low_pfn - start_pfn) << PAGE_SHIFT);
	memory_present(0, start_pfn, max_low_pfn);

	/* Reserve space for the bootmem bitmap. */
	reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);

	/* No trailing newline on purpose: a KERN_CONT " - disabling initrd"
	 * message is appended at the "disable" label below. */
	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}

	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	/* Reserve the initrd image itself so bootmem won't hand it out. */
	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

/*
 * resource_init() - build the /proc/iomem resource tree.
 *
 * Registers a single "System RAM" resource covering all of memory and
 * nests the kernel code and data segment resources inside it.  Must run
 * after bootmem_init(): the RAM resource is allocated with alloc_bootmem().
 */
static void __init resource_init(void)
{
	struct resource *res;

	code_resource.start = __pa(&_text);
	code_resource.end = __pa(&_etext) - 1;
	data_resource.start = __pa(&_etext);
	data_resource.end = __pa(&_edata) - 1;

	res = alloc_bootmem(sizeof(struct resource));
	res->name = "System RAM";
	res->start = MEMORY_START;
	res->end = MEMORY_START + MEMORY_SIZE - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	request_resource(&iomem_resource, res);

	request_resource(res, &code_resource);
	request_resource(res, &data_resource);
}

/*
 * setup_arch() - architecture-specific early setup.
 * @cmdline_p: out-parameter; receives a pointer to the command-line buffer.
 *
 * Ordering note: resource_init() allocates from bootmem, so it must run
 * after bootmem_init(); paging_init() likewise follows the allocator setup.
 * Address-space randomization is disabled unconditionally on this arch.
 */
void __init setup_arch(char **cmdline_p)
{
	randomize_va_space = 0;
	*cmdline_p = command_line;

	cpu_cache_init();
	tlb_init();
	bootmem_init();
	paging_init();
	resource_init();
}

/* /proc/cpuinfo: emit one minimal record for the (single) processor. */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	/* The iterator cookie is the CPU index + 1 (see c_start()). */
	unsigned long n = (unsigned long) v - 1;

	seq_printf(m, "processor\t\t: %ld\n", n);
	seq_printf(m, "\n");

	return 0;
}

/*
 * seq_file iterator over exactly one CPU.  The cookie returned is the
 * index + 1 so that index 0 does not read as the NULL "end" sentinel.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	unsigned long i = *pos;

	return i < 1 ? (void *) (i + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

/* Nothing to register for CPU topology on this architecture. */
static int __init topology_init(void)
{
	return 0;
}

subsys_initcall(topology_init);
gpl-2.0
sandymanu/manufooty_yu
arch/powerpc/boot/treeboot-currituck.c
8809
3066
/* * Copyright © 2011 Tony Breeds IBM Corporation * * Based on earlier code: * Copyright (C) Paul Mackerras 1997. * * Matt Porter <mporter@kernel.crashing.org> * Copyright 2002-2005 MontaVista Software Inc. * * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * Copyright (c) 2003, 2004 Zultys Technologies * * Copyright 2007 David Gibson, IBM Corporation. * Copyright 2010 Ben. Herrenschmidt, IBM Corporation. * Copyright © 2011 David Kleikamp IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "reg.h" #include "io.h" #include "dcr.h" #include "4xx.h" #include "44x.h" #include "libfdt.h" BSS_STACK(4096); #define MAX_RANKS 0x4 #define DDR3_MR0CF 0x80010011U static unsigned long long ibm_currituck_memsize; static unsigned long long ibm_currituck_detect_memsize(void) { u32 reg; unsigned i; unsigned long long memsize = 0; for(i = 0; i < MAX_RANKS; i++){ reg = mfdcrx(DDR3_MR0CF + i); if (!(reg & 1)) continue; reg &= 0x0000f000; reg >>= 12; memsize += (0x800000ULL << reg); } return memsize; } static void ibm_currituck_fixups(void) { void *devp = finddevice("/"); u32 dma_ranges[7]; dt_fixup_memory(0x0ULL, ibm_currituck_memsize); while ((devp = find_node_by_devtype(devp, "pci"))) { if (getprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)) < 0) { printf("%s: Failed to get dma-ranges\r\n", __func__); continue; } dma_ranges[5] = ibm_currituck_memsize >> 32; dma_ranges[6] = ibm_currituck_memsize & 0xffffffffUL; setprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)); } } #define SPRN_PIR 0x11E /* Processor Indentification Register */ void platform_init(void) { unsigned long 
end_of_ram, avail_ram; u32 pir_reg; int node, size; const u32 *timebase; ibm_currituck_memsize = ibm_currituck_detect_memsize(); if (ibm_currituck_memsize >> 32) end_of_ram = ~0UL; else end_of_ram = ibm_currituck_memsize; avail_ram = end_of_ram - (unsigned long)_end; simple_alloc_init(_end, avail_ram, 128, 64); platform_ops.fixups = ibm_currituck_fixups; platform_ops.exit = ibm44x_dbcr_reset; pir_reg = mfspr(SPRN_PIR); /* Make sure FDT blob is sane */ if (fdt_check_header(_dtb_start) != 0) fatal("Invalid device tree blob\n"); node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type", "cpu", sizeof("cpu")); if (!node) fatal("Cannot find cpu node\n"); timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size); if (timebase && (size == 4)) timebase_period_ns = 1000000000 / *timebase; fdt_set_boot_cpuid_phys(_dtb_start, pir_reg); fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
CyanogenMod/android_kernel_sony_msm8660
net/ceph/buffer.c
10857
1396
#include <linux/ceph/ceph_debug.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/ceph/buffer.h> #include <linux/ceph/decode.h> struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) { struct ceph_buffer *b; b = kmalloc(sizeof(*b), gfp); if (!b) return NULL; b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN); if (b->vec.iov_base) { b->is_vmalloc = false; } else { b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL); if (!b->vec.iov_base) { kfree(b); return NULL; } b->is_vmalloc = true; } kref_init(&b->kref); b->alloc_len = len; b->vec.iov_len = len; dout("buffer_new %p\n", b); return b; } EXPORT_SYMBOL(ceph_buffer_new); void ceph_buffer_release(struct kref *kref) { struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref); dout("buffer_release %p\n", b); if (b->vec.iov_base) { if (b->is_vmalloc) vfree(b->vec.iov_base); else kfree(b->vec.iov_base); } kfree(b); } EXPORT_SYMBOL(ceph_buffer_release); int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end) { size_t len; ceph_decode_need(p, end, sizeof(u32), bad); len = ceph_decode_32(p); dout("decode_buffer len %d\n", (int)len); ceph_decode_need(p, end, len, bad); *b = ceph_buffer_new(len, GFP_NOFS); if (!*b) return -ENOMEM; ceph_decode_copy(p, (*b)->vec.iov_base, len); return 0; bad: return -EINVAL; }
gpl-2.0
EPDCenterSpain/bq-DC-v2
drivers/video/console/font_10x18.c
14697
143983
/******************************** * adapted from font_sun12x22.c * * by Jurriaan Kalkman 06-2005 * ********************************/ #include <linux/font.h> #define FONTDATAMAX 9216 static const unsigned char fontdata_10x18[FONTDATAMAX] = { /* 0 0x00 '^@' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 1 0x01 '^A' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x80, /* 0011111110 */ 0x40, 0x40, /* 0100000001 */ 0x5b, 0x40, /* 0101101101 */ 0x40, 0x40, /* 0100000001 */ 0x44, 0x40, /* 0100010001 */ 0x44, 0x40, /* 0100010001 */ 0x51, 0x40, /* 0101000101 */ 0x4e, 0x40, /* 0100111001 */ 0x40, 0x40, /* 0100000001 */ 0x3f, 0x80, /* 0011111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 2 0x02 '^B' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x80, /* 0011111110 */ 0x7f, 0xc0, /* 0111111111 */ 0x64, 0xc0, /* 0110010011 */ 0x7f, 0xc0, /* 0111111111 */ 0x7b, 0xc0, /* 0111101111 */ 0x7b, 0xc0, /* 0111101111 */ 0x6e, 0xc0, /* 0110111011 */ 0x71, 0xc0, /* 0111000111 */ 0x7f, 0xc0, /* 0111111111 */ 0x3f, 0x80, /* 0011111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 3 0x03 '^C' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 
0x00, 0x00, /* 0000000000 */ 0x11, 0x00, /* 0001000100 */ 0x3b, 0x80, /* 0011101110 */ 0x7f, 0xc0, /* 0111111111 */ 0x3f, 0x80, /* 0011111110 */ 0x3f, 0x80, /* 0011111110 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 4 0x04 '^D' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x3f, 0x80, /* 0011111110 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 5 0x05 '^E' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0xc0, /* 0111101111 */ 0x35, 0x80, /* 0011010110 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 6 0x06 '^F' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x3f, 0x80, /* 0011111110 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0xc0, /* 0111111111 */ 0x35, 0x80, /* 0011010110 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 
0x00, 0x00, /* 0000000000 */ /* 7 0x07 '^G' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 8 0x08 '^H' */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xf3, 0xc0, /* 1111001111 */ 0xe1, 0xc0, /* 1110000111 */ 0xe1, 0xc0, /* 1110000111 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0xe1, 0xc0, /* 1110000111 */ 0xe1, 0xc0, /* 1110000111 */ 0xf3, 0xc0, /* 1111001111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ /* 9 0x09 '^I' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x12, 0x00, /* 0001001000 */ 0x12, 0x00, /* 0001001000 */ 0x21, 0x00, /* 0010000100 */ 0x21, 0x00, /* 0010000100 */ 0x12, 0x00, /* 0001001000 */ 0x12, 0x00, /* 0001001000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 10 0x0a '^J' */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xf3, 0xc0, /* 1111001111 */ 0xed, 0xc0, /* 1110110111 */ 0xed, 0xc0, /* 1110110111 */ 0xde, 0xc0, /* 1101111011 */ 0xde, 0xc0, /* 1101111011 */ 0xed, 0xc0, /* 1110110111 */ 0xed, 0xc0, 
/* 1110110111 */ 0xf3, 0xc0, /* 1111001111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ /* 11 0x0b '^K' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x03, 0xc0, /* 0000001111 */ 0x06, 0xc0, /* 0000011011 */ 0x0c, 0xc0, /* 0000110011 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc3, 0x00, /* 1100001100 */ 0xc3, 0x00, /* 1100001100 */ 0x66, 0x00, /* 0110011000 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 12 0x0c '^L' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 13 0x0d '^M' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0x80, /* 0000111110 */ 0x08, 0x80, /* 0000100010 */ 0x0f, 0x80, /* 0000111110 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x38, 0x00, /* 0011100000 */ 0x78, 0x00, /* 0111100000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 14 0x0e '^N' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x80, /* 0001111110 */ 0x10, 0x80, /* 0001000010 */ 0x1f, 0x80, /* 
0001111110 */ 0x10, 0x80, /* 0001000010 */ 0x10, 0x80, /* 0001000010 */ 0x10, 0x80, /* 0001000010 */ 0x10, 0x80, /* 0001000010 */ 0x13, 0x80, /* 0001001110 */ 0x17, 0x80, /* 0001011110 */ 0x73, 0x00, /* 0111001100 */ 0xf0, 0x00, /* 1111000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 15 0x0f '^O' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x24, 0x80, /* 0010010010 */ 0x15, 0x00, /* 0001010100 */ 0x55, 0x40, /* 0101010101 */ 0x3f, 0x80, /* 0011111110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x3f, 0x80, /* 0011111110 */ 0x55, 0x40, /* 0101010101 */ 0x15, 0x00, /* 0001010100 */ 0x24, 0x80, /* 0010010010 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 16 0x10 '^P' */ 0x00, 0x80, /* 0000000010 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x80, /* 0000001110 */ 0x07, 0x80, /* 0000011110 */ 0x0f, 0x80, /* 0000111110 */ 0x1f, 0x80, /* 0001111110 */ 0x3f, 0x80, /* 0011111110 */ 0x7f, 0x80, /* 0111111110 */ 0xff, 0x80, /* 1111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x80, /* 0011111110 */ 0x1f, 0x80, /* 0001111110 */ 0x0f, 0x80, /* 0000111110 */ 0x07, 0x80, /* 0000011110 */ 0x03, 0x80, /* 0000001110 */ 0x01, 0x80, /* 0000000110 */ 0x00, 0x80, /* 0000000010 */ 0x00, 0x00, /* 0000000000 */ /* 17 0x11 '^Q' */ 0x40, 0x00, /* 0100000000 */ 0x60, 0x00, /* 0110000000 */ 0x70, 0x00, /* 0111000000 */ 0x78, 0x00, /* 0111100000 */ 0x7c, 0x00, /* 0111110000 */ 0x7e, 0x00, /* 0111111000 */ 0x7f, 0x00, /* 0111111100 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x00, /* 0111111100 */ 0x7e, 0x00, /* 0111111000 */ 0x7c, 0x00, /* 0111110000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ 0x60, 0x00, /* 0110000000 */ 0x40, 0x00, /* 0100000000 */ 0x00, 0x00, /* 0000000000 */ /* 18 0x12 
'^R' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 19 0x13 '^S' */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 20 0x14 '^T' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0xc0, /* 0001111111 */ 0x39, 0x80, /* 0011100110 */ 0x79, 0x80, /* 0111100110 */ 0x79, 0x80, /* 0111100110 */ 0x79, 0x80, /* 0111100110 */ 0x39, 0x80, /* 0011100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x39, 0xc0, /* 0011100111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 21 0x15 '^U' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3e, 0x00, /* 0011111000 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x3e, 0x00, /* 0011111000 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x3e, 0x00, /* 0011111000 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 
0000001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 22 0x16 '^V' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 23 0x17 '^W' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 24 0x18 '^X' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 25 0x19 '^Y' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 
0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 26 0x1a '^Z' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x06, 0x00, /* 0000011000 */ 0x07, 0x00, /* 0000011100 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x07, 0x00, /* 0000011100 */ 0x06, 0x00, /* 0000011000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 27 0x1b '^[' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x08, 0x00, /* 0000100000 */ 0x18, 0x00, /* 0001100000 */ 0x38, 0x00, /* 0011100000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x38, 0x00, /* 0011100000 */ 0x18, 0x00, /* 0001100000 */ 0x08, 0x00, /* 0000100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 28 0x1c '^\' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 29 0x1d '^]' */ 0x00, 0x00, /* 
0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x12, 0x00, /* 0001001000 */ 0x33, 0x00, /* 0011001100 */ 0x73, 0x80, /* 0111001110 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x73, 0x80, /* 0111001110 */ 0x33, 0x00, /* 0011001100 */ 0x12, 0x00, /* 0001001000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 30 0x1e '^^' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x3f, 0x80, /* 0011111110 */ 0x3f, 0x80, /* 0011111110 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0xc0, /* 0111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 31 0x1f '^_' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0xc0, /* 0111111111 */ 0x3f, 0x80, /* 0011111110 */ 0x3f, 0x80, /* 0011111110 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x04, 0x00, /* 0000010000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 32 0x20 ' ' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 
0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 33 0x21 '!' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 34 0x22 '"' */ 0x00, 0x00, /* 0000000000 */ 0x63, 0x00, /* 0110001100 */ 0xf7, 0x80, /* 1111011110 */ 0xf7, 0x80, /* 1111011110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x63, 0x00, /* 0110001100 */ 0x42, 0x00, /* 0100001000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 35 0x23 '#' */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 36 0x24 '$' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x3e, 0x00, /* 0011111000 */ 0x3f, 0x00, /* 0011111100 */ 0x6f, 0x80, /* 0110111110 */ 0x6d, 0x80, /* 0110110110 */ 0x6c, 0x80, /* 0110110010 */ 0x3c, 0x00, /* 0011110000 */ 
0x0f, 0x00, /* 0000111100 */ 0x0d, 0x80, /* 0000110110 */ 0x4d, 0x80, /* 0100110110 */ 0x6d, 0x80, /* 0110110110 */ 0x7f, 0x00, /* 0111111100 */ 0x3e, 0x00, /* 0011111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 37 0x25 '%' */ 0x00, 0x00, /* 0000000000 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0x00, /* 0111101100 */ 0x7b, 0x00, /* 0111101100 */ 0x36, 0x00, /* 0011011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x1b, 0x00, /* 0001101100 */ 0x37, 0x80, /* 0011011110 */ 0x37, 0x80, /* 0011011110 */ 0x63, 0x00, /* 0110001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 38 0x26 '&' */ 0x00, 0x00, /* 0000000000 */ 0x07, 0x00, /* 0000011100 */ 0x0f, 0x80, /* 0000111110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x0f, 0x80, /* 0000111110 */ 0x1e, 0x00, /* 0001111000 */ 0x3e, 0x00, /* 0011111000 */ 0x76, 0x00, /* 0111011000 */ 0x66, 0x40, /* 0110011001 */ 0x63, 0xc0, /* 0110001111 */ 0x63, 0x80, /* 0110001110 */ 0x63, 0x00, /* 0110001100 */ 0x3f, 0x80, /* 0011111110 */ 0x1c, 0xc0, /* 0001110011 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 39 0x27 ''' */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x78, 0x00, /* 0111100000 */ 0x78, 0x00, /* 0111100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x20, 0x00, /* 0010000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 40 0x28 '(' */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, 
/* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 41 0x29 ')' */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 42 0x2a '*' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x4c, 0x80, /* 0100110010 */ 0x6d, 0x80, /* 0110110110 */ 0x3f, 0x00, /* 0011111100 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x00, /* 0011111100 */ 0x6d, 0x80, /* 0110110110 */ 0x4c, 0x80, /* 0100110010 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 43 0x2b '+' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 
0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 44 0x2c ',' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x78, 0x00, /* 0111100000 */ 0x78, 0x00, /* 0111100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x40, 0x00, /* 0100000000 */ /* 45 0x2d '-' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 46 0x2e '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 47 0x2f '/' */ 0x00, 0x00, /* 0000000000 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 48 0x30 '0' */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x23, 0x00, /* 0010001100 */ 0x61, 0x80, /* 0110000110 */ 0x63, 0x80, /* 0110001110 */ 0x65, 0x80, /* 0110010110 */ 0x65, 0x80, /* 0110010110 */ 0x69, 0x80, /* 0110100110 */ 0x69, 0x80, /* 0110100110 */ 0x71, 0x80, /* 0111000110 */ 0x61, 0x00, /* 0110000100 */ 0x31, 0x00, /* 0011000100 */ 0x3e, 0x00, /* 0011111000 */ 0x1c, 0x00, /* 0001110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 49 0x31 '1' */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x0c, 0x00, /* 0000110000 */ 0x1c, 0x00, /* 0001110000 */ 0x3c, 0x00, /* 0011110000 */ 0x6c, 0x00, /* 0110110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 
0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 50 0x32 '2' */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x63, 0x80, /* 0110001110 */ 0x41, 0x80, /* 0100000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 51 0x33 '3' */ 0x00, 0x00, /* 0000000000 */ 0x1c, 0x00, /* 0001110000 */ 0x3e, 0x00, /* 0011111000 */ 0x47, 0x00, /* 0100011100 */ 0x03, 0x00, /* 0000001100 */ 0x07, 0x00, /* 0000011100 */ 0x06, 0x00, /* 0000011000 */ 0x0e, 0x00, /* 0000111000 */ 0x07, 0x00, /* 0000011100 */ 0x03, 0x00, /* 0000001100 */ 0x01, 0x80, /* 0000000110 */ 0x41, 0x80, /* 0100000110 */ 0x61, 0x80, /* 0110000110 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 52 0x34 '4' */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x0e, 0x00, /* 0000111000 */ 0x1e, 0x00, /* 0001111000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x66, 0x00, /* 0110011000 */ 0x66, 0x00, /* 0110011000 */ 0xc6, 0x00, /* 1100011000 */ 0xc6, 0x00, /* 1100011000 */ 0xff, 0x80, /* 1111111110 */ 0xff, 0x80, /* 1111111110 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 53 0x35 '5' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x7e, 0x00, /* 0111111000 */ 0x67, 0x00, 
/* 0110011100 */ 0x03, 0x80, /* 0000001110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x41, 0x80, /* 0100000110 */ 0x63, 0x00, /* 0110001100 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 54 0x36 '6' */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x6e, 0x00, /* 0110111000 */ 0x7f, 0x00, /* 0111111100 */ 0x73, 0x80, /* 0111001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x00, /* 0111000100 */ 0x3e, 0x00, /* 0011111000 */ 0x1c, 0x00, /* 0001110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 55 0x37 '7' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x80, /* 0001111110 */ 0x3f, 0x80, /* 0011111110 */ 0x61, 0x80, /* 0110000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 56 0x38 '8' */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x23, 0x00, /* 0010001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x31, 0x00, /* 0011000100 */ 0x1a, 0x00, /* 0001101000 */ 0x0c, 0x00, /* 0000110000 */ 0x16, 0x00, /* 0001011000 */ 0x23, 0x00, /* 0010001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x31, 0x00, /* 0011000100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 57 0x39 '9' */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 
*/ 0x17, 0x00, /* 0001011100 */ 0x23, 0x80, /* 0010001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0x80, /* 0011110110 */ 0x19, 0x80, /* 0001100110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 58 0x3a ':' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 59 0x3b ';' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x20, 0x00, /* 0010000000 */ /* 60 0x3c '<' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x00, 0x00, /* 0000000000 */ 
0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 61 0x3d '=' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 62 0x3e '>' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 63 0x3f '?' 
*/ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x3b, 0x80, /* 0011101110 */ 0x21, 0x80, /* 0010000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 64 0x40 '@' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x80, /* 0011000110 */ 0x65, 0x80, /* 0110010110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6f, 0x80, /* 0110111110 */ 0x60, 0x00, /* 0110000000 */ 0x31, 0x80, /* 0011000110 */ 0x3f, 0x80, /* 0011111110 */ 0x0f, 0x00, /* 0000111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 65 0x41 'A' */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x19, 0x80, /* 0001100110 */ 0x31, 0x80, /* 0011000110 */ 0x3f, 0x80, /* 0011111110 */ 0x31, 0x80, /* 0011000110 */ 0x61, 0x80, /* 0110000110 */ 0x60, 0xc0, /* 0110000011 */ 0x60, 0xc0, /* 0110000011 */ 0xf1, 0xc0, /* 1111000111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 66 0x42 'B' */ 0x00, 0x00, /* 0000000000 */ 0xfc, 0x00, /* 1111110000 */ 0x62, 0x00, /* 0110001000 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x66, 0x00, /* 0110011000 */ 0x7e, 0x00, /* 0111111000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 
0x63, 0x00, /* 0110001100 */ 0xfe, 0x00, /* 1111111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 67 0x43 'C' */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0x00, /* 0000111100 */ 0x11, 0x80, /* 0001000110 */ 0x20, 0x80, /* 0010000010 */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x20, 0x00, /* 0010000000 */ 0x30, 0x80, /* 0011000010 */ 0x19, 0x00, /* 0001100100 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 68 0x44 'D' */ 0x00, 0x00, /* 0000000000 */ 0xfc, 0x00, /* 1111110000 */ 0x67, 0x00, /* 0110011100 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x00, /* 0110000100 */ 0x66, 0x00, /* 0110011000 */ 0xf8, 0x00, /* 1111100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 69 0x45 'E' */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x31, 0x00, /* 0011000100 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 70 0x46 'F' */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x31, 0x00, /* 0011000100 */ 0x3f, 0x00, 
/* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x78, 0x00, /* 0111100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 71 0x47 'G' */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0x00, /* 0000111100 */ 0x11, 0x80, /* 0001000110 */ 0x20, 0x80, /* 0010000010 */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x67, 0xc0, /* 0110011111 */ 0x61, 0x80, /* 0110000110 */ 0x21, 0x80, /* 0010000110 */ 0x31, 0x80, /* 0011000110 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 72 0x48 'H' */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 73 0x49 'I' */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x00, /* 0011111100 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 74 0x4a 'J' */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x00, /* 0011111100 
*/ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x08, 0x00, /* 0000100000 */ 0x70, 0x00, /* 0111000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ /* 75 0x4b 'K' */ 0x00, 0x00, /* 0000000000 */ 0xf1, 0x80, /* 1111000110 */ 0x63, 0x00, /* 0110001100 */ 0x66, 0x00, /* 0110011000 */ 0x6c, 0x00, /* 0110110000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ 0x70, 0x00, /* 0111000000 */ 0x78, 0x00, /* 0111100000 */ 0x78, 0x00, /* 0111100000 */ 0x6c, 0x00, /* 0110110000 */ 0x66, 0x00, /* 0110011000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0xf0, 0xc0, /* 1111000011 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 76 0x4c 'L' */ 0x00, 0x00, /* 0000000000 */ 0x78, 0x00, /* 0111100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 77 0x4d 'M' */ 0x00, 0x00, /* 0000000000 */ 0xe0, 0xc0, /* 1110000011 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x73, 0x80, /* 0111001110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0xf3, 0xc0, /* 1111001111 */ 
0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 78 0x4e 'N' */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x79, 0x80, /* 0111100110 */ 0x79, 0x80, /* 0111100110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x67, 0x80, /* 0110011110 */ 0x67, 0x80, /* 0110011110 */ 0x63, 0x80, /* 0110001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 79 0x4f 'O' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x17, 0x00, /* 0001011100 */ 0x23, 0x00, /* 0010001100 */ 0x21, 0x80, /* 0010000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x21, 0x00, /* 0010000100 */ 0x31, 0x00, /* 0011000100 */ 0x1a, 0x00, /* 0001101000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 80 0x50 'P' */ 0x00, 0x00, /* 0000000000 */ 0xfe, 0x00, /* 1111111000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x63, 0x00, /* 0110001100 */ 0x7e, 0x00, /* 0111111000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0xf0, 0x00, /* 1111000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 81 0x51 'Q' */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x13, 0x00, /* 0001001100 */ 0x23, 0x00, /* 0010001100 */ 0x21, 0x80, /* 0010000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, 
/* 0110000110 */ 0x31, 0x80, /* 0011000110 */ 0x3b, 0x00, /* 0011101100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x26, 0x00, /* 0010011000 */ 0x03, 0x80, /* 0000001110 */ 0x00, 0x00, /* 0000000000 */ /* 82 0x52 'R' */ 0x00, 0x00, /* 0000000000 */ 0xfe, 0x00, /* 1111111000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x00, /* 0110000100 */ 0x7e, 0x00, /* 0111111000 */ 0x78, 0x00, /* 0111100000 */ 0x6c, 0x00, /* 0110110000 */ 0x6e, 0x00, /* 0110111000 */ 0x67, 0x00, /* 0110011100 */ 0x63, 0x80, /* 0110001110 */ 0x61, 0xc0, /* 0110000111 */ 0xf0, 0xc0, /* 1111000011 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 83 0x53 'S' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x01, 0x80, /* 0000000110 */ 0x41, 0x80, /* 0100000110 */ 0x63, 0x00, /* 0110001100 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 84 0x54 'T' */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x4c, 0x80, /* 0100110010 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 85 0x55 'U' */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 
*/ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 86 0x56 'V' */ 0x00, 0x00, /* 0000000000 */ 0xe1, 0xc0, /* 1110000111 */ 0xc0, 0xc0, /* 1100000011 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x12, 0x00, /* 0001001000 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 87 0x57 'W' */ 0x00, 0x00, /* 0000000000 */ 0xe1, 0xc0, /* 1110000111 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0xe0, 0xc0, /* 1110000011 */ 0x61, 0x80, /* 0110000110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x7f, 0x80, /* 0111111110 */ 0x77, 0x00, /* 0111011100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 88 0x58 'X' */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x1c, 0x00, /* 0001110000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0xf7, 0x80, /* 1111011110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 
0x00, 0x00, /* 0000000000 */ /* 89 0x59 'Y' */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 90 0x5a 'Z' */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x80, /* 0011111110 */ 0x21, 0x80, /* 0010000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x3f, 0x80, /* 0011111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 91 0x5b '[' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 92 0x5c '\' */ 0x00, 0x00, /* 0000000000 */ 0xc0, 0x00, /* 1100000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, 
/* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x00, 0xc0, /* 0000000011 */ 0x00, 0x00, /* 0000000000 */ /* 93 0x5d ']' */ 0x00, 0x00, /* 0000000000 */ 0x3e, 0x00, /* 0011111000 */ 0x3e, 0x00, /* 0011111000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x3e, 0x00, /* 0011111000 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 94 0x5e '^' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 95 0x5f '_' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ /* 96 0x60 '`' */ 0x04, 0x00, /* 0000010000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 
*/ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 97 0x61 'a' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 98 0x62 'b' */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0xe0, 0x00, /* 1110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x66, 0x00, /* 0110011000 */ 0x6f, 0x00, /* 0110111100 */ 0x73, 0x80, /* 0111001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x7b, 0x00, /* 0111101100 */ 0x4e, 0x00, /* 0100111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 99 0x63 'c' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x37, 0x00, /* 0011011100 */ 0x23, 0x00, /* 0010001100 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x71, 0x00, /* 0111000100 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 100 0x64 'd' */ 0x01, 
0x80, /* 0000000110 */ 0x03, 0x80, /* 0000001110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x0d, 0x80, /* 0000110110 */ 0x37, 0x80, /* 0011011110 */ 0x23, 0x80, /* 0010001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x35, 0x80, /* 0011010110 */ 0x19, 0xc0, /* 0001100111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 101 0x65 'e' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 102 0x66 'f' */ 0x07, 0x00, /* 0000011100 */ 0x09, 0x80, /* 0000100110 */ 0x09, 0x80, /* 0000100110 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x7f, 0x00, /* 0111111100 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 103 0x67 'g' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1c, 0x80, /* 0001110010 */ 0x37, 0x80, /* 0011011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x36, 0x00, /* 0011011000 */ 0x3c, 0x00, /* 0011110000 */ 0x60, 0x00, /* 0110000000 */ 0x7f, 0x00, /* 0111111100 */ 0x3f, 
0x80, /* 0011111110 */ 0x21, 0x80, /* 0010000110 */ 0x40, 0x80, /* 0100000010 */ 0x7f, 0x00, /* 0111111100 */ 0x3e, 0x00, /* 0011111000 */ /* 104 0x68 'h' */ 0x10, 0x00, /* 0001000000 */ 0x30, 0x00, /* 0011000000 */ 0x70, 0x00, /* 0111000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x37, 0x00, /* 0011011100 */ 0x3b, 0x80, /* 0011101110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0xc0, /* 0111101111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 105 0x69 'i' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 106 0x6a 'j' */ 0x00, 0x00, /* 0000000000 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x07, 0x80, /* 0000011110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x41, 0x80, /* 0100000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x3f, 0x00, /* 0011111100 */ 0x1c, 0x00, /* 0001110000 */ /* 107 0x6b 'k' */ 0x60, 0x00, /* 0110000000 */ 0xe0, 0x00, /* 1110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x63, 0x80, /* 0110001110 */ 0x66, 0x00, /* 0110011000 */ 0x6c, 0x00, /* 
0110110000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ 0x78, 0x00, /* 0111100000 */ 0x6c, 0x00, /* 0110110000 */ 0x6e, 0x00, /* 0110111000 */ 0x67, 0x00, /* 0110011100 */ 0xf3, 0x80, /* 1111001110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 108 0x6c 'l' */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 109 0x6d 'm' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xdb, 0x80, /* 1101101110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0xed, 0xc0, /* 1110110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 110 0x6e 'n' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x6f, 0x00, /* 0110111100 */ 0x7b, 0x80, /* 0111101110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0xc0, /* 0111101111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 111 0x6f 'o' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 
0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 112 0x70 'p' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xde, 0x00, /* 1101111000 */ 0x76, 0x00, /* 0111011000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x7b, 0x00, /* 0111101100 */ 0x7e, 0x00, /* 0111111000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0xf0, 0x00, /* 1111000000 */ /* 113 0x71 'q' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0xc0, /* 0000111011 */ 0x1b, 0x80, /* 0001101110 */ 0x33, 0x80, /* 0011001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x3b, 0x80, /* 0011101110 */ 0x1f, 0x80, /* 0001111110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0xc0, /* 0000001111 */ /* 114 0x72 'r' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x73, 0x00, /* 0111001100 */ 0x35, 0x80, /* 0011010110 */ 0x39, 0x80, /* 0011100110 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x78, 0x00, /* 
0111100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 115 0x73 's' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x00, /* 0011111100 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x00, /* 0110000100 */ 0x70, 0x00, /* 0111000000 */ 0x38, 0x00, /* 0011100000 */ 0x0e, 0x00, /* 0000111000 */ 0x07, 0x00, /* 0000011100 */ 0x43, 0x00, /* 0100001100 */ 0x63, 0x00, /* 0110001100 */ 0x7e, 0x00, /* 0111111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 116 0x74 't' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x18, 0x00, /* 0001100000 */ 0x7f, 0x80, /* 0111111110 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x1c, 0x80, /* 0001110010 */ 0x0f, 0x00, /* 0000111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 117 0x75 'u' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x77, 0x00, /* 0111011100 */ 0x3d, 0x80, /* 0011110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 118 0x76 'v' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf1, 0xc0, /* 1111000111 */ 0x60, 0xc0, /* 0110000011 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 
0011000110 */ 0x19, 0x80, /* 0001100110 */ 0x1b, 0x00, /* 0001101100 */ 0x0f, 0x00, /* 0000111100 */ 0x0f, 0x00, /* 0000111100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 119 0x77 'w' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xe3, 0xc0, /* 1110001111 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0x6b, 0x00, /* 0110101100 */ 0x6b, 0x00, /* 0110101100 */ 0x7e, 0x00, /* 0111111000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 120 0x78 'x' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x1c, 0x00, /* 0001110000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x66, 0x00, /* 0110011000 */ 0x63, 0x00, /* 0110001100 */ 0xf7, 0x80, /* 1111011110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 121 0x79 'y' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x1b, 0x00, /* 0001101100 */ 0x1e, 0x00, /* 0001111000 */ 0x0e, 0x00, /* 0000111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ /* 122 0x7a 'z' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 
0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x61, 0x80, /* 0110000110 */ 0x43, 0x00, /* 0100001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x80, /* 0110000010 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 123 0x7b '{' */ 0x07, 0x00, /* 0000011100 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x70, 0x00, /* 0111000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x07, 0x00, /* 0000011100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 124 0x7c '|' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ /* 125 0x7d '}' */ 0x38, 0x00, /* 0011100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x80, /* 0000001110 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x38, 0x00, /* 0011100000 */ 0x00, 0x00, /* 
0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 126 0x7e '~' */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x80, /* 0001100010 */ 0x3d, 0x80, /* 0011110110 */ 0x6f, 0x00, /* 0110111100 */ 0x46, 0x00, /* 0100011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 127 0x7f '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x12, 0x00, /* 0001001000 */ 0x21, 0x00, /* 0010000100 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x80, /* 0100000010 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 128 0x80 '.' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x21, 0x80, /* 0010000110 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x60, 0x80, /* 0110000010 */ 0x31, 0x00, /* 0011000100 */ 0x1e, 0x00, /* 0001111000 */ 0x08, 0x00, /* 0000100000 */ 0x04, 0x00, /* 0000010000 */ 0x02, 0x00, /* 0000001000 */ 0x02, 0x00, /* 0000001000 */ 0x1c, 0x00, /* 0001110000 */ /* 129 0x81 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7b, 0x80, /* 0111101110 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x3b, 0x00, /* 0011101100 */ 0x1c, 0x80, /* 0001110010 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 130 0x82 '.' */ 0x00, 0x00, /* 0000000000 */ 0x01, 0x00, /* 0000000100 */ 0x02, 0x00, /* 0000001000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 131 0x83 '.' */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1b, 0x00, /* 0001101100 */ 0x31, 0x80, /* 0011000110 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 132 0x84 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 133 0x85 '.' */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 134 0x86 '.' */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x1b, 0x00, /* 0001101100 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 135 0x87 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x20, 0x80, /* 0010000010 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x70, 0x80, /* 0111000010 */ 0x30, 0x80, /* 0011000010 */ 0x1f, 0x00, /* 0001111100 */ 0x04, 0x00, /* 0000010000 */ 0x02, 0x00, /* 0000001000 */ 0x01, 0x00, /* 0000000100 */ 0x0e, 0x00, /* 0000111000 */ /* 136 0x88 '.' */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1b, 0x00, /* 0001101100 */ 0x31, 0x80, /* 0011000110 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 137 0x89 '.' */ 0x00, 0x00, /* 0000000000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 138 0x8a '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 139 0x8b '.' */ 0x00, 0x00, /* 0000000000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 140 0x8c '.' */ 0x08, 0x00, /* 0000100000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x63, 0x00, /* 0110001100 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 141 0x8d '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 142 0x8e '.' */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x04, 0x00, /* 0000010000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x19, 0x00, /* 0001100100 */ 0x19, 0x00, /* 0001100100 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 143 0x8f '.' */ 0x04, 0x00, /* 0000010000 */ 0x0a, 0x00, /* 0000101000 */ 0x0a, 0x00, /* 0000101000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x19, 0x00, /* 0001100100 */ 0x19, 0x00, /* 0001100100 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 144 0x90 '.' 
*/ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x00, /* 0011000000 */ 0x31, 0x00, /* 0011000100 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 145 0x91 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3b, 0x80, /* 0011101110 */ 0x6c, 0xc0, /* 0110110011 */ 0x4c, 0xc0, /* 0100110011 */ 0x0c, 0xc0, /* 0000110011 */ 0x3f, 0xc0, /* 0011111111 */ 0x6c, 0x00, /* 0110110000 */ 0xcc, 0x00, /* 1100110000 */ 0xcc, 0x00, /* 1100110000 */ 0xee, 0xc0, /* 1110111011 */ 0x7b, 0x80, /* 0111101110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 146 0x92 '.' */ 0x00, 0x00, /* 0000000000 */ 0x07, 0xc0, /* 0000011111 */ 0x0e, 0x40, /* 0000111001 */ 0x0e, 0x40, /* 0000111001 */ 0x0e, 0x00, /* 0000111000 */ 0x16, 0x00, /* 0001011000 */ 0x16, 0x80, /* 0001011010 */ 0x17, 0x80, /* 0001011110 */ 0x16, 0x80, /* 0001011010 */ 0x3e, 0x00, /* 0011111000 */ 0x26, 0x00, /* 0010011000 */ 0x26, 0x00, /* 0010011000 */ 0x46, 0x40, /* 0100011001 */ 0x46, 0x40, /* 0100011001 */ 0xef, 0xc0, /* 1110111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 147 0x93 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x08, 0x00, /* 0000100000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 148 0x94 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 149 0x95 '.' */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 150 0x96 '.' 
*/ 0x08, 0x00, /* 0000100000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x63, 0x00, /* 0110001100 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x77, 0x00, /* 0111011100 */ 0x3d, 0x80, /* 0011110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 151 0x97 '.' */ 0x00, 0x00, /* 0000000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x77, 0x00, /* 0111011100 */ 0x3d, 0x80, /* 0011110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 152 0x98 '.' */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x1b, 0x00, /* 0001101100 */ 0x1e, 0x00, /* 0001111000 */ 0x0e, 0x00, /* 0000111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ /* 153 0x99 '.' 
*/ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x0c, 0x00, /* 0000110000 */ 0x17, 0x00, /* 0001011100 */ 0x23, 0x00, /* 0010001100 */ 0x21, 0x80, /* 0010000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x21, 0x00, /* 0010000100 */ 0x31, 0x00, /* 0011000100 */ 0x1a, 0x00, /* 0001101000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 154 0x9a '.' */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0xf1, 0xc0, /* 1111000111 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x71, 0x00, /* 0111000100 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 155 0x9b '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x1f, 0x80, /* 0001111110 */ 0x36, 0x80, /* 0011011010 */ 0x26, 0x00, /* 0010011000 */ 0x66, 0x00, /* 0110011000 */ 0x66, 0x00, /* 0110011000 */ 0x66, 0x00, /* 0110011000 */ 0x66, 0x00, /* 0110011000 */ 0x76, 0x00, /* 0111011000 */ 0x36, 0x80, /* 0011011010 */ 0x1f, 0x80, /* 0001111110 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ /* 156 0x9c '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x3b, 0x00, /* 0011101100 */ 0x33, 0x00, /* 0011001100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x7e, 0x00, /* 0111111000 */ 0x7e, 0x00, /* 0111111000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x7c, 0x80, /* 0111110010 */ 0x7f, 0x80, /* 0111111110 */ 0x43, 0x00, /* 0100001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 157 0x9d '.' */ 0x00, 0x00, /* 0000000000 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x80, /* 0100000010 */ 0x21, 0x00, /* 0010000100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 158 0x9e '.' */ 0x00, 0x00, /* 0000000000 */ 0xbf, 0x00, /* 1011111100 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x80, /* 0100000010 */ 0x7f, 0x00, /* 0111111100 */ 0x40, 0x00, /* 0100000000 */ 0x48, 0x00, /* 0100100000 */ 0x48, 0x00, /* 0100100000 */ 0x5e, 0x00, /* 0101111000 */ 0x48, 0x00, /* 0100100000 */ 0x48, 0x00, /* 0100100000 */ 0x48, 0x00, /* 0100100000 */ 0x48, 0x80, /* 0100100010 */ 0x47, 0x00, /* 0100011100 */ 0xe0, 0x00, /* 1110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 159 0x9f '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x04, 0x80, /* 0000010010 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x09, 0x00, /* 0000100100 */ 0x3e, 0x00, /* 0011111000 */ 0x48, 0x00, /* 0100100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x90, 0x00, /* 1001000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ /* 160 0xa0 '.' */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 161 0xa1 '.' */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 162 0xa2 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 163 0xa3 '.' */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x77, 0x00, /* 0111011100 */ 0x3d, 0x80, /* 0011110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 164 0xa4 '.' */ 0x00, 0x00, /* 0000000000 */ 0x38, 0x80, /* 0011100010 */ 0x7f, 0x80, /* 0111111110 */ 0x47, 0x00, /* 0100011100 */ 0x00, 0x00, /* 0000000000 */ 0x6f, 0x00, /* 0110111100 */ 0x7b, 0x80, /* 0111101110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0xc0, /* 0111101111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 165 0xa5 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x38, 0x80, /* 0011100010 */ 0x7f, 0x80, /* 0111111110 */ 0x47, 0x00, /* 0100011100 */ 0x00, 0x00, /* 0000000000 */ 0xe3, 0xc0, /* 1110001111 */ 0x71, 0x80, /* 0111000110 */ 0x79, 0x80, /* 0111100110 */ 0x79, 0x80, /* 0111100110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x67, 0x80, /* 0110011110 */ 0x63, 0x80, /* 0110001110 */ 0x61, 0x80, /* 0110000110 */ 0xf0, 0xc0, /* 1111000011 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 166 0xa6 '.' */ 0x00, 0x00, /* 0000000000 */ 0x3e, 0x00, /* 0011111000 */ 0x63, 0x00, /* 0110001100 */ 0x03, 0x00, /* 0000001100 */ 0x0f, 0x00, /* 0000111100 */ 0x33, 0x00, /* 0011001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x67, 0x00, /* 0110011100 */ 0x3b, 0x80, /* 0011101110 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 167 0xa7 '.' */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x33, 0x00, /* 0011001100 */ 0x21, 0x80, /* 0010000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x00, /* 0110000100 */ 0x33, 0x00, /* 0011001100 */ 0x1c, 0x00, /* 0001110000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 168 0xa8 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x80, /* 0110000010 */ 0x73, 0x80, /* 0111001110 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 169 0xa9 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 170 0xaa '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 171 0xab '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0x20, 0x00, /* 0010000000 */ 0x20, 0x80, /* 0010000010 */ 0x21, 0x00, /* 0010000100 */ 0x22, 0x00, /* 0010001000 */ 0x74, 0x00, /* 0111010000 */ 0x08, 0x00, /* 0000100000 */ 0x17, 0x00, /* 0001011100 */ 0x28, 0x80, /* 0010100010 */ 0x43, 0x00, /* 0100001100 */ 0x04, 0x00, /* 0000010000 */ 0x08, 0x00, /* 0000100000 */ 0x0f, 0x80, /* 0000111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 172 0xac '.' */ 0x00, 0x00, /* 0000000000 */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0x20, 0x00, /* 0010000000 */ 0x20, 0x80, /* 0010000010 */ 0x21, 0x00, /* 0010000100 */ 0x22, 0x00, /* 0010001000 */ 0x74, 0x00, /* 0111010000 */ 0x09, 0x00, /* 0000100100 */ 0x13, 0x00, /* 0001001100 */ 0x25, 0x00, /* 0010010100 */ 0x49, 0x00, /* 0100100100 */ 0x1f, 0x80, /* 0001111110 */ 0x01, 0x00, /* 0000000100 */ 0x01, 0x00, /* 0000000100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 173 0xad '.' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 174 0xae '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0d, 0x80, /* 0000110110 */ 0x1b, 0x00, /* 0001101100 */ 0x36, 0x00, /* 0011011000 */ 0x6c, 0x00, /* 0110110000 */ 0xd8, 0x00, /* 1101100000 */ 0x6c, 0x00, /* 0110110000 */ 0x36, 0x00, /* 0011011000 */ 0x1b, 0x00, /* 0001101100 */ 0x0d, 0x80, /* 0000110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 175 0xaf '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x6c, 0x00, /* 0110110000 */ 0x36, 0x00, /* 0011011000 */ 0x1b, 0x00, /* 0001101100 */ 0x0d, 0x80, /* 0000110110 */ 0x06, 0xc0, /* 0000011011 */ 0x0d, 0x80, /* 0000110110 */ 0x1b, 0x00, /* 0001101100 */ 0x36, 0x00, /* 0011011000 */ 0x6c, 0x00, /* 0110110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 176 0xb0 '.' */ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ 0x18, 0x40, /* 0001100001 */ 0x10, 0x40, /* 0001000001 */ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ 0x18, 0x40, /* 0001100001 */ 0x10, 0x40, /* 0001000001 */ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ 0x18, 0x40, /* 0001100001 */ 0x10, 0x40, /* 0001000001 */ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ 0x18, 0x40, /* 0001100001 */ 0x10, 0x40, /* 0001000001 */ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ /* 177 0xb1 '.' 
*/ 0x11, 0x00, /* 0001000100 */ 0xbb, 0x80, /* 1011101110 */ 0x11, 0x00, /* 0001000100 */ 0x44, 0x40, /* 0100010001 */ 0xee, 0xc0, /* 1110111011 */ 0x44, 0x40, /* 0100010001 */ 0x11, 0x00, /* 0001000100 */ 0xbb, 0x80, /* 1011101110 */ 0x11, 0x00, /* 0001000100 */ 0x44, 0x40, /* 0100010001 */ 0xee, 0xc0, /* 1110111011 */ 0x44, 0x40, /* 0100010001 */ 0x11, 0x00, /* 0001000100 */ 0xbb, 0x80, /* 1011101110 */ 0x11, 0x00, /* 0001000100 */ 0x44, 0x40, /* 0100010001 */ 0xee, 0xc0, /* 1110111011 */ 0x44, 0x40, /* 0100010001 */ /* 178 0xb2 '.' */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ 0xe7, 0x80, /* 1110011110 */ 0xef, 0x80, /* 1110111110 */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ 0xe7, 0x80, /* 1110011110 */ 0xef, 0x80, /* 1110111110 */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ 0xe7, 0x80, /* 1110011110 */ 0xef, 0x80, /* 1110111110 */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ 0xe7, 0x80, /* 1110011110 */ 0xef, 0x80, /* 1110111110 */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ /* 179 0xb3 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 180 0xb4 '.' 
*/ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 181 0xb5 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 182 0xb6 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 183 0xb7 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 184 0xb8 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 185 0xb9 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x03, 0x00, /* 0000001100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 186 0xba '.' 
*/ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 187 0xbb '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x03, 0x00, /* 0000001100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 188 0xbc '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x03, 0x00, /* 0000001100 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 189 0xbd '.' 
*/ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 190 0xbe '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 191 0xbf '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 192 0xc0 '.' 
*/ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 193 0xc1 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 194 0xc2 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 195 0xc3 '.' 
*/ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 196 0xc4 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 197 0xc5 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 198 0xc6 '.' 
*/ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 199 0xc7 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 200 0xc8 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x18, 0x00, /* 0001100000 */ 0x1f, 0xc0, /* 0001111111 */ 0x1f, 0xc0, /* 0001111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 201 0xc9 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0xc0, /* 0001111111 */ 0x1f, 0xc0, /* 0001111111 */ 0x18, 0x00, /* 0001100000 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 202 0xca '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0xc0, /* 1111101111 */ 0xfb, 0xc0, /* 1111101111 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 203 0xcb '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0xfb, 0xc0, /* 1111101111 */ 0xfb, 0xc0, /* 1111101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 204 0xcc '.' 
*/ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x18, 0x00, /* 0001100000 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 205 0xcd '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 206 0xce '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0xc0, /* 1111101111 */ 0xfb, 0xc0, /* 1111101111 */ 0x00, 0x00, /* 0000000000 */ 0xfb, 0xc0, /* 1111101111 */ 0xfb, 0xc0, /* 1111101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 207 0xcf '.' 
*/ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 208 0xd0 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 209 0xd1 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 210 0xd2 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 211 0xd3 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1f, 0xc0, /* 0001111111 */ 0x1f, 0xc0, /* 0001111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 212 0xd4 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 213 0xd5 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 214 0xd6 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0xc0, /* 0001111111 */ 0x1f, 0xc0, /* 0001111111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 215 0xd7 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 216 0xd8 '.' 
*/ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 217 0xd9 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 218 0xda '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 219 0xdb '.' 
*/ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ /* 220 0xdc '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ /* 221 0xdd '.' */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ /* 222 0xde '.' 
*/ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ /* 223 0xdf '.' */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 224 0xe0 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1c, 0x80, /* 0001110010 */ 0x35, 0x80, /* 0011010110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x37, 0x80, /* 0011011110 */ 0x1c, 0x80, /* 0001110010 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 225 0xe1 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x63, 0x00, /* 0110001100 */ 0x6f, 0x00, /* 0110111100 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x63, 0x00, /* 0110001100 */ 0x6e, 0x00, /* 0110111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 226 0xe2 '.' */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 227 0xe3 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 228 0xe4 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x80, /* 1111111110 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x61, 0x80, /* 0110000110 */ 0xff, 0x80, /* 1111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 229 0xe5 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0xc0, /* 0001111111 */ 0x36, 0x00, /* 0011011000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 230 0xe6 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x6d, 0x80, /* 0110110110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0xc0, 0x00, /* 1100000000 */ /* 231 0xe7 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x01, 0x80, /* 0000000110 */ 0x36, 0x40, /* 0011011001 */ 0x5e, 0x00, /* 0101111000 */ 0x8c, 0x00, /* 1000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 232 0xe8 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 233 0xe9 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x60, 0xc0, /* 0110000011 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0xc0, /* 0111111111 */ 0x60, 0xc0, /* 0110000011 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x1f, 0x00, /* 0001111100 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 234 0xea '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 235 0xeb '.' */ 0x00, 0x00, /* 0000000000 */ 0x07, 0x00, /* 0000011100 */ 0x1f, 0x80, /* 0001111110 */ 0x30, 0xc0, /* 0011000011 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3e, 0x00, /* 0011111000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc3, 0x00, /* 1100001100 */ 0xc3, 0x00, /* 1100001100 */ 0x66, 0x00, /* 0110011000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 236 0xec '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x6d, 0x80, /* 0110110110 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0x6d, 0x80, /* 0110110110 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 237 0xed '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x37, 0x00, /* 0011011100 */ 0x6d, 0x80, /* 0110110110 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0x6d, 0x80, /* 0110110110 */ 0x3b, 0x00, /* 0011101100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ /* 238 0xee '.' */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x80, /* 0000001110 */ 0x0e, 0x00, /* 0000111000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x3f, 0x80, /* 0011111110 */ 0x3f, 0x80, /* 0011111110 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x0e, 0x00, /* 0000111000 */ 0x03, 0x80, /* 0000001110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 239 0xef '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 240 0xf0 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 241 0xf1 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 242 0xf2 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xe0, 0x00, /* 1110000000 */ 0x38, 0x00, /* 0011100000 */ 0x0e, 0x00, /* 0000111000 */ 0x03, 0x80, /* 0000001110 */ 0x0e, 0x00, /* 0000111000 */ 0x38, 0x00, /* 0011100000 */ 0xe0, 0x00, /* 1110000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 243 0xf3 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x80, /* 0000001110 */ 0x0e, 0x00, /* 0000111000 */ 0x38, 0x00, /* 0011100000 */ 0xe0, 0x00, /* 1110000000 */ 0x38, 0x00, /* 0011100000 */ 0x0e, 0x00, /* 0000111000 */ 0x03, 0x80, /* 0000001110 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x80, /* 1111111110 */ 0xff, 0x80, /* 1111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 244 0xf4 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ /* 245 0xf5 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x00, 0x00, /* 0000000000 */ /* 246 0xf6 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 247 0xf7 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x38, 0x00, /* 0011100000 */ 0x6c, 0x00, /* 0110110000 */ 0x06, 0xc0, /* 0000011011 */ 0x03, 0x80, /* 0000001110 */ 0x38, 0x00, /* 0011100000 */ 0x6c, 0x00, /* 0110110000 */ 0x06, 0xc0, /* 0000011011 */ 0x03, 0x80, /* 0000001110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 248 0xf8 '.' */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 249 0xf9 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 250 0xfa '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 251 0xfb '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xcc, 0x00, /* 1100110000 */ 0x6c, 0x00, /* 0110110000 */ 0x3c, 0x00, /* 0011110000 */ 0x1c, 0x00, /* 0001110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 252 0xfc '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x27, 0x00, /* 0010011100 */ 0x7b, 0x00, /* 0111101100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x7b, 0x80, /* 0111101110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 253 0xfd '.' */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x63, 0x00, /* 0110001100 */ 0x43, 0x00, /* 0100001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 254 0xfe '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 255 0xff '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ }; const struct font_desc font_10x18 = { .idx = FONT10x18_IDX, .name = "10x18", .width = 10, .height = 18, .data = fontdata_10x18, #ifdef __sparc__ .pref = 5, #else .pref = -1, #endif };
gpl-2.0
rbauduin/mptcp
drivers/usb/musb/omap2430.c
106
19015
/* * Copyright (C) 2005-2007 by Texas Instruments * Some code has been taken from tusb6010.c * Copyrights for that are attributable to: * Copyright (C) 2006 Nokia Corporation * Tony Lindgren <tony@atomide.com> * * This file is part of the Inventra Controller Driver for Linux. * * The Inventra Controller Driver for Linux is free software; you * can redistribute it and/or modify it under the terms of the GNU * General Public License version 2 as published by the Free Software * Foundation. * * The Inventra Controller Driver for Linux is distributed in * the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public * License for more details. * * You should have received a copy of the GNU General Public License * along with The Inventra Controller Driver for Linux ; if not, * write to the Free Software Foundation, Inc., 59 Temple Place, * Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/list.h> #include <linux/io.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/usb/musb-omap.h> #include <linux/usb/omap_control_usb.h> #include <linux/of_platform.h> #include "musb_core.h" #include "omap2430.h" struct omap2430_glue { struct device *dev; struct platform_device *musb; enum omap_musb_vbus_id_status status; struct work_struct omap_musb_mailbox_work; struct device *control_otghs; }; #define glue_to_musb(g) platform_get_drvdata(g->musb) static struct omap2430_glue *_glue; static struct timer_list musb_idle_timer; static void musb_do_idle(unsigned long _musb) { struct musb *musb = (void *)_musb; unsigned long flags; u8 power; u8 devctl; spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->state) { case 
OTG_STATE_A_WAIT_BCON: devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) { musb->xceiv->state = OTG_STATE_B_IDLE; MUSB_DEV_MODE(musb); } else { musb->xceiv->state = OTG_STATE_A_IDLE; MUSB_HST_MODE(musb); } break; case OTG_STATE_A_SUSPEND: /* finish RESUME signaling? */ if (musb->port1_status & MUSB_PORT_STAT_RESUME) { power = musb_readb(musb->mregs, MUSB_POWER); power &= ~MUSB_POWER_RESUME; dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power); musb_writeb(musb->mregs, MUSB_POWER, power); musb->is_active = 1; musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | MUSB_PORT_STAT_RESUME); musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; usb_hcd_poll_rh_status(musb->hcd); /* NOTE: it might really be A_WAIT_BCON ... */ musb->xceiv->state = OTG_STATE_A_HOST; } break; case OTG_STATE_A_HOST: devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) musb->xceiv->state = OTG_STATE_B_IDLE; else musb->xceiv->state = OTG_STATE_A_WAIT_BCON; default: break; } spin_unlock_irqrestore(&musb->lock, flags); } static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout) { unsigned long default_timeout = jiffies + msecs_to_jiffies(3); static unsigned long last_timer; if (timeout == 0) timeout = default_timeout; /* Never idle if active, or when VBUS timeout is not set as host */ if (musb->is_active || ((musb->a_wait_bcon == 0) && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { dev_dbg(musb->controller, "%s active, deleting timer\n", usb_otg_state_string(musb->xceiv->state)); del_timer(&musb_idle_timer); last_timer = jiffies; return; } if (time_after(last_timer, timeout)) { if (!timer_pending(&musb_idle_timer)) last_timer = timeout; else { dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n"); return; } } last_timer = timeout; dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n", usb_otg_state_string(musb->xceiv->state), (unsigned long)jiffies_to_msecs(timeout - 
jiffies)); mod_timer(&musb_idle_timer, timeout); } static void omap2430_musb_set_vbus(struct musb *musb, int is_on) { struct usb_otg *otg = musb->xceiv->otg; u8 devctl; unsigned long timeout = jiffies + msecs_to_jiffies(1000); /* HDRC controls CPEN, but beware current surges during device * connect. They can trigger transient overcurrent conditions * that must be ignored. */ devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if (is_on) { if (musb->xceiv->state == OTG_STATE_A_IDLE) { int loops = 100; /* start the session */ devctl |= MUSB_DEVCTL_SESSION; musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); /* * Wait for the musb to set as A device to enable the * VBUS */ while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) { mdelay(5); cpu_relax(); if (time_after(jiffies, timeout) || loops-- <= 0) { dev_err(musb->controller, "configured as A device timeout"); break; } } otg_set_vbus(otg, 1); } else { musb->is_active = 1; otg->default_a = 1; musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; devctl |= MUSB_DEVCTL_SESSION; MUSB_HST_MODE(musb); } } else { musb->is_active = 0; /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and * jumping right to B_IDLE... 
*/ otg->default_a = 0; musb->xceiv->state = OTG_STATE_B_IDLE; devctl &= ~MUSB_DEVCTL_SESSION; MUSB_DEV_MODE(musb); } musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); dev_dbg(musb->controller, "VBUS %s, devctl %02x " /* otg %3x conf %08x prcm %08x */ "\n", usb_otg_state_string(musb->xceiv->state), musb_readb(musb->mregs, MUSB_DEVCTL)); } static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode) { u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); devctl |= MUSB_DEVCTL_SESSION; musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); return 0; } static inline void omap2430_low_level_exit(struct musb *musb) { u32 l; /* in any role */ l = musb_readl(musb->mregs, OTG_FORCESTDBY); l |= ENABLEFORCE; /* enable MSTANDBY */ musb_writel(musb->mregs, OTG_FORCESTDBY, l); } static inline void omap2430_low_level_init(struct musb *musb) { u32 l; l = musb_readl(musb->mregs, OTG_FORCESTDBY); l &= ~ENABLEFORCE; /* disable MSTANDBY */ musb_writel(musb->mregs, OTG_FORCESTDBY, l); } void omap_musb_mailbox(enum omap_musb_vbus_id_status status) { struct omap2430_glue *glue = _glue; if (!glue) { pr_err("%s: musb core is not yet initialized\n", __func__); return; } glue->status = status; if (!glue_to_musb(glue)) { pr_err("%s: musb core is not yet ready\n", __func__); return; } schedule_work(&glue->omap_musb_mailbox_work); } EXPORT_SYMBOL_GPL(omap_musb_mailbox); static void omap_musb_set_mailbox(struct omap2430_glue *glue) { struct musb *musb = glue_to_musb(glue); struct device *dev = musb->controller; struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); struct omap_musb_board_data *data = pdata->board_data; struct usb_otg *otg = musb->xceiv->otg; switch (glue->status) { case OMAP_MUSB_ID_GROUND: dev_dbg(dev, "ID GND\n"); otg->default_a = true; musb->xceiv->state = OTG_STATE_A_IDLE; musb->xceiv->last_event = USB_EVENT_ID; if (musb->gadget_driver) { pm_runtime_get_sync(dev); omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST); omap2430_musb_set_vbus(musb, 1); } break; case 
OMAP_MUSB_VBUS_VALID: dev_dbg(dev, "VBUS Connect\n"); otg->default_a = false; musb->xceiv->state = OTG_STATE_B_IDLE; musb->xceiv->last_event = USB_EVENT_VBUS; if (musb->gadget_driver) pm_runtime_get_sync(dev); omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); break; case OMAP_MUSB_ID_FLOAT: case OMAP_MUSB_VBUS_OFF: dev_dbg(dev, "VBUS Disconnect\n"); musb->xceiv->last_event = USB_EVENT_NONE; if (musb->gadget_driver) { omap2430_musb_set_vbus(musb, 0); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } if (data->interface_type == MUSB_INTERFACE_UTMI) otg_set_vbus(musb->xceiv->otg, 0); omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DISCONNECT); break; default: dev_dbg(dev, "ID float\n"); } atomic_notifier_call_chain(&musb->xceiv->notifier, musb->xceiv->last_event, NULL); } static void omap_musb_mailbox_work(struct work_struct *mailbox_work) { struct omap2430_glue *glue = container_of(mailbox_work, struct omap2430_glue, omap_musb_mailbox_work); omap_musb_set_mailbox(glue); } static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci) { unsigned long flags; irqreturn_t retval = IRQ_NONE; struct musb *musb = __hci; spin_lock_irqsave(&musb->lock, flags); musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); if (musb->int_usb || musb->int_tx || musb->int_rx) retval = musb_interrupt(musb); spin_unlock_irqrestore(&musb->lock, flags); return retval; } static int omap2430_musb_init(struct musb *musb) { u32 l; int status = 0; struct device *dev = musb->controller; struct omap2430_glue *glue = dev_get_drvdata(dev->parent); struct musb_hdrc_platform_data *plat = dev_get_platdata(dev); struct omap_musb_board_data *data = plat->board_data; /* We require some kind of external transceiver, hooked * up through ULPI. TWL4030-family PMICs include one, * which needs a driver, drivers aren't always needed. 
*/ if (dev->parent->of_node) { musb->phy = devm_phy_get(dev->parent, "usb2-phy"); /* We can't totally remove musb->xceiv as of now because * musb core uses xceiv.state and xceiv.otg. Once we have * a separate state machine to handle otg, these can be moved * out of xceiv and then we can start using the generic PHY * framework */ musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent, "usb-phy", 0); } else { musb->xceiv = devm_usb_get_phy_dev(dev, 0); musb->phy = devm_phy_get(dev, "usb"); } if (IS_ERR(musb->xceiv)) { status = PTR_ERR(musb->xceiv); if (status == -ENXIO) return status; pr_err("HS USB OTG: no transceiver configured\n"); return -EPROBE_DEFER; } if (IS_ERR(musb->phy)) { pr_err("HS USB OTG: no PHY configured\n"); return PTR_ERR(musb->phy); } musb->isr = omap2430_musb_interrupt; status = pm_runtime_get_sync(dev); if (status < 0) { dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); goto err1; } l = musb_readl(musb->mregs, OTG_INTERFSEL); if (data->interface_type == MUSB_INTERFACE_UTMI) { /* OMAP4 uses Internal PHY GS70 which uses UTMI interface */ l &= ~ULPI_12PIN; /* Disable ULPI */ l |= UTMI_8BIT; /* Enable UTMI */ } else { l |= ULPI_12PIN; } musb_writel(musb->mregs, OTG_INTERFSEL, l); pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", musb_readl(musb->mregs, OTG_REVISION), musb_readl(musb->mregs, OTG_SYSCONFIG), musb_readl(musb->mregs, OTG_SYSSTATUS), musb_readl(musb->mregs, OTG_INTERFSEL), musb_readl(musb->mregs, OTG_SIMENABLE)); setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); if (glue->status != OMAP_MUSB_UNKNOWN) omap_musb_set_mailbox(glue); phy_init(musb->phy); phy_power_on(musb->phy); pm_runtime_put_noidle(musb->controller); return 0; err1: return status; } static void omap2430_musb_enable(struct musb *musb) { u8 devctl; unsigned long timeout = jiffies + msecs_to_jiffies(1000); struct device *dev = musb->controller; struct omap2430_glue *glue = 
dev_get_drvdata(dev->parent); struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); struct omap_musb_board_data *data = pdata->board_data; switch (glue->status) { case OMAP_MUSB_ID_GROUND: omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST); if (data->interface_type != MUSB_INTERFACE_UTMI) break; devctl = musb_readb(musb->mregs, MUSB_DEVCTL); /* start the session */ devctl |= MUSB_DEVCTL_SESSION; musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); while (musb_readb(musb->mregs, MUSB_DEVCTL) & MUSB_DEVCTL_BDEVICE) { cpu_relax(); if (time_after(jiffies, timeout)) { dev_err(dev, "configured as A device timeout"); break; } } break; case OMAP_MUSB_VBUS_VALID: omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); break; default: break; } } static void omap2430_musb_disable(struct musb *musb) { struct device *dev = musb->controller; struct omap2430_glue *glue = dev_get_drvdata(dev->parent); if (glue->status != OMAP_MUSB_UNKNOWN) omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DISCONNECT); } static int omap2430_musb_exit(struct musb *musb) { del_timer_sync(&musb_idle_timer); omap2430_low_level_exit(musb); phy_power_off(musb->phy); phy_exit(musb->phy); return 0; } static const struct musb_platform_ops omap2430_ops = { .init = omap2430_musb_init, .exit = omap2430_musb_exit, .set_mode = omap2430_musb_set_mode, .try_idle = omap2430_musb_try_idle, .set_vbus = omap2430_musb_set_vbus, .enable = omap2430_musb_enable, .disable = omap2430_musb_disable, }; static u64 omap2430_dmamask = DMA_BIT_MASK(32); static int omap2430_probe(struct platform_device *pdev) { struct resource musb_resources[3]; struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev); struct omap_musb_board_data *data; struct platform_device *musb; struct omap2430_glue *glue; struct device_node *np = pdev->dev.of_node; struct musb_hdrc_config *config; int ret = -ENOMEM; glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&pdev->dev, "failed to 
allocate glue context\n"); goto err0; } musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO); if (!musb) { dev_err(&pdev->dev, "failed to allocate musb device\n"); goto err0; } musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &omap2430_dmamask; musb->dev.coherent_dma_mask = omap2430_dmamask; glue->dev = &pdev->dev; glue->musb = musb; glue->status = OMAP_MUSB_UNKNOWN; glue->control_otghs = ERR_PTR(-ENODEV); if (np) { struct device_node *control_node; struct platform_device *control_pdev; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(&pdev->dev, "failed to allocate musb platform data\n"); goto err2; } data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) { dev_err(&pdev->dev, "failed to allocate musb board data\n"); goto err2; } config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); if (!config) { dev_err(&pdev->dev, "failed to allocate musb hdrc config\n"); goto err2; } of_property_read_u32(np, "mode", (u32 *)&pdata->mode); of_property_read_u32(np, "interface-type", (u32 *)&data->interface_type); of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); of_property_read_u32(np, "power", (u32 *)&pdata->power); config->multipoint = of_property_read_bool(np, "multipoint"); pdata->board_data = data; pdata->config = config; control_node = of_parse_phandle(np, "ctrl-module", 0); if (control_node) { control_pdev = of_find_device_by_node(control_node); if (!control_pdev) { dev_err(&pdev->dev, "Failed to get control device\n"); ret = -EINVAL; goto err2; } glue->control_otghs = &control_pdev->dev; } } pdata->platform_ops = &omap2430_ops; platform_set_drvdata(pdev, glue); /* * REVISIT if we ever have two instances of the wrapper, we will be * in big trouble */ _glue = glue; INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work); memset(musb_resources, 0x00, sizeof(*musb_resources) * ARRAY_SIZE(musb_resources)); 
musb_resources[0].name = pdev->resource[0].name; musb_resources[0].start = pdev->resource[0].start; musb_resources[0].end = pdev->resource[0].end; musb_resources[0].flags = pdev->resource[0].flags; musb_resources[1].name = pdev->resource[1].name; musb_resources[1].start = pdev->resource[1].start; musb_resources[1].end = pdev->resource[1].end; musb_resources[1].flags = pdev->resource[1].flags; musb_resources[2].name = pdev->resource[2].name; musb_resources[2].start = pdev->resource[2].start; musb_resources[2].end = pdev->resource[2].end; musb_resources[2].flags = pdev->resource[2].flags; ret = platform_device_add_resources(musb, musb_resources, ARRAY_SIZE(musb_resources)); if (ret) { dev_err(&pdev->dev, "failed to add resources\n"); goto err2; } ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); if (ret) { dev_err(&pdev->dev, "failed to add platform_data\n"); goto err2; } pm_runtime_enable(&pdev->dev); ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); goto err2; } return 0; err2: platform_device_put(musb); err0: return ret; } static int omap2430_remove(struct platform_device *pdev) { struct omap2430_glue *glue = platform_get_drvdata(pdev); cancel_work_sync(&glue->omap_musb_mailbox_work); platform_device_unregister(glue->musb); return 0; } #ifdef CONFIG_PM static int omap2430_runtime_suspend(struct device *dev) { struct omap2430_glue *glue = dev_get_drvdata(dev); struct musb *musb = glue_to_musb(glue); if (musb) { musb->context.otg_interfsel = musb_readl(musb->mregs, OTG_INTERFSEL); omap2430_low_level_exit(musb); } return 0; } static int omap2430_runtime_resume(struct device *dev) { struct omap2430_glue *glue = dev_get_drvdata(dev); struct musb *musb = glue_to_musb(glue); if (musb) { omap2430_low_level_init(musb); musb_writel(musb->mregs, OTG_INTERFSEL, musb->context.otg_interfsel); } return 0; } static struct dev_pm_ops omap2430_pm_ops = { .runtime_suspend = omap2430_runtime_suspend, .runtime_resume = 
omap2430_runtime_resume, }; #define DEV_PM_OPS (&omap2430_pm_ops) #else #define DEV_PM_OPS NULL #endif #ifdef CONFIG_OF static const struct of_device_id omap2430_id_table[] = { { .compatible = "ti,omap4-musb" }, { .compatible = "ti,omap3-musb" }, {}, }; MODULE_DEVICE_TABLE(of, omap2430_id_table); #endif static struct platform_driver omap2430_driver = { .probe = omap2430_probe, .remove = omap2430_remove, .driver = { .name = "musb-omap2430", .pm = DEV_PM_OPS, .of_match_table = of_match_ptr(omap2430_id_table), }, }; MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); MODULE_LICENSE("GPL v2"); static int __init omap2430_init(void) { return platform_driver_register(&omap2430_driver); } subsys_initcall(omap2430_init); static void __exit omap2430_exit(void) { platform_driver_unregister(&omap2430_driver); } module_exit(omap2430_exit);
gpl-2.0
koalo/linux
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
362
51640
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

/* Number of 32-bit registers exposed through the ethtool register dump */
#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options.
 * Each entry maps an XAE_OPTION_* flag to the register and mask bits that
 * implement it; axienet_setoptions() walks this table.
 */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * returns: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 * Registers are accessed big-endian via in_be32().
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 * @value:	Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
*/ static inline void axienet_dma_out32(struct axienet_local *lp, off_t reg, u32 value) { out_be32((lp->dma_regs + reg), value); } /** * axienet_dma_bd_release - Release buffer descriptor rings * @ndev: Pointer to the net_device structure * * This function is used to release the descriptors allocated in * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet * driver stop api is called. */ static void axienet_dma_bd_release(struct net_device *ndev) { int i; struct axienet_local *lp = netdev_priv(ndev); for (i = 0; i < RX_BD_NUM; i++) { dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys, lp->max_frm_size, DMA_FROM_DEVICE); dev_kfree_skb((struct sk_buff *) (lp->rx_bd_v[i].sw_id_offset)); } if (lp->rx_bd_v) { dma_free_coherent(ndev->dev.parent, sizeof(*lp->rx_bd_v) * RX_BD_NUM, lp->rx_bd_v, lp->rx_bd_p); } if (lp->tx_bd_v) { dma_free_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * TX_BD_NUM, lp->tx_bd_v, lp->tx_bd_p); } } /** * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA * @ndev: Pointer to the net_device structure * * returns: 0, on success * -ENOMEM, on failure * * This function is called to initialize the Rx and Tx DMA descriptor * rings. This initializes the descriptors with required default values * and is called when Axi Ethernet driver reset is called. */ static int axienet_dma_bd_init(struct net_device *ndev) { u32 cr; int i; struct sk_buff *skb; struct axienet_local *lp = netdev_priv(ndev); /* Reset the indexes which are used for accessing the BDs */ lp->tx_bd_ci = 0; lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; /* * Allocate the Tx and Rx buffer descriptors. 
*/ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * TX_BD_NUM, &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO); if (!lp->tx_bd_v) goto out; lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->rx_bd_v) * RX_BD_NUM, &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO); if (!lp->rx_bd_v) goto out; for (i = 0; i < TX_BD_NUM; i++) { lp->tx_bd_v[i].next = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM); } for (i = 0; i < RX_BD_NUM; i++) { lp->rx_bd_v[i].next = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); if (!skb) goto out; lp->rx_bd_v[i].sw_id_offset = (u32) skb; lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, skb->data, lp->max_frm_size, DMA_FROM_DEVICE); lp->rx_bd_v[i].cntrl = lp->max_frm_size; } /* Start updating the Rx channel control register */ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); /* Update the interrupt coalesce count */ cr = ((cr & ~XAXIDMA_COALESCE_MASK) | ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT)); /* Update the delay timer count */ cr = ((cr & ~XAXIDMA_DELAY_MASK) | (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); /* Enable coalesce, delay timer and error interrupts */ cr |= XAXIDMA_IRQ_ALL_MASK; /* Write to the Rx channel control register */ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); /* Start updating the Tx channel control register */ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); /* Update the interrupt coalesce count */ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT)); /* Update the delay timer count */ cr = (((cr & ~XAXIDMA_DELAY_MASK)) | (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); /* Enable coalesce, delay timer and error interrupts */ cr |= XAXIDMA_IRQ_ALL_MASK; /* Write to the Tx channel control register */ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. 
This will make the Rx side ready for reception.*/ axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. But only after we write to the * tail pointer register that the Tx channel will start transmitting */ axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); return 0; out: axienet_dma_bd_release(ndev); return -ENOMEM; } /** * axienet_set_mac_address - Write the MAC address * @ndev: Pointer to the net_device structure * @address: 6 byte Address to be written as MAC address * * This function is called to initialize the MAC address of the Axi Ethernet * core. It writes to the UAW0 and UAW1 registers of the core. */ static void axienet_set_mac_address(struct net_device *ndev, void *address) { struct axienet_local *lp = netdev_priv(ndev); if (address) memcpy(ndev->dev_addr, address, ETH_ALEN); if (!is_valid_ether_addr(ndev->dev_addr)) eth_random_addr(ndev->dev_addr); /* Set up unicast MAC address filter set its mac address */ axienet_iow(lp, XAE_UAW0_OFFSET, (ndev->dev_addr[0]) | (ndev->dev_addr[1] << 8) | (ndev->dev_addr[2] << 16) | (ndev->dev_addr[3] << 24)); axienet_iow(lp, XAE_UAW1_OFFSET, (((axienet_ior(lp, XAE_UAW1_OFFSET)) & ~XAE_UAW1_UNICASTADDR_MASK) | (ndev->dev_addr[4] | (ndev->dev_addr[5] << 8)))); } /** * netdev_set_mac_address - Write the MAC address (from outside the driver) * @ndev: Pointer to the net_device structure * @p: 6 byte Address to be written as MAC address * * returns: 0 for all conditions. Presently, there is no failure case. 
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			/* Hardware CAM holds only four entries; extra
			 * addresses are silently dropped here.
			 */
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;
			/* Pack the 6-byte MAC into the two filter words */
			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);
			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			/* Low byte of FMI selects which CAM entry to write */
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		/* No multicast addresses: leave promiscuous mode and
		 * clear out the whole CAM table.
		 */
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	/* Walk the option table: clear each option's mask bits, then set
	 * them again only if the option is requested.
	 */
	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

/* Reset one Axi DMA channel (@offset selects Tx or Rx CR) and busy-wait,
 * up to roughly one millisecond, for the reset bit to self-clear.
 * Resetting Axi DMA also resets the attached Axi Ethernet core.
 */
static void __axienet_device_reset(struct axienet_local *lp,
				   struct device *dev, off_t offset)
{
	u32 timeout;
	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(dev, "axienet_device_reset DMA "
				"reset timeout!\n");
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	/* Default to standard VLAN-sized frames; enable jumbo only if the
	 * MTU requires it and the hardware build supports it.
	 */
	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU) &&
	    (lp->jumbo_support)) {
		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
				   XAE_TRL_SIZE;
		lp->options |= XAE_OPTION_JUMBO;
	}

	/* NOTE(review): a bd_init failure is only logged here; the device
	 * continues with unusable rings — confirm callers can tolerate this.
	 */
	if (axienet_dma_bd_init(ndev)) {
		dev_err(&ndev->dev, "axienet_device_reset descriptor "
			"allocation failed\n");
	}

	/* Disable the receiver until options are synced */
	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	/* Acknowledge any pending rx-reject interrupt */
	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
*/ static void axienet_adjust_link(struct net_device *ndev) { u32 emmc_reg; u32 link_state; u32 setspeed = 1; struct axienet_local *lp = netdev_priv(ndev); struct phy_device *phy = lp->phy_dev; link_state = phy->speed | (phy->duplex << 1) | phy->link; if (lp->last_link != link_state) { if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) { if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X) setspeed = 0; } else { if ((phy->speed == SPEED_1000) && (lp->phy_type == XAE_PHY_TYPE_MII)) setspeed = 0; } if (setspeed == 1) { emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; switch (phy->speed) { case SPEED_1000: emmc_reg |= XAE_EMMC_LINKSPD_1000; break; case SPEED_100: emmc_reg |= XAE_EMMC_LINKSPD_100; break; case SPEED_10: emmc_reg |= XAE_EMMC_LINKSPD_10; break; default: dev_err(&ndev->dev, "Speed other than 10, 100 " "or 1Gbps is not supported\n"); break; } axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); lp->last_link = link_state; phy_print_status(phy); } else { dev_err(&ndev->dev, "Error setting Axi Ethernet " "mac speed\n"); } } } /** * axienet_start_xmit_done - Invoked once a transmit is completed by the * Axi DMA Tx channel. * @ndev: Pointer to the net_device structure * * This function is invoked from the Axi DMA Tx isr to notify the completion * of transmit operation. It clears fields in the corresponding Tx BDs and * unmaps the corresponding buffer so that CPU can regain ownership of the * buffer. It finally invokes "netif_wake_queue" to restart transmission if * required. 
*/ static void axienet_start_xmit_done(struct net_device *ndev) { u32 size = 0; u32 packets = 0; struct axienet_local *lp = netdev_priv(ndev); struct axidma_bd *cur_p; unsigned int status = 0; cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; status = cur_p->status; while (status & XAXIDMA_BD_STS_COMPLETE_MASK) { dma_unmap_single(ndev->dev.parent, cur_p->phys, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE); if (cur_p->app4) dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); /*cur_p->phys = 0;*/ cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app4 = 0; cur_p->status = 0; size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; packets++; lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM; cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; status = cur_p->status; } ndev->stats.tx_packets += packets; ndev->stats.tx_bytes += size; netif_wake_queue(ndev); } /** * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy * @lp: Pointer to the axienet_local structure * @num_frag: The number of BDs to check for * * returns: 0, on success * NETDEV_TX_BUSY, if any of the descriptors are not free * * This function is invoked before BDs are allocated and transmission starts. * This function returns 0 if a BD or group of BDs can be allocated for * transmission. If the BD or any of the BDs are not free the function * returns a busy status. This is invoked from axienet_start_xmit. */ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, int num_frag) { struct axidma_bd *cur_p; cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM]; if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) return NETDEV_TX_BUSY; return 0; } /** * axienet_start_xmit - Starts the transmission. * @skb: sk_buff pointer that contains data to be Txed. * @ndev: Pointer to net_device structure. * * returns: NETDEV_TX_OK, on success * NETDEV_TX_BUSY, if any of the descriptors are not free * * This function is invoked from upper layers to initiate transmission. 
The * function uses the next available free BDs and populates their fields to * start the transmission. Additionally if checksum offloading is supported, * it populates AXI Stream Control fields with appropriate values. */ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { u32 ii; u32 num_frag; u32 csum_start_off; u32 csum_index_off; skb_frag_t *frag; dma_addr_t tail_p; struct axienet_local *lp = netdev_priv(ndev); struct axidma_bd *cur_p; num_frag = skb_shinfo(skb)->nr_frags; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; if (axienet_check_tx_bd_space(lp, num_frag)) { if (!netif_queue_stopped(ndev)) netif_stop_queue(ndev); return NETDEV_TX_BUSY; } if (skb->ip_summed == CHECKSUM_PARTIAL) { if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { /* Tx Full Checksum Offload Enabled */ cur_p->app0 |= 2; } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { csum_start_off = skb_transport_offset(skb); csum_index_off = csum_start_off + skb->csum_offset; /* Tx Partial Checksum Offload Enabled */ cur_p->app0 |= 1; cur_p->app1 = (csum_start_off << 16) | csum_index_off; } } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ } cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb_headlen(skb), DMA_TO_DEVICE); for (ii = 0; ii < num_frag; ii++) { lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; frag = &skb_shinfo(skb)->frags[ii]; cur_p->phys = dma_map_single(ndev->dev.parent, skb_frag_address(frag), skb_frag_size(frag), DMA_TO_DEVICE); cur_p->cntrl = skb_frag_size(frag); } cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; cur_p->app4 = (unsigned long)skb; tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; /* Start the transfer */ axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; return NETDEV_TX_OK; } /** * axienet_recv - Is called from Axi DMA Rx 
Isr to complete the received * BD processing. * @ndev: Pointer to net_device structure. * * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It * does minimal processing and invokes "netif_rx" to complete further * processing. */ static void axienet_recv(struct net_device *ndev) { u32 length; u32 csumstatus; u32 size = 0; u32 packets = 0; dma_addr_t tail_p; struct axienet_local *lp = netdev_priv(ndev); struct sk_buff *skb, *new_skb; struct axidma_bd *cur_p; tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { skb = (struct sk_buff *) (cur_p->sw_id_offset); length = cur_p->app4 & 0x0000FFFF; dma_unmap_single(ndev->dev.parent, cur_p->phys, lp->max_frm_size, DMA_FROM_DEVICE); skb_put(skb, length); skb->protocol = eth_type_trans(skb, ndev); /*skb_checksum_none_assert(skb);*/ skb->ip_summed = CHECKSUM_NONE; /* if we're doing Rx csum offload, set it up */ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { csumstatus = (cur_p->app2 & XAE_FULL_CSUM_STATUS_MASK) >> 3; if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) || (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) { skb->ip_summed = CHECKSUM_UNNECESSARY; } } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && skb->protocol == __constant_htons(ETH_P_IP) && skb->len > 64) { skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); skb->ip_summed = CHECKSUM_COMPLETE; } netif_rx(skb); size += length; packets++; new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); if (!new_skb) return; cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, lp->max_frm_size, DMA_FROM_DEVICE); cur_p->cntrl = lp->max_frm_size; cur_p->status = 0; cur_p->sw_id_offset = (u32) new_skb; lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; } ndev->stats.rx_packets += packets; ndev->stats.rx_bytes += size; axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); } /** * axienet_tx_irq - Tx Done Isr. 
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		/* On a DMA error, mask both channels' interrupts and defer
		 * recovery to the error tasklet.
		 */
		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	/* Acknowledge whatever status bits we observed */
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		/* On a DMA error, mask both channels' interrupts and defer
		 * recovery to the error tasklet.
		 */
		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	/* Acknowledge whatever status bits we observed */
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

/* Defined later; referenced by axienet_open's tasklet_init */
static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * returns: 0, on success.
 *	    -ENODEV, if PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     axienet_adjust_link, 0,
					     PHY_INTERFACE_MODE_GMII);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(lp->phy_dev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * returns: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	/* Clear the run/stop bit on both DMA channels */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * returns: 0 on success, -EBUSY if the interface is up, or -EINVAL if the
 *	    requested mtu is out of range for the hardware configuration.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;
	/* Upper bound depends on whether the hardware build supports
	 * jumbo frames; lower bound is 64 in both cases.
	 */
	if (lp->jumbo_support) {
		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	} else {
		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
*/ static void axienet_poll_controller(struct net_device *ndev) { struct axienet_local *lp = netdev_priv(ndev); disable_irq(lp->tx_irq); disable_irq(lp->rx_irq); axienet_rx_irq(lp->tx_irq, ndev); axienet_tx_irq(lp->rx_irq, ndev); enable_irq(lp->tx_irq); enable_irq(lp->rx_irq); } #endif static const struct net_device_ops axienet_netdev_ops = { .ndo_open = axienet_open, .ndo_stop = axienet_stop, .ndo_start_xmit = axienet_start_xmit, .ndo_change_mtu = axienet_change_mtu, .ndo_set_mac_address = netdev_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = axienet_set_multicast_list, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = axienet_poll_controller, #endif }; /** * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY. * @ndev: Pointer to net_device structure * @ecmd: Pointer to ethtool_cmd structure * * This implements ethtool command for getting PHY settings. If PHY could * not be found, the function returns -ENODEV. This function calls the * relevant PHY ethtool API to get the PHY settings. * Issue "ethtool ethX" under linux prompt to execute this function. */ static int axienet_ethtools_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct axienet_local *lp = netdev_priv(ndev); struct phy_device *phydev = lp->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_gset(phydev, ecmd); } /** * axienet_ethtools_set_settings - Set PHY settings as passed in the argument. * @ndev: Pointer to net_device structure * @ecmd: Pointer to ethtool_cmd structure * * This implements ethtool command for setting various PHY settings. If PHY * could not be found, the function returns -ENODEV. This function calls the * relevant PHY ethtool API to set the PHY. * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this * function. 
*/ static int axienet_ethtools_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct axienet_local *lp = netdev_priv(ndev); struct phy_device *phydev = lp->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_sset(phydev, ecmd); } /** * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. * @ndev: Pointer to net_device structure * @ed: Pointer to ethtool_drvinfo structure * * This implements ethtool command for getting the driver information. * Issue "ethtool -i ethX" under linux prompt to execute this function. */ static void axienet_ethtools_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) { strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); ed->regdump_len = sizeof(u32) * AXIENET_REGS_N; } /** * axienet_ethtools_get_regs_len - Get the total regs length present in the * AxiEthernet core. * @ndev: Pointer to net_device structure * * This implements ethtool command for getting the total register length * information. */ static int axienet_ethtools_get_regs_len(struct net_device *ndev) { return sizeof(u32) * AXIENET_REGS_N; } /** * axienet_ethtools_get_regs - Dump the contents of all registers present * in AxiEthernet core. * @ndev: Pointer to net_device structure * @regs: Pointer to ethtool_regs structure * @ret: Void pointer used to return the contents of the registers. * * This implements ethtool command for getting the Axi Ethernet register dump. * Issue "ethtool -d ethX" to execute this function. 
*/ static void axienet_ethtools_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *ret) { u32 *data = (u32 *) ret; size_t len = sizeof(u32) * AXIENET_REGS_N; struct axienet_local *lp = netdev_priv(ndev); regs->version = 0; regs->len = len; memset(data, 0, len); data[0] = axienet_ior(lp, XAE_RAF_OFFSET); data[1] = axienet_ior(lp, XAE_TPF_OFFSET); data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); data[3] = axienet_ior(lp, XAE_IS_OFFSET); data[4] = axienet_ior(lp, XAE_IP_OFFSET); data[5] = axienet_ior(lp, XAE_IE_OFFSET); data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); data[12] = axienet_ior(lp, XAE_PPST_OFFSET); data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); data[15] = axienet_ior(lp, XAE_TC_OFFSET); data[16] = axienet_ior(lp, XAE_FCC_OFFSET); data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET); data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET); data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET); data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET); data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); data[29] = axienet_ior(lp, XAE_FMI_OFFSET); data[30] = axienet_ior(lp, XAE_AF0_OFFSET); data[31] = axienet_ior(lp, XAE_AF1_OFFSET); } /** * axienet_ethtools_get_pauseparam - Get the pause parameter setting for * Tx and Rx paths. * @ndev: Pointer to net_device structure * @epauseparm: Pointer to ethtool_pauseparam structure. 
* * This implements ethtool command for getting axi ethernet pause frame * setting. Issue "ethtool -a ethX" to execute this function. */ static void axienet_ethtools_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *epauseparm) { u32 regval; struct axienet_local *lp = netdev_priv(ndev); epauseparm->autoneg = 0; regval = axienet_ior(lp, XAE_FCC_OFFSET); epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK; epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK; } /** * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) * settings. * @ndev: Pointer to net_device structure * @epauseparam:Pointer to ethtool_pauseparam structure * * This implements ethtool command for enabling flow control on Rx and Tx * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this * function. */ static int axienet_ethtools_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *epauseparm) { u32 regval = 0; struct axienet_local *lp = netdev_priv(ndev); if (netif_running(ndev)) { printk(KERN_ERR "%s: Please stop netif before applying " "configruation\n", ndev->name); return -EFAULT; } regval = axienet_ior(lp, XAE_FCC_OFFSET); if (epauseparm->tx_pause) regval |= XAE_FCC_FCTX_MASK; else regval &= ~XAE_FCC_FCTX_MASK; if (epauseparm->rx_pause) regval |= XAE_FCC_FCRX_MASK; else regval &= ~XAE_FCC_FCRX_MASK; axienet_iow(lp, XAE_FCC_OFFSET, regval); return 0; } /** * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. * @ndev: Pointer to net_device structure * @ecoalesce: Pointer to ethtool_coalesce structure * * This implements ethtool command for getting the DMA interrupt coalescing * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to * execute this function. 
*/ static int axienet_ethtools_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ecoalesce) { u32 regval = 0; struct axienet_local *lp = netdev_priv(ndev); regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) >> XAXIDMA_COALESCE_SHIFT; regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) >> XAXIDMA_COALESCE_SHIFT; return 0; } /** * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. * @ndev: Pointer to net_device structure * @ecoalesce: Pointer to ethtool_coalesce structure * * This implements ethtool command for setting the DMA interrupt coalescing * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux * prompt to execute this function. */ static int axienet_ethtools_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ecoalesce) { struct axienet_local *lp = netdev_priv(ndev); if (netif_running(ndev)) { printk(KERN_ERR "%s: Please stop netif before applying " "configruation\n", ndev->name); return -EFAULT; } if ((ecoalesce->rx_coalesce_usecs) || (ecoalesce->rx_coalesce_usecs_irq) || (ecoalesce->rx_max_coalesced_frames_irq) || (ecoalesce->tx_coalesce_usecs) || (ecoalesce->tx_coalesce_usecs_irq) || (ecoalesce->tx_max_coalesced_frames_irq) || (ecoalesce->stats_block_coalesce_usecs) || (ecoalesce->use_adaptive_rx_coalesce) || (ecoalesce->use_adaptive_tx_coalesce) || (ecoalesce->pkt_rate_low) || (ecoalesce->rx_coalesce_usecs_low) || (ecoalesce->rx_max_coalesced_frames_low) || (ecoalesce->tx_coalesce_usecs_low) || (ecoalesce->tx_max_coalesced_frames_low) || (ecoalesce->pkt_rate_high) || (ecoalesce->rx_coalesce_usecs_high) || (ecoalesce->rx_max_coalesced_frames_high) || (ecoalesce->tx_coalesce_usecs_high) || (ecoalesce->tx_max_coalesced_frames_high) || (ecoalesce->rate_sample_interval)) return -EOPNOTSUPP; if (ecoalesce->rx_max_coalesced_frames) lp->coalesce_count_rx = 
ecoalesce->rx_max_coalesced_frames; if (ecoalesce->tx_max_coalesced_frames) lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; return 0; } static struct ethtool_ops axienet_ethtool_ops = { .get_settings = axienet_ethtools_get_settings, .set_settings = axienet_ethtools_set_settings, .get_drvinfo = axienet_ethtools_get_drvinfo, .get_regs_len = axienet_ethtools_get_regs_len, .get_regs = axienet_ethtools_get_regs, .get_link = ethtool_op_get_link, .get_pauseparam = axienet_ethtools_get_pauseparam, .set_pauseparam = axienet_ethtools_set_pauseparam, .get_coalesce = axienet_ethtools_get_coalesce, .set_coalesce = axienet_ethtools_set_coalesce, }; /** * axienet_dma_err_handler - Tasklet handler for Axi DMA Error * @data: Data passed * * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the * Tx/Rx BDs. */ static void axienet_dma_err_handler(unsigned long data) { u32 axienet_status; u32 cr, i; int mdio_mcreg; struct axienet_local *lp = (struct axienet_local *) data; struct net_device *ndev = lp->ndev; struct axidma_bd *cur_p; axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET); axienet_mdio_wait_until_ready(lp); /* Disable the MDIO interface till Axi Ethernet Reset is completed. * When we do an Axi Ethernet reset, it resets the complete core * including the MDIO. So if MDIO is not disabled when the reset * process is started, MDIO will be broken afterwards. 
*/ axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg & ~XAE_MDIO_MC_MDIOEN_MASK)); __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET); __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET); axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg); axienet_mdio_wait_until_ready(lp); for (i = 0; i < TX_BD_NUM; i++) { cur_p = &lp->tx_bd_v[i]; if (cur_p->phys) dma_unmap_single(ndev->dev.parent, cur_p->phys, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE); if (cur_p->app4) dev_kfree_skb_irq((struct sk_buff *) cur_p->app4); cur_p->phys = 0; cur_p->cntrl = 0; cur_p->status = 0; cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app3 = 0; cur_p->app4 = 0; cur_p->sw_id_offset = 0; } for (i = 0; i < RX_BD_NUM; i++) { cur_p = &lp->rx_bd_v[i]; cur_p->status = 0; cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app3 = 0; cur_p->app4 = 0; } lp->tx_bd_ci = 0; lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; /* Start updating the Rx channel control register */ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); /* Update the interrupt coalesce count */ cr = ((cr & ~XAXIDMA_COALESCE_MASK) | (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); /* Update the delay timer count */ cr = ((cr & ~XAXIDMA_DELAY_MASK) | (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); /* Enable coalesce, delay timer and error interrupts */ cr |= XAXIDMA_IRQ_ALL_MASK; /* Finally write to the Rx channel control register */ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); /* Start updating the Tx channel control register */ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); /* Update the interrupt coalesce count */ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); /* Update the delay timer count */ cr = (((cr & ~XAXIDMA_DELAY_MASK)) | (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); /* Enable coalesce, delay timer and error interrupts */ cr |= XAXIDMA_IRQ_ALL_MASK; /* Finally write to the Tx channel control register */ 
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception.*/ axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. But only after we write to the * tail pointer register that the Tx channel will start transmitting */ axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); axienet_status &= ~XAE_RCW1_RX_MASK; axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); axienet_status = axienet_ior(lp, XAE_IP_OFFSET); if (axienet_status & XAE_INT_RXRJECT_MASK) axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); /* Sync default options with HW but leave receiver and * transmitter disabled.*/ axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); axienet_set_mac_address(ndev, NULL); axienet_set_multicast_list(ndev); axienet_setoptions(ndev, lp->options); } /** * axienet_of_probe - Axi Ethernet probe function. * @op: Pointer to platform device structure. * @match: Pointer to device id structure * * returns: 0, on success * Non-zero error value on failure. * * This is the probe routine for Axi Ethernet driver. This is called before * any other driver routines are invoked. It allocates and sets up the Ethernet * device. Parses through device tree and populates fields of * axienet_local. It registers the Ethernet device. 
*/ static int axienet_of_probe(struct platform_device *op) { __be32 *p; int size, ret = 0; struct device_node *np; struct axienet_local *lp; struct net_device *ndev; const void *addr; ndev = alloc_etherdev(sizeof(*lp)); if (!ndev) return -ENOMEM; ether_setup(ndev); dev_set_drvdata(&op->dev, ndev); SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; ndev->netdev_ops = &axienet_netdev_ops; ndev->ethtool_ops = &axienet_ethtool_ops; lp = netdev_priv(ndev); lp->ndev = ndev; lp->dev = &op->dev; lp->options = XAE_OPTION_DEFAULTS; /* Map device registers */ lp->regs = of_iomap(op->dev.of_node, 0); if (!lp->regs) { dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); goto nodev; } /* Setup checksum offload, but default to off if not specified */ lp->features = 0; p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL); if (p) { switch (be32_to_cpup(p)) { case 1: lp->csum_offload_on_tx_path = XAE_FEATURE_PARTIAL_TX_CSUM; lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; /* Can checksum TCP/UDP over IPv4. */ ndev->features |= NETIF_F_IP_CSUM; break; case 2: lp->csum_offload_on_tx_path = XAE_FEATURE_FULL_TX_CSUM; lp->features |= XAE_FEATURE_FULL_TX_CSUM; /* Can checksum TCP/UDP over IPv4. */ ndev->features |= NETIF_F_IP_CSUM; break; default: lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; } } p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); if (p) { switch (be32_to_cpup(p)) { case 1: lp->csum_offload_on_rx_path = XAE_FEATURE_PARTIAL_RX_CSUM; lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; break; case 2: lp->csum_offload_on_rx_path = XAE_FEATURE_FULL_RX_CSUM; lp->features |= XAE_FEATURE_FULL_RX_CSUM; break; default: lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; } } /* For supporting jumbo frames, the Axi Ethernet hardware must have * a larger Rx/Tx Memory. 
Typically, the size must be more than or * equal to 16384 bytes, so that we can enable jumbo option and start * supporting jumbo frames. Here we check for memory allocated for * Rx/Tx in the hardware from the device-tree and accordingly set * flags. */ p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL); if (p) { if ((be32_to_cpup(p)) >= 0x4000) lp->jumbo_support = 1; } p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type", NULL); if (p) lp->temac_type = be32_to_cpup(p); p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL); if (p) lp->phy_type = be32_to_cpup(p); /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); if (!np) { dev_err(&op->dev, "could not find DMA node\n"); goto err_iounmap; } lp->dma_regs = of_iomap(np, 0); if (lp->dma_regs) { dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs); } else { dev_err(&op->dev, "unable to map DMA registers\n"); of_node_put(np); } lp->rx_irq = irq_of_parse_and_map(np, 1); lp->tx_irq = irq_of_parse_and_map(np, 0); of_node_put(np); if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { dev_err(&op->dev, "could not determine irqs\n"); ret = -ENOMEM; goto err_iounmap_2; } /* Retrieve the MAC address */ addr = of_get_property(op->dev.of_node, "local-mac-address", &size); if ((!addr) || (size != 6)) { dev_err(&op->dev, "could not find MAC address\n"); ret = -ENODEV; goto err_iounmap_2; } axienet_set_mac_address(ndev, (void *) addr); lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); ret = axienet_mdio_setup(lp, op->dev.of_node); if (ret) dev_warn(&op->dev, "error registering MDIO bus\n"); ret = register_netdev(lp->ndev); if (ret) { dev_err(lp->dev, "register_netdev() error (%i)\n", ret); goto err_iounmap_2; } return 0; err_iounmap_2: if (lp->dma_regs) iounmap(lp->dma_regs); err_iounmap: 
iounmap(lp->regs); nodev: free_netdev(ndev); ndev = NULL; return ret; } static int axienet_of_remove(struct platform_device *op) { struct net_device *ndev = dev_get_drvdata(&op->dev); struct axienet_local *lp = netdev_priv(ndev); axienet_mdio_teardown(lp); unregister_netdev(ndev); if (lp->phy_node) of_node_put(lp->phy_node); lp->phy_node = NULL; dev_set_drvdata(&op->dev, NULL); iounmap(lp->regs); if (lp->dma_regs) iounmap(lp->dma_regs); free_netdev(ndev); return 0; } static struct platform_driver axienet_of_driver = { .probe = axienet_of_probe, .remove = axienet_of_remove, .driver = { .owner = THIS_MODULE, .name = "xilinx_axienet", .of_match_table = axienet_of_match, }, }; module_platform_driver(axienet_of_driver); MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); MODULE_AUTHOR("Xilinx"); MODULE_LICENSE("GPL");
gpl-2.0
silvesterlee/linux
tools/perf/builtin-list.c
618
2122
/*
 * builtin-list.c
 *
 * Builtin list command: list all event types
 *
 * Copyright (C) 2009, Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 */
#include "builtin.h"

#include "perf.h"
#include "util/parse-events.h"
#include "util/cache.h"
#include "util/pmu.h"
#include "util/parse-options.h"

int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int i;
	bool raw_dump = false;
	struct option list_options[] = {
		OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
		OPT_END()
	};
	const char * const list_usage[] = {
		"perf list [hw|sw|cache|tracepoint|pmu|event_glob]",
		NULL
	};

	/* --raw-dump is for internal consumers; keep it out of --help. */
	set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);

	argc = parse_options(argc, argv, list_options, list_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	setup_pager();

	if (!raw_dump)
		printf("\nList of pre-defined events (to be used in -e):\n\n");

	/* No argument: dump every known event. */
	if (argc == 0) {
		print_events(NULL, raw_dump);
		return 0;
	}

	/* Each positional argument selects a category or an event glob. */
	for (i = 0; i < argc; ++i) {
		const char *arg = argv[i];
		char *sep, *dup;
		int sep_idx;

		if (strcmp(arg, "tracepoint") == 0) {
			print_tracepoint_events(NULL, NULL, raw_dump);
			continue;
		}
		if (strcmp(arg, "hw") == 0 || strcmp(arg, "hardware") == 0) {
			print_symbol_events(NULL, PERF_TYPE_HARDWARE,
					event_symbols_hw, PERF_COUNT_HW_MAX,
					raw_dump);
			continue;
		}
		if (strcmp(arg, "sw") == 0 || strcmp(arg, "software") == 0) {
			print_symbol_events(NULL, PERF_TYPE_SOFTWARE,
					event_symbols_sw, PERF_COUNT_SW_MAX,
					raw_dump);
			continue;
		}
		if (strcmp(arg, "cache") == 0 || strcmp(arg, "hwcache") == 0) {
			print_hwcache_events(NULL, raw_dump);
			continue;
		}
		if (strcmp(arg, "pmu") == 0) {
			print_pmu_events(NULL, raw_dump);
			continue;
		}

		sep = strchr(arg, ':');
		if (sep == NULL) {
			/* Plain glob: match it against all event names. */
			print_events(arg, raw_dump);
			continue;
		}

		/* "system:event" form: split on the first colon. */
		sep_idx = sep - arg;
		dup = strdup(arg);
		if (dup == NULL)
			return -1;
		dup[sep_idx] = '\0';
		print_tracepoint_events(dup, dup + sep_idx + 1, raw_dump);
		free(dup);
	}
	return 0;
}
gpl-2.0
patjak/drm-gma500
net/ipv4/tcp_scalable.c
618
1406
/* Tom Kelly's Scalable TCP * * See http://www.deneholme.net/tom/scalable/ * * John Heffner <jheffner@sc.edu> */ #include <linux/module.h> #include <net/tcp.h> /* These factors derived from the recommended values in the aer: * .01 and and 7/8. We use 50 instead of 100 to account for * delayed ack. */ #define TCP_SCALABLE_AI_CNT 50U #define TCP_SCALABLE_MD_SCALE 3 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) { struct tcp_sock *tp = tcp_sk(sk); if (!tcp_is_cwnd_limited(sk)) return; if (tcp_in_slow_start(tp)) tcp_slow_start(tp, acked); else tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT), 1); } static u32 tcp_scalable_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); } static struct tcp_congestion_ops tcp_scalable __read_mostly = { .ssthresh = tcp_scalable_ssthresh, .cong_avoid = tcp_scalable_cong_avoid, .owner = THIS_MODULE, .name = "scalable", }; static int __init tcp_scalable_register(void) { return tcp_register_congestion_control(&tcp_scalable); } static void __exit tcp_scalable_unregister(void) { tcp_unregister_congestion_control(&tcp_scalable); } module_init(tcp_scalable_register); module_exit(tcp_scalable_unregister); MODULE_AUTHOR("John Heffner"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Scalable TCP");
gpl-2.0
Chad0989/Vigor-Incredikernel
drivers/cpufreq/cpufreq_conservative.c
874
20279
/* * drivers/cpufreq/cpufreq_conservative.c * * Copyright (C) 2001 Russell King * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. * Jun Nakajima <jun.nakajima@intel.com> * (C) 2009 Alexander Clouter <alex@digriz.org.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/cpu.h> #include <linux/jiffies.h> #include <linux/kernel_stat.h> #include <linux/mutex.h> #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/ktime.h> #include <linux/sched.h> /* * dbs is used in this file as a shortform for demandbased switching * It helps to keep variable names smaller, simpler */ #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_FREQUENCY_DOWN_THRESHOLD (20) /* * The polling frequency of this governor depends on the capability of * the processor. Default polling frequency is 1000 times the transition * latency of the processor. The governor will work on any processor with * transition latency <= 10mS, using appropriate sampling * rate. * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) * this governor will not work. * All times here are in uS. 
*/ #define MIN_SAMPLING_RATE_RATIO (2) static unsigned int min_sampling_rate; #define LATENCY_MULTIPLIER (1000) #define MIN_LATENCY_MULTIPLIER (100) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (10) #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) static void do_dbs_timer(struct work_struct *work); struct cpu_dbs_info_s { cputime64_t prev_cpu_idle; cputime64_t prev_cpu_wall; cputime64_t prev_cpu_nice; struct cpufreq_policy *cur_policy; struct delayed_work work; unsigned int down_skip; unsigned int requested_freq; int cpu; unsigned int enable:1; /* * percpu mutex that serializes governor limit change with * do_dbs_timer invocation. We do not want do_dbs_timer to run * when user is changing the governor or limits. */ struct mutex timer_mutex; }; static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ /* * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on * different CPUs. It protects dbs_enable in governor start/stop. 
*/ static DEFINE_MUTEX(dbs_mutex); static struct workqueue_struct *kconservative_wq; static struct dbs_tuners { unsigned int sampling_rate; unsigned int sampling_down_factor; unsigned int up_threshold; unsigned int down_threshold; unsigned int ignore_nice; unsigned int freq_step; } dbs_tuners_ins = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, .ignore_nice = 0, .freq_step = 5, }; static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, cputime64_t *wall) { cputime64_t idle_time; cputime64_t cur_wall_time; cputime64_t busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, kstat_cpu(cpu).cpustat.system); busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); idle_time = cputime64_sub(cur_wall_time, busy_time); if (wall) *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); return (cputime64_t)jiffies_to_usecs(idle_time);; } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) { u64 idle_time = get_cpu_idle_time_us(cpu, wall); if (idle_time == -1ULL) return get_cpu_idle_time_jiffy(cpu, wall); return idle_time; } /* keep track of frequency transitions */ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, freq->cpu); struct cpufreq_policy *policy; if (!this_dbs_info->enable) return 0; policy = this_dbs_info->cur_policy; /* * we only care if our internally tracked freq moves outside * the 'valid' ranges of freqency available to us otherwise * we do not change it */ if (this_dbs_info->requested_freq > policy->max || 
this_dbs_info->requested_freq < policy->min) this_dbs_info->requested_freq = freq->new; return 0; } static struct notifier_block dbs_cpufreq_notifier_block = { .notifier_call = dbs_cpufreq_notifier }; /************************** sysfs interface ************************/ static ssize_t show_sampling_rate_max(struct kobject *kobj, struct attribute *attr, char *buf) { printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max " "sysfs file is deprecated - used by: %s\n", current->comm); return sprintf(buf, "%u\n", -1U); } static ssize_t show_sampling_rate_min(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", min_sampling_rate); } define_one_global_ro(sampling_rate_max); define_one_global_ro(sampling_rate_min); /* cpufreq_conservative Governor Tunables */ #define show_one(file_name, object) \ static ssize_t show_##file_name \ (struct kobject *kobj, struct attribute *attr, char *buf) \ { \ return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ } show_one(sampling_rate, sampling_rate); show_one(sampling_down_factor, sampling_down_factor); show_one(up_threshold, up_threshold); show_one(down_threshold, down_threshold); show_one(ignore_nice_load, ignore_nice); show_one(freq_step, freq_step); /*** delete after deprecation time ***/ #define DEPRECATION_MSG(file_name) \ printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ "interface is deprecated - " #file_name "\n"); #define show_one_old(file_name) \ static ssize_t show_##file_name##_old \ (struct cpufreq_policy *unused, char *buf) \ { \ printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ "interface is deprecated - " #file_name "\n"); \ return show_##file_name(NULL, NULL, buf); \ } show_one_old(sampling_rate); show_one_old(sampling_down_factor); show_one_old(up_threshold); show_one_old(down_threshold); show_one_old(ignore_nice_load); show_one_old(freq_step); show_one_old(sampling_rate_min); show_one_old(sampling_rate_max); 
cpufreq_freq_attr_ro_old(sampling_rate_min); cpufreq_freq_attr_ro_old(sampling_rate_max); /*** delete after deprecation time ***/ static ssize_t store_sampling_down_factor(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_down_factor = input; mutex_unlock(&dbs_mutex); return count; } static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); mutex_unlock(&dbs_mutex); return count; } static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); mutex_lock(&dbs_mutex); if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { mutex_unlock(&dbs_mutex); return -EINVAL; } dbs_tuners_ins.up_threshold = input; mutex_unlock(&dbs_mutex); return count; } static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); mutex_lock(&dbs_mutex); /* cannot be lower than 11 otherwise freq will not fall */ if (ret != 1 || input < 11 || input > 100 || input >= dbs_tuners_ins.up_threshold) { mutex_unlock(&dbs_mutex); return -EINVAL; } dbs_tuners_ins.down_threshold = input; mutex_unlock(&dbs_mutex); return count; } static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; unsigned int j; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; if (input > 1) input = 1; mutex_lock(&dbs_mutex); if (input == dbs_tuners_ins.ignore_nice) { /* 
nothing to do */ mutex_unlock(&dbs_mutex); return count; } dbs_tuners_ins.ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { struct cpu_dbs_info_s *dbs_info; dbs_info = &per_cpu(cs_cpu_dbs_info, j); dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; } mutex_unlock(&dbs_mutex); return count; } static ssize_t store_freq_step(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; if (input > 100) input = 100; /* no need to test here if freq_step is zero as the user might actually * want this, they would be crazy though :) */ mutex_lock(&dbs_mutex); dbs_tuners_ins.freq_step = input; mutex_unlock(&dbs_mutex); return count; } define_one_global_rw(sampling_rate); define_one_global_rw(sampling_down_factor); define_one_global_rw(up_threshold); define_one_global_rw(down_threshold); define_one_global_rw(ignore_nice_load); define_one_global_rw(freq_step); static struct attribute *dbs_attributes[] = { &sampling_rate_max.attr, &sampling_rate_min.attr, &sampling_rate.attr, &sampling_down_factor.attr, &up_threshold.attr, &down_threshold.attr, &ignore_nice_load.attr, &freq_step.attr, NULL }; static struct attribute_group dbs_attr_group = { .attrs = dbs_attributes, .name = "conservative", }; /*** delete after deprecation time ***/ #define write_one_old(file_name) \ static ssize_t store_##file_name##_old \ (struct cpufreq_policy *unused, const char *buf, size_t count) \ { \ printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ "interface is deprecated - " #file_name "\n"); \ return store_##file_name(NULL, NULL, buf, count); \ } write_one_old(sampling_rate); write_one_old(sampling_down_factor); write_one_old(up_threshold); write_one_old(down_threshold); write_one_old(ignore_nice_load); write_one_old(freq_step); 
cpufreq_freq_attr_rw_old(sampling_rate); cpufreq_freq_attr_rw_old(sampling_down_factor); cpufreq_freq_attr_rw_old(up_threshold); cpufreq_freq_attr_rw_old(down_threshold); cpufreq_freq_attr_rw_old(ignore_nice_load); cpufreq_freq_attr_rw_old(freq_step); static struct attribute *dbs_attributes_old[] = { &sampling_rate_max_old.attr, &sampling_rate_min_old.attr, &sampling_rate_old.attr, &sampling_down_factor_old.attr, &up_threshold_old.attr, &down_threshold_old.attr, &ignore_nice_load_old.attr, &freq_step_old.attr, NULL }; static struct attribute_group dbs_attr_group_old = { .attrs = dbs_attributes_old, .name = "conservative", }; /*** delete after deprecation time ***/ /************************** sysfs end ************************/ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) { unsigned int load = 0; unsigned int max_load = 0; unsigned int freq_target; struct cpufreq_policy *policy; unsigned int j; policy = this_dbs_info->cur_policy; /* * Every sampling_rate, we check, if current idle time is less * than 20% (default), then we try to increase frequency * Every sampling_rate*sampling_down_factor, we check, if current * idle time is more than 80%, then we try to decrease frequency * * Any frequency increase takes it to the maximum frequency. 
* Frequency reduction happens at minimum steps of * 5% (default) of maximum frequency */ /* Get Absolute Load */ for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; cputime64_t cur_wall_time, cur_idle_time; unsigned int idle_time, wall_time; j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); wall_time = (unsigned int) cputime64_sub(cur_wall_time, j_dbs_info->prev_cpu_wall); j_dbs_info->prev_cpu_wall = cur_wall_time; idle_time = (unsigned int) cputime64_sub(cur_idle_time, j_dbs_info->prev_cpu_idle); j_dbs_info->prev_cpu_idle = cur_idle_time; if (dbs_tuners_ins.ignore_nice) { cputime64_t cur_nice; unsigned long cur_nice_jiffies; cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, j_dbs_info->prev_cpu_nice); /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys */ cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice); j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; idle_time += jiffies_to_usecs(cur_nice_jiffies); } if (unlikely(!wall_time || wall_time < idle_time)) continue; load = 100 * (wall_time - idle_time) / wall_time; if (load > max_load) max_load = load; } /* * break out if we 'cannot' reduce the speed as the user might * want freq_step to be zero */ if (dbs_tuners_ins.freq_step == 0) return; /* Check for frequency increase */ if (max_load > dbs_tuners_ins.up_threshold) { this_dbs_info->down_skip = 0; /* if we are already at full speed then break out early */ if (this_dbs_info->requested_freq == policy->max) return; freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; /* max freq cannot be less than 100. But who knows.... 
*/ if (unlikely(freq_target == 0)) freq_target = 5; this_dbs_info->requested_freq += freq_target; if (this_dbs_info->requested_freq > policy->max) this_dbs_info->requested_freq = policy->max; __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); return; } /* * The optimal frequency is the frequency that is the lowest that * can support the current CPU usage without triggering the up * policy. To be safe, we focus 10 points under the threshold. */ if (max_load < (dbs_tuners_ins.down_threshold - 10)) { freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; this_dbs_info->requested_freq -= freq_target; if (this_dbs_info->requested_freq < policy->min) this_dbs_info->requested_freq = policy->min; /* * if we cannot reduce the frequency anymore, break out early */ if (policy->cur == policy->min) return; __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); return; } } static void do_dbs_timer(struct work_struct *work) { struct cpu_dbs_info_s *dbs_info = container_of(work, struct cpu_dbs_info_s, work.work); unsigned int cpu = dbs_info->cpu; /* We want all CPUs to do sampling nearly on same jiffy */ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); delay -= jiffies % delay; mutex_lock(&dbs_info->timer_mutex); dbs_check_cpu(dbs_info); queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); mutex_unlock(&dbs_info->timer_mutex); } static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) { /* We want all CPUs to do sampling nearly on same jiffy */ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); delay -= jiffies % delay; dbs_info->enable = 1; INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, delay); } static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) { dbs_info->enable = 0; cancel_delayed_work_sync(&dbs_info->work); } static int cpufreq_governor_dbs(struct 
cpufreq_policy *policy, unsigned int event) { unsigned int cpu = policy->cpu; struct cpu_dbs_info_s *this_dbs_info; unsigned int j; int rc; this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); switch (event) { case CPUFREQ_GOV_START: if ((!cpu_online(cpu)) || (!policy->cur)) return -EINVAL; mutex_lock(&dbs_mutex); rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old); if (rc) { mutex_unlock(&dbs_mutex); return rc; } for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); j_dbs_info->cur_policy = policy; j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &j_dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) { j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; } } this_dbs_info->down_skip = 0; this_dbs_info->requested_freq = policy->cur; mutex_init(&this_dbs_info->timer_mutex); dbs_enable++; /* * Start the timerschedule work, when this governor * is used for first time */ if (dbs_enable == 1) { unsigned int latency; /* policy latency is in nS. 
Convert it to uS first */ latency = policy->cpuinfo.transition_latency / 1000; if (latency == 0) latency = 1; rc = sysfs_create_group(cpufreq_global_kobject, &dbs_attr_group); if (rc) { mutex_unlock(&dbs_mutex); return rc; } /* * conservative does not implement micro like ondemand * governor, thus we are bound to jiffes/HZ */ min_sampling_rate = MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); /* Bring kernel and HW constraints together */ min_sampling_rate = max(min_sampling_rate, MIN_LATENCY_MULTIPLIER * latency); dbs_tuners_ins.sampling_rate = max(min_sampling_rate, latency * LATENCY_MULTIPLIER); cpufreq_register_notifier( &dbs_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } mutex_unlock(&dbs_mutex); dbs_timer_init(this_dbs_info); break; case CPUFREQ_GOV_STOP: dbs_timer_exit(this_dbs_info); mutex_lock(&dbs_mutex); sysfs_remove_group(&policy->kobj, &dbs_attr_group_old); dbs_enable--; mutex_destroy(&this_dbs_info->timer_mutex); /* * Stop the timerschedule work, when this governor * is used for first time */ if (dbs_enable == 0) cpufreq_unregister_notifier( &dbs_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); mutex_unlock(&dbs_mutex); if (!dbs_enable) sysfs_remove_group(cpufreq_global_kobject, &dbs_attr_group); break; case CPUFREQ_GOV_LIMITS: mutex_lock(&this_dbs_info->timer_mutex); if (policy->max < this_dbs_info->cur_policy->cur) __cpufreq_driver_target( this_dbs_info->cur_policy, policy->max, CPUFREQ_RELATION_H); else if (policy->min > this_dbs_info->cur_policy->cur) __cpufreq_driver_target( this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); mutex_unlock(&this_dbs_info->timer_mutex); break; } return 0; } #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE static #endif struct cpufreq_governor cpufreq_gov_conservative = { .name = "conservative", .governor = cpufreq_governor_dbs, .max_transition_latency = TRANSITION_LATENCY_LIMIT, .owner = THIS_MODULE, }; static int __init cpufreq_gov_dbs_init(void) { int err; kconservative_wq = 
create_workqueue("kconservative"); if (!kconservative_wq) { printk(KERN_ERR "Creation of kconservative failed\n"); return -EFAULT; } err = cpufreq_register_governor(&cpufreq_gov_conservative); if (err) destroy_workqueue(kconservative_wq); return err; } static void __exit cpufreq_gov_dbs_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_conservative); destroy_workqueue(kconservative_wq); } MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>"); MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " "Low Latency Frequency Transition capable processors " "optimised for use in a battery environment"); MODULE_LICENSE("GPL"); #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE fs_initcall(cpufreq_gov_dbs_init); #else module_init(cpufreq_gov_dbs_init); #endif module_exit(cpufreq_gov_dbs_exit);
gpl-2.0
gbtian/mptcp
crypto/authencesn.c
1642
23962
/* * authencesn.c - AEAD wrapper for IPsec with extended sequence numbers, * derived from authenc.c * * Copyright (C) 2010 secunet Security Networks AG * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/aead.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/authenc.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/spinlock.h> struct authenc_esn_instance_ctx { struct crypto_ahash_spawn auth; struct crypto_skcipher_spawn enc; }; struct crypto_authenc_esn_ctx { unsigned int reqoff; struct crypto_ahash *auth; struct crypto_ablkcipher *enc; }; struct authenc_esn_request_ctx { unsigned int cryptlen; unsigned int headlen; unsigned int trailen; struct scatterlist *sg; struct scatterlist hsg[2]; struct scatterlist tsg[1]; struct scatterlist cipher[2]; crypto_completion_t complete; crypto_completion_t update_complete; crypto_completion_t update_complete2; char tail[]; }; static void authenc_esn_request_complete(struct aead_request *req, int err) { if (err != -EINPROGRESS) aead_request_complete(req, err); } static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, unsigned int keylen) { unsigned int authkeylen; unsigned int enckeylen; struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct crypto_ahash *auth = ctx->auth; struct crypto_ablkcipher *enc = ctx->enc; struct rtattr *rta = (void *)key; struct crypto_authenc_key_param *param; int err = -EINVAL; if (!RTA_OK(rta, keylen)) goto badkey; if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) goto badkey; if 
(RTA_PAYLOAD(rta) < sizeof(*param)) goto badkey; param = RTA_DATA(rta); enckeylen = be32_to_cpu(param->enckeylen); key += RTA_ALIGN(rta->rta_len); keylen -= RTA_ALIGN(rta->rta_len); if (keylen < enckeylen) goto badkey; authkeylen = keylen - enckeylen; crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(auth, key, authkeylen); crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & CRYPTO_TFM_RES_MASK); if (err) goto out; crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & CRYPTO_TFM_REQ_MASK); err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & CRYPTO_TFM_RES_MASK); out: return err; badkey: crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN); goto out; } static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); if (err) goto out; ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, areq_ctx->cryptlen); ahash_request_set_callback(ahreq, aead_request_flags(req) & CRYPTO_TFM_REQ_MAY_SLEEP, areq_ctx->update_complete2, req); err = crypto_ahash_update(ahreq); if (err) goto out; ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, areq_ctx->trailen); ahash_request_set_callback(ahreq, aead_request_flags(req) & CRYPTO_TFM_REQ_MAY_SLEEP, areq_ctx->complete, req); err = crypto_ahash_finup(ahreq); if (err) goto out; scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, areq_ctx->cryptlen, crypto_aead_authsize(authenc_esn), 1); out: 
authenc_esn_request_complete(req, err); } static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); if (err) goto out; ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, areq_ctx->trailen); ahash_request_set_callback(ahreq, aead_request_flags(req) & CRYPTO_TFM_REQ_MAY_SLEEP, areq_ctx->complete, req); err = crypto_ahash_finup(ahreq); if (err) goto out; scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, areq_ctx->cryptlen, crypto_aead_authsize(authenc_esn), 1); out: authenc_esn_request_complete(req, err); } static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); if (err) goto out; scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, areq_ctx->cryptlen, crypto_aead_authsize(authenc_esn), 1); out: aead_request_complete(req, err); } static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq, int err) { u8 *ihash; unsigned int authsize; struct ablkcipher_request *abreq; struct aead_request *req = areq->data; struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); unsigned int cryptlen = req->cryptlen; if (err) goto out; ahash_request_set_crypt(ahreq, areq_ctx->sg, 
ahreq->result, areq_ctx->cryptlen); ahash_request_set_callback(ahreq, aead_request_flags(req) & CRYPTO_TFM_REQ_MAY_SLEEP, areq_ctx->update_complete2, req); err = crypto_ahash_update(ahreq); if (err) goto out; ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, areq_ctx->trailen); ahash_request_set_callback(ahreq, aead_request_flags(req) & CRYPTO_TFM_REQ_MAY_SLEEP, areq_ctx->complete, req); err = crypto_ahash_finup(ahreq); if (err) goto out; authsize = crypto_aead_authsize(authenc_esn); cryptlen -= authsize; ihash = ahreq->result + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; abreq = aead_request_ctx(req); ablkcipher_request_set_tfm(abreq, ctx->enc); ablkcipher_request_set_callback(abreq, aead_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, req->iv); err = crypto_ablkcipher_decrypt(abreq); out: authenc_esn_request_complete(req, err); } static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq, int err) { u8 *ihash; unsigned int authsize; struct ablkcipher_request *abreq; struct aead_request *req = areq->data; struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); unsigned int cryptlen = req->cryptlen; if (err) goto out; ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, areq_ctx->trailen); ahash_request_set_callback(ahreq, aead_request_flags(req) & CRYPTO_TFM_REQ_MAY_SLEEP, areq_ctx->complete, req); err = crypto_ahash_finup(ahreq); if (err) goto out; authsize = crypto_aead_authsize(authenc_esn); cryptlen -= authsize; ihash = ahreq->result + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 
authsize, 0); err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; abreq = aead_request_ctx(req); ablkcipher_request_set_tfm(abreq, ctx->enc); ablkcipher_request_set_callback(abreq, aead_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, req->iv); err = crypto_ablkcipher_decrypt(abreq); out: authenc_esn_request_complete(req, err); } static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, int err) { u8 *ihash; unsigned int authsize; struct ablkcipher_request *abreq; struct aead_request *req = areq->data; struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); unsigned int cryptlen = req->cryptlen; if (err) goto out; authsize = crypto_aead_authsize(authenc_esn); cryptlen -= authsize; ihash = ahreq->result + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); err = memcmp(ihash, ahreq->result, authsize) ? 
-EBADMSG : 0; if (err) goto out; abreq = aead_request_ctx(req); ablkcipher_request_set_tfm(abreq, ctx->enc); ablkcipher_request_set_callback(abreq, aead_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, req->iv); err = crypto_ablkcipher_decrypt(abreq); out: authenc_esn_request_complete(req, err); } static u8 *crypto_authenc_esn_ahash(struct aead_request *req, unsigned int flags) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct crypto_ahash *auth = ctx->auth; struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); u8 *hash = areq_ctx->tail; int err; hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), crypto_ahash_alignmask(auth) + 1); ahash_request_set_tfm(ahreq, auth); err = crypto_ahash_init(ahreq); if (err) return ERR_PTR(err); ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen); ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, areq_ctx->update_complete, req); err = crypto_ahash_update(ahreq); if (err) return ERR_PTR(err); ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen); ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, areq_ctx->update_complete2, req); err = crypto_ahash_update(ahreq); if (err) return ERR_PTR(err); ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash, areq_ctx->trailen); ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, areq_ctx->complete, req); err = crypto_ahash_finup(ahreq); if (err) return ERR_PTR(err); return hash; } static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv, unsigned int flags) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct scatterlist *dst = req->dst; struct scatterlist *assoc = 
req->assoc; struct scatterlist *cipher = areq_ctx->cipher; struct scatterlist *hsg = areq_ctx->hsg; struct scatterlist *tsg = areq_ctx->tsg; struct scatterlist *assoc1; struct scatterlist *assoc2; unsigned int ivsize = crypto_aead_ivsize(authenc_esn); unsigned int cryptlen = req->cryptlen; struct page *dstp; u8 *vdst; u8 *hash; dstp = sg_page(dst); vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset; if (ivsize) { sg_init_table(cipher, 2); sg_set_buf(cipher, iv, ivsize); scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2); dst = cipher; cryptlen += ivsize; } if (sg_is_last(assoc)) return -EINVAL; assoc1 = assoc + 1; if (sg_is_last(assoc1)) return -EINVAL; assoc2 = assoc + 2; if (!sg_is_last(assoc2)) return -EINVAL; sg_init_table(hsg, 2); sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset); sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset); sg_init_table(tsg, 1); sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset); areq_ctx->cryptlen = cryptlen; areq_ctx->headlen = assoc->length + assoc2->length; areq_ctx->trailen = assoc1->length; areq_ctx->sg = dst; areq_ctx->complete = authenc_esn_geniv_ahash_done; areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done; areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2; hash = crypto_authenc_esn_ahash(req, flags); if (IS_ERR(hash)) return PTR_ERR(hash); scatterwalk_map_and_copy(hash, dst, cryptlen, crypto_aead_authsize(authenc_esn), 1); return 0; } static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req, int err) { struct aead_request *areq = req->data; if (!err) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct ablkcipher_request *abreq = aead_request_ctx(areq); u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(ctx->enc); err = crypto_authenc_esn_genicv(areq, iv, 0); } authenc_esn_request_complete(areq, err); } static int 
crypto_authenc_esn_encrypt(struct aead_request *req) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct crypto_ablkcipher *enc = ctx->enc; struct scatterlist *dst = req->dst; unsigned int cryptlen = req->cryptlen; struct ablkcipher_request *abreq = (void *)(areq_ctx->tail + ctx->reqoff); u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc); int err; ablkcipher_request_set_tfm(abreq, enc); ablkcipher_request_set_callback(abreq, aead_request_flags(req), crypto_authenc_esn_encrypt_done, req); ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv); memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn)); err = crypto_ablkcipher_encrypt(abreq); if (err) return err; return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP); } static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req, int err) { struct aead_request *areq = req->data; if (!err) { struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); err = crypto_authenc_esn_genicv(areq, greq->giv, 0); } authenc_esn_request_complete(areq, err); } static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req) { struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct aead_request *areq = &req->areq; struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); u8 *iv = req->giv; int err; skcipher_givcrypt_set_tfm(greq, ctx->enc); skcipher_givcrypt_set_callback(greq, aead_request_flags(areq), crypto_authenc_esn_givencrypt_done, areq); skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen, areq->iv); skcipher_givcrypt_set_giv(greq, iv, req->seq); err = crypto_skcipher_givencrypt(greq); if (err) return err; return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP); } static int 
crypto_authenc_esn_verify(struct aead_request *req) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); u8 *ohash; u8 *ihash; unsigned int authsize; areq_ctx->complete = authenc_esn_verify_ahash_done; areq_ctx->update_complete = authenc_esn_verify_ahash_update_done; ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP); if (IS_ERR(ohash)) return PTR_ERR(ohash); authsize = crypto_aead_authsize(authenc_esn); ihash = ohash + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; } static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, unsigned int cryptlen) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct scatterlist *src = req->src; struct scatterlist *assoc = req->assoc; struct scatterlist *cipher = areq_ctx->cipher; struct scatterlist *hsg = areq_ctx->hsg; struct scatterlist *tsg = areq_ctx->tsg; struct scatterlist *assoc1; struct scatterlist *assoc2; unsigned int ivsize = crypto_aead_ivsize(authenc_esn); struct page *srcp; u8 *vsrc; srcp = sg_page(src); vsrc = PageHighMem(srcp) ? 
NULL : page_address(srcp) + src->offset; if (ivsize) { sg_init_table(cipher, 2); sg_set_buf(cipher, iv, ivsize); scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2); src = cipher; cryptlen += ivsize; } if (sg_is_last(assoc)) return -EINVAL; assoc1 = assoc + 1; if (sg_is_last(assoc1)) return -EINVAL; assoc2 = assoc + 2; if (!sg_is_last(assoc2)) return -EINVAL; sg_init_table(hsg, 2); sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset); sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset); sg_init_table(tsg, 1); sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset); areq_ctx->cryptlen = cryptlen; areq_ctx->headlen = assoc->length + assoc2->length; areq_ctx->trailen = assoc1->length; areq_ctx->sg = src; areq_ctx->complete = authenc_esn_verify_ahash_done; areq_ctx->update_complete = authenc_esn_verify_ahash_update_done; areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2; return crypto_authenc_esn_verify(req); } static int crypto_authenc_esn_decrypt(struct aead_request *req) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct ablkcipher_request *abreq = aead_request_ctx(req); unsigned int cryptlen = req->cryptlen; unsigned int authsize = crypto_aead_authsize(authenc_esn); u8 *iv = req->iv; int err; if (cryptlen < authsize) return -EINVAL; cryptlen -= authsize; err = crypto_authenc_esn_iverify(req, iv, cryptlen); if (err) return err; ablkcipher_request_set_tfm(abreq, ctx->enc); ablkcipher_request_set_callback(abreq, aead_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv); return crypto_ablkcipher_decrypt(abreq); } static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst); struct crypto_authenc_esn_ctx *ctx = 
crypto_tfm_ctx(tfm); struct crypto_ahash *auth; struct crypto_ablkcipher *enc; int err; auth = crypto_spawn_ahash(&ictx->auth); if (IS_ERR(auth)) return PTR_ERR(auth); enc = crypto_spawn_skcipher(&ictx->enc); err = PTR_ERR(enc); if (IS_ERR(enc)) goto err_free_ahash; ctx->auth = auth; ctx->enc = enc; ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + crypto_ahash_alignmask(auth), crypto_ahash_alignmask(auth) + 1) + crypto_ablkcipher_ivsize(enc); tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) + ctx->reqoff + max_t(unsigned int, crypto_ahash_reqsize(auth) + sizeof(struct ahash_request), sizeof(struct skcipher_givcrypt_request) + crypto_ablkcipher_reqsize(enc)); return 0; err_free_ahash: crypto_free_ahash(auth); return err; } static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm) { struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_ahash(ctx->auth); crypto_free_ablkcipher(ctx->enc); } static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb) { struct crypto_attr_type *algt; struct crypto_instance *inst; struct hash_alg_common *auth; struct crypto_alg *auth_base; struct crypto_alg *enc; struct authenc_esn_instance_ctx *ctx; const char *enc_name; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_AHASH_MASK); if (IS_ERR(auth)) return ERR_CAST(auth); auth_base = &auth->base; enc_name = crypto_attr_alg_name(tb[2]); err = PTR_ERR(enc_name); if (IS_ERR(enc_name)) goto out_put_auth; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); err = -ENOMEM; if (!inst) goto out_put_auth; ctx = crypto_instance_ctx(inst); err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); if (err) goto err_free_inst; crypto_set_skcipher_spawn(&ctx->enc, inst); err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, crypto_requires_sync(algt->type, 
algt->mask)); if (err) goto err_drop_auth; enc = crypto_skcipher_spawn_alg(&ctx->enc); err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "authencesn(%s,%s)", auth_base->cra_driver_name, enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; inst->alg.cra_priority = enc->cra_priority * 10 + auth_base->cra_priority; inst->alg.cra_blocksize = enc->cra_blocksize; inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; inst->alg.cra_aead.maxauthsize = auth->digestsize; inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); inst->alg.cra_init = crypto_authenc_esn_init_tfm; inst->alg.cra_exit = crypto_authenc_esn_exit_tfm; inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey; inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt; inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt; inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt; out: crypto_mod_put(auth_base); return inst; err_drop_enc: crypto_drop_skcipher(&ctx->enc); err_drop_auth: crypto_drop_ahash(&ctx->auth); err_free_inst: kfree(inst); out_put_auth: inst = ERR_PTR(err); goto out; } static void crypto_authenc_esn_free(struct crypto_instance *inst) { struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst); crypto_drop_skcipher(&ctx->enc); crypto_drop_ahash(&ctx->auth); kfree(inst); } static struct crypto_template crypto_authenc_esn_tmpl = { .name = "authencesn", .alloc = crypto_authenc_esn_alloc, .free = crypto_authenc_esn_free, .module = THIS_MODULE, }; static int __init crypto_authenc_esn_module_init(void) { return crypto_register_template(&crypto_authenc_esn_tmpl); } static 
void __exit crypto_authenc_esn_module_exit(void) { crypto_unregister_template(&crypto_authenc_esn_tmpl); } module_init(crypto_authenc_esn_module_init); module_exit(crypto_authenc_esn_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
gpl-2.0
blade-vec-4g/android_kernel_zte_msm8226
net/ipv6/tcp_ipv6.c
1642
56966
/* * TCP over IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on: * linux/net/ipv4/tcp.c * linux/net/ipv4/tcp_input.c * linux/net/ipv4/tcp_output.c * * Fixes: * Hideaki YOSHIFUJI : sin6_scope_id support * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind * a single port at the same time. * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/bottom_half.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/jiffies.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/jhash.h> #include <linux/ipsec.h> #include <linux/times.h> #include <linux/slab.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/random.h> #include <net/tcp.h> #include <net/ndisc.h> #include <net/inet6_hashtables.h> #include <net/inet6_connection_sock.h> #include <net/ipv6.h> #include <net/transp_v6.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include <net/ip6_checksum.h> #include <net/inet_ecn.h> #include <net/protocol.h> #include <net/xfrm.h> #include <net/snmp.h> #include <net/dsfield.h> #include <net/timewait_sock.h> #include <net/netdma.h> #include <net/inet_common.h> #include <net/secure_seq.h> #include <net/tcp_memcontrol.h> #include <asm/uaccess.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/crypto.h> #include <linux/scatterlist.h> static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff 
*skb, struct request_sock *req); static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); static void __tcp_v6_send_check(struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr); static const struct inet_connection_sock_af_ops ipv6_mapped; static const struct inet_connection_sock_af_ops ipv6_specific; #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv6_specific; static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; #else static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, const struct in6_addr *addr) { return NULL; } #endif static void tcp_v6_hash(struct sock *sk) { if (sk->sk_state != TCP_CLOSE) { if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) { tcp_prot.hash(sk); return; } local_bh_disable(); __inet6_hash(sk, NULL); local_bh_enable(); } } static __inline__ __sum16 tcp_v6_check(int len, const struct in6_addr *saddr, const struct in6_addr *daddr, __wsum base) { return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); } static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) { return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, ipv6_hdr(skb)->saddr.s6_addr32, tcp_hdr(skb)->dest, tcp_hdr(skb)->source); } static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; struct inet_sock *inet = inet_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp = tcp_sk(sk); struct in6_addr *saddr = NULL, *final_p, final; struct rt6_info *rt; struct flowi6 fl6; struct dst_entry *dst; int addr_type; int err; if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (usin->sin6_family != AF_INET6) return -EAFNOSUPPORT; memset(&fl6, 0, sizeof(fl6)); if (np->sndflow) { fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK; IP6_ECN_flow_init(fl6.flowlabel); if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { struct ip6_flowlabel *flowlabel; flowlabel = 
fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; usin->sin6_addr = flowlabel->dst; fl6_sock_release(flowlabel); } } /* * connect() to INADDR_ANY means loopback (BSD'ism). */ if(ipv6_addr_any(&usin->sin6_addr)) usin->sin6_addr.s6_addr[15] = 0x1; addr_type = ipv6_addr_type(&usin->sin6_addr); if(addr_type & IPV6_ADDR_MULTICAST) return -ENETUNREACH; if (addr_type&IPV6_ADDR_LINKLOCAL) { if (addr_len >= sizeof(struct sockaddr_in6) && usin->sin6_scope_id) { /* If interface is set while binding, indices * must coincide. */ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != usin->sin6_scope_id) return -EINVAL; sk->sk_bound_dev_if = usin->sin6_scope_id; } /* Connect to link-local address requires an interface */ if (!sk->sk_bound_dev_if) return -EINVAL; } if (tp->rx_opt.ts_recent_stamp && !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) { tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; tp->write_seq = 0; } np->daddr = usin->sin6_addr; np->flow_label = fl6.flowlabel; /* * TCP over IPv4 */ if (addr_type == IPV6_ADDR_MAPPED) { u32 exthdrlen = icsk->icsk_ext_hdr_len; struct sockaddr_in sin; SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); if (__ipv6_only_sock(sk)) return -ENETUNREACH; sin.sin_family = AF_INET; sin.sin_port = usin->sin6_port; sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; icsk->icsk_af_ops = &ipv6_mapped; sk->sk_backlog_rcv = tcp_v4_do_rcv; #ifdef CONFIG_TCP_MD5SIG tp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); if (err) { icsk->icsk_ext_hdr_len = exthdrlen; icsk->icsk_af_ops = &ipv6_specific; sk->sk_backlog_rcv = tcp_v6_do_rcv; #ifdef CONFIG_TCP_MD5SIG tp->af_specific = &tcp_sock_ipv6_specific; #endif goto failure; } else { ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr); } return err; } if (!ipv6_addr_any(&np->rcv_saddr)) saddr = &np->rcv_saddr; fl6.flowi6_proto = IPPROTO_TCP; fl6.daddr = 
np->daddr; fl6.saddr = saddr ? *saddr : np->saddr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; final_p = fl6_update_dst(&fl6, np->opt, &final); security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; } if (saddr == NULL) { saddr = &fl6.saddr; np->rcv_saddr = *saddr; } /* set the source address */ np->saddr = *saddr; inet->inet_rcv_saddr = LOOPBACK4_IPV6; sk->sk_gso_type = SKB_GSO_TCPV6; __ip6_dst_store(sk, dst, NULL, NULL); rt = (struct rt6_info *) dst; if (tcp_death_row.sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) { struct inet_peer *peer = rt6_get_peer(rt); /* * VJ's idea. We save last timestamp seen from * the destination in peer table, when entering state * TIME-WAIT * and initialize rx_opt.ts_recent from it, * when trying new connection. 
*/ if (peer) { inet_peer_refcheck(peer); if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; tp->rx_opt.ts_recent = peer->tcp_ts; } } } icsk->icsk_ext_hdr_len = 0; if (np->opt) icsk->icsk_ext_hdr_len = (np->opt->opt_flen + np->opt->opt_nflen); tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); inet->inet_dport = usin->sin6_port; tcp_set_state(sk, TCP_SYN_SENT); err = inet6_hash_connect(&tcp_death_row, sk); if (err) goto late_failure; if (!tp->write_seq) tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, np->daddr.s6_addr32, inet->inet_sport, inet->inet_dport); err = tcp_connect(sk); if (err) goto late_failure; return 0; late_failure: tcp_set_state(sk, TCP_CLOSE); __sk_dst_reset(sk); failure: inet->inet_dport = 0; sk->sk_route_caps = 0; return err; } static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data; const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); struct ipv6_pinfo *np; struct sock *sk; int err; struct tcp_sock *tp; __u32 seq; struct net *net = dev_net(skb->dev); sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex); if (sk == NULL) { ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return; } if (sk->sk_state == TCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } bh_lock_sock(sk); if (sock_owned_by_user(sk)) NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == TCP_CLOSE) goto out; if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); goto out; } tp = tcp_sk(sk); seq = ntohl(th->seq); if (sk->sk_state != TCP_LISTEN && !between(seq, tp->snd_una, tp->snd_nxt)) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } np = inet6_sk(sk); if (type == ICMPV6_PKT_TOOBIG) 
{ struct dst_entry *dst; if (sock_owned_by_user(sk)) goto out; if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) goto out; /* icmp should have updated the destination cache entry */ dst = __sk_dst_check(sk, np->dst_cookie); if (dst == NULL) { struct inet_sock *inet = inet_sk(sk); struct flowi6 fl6; /* BUGGG_FUTURE: Again, it is not clear how to handle rthdr case. Ignore this complexity for now. */ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; fl6.daddr = np->daddr; fl6.saddr = np->saddr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet->inet_dport; fl6.fl6_sport = inet->inet_sport; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false); if (IS_ERR(dst)) { sk->sk_err_soft = -PTR_ERR(dst); goto out; } } else dst_hold(dst); if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { tcp_sync_mss(sk, dst_mtu(dst)); tcp_simple_retransmit(sk); } /* else let the usual retransmit timer handle it */ dst_release(dst); goto out; } icmpv6_err_convert(type, code, &err); /* Might be for an request_sock */ switch (sk->sk_state) { struct request_sock *req, **prev; case TCP_LISTEN: if (sock_owned_by_user(sk)) goto out; req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr, &hdr->saddr, inet6_iif(skb)); if (!req) goto out; /* ICMPs are not backlogged, hence we cannot get * an established socket here. */ WARN_ON(req->sk != NULL); if (seq != tcp_rsk(req)->snt_isn) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } inet_csk_reqsk_queue_drop(sk, req, prev); goto out; case TCP_SYN_SENT: case TCP_SYN_RECV: /* Cannot happen. It can, it SYNs are crossed. 
		   --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* Socket is owned by user: record the error without waking;
	 * report only if the application asked for it via IPV6_RECVERR.
	 */
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 * Build and transmit a SYN-ACK for the pending connection request @req.
 * Looks up a route for the request's addresses, builds the segment with
 * tcp_make_synack(), checksums it and hands it to ip6_xmit().
 * Returns 0 on success or a negative errno / net_xmit error code.
 */
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = treq->rmt_addr;
	fl6.saddr = treq->loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	/* A routing-header option may override the final destination. */
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		/* Restore the real peer address (fl6.daddr may have been
		 * rewritten to the routing header's final hop above).
		 */
		fl6.daddr = treq->rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

/* Retransmit a SYN-ACK; accounts it in TCP_MIB_RETRANSSEGS. */
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

/* Release the pktopts skb a request sock may hold (see conn_request). */
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
/* Look up the TCP-MD5 key configured for peer address @addr, if any. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

/* MD5 key lookup keyed by another socket's destination address. */
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

/* MD5 key lookup keyed by a request sock's remote address. */
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

/*
 * TCP_MD5SIG setsockopt handler: copy a struct tcp_md5sig from userspace
 * and add or (when tcpm_keylen == 0) delete the key for the given peer.
 * A v4-mapped IPv6 address is stored as an AF_INET key so the same key
 * matches the connection whichever family it is seen under.
 */
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* Zero key length means "delete the key". */
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk,
					      (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk,
				      (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk,
				      (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
				      GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

/*
 * Feed the IPv6 pseudo-header into the per-cpu MD5 hash state @hp.
 * Returns the crypto layer's status (0 on success).
 */
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr,
					int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. 
TCP pseudo-header (RFC2460) */ bp->saddr = *saddr; bp->daddr = *daddr; bp->protocol = cpu_to_be32(IPPROTO_TCP); bp->len = cpu_to_be32(nbytes); sg_init_one(&sg, bp, sizeof(*bp)); return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); } static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, const struct in6_addr *daddr, struct in6_addr *saddr, const struct tcphdr *th) { struct tcp_md5sig_pool *hp; struct hash_desc *desc; hp = tcp_get_md5sig_pool(); if (!hp) goto clear_hash_noput; desc = &hp->md5_desc; if (crypto_hash_init(desc)) goto clear_hash; if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_header(hp, th)) goto clear_hash; if (tcp_md5_hash_key(hp, key)) goto clear_hash; if (crypto_hash_final(desc, md5_hash)) goto clear_hash; tcp_put_md5sig_pool(); return 0; clear_hash: tcp_put_md5sig_pool(); clear_hash_noput: memset(md5_hash, 0, 16); return 1; } static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, const struct sock *sk, const struct request_sock *req, const struct sk_buff *skb) { const struct in6_addr *saddr, *daddr; struct tcp_md5sig_pool *hp; struct hash_desc *desc; const struct tcphdr *th = tcp_hdr(skb); if (sk) { saddr = &inet6_sk(sk)->saddr; daddr = &inet6_sk(sk)->daddr; } else if (req) { saddr = &inet6_rsk(req)->loc_addr; daddr = &inet6_rsk(req)->rmt_addr; } else { const struct ipv6hdr *ip6h = ipv6_hdr(skb); saddr = &ip6h->saddr; daddr = &ip6h->daddr; } hp = tcp_get_md5sig_pool(); if (!hp) goto clear_hash_noput; desc = &hp->md5_desc; if (crypto_hash_init(desc)) goto clear_hash; if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) goto clear_hash; if (tcp_md5_hash_header(hp, th)) goto clear_hash; if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(hp, key)) goto clear_hash; if (crypto_hash_final(desc, md5_hash)) goto clear_hash; tcp_put_md5sig_pool(); return 0; clear_hash: tcp_put_md5sig_pool(); clear_hash_noput: 
	memset(md5_hash, 0, 16);
	return 1;
}

/*
 * Verify the MD5 option of an incoming segment against the key (if any)
 * configured for the source address. Returns 0 if the packet may be
 * accepted, 1 if it must be dropped (key expected but absent, option
 * present but no key configured, or signature mismatch).
 */
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

/* Request-sock operations for IPv6 listeners (SYN-ACK (re)xmit, reset,
 * ack and destructor hooks).
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

/*
 * Fill in the TCP checksum of @skb for the given address pair. With
 * CHECKSUM_PARTIAL only the pseudo-header sum is stored and the rest is
 * left to the device; otherwise the full checksum is computed here.
 */
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr,
				const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

/* Checksum an outgoing segment using the socket's address pair. */
static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

/* GSO hook: prime the header for hardware checksum offload. */
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

/*
 * GRO receive hook: verify (or give up on) the checksum before handing
 * the segment to the generic TCP GRO engine.
 */
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		/* Cannot aggregate without a verified checksum. */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

/* GRO complete hook: rebuild the partial checksum of the merged skb. */
static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

/*
 * Build and send a bare TCP reply (RST when @rst, otherwise ACK) in
 * response to @skb, using the per-netns control socket. Optionally
 * carries a timestamp option (@ts) and an MD5 signature (@key).
 */
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack,
				 u32 win, u32 ts, struct tcp_md5sig_key *key,
				 int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *)
skb_push(buff, tot_len); skb_reset_transport_header(buff); /* Swap the send and the receive. */ memset(t1, 0, sizeof(*t1)); t1->dest = th->source; t1->source = th->dest; t1->doff = tot_len / 4; t1->seq = htonl(seq); t1->ack_seq = htonl(ack); t1->ack = !rst || !th->ack; t1->rst = rst; t1->window = htons(win); topt = (__be32 *)(t1 + 1); if (ts) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); *topt++ = htonl(tcp_time_stamp); *topt++ = htonl(ts); } #ifdef CONFIG_TCP_MD5SIG if (key) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); tcp_v6_md5_hash_hdr((__u8 *)topt, key, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, t1); } #endif memset(&fl6, 0, sizeof(fl6)); fl6.daddr = ipv6_hdr(skb)->saddr; fl6.saddr = ipv6_hdr(skb)->daddr; buff->ip_summed = CHECKSUM_PARTIAL; buff->csum = 0; __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); fl6.flowi6_proto = IPPROTO_TCP; fl6.flowi6_oif = inet6_iif(skb); fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); /* Pass a socket to ip6_dst_lookup either it is for RST * Underlying function will use this to retrieve the network * namespace */ dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false); if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); if (rst) TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); return; } kfree_skb(buff); } static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); u32 seq = 0, ack_seq = 0; struct tcp_md5sig_key *key = NULL; #ifdef CONFIG_TCP_MD5SIG const __u8 *hash_location = NULL; struct ipv6hdr *ipv6h = ipv6_hdr(skb); unsigned char newhash[16]; int genhash; struct sock *sk1 = NULL; #endif if (th->rst) return; if (!ipv6_unicast_destination(skb)) return; #ifdef CONFIG_TCP_MD5SIG hash_location = tcp_parse_md5sig_option(th); if (!sk && 
hash_location) { /* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. */ sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), &tcp_hashinfo, &ipv6h->daddr, ntohs(th->source), inet6_iif(skb)); if (!sk1) return; rcu_read_lock(); key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr); if (!key) goto release_sk1; genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) goto release_sk1; } else { key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL; } #endif if (th->ack) seq = ntohl(th->ack_seq); else ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2); tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0); #ifdef CONFIG_TCP_MD5SIG release_sk1: if (sk1) { rcu_read_unlock(); sock_put(sk1); } #endif } static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts, struct tcp_md5sig_key *key, u8 tclass) { tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass); } static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) { struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw), tw->tw_tclass); inet_twsk_put(tw); } static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0); } static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) { struct request_sock *req, **prev; const struct tcphdr *th = tcp_hdr(skb); struct sock *nsk; /* Find possible connection 
requests. */ req = inet6_csk_search_req(sk, &prev, th->source, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, inet6_iif(skb)); if (req) return tcp_check_req(sk, skb, req, prev); nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, &ipv6_hdr(skb)->saddr, th->source, &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb)); if (nsk) { if (nsk->sk_state != TCP_TIME_WAIT) { bh_lock_sock(nsk); return nsk; } inet_twsk_put(inet_twsk(nsk)); return NULL; } #ifdef CONFIG_SYN_COOKIES if (!th->syn) sk = cookie_v6_check(sk, skb); #endif return sk; } /* FIXME: this is substantially similar to the ipv4 code. * Can some kind of merge be done? -- erics */ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) { struct tcp_extend_values tmp_ext; struct tcp_options_received tmp_opt; const u8 *hash_location; struct request_sock *req; struct inet6_request_sock *treq; struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp = tcp_sk(sk); __u32 isn = TCP_SKB_CB(skb)->when; struct dst_entry *dst = NULL; int want_cookie = 0; if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); if (!ipv6_unicast_destination(skb)) goto drop; if (inet_csk_reqsk_queue_is_full(sk) && !isn) { want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6"); if (!want_cookie) goto drop; } if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; req = inet6_reqsk_alloc(&tcp6_request_sock_ops); if (req == NULL) goto drop; #ifdef CONFIG_TCP_MD5SIG tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops; #endif tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); tmp_opt.user_mss = tp->rx_opt.user_mss; tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.cookie_plus > 0 && tmp_opt.saw_tstamp && !tp->rx_opt.cookie_out_never && (sysctl_tcp_cookie_size > 0 || (tp->cookie_values != NULL && tp->cookie_values->cookie_desired > 0))) { u8 *c; u32 *d; u32 *mess = 
&tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS]; int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE; if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0) goto drop_and_free; /* Secret recipe starts with IP addresses */ d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0]; *mess++ ^= *d++; *mess++ ^= *d++; *mess++ ^= *d++; *mess++ ^= *d++; d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0]; *mess++ ^= *d++; *mess++ ^= *d++; *mess++ ^= *d++; *mess++ ^= *d++; /* plus variable length Initiator Cookie */ c = (u8 *)mess; while (l-- > 0) *c++ ^= *hash_location++; want_cookie = 0; /* not our kind of cookie */ tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { /* redundant indications, but ensure initialization. */ tmp_ext.cookie_out_never = 1; /* true */ tmp_ext.cookie_plus = 0; } else { goto drop_and_free; } tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always; if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb); treq = inet6_rsk(req); treq->rmt_addr = ipv6_hdr(skb)->saddr; treq->loc_addr = ipv6_hdr(skb)->daddr; if (!want_cookie || tmp_opt.tstamp_ok) TCP_ECN_create_request(req, tcp_hdr(skb)); treq->iif = sk->sk_bound_dev_if; /* So that link locals have meaning */ if (!sk->sk_bound_dev_if && ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) treq->iif = inet6_iif(skb); if (!isn) { struct inet_peer *peer = NULL; if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { atomic_inc(&skb->users); treq->pktopts = skb; } if (want_cookie) { isn = cookie_v6_init_sequence(sk, skb, &req->mss); req->cookie_ts = tmp_opt.tstamp_ok; goto have_isn; } /* VJ's idea. We save last timestamp seen * from the destination in peer table, when entering * state TIME-WAIT, and check against it before * accepting new connection request. 
* * If "isn" is not zero, this request hit alive * timewait bucket, so that all the necessary checks * are made in the function processing timewait state. */ if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle && (dst = inet6_csk_route_req(sk, req)) != NULL && (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL && ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6, &treq->rmt_addr)) { inet_peer_refcheck(peer); if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && (s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); goto drop_and_release; } } /* Kill the following clause, if you dislike this way. */ else if (!sysctl_tcp_syncookies && (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < (sysctl_max_syn_backlog >> 2)) && (!peer || !peer->tcp_ts_stamp) && (!dst || !dst_metric(dst, RTAX_RTT))) { /* Without syncookies last quarter of * backlog is filled with destinations, * proven to be alive. * It means that we continue to communicate * to destinations, already remembered * to the moment of synflood. 
*/ LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n", &treq->rmt_addr, ntohs(tcp_hdr(skb)->source)); goto drop_and_release; } isn = tcp_v6_init_sequence(skb); } have_isn: tcp_rsk(req)->snt_isn = isn; tcp_rsk(req)->snt_synack = tcp_time_stamp; security_inet_conn_request(sk, skb, req); if (tcp_v6_send_synack(sk, req, (struct request_values *)&tmp_ext) || want_cookie) goto drop_and_free; inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); return 0; drop_and_release: dst_release(dst); drop_and_free: reqsk_free(req); drop: return 0; /* don't send reset */ } static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet6_request_sock *treq; struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct tcp6_sock *newtcp6sk; struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; struct ipv6_txoptions *opt; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; #endif if (skb->protocol == htons(ETH_P_IP)) { /* * v6 mapped */ newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst); if (newsk == NULL) return NULL; newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; newinet = inet_sk(newsk); newnp = inet6_sk(newsk); newtp = tcp_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); newnp->rcv_saddr = newnp->saddr; inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; newsk->sk_backlog_rcv = tcp_v4_do_rcv; #ifdef CONFIG_TCP_MD5SIG newtp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb)); /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count * here, tcp_create_openreq_child now does this 
for us, see the comment in * that function for the gory details. -acme */ /* It is tricky place. Until this moment IPv4 tcp worked with IPv6 icsk.icsk_af_ops. Sync it now. */ tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); return newsk; } treq = inet6_rsk(req); opt = np->opt; if (sk_acceptq_is_full(sk)) goto out_overflow; if (!dst) { dst = inet6_csk_route_req(sk, req); if (!dst) goto out; } newsk = tcp_create_openreq_child(sk, req, skb); if (newsk == NULL) goto out_nonewsk; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, tcp_create_openreq_child now does this for us, see the * comment in that function for the gory details. -acme */ newsk->sk_gso_type = SKB_GSO_TCPV6; __ip6_dst_store(newsk, dst, NULL, NULL); newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; newtp = tcp_sk(newsk); newinet = inet_sk(newsk); newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newnp->daddr = treq->rmt_addr; newnp->saddr = treq->loc_addr; newnp->rcv_saddr = treq->loc_addr; newsk->sk_bound_dev_if = treq->iif; /* Now IPv6 options... First: no IPv4 options. */ newinet->inet_opt = NULL; newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; /* Clone pktoptions received with SYN */ newnp->pktoptions = NULL; if (treq->pktopts != NULL) { newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC); kfree_skb(treq->pktopts); treq->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); } newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb)); /* Clone native IPv6 options from listening socket (if any) Yes, keeping reference count would be much more clever, but we make one more one thing there: reattach optmem to newsk. 
*/ if (opt) { newnp->opt = ipv6_dup_options(newsk, opt); if (opt != np->opt) sock_kfree_s(sk, opt, opt->tot_len); } inet_csk(newsk)->icsk_ext_hdr_len = 0; if (newnp->opt) inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + newnp->opt->opt_flen); tcp_mtup_init(newsk); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = dst_metric_advmss(dst); if (tcp_sk(sk)->rx_opt.user_mss && tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; tcp_initialize_rcv_mss(newsk); if (tcp_rsk(req)->snt_synack) tcp_valid_rtt_meas(newsk, tcp_time_stamp - tcp_rsk(req)->snt_synack); newtp->total_retrans = req->retrans; newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; #ifdef CONFIG_TCP_MD5SIG /* Copy over the MD5 key from the original socket */ if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) { /* We're using one, so create a matching key * on the newsk structure. If we fail to get * memory, then we end up not copying the key * across. Shucks. */ tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr, AF_INET6, key->key, key->keylen, GFP_ATOMIC); } #endif if (__inet_inherit_port(sk, newsk) < 0) { sock_put(newsk); goto out; } __inet6_hash(newsk, NULL); return newsk; out_overflow: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: if (opt && opt != np->opt) sock_kfree_s(sk, opt, opt->tot_len); dst_release(dst); out: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; } static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; return 0; } } skb->csum = ~csum_unfold(tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0)); if (skb->len <= 76) { return __skb_checksum_complete(skb); } return 0; } /* The socket must have it's spinlock held when we get * here. 
* * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held. */ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp; struct sk_buff *opt_skb = NULL; /* Imagine: socket is IPv6. IPv4 packet arrives, goes to IPv4 receive handler and backlogged. From backlog it always goes here. Kerboom... Fortunately, tcp_rcv_established and rcv_established handle them correctly, but it is not case with tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK */ if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); #ifdef CONFIG_TCP_MD5SIG if (tcp_v6_inbound_md5_hash (sk, skb)) goto discard; #endif if (sk_filter(sk, skb)) goto discard; /* * socket locking is here for SMP purposes as backlog rcv * is currently called with bh processing disabled. */ /* Do Stevens' IPV6_PKTOPTIONS. Yes, guys, it is the only place in our code, where we may make it not affecting IPv4. The rest of code is protocol independent, and I do not like idea to uglify IPv4. Actually, all the idea behind IPV6_PKTOPTIONS looks not very well thought. For now we latch options, received in the last packet, enqueued by tcp. Feel free to propose better solution. --ANK (980728) */ if (np->rxopt.all) opt_skb = skb_clone(skb, GFP_ATOMIC); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ sock_rps_save_rxhash(sk, skb); if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) goto reset; if (opt_skb) goto ipv6_pktoptions; return 0; } if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v6_hnd_req(sk, skb); if (!nsk) goto discard; /* * Queue it on the new socket if the new socket is active, * otherwise we just shortcircuit this and continue with * the new socket.. 
*/ if(nsk != sk) { sock_rps_save_rxhash(nsk, skb); if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) __kfree_skb(opt_skb); return 0; } } else sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) goto reset; if (opt_skb) goto ipv6_pktoptions; return 0; reset: tcp_v6_send_reset(sk, skb); discard: if (opt_skb) __kfree_skb(opt_skb); kfree_skb(skb); return 0; csum_err: TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); goto discard; ipv6_pktoptions: /* Do you ask, what is it? 1. skb was enqueued by tcp. 2. skb is added to tail of read queue, rather than out of order. 3. socket is not in passive state. 4. Finally, it really contains options, which user wants to receive. */ tp = tcp_sk(sk); if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) np->mcast_oif = inet6_iif(opt_skb); if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; if (np->rxopt.bits.rxtclass) np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb)); if (ipv6_opt_accepted(sk, opt_skb)) { skb_set_owner_r(opt_skb, sk); opt_skb = xchg(&np->pktoptions, opt_skb); } else { __kfree_skb(opt_skb); opt_skb = xchg(&np->pktoptions, NULL); } } kfree_skb(opt_skb); return 0; } static int tcp_v6_rcv(struct sk_buff *skb) { const struct tcphdr *th; const struct ipv6hdr *hdr; struct sock *sk; int ret; struct net *net = dev_net(skb->dev); if (skb->pkt_type != PACKET_HOST) goto discard_it; /* * Count it even if it's bad. 
*/ TCP_INC_STATS_BH(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr)/4) goto bad_packet; if (!pskb_may_pull(skb, th->doff*4)) goto discard_it; if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb)) goto bad_packet; th = tcp_hdr(skb); hdr = ipv6_hdr(skb); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff*4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->when = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); TCP_SKB_CB(skb)->sacked = 0; sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); if (!sk) goto no_tcp_socket; process: if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); goto discard_and_relse; } if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; if (sk_filter(sk, skb)) goto discard_and_relse; skb->dev = NULL; bh_lock_sock_nested(sk); ret = 0; if (!sock_owned_by_user(sk)) { #ifdef CONFIG_NET_DMA struct tcp_sock *tp = tcp_sk(sk); if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) tp->ucopy.dma_chan = net_dma_find_channel(); if (tp->ucopy.dma_chan) ret = tcp_v6_do_rcv(sk, skb); else #endif { if (!tcp_prequeue(sk, skb)) ret = tcp_v6_do_rcv(sk, skb); } } else if (unlikely(sk_add_backlog(sk, skb))) { bh_unlock_sock(sk); NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; } bh_unlock_sock(sk); sock_put(sk); return ret ? 
-1 : 0; no_tcp_socket: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { bad_packet: TCP_INC_STATS_BH(net, TCP_MIB_INERRS); } else { tcp_v6_send_reset(NULL, skb); } discard_it: /* * Discard frame */ kfree_skb(skb); return 0; discard_and_relse: sock_put(sk); goto discard_it; do_time_wait: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { inet_twsk_put(inet_twsk(sk)); goto discard_it; } if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { TCP_INC_STATS_BH(net, TCP_MIB_INERRS); inet_twsk_put(inet_twsk(sk)); goto discard_it; } switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { case TCP_TW_SYN: { struct sock *sk2; sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb)); if (sk2 != NULL) { struct inet_timewait_sock *tw = inet_twsk(sk); inet_twsk_deschedule(tw, &tcp_death_row); inet_twsk_put(tw); sk = sk2; goto process; } /* Fall through to ACK */ } case TCP_TW_ACK: tcp_v6_timewait_ack(sk, skb); break; case TCP_TW_RST: goto no_tcp_socket; case TCP_TW_SUCCESS:; } goto discard_it; } static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it) { struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct inet_peer *peer; if (!rt || !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) { peer = inet_getpeer_v6(&np->daddr, 1); *release_it = true; } else { if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); peer = rt->rt6i_peer; *release_it = false; } return peer; } static void *tcp_v6_tw_get_peer(struct sock *sk) { const struct inet6_timewait_sock *tw6 = inet6_twsk(sk); const struct inet_timewait_sock *tw = inet_twsk(sk); if (tw->tw_family == AF_INET) return tcp_v4_tw_get_peer(sk); return inet_getpeer_v6(&tw6->tw_v6_daddr, 1); } static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp6_timewait_sock), .twsk_unique = 
tcp_twsk_unique, .twsk_destructor= tcp_twsk_destructor, .twsk_getpeer = tcp_v6_tw_get_peer, }; static const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .get_peer = tcp_v6_get_peer, .net_header_len = sizeof(struct ipv6hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, #endif }; #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { .md5_lookup = tcp_v6_md5_lookup, .calc_md5_hash = tcp_v6_md5_hash_skb, .md5_parse = tcp_v6_parse_md5_keys, }; #endif /* * TCP over IPv4 via INET6 API */ static const struct inet_connection_sock_af_ops ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .get_peer = tcp_v4_get_peer, .net_header_len = sizeof(struct iphdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, #endif }; #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, .md5_parse = tcp_v6_parse_md5_keys, }; #endif /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. 
*/ static int tcp_v6_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); skb_queue_head_init(&tp->out_of_order_queue); tcp_init_xmit_timers(sk); tcp_prequeue_init(tp); icsk->icsk_rto = TCP_TIMEOUT_INIT; tp->mdev = TCP_TIMEOUT_INIT; /* So many TCP implementations out there (incorrectly) count the * initial SYN frame in their delayed-ACK and congestion control * algorithms that we must have the following bandaid to talk * efficiently to them. -DaveM */ tp->snd_cwnd = 2; /* See draft-stevens-tcpca-spec-01 for discussion of the * initialization of these values. */ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd_clamp = ~0; tp->mss_cache = TCP_MSS_DEFAULT; tp->reordering = sysctl_tcp_reordering; sk->sk_state = TCP_CLOSE; icsk->icsk_af_ops = &ipv6_specific; icsk->icsk_ca_ops = &tcp_init_congestion_ops; icsk->icsk_sync_mss = tcp_sync_mss; sk->sk_write_space = sk_stream_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); #ifdef CONFIG_TCP_MD5SIG tp->af_specific = &tcp_sock_ipv6_specific; #endif /* TCP Cookie Transactions */ if (sysctl_tcp_cookie_size > 0) { /* Default, cookies without s_data_payload. */ tp->cookie_values = kzalloc(sizeof(*tp->cookie_values), sk->sk_allocation); if (tp->cookie_values != NULL) kref_init(&tp->cookie_values->kref); } /* Presumed zeroed, in order of appearance: * cookie_in_always, cookie_out_never, * s_data_constant, s_data_in, s_data_out */ sk->sk_sndbuf = sysctl_tcp_wmem[1]; sk->sk_rcvbuf = sysctl_tcp_rmem[1]; local_bh_disable(); sock_update_memcg(sk); sk_sockets_allocated_inc(sk); local_bh_enable(); return 0; } static void tcp_v6_destroy_sock(struct sock *sk) { tcp_v4_destroy_sock(sk); inet6_destroy_sock(sk); } #ifdef CONFIG_PROC_FS /* Proc filesystem TCPv6 sock list dumping. 
*/ static void get_openreq6(struct seq_file *seq, const struct sock *sk, struct request_sock *req, int i, int uid) { int ttd = req->expires - jiffies; const struct in6_addr *src = &inet6_rsk(req)->loc_addr; const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr; if (ttd < 0) ttd = 0; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], ntohs(inet_rsk(req)->loc_port), dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], ntohs(inet_rsk(req)->rmt_port), TCP_SYN_RECV, 0,0, /* could print option size, but that is af dependent. */ 1, /* timers active (only the expire timer) */ jiffies_to_clock_t(ttd), req->retrans, uid, 0, /* non standard timer */ 0, /* open_requests have no inode */ 0, req); } static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) { const struct in6_addr *dest, *src; __u16 destp, srcp; int timer_active; unsigned long timer_expires; const struct inet_sock *inet = inet_sk(sp); const struct tcp_sock *tp = tcp_sk(sp); const struct inet_connection_sock *icsk = inet_csk(sp); const struct ipv6_pinfo *np = inet6_sk(sp); dest = &np->daddr; src = &np->rcv_saddr; destp = ntohs(inet->inet_dport); srcp = ntohs(inet->inet_sport); if (icsk->icsk_pending == ICSK_TIME_RETRANS) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk->icsk_timeout; } else if (timer_pending(&sp->sk_timer)) { timer_active = 2; timer_expires = sp->sk_timer.expires; } else { timer_active = 0; timer_expires = jiffies; } seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], 
dest->s6_addr32[3], destp, sp->sk_state, tp->write_seq-tp->snd_una, (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), timer_active, jiffies_to_clock_t(timer_expires - jiffies), icsk->icsk_retransmits, sock_i_uid(sp), icsk->icsk_probes_out, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, jiffies_to_clock_t(icsk->icsk_rto), jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, tp->snd_cwnd, tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh ); } static void get_timewait6_sock(struct seq_file *seq, struct inet_timewait_sock *tw, int i) { const struct in6_addr *dest, *src; __u16 destp, srcp; const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); int ttd = tw->tw_ttd - jiffies; if (ttd < 0) ttd = 0; dest = &tw6->tw_v6_daddr; src = &tw6->tw_v6_rcv_saddr; destp = ntohs(tw->tw_dport); srcp = ntohs(tw->tw_sport); seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, tw->tw_substate, 0, 0, 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, atomic_read(&tw->tw_refcnt), tw); } static int tcp6_seq_show(struct seq_file *seq, void *v) { struct tcp_iter_state *st; if (v == SEQ_START_TOKEN) { seq_puts(seq, " sl " "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" " uid timeout inode\n"); goto out; } st = seq->private; switch (st->state) { case TCP_SEQ_STATE_LISTENING: case TCP_SEQ_STATE_ESTABLISHED: get_tcp6_sock(seq, v, st->num); break; case TCP_SEQ_STATE_OPENREQ: get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid); break; case TCP_SEQ_STATE_TIME_WAIT: get_timewait6_sock(seq, v, st->num); break; } out: return 0; } static const struct file_operations tcp6_afinfo_seq_fops = { .owner = THIS_MODULE, .open = tcp_seq_open, .read = 
seq_read, .llseek = seq_lseek, .release = seq_release_net }; static struct tcp_seq_afinfo tcp6_seq_afinfo = { .name = "tcp6", .family = AF_INET6, .seq_fops = &tcp6_afinfo_seq_fops, .seq_ops = { .show = tcp6_seq_show, }, }; int __net_init tcp6_proc_init(struct net *net) { return tcp_proc_register(net, &tcp6_seq_afinfo); } void tcp6_proc_exit(struct net *net) { tcp_proc_unregister(net, &tcp6_seq_afinfo); } #endif struct proto tcpv6_prot = { .name = "TCPv6", .owner = THIS_MODULE, .close = tcp_close, .connect = tcp_v6_connect, .disconnect = tcp_disconnect, .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v6_init_sock, .destroy = tcp_v6_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .sendpage = tcp_sendpage, .backlog_rcv = tcp_v6_do_rcv, .hash = tcp_v6_hash, .unhash = inet_unhash, .get_port = inet_csk_get_port, .enter_memory_pressure = tcp_enter_memory_pressure, .sockets_allocated = &tcp_sockets_allocated, .memory_allocated = &tcp_memory_allocated, .memory_pressure = &tcp_memory_pressure, .orphan_count = &tcp_orphan_count, .sysctl_wmem = sysctl_tcp_wmem, .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp6_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .twsk_prot = &tcp6_timewait_sock_ops, .rsk_prot = &tcp6_request_sock_ops, .h.hashinfo = &tcp_hashinfo, .no_autobind = true, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM .proto_cgroup = tcp_proto_cgroup, #endif }; static const struct inet6_protocol tcpv6_protocol = { .handler = tcp_v6_rcv, .err_handler = tcp_v6_err, .gso_send_check = tcp_v6_gso_send_check, .gso_segment = tcp_tso_segment, .gro_receive = tcp6_gro_receive, .gro_complete = tcp6_gro_complete, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; static struct inet_protosw tcpv6_protosw = { .type = SOCK_STREAM, 
.protocol = IPPROTO_TCP, .prot = &tcpv6_prot, .ops = &inet6_stream_ops, .no_check = 0, .flags = INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK, }; static int __net_init tcpv6_net_init(struct net *net) { return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, SOCK_RAW, IPPROTO_TCP, net); } static void __net_exit tcpv6_net_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv6.tcp_sk); } static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list) { inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6); } static struct pernet_operations tcpv6_net_ops = { .init = tcpv6_net_init, .exit = tcpv6_net_exit, .exit_batch = tcpv6_net_exit_batch, }; int __init tcpv6_init(void) { int ret; ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP); if (ret) goto out; /* register inet6 protocol */ ret = inet6_register_protosw(&tcpv6_protosw); if (ret) goto out_tcpv6_protocol; ret = register_pernet_subsys(&tcpv6_net_ops); if (ret) goto out_tcpv6_protosw; out: return ret; out_tcpv6_protocol: inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); out_tcpv6_protosw: inet6_unregister_protosw(&tcpv6_protosw); goto out; } void tcpv6_exit(void) { unregister_pernet_subsys(&tcpv6_net_ops); inet6_unregister_protosw(&tcpv6_protosw); inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); }
gpl-2.0
Minia89/one_plus_one
drivers/coresight/coresight-funnel.c
2154
6598
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/of_coresight.h> #include <linux/coresight.h> #include "coresight-priv.h" #define funnel_writel(drvdata, val, off) \ __raw_writel((val), drvdata->base + off) #define funnel_readl(drvdata, off) \ __raw_readl(drvdata->base + off) #define FUNNEL_LOCK(drvdata) \ do { \ mb(); \ funnel_writel(drvdata, 0x0, CORESIGHT_LAR); \ } while (0) #define FUNNEL_UNLOCK(drvdata) \ do { \ funnel_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \ mb(); \ } while (0) #define FUNNEL_FUNCTL (0x000) #define FUNNEL_PRICTL (0x004) #define FUNNEL_ITATBDATA0 (0xEEC) #define FUNNEL_ITATBCTR2 (0xEF0) #define FUNNEL_ITATBCTR1 (0xEF4) #define FUNNEL_ITATBCTR0 (0xEF8) #define FUNNEL_HOLDTIME_MASK (0xF00) #define FUNNEL_HOLDTIME_SHFT (0x8) #define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT) struct funnel_drvdata { void __iomem *base; struct device *dev; struct coresight_device *csdev; struct clk *clk; uint32_t priority; }; static void __funnel_enable(struct funnel_drvdata *drvdata, int port) { uint32_t functl; FUNNEL_UNLOCK(drvdata); functl = funnel_readl(drvdata, FUNNEL_FUNCTL); functl &= ~FUNNEL_HOLDTIME_MASK; functl |= FUNNEL_HOLDTIME; functl |= (1 << port); funnel_writel(drvdata, functl, FUNNEL_FUNCTL); 
funnel_writel(drvdata, drvdata->priority, FUNNEL_PRICTL); FUNNEL_LOCK(drvdata); } static int funnel_enable(struct coresight_device *csdev, int inport, int outport) { struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); int ret; ret = clk_prepare_enable(drvdata->clk); if (ret) return ret; __funnel_enable(drvdata, inport); dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); return 0; } static void __funnel_disable(struct funnel_drvdata *drvdata, int inport) { uint32_t functl; FUNNEL_UNLOCK(drvdata); functl = funnel_readl(drvdata, FUNNEL_FUNCTL); functl &= ~(1 << inport); funnel_writel(drvdata, functl, FUNNEL_FUNCTL); FUNNEL_LOCK(drvdata); } static void funnel_disable(struct coresight_device *csdev, int inport, int outport) { struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); __funnel_disable(drvdata, inport); clk_disable_unprepare(drvdata->clk); dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); } static const struct coresight_ops_link funnel_link_ops = { .enable = funnel_enable, .disable = funnel_disable, }; static const struct coresight_ops funnel_cs_ops = { .link_ops = &funnel_link_ops, }; static ssize_t funnel_show_priority(struct device *dev, struct device_attribute *attr, char *buf) { struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent); unsigned long val = drvdata->priority; return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } static ssize_t funnel_store_priority(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent); unsigned long val; if (sscanf(buf, "%lx", &val) != 1) return -EINVAL; drvdata->priority = val; return size; } static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, funnel_show_priority, funnel_store_priority); static struct attribute *funnel_attrs[] = { &dev_attr_priority.attr, NULL, }; static struct attribute_group funnel_attr_grp = { .attrs = funnel_attrs, }; static const struct attribute_group 
*funnel_attr_grps[] = { &funnel_attr_grp, NULL, }; static int __devinit funnel_probe(struct platform_device *pdev) { int ret; struct device *dev = &pdev->dev; struct coresight_platform_data *pdata; struct funnel_drvdata *drvdata; struct resource *res; struct coresight_desc *desc; if (coresight_fuse_access_disabled()) return -EPERM; if (pdev->dev.of_node) { pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node); if (IS_ERR(pdata)) return PTR_ERR(pdata); pdev->dev.platform_data = pdata; } drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; drvdata->dev = &pdev->dev; platform_set_drvdata(pdev, drvdata); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "funnel-base"); if (!res) return -ENODEV; drvdata->base = devm_ioremap(dev, res->start, resource_size(res)); if (!drvdata->base) return -ENOMEM; drvdata->clk = devm_clk_get(dev, "core_clk"); if (IS_ERR(drvdata->clk)) return PTR_ERR(drvdata->clk); ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE); if (ret) return ret; desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; desc->type = CORESIGHT_DEV_TYPE_LINK; desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG; desc->ops = &funnel_cs_ops; desc->pdata = pdev->dev.platform_data; desc->dev = &pdev->dev; desc->groups = funnel_attr_grps; desc->owner = THIS_MODULE; drvdata->csdev = coresight_register(desc); if (IS_ERR(drvdata->csdev)) return PTR_ERR(drvdata->csdev); dev_info(dev, "FUNNEL initialized\n"); return 0; } static int __devexit funnel_remove(struct platform_device *pdev) { struct funnel_drvdata *drvdata = platform_get_drvdata(pdev); coresight_unregister(drvdata->csdev); return 0; } static struct of_device_id funnel_match[] = { {.compatible = "arm,coresight-funnel"}, {} }; static struct platform_driver funnel_driver = { .probe = funnel_probe, .remove = __devexit_p(funnel_remove), .driver = { .name = "coresight-funnel", .owner = THIS_MODULE, .of_match_table = funnel_match, }, 
}; static int __init funnel_init(void) { return platform_driver_register(&funnel_driver); } module_init(funnel_init); static void __exit funnel_exit(void) { platform_driver_unregister(&funnel_driver); } module_exit(funnel_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CoreSight Funnel driver");
gpl-2.0
nopy/android_kernel_huawei_u8815
drivers/staging/spectra/ffsport.c
2922
22025
/* * NAND Flash Controller Device Driver * Copyright (c) 2009, Intel Corporation and its suppliers. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include "ffsport.h" #include "flash.h" #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/blkdev.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/log2.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/async.h> /**** Helper functions used for Div, Remainder operation on u64 ****/ /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_Calc_Used_Bits * Inputs: Power of 2 number * Outputs: Number of Used Bits * 0, if the argument is 0 * Description: Calculate the number of bits used by a given power of 2 number * Number can be up to 32 bit *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_Calc_Used_Bits(u32 n) { int tot_bits = 0; if (n >= 1 << 16) { n >>= 16; tot_bits += 16; } if (n >= 1 << 8) { n >>= 8; tot_bits += 8; } if (n >= 1 << 4) { n >>= 4; tot_bits += 4; } if (n >= 1 << 2) { n >>= 2; tot_bits += 2; } if (n >= 1 << 1) tot_bits += 1; return ((n == 0) ? 
(0) : tot_bits); } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_u64_Div * Inputs: Number of u64 * A power of 2 number as Division * Outputs: Quotient of the Divisor operation * Description: It divides the address by divisor by using bit shift operation * (essentially without explicitely using "/"). * Divisor is a power of 2 number and Divided is of u64 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ u64 GLOB_u64_Div(u64 addr, u32 divisor) { return (u64)(addr >> GLOB_Calc_Used_Bits(divisor)); } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_u64_Remainder * Inputs: Number of u64 * Divisor Type (1 -PageAddress, 2- BlockAddress) * Outputs: Remainder of the Division operation * Description: It calculates the remainder of a number (of u64) by * divisor(power of 2 number ) by using bit shifting and multiply * operation(essentially without explicitely using "/"). *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type) { u64 result = 0; if (divisor_type == 1) { /* Remainder -- Page */ result = (addr >> DeviceInfo.nBitsInPageDataSize); result = result * DeviceInfo.wPageDataSize; } else if (divisor_type == 2) { /* Remainder -- Block */ result = (addr >> DeviceInfo.nBitsInBlockDataSize); result = result * DeviceInfo.wBlockDataSize; } result = addr - result; return result; } #define NUM_DEVICES 1 #define PARTITIONS 8 #define GLOB_SBD_NAME "nd" #define GLOB_SBD_IRQ_NUM (29) #define GLOB_SBD_IOCTL_GC (0x7701) #define GLOB_SBD_IOCTL_WL (0x7702) #define GLOB_SBD_IOCTL_FORMAT (0x7703) #define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704) #define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705) #define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706) #define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707) #define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708) #define GLOB_SBD_IOCTL_WRITE_DATA (0x7709) #define GLOB_SBD_IOCTL_READ_DATA (0x770A) static int reserved_mb = 0; module_param(reserved_mb, int, 
0); MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)"); int nand_debug_level; module_param(nand_debug_level, int, 0644); MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3"); MODULE_LICENSE("GPL"); struct spectra_nand_dev { struct pci_dev *dev; u64 size; u16 users; spinlock_t qlock; void __iomem *ioaddr; /* Mapped address */ struct request_queue *queue; struct task_struct *thread; struct gendisk *gd; u8 *tmp_buf; }; static int GLOB_SBD_majornum; static char *GLOB_version = GLOB_VERSION; static struct spectra_nand_dev nand_device[NUM_DEVICES]; static struct mutex spectra_lock; static int res_blks_os = 1; struct spectra_indentfy_dev_tag IdentifyDeviceData; static int force_flush_cache(void) { nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); if (ERR == GLOB_FTL_Flush_Cache()) { printk(KERN_ERR "Fail to Flush FTL Cache!\n"); return -EFAULT; } #if CMD_DMA if (glob_ftl_execute_cmds()) return -EIO; else return 0; #endif return 0; } struct ioctl_rw_page_info { u8 *data; unsigned int page; }; static int ioctl_read_page_data(unsigned long arg) { u8 *buf; struct ioctl_rw_page_info info; int result = PASS; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) return -EFAULT; buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC); if (!buf) { printk(KERN_ERR "ioctl_read_page_data: " "failed to allocate memory\n"); return -ENOMEM; } mutex_lock(&spectra_lock); result = GLOB_FTL_Page_Read(buf, (u64)info.page * IdentifyDeviceData.PageDataSize); mutex_unlock(&spectra_lock); if (copy_to_user((void __user *)info.data, buf, IdentifyDeviceData.PageDataSize)) { printk(KERN_ERR "ioctl_read_page_data: " "failed to copy user data\n"); kfree(buf); return -EFAULT; } kfree(buf); return result; } static int ioctl_write_page_data(unsigned long arg) { u8 *buf; struct ioctl_rw_page_info info; int result = PASS; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) return -EFAULT; buf = 
kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC); if (!buf) { printk(KERN_ERR "ioctl_write_page_data: " "failed to allocate memory\n"); return -ENOMEM; } if (copy_from_user(buf, (void __user *)info.data, IdentifyDeviceData.PageDataSize)) { printk(KERN_ERR "ioctl_write_page_data: " "failed to copy user data\n"); kfree(buf); return -EFAULT; } mutex_lock(&spectra_lock); result = GLOB_FTL_Page_Write(buf, (u64)info.page * IdentifyDeviceData.PageDataSize); mutex_unlock(&spectra_lock); kfree(buf); return result; } /* Return how many blocks should be reserved for bad block replacement */ static int get_res_blk_num_bad_blk(void) { return IdentifyDeviceData.wDataBlockNum / 10; } /* Return how many blocks should be reserved for OS image */ static int get_res_blk_num_os(void) { u32 res_blks, blk_size; blk_size = IdentifyDeviceData.PageDataSize * IdentifyDeviceData.PagesPerBlock; res_blks = (reserved_mb * 1024 * 1024) / blk_size; if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum)) res_blks = 1; /* Reserved 1 block for block table */ return res_blks; } /* Transfer a full request. 
*/ static int do_transfer(struct spectra_nand_dev *tr, struct request *req) { u64 start_addr, addr; u32 logical_start_sect, hd_start_sect; u32 nsect, hd_sects; u32 rsect, tsect = 0; char *buf; u32 ratio = IdentifyDeviceData.PageDataSize >> 9; start_addr = (u64)(blk_rq_pos(req)) << 9; /* Add a big enough offset to prevent the OS Image from * being accessed or damaged by file system */ start_addr += IdentifyDeviceData.PageDataSize * IdentifyDeviceData.PagesPerBlock * res_blks_os; if (req->cmd_type & REQ_FLUSH) { if (force_flush_cache()) /* Fail to flush cache */ return -EIO; else return 0; } if (req->cmd_type != REQ_TYPE_FS) return -EIO; if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) { printk(KERN_ERR "Spectra error: request over the NAND " "capacity!sector %d, current_nr_sectors %d, " "while capacity is %d\n", (int)blk_rq_pos(req), blk_rq_cur_sectors(req), (int)get_capacity(tr->gd)); return -EIO; } logical_start_sect = start_addr >> 9; hd_start_sect = logical_start_sect / ratio; rsect = logical_start_sect - hd_start_sect * ratio; addr = (u64)hd_start_sect * ratio * 512; buf = req->buffer; nsect = blk_rq_cur_sectors(req); if (rsect) tsect = (ratio - rsect) < nsect ? 
(ratio - rsect) : nsect; switch (rq_data_dir(req)) { case READ: /* Read the first NAND page */ if (rsect) { if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) { printk(KERN_ERR "Error in %s, Line %d\n", __FILE__, __LINE__); return -EIO; } memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9); addr += IdentifyDeviceData.PageDataSize; buf += tsect << 9; nsect -= tsect; } /* Read the other NAND pages */ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) { if (GLOB_FTL_Page_Read(buf, addr)) { printk(KERN_ERR "Error in %s, Line %d\n", __FILE__, __LINE__); return -EIO; } addr += IdentifyDeviceData.PageDataSize; buf += IdentifyDeviceData.PageDataSize; } /* Read the last NAND pages */ if (nsect % ratio) { if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) { printk(KERN_ERR "Error in %s, Line %d\n", __FILE__, __LINE__); return -EIO; } memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9); } #if CMD_DMA if (glob_ftl_execute_cmds()) return -EIO; else return 0; #endif return 0; case WRITE: /* Write the first NAND page */ if (rsect) { if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) { printk(KERN_ERR "Error in %s, Line %d\n", __FILE__, __LINE__); return -EIO; } memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9); if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) { printk(KERN_ERR "Error in %s, Line %d\n", __FILE__, __LINE__); return -EIO; } addr += IdentifyDeviceData.PageDataSize; buf += tsect << 9; nsect -= tsect; } /* Write the other NAND pages */ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) { if (GLOB_FTL_Page_Write(buf, addr)) { printk(KERN_ERR "Error in %s, Line %d\n", __FILE__, __LINE__); return -EIO; } addr += IdentifyDeviceData.PageDataSize; buf += IdentifyDeviceData.PageDataSize; } /* Write the last NAND pages */ if (nsect % ratio) { if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) { printk(KERN_ERR "Error in %s, Line %d\n", __FILE__, __LINE__); return -EIO; } memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9); if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) { printk(KERN_ERR "Error in %s, Line %d\n", 
__FILE__, __LINE__); return -EIO; } } #if CMD_DMA if (glob_ftl_execute_cmds()) return -EIO; else return 0; #endif return 0; default: printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); return -EIO; } } /* This function is copied from drivers/mtd/mtd_blkdevs.c */ static int spectra_trans_thread(void *arg) { struct spectra_nand_dev *tr = arg; struct request_queue *rq = tr->queue; struct request *req = NULL; /* we might get involved when memory gets low, so use PF_MEMALLOC */ current->flags |= PF_MEMALLOC; spin_lock_irq(rq->queue_lock); while (!kthread_should_stop()) { int res; if (!req) { req = blk_fetch_request(rq); if (!req) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(rq->queue_lock); schedule(); spin_lock_irq(rq->queue_lock); continue; } } spin_unlock_irq(rq->queue_lock); mutex_lock(&spectra_lock); res = do_transfer(tr, req); mutex_unlock(&spectra_lock); spin_lock_irq(rq->queue_lock); if (!__blk_end_request_cur(req, res)) req = NULL; } if (req) __blk_end_request_all(req, -EIO); spin_unlock_irq(rq->queue_lock); return 0; } /* Request function that "handles clustering". 
*/ static void GLOB_SBD_request(struct request_queue *rq) { struct spectra_nand_dev *pdev = rq->queuedata; wake_up_process(pdev->thread); } static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode) { nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); return 0; } static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode) { int ret; nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); mutex_lock(&spectra_lock); ret = force_flush_cache(); mutex_unlock(&spectra_lock); return 0; } static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo) { geo->heads = 4; geo->sectors = 16; geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); nand_dbg_print(NAND_DBG_DEBUG, "heads: %d, sectors: %d, cylinders: %d\n", geo->heads, geo->sectors, geo->cylinders); return 0; } int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int ret; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); switch (cmd) { case GLOB_SBD_IOCTL_GC: nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Garbage Collection " "being performed\n"); if (PASS != GLOB_FTL_Garbage_Collection()) return -EFAULT; return 0; case GLOB_SBD_IOCTL_WL: nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Static Wear Leveling " "being performed\n"); if (PASS != GLOB_FTL_Wear_Leveling()) return -EFAULT; return 0; case GLOB_SBD_IOCTL_FORMAT: nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format " "being performed\n"); if (PASS != GLOB_FTL_Flash_Format()) return -EFAULT; return 0; case GLOB_SBD_IOCTL_FLUSH_CACHE: nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush " "being performed\n"); mutex_lock(&spectra_lock); ret = force_flush_cache(); mutex_unlock(&spectra_lock); return ret; case GLOB_SBD_IOCTL_COPY_BLK_TABLE: nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: " "Copy block table\n"); if (copy_to_user((void __user *)arg, 
get_blk_table_start_addr(), get_blk_table_len())) return -EFAULT; return 0; case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE: nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: " "Copy wear leveling table\n"); if (copy_to_user((void __user *)arg, get_wear_leveling_table_start_addr(), get_wear_leveling_table_len())) return -EFAULT; return 0; case GLOB_SBD_IOCTL_GET_NAND_INFO: nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: " "Get NAND info\n"); if (copy_to_user((void __user *)arg, &IdentifyDeviceData, sizeof(IdentifyDeviceData))) return -EFAULT; return 0; case GLOB_SBD_IOCTL_WRITE_DATA: nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: " "Write one page data\n"); return ioctl_write_page_data(arg); case GLOB_SBD_IOCTL_READ_DATA: nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: " "Read one page data\n"); return ioctl_read_page_data(arg); } return -ENOTTY; } static DEFINE_MUTEX(ffsport_mutex); int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&ffsport_mutex); ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg); mutex_unlock(&ffsport_mutex); return ret; } static struct block_device_operations GLOB_SBD_ops = { .owner = THIS_MODULE, .open = GLOB_SBD_open, .release = GLOB_SBD_release, .ioctl = GLOB_SBD_unlocked_ioctl, .getgeo = GLOB_SBD_getgeo, }; static int SBD_setup_device(struct spectra_nand_dev *dev, int which) { int res_blks; u32 sects; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); memset(dev, 0, sizeof(struct spectra_nand_dev)); nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks " "for OS image, %d blocks for bad block replacement.\n", get_res_blk_num_os(), get_res_blk_num_bad_blk()); res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os(); dev->size = (u64)IdentifyDeviceData.PageDataSize * IdentifyDeviceData.PagesPerBlock * (IdentifyDeviceData.wDataBlockNum - res_blks); res_blks_os = get_res_blk_num_os(); spin_lock_init(&dev->qlock); dev->tmp_buf = 
kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC); if (!dev->tmp_buf) { printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n", __FILE__, __LINE__); goto out_vfree; } dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock); if (dev->queue == NULL) { printk(KERN_ERR "Spectra: Request queue could not be initialized." " Aborting\n "); goto out_vfree; } dev->queue->queuedata = dev; /* As Linux block layer doesn't support >4KB hardware sector, */ /* Here we force report 512 byte hardware sector size to Kernel */ blk_queue_logical_block_size(dev->queue, 512); blk_queue_flush(dev->queue, REQ_FLUSH); dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd"); if (IS_ERR(dev->thread)) { blk_cleanup_queue(dev->queue); unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME); return PTR_ERR(dev->thread); } dev->gd = alloc_disk(PARTITIONS); if (!dev->gd) { printk(KERN_ERR "Spectra: Could not allocate disk. Aborting \n "); goto out_vfree; } dev->gd->major = GLOB_SBD_majornum; dev->gd->first_minor = which * PARTITIONS; dev->gd->fops = &GLOB_SBD_ops; dev->gd->queue = dev->queue; dev->gd->private_data = dev; snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a'); sects = dev->size >> 9; nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects); set_capacity(dev->gd, sects); add_disk(dev->gd); return 0; out_vfree: return -ENOMEM; } /* static ssize_t show_nand_block_num(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", (int)IdentifyDeviceData.wDataBlockNum); } static ssize_t show_nand_pages_per_block(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", (int)IdentifyDeviceData.PagesPerBlock); } static ssize_t show_nand_page_size(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", (int)IdentifyDeviceData.PageDataSize); } static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL); static 
DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL); static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL); static void create_sysfs_entry(struct device *dev) { if (device_create_file(dev, &dev_attr_nand_block_num)) printk(KERN_ERR "Spectra: " "failed to create sysfs entry nand_block_num.\n"); if (device_create_file(dev, &dev_attr_nand_pages_per_block)) printk(KERN_ERR "Spectra: " "failed to create sysfs entry nand_pages_per_block.\n"); if (device_create_file(dev, &dev_attr_nand_page_size)) printk(KERN_ERR "Spectra: " "failed to create sysfs entry nand_page_size.\n"); } */ static void register_spectra_ftl_async(void *unused, async_cookie_t cookie) { int i; /* create_sysfs_entry(&dev->dev); */ if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) { printk(KERN_ERR "Spectra: Unable to Read Flash Device. " "Aborting\n"); return; } else { nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: " "Num blocks=%d, pagesperblock=%d, " "pagedatasize=%d, ECCBytesPerSector=%d\n", (int)IdentifyDeviceData.NumBlocks, (int)IdentifyDeviceData.PagesPerBlock, (int)IdentifyDeviceData.PageDataSize, (int)IdentifyDeviceData.wECCBytesPerSector); } printk(KERN_ALERT "Spectra: searching block table, please wait ...\n"); if (GLOB_FTL_Init() != PASS) { printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. 
" "Aborting\n"); goto out_ftl_flash_register; } printk(KERN_ALERT "Spectra: block table has been found.\n"); GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME); if (GLOB_SBD_majornum <= 0) { printk(KERN_ERR "Unable to get the major %d for Spectra", GLOB_SBD_majornum); goto out_ftl_flash_register; } for (i = 0; i < NUM_DEVICES; i++) if (SBD_setup_device(&nand_device[i], i) == -ENOMEM) goto out_blk_register; nand_dbg_print(NAND_DBG_DEBUG, "Spectra: module loaded with major number %d\n", GLOB_SBD_majornum); return; out_blk_register: unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME); out_ftl_flash_register: GLOB_FTL_Cache_Release(); printk(KERN_ERR "Spectra: Module load failed.\n"); } int register_spectra_ftl() { async_schedule(register_spectra_ftl_async, NULL); return 0; } EXPORT_SYMBOL_GPL(register_spectra_ftl); static int GLOB_SBD_init(void) { /* Set debug output level (0~3) here. 3 is most verbose */ printk(KERN_ALERT "Spectra: %s\n", GLOB_version); mutex_init(&spectra_lock); if (PASS != GLOB_FTL_Flash_Init()) { printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. " "Aborting\n"); return -ENODEV; } return 0; } static void __exit GLOB_SBD_exit(void) { int i; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); for (i = 0; i < NUM_DEVICES; i++) { struct spectra_nand_dev *dev = &nand_device[i]; if (dev->gd) { del_gendisk(dev->gd); put_disk(dev->gd); } if (dev->queue) blk_cleanup_queue(dev->queue); kfree(dev->tmp_buf); } unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME); mutex_lock(&spectra_lock); force_flush_cache(); mutex_unlock(&spectra_lock); GLOB_FTL_Cache_Release(); GLOB_FTL_Flash_Release(); nand_dbg_print(NAND_DBG_DEBUG, "Spectra FTL module (major number %d) unloaded.\n", GLOB_SBD_majornum); } module_init(GLOB_SBD_init); module_exit(GLOB_SBD_exit);
gpl-2.0
MoKee/android_kernel_lge_p880
sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
2922
3332
/* sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c * * Copyright 2009 Simtec Electronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <sound/soc.h> #include "s3c24xx_simtec.h" /* supported machines: * * Machine Connections AMP * ------- ----------- --- * BAST MIC, HPOUT, LOUT, LIN TPA2001D1 (HPOUTL,R) (gain hardwired) * VR1000 HPOUT, LIN None * VR2000 LIN, LOUT, MIC, HP LM4871 (HPOUTL,R) * DePicture LIN, LOUT, MIC, HP LM4871 (HPOUTL,R) * Anubis LIN, LOUT, MIC, HP TPA2001D1 (HPOUTL,R) */ static const struct snd_soc_dapm_widget dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_LINE("Line In", NULL), SND_SOC_DAPM_LINE("Line Out", NULL), SND_SOC_DAPM_MIC("Mic Jack", NULL), }; static const struct snd_soc_dapm_route base_map[] = { { "Headphone Jack", NULL, "LHPOUT"}, { "Headphone Jack", NULL, "RHPOUT"}, { "Line Out", NULL, "LOUT" }, { "Line Out", NULL, "ROUT" }, { "LLINEIN", NULL, "Line In"}, { "RLINEIN", NULL, "Line In"}, { "MICIN", NULL, "Mic Jack"}, }; /** * simtec_tlv320aic23_init - initialise and add controls * @codec; The codec instance to attach to. * * Attach our controls and configure the necessary codec * mappings for our sound card instance. 
*/ static int simtec_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; snd_soc_dapm_new_controls(dapm, dapm_widgets, ARRAY_SIZE(dapm_widgets)); snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map)); snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); snd_soc_dapm_enable_pin(dapm, "Line In"); snd_soc_dapm_enable_pin(dapm, "Line Out"); snd_soc_dapm_enable_pin(dapm, "Mic Jack"); simtec_audio_init(rtd); snd_soc_dapm_sync(dapm); return 0; } static struct snd_soc_dai_link simtec_dai_aic23 = { .name = "tlv320aic23", .stream_name = "TLV320AIC23", .codec_name = "tlv320aic3x-codec.0-001a", .cpu_dai_name = "s3c24xx-iis", .codec_dai_name = "tlv320aic3x-hifi", .platform_name = "samsung-audio", .init = simtec_tlv320aic23_init, }; /* simtec audio machine driver */ static struct snd_soc_card snd_soc_machine_simtec_aic23 = { .name = "Simtec", .dai_link = &simtec_dai_aic23, .num_links = 1, }; static int __devinit simtec_audio_tlv320aic23_probe(struct platform_device *pd) { return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic23); } static struct platform_driver simtec_audio_tlv320aic23_platdrv = { .driver = { .owner = THIS_MODULE, .name = "s3c24xx-simtec-tlv320aic23", .pm = simtec_audio_pm, }, .probe = simtec_audio_tlv320aic23_probe, .remove = __devexit_p(simtec_audio_remove), }; MODULE_ALIAS("platform:s3c24xx-simtec-tlv320aic23"); static int __init simtec_tlv320aic23_modinit(void) { return platform_driver_register(&simtec_audio_tlv320aic23_platdrv); } static void __exit simtec_tlv320aic23_modexit(void) { platform_driver_unregister(&simtec_audio_tlv320aic23_platdrv); } module_init(simtec_tlv320aic23_modinit); module_exit(simtec_tlv320aic23_modexit); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("ALSA SoC Simtec Audio support"); MODULE_LICENSE("GPL");
gpl-2.0
Andiry/prd
arch/parisc/kernel/unaligned.c
3690
17695
/* * Unaligned memory access handler * * Copyright (C) 2001 Randolph Chung <tausq@debian.org> * Significantly tweaked by LaMont Jones <lamont@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/ratelimit.h> #include <asm/uaccess.h> #include <asm/hardirq.h> /* #define DEBUG_UNALIGNED 1 */ #ifdef DEBUG_UNALIGNED #define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0) #else #define DPRINTF(fmt, args...) 
#endif #ifdef CONFIG_64BIT #define RFMT "%016lx" #else #define RFMT "%08lx" #endif #define FIXUP_BRANCH(lbl) \ "\tldil L%%" #lbl ", %%r1\n" \ "\tldo R%%" #lbl "(%%r1), %%r1\n" \ "\tbv,n %%r0(%%r1)\n" /* If you use FIXUP_BRANCH, then you must list this clobber */ #define FIXUP_BRANCH_CLOBBER "r1" /* 1111 1100 0000 0000 0001 0011 1100 0000 */ #define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6) #define OPCODE2(a,b) ((a)<<26|(b)<<1) #define OPCODE3(a,b) ((a)<<26|(b)<<2) #define OPCODE4(a) ((a)<<26) #define OPCODE1_MASK OPCODE1(0x3f,1,0xf) #define OPCODE2_MASK OPCODE2(0x3f,1) #define OPCODE3_MASK OPCODE3(0x3f,1) #define OPCODE4_MASK OPCODE4(0x3f) /* skip LDB - never unaligned (index) */ #define OPCODE_LDH_I OPCODE1(0x03,0,0x1) #define OPCODE_LDW_I OPCODE1(0x03,0,0x2) #define OPCODE_LDD_I OPCODE1(0x03,0,0x3) #define OPCODE_LDDA_I OPCODE1(0x03,0,0x4) #define OPCODE_LDCD_I OPCODE1(0x03,0,0x5) #define OPCODE_LDWA_I OPCODE1(0x03,0,0x6) #define OPCODE_LDCW_I OPCODE1(0x03,0,0x7) /* skip LDB - never unaligned (short) */ #define OPCODE_LDH_S OPCODE1(0x03,1,0x1) #define OPCODE_LDW_S OPCODE1(0x03,1,0x2) #define OPCODE_LDD_S OPCODE1(0x03,1,0x3) #define OPCODE_LDDA_S OPCODE1(0x03,1,0x4) #define OPCODE_LDCD_S OPCODE1(0x03,1,0x5) #define OPCODE_LDWA_S OPCODE1(0x03,1,0x6) #define OPCODE_LDCW_S OPCODE1(0x03,1,0x7) /* skip STB - never unaligned */ #define OPCODE_STH OPCODE1(0x03,1,0x9) #define OPCODE_STW OPCODE1(0x03,1,0xa) #define OPCODE_STD OPCODE1(0x03,1,0xb) /* skip STBY - never unaligned */ /* skip STDBY - never unaligned */ #define OPCODE_STWA OPCODE1(0x03,1,0xe) #define OPCODE_STDA OPCODE1(0x03,1,0xf) #define OPCODE_FLDWX OPCODE1(0x09,0,0x0) #define OPCODE_FLDWXR OPCODE1(0x09,0,0x1) #define OPCODE_FSTWX OPCODE1(0x09,0,0x8) #define OPCODE_FSTWXR OPCODE1(0x09,0,0x9) #define OPCODE_FLDWS OPCODE1(0x09,1,0x0) #define OPCODE_FLDWSR OPCODE1(0x09,1,0x1) #define OPCODE_FSTWS OPCODE1(0x09,1,0x8) #define OPCODE_FSTWSR OPCODE1(0x09,1,0x9) #define OPCODE_FLDDX OPCODE1(0x0b,0,0x0) #define 
OPCODE_FSTDX OPCODE1(0x0b,0,0x8) #define OPCODE_FLDDS OPCODE1(0x0b,1,0x0) #define OPCODE_FSTDS OPCODE1(0x0b,1,0x8) #define OPCODE_LDD_L OPCODE2(0x14,0) #define OPCODE_FLDD_L OPCODE2(0x14,1) #define OPCODE_STD_L OPCODE2(0x1c,0) #define OPCODE_FSTD_L OPCODE2(0x1c,1) #define OPCODE_LDW_M OPCODE3(0x17,1) #define OPCODE_FLDW_L OPCODE3(0x17,0) #define OPCODE_FSTW_L OPCODE3(0x1f,0) #define OPCODE_STW_M OPCODE3(0x1f,1) #define OPCODE_LDH_L OPCODE4(0x11) #define OPCODE_LDW_L OPCODE4(0x12) #define OPCODE_LDWM OPCODE4(0x13) #define OPCODE_STH_L OPCODE4(0x19) #define OPCODE_STW_L OPCODE4(0x1A) #define OPCODE_STWM OPCODE4(0x1B) #define MAJOR_OP(i) (((i)>>26)&0x3f) #define R1(i) (((i)>>21)&0x1f) #define R2(i) (((i)>>16)&0x1f) #define R3(i) ((i)&0x1f) #define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1)) #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0)) #define IM5_2(i) IM((i)>>16,5) #define IM5_3(i) IM((i),5) #define IM14(i) IM((i),14) #define ERR_NOTHANDLED -1 #define ERR_PAGEFAULT -2 int unaligned_enabled __read_mostly = 1; void die_if_kernel (char *str, struct pt_regs *regs, long err); static int emulate_ldh(struct pt_regs *regs, int toreg) { unsigned long saddr = regs->ior; unsigned long val = 0; int ret; DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n", regs->isr, regs->ior, toreg); __asm__ __volatile__ ( " mtsp %4, %%sr1\n" "1: ldbs 0(%%sr1,%3), %%r20\n" "2: ldbs 1(%%sr1,%3), %0\n" " depw %%r20, 23, 24, %0\n" " copy %%r0, %1\n" "3: \n" " .section .fixup,\"ax\"\n" "4: ldi -2, %1\n" FIXUP_BRANCH(3b) " .previous\n" ASM_EXCEPTIONTABLE_ENTRY(1b, 4b) ASM_EXCEPTIONTABLE_ENTRY(2b, 4b) : "=r" (val), "=r" (ret) : "0" (val), "r" (saddr), "r" (regs->isr) : "r20", FIXUP_BRANCH_CLOBBER ); DPRINTF("val = 0x" RFMT "\n", val); if (toreg) regs->gr[toreg] = val; return ret; } static int emulate_ldw(struct pt_regs *regs, int toreg, int flop) { unsigned long saddr = regs->ior; unsigned long val = 0; int ret; DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n", regs->isr, 
regs->ior, toreg); __asm__ __volatile__ ( " zdep %3,28,2,%%r19\n" /* r19=(ofs&3)*8 */ " mtsp %4, %%sr1\n" " depw %%r0,31,2,%3\n" "1: ldw 0(%%sr1,%3),%0\n" "2: ldw 4(%%sr1,%3),%%r20\n" " subi 32,%%r19,%%r19\n" " mtctl %%r19,11\n" " vshd %0,%%r20,%0\n" " copy %%r0, %1\n" "3: \n" " .section .fixup,\"ax\"\n" "4: ldi -2, %1\n" FIXUP_BRANCH(3b) " .previous\n" ASM_EXCEPTIONTABLE_ENTRY(1b, 4b) ASM_EXCEPTIONTABLE_ENTRY(2b, 4b) : "=r" (val), "=r" (ret) : "0" (val), "r" (saddr), "r" (regs->isr) : "r19", "r20", FIXUP_BRANCH_CLOBBER ); DPRINTF("val = 0x" RFMT "\n", val); if (flop) ((__u32*)(regs->fr))[toreg] = val; else if (toreg) regs->gr[toreg] = val; return ret; } static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) { unsigned long saddr = regs->ior; __u64 val = 0; int ret; DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n", regs->isr, regs->ior, toreg); #ifdef CONFIG_PA20 #ifndef CONFIG_64BIT if (!flop) return -1; #endif __asm__ __volatile__ ( " depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */ " mtsp %4, %%sr1\n" " depd %%r0,63,3,%3\n" "1: ldd 0(%%sr1,%3),%0\n" "2: ldd 8(%%sr1,%3),%%r20\n" " subi 64,%%r19,%%r19\n" " mtsar %%r19\n" " shrpd %0,%%r20,%%sar,%0\n" " copy %%r0, %1\n" "3: \n" " .section .fixup,\"ax\"\n" "4: ldi -2, %1\n" FIXUP_BRANCH(3b) " .previous\n" ASM_EXCEPTIONTABLE_ENTRY(1b,4b) ASM_EXCEPTIONTABLE_ENTRY(2b,4b) : "=r" (val), "=r" (ret) : "0" (val), "r" (saddr), "r" (regs->isr) : "r19", "r20", FIXUP_BRANCH_CLOBBER ); #else { unsigned long valh=0,vall=0; __asm__ __volatile__ ( " zdep %5,29,2,%%r19\n" /* r19=(ofs&3)*8 */ " mtsp %6, %%sr1\n" " dep %%r0,31,2,%5\n" "1: ldw 0(%%sr1,%5),%0\n" "2: ldw 4(%%sr1,%5),%1\n" "3: ldw 8(%%sr1,%5),%%r20\n" " subi 32,%%r19,%%r19\n" " mtsar %%r19\n" " vshd %0,%1,%0\n" " vshd %1,%%r20,%1\n" " copy %%r0, %2\n" "4: \n" " .section .fixup,\"ax\"\n" "5: ldi -2, %2\n" FIXUP_BRANCH(4b) " .previous\n" ASM_EXCEPTIONTABLE_ENTRY(1b,5b) ASM_EXCEPTIONTABLE_ENTRY(2b,5b) ASM_EXCEPTIONTABLE_ENTRY(3b,5b) : "=r" (valh), "=r" (vall), "=r" 
(ret) : "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr) : "r19", "r20", FIXUP_BRANCH_CLOBBER ); val=((__u64)valh<<32)|(__u64)vall; } #endif DPRINTF("val = 0x%llx\n", val); if (flop) regs->fr[toreg] = val; else if (toreg) regs->gr[toreg] = val; return ret; } static int emulate_sth(struct pt_regs *regs, int frreg) { unsigned long val = regs->gr[frreg]; int ret; if (!frreg) val = 0; DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg, val, regs->isr, regs->ior); __asm__ __volatile__ ( " mtsp %3, %%sr1\n" " extrw,u %1, 23, 8, %%r19\n" "1: stb %1, 1(%%sr1, %2)\n" "2: stb %%r19, 0(%%sr1, %2)\n" " copy %%r0, %0\n" "3: \n" " .section .fixup,\"ax\"\n" "4: ldi -2, %0\n" FIXUP_BRANCH(3b) " .previous\n" ASM_EXCEPTIONTABLE_ENTRY(1b,4b) ASM_EXCEPTIONTABLE_ENTRY(2b,4b) : "=r" (ret) : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", FIXUP_BRANCH_CLOBBER ); return ret; } static int emulate_stw(struct pt_regs *regs, int frreg, int flop) { unsigned long val; int ret; if (flop) val = ((__u32*)(regs->fr))[frreg]; else if (frreg) val = regs->gr[frreg]; else val = 0; DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg, val, regs->isr, regs->ior); __asm__ __volatile__ ( " mtsp %3, %%sr1\n" " zdep %2, 28, 2, %%r19\n" " dep %%r0, 31, 2, %2\n" " mtsar %%r19\n" " depwi,z -2, %%sar, 32, %%r19\n" "1: ldw 0(%%sr1,%2),%%r20\n" "2: ldw 4(%%sr1,%2),%%r21\n" " vshd %%r0, %1, %%r22\n" " vshd %1, %%r0, %%r1\n" " and %%r20, %%r19, %%r20\n" " andcm %%r21, %%r19, %%r21\n" " or %%r22, %%r20, %%r20\n" " or %%r1, %%r21, %%r21\n" " stw %%r20,0(%%sr1,%2)\n" " stw %%r21,4(%%sr1,%2)\n" " copy %%r0, %0\n" "3: \n" " .section .fixup,\"ax\"\n" "4: ldi -2, %0\n" FIXUP_BRANCH(3b) " .previous\n" ASM_EXCEPTIONTABLE_ENTRY(1b,4b) ASM_EXCEPTIONTABLE_ENTRY(2b,4b) : "=r" (ret) : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); return 0; } static int emulate_std(struct pt_regs *regs, int frreg, int flop) { __u64 val; 
int ret; if (flop) val = regs->fr[frreg]; else if (frreg) val = regs->gr[frreg]; else val = 0; DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg, val, regs->isr, regs->ior); #ifdef CONFIG_PA20 #ifndef CONFIG_64BIT if (!flop) return -1; #endif __asm__ __volatile__ ( " mtsp %3, %%sr1\n" " depd,z %2, 60, 3, %%r19\n" " depd %%r0, 63, 3, %2\n" " mtsar %%r19\n" " depdi,z -2, %%sar, 64, %%r19\n" "1: ldd 0(%%sr1,%2),%%r20\n" "2: ldd 8(%%sr1,%2),%%r21\n" " shrpd %%r0, %1, %%sar, %%r22\n" " shrpd %1, %%r0, %%sar, %%r1\n" " and %%r20, %%r19, %%r20\n" " andcm %%r21, %%r19, %%r21\n" " or %%r22, %%r20, %%r20\n" " or %%r1, %%r21, %%r21\n" "3: std %%r20,0(%%sr1,%2)\n" "4: std %%r21,8(%%sr1,%2)\n" " copy %%r0, %0\n" "5: \n" " .section .fixup,\"ax\"\n" "6: ldi -2, %0\n" FIXUP_BRANCH(5b) " .previous\n" ASM_EXCEPTIONTABLE_ENTRY(1b,6b) ASM_EXCEPTIONTABLE_ENTRY(2b,6b) ASM_EXCEPTIONTABLE_ENTRY(3b,6b) ASM_EXCEPTIONTABLE_ENTRY(4b,6b) : "=r" (ret) : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); #else { unsigned long valh=(val>>32),vall=(val&0xffffffffl); __asm__ __volatile__ ( " mtsp %4, %%sr1\n" " zdep %2, 29, 2, %%r19\n" " dep %%r0, 31, 2, %2\n" " mtsar %%r19\n" " zvdepi -2, 32, %%r19\n" "1: ldw 0(%%sr1,%3),%%r20\n" "2: ldw 8(%%sr1,%3),%%r21\n" " vshd %1, %2, %%r1\n" " vshd %%r0, %1, %1\n" " vshd %2, %%r0, %2\n" " and %%r20, %%r19, %%r20\n" " andcm %%r21, %%r19, %%r21\n" " or %1, %%r20, %1\n" " or %2, %%r21, %2\n" "3: stw %1,0(%%sr1,%1)\n" "4: stw %%r1,4(%%sr1,%3)\n" "5: stw %2,8(%%sr1,%3)\n" " copy %%r0, %0\n" "6: \n" " .section .fixup,\"ax\"\n" "7: ldi -2, %0\n" FIXUP_BRANCH(6b) " .previous\n" ASM_EXCEPTIONTABLE_ENTRY(1b,7b) ASM_EXCEPTIONTABLE_ENTRY(2b,7b) ASM_EXCEPTIONTABLE_ENTRY(3b,7b) ASM_EXCEPTIONTABLE_ENTRY(4b,7b) ASM_EXCEPTIONTABLE_ENTRY(5b,7b) : "=r" (ret) : "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r1", FIXUP_BRANCH_CLOBBER ); } #endif return ret; } void 
handle_unaligned(struct pt_regs *regs) { static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0; int modify = 0; int ret = ERR_NOTHANDLED; struct siginfo si; register int flop=0; /* true if this is a flop */ __inc_irq_stat(irq_unaligned_count); /* log a message with pacing */ if (user_mode(regs)) { if (current->thread.flags & PARISC_UAC_SIGBUS) { goto force_sigbus; } if (!(current->thread.flags & PARISC_UAC_NOPRINT) && __ratelimit(&ratelimit)) { char buf[256]; sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n", current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]); printk(KERN_WARNING "%s", buf); #ifdef DEBUG_UNALIGNED show_regs(regs); #endif } if (!unaligned_enabled) goto force_sigbus; } /* handle modification - OK, it's ugly, see the instruction manual */ switch (MAJOR_OP(regs->iir)) { case 0x03: case 0x09: case 0x0b: if (regs->iir&0x20) { modify = 1; if (regs->iir&0x1000) /* short loads */ if (regs->iir&0x200) newbase += IM5_3(regs->iir); else newbase += IM5_2(regs->iir); else if (regs->iir&0x2000) /* scaled indexed */ { int shift=0; switch (regs->iir & OPCODE1_MASK) { case OPCODE_LDH_I: shift= 1; break; case OPCODE_LDW_I: shift= 2; break; case OPCODE_LDD_I: case OPCODE_LDDA_I: shift= 3; break; } newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift; } else /* simple indexed */ newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0); } break; case 0x13: case 0x1b: modify = 1; newbase += IM14(regs->iir); break; case 0x14: case 0x1c: if (regs->iir&8) { modify = 1; newbase += IM14(regs->iir&~0xe); } break; case 0x16: case 0x1e: modify = 1; newbase += IM14(regs->iir&6); break; case 0x17: case 0x1f: if (regs->iir&4) { modify = 1; newbase += IM14(regs->iir&~4); } break; } /* TODO: make this cleaner... 
*/ switch (regs->iir & OPCODE1_MASK) { case OPCODE_LDH_I: case OPCODE_LDH_S: ret = emulate_ldh(regs, R3(regs->iir)); break; case OPCODE_LDW_I: case OPCODE_LDWA_I: case OPCODE_LDW_S: case OPCODE_LDWA_S: ret = emulate_ldw(regs, R3(regs->iir),0); break; case OPCODE_STH: ret = emulate_sth(regs, R2(regs->iir)); break; case OPCODE_STW: case OPCODE_STWA: ret = emulate_stw(regs, R2(regs->iir),0); break; #ifdef CONFIG_PA20 case OPCODE_LDD_I: case OPCODE_LDDA_I: case OPCODE_LDD_S: case OPCODE_LDDA_S: ret = emulate_ldd(regs, R3(regs->iir),0); break; case OPCODE_STD: case OPCODE_STDA: ret = emulate_std(regs, R2(regs->iir),0); break; #endif case OPCODE_FLDWX: case OPCODE_FLDWS: case OPCODE_FLDWXR: case OPCODE_FLDWSR: flop=1; ret = emulate_ldw(regs,FR3(regs->iir),1); break; case OPCODE_FLDDX: case OPCODE_FLDDS: flop=1; ret = emulate_ldd(regs,R3(regs->iir),1); break; case OPCODE_FSTWX: case OPCODE_FSTWS: case OPCODE_FSTWXR: case OPCODE_FSTWSR: flop=1; ret = emulate_stw(regs,FR3(regs->iir),1); break; case OPCODE_FSTDX: case OPCODE_FSTDS: flop=1; ret = emulate_std(regs,R3(regs->iir),1); break; case OPCODE_LDCD_I: case OPCODE_LDCW_I: case OPCODE_LDCD_S: case OPCODE_LDCW_S: ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. 
*/ break; } #ifdef CONFIG_PA20 switch (regs->iir & OPCODE2_MASK) { case OPCODE_FLDD_L: flop=1; ret = emulate_ldd(regs,R2(regs->iir),1); break; case OPCODE_FSTD_L: flop=1; ret = emulate_std(regs, R2(regs->iir),1); break; case OPCODE_LDD_L: ret = emulate_ldd(regs, R2(regs->iir),0); break; case OPCODE_STD_L: ret = emulate_std(regs, R2(regs->iir),0); break; } #endif switch (regs->iir & OPCODE3_MASK) { case OPCODE_FLDW_L: flop=1; ret = emulate_ldw(regs, R2(regs->iir),0); break; case OPCODE_LDW_M: ret = emulate_ldw(regs, R2(regs->iir),1); break; case OPCODE_FSTW_L: flop=1; ret = emulate_stw(regs, R2(regs->iir),1); break; case OPCODE_STW_M: ret = emulate_stw(regs, R2(regs->iir),0); break; } switch (regs->iir & OPCODE4_MASK) { case OPCODE_LDH_L: ret = emulate_ldh(regs, R2(regs->iir)); break; case OPCODE_LDW_L: case OPCODE_LDWM: ret = emulate_ldw(regs, R2(regs->iir),0); break; case OPCODE_STH_L: ret = emulate_sth(regs, R2(regs->iir)); break; case OPCODE_STW_L: case OPCODE_STWM: ret = emulate_stw(regs, R2(regs->iir),0); break; } if (modify && R1(regs->iir)) regs->gr[R1(regs->iir)] = newbase; if (ret == ERR_NOTHANDLED) printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir); DPRINTF("ret = %d\n", ret); if (ret) { printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); die_if_kernel("Unaligned data reference", regs, 28); if (ret == ERR_PAGEFAULT) { si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SEGV_MAPERR; si.si_addr = (void __user *)regs->ior; force_sig_info(SIGSEGV, &si, current); } else { force_sigbus: /* couldn't handle it ... */ si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_ADRALN; si.si_addr = (void __user *)regs->ior; force_sig_info(SIGBUS, &si, current); } return; } /* else we handled it, let life go on. */ regs->gr[0]|=PSW_N; } /* * NB: check_unaligned() is only used for PCXS processors right * now, so we only check for PA1.1 encodings at this point. 
*/ int check_unaligned(struct pt_regs *regs) { unsigned long align_mask; /* Get alignment mask */ align_mask = 0UL; switch (regs->iir & OPCODE1_MASK) { case OPCODE_LDH_I: case OPCODE_LDH_S: case OPCODE_STH: align_mask = 1UL; break; case OPCODE_LDW_I: case OPCODE_LDWA_I: case OPCODE_LDW_S: case OPCODE_LDWA_S: case OPCODE_STW: case OPCODE_STWA: align_mask = 3UL; break; default: switch (regs->iir & OPCODE4_MASK) { case OPCODE_LDH_L: case OPCODE_STH_L: align_mask = 1UL; break; case OPCODE_LDW_L: case OPCODE_LDWM: case OPCODE_STW_L: case OPCODE_STWM: align_mask = 3UL; break; } break; } return (int)(regs->ior & align_mask); }
gpl-2.0
davidmueller13/ZenKernel_Flounder
lib/mpi/mpi-pow.c
4202
8464
/* mpi-pow.c - MPI functions * Copyright (C) 1994, 1996, 1998, 2000 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. * The GNU MP Library itself is published under the LGPL; * however I decided to publish this code under the plain GPL. 
*/ #include <linux/string.h> #include "mpi-internal.h" #include "longlong.h" /**************** * RES = BASE ^ EXP mod MOD */ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) { mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL; mpi_ptr_t xp_marker = NULL; mpi_ptr_t tspace = NULL; mpi_ptr_t rp, ep, mp, bp; mpi_size_t esize, msize, bsize, rsize; int esign, msign, bsign, rsign; mpi_size_t size; int mod_shift_cnt; int negative_result; int assign_rp = 0; mpi_size_t tsize = 0; /* to avoid compiler warning */ /* fixme: we should check that the warning is void */ int rc = -ENOMEM; esize = exp->nlimbs; msize = mod->nlimbs; size = 2 * msize; esign = exp->sign; msign = mod->sign; rp = res->d; ep = exp->d; if (!msize) return -EINVAL; if (!esize) { /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 * depending on if MOD equals 1. */ rp[0] = 1; res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; res->sign = 0; goto leave; } /* Normalize MOD (i.e. make its most significant bit set) as required by * mpn_divrem. This will make the intermediate values in the calculation * slightly larger, but the correct result is obtained after a final * reduction using the original MOD value. */ mp = mp_marker = mpi_alloc_limb_space(msize); if (!mp) goto enomem; mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]); if (mod_shift_cnt) mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt); else MPN_COPY(mp, mod->d, msize); bsize = base->nlimbs; bsign = base->sign; if (bsize > msize) { /* The base is larger than the module. Reduce it. */ /* Allocate (BSIZE + 1) with space for remainder and quotient. * (The quotient is (bsize - msize + 1) limbs.) */ bp = bp_marker = mpi_alloc_limb_space(bsize + 1); if (!bp) goto enomem; MPN_COPY(bp, base->d, bsize); /* We don't care about the quotient, store it above the remainder, * at BP + MSIZE. */ mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize); bsize = msize; /* Canonicalize the base, since we are going to multiply with it * quite a few times. 
*/ MPN_NORMALIZE(bp, bsize); } else bp = base->d; if (!bsize) { res->nlimbs = 0; res->sign = 0; goto leave; } if (res->alloced < size) { /* We have to allocate more space for RES. If any of the input * parameters are identical to RES, defer deallocation of the old * space. */ if (rp == ep || rp == mp || rp == bp) { rp = mpi_alloc_limb_space(size); if (!rp) goto enomem; assign_rp = 1; } else { if (mpi_resize(res, size) < 0) goto enomem; rp = res->d; } } else { /* Make BASE, EXP and MOD not overlap with RES. */ if (rp == bp) { /* RES and BASE are identical. Allocate temp. space for BASE. */ BUG_ON(bp_marker); bp = bp_marker = mpi_alloc_limb_space(bsize); if (!bp) goto enomem; MPN_COPY(bp, rp, bsize); } if (rp == ep) { /* RES and EXP are identical. Allocate temp. space for EXP. */ ep = ep_marker = mpi_alloc_limb_space(esize); if (!ep) goto enomem; MPN_COPY(ep, rp, esize); } if (rp == mp) { /* RES and MOD are identical. Allocate temporary space for MOD. */ BUG_ON(mp_marker); mp = mp_marker = mpi_alloc_limb_space(msize); if (!mp) goto enomem; MPN_COPY(mp, rp, msize); } } MPN_COPY(rp, bp, bsize); rsize = bsize; rsign = bsign; { mpi_size_t i; mpi_ptr_t xp; int c; mpi_limb_t e; mpi_limb_t carry_limb; struct karatsuba_ctx karactx; xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1)); if (!xp) goto enomem; memset(&karactx, 0, sizeof karactx); negative_result = (ep[0] & 1) && base->sign; i = esize - 1; e = ep[i]; c = count_leading_zeros(e); e = (e << c) << 1; /* shift the exp bits to the left, lose msb */ c = BITS_PER_MPI_LIMB - 1 - c; /* Main loop. * * Make the result be pointed to alternately by XP and RP. This * helps us avoid block copying, which would otherwise be necessary * with the overlap restrictions of mpihelp_divmod. With 50% probability * the result after this loop will be in the area originally pointed * by RP (==RES->d), and with 50% probability in the area originally * pointed to by XP. 
*/ for (;;) { while (c) { mpi_ptr_t tp; mpi_size_t xsize; /*if (mpihelp_mul_n(xp, rp, rp, rsize) < 0) goto enomem */ if (rsize < KARATSUBA_THRESHOLD) mpih_sqr_n_basecase(xp, rp, rsize); else { if (!tspace) { tsize = 2 * rsize; tspace = mpi_alloc_limb_space(tsize); if (!tspace) goto enomem; } else if (tsize < (2 * rsize)) { mpi_free_limb_space(tspace); tsize = 2 * rsize; tspace = mpi_alloc_limb_space(tsize); if (!tspace) goto enomem; } mpih_sqr_n(xp, rp, rsize, tspace); } xsize = 2 * rsize; if (xsize > msize) { mpihelp_divrem(xp + msize, 0, xp, xsize, mp, msize); xsize = msize; } tp = rp; rp = xp; xp = tp; rsize = xsize; if ((mpi_limb_signed_t) e < 0) { /*mpihelp_mul( xp, rp, rsize, bp, bsize ); */ if (bsize < KARATSUBA_THRESHOLD) { mpi_limb_t tmp; if (mpihelp_mul (xp, rp, rsize, bp, bsize, &tmp) < 0) goto enomem; } else { if (mpihelp_mul_karatsuba_case (xp, rp, rsize, bp, bsize, &karactx) < 0) goto enomem; } xsize = rsize + bsize; if (xsize > msize) { mpihelp_divrem(xp + msize, 0, xp, xsize, mp, msize); xsize = msize; } tp = rp; rp = xp; xp = tp; rsize = xsize; } e <<= 1; c--; } i--; if (i < 0) break; e = ep[i]; c = BITS_PER_MPI_LIMB; } /* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT * steps. Adjust the result by reducing it with the original MOD. * * Also make sure the result is put in RES->d (where it already * might be, see above). */ if (mod_shift_cnt) { carry_limb = mpihelp_lshift(res->d, rp, rsize, mod_shift_cnt); rp = res->d; if (carry_limb) { rp[rsize] = carry_limb; rsize++; } } else { MPN_COPY(res->d, rp, rsize); rp = res->d; } if (rsize >= msize) { mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize); rsize = msize; } /* Remove any leading zero words from the result. 
*/ if (mod_shift_cnt) mpihelp_rshift(rp, rp, rsize, mod_shift_cnt); MPN_NORMALIZE(rp, rsize); mpihelp_release_karatsuba_ctx(&karactx); } if (negative_result && rsize) { if (mod_shift_cnt) mpihelp_rshift(mp, mp, msize, mod_shift_cnt); mpihelp_sub(rp, mp, msize, rp, rsize); rsize = msize; rsign = msign; MPN_NORMALIZE(rp, rsize); } res->nlimbs = rsize; res->sign = rsign; leave: rc = 0; enomem: if (assign_rp) mpi_assign_limb_space(res, rp, size); if (mp_marker) mpi_free_limb_space(mp_marker); if (bp_marker) mpi_free_limb_space(bp_marker); if (ep_marker) mpi_free_limb_space(ep_marker); if (xp_marker) mpi_free_limb_space(xp_marker); if (tspace) mpi_free_limb_space(tspace); return rc; } EXPORT_SYMBOL_GPL(mpi_powm);
gpl-2.0
ruzarowski/boeffla-kernel-cm-s3
arch/um/sys-x86_64/sysrq.c
4714
1384
/* * Copyright 2003 PathScale, Inc. * * Licensed under the GPL */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/utsname.h> #include <asm/current.h> #include <asm/ptrace.h> #include "sysrq.h" void __show_regs(struct pt_regs *regs) { printk("\n"); print_modules(); printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current), current->comm, print_tainted(), init_utsname()->release); printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff, PT_REGS_RIP(regs)); printk(KERN_INFO "RSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs), PT_REGS_EFLAGS(regs)); printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n", PT_REGS_RAX(regs), PT_REGS_RBX(regs), PT_REGS_RCX(regs)); printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n", PT_REGS_RDX(regs), PT_REGS_RSI(regs), PT_REGS_RDI(regs)); printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n", PT_REGS_RBP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs)); printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n", PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs)); printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n", PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs)); } void show_regs(struct pt_regs *regs) { __show_regs(regs); show_trace(current, (unsigned long *) &regs); }
gpl-2.0
yank555-lu/Hammerhead-3.4-lollipop
arch/um/kernel/skas/clone.c
4970
1267
/* * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <signal.h> #include <sched.h> #include <asm/unistd.h> #include <sys/time.h> #include "as-layout.h" #include "ptrace_user.h" #include "stub-data.h" #include "sysdep/stub.h" /* * This is in a separate file because it needs to be compiled with any * extraneous gcc flags (-pg, -fprofile-arcs, -ftest-coverage) disabled * * Use UM_KERN_PAGE_SIZE instead of PAGE_SIZE because that calls getpagesize * on some systems. */ void __attribute__ ((__section__ (".__syscall_stub"))) stub_clone_handler(void) { struct stub_data *data = (struct stub_data *) STUB_DATA; long err; err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD, STUB_DATA + UM_KERN_PAGE_SIZE / 2 - sizeof(void *)); if (err != 0) goto out; err = stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0); if (err) goto out; err = stub_syscall3(__NR_setitimer, ITIMER_VIRTUAL, (long) &data->timer, 0); if (err) goto out; remap_stack(data->fd, data->offset); goto done; out: /* * save current result. * Parent: pid; * child: retcode of mmap already saved and it jumps around this * assignment */ data->err = err; done: trap_myself(); }
gpl-2.0
Cpasjuste/android_kernel_lg_p999
drivers/hwmon/ams/ams-input.c
4970
3542
/* * Apple Motion Sensor driver (joystick emulation) * * Copyright (C) 2005 Stelian Pop (stelian@popies.net) * Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/delay.h> #include "ams.h" static unsigned int joystick; module_param(joystick, bool, S_IRUGO); MODULE_PARM_DESC(joystick, "Enable the input class device on module load"); static unsigned int invert; module_param(invert, bool, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(invert, "Invert input data on X and Y axis"); static DEFINE_MUTEX(ams_input_mutex); static void ams_idev_poll(struct input_polled_dev *dev) { struct input_dev *idev = dev->input; s8 x, y, z; mutex_lock(&ams_info.lock); ams_sensors(&x, &y, &z); x -= ams_info.xcalib; y -= ams_info.ycalib; z -= ams_info.zcalib; input_report_abs(idev, ABS_X, invert ? -x : x); input_report_abs(idev, ABS_Y, invert ? -y : y); input_report_abs(idev, ABS_Z, z); input_sync(idev); mutex_unlock(&ams_info.lock); } /* Call with ams_info.lock held! 
*/ static int ams_input_enable(void) { struct input_dev *input; s8 x, y, z; int error; ams_sensors(&x, &y, &z); ams_info.xcalib = x; ams_info.ycalib = y; ams_info.zcalib = z; ams_info.idev = input_allocate_polled_device(); if (!ams_info.idev) return -ENOMEM; ams_info.idev->poll = ams_idev_poll; ams_info.idev->poll_interval = 25; input = ams_info.idev->input; input->name = "Apple Motion Sensor"; input->id.bustype = ams_info.bustype; input->id.vendor = 0; input->dev.parent = &ams_info.of_dev->dev; input_set_abs_params(input, ABS_X, -50, 50, 3, 0); input_set_abs_params(input, ABS_Y, -50, 50, 3, 0); input_set_abs_params(input, ABS_Z, -50, 50, 3, 0); set_bit(EV_ABS, input->evbit); set_bit(EV_KEY, input->evbit); set_bit(BTN_TOUCH, input->keybit); error = input_register_polled_device(ams_info.idev); if (error) { input_free_polled_device(ams_info.idev); ams_info.idev = NULL; return error; } joystick = 1; return 0; } static void ams_input_disable(void) { if (ams_info.idev) { input_unregister_polled_device(ams_info.idev); input_free_polled_device(ams_info.idev); ams_info.idev = NULL; } joystick = 0; } static ssize_t ams_input_show_joystick(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", joystick); } static ssize_t ams_input_store_joystick(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long enable; int error = 0; if (strict_strtoul(buf, 0, &enable) || enable > 1) return -EINVAL; mutex_lock(&ams_input_mutex); if (enable != joystick) { if (enable) error = ams_input_enable(); else ams_input_disable(); } mutex_unlock(&ams_input_mutex); return error ? 
error : count; } static DEVICE_ATTR(joystick, S_IRUGO | S_IWUSR, ams_input_show_joystick, ams_input_store_joystick); int ams_input_init(void) { if (joystick) ams_input_enable(); return device_create_file(&ams_info.of_dev->dev, &dev_attr_joystick); } void ams_input_exit(void) { device_remove_file(&ams_info.of_dev->dev, &dev_attr_joystick); mutex_lock(&ams_input_mutex); ams_input_disable(); mutex_unlock(&ams_input_mutex); }
gpl-2.0
adrientetar/semc-msm-3.4
arch/um/os-Linux/sigio.c
4970
11679
/* * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <poll.h> #include <pty.h> #include <sched.h> #include <signal.h> #include <string.h> #include "kern_util.h" #include "init.h" #include "os.h" #include "sigio.h" #include "um_malloc.h" /* * Protected by sigio_lock(), also used by sigio_cleanup, which is an * exitcall. */ static int write_sigio_pid = -1; static unsigned long write_sigio_stack; /* * These arrays are initialized before the sigio thread is started, and * the descriptors closed after it is killed. So, it can't see them change. * On the UML side, they are changed under the sigio_lock. */ #define SIGIO_FDS_INIT {-1, -1} static int write_sigio_fds[2] = SIGIO_FDS_INIT; static int sigio_private[2] = SIGIO_FDS_INIT; struct pollfds { struct pollfd *poll; int size; int used; }; /* * Protected by sigio_lock(). Used by the sigio thread, but the UML thread * synchronizes with it. 
*/ static struct pollfds current_poll; static struct pollfds next_poll; static struct pollfds all_sigio_fds; static int write_sigio_thread(void *unused) { struct pollfds *fds, tmp; struct pollfd *p; int i, n, respond_fd; char c; signal(SIGWINCH, SIG_IGN); fds = &current_poll; while (1) { n = poll(fds->poll, fds->used, -1); if (n < 0) { if (errno == EINTR) continue; printk(UM_KERN_ERR "write_sigio_thread : poll returned " "%d, errno = %d\n", n, errno); } for (i = 0; i < fds->used; i++) { p = &fds->poll[i]; if (p->revents == 0) continue; if (p->fd == sigio_private[1]) { CATCH_EINTR(n = read(sigio_private[1], &c, sizeof(c))); if (n != sizeof(c)) printk(UM_KERN_ERR "write_sigio_thread : " "read on socket failed, " "err = %d\n", errno); tmp = current_poll; current_poll = next_poll; next_poll = tmp; respond_fd = sigio_private[1]; } else { respond_fd = write_sigio_fds[1]; fds->used--; memmove(&fds->poll[i], &fds->poll[i + 1], (fds->used - i) * sizeof(*fds->poll)); } CATCH_EINTR(n = write(respond_fd, &c, sizeof(c))); if (n != sizeof(c)) printk(UM_KERN_ERR "write_sigio_thread : " "write on socket failed, err = %d\n", errno); } } return 0; } static int need_poll(struct pollfds *polls, int n) { struct pollfd *new; if (n <= polls->size) return 0; new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC); if (new == NULL) { printk(UM_KERN_ERR "need_poll : failed to allocate new " "pollfds\n"); return -ENOMEM; } memcpy(new, polls->poll, polls->used * sizeof(struct pollfd)); kfree(polls->poll); polls->poll = new; polls->size = n; return 0; } /* * Must be called with sigio_lock held, because it's needed by the marked * critical section. 
*/ static void update_thread(void) { unsigned long flags; int n; char c; flags = set_signals(0); CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c))); if (n != sizeof(c)) { printk(UM_KERN_ERR "update_thread : write failed, err = %d\n", errno); goto fail; } CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c))); if (n != sizeof(c)) { printk(UM_KERN_ERR "update_thread : read failed, err = %d\n", errno); goto fail; } set_signals(flags); return; fail: /* Critical section start */ if (write_sigio_pid != -1) { os_kill_process(write_sigio_pid, 1); free_stack(write_sigio_stack, 0); } write_sigio_pid = -1; close(sigio_private[0]); close(sigio_private[1]); close(write_sigio_fds[0]); close(write_sigio_fds[1]); /* Critical section end */ set_signals(flags); } int add_sigio_fd(int fd) { struct pollfd *p; int err = 0, i, n; sigio_lock(); for (i = 0; i < all_sigio_fds.used; i++) { if (all_sigio_fds.poll[i].fd == fd) break; } if (i == all_sigio_fds.used) goto out; p = &all_sigio_fds.poll[i]; for (i = 0; i < current_poll.used; i++) { if (current_poll.poll[i].fd == fd) goto out; } n = current_poll.used; err = need_poll(&next_poll, n + 1); if (err) goto out; memcpy(next_poll.poll, current_poll.poll, current_poll.used * sizeof(struct pollfd)); next_poll.poll[n] = *p; next_poll.used = n + 1; update_thread(); out: sigio_unlock(); return err; } int ignore_sigio_fd(int fd) { struct pollfd *p; int err = 0, i, n = 0; /* * This is called from exitcalls elsewhere in UML - if * sigio_cleanup has already run, then update_thread will hang * or fail because the thread is no longer running. 
*/ if (write_sigio_pid == -1) return -EIO; sigio_lock(); for (i = 0; i < current_poll.used; i++) { if (current_poll.poll[i].fd == fd) break; } if (i == current_poll.used) goto out; err = need_poll(&next_poll, current_poll.used - 1); if (err) goto out; for (i = 0; i < current_poll.used; i++) { p = &current_poll.poll[i]; if (p->fd != fd) next_poll.poll[n++] = *p; } next_poll.used = current_poll.used - 1; update_thread(); out: sigio_unlock(); return err; } static struct pollfd *setup_initial_poll(int fd) { struct pollfd *p; p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL); if (p == NULL) { printk(UM_KERN_ERR "setup_initial_poll : failed to allocate " "poll\n"); return NULL; } *p = ((struct pollfd) { .fd = fd, .events = POLLIN, .revents = 0 }); return p; } static void write_sigio_workaround(void) { struct pollfd *p; int err; int l_write_sigio_fds[2]; int l_sigio_private[2]; int l_write_sigio_pid; /* We call this *tons* of times - and most ones we must just fail. */ sigio_lock(); l_write_sigio_pid = write_sigio_pid; sigio_unlock(); if (l_write_sigio_pid != -1) return; err = os_pipe(l_write_sigio_fds, 1, 1); if (err < 0) { printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, " "err = %d\n", -err); return; } err = os_pipe(l_sigio_private, 1, 1); if (err < 0) { printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, " "err = %d\n", -err); goto out_close1; } p = setup_initial_poll(l_sigio_private[1]); if (!p) goto out_close2; sigio_lock(); /* * Did we race? Don't try to optimize this, please, it's not so likely * to happen, and no more than once at the boot. 
*/ if (write_sigio_pid != -1) goto out_free; current_poll = ((struct pollfds) { .poll = p, .used = 1, .size = 1 }); if (write_sigio_irq(l_write_sigio_fds[0])) goto out_clear_poll; memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds)); memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private)); write_sigio_pid = run_helper_thread(write_sigio_thread, NULL, CLONE_FILES | CLONE_VM, &write_sigio_stack); if (write_sigio_pid < 0) goto out_clear; sigio_unlock(); return; out_clear: write_sigio_pid = -1; write_sigio_fds[0] = -1; write_sigio_fds[1] = -1; sigio_private[0] = -1; sigio_private[1] = -1; out_clear_poll: current_poll = ((struct pollfds) { .poll = NULL, .size = 0, .used = 0 }); out_free: sigio_unlock(); kfree(p); out_close2: close(l_sigio_private[0]); close(l_sigio_private[1]); out_close1: close(l_write_sigio_fds[0]); close(l_write_sigio_fds[1]); } void sigio_broken(int fd, int read) { int err; write_sigio_workaround(); sigio_lock(); err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1); if (err) { printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd " "for descriptor %d\n", fd); goto out; } all_sigio_fds.poll[all_sigio_fds.used++] = ((struct pollfd) { .fd = fd, .events = read ? 
POLLIN : POLLOUT, .revents = 0 }); out: sigio_unlock(); } /* Changed during early boot */ static int pty_output_sigio; static int pty_close_sigio; void maybe_sigio_broken(int fd, int read) { if (!isatty(fd)) return; if ((read || pty_output_sigio) && (!read || pty_close_sigio)) return; sigio_broken(fd, read); } static void sigio_cleanup(void) { if (write_sigio_pid == -1) return; os_kill_process(write_sigio_pid, 1); free_stack(write_sigio_stack, 0); write_sigio_pid = -1; } __uml_exitcall(sigio_cleanup); /* Used as a flag during SIGIO testing early in boot */ static int got_sigio; static void __init handler(int sig) { got_sigio = 1; } struct openpty_arg { int master; int slave; int err; }; static void openpty_cb(void *arg) { struct openpty_arg *info = arg; info->err = 0; if (openpty(&info->master, &info->slave, NULL, NULL, NULL)) info->err = -errno; } static int async_pty(int master, int slave) { int flags; flags = fcntl(master, F_GETFL); if (flags < 0) return -errno; if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) || (fcntl(master, F_SETOWN, os_getpid()) < 0)) return -errno; if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0)) return -errno; return 0; } static void __init check_one_sigio(void (*proc)(int, int)) { struct sigaction old, new; struct openpty_arg pty = { .master = -1, .slave = -1 }; int master, slave, err; initial_thread_cb(openpty_cb, &pty); if (pty.err) { printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n", -pty.err); return; } master = pty.master; slave = pty.slave; if ((master == -1) || (slave == -1)) { printk(UM_KERN_ERR "check_one_sigio failed to allocate a " "pty\n"); return; } /* Not now, but complain so we now where we failed. 
*/ err = raw(master); if (err < 0) { printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n", -err); return; } err = async_pty(master, slave); if (err < 0) { printk(UM_KERN_ERR "check_one_sigio : sigio_async failed, " "err = %d\n", -err); return; } if (sigaction(SIGIO, NULL, &old) < 0) { printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, " "errno = %d\n", errno); return; } new = old; new.sa_handler = handler; if (sigaction(SIGIO, &new, NULL) < 0) { printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, " "errno = %d\n", errno); return; } got_sigio = 0; (*proc)(master, slave); close(master); close(slave); if (sigaction(SIGIO, &old, NULL) < 0) printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, " "errno = %d\n", errno); } static void tty_output(int master, int slave) { int n; char buf[512]; printk(UM_KERN_INFO "Checking that host ptys support output SIGIO..."); memset(buf, 0, sizeof(buf)); while (write(master, buf, sizeof(buf)) > 0) ; if (errno != EAGAIN) printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n", errno); while (((n = read(slave, buf, sizeof(buf))) > 0) && !({ barrier(); got_sigio; })) ; if (got_sigio) { printk(UM_KERN_CONT "Yes\n"); pty_output_sigio = 1; } else if (n == -EAGAIN) printk(UM_KERN_CONT "No, enabling workaround\n"); else printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n); } static void tty_close(int master, int slave) { printk(UM_KERN_INFO "Checking that host ptys support SIGIO on " "close..."); close(slave); if (got_sigio) { printk(UM_KERN_CONT "Yes\n"); pty_close_sigio = 1; } else printk(UM_KERN_CONT "No, enabling workaround\n"); } static void __init check_sigio(void) { if ((access("/dev/ptmx", R_OK) < 0) && (access("/dev/ptyp0", R_OK) < 0)) { printk(UM_KERN_WARNING "No pseudo-terminals available - " "skipping pty SIGIO check\n"); return; } check_one_sigio(tty_output); check_one_sigio(tty_close); } /* Here because it only does the SIGIO testing for now */ void __init os_check_bugs(void) { 
check_sigio(); }
gpl-2.0
GeorgeIoak/AM1802-Kernel
fs/nfsd/nfscache.c
5738
7795
/* * Request reply cache. This is currently a global cache, but this may * change in the future and be a per-client cache. * * This code is heavily inspired by the 44BSD implementation, although * it does things a bit differently. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/slab.h> #include "nfsd.h" #include "cache.h" /* Size of reply cache. Common values are: * 4.3BSD: 128 * 4.4BSD: 256 * Solaris2: 1024 * DEC Unix: 512-4096 */ #define CACHESIZE 1024 #define HASHSIZE 64 static struct hlist_head * cache_hash; static struct list_head lru_head; static int cache_disabled = 1; /* * Calculate the hash index from an XID. */ static inline u32 request_hash(u32 xid) { u32 h = xid; h ^= (xid >> 24); return h & (HASHSIZE-1); } static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); /* * locking for the reply cache: * A cache entry is "single use" if c_state == RC_INPROG * Otherwise, it when accessing _prev or _next, the lock must be held. */ static DEFINE_SPINLOCK(cache_lock); int nfsd_reply_cache_init(void) { struct svc_cacherep *rp; int i; INIT_LIST_HEAD(&lru_head); i = CACHESIZE; while (i) { rp = kmalloc(sizeof(*rp), GFP_KERNEL); if (!rp) goto out_nomem; list_add(&rp->c_lru, &lru_head); rp->c_state = RC_UNUSED; rp->c_type = RC_NOCACHE; INIT_HLIST_NODE(&rp->c_hash); i--; } cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL); if (!cache_hash) goto out_nomem; cache_disabled = 0; return 0; out_nomem: printk(KERN_ERR "nfsd: failed to allocate reply cache\n"); nfsd_reply_cache_shutdown(); return -ENOMEM; } void nfsd_reply_cache_shutdown(void) { struct svc_cacherep *rp; while (!list_empty(&lru_head)) { rp = list_entry(lru_head.next, struct svc_cacherep, c_lru); if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF) kfree(rp->c_replvec.iov_base); list_del(&rp->c_lru); kfree(rp); } cache_disabled = 1; kfree (cache_hash); cache_hash = NULL; } /* * Move cache entry to end of LRU list */ static void 
lru_put_end(struct svc_cacherep *rp) { list_move_tail(&rp->c_lru, &lru_head); } /* * Move a cache entry from one hash list to another */ static void hash_refile(struct svc_cacherep *rp) { hlist_del_init(&rp->c_hash); hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid)); } /* * Try to find an entry matching the current call in the cache. When none * is found, we grab the oldest unlocked entry off the LRU list. * Note that no operation within the loop may sleep. */ int nfsd_cache_lookup(struct svc_rqst *rqstp) { struct hlist_node *hn; struct hlist_head *rh; struct svc_cacherep *rp; __be32 xid = rqstp->rq_xid; u32 proto = rqstp->rq_prot, vers = rqstp->rq_vers, proc = rqstp->rq_proc; unsigned long age; int type = rqstp->rq_cachetype; int rtn; rqstp->rq_cacherep = NULL; if (cache_disabled || type == RC_NOCACHE) { nfsdstats.rcnocache++; return RC_DOIT; } spin_lock(&cache_lock); rtn = RC_DOIT; rh = &cache_hash[request_hash(xid)]; hlist_for_each_entry(rp, hn, rh, c_hash) { if (rp->c_state != RC_UNUSED && xid == rp->c_xid && proc == rp->c_proc && proto == rp->c_prot && vers == rp->c_vers && time_before(jiffies, rp->c_timestamp + 120*HZ) && memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) { nfsdstats.rchits++; goto found_entry; } } nfsdstats.rcmisses++; /* This loop shouldn't take more than a few iterations normally */ { int safe = 0; list_for_each_entry(rp, &lru_head, c_lru) { if (rp->c_state != RC_INPROG) break; if (safe++ > CACHESIZE) { printk("nfsd: loop in repcache LRU list\n"); cache_disabled = 1; goto out; } } } /* All entries on the LRU are in-progress. 
This should not happen */ if (&rp->c_lru == &lru_head) { static int complaints; printk(KERN_WARNING "nfsd: all repcache entries locked!\n"); if (++complaints > 5) { printk(KERN_WARNING "nfsd: disabling repcache.\n"); cache_disabled = 1; } goto out; } rqstp->rq_cacherep = rp; rp->c_state = RC_INPROG; rp->c_xid = xid; rp->c_proc = proc; memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr)); rp->c_prot = proto; rp->c_vers = vers; rp->c_timestamp = jiffies; hash_refile(rp); /* release any buffer */ if (rp->c_type == RC_REPLBUFF) { kfree(rp->c_replvec.iov_base); rp->c_replvec.iov_base = NULL; } rp->c_type = RC_NOCACHE; out: spin_unlock(&cache_lock); return rtn; found_entry: /* We found a matching entry which is either in progress or done. */ age = jiffies - rp->c_timestamp; rp->c_timestamp = jiffies; lru_put_end(rp); rtn = RC_DROPIT; /* Request being processed or excessive rexmits */ if (rp->c_state == RC_INPROG || age < RC_DELAY) goto out; /* From the hall of fame of impractical attacks: * Is this a user who tries to snoop on the cache? */ rtn = RC_DOIT; if (!rqstp->rq_secure && rp->c_secure) goto out; /* Compose RPC reply header */ switch (rp->c_type) { case RC_NOCACHE: break; case RC_REPLSTAT: svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat); rtn = RC_REPLY; break; case RC_REPLBUFF: if (!nfsd_cache_append(rqstp, &rp->c_replvec)) goto out; /* should not happen */ rtn = RC_REPLY; break; default: printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type); rp->c_state = RC_UNUSED; } goto out; } /* * Update a cache entry. This is called from nfsd_dispatch when * the procedure has been executed and the complete reply is in * rqstp->rq_res. * * We're copying around data here rather than swapping buffers because * the toplevel loop requires max-sized buffers, which would be a waste * of memory for a cache with a max reply size of 100 bytes (diropokres). 
* * If we should start to use different types of cache entries tailored * specifically for attrstat and fh's, we may save even more space. * * Also note that a cachetype of RC_NOCACHE can legally be passed when * nfsd failed to encode a reply that otherwise would have been cached. * In this case, nfsd_cache_update is called with statp == NULL. */ void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) { struct svc_cacherep *rp; struct kvec *resv = &rqstp->rq_res.head[0], *cachv; int len; if (!(rp = rqstp->rq_cacherep) || cache_disabled) return; len = resv->iov_len - ((char*)statp - (char*)resv->iov_base); len >>= 2; /* Don't cache excessive amounts of data and XDR failures */ if (!statp || len > (256 >> 2)) { rp->c_state = RC_UNUSED; return; } switch (cachetype) { case RC_REPLSTAT: if (len != 1) printk("nfsd: RC_REPLSTAT/reply len %d!\n",len); rp->c_replstat = *statp; break; case RC_REPLBUFF: cachv = &rp->c_replvec; cachv->iov_base = kmalloc(len << 2, GFP_KERNEL); if (!cachv->iov_base) { spin_lock(&cache_lock); rp->c_state = RC_UNUSED; spin_unlock(&cache_lock); return; } cachv->iov_len = len << 2; memcpy(cachv->iov_base, statp, len << 2); break; } spin_lock(&cache_lock); lru_put_end(rp); rp->c_secure = rqstp->rq_secure; rp->c_type = cachetype; rp->c_state = RC_DONE; rp->c_timestamp = jiffies; spin_unlock(&cache_lock); return; } /* * Copy cached reply to current reply buffer. Should always fit. * FIXME as reply is in a page, we should just attach the page, and * keep a refcount.... */ static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data) { struct kvec *vec = &rqstp->rq_res.head[0]; if (vec->iov_len + data->iov_len > PAGE_SIZE) { printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n", data->iov_len); return 0; } memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len); vec->iov_len += data->iov_len; return 1; }
gpl-2.0
jfdsmabalot/kernel_samsung_msm8974ab
net/mac80211/aes_ccm.c
8042
3522
/* * Copyright 2003-2004, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> #include <crypto/aes.h> #include <net/mac80211.h> #include "key.h" #include "aes_ccm.h" static void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *scratch, u8 *a) { int i; u8 *b_0, *aad, *b, *s_0; b_0 = scratch + 3 * AES_BLOCK_SIZE; aad = scratch + 4 * AES_BLOCK_SIZE; b = scratch; s_0 = scratch + AES_BLOCK_SIZE; crypto_cipher_encrypt_one(tfm, b, b_0); /* Extra Authenticate-only data (always two AES blocks) */ for (i = 0; i < AES_BLOCK_SIZE; i++) aad[i] ^= b[i]; crypto_cipher_encrypt_one(tfm, b, aad); aad += AES_BLOCK_SIZE; for (i = 0; i < AES_BLOCK_SIZE; i++) aad[i] ^= b[i]; crypto_cipher_encrypt_one(tfm, a, aad); /* Mask out bits from auth-only-b_0 */ b_0[0] &= 0x07; /* S_0 is used to encrypt T (= MIC) */ b_0[14] = 0; b_0[15] = 0; crypto_cipher_encrypt_one(tfm, s_0, b_0); } void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, u8 *data, size_t data_len, u8 *cdata, u8 *mic) { int i, j, last_len, num_blocks; u8 *pos, *cpos, *b, *s_0, *e, *b_0; b = scratch; s_0 = scratch + AES_BLOCK_SIZE; e = scratch + 2 * AES_BLOCK_SIZE; b_0 = scratch + 3 * AES_BLOCK_SIZE; num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_SIZE); last_len = data_len % AES_BLOCK_SIZE; aes_ccm_prepare(tfm, scratch, b); /* Process payload blocks */ pos = data; cpos = cdata; for (j = 1; j <= num_blocks; j++) { int blen = (j == num_blocks && last_len) ? 
last_len : AES_BLOCK_SIZE; /* Authentication followed by encryption */ for (i = 0; i < blen; i++) b[i] ^= pos[i]; crypto_cipher_encrypt_one(tfm, b, b); b_0[14] = (j >> 8) & 0xff; b_0[15] = j & 0xff; crypto_cipher_encrypt_one(tfm, e, b_0); for (i = 0; i < blen; i++) *cpos++ = *pos++ ^ e[i]; } for (i = 0; i < CCMP_MIC_LEN; i++) mic[i] = b[i] ^ s_0[i]; } int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, u8 *cdata, size_t data_len, u8 *mic, u8 *data) { int i, j, last_len, num_blocks; u8 *pos, *cpos, *b, *s_0, *a, *b_0; b = scratch; s_0 = scratch + AES_BLOCK_SIZE; a = scratch + 2 * AES_BLOCK_SIZE; b_0 = scratch + 3 * AES_BLOCK_SIZE; num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_SIZE); last_len = data_len % AES_BLOCK_SIZE; aes_ccm_prepare(tfm, scratch, a); /* Process payload blocks */ cpos = cdata; pos = data; for (j = 1; j <= num_blocks; j++) { int blen = (j == num_blocks && last_len) ? last_len : AES_BLOCK_SIZE; /* Decryption followed by authentication */ b_0[14] = (j >> 8) & 0xff; b_0[15] = j & 0xff; crypto_cipher_encrypt_one(tfm, b, b_0); for (i = 0; i < blen; i++) { *pos = *cpos++ ^ b[i]; a[i] ^= *pos++; } crypto_cipher_encrypt_one(tfm, a, a); } for (i = 0; i < CCMP_MIC_LEN; i++) { if ((mic[i] ^ s_0[i]) != a[i]) return -1; } return 0; } struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]) { struct crypto_cipher *tfm; tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (!IS_ERR(tfm)) crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN); return tfm; } void ieee80211_aes_key_free(struct crypto_cipher *tfm) { crypto_free_cipher(tfm); }
gpl-2.0
motley-git/kernel-Nexus4
drivers/net/wimax/i2400m/usb-notif.c
9322
8147
/* * Intel Wireless WiMAX Connection 2400m over USB * Notification handling * * * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * Intel Corporation <linux-wimax@intel.com> * Yanir Lubetkin <yanirx.lubetkin@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * - Initial implementation * * * The notification endpoint is active when the device is not in boot * mode; in here we just read and get notifications; based on those, * we act to either reinitialize the device after a reboot or to * submit a RX request. * * ROADMAP * * i2400mu_usb_notification_setup() * * i2400mu_usb_notification_release() * * i2400mu_usb_notification_cb() Called when a URB is ready * i2400mu_notif_grok() * i2400m_is_boot_barker() * i2400m_dev_reset_handle() * i2400mu_rx_kick() */ #include <linux/usb.h> #include <linux/slab.h> #include "i2400m-usb.h" #define D_SUBMODULE notif #include "usb-debug-levels.h" static const __le32 i2400m_ZERO_BARKER[4] = { 0, 0, 0, 0 }; /* * Process a received notification * * In normal operation mode, we can only receive two types of payloads * on the notification endpoint: * * - a reboot barker, we do a bootstrap (the device has reseted). 
* * - a block of zeroes: there is pending data in the IN endpoint */ static int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf, size_t buf_len) { int ret; struct device *dev = &i2400mu->usb_iface->dev; struct i2400m *i2400m = &i2400mu->i2400m; d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n", i2400mu, buf, buf_len); ret = -EIO; if (buf_len < sizeof(i2400m_ZERO_BARKER)) /* Not a bug, just ignore */ goto error_bad_size; ret = 0; if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) { i2400mu_rx_kick(i2400mu); goto out; } ret = i2400m_is_boot_barker(i2400m, buf, buf_len); if (unlikely(ret >= 0)) ret = i2400m_dev_reset_handle(i2400m, "device rebooted"); else /* Unknown or unexpected data in the notif message */ i2400m_unknown_barker(i2400m, buf, buf_len); error_bad_size: out: d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n", i2400mu, buf, buf_len, ret); return ret; } /* * URB callback for the notification endpoint * * @urb: the urb received from the notification endpoint * * This function will just process the USB side of the transaction, * checking everything is fine, pass the processing to * i2400m_notification_grok() and resubmit the URB. */ static void i2400mu_notification_cb(struct urb *urb) { int ret; struct i2400mu *i2400mu = urb->context; struct device *dev = &i2400mu->usb_iface->dev; d_fnstart(4, dev, "(urb %p status %d actual_length %d)\n", urb, urb->status, urb->actual_length); ret = urb->status; switch (ret) { case 0: ret = i2400mu_notification_grok(i2400mu, urb->transfer_buffer, urb->actual_length); if (ret == -EIO && edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) goto error_exceeded; if (ret == -ENOMEM) /* uff...power cycle? shutdown? */ goto error_exceeded; break; case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* ditto */ case -ESHUTDOWN: /* URB killed */ case -ECONNRESET: /* disconnection */ goto out; /* Notify around */ default: /* Some error? 
*/ if (edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) goto error_exceeded; dev_err(dev, "notification: URB error %d, retrying\n", urb->status); } usb_mark_last_busy(i2400mu->usb_dev); ret = usb_submit_urb(i2400mu->notif_urb, GFP_ATOMIC); switch (ret) { case 0: case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* ditto */ case -ESHUTDOWN: /* URB killed */ case -ECONNRESET: /* disconnection */ break; /* just ignore */ default: /* Some error? */ dev_err(dev, "notification: cannot submit URB: %d\n", ret); goto error_submit; } d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n", urb, urb->status, urb->actual_length); return; error_exceeded: dev_err(dev, "maximum errors in notification URB exceeded; " "resetting device\n"); error_submit: usb_queue_reset_device(i2400mu->usb_iface); out: d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n", urb, urb->status, urb->actual_length); } /* * setup the notification endpoint * * @i2400m: device descriptor * * This procedure prepares the notification urb and handler for receiving * unsolicited barkers from the device. 
*/ int i2400mu_notification_setup(struct i2400mu *i2400mu) { struct device *dev = &i2400mu->usb_iface->dev; int usb_pipe, ret = 0; struct usb_endpoint_descriptor *epd; char *buf; d_fnstart(4, dev, "(i2400m %p)\n", i2400mu); buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA); if (buf == NULL) { dev_err(dev, "notification: buffer allocation failed\n"); ret = -ENOMEM; goto error_buf_alloc; } i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL); if (!i2400mu->notif_urb) { ret = -ENOMEM; dev_err(dev, "notification: cannot allocate URB\n"); goto error_alloc_urb; } epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.notification); usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress); usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe, buf, I2400MU_MAX_NOTIFICATION_LEN, i2400mu_notification_cb, i2400mu, epd->bInterval); ret = usb_submit_urb(i2400mu->notif_urb, GFP_KERNEL); if (ret != 0) { dev_err(dev, "notification: cannot submit URB: %d\n", ret); goto error_submit; } d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret); return ret; error_submit: usb_free_urb(i2400mu->notif_urb); error_alloc_urb: kfree(buf); error_buf_alloc: d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret); return ret; } /* * Tear down of the notification mechanism * * @i2400m: device descriptor * * Kill the interrupt endpoint urb, free any allocated resources. * * We need to check if we have done it before as for example, * _suspend() call this; if after a suspend() we get a _disconnect() * (as the case is when hibernating), nothing bad happens. */ void i2400mu_notification_release(struct i2400mu *i2400mu) { struct device *dev = &i2400mu->usb_iface->dev; d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); if (i2400mu->notif_urb != NULL) { usb_kill_urb(i2400mu->notif_urb); kfree(i2400mu->notif_urb->transfer_buffer); usb_free_urb(i2400mu->notif_urb); i2400mu->notif_urb = NULL; } d_fnend(4, dev, "(i2400mu %p)\n", i2400mu); }
gpl-2.0
nvl1109/u-boot-k70
drivers/net/fsl_mcdmafec.c
107
16651
/* * (C) Copyright 2000-2004 * Wolfgang Denk, DENX Software Engineering, wd@denx.de. * * (C) Copyright 2007 Freescale Semiconductor, Inc. * TsiChung Liew (Tsi-Chung.Liew@freescale.com) * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <malloc.h> #include <command.h> #include <config.h> #include <net.h> #include <miiphy.h> #undef ET_DEBUG #undef MII_DEBUG /* Ethernet Transmit and Receive Buffers */ #define DBUF_LENGTH 1520 #define PKT_MAXBUF_SIZE 1518 #define PKT_MINBUF_SIZE 64 #define PKT_MAXBLR_SIZE 1536 #define LAST_PKTBUFSRX PKTBUFSRX - 1 #define BD_ENET_RX_W_E (BD_ENET_RX_WRAP | BD_ENET_RX_EMPTY) #define BD_ENET_TX_RDY_LST (BD_ENET_TX_READY | BD_ENET_TX_LAST) #define FIFO_ERRSTAT (FIFO_STAT_RXW | FIFO_STAT_UF | FIFO_STAT_OF) /* RxBD bits definitions */ #define BD_ENET_RX_ERR (BD_ENET_RX_LG | BD_ENET_RX_NO | BD_ENET_RX_CR | \ BD_ENET_RX_OV | BD_ENET_RX_TR) #include <asm/immap.h> #include <asm/fsl_mcdmafec.h> #include "MCD_dma.h" DECLARE_GLOBAL_DATA_PTR; struct fec_info_dma fec_info[] = { #ifdef CONFIG_SYS_FEC0_IOBASE { 0, /* index */ CONFIG_SYS_FEC0_IOBASE, /* io base */ CONFIG_SYS_FEC0_PINMUX, /* gpio pin muxing */ CONFIG_SYS_FEC0_MIIBASE, /* mii base */ -1, /* phy_addr */ 0, /* duplex and speed */ 0, /* phy name */ 
0, /* phyname init */ 0, /* RX BD */ 0, /* TX BD */ 0, /* rx Index */ 0, /* tx Index */ 0, /* tx buffer */ 0, /* initialized flag */ (struct fec_info_dma *)-1, /* next */ FEC0_RX_TASK, /* rxTask */ FEC0_TX_TASK, /* txTask */ FEC0_RX_PRIORITY, /* rxPri */ FEC0_TX_PRIORITY, /* txPri */ FEC0_RX_INIT, /* rxInit */ FEC0_TX_INIT, /* txInit */ 0, /* usedTbdIndex */ 0, /* cleanTbdNum */ }, #endif #ifdef CONFIG_SYS_FEC1_IOBASE { 1, /* index */ CONFIG_SYS_FEC1_IOBASE, /* io base */ CONFIG_SYS_FEC1_PINMUX, /* gpio pin muxing */ CONFIG_SYS_FEC1_MIIBASE, /* mii base */ -1, /* phy_addr */ 0, /* duplex and speed */ 0, /* phy name */ 0, /* phy name init */ #ifdef CONFIG_SYS_DMA_USE_INTSRAM (cbd_t *)DBUF_LENGTH, /* RX BD */ #else 0, /* RX BD */ #endif 0, /* TX BD */ 0, /* rx Index */ 0, /* tx Index */ 0, /* tx buffer */ 0, /* initialized flag */ (struct fec_info_dma *)-1, /* next */ FEC1_RX_TASK, /* rxTask */ FEC1_TX_TASK, /* txTask */ FEC1_RX_PRIORITY, /* rxPri */ FEC1_TX_PRIORITY, /* txPri */ FEC1_RX_INIT, /* rxInit */ FEC1_TX_INIT, /* txInit */ 0, /* usedTbdIndex */ 0, /* cleanTbdNum */ } #endif }; static int fec_send(struct eth_device *dev, volatile void *packet, int length); static int fec_recv(struct eth_device *dev); static int fec_init(struct eth_device *dev, bd_t * bd); static void fec_halt(struct eth_device *dev); #ifdef ET_DEBUG static void dbg_fec_regs(struct eth_device *dev) { struct fec_info_dma *info = dev->priv; volatile fecdma_t *fecp = (fecdma_t *) (info->iobase); printf("=====\n"); printf("ievent %x - %x\n", (int)&fecp->eir, fecp->eir); printf("imask %x - %x\n", (int)&fecp->eimr, fecp->eimr); printf("ecntrl %x - %x\n", (int)&fecp->ecr, fecp->ecr); printf("mii_mframe %x - %x\n", (int)&fecp->mmfr, fecp->mmfr); printf("mii_speed %x - %x\n", (int)&fecp->mscr, fecp->mscr); printf("mii_ctrlstat %x - %x\n", (int)&fecp->mibc, fecp->mibc); printf("r_cntrl %x - %x\n", (int)&fecp->rcr, fecp->rcr); printf("r hash %x - %x\n", (int)&fecp->rhr, fecp->rhr); printf("x_cntrl %x - 
%x\n", (int)&fecp->tcr, fecp->tcr); printf("padr_l %x - %x\n", (int)&fecp->palr, fecp->palr); printf("padr_u %x - %x\n", (int)&fecp->paur, fecp->paur); printf("op_pause %x - %x\n", (int)&fecp->opd, fecp->opd); printf("iadr_u %x - %x\n", (int)&fecp->iaur, fecp->iaur); printf("iadr_l %x - %x\n", (int)&fecp->ialr, fecp->ialr); printf("gadr_u %x - %x\n", (int)&fecp->gaur, fecp->gaur); printf("gadr_l %x - %x\n", (int)&fecp->galr, fecp->galr); printf("x_wmrk %x - %x\n", (int)&fecp->tfwr, fecp->tfwr); printf("r_fdata %x - %x\n", (int)&fecp->rfdr, fecp->rfdr); printf("r_fstat %x - %x\n", (int)&fecp->rfsr, fecp->rfsr); printf("r_fctrl %x - %x\n", (int)&fecp->rfcr, fecp->rfcr); printf("r_flrfp %x - %x\n", (int)&fecp->rlrfp, fecp->rlrfp); printf("r_flwfp %x - %x\n", (int)&fecp->rlwfp, fecp->rlwfp); printf("r_frfar %x - %x\n", (int)&fecp->rfar, fecp->rfar); printf("r_frfrp %x - %x\n", (int)&fecp->rfrp, fecp->rfrp); printf("r_frfwp %x - %x\n", (int)&fecp->rfwp, fecp->rfwp); printf("t_fdata %x - %x\n", (int)&fecp->tfdr, fecp->tfdr); printf("t_fstat %x - %x\n", (int)&fecp->tfsr, fecp->tfsr); printf("t_fctrl %x - %x\n", (int)&fecp->tfcr, fecp->tfcr); printf("t_flrfp %x - %x\n", (int)&fecp->tlrfp, fecp->tlrfp); printf("t_flwfp %x - %x\n", (int)&fecp->tlwfp, fecp->tlwfp); printf("t_ftfar %x - %x\n", (int)&fecp->tfar, fecp->tfar); printf("t_ftfrp %x - %x\n", (int)&fecp->tfrp, fecp->tfrp); printf("t_ftfwp %x - %x\n", (int)&fecp->tfwp, fecp->tfwp); printf("frst %x - %x\n", (int)&fecp->frst, fecp->frst); printf("ctcwr %x - %x\n", (int)&fecp->ctcwr, fecp->ctcwr); } #endif static void set_fec_duplex_speed(volatile fecdma_t * fecp, bd_t * bd, int dup_spd) { if ((dup_spd >> 16) == FULL) { /* Set maximum frame length */ fecp->rcr = FEC_RCR_MAX_FL(PKT_MAXBUF_SIZE) | FEC_RCR_MII_MODE | FEC_RCR_PROM | 0x100; fecp->tcr = FEC_TCR_FDEN; } else { /* Half duplex mode */ fecp->rcr = FEC_RCR_MAX_FL(PKT_MAXBUF_SIZE) | FEC_RCR_MII_MODE | FEC_RCR_DRT; fecp->tcr &= ~FEC_TCR_FDEN; } if ((dup_spd & 0xFFFF) 
== _100BASET) { #ifdef MII_DEBUG printf("100Mbps\n"); #endif bd->bi_ethspeed = 100; } else { #ifdef MII_DEBUG printf("10Mbps\n"); #endif bd->bi_ethspeed = 10; } } static int fec_send(struct eth_device *dev, volatile void *packet, int length) { struct fec_info_dma *info = dev->priv; cbd_t *pTbd, *pUsedTbd; u16 phyStatus; miiphy_read(dev->name, info->phy_addr, PHY_BMSR, &phyStatus); /* process all the consumed TBDs */ while (info->cleanTbdNum < CONFIG_SYS_TX_ETH_BUFFER) { pUsedTbd = &info->txbd[info->usedTbdIdx]; if (pUsedTbd->cbd_sc & BD_ENET_TX_READY) { #ifdef ET_DEBUG printf("Cannot clean TBD %d, in use\n", info->cleanTbdNum); #endif return 0; } /* clean this buffer descriptor */ if (info->usedTbdIdx == (CONFIG_SYS_TX_ETH_BUFFER - 1)) pUsedTbd->cbd_sc = BD_ENET_TX_WRAP; else pUsedTbd->cbd_sc = 0; /* update some indeces for a correct handling of the TBD ring */ info->cleanTbdNum++; info->usedTbdIdx = (info->usedTbdIdx + 1) % CONFIG_SYS_TX_ETH_BUFFER; } /* Check for valid length of data. */ if ((length > 1500) || (length <= 0)) { return -1; } /* Check the number of vacant TxBDs. */ if (info->cleanTbdNum < 1) { printf("No available TxBDs ...\n"); return -1; } /* Get the first TxBD to send the mac header */ pTbd = &info->txbd[info->txIdx]; pTbd->cbd_datlen = length; pTbd->cbd_bufaddr = (u32) packet; pTbd->cbd_sc |= BD_ENET_TX_LAST | BD_ENET_TX_TC | BD_ENET_TX_READY; info->txIdx = (info->txIdx + 1) % CONFIG_SYS_TX_ETH_BUFFER; /* Enable DMA transmit task */ MCD_continDma(info->txTask); info->cleanTbdNum -= 1; /* wait until frame is sent . 
*/ while (pTbd->cbd_sc & BD_ENET_TX_READY) { udelay(10); } return (int)(info->txbd[info->txIdx].cbd_sc & BD_ENET_TX_STATS); } static int fec_recv(struct eth_device *dev) { struct fec_info_dma *info = dev->priv; volatile fecdma_t *fecp = (fecdma_t *) (info->iobase); cbd_t *pRbd = &info->rxbd[info->rxIdx]; u32 ievent; int frame_length, len = 0; /* Check if any critical events have happened */ ievent = fecp->eir; if (ievent != 0) { fecp->eir = ievent; if (ievent & (FEC_EIR_BABT | FEC_EIR_TXERR | FEC_EIR_RXERR)) { printf("fec_recv: error\n"); fec_halt(dev); fec_init(dev, NULL); return 0; } if (ievent & FEC_EIR_HBERR) { /* Heartbeat error */ fecp->tcr |= FEC_TCR_GTS; } if (ievent & FEC_EIR_GRA) { /* Graceful stop complete */ if (fecp->tcr & FEC_TCR_GTS) { printf("fec_recv: tcr_gts\n"); fec_halt(dev); fecp->tcr &= ~FEC_TCR_GTS; fec_init(dev, NULL); } } } if (!(pRbd->cbd_sc & BD_ENET_RX_EMPTY)) { if ((pRbd->cbd_sc & BD_ENET_RX_LAST) && !(pRbd->cbd_sc & BD_ENET_RX_ERR) && ((pRbd->cbd_datlen - 4) > 14)) { /* Get buffer address and size */ frame_length = pRbd->cbd_datlen - 4; /* Fill the buffer and pass it to upper layers */ NetReceive((volatile uchar *)pRbd->cbd_bufaddr, frame_length); len = frame_length; } /* Reset buffer descriptor as empty */ if ((info->rxIdx) == (PKTBUFSRX - 1)) pRbd->cbd_sc = (BD_ENET_RX_WRAP | BD_ENET_RX_EMPTY); else pRbd->cbd_sc = BD_ENET_RX_EMPTY; pRbd->cbd_datlen = PKTSIZE_ALIGN; /* Now, we have an empty RxBD, restart the DMA receive task */ MCD_continDma(info->rxTask); /* Increment BD count */ info->rxIdx = (info->rxIdx + 1) % PKTBUFSRX; } return len; } static void fec_set_hwaddr(volatile fecdma_t * fecp, u8 * mac) { u8 currByte; /* byte for which to compute the CRC */ int byte; /* loop - counter */ int bit; /* loop - counter */ u32 crc = 0xffffffff; /* initial value */ for (byte = 0; byte < 6; byte++) { currByte = mac[byte]; for (bit = 0; bit < 8; bit++) { if ((currByte & 0x01) ^ (crc & 0x01)) { crc >>= 1; crc = crc ^ 0xedb88320; } else { crc >>= 
1; } currByte >>= 1; } } crc = crc >> 26; /* Set individual hash table register */ if (crc >= 32) { fecp->ialr = (1 << (crc - 32)); fecp->iaur = 0; } else { fecp->ialr = 0; fecp->iaur = (1 << crc); } /* Set physical address */ fecp->palr = (mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3]; fecp->paur = (mac[4] << 24) + (mac[5] << 16) + 0x8808; /* Clear multicast address hash table */ fecp->gaur = 0; fecp->galr = 0; } static int fec_init(struct eth_device *dev, bd_t * bd) { struct fec_info_dma *info = dev->priv; volatile fecdma_t *fecp = (fecdma_t *) (info->iobase); int i; uchar enetaddr[6]; #ifdef ET_DEBUG printf("fec_init: iobase 0x%08x ...\n", info->iobase); #endif fecpin_setclear(dev, 1); fec_halt(dev); #if defined(CONFIG_CMD_MII) || defined (CONFIG_MII) || \ defined (CONFIG_SYS_DISCOVER_PHY) mii_init(); set_fec_duplex_speed(fecp, bd, info->dup_spd); #else #ifndef CONFIG_SYS_DISCOVER_PHY set_fec_duplex_speed(fecp, bd, (FECDUPLEX << 16) | FECSPEED); #endif /* ifndef CONFIG_SYS_DISCOVER_PHY */ #endif /* CONFIG_CMD_MII || CONFIG_MII */ /* We use strictly polling mode only */ fecp->eimr = 0; /* Clear any pending interrupt */ fecp->eir = 0xffffffff; /* Set station address */ if ((u32) fecp == CONFIG_SYS_FEC0_IOBASE) eth_getenv_enetaddr("ethaddr", enetaddr); else eth_getenv_enetaddr("eth1addr", enetaddr); fec_set_hwaddr(fecp, enetaddr); /* Set Opcode/Pause Duration Register */ fecp->opd = 0x00010020; /* Setup Buffers and Buffer Desriptors */ info->rxIdx = 0; info->txIdx = 0; /* Setup Receiver Buffer Descriptors (13.14.24.18) * Settings: Empty, Wrap */ for (i = 0; i < PKTBUFSRX; i++) { info->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY; info->rxbd[i].cbd_datlen = PKTSIZE_ALIGN; info->rxbd[i].cbd_bufaddr = (uint) NetRxPackets[i]; } info->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP; /* Setup Ethernet Transmitter Buffer Descriptors (13.14.24.19) * Settings: Last, Tx CRC */ for (i = 0; i < CONFIG_SYS_TX_ETH_BUFFER; i++) { info->txbd[i].cbd_sc = 0; info->txbd[i].cbd_datlen = 
0; info->txbd[i].cbd_bufaddr = (uint) (&info->txbuf[0]); } info->txbd[CONFIG_SYS_TX_ETH_BUFFER - 1].cbd_sc |= BD_ENET_TX_WRAP; info->usedTbdIdx = 0; info->cleanTbdNum = CONFIG_SYS_TX_ETH_BUFFER; /* Set Rx FIFO alarm and granularity value */ fecp->rfcr = 0x0c000000; fecp->rfar = 0x0000030c; /* Set Tx FIFO granularity value */ fecp->tfcr = FIFO_CTRL_FRAME | FIFO_CTRL_GR(6) | 0x00040000; fecp->tfar = 0x00000080; fecp->tfwr = 0x2; fecp->ctcwr = 0x03000000; /* Enable DMA receive task */ MCD_startDma(info->rxTask, /* Dma channel */ (s8 *) info->rxbd, /*Source Address */ 0, /* Source increment */ (s8 *) (&fecp->rfdr), /* dest */ 4, /* dest increment */ 0, /* DMA size */ 4, /* xfer size */ info->rxInit, /* initiator */ info->rxPri, /* priority */ (MCD_FECRX_DMA | MCD_TT_FLAGS_DEF), /* Flags */ (MCD_NO_CSUM | MCD_NO_BYTE_SWAP) /* Function description */ ); /* Enable DMA tx task with no ready buffer descriptors */ MCD_startDma(info->txTask, /* Dma channel */ (s8 *) info->txbd, /*Source Address */ 0, /* Source increment */ (s8 *) (&fecp->tfdr), /* dest */ 4, /* dest incr */ 0, /* DMA size */ 4, /* xfer size */ info->txInit, /* initiator */ info->txPri, /* priority */ (MCD_FECTX_DMA | MCD_TT_FLAGS_DEF), /* Flags */ (MCD_NO_CSUM | MCD_NO_BYTE_SWAP) /* Function description */ ); /* Now enable the transmit and receive processing */ fecp->ecr |= FEC_ECR_ETHER_EN; return 1; } static void fec_halt(struct eth_device *dev) { struct fec_info_dma *info = dev->priv; volatile fecdma_t *fecp = (fecdma_t *) (info->iobase); int counter = 0xffff; /* issue graceful stop command to the FEC transmitter if necessary */ fecp->tcr |= FEC_TCR_GTS; /* wait for graceful stop to register */ while ((counter--) && (!(fecp->eir & FEC_EIR_GRA))) ; /* Disable DMA tasks */ MCD_killDma(info->txTask); MCD_killDma(info->rxTask);; /* Disable the Ethernet Controller */ fecp->ecr &= ~FEC_ECR_ETHER_EN; /* Clear FIFO status registers */ fecp->rfsr &= FIFO_ERRSTAT; fecp->tfsr &= FIFO_ERRSTAT; fecp->frst = 0x01000000; 
/* Issue a reset command to the FEC chip */ fecp->ecr |= FEC_ECR_RESET; /* wait at least 20 clock cycles */ udelay(10000); #ifdef ET_DEBUG printf("Ethernet task stopped\n"); #endif } int mcdmafec_initialize(bd_t * bis) { struct eth_device *dev; int i; #ifdef CONFIG_SYS_DMA_USE_INTSRAM u32 tmp = CONFIG_SYS_INTSRAM + 0x2000; #endif for (i = 0; i < sizeof(fec_info) / sizeof(fec_info[0]); i++) { dev = (struct eth_device *)memalign(CONFIG_SYS_CACHELINE_SIZE, sizeof *dev); if (dev == NULL) hang(); memset(dev, 0, sizeof(*dev)); sprintf(dev->name, "FEC%d", fec_info[i].index); dev->priv = &fec_info[i]; dev->init = fec_init; dev->halt = fec_halt; dev->send = fec_send; dev->recv = fec_recv; /* setup Receive and Transmit buffer descriptor */ #ifdef CONFIG_SYS_DMA_USE_INTSRAM fec_info[i].rxbd = (cbd_t *)((u32)fec_info[i].rxbd + tmp); tmp = (u32)fec_info[i].rxbd; fec_info[i].txbd = (cbd_t *)((u32)fec_info[i].txbd + tmp + (PKTBUFSRX * sizeof(cbd_t))); tmp = (u32)fec_info[i].txbd; fec_info[i].txbuf = (char *)((u32)fec_info[i].txbuf + tmp + (CONFIG_SYS_TX_ETH_BUFFER * sizeof(cbd_t))); tmp = (u32)fec_info[i].txbuf; #else fec_info[i].rxbd = (cbd_t *) memalign(CONFIG_SYS_CACHELINE_SIZE, (PKTBUFSRX * sizeof(cbd_t))); fec_info[i].txbd = (cbd_t *) memalign(CONFIG_SYS_CACHELINE_SIZE, (CONFIG_SYS_TX_ETH_BUFFER * sizeof(cbd_t))); fec_info[i].txbuf = (char *)memalign(CONFIG_SYS_CACHELINE_SIZE, DBUF_LENGTH); #endif #ifdef ET_DEBUG printf("rxbd %x txbd %x\n", (int)fec_info[i].rxbd, (int)fec_info[i].txbd); #endif fec_info[i].phy_name = (char *)memalign(CONFIG_SYS_CACHELINE_SIZE, 32); eth_register(dev); #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) miiphy_register(dev->name, mcffec_miiphy_read, mcffec_miiphy_write); #endif if (i > 0) fec_info[i - 1].next = &fec_info[i]; } fec_info[i - 1].next = &fec_info[0]; /* default speed */ bis->bi_ethspeed = 10; return 0; }
gpl-2.0
pacificIT/u-boot
arch/arm/cpu/arm920t/at91rm9200/ks8721.c
107
6405
/* * (C) Copyright 2006 * Author : Eric Benard (Eukrea Electromatique) * based on dm9161.c which is : * (C) Copyright 2003 * Author : Hamid Ikdoumi (Atmel) * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <at91rm9200_net.h> #include <net.h> #include <ks8721.h> #ifdef CONFIG_DRIVER_ETHER #if defined(CONFIG_CMD_NET) /* * Name: * ks8721_isphyconnected * Description: * Reads the 2 PHY ID registers * Arguments: * p_mac - pointer to AT91S_EMAC struct * Return value: * 1 - if id read successfully * 0 - if error */ unsigned int ks8721_isphyconnected(AT91PS_EMAC p_mac) { unsigned short id1, id2; at91rm9200_EmacEnableMDIO(p_mac); at91rm9200_EmacReadPhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_PHYID1, &id1); at91rm9200_EmacReadPhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_PHYID2, &id2); at91rm9200_EmacDisableMDIO(p_mac); if ((id1 == (KS8721_PHYID_OUI >> 6)) && ((id2 >> 10) == (KS8721_PHYID_OUI & KS8721_LSB_MASK))) { if ((id2 & KS8721_MODELMASK) == KS8721BL_MODEL) printf("Micrel KS8721bL PHY detected : "); else printf("Unknown Micrel PHY detected : "); return 1; } return 0; } /* * Name: * ks8721_getlinkspeed * Description: * Link parallel detection status of MAC is checked and set in the * MAC configuration registers * Arguments: * p_mac - pointer to MAC * Return 
value: * 1 - if link status set succesfully * 0 - if link status not set */ unsigned char ks8721_getlinkspeed(AT91PS_EMAC p_mac) { unsigned short stat1; if (!at91rm9200_EmacReadPhy(p_mac, KS8721_BMSR, &stat1)) return 0; if (!(stat1 & KS8721_LINK_STATUS)) { /* link status up? */ printf("Link Down !\n"); return 0; } if (stat1 & KS8721_100BASE_TX_FD) { /* set Emac for 100BaseTX and Full Duplex */ printf("100BT FD\n"); p_mac->EMAC_CFG |= AT91C_EMAC_SPD | AT91C_EMAC_FD; return 1; } if (stat1 & KS8721_10BASE_T_FD) { /* set MII for 10BaseT and Full Duplex */ printf("10BT FD\n"); p_mac->EMAC_CFG = (p_mac->EMAC_CFG & ~(AT91C_EMAC_SPD | AT91C_EMAC_FD)) | AT91C_EMAC_FD; return 1; } if (stat1 & KS8721_100BASE_T4_HD) { /* set MII for 100BaseTX and Half Duplex */ printf("100BT HD\n"); p_mac->EMAC_CFG = (p_mac->EMAC_CFG & ~(AT91C_EMAC_SPD | AT91C_EMAC_FD)) | AT91C_EMAC_SPD; return 1; } if (stat1 & KS8721_10BASE_T_HD) { /* set MII for 10BaseT and Half Duplex */ printf("10BT HD\n"); p_mac->EMAC_CFG &= ~(AT91C_EMAC_SPD | AT91C_EMAC_FD); return 1; } return 0; } /* * Name: * ks8721_initphy * Description: * MAC starts checking its link by using parallel detection and * Autonegotiation and the same is set in the MAC configuration registers * Arguments: * p_mac - pointer to struct AT91S_EMAC * Return value: * 1 - if link status set succesfully * 0 - if link status not set */ unsigned char ks8721_initphy(AT91PS_EMAC p_mac) { unsigned char ret = 1; unsigned short intvalue; at91rm9200_EmacEnableMDIO(p_mac); /* Try another time */ if (!ks8721_getlinkspeed(p_mac)) ret = ks8721_getlinkspeed(p_mac); /* Disable PHY Interrupts */ intvalue = 0; at91rm9200_EmacWritePhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_MDINTR, &intvalue); at91rm9200_EmacDisableMDIO(p_mac); return ret; } /* * Name: * ks8721_autonegotiate * Description: * MAC Autonegotiates with the partner status of same is set in the * MAC configuration registers * Arguments: * dev - pointer to struct net_device * Return value: * 1 - if link 
status set successfully * 0 - if link status not set */ unsigned char ks8721_autonegotiate(AT91PS_EMAC p_mac, int *status) { unsigned short value; unsigned short phyanar; unsigned short phyanalpar; /* Set ks8721 control register */ if (!at91rm9200_EmacReadPhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_BMCR, &value)) return 0; /* remove autonegotiation enable */ value &= ~KS8721_AUTONEG; /* Electrically isolate PHY */ value |= KS8721_ISOLATE; if (!at91rm9200_EmacWritePhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_BMCR, &value)) { return 0; } /* * Set the Auto_negotiation Advertisement Register * MII advertising for Next page, 100BaseTxFD and HD, * 10BaseTFD and HD, IEEE 802.3 */ phyanar = KS8721_NP | KS8721_TX_FDX | KS8721_TX_HDX | KS8721_10_FDX | KS8721_10_HDX | KS8721_AN_IEEE_802_3; if (!at91rm9200_EmacWritePhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_ANAR, &phyanar)) { return 0; } /* Read the Control Register */ if (!at91rm9200_EmacReadPhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_BMCR, &value)) { return 0; } value |= KS8721_SPEED_SELECT | KS8721_AUTONEG | KS8721_DUPLEX_MODE; if (!at91rm9200_EmacWritePhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_BMCR, &value)) { return 0; } /* Restart Auto_negotiation */ value |= KS8721_RESTART_AUTONEG; value &= ~KS8721_ISOLATE; if (!at91rm9200_EmacWritePhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_BMCR, &value)) { return 0; } /* Check AutoNegotiate complete */ udelay(10000); at91rm9200_EmacReadPhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_BMSR, &value); if (!(value & KS8721_AUTONEG_COMP)) return 0; /* Get the AutoNeg Link partner base page */ if (!at91rm9200_EmacReadPhy(p_mac, CONFIG_PHY_ADDRESS | KS8721_ANLPAR, &phyanalpar)) { return 0; } if ((phyanar & KS8721_TX_FDX) && (phyanalpar & KS8721_TX_FDX)) { /* Set MII for 100BaseTX and Full Duplex */ p_mac->EMAC_CFG |= AT91C_EMAC_SPD | AT91C_EMAC_FD; return 1; } if ((phyanar & KS8721_10_FDX) && (phyanalpar & KS8721_10_FDX)) { /* Set MII for 10BaseT and Full Duplex */ p_mac->EMAC_CFG = (p_mac->EMAC_CFG & ~(AT91C_EMAC_SPD | 
AT91C_EMAC_FD)) | AT91C_EMAC_FD; return 1; } return 0; } #endif /* CONFIG_CMD_NET */ #endif /* CONFIG_DRIVER_ETHER */
gpl-2.0
bradfa/linux
drivers/ata/sata_highbank.c
363
18196
/* * Calxeda Highbank AHCI SATA platform driver * Copyright 2012 Calxeda, Inc. * * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/module.h> #include <linux/types.h> #include <linux/err.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/libata.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include "ahci.h" #define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f)) #define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2) #define SERDES_CR_CTL 0x80a0 #define SERDES_CR_ADDR 0x80a1 #define SERDES_CR_DATA 0x80a2 #define CR_BUSY 0x0001 #define CR_START 0x0001 #define CR_WR_RDN 0x0002 #define CPHY_TX_INPUT_STS 0x2001 #define CPHY_RX_INPUT_STS 0x2002 #define CPHY_SATA_TX_OVERRIDE 0x8000 #define CPHY_SATA_RX_OVERRIDE 0x4000 #define CPHY_TX_OVERRIDE 0x2004 #define CPHY_RX_OVERRIDE 0x2005 #define SPHY_LANE 0x100 #define SPHY_HALF_RATE 0x0001 #define CPHY_SATA_DPLL_MODE 0x0700 #define CPHY_SATA_DPLL_SHIFT 8 #define CPHY_SATA_DPLL_RESET (1 << 11) #define CPHY_SATA_TX_ATTEN 0x1c00 #define CPHY_SATA_TX_ATTEN_SHIFT 10 #define CPHY_PHY_COUNT 6 #define CPHY_LANE_COUNT 4 #define 
CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT) static DEFINE_SPINLOCK(cphy_lock); /* Each of the 6 phys can have up to 4 sata ports attached to i. Map 0-based * sata ports to their phys and then to their lanes within the phys */ struct phy_lane_info { void __iomem *phy_base; u8 lane_mapping; u8 phy_devs; u8 tx_atten; }; static struct phy_lane_info port_data[CPHY_PORT_COUNT]; static DEFINE_SPINLOCK(sgpio_lock); #define SCLOCK 0 #define SLOAD 1 #define SDATA 2 #define SGPIO_PINS 3 #define SGPIO_PORTS 8 struct ecx_plat_data { u32 n_ports; /* number of extra clocks that the SGPIO PIC controller expects */ u32 pre_clocks; u32 post_clocks; unsigned sgpio_gpio[SGPIO_PINS]; u32 sgpio_pattern; u32 port_to_sgpio[SGPIO_PORTS]; }; #define SGPIO_SIGNALS 3 #define ECX_ACTIVITY_BITS 0x300000 #define ECX_ACTIVITY_SHIFT 0 #define ECX_LOCATE_BITS 0x80000 #define ECX_LOCATE_SHIFT 1 #define ECX_FAULT_BITS 0x400000 #define ECX_FAULT_SHIFT 2 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port, u32 shift) { return 1 << (3 * pdata->port_to_sgpio[port] + shift); } static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state) { if (state & ECX_ACTIVITY_BITS) pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port, ECX_ACTIVITY_SHIFT); else pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port, ECX_ACTIVITY_SHIFT); if (state & ECX_LOCATE_BITS) pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port, ECX_LOCATE_SHIFT); else pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port, ECX_LOCATE_SHIFT); if (state & ECX_FAULT_BITS) pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port, ECX_FAULT_SHIFT); else pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port, ECX_FAULT_SHIFT); } /* * Tell the LED controller that the signal has changed by raising the clock * line for 50 uS and then lowering it for 50 uS. 
*/ static void ecx_led_cycle_clock(struct ecx_plat_data *pdata) { gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1); udelay(50); gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0); udelay(50); } static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state, ssize_t size) { struct ahci_host_priv *hpriv = ap->host->private_data; struct ecx_plat_data *pdata = hpriv->plat_data; struct ahci_port_priv *pp = ap->private_data; unsigned long flags; int pmp, i; struct ahci_em_priv *emp; u32 sgpio_out; /* get the slot number from the message */ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; if (pmp < EM_MAX_SLOTS) emp = &pp->em_priv[pmp]; else return -EINVAL; if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED)) return size; spin_lock_irqsave(&sgpio_lock, flags); ecx_parse_sgpio(pdata, ap->port_no, state); sgpio_out = pdata->sgpio_pattern; for (i = 0; i < pdata->pre_clocks; i++) ecx_led_cycle_clock(pdata); gpio_set_value(pdata->sgpio_gpio[SLOAD], 1); ecx_led_cycle_clock(pdata); gpio_set_value(pdata->sgpio_gpio[SLOAD], 0); /* * bit-bang out the SGPIO pattern, by consuming a bit and then * clocking it out. 
*/ for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) { gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1); sgpio_out >>= 1; ecx_led_cycle_clock(pdata); } for (i = 0; i < pdata->post_clocks; i++) ecx_led_cycle_clock(pdata); /* save off new led state for port/slot */ emp->led_state = state; spin_unlock_irqrestore(&sgpio_lock, flags); return size; } static void highbank_set_em_messages(struct device *dev, struct ahci_host_priv *hpriv, struct ata_port_info *pi) { struct device_node *np = dev->of_node; struct ecx_plat_data *pdata = hpriv->plat_data; int i; int err; for (i = 0; i < SGPIO_PINS; i++) { err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i); if (err < 0) return; pdata->sgpio_gpio[i] = err; err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO"); if (err) { pr_err("sata_highbank gpio_request %d failed: %d\n", i, err); return; } gpio_direction_output(pdata->sgpio_gpio[i], 1); } of_property_read_u32_array(np, "calxeda,led-order", pdata->port_to_sgpio, pdata->n_ports); if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks)) pdata->pre_clocks = 0; if (of_property_read_u32(np, "calxeda,post-clocks", &pdata->post_clocks)) pdata->post_clocks = 0; /* store em_loc */ hpriv->em_loc = 0; hpriv->em_buf_sz = 4; hpriv->em_msg_type = EM_MSG_TYPE_LED; pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY; } static u32 __combo_phy_reg_read(u8 sata_port, u32 addr) { u32 data; u8 dev = port_data[sata_port].phy_devs; spin_lock(&cphy_lock); writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800); data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr)); spin_unlock(&cphy_lock); return data; } static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data) { u8 dev = port_data[sata_port].phy_devs; spin_lock(&cphy_lock); writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800); writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr)); spin_unlock(&cphy_lock); } static void combo_phy_wait_for_ready(u8 sata_port) { while 
(__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY) udelay(5); } static u32 combo_phy_read(u8 sata_port, u32 addr) { combo_phy_wait_for_ready(sata_port); __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr); __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START); combo_phy_wait_for_ready(sata_port); return __combo_phy_reg_read(sata_port, SERDES_CR_DATA); } static void combo_phy_write(u8 sata_port, u32 addr, u32 data) { combo_phy_wait_for_ready(sata_port); __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr); __combo_phy_reg_write(sata_port, SERDES_CR_DATA, data); __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START); } static void highbank_cphy_disable_overrides(u8 sata_port) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp; if (unlikely(port_data[sata_port].phy_base == NULL)) return; tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); tmp &= ~CPHY_SATA_RX_OVERRIDE; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); } static void cphy_override_tx_attenuation(u8 sata_port, u32 val) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp; if (val & 0x8) return; tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE); tmp &= ~CPHY_SATA_TX_OVERRIDE; combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= CPHY_SATA_TX_OVERRIDE; combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN; combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp); } static void cphy_override_rx_mode(u8 sata_port, u32 val) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp; tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); tmp &= ~CPHY_SATA_RX_OVERRIDE; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= CPHY_SATA_RX_OVERRIDE; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); tmp &= ~CPHY_SATA_DPLL_MODE; tmp |= val << 
CPHY_SATA_DPLL_SHIFT; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= CPHY_SATA_DPLL_RESET; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); tmp &= ~CPHY_SATA_DPLL_RESET; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); msleep(15); } static void highbank_cphy_override_lane(u8 sata_port) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp, k = 0; if (unlikely(port_data[sata_port].phy_base == NULL)) return; do { tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); } while ((tmp & SPHY_HALF_RATE) && (k++ < 1000)); cphy_override_rx_mode(sata_port, 3); cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten); } static int highbank_initialize_phys(struct device *dev, void __iomem *addr) { struct device_node *sata_node = dev->of_node; int phy_count = 0, phy, port = 0, i; void __iomem *cphy_base[CPHY_PHY_COUNT] = {}; struct device_node *phy_nodes[CPHY_PHY_COUNT] = {}; u32 tx_atten[CPHY_PORT_COUNT] = {}; memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT); do { u32 tmp; struct of_phandle_args phy_data; if (of_parse_phandle_with_args(sata_node, "calxeda,port-phys", "#phy-cells", port, &phy_data)) break; for (phy = 0; phy < phy_count; phy++) { if (phy_nodes[phy] == phy_data.np) break; } if (phy_nodes[phy] == NULL) { phy_nodes[phy] = phy_data.np; cphy_base[phy] = of_iomap(phy_nodes[phy], 0); if (cphy_base[phy] == NULL) { return 0; } phy_count += 1; } port_data[port].lane_mapping = phy_data.args[0]; of_property_read_u32(phy_nodes[phy], "phydev", &tmp); port_data[port].phy_devs = tmp; port_data[port].phy_base = cphy_base[phy]; of_node_put(phy_data.np); port += 1; } while (port < CPHY_PORT_COUNT); of_property_read_u32_array(sata_node, "calxeda,tx-atten", tx_atten, port); for (i = 0; i < port; i++) port_data[i].tx_atten = (u8) tx_atten[i]; return 0; } /* * The Calxeda SATA phy intermittently fails to bring up a link with Gen3 * Retrying the phy hard reset can 
work around the issue, but the drive * may fail again. In less than 150 out of 15000 test runs, it took more * than 10 tries for the link to be established (but never more than 35). * Triple the maximum observed retry count to provide plenty of margin for * rare events and to guarantee that the link is established. * * Also, the default 2 second time-out on a failed drive is too long in * this situation. The uboot implementation of the same driver function * uses a much shorter time-out period and never experiences a time out * issue. Reducing the time-out to 500ms improves the responsiveness. * The other timing constants were kept the same as the stock AHCI driver. * This change was also tested 15000 times on 24 drives and none of them * experienced a time out. */ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { static const unsigned long timing[] = { 5, 100, 500}; struct ata_port *ap = link->ap; struct ahci_port_priv *pp = ap->private_data; struct ahci_host_priv *hpriv = ap->host->private_data; u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; struct ata_taskfile tf; bool online; u32 sstatus; int rc; int retry = 100; ahci_stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); tf.command = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); do { highbank_cphy_disable_overrides(link->ap->port_no); rc = sata_link_hardreset(link, timing, deadline, &online, NULL); highbank_cphy_override_lane(link->ap->port_no); /* If the status is 1, we are connected, but the link did not * come up. So retry resetting the link again. 
*/ if (sata_scr_read(link, SCR_STATUS, &sstatus)) break; if (!(sstatus & 0x3)) break; } while (!online && retry--); hpriv->start_engine(ap); if (online) *class = ahci_dev_classify(ap); return rc; } static struct ata_port_operations ahci_highbank_ops = { .inherits = &ahci_ops, .hardreset = ahci_highbank_hardreset, .transmit_led_message = ecx_transmit_led_message, }; static const struct ata_port_info ahci_highbank_port_info = { .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_highbank_ops, }; static struct scsi_host_template ahci_highbank_platform_sht = { AHCI_SHT("sata_highbank"), }; static const struct of_device_id ahci_of_match[] = { { .compatible = "calxeda,hb-ahci" }, {}, }; MODULE_DEVICE_TABLE(of, ahci_of_match); static int ahci_highbank_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; struct ecx_plat_data *pdata; struct ata_host *host; struct resource *mem; int irq; int i; int rc; u32 n_ports; struct ata_port_info pi = ahci_highbank_port_info; const struct ata_port_info *ppi[] = { &pi, NULL }; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(dev, "no mmio space\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(dev, "no irq\n"); return -EINVAL; } hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) { dev_err(dev, "can't alloc ahci_host_priv\n"); return -ENOMEM; } pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(dev, "can't alloc ecx_plat_data\n"); return -ENOMEM; } hpriv->irq = irq; hpriv->flags |= (unsigned long)pi.private_data; hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem)); if (!hpriv->mmio) { dev_err(dev, "can't map %pR\n", mem); return -ENOMEM; } rc = highbank_initialize_phys(dev, hpriv->mmio); if (rc) return rc; ahci_save_initial_config(dev, hpriv); /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) pi.flags |= ATA_FLAG_NCQ; if (hpriv->cap & 
HOST_CAP_PMP) pi.flags |= ATA_FLAG_PMP; if (hpriv->cap & HOST_CAP_64) dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at * both CAP.NP and port_map. */ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); pdata->n_ports = n_ports; hpriv->plat_data = pdata; highbank_set_em_messages(dev, hpriv, &pi); host = ata_host_alloc_pinfo(dev, ppi, n_ports); if (!host) { rc = -ENOMEM; goto err0; } host->private_data = hpriv; if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) host->flags |= ATA_HOST_PARALLEL_SCAN; for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; ata_port_desc(ap, "mmio %pR", mem); ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); /* set enclosure management message type */ if (ap->flags & ATA_FLAG_EM) ap->em_message_type = hpriv->em_msg_type; /* disabled/not-implemented port */ if (!(hpriv->port_map & (1 << i))) ap->ops = &ata_dummy_port_ops; } rc = ahci_reset_controller(host); if (rc) goto err0; ahci_init_controller(host); ahci_print_info(host, "platform"); rc = ahci_host_activate(host, &ahci_highbank_platform_sht); if (rc) goto err0; return 0; err0: return rc; } #ifdef CONFIG_PM_SLEEP static int ahci_highbank_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->mmio; u32 ctl; int rc; if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { dev_err(dev, "firmware update required for suspend/resume\n"); return -EIO; } /* * AHCI spec rev1.1 section 8.3.3: * Software must disable interrupts prior to requesting a * transition of the HBA to D3 state. 
*/ ctl = readl(mmio + HOST_CTL); ctl &= ~HOST_IRQ_EN; writel(ctl, mmio + HOST_CTL); readl(mmio + HOST_CTL); /* flush */ rc = ata_host_suspend(host, PMSG_SUSPEND); if (rc) return rc; return 0; } static int ahci_highbank_resume(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); int rc; if (dev->power.power_state.event == PM_EVENT_SUSPEND) { rc = ahci_reset_controller(host); if (rc) return rc; ahci_init_controller(host); } ata_host_resume(host); return 0; } #endif static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops, ahci_highbank_suspend, ahci_highbank_resume); static struct platform_driver ahci_highbank_driver = { .remove = ata_platform_remove_one, .driver = { .name = "highbank-ahci", .of_match_table = ahci_of_match, .pm = &ahci_highbank_pm_ops, }, .probe = ahci_highbank_probe, }; module_platform_driver(ahci_highbank_driver); MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver"); MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("sata:highbank");
gpl-2.0
shane87/linux_wx435kt-froyo
drivers/input/mouse/synaptics.c
363
20683
/* * Synaptics TouchPad PS/2 mouse driver * * 2003 Dmitry Torokhov <dtor@mail.ru> * Added support for pass-through port. Special thanks to Peter Berg Larsen * for explaining various Synaptics quirks. * * 2003 Peter Osterlund <petero2@telia.com> * Ported to 2.5 input device infrastructure. * * Copyright (C) 2001 Stefan Gmeiner <riddlebox@freesurf.ch> * start merging tpconfig and gpm code to a xfree-input module * adding some changes and extensions (ex. 3rd and 4th button) * * Copyright (c) 1997 C. Scott Ananian <cananian@alumni.priceton.edu> * Copyright (c) 1998-2000 Bruce Kalk <kall@compass.com> * code for the special synaptics commands (from the tpconfig-source) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * Trademarks are the property of their respective owners. */ #include <linux/module.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/libps2.h> #include "psmouse.h" #include "synaptics.h" /* * The x/y limits are taken from the Synaptics TouchPad interfacing Guide, * section 2.3.2, which says that they should be valid regardless of the * actual size of the sensor. 
*/ #define XMIN_NOMINAL 1472 #define XMAX_NOMINAL 5472 #define YMIN_NOMINAL 1408 #define YMAX_NOMINAL 4448 /***************************************************************************** * Stuff we need even when we do not want native Synaptics support ****************************************************************************/ /* * Set the synaptics touchpad mode byte by special commands */ static int synaptics_mode_cmd(struct psmouse *psmouse, unsigned char mode) { unsigned char param[1]; if (psmouse_sliced_command(psmouse, mode)) return -1; param[0] = SYN_PS_SET_MODE2; if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_SETRATE)) return -1; return 0; } int synaptics_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[4]; param[0] = 0; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO); if (param[1] != 0x47) return -ENODEV; if (set_properties) { psmouse->vendor = "Synaptics"; psmouse->name = "TouchPad"; } return 0; } void synaptics_reset(struct psmouse *psmouse) { /* reset touchpad back to relative mode, gestures enabled */ synaptics_mode_cmd(psmouse, 0); } #ifdef CONFIG_MOUSE_PS2_SYNAPTICS /***************************************************************************** * Synaptics communications functions ****************************************************************************/ /* * Send a command to the synpatics touchpad by special commands */ static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c, unsigned char *param) { if (psmouse_sliced_command(psmouse, c)) return -1; if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETINFO)) return -1; return 0; } /* * Read the model-id bytes from the touchpad * see also SYN_MODEL_* macros */ static int synaptics_model_id(struct psmouse *psmouse) { 
struct synaptics_data *priv = psmouse->private; unsigned char mi[3]; if (synaptics_send_cmd(psmouse, SYN_QUE_MODEL, mi)) return -1; priv->model_id = (mi[0]<<16) | (mi[1]<<8) | mi[2]; return 0; } /* * Read the capability-bits from the touchpad * see also the SYN_CAP_* macros */ static int synaptics_capability(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char cap[3]; if (synaptics_send_cmd(psmouse, SYN_QUE_CAPABILITIES, cap)) return -1; priv->capabilities = (cap[0] << 16) | (cap[1] << 8) | cap[2]; priv->ext_cap = 0; if (!SYN_CAP_VALID(priv->capabilities)) return -1; /* * Unless capExtended is set the rest of the flags should be ignored */ if (!SYN_CAP_EXTENDED(priv->capabilities)) priv->capabilities = 0; if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 1) { if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB, cap)) { printk(KERN_ERR "Synaptics claims to have extended capabilities," " but I'm not able to read them."); } else { priv->ext_cap = (cap[0] << 16) | (cap[1] << 8) | cap[2]; /* * if nExtBtn is greater than 8 it should be considered * invalid and treated as 0 */ if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 8) priv->ext_cap &= 0xff0fff; } } return 0; } /* * Identify Touchpad * See also the SYN_ID_* macros */ static int synaptics_identify(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char id[3]; if (synaptics_send_cmd(psmouse, SYN_QUE_IDENTIFY, id)) return -1; priv->identity = (id[0]<<16) | (id[1]<<8) | id[2]; if (SYN_ID_IS_SYNAPTICS(priv->identity)) return 0; return -1; } /* * Read touchpad resolution * Resolution is left zero if touchpad does not support the query */ static int synaptics_resolution(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char res[3]; if (SYN_ID_MAJOR(priv->identity) < 4) return 0; if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res)) return 0; if ((res[0] != 0) && (res[1] & 0x80) && (res[2] != 0)) { priv->x_res = res[0]; /* 
x resolution in units/mm */ priv->y_res = res[2]; /* y resolution in units/mm */ } return 0; } static int synaptics_query_hardware(struct psmouse *psmouse) { if (synaptics_identify(psmouse)) return -1; if (synaptics_model_id(psmouse)) return -1; if (synaptics_capability(psmouse)) return -1; if (synaptics_resolution(psmouse)) return -1; return 0; } static int synaptics_set_absolute_mode(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; priv->mode = SYN_BIT_ABSOLUTE_MODE; if (SYN_ID_MAJOR(priv->identity) >= 4) priv->mode |= SYN_BIT_DISABLE_GESTURE; if (SYN_CAP_EXTENDED(priv->capabilities)) priv->mode |= SYN_BIT_W_MODE; if (synaptics_mode_cmd(psmouse, priv->mode)) return -1; return 0; } static void synaptics_set_rate(struct psmouse *psmouse, unsigned int rate) { struct synaptics_data *priv = psmouse->private; if (rate >= 80) { priv->mode |= SYN_BIT_HIGH_RATE; psmouse->rate = 80; } else { priv->mode &= ~SYN_BIT_HIGH_RATE; psmouse->rate = 40; } synaptics_mode_cmd(psmouse, priv->mode); } /***************************************************************************** * Synaptics pass-through PS/2 port support ****************************************************************************/ static int synaptics_pt_write(struct serio *serio, unsigned char c) { struct psmouse *parent = serio_get_drvdata(serio->parent); char rate_param = SYN_PS_CLIENT_CMD; /* indicates that we want pass-through port */ if (psmouse_sliced_command(parent, c)) return -1; if (ps2_command(&parent->ps2dev, &rate_param, PSMOUSE_CMD_SETRATE)) return -1; return 0; } static inline int synaptics_is_pt_packet(unsigned char *buf) { return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; } static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) { struct psmouse *child = serio_get_drvdata(ptport); if (child && child->state == PSMOUSE_ACTIVATED) { serio_interrupt(ptport, packet[1], 0); serio_interrupt(ptport, packet[4], 0); serio_interrupt(ptport, packet[5], 
0); if (child->pktsize == 4) serio_interrupt(ptport, packet[2], 0); } else serio_interrupt(ptport, packet[1], 0); } static void synaptics_pt_activate(struct psmouse *psmouse) { struct serio *ptport = psmouse->ps2dev.serio->child; struct psmouse *child = serio_get_drvdata(ptport); struct synaptics_data *priv = psmouse->private; /* adjust the touchpad to child's choice of protocol */ if (child) { if (child->pktsize == 4) priv->mode |= SYN_BIT_FOUR_BYTE_CLIENT; else priv->mode &= ~SYN_BIT_FOUR_BYTE_CLIENT; if (synaptics_mode_cmd(psmouse, priv->mode)) printk(KERN_INFO "synaptics: failed to switch guest protocol\n"); } } static void synaptics_pt_create(struct psmouse *psmouse) { struct serio *serio; serio = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!serio) { printk(KERN_ERR "synaptics: not enough memory to allocate pass-through port\n"); return; } serio->id.type = SERIO_PS_PSTHRU; strlcpy(serio->name, "Synaptics pass-through", sizeof(serio->name)); strlcpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->name)); serio->write = synaptics_pt_write; serio->parent = psmouse->ps2dev.serio; psmouse->pt_activate = synaptics_pt_activate; printk(KERN_INFO "serio: %s port at %s\n", serio->name, psmouse->phys); serio_register_port(serio); } /***************************************************************************** * Functions to interpret the absolute mode packets ****************************************************************************/ static void synaptics_parse_hw_state(unsigned char buf[], struct synaptics_data *priv, struct synaptics_hw_state *hw) { memset(hw, 0, sizeof(struct synaptics_hw_state)); if (SYN_MODEL_NEWABS(priv->model_id)) { hw->x = (((buf[3] & 0x10) << 8) | ((buf[1] & 0x0f) << 8) | buf[4]); hw->y = (((buf[3] & 0x20) << 7) | ((buf[1] & 0xf0) << 4) | buf[5]); hw->z = buf[2]; hw->w = (((buf[0] & 0x30) >> 2) | ((buf[0] & 0x04) >> 1) | ((buf[3] & 0x04) >> 2)); hw->left = (buf[0] & 0x01) ? 1 : 0; hw->right = (buf[0] & 0x02) ? 
1 : 0; if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) { hw->middle = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0; if (hw->w == 2) hw->scroll = (signed char)(buf[1]); } if (SYN_CAP_FOUR_BUTTON(priv->capabilities)) { hw->up = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0; hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; } if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && ((buf[0] ^ buf[3]) & 0x02)) { switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { default: /* * if nExtBtn is greater than 8 it should be * considered invalid and treated as 0 */ break; case 8: hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0; hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0; case 6: hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0; hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0; case 4: hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0; hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0; case 2: hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0; hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0; } } } else { hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); hw->y = (((buf[4] & 0x1f) << 8) | buf[5]); hw->z = (((buf[0] & 0x30) << 2) | (buf[3] & 0x3F)); hw->w = (((buf[1] & 0x80) >> 4) | ((buf[0] & 0x04) >> 1)); hw->left = (buf[0] & 0x01) ? 1 : 0; hw->right = (buf[0] & 0x02) ? 
1 : 0; } } /* * called for each full received packet from the touchpad */ static void synaptics_process_packet(struct psmouse *psmouse) { struct input_dev *dev = psmouse->dev; struct synaptics_data *priv = psmouse->private; struct synaptics_hw_state hw; int num_fingers; int finger_width; int i; synaptics_parse_hw_state(psmouse->packet, priv, &hw); if (hw.scroll) { priv->scroll += hw.scroll; while (priv->scroll >= 4) { input_report_key(dev, BTN_BACK, !hw.down); input_sync(dev); input_report_key(dev, BTN_BACK, hw.down); input_sync(dev); priv->scroll -= 4; } while (priv->scroll <= -4) { input_report_key(dev, BTN_FORWARD, !hw.up); input_sync(dev); input_report_key(dev, BTN_FORWARD, hw.up); input_sync(dev); priv->scroll += 4; } return; } if (hw.z > 0) { num_fingers = 1; finger_width = 5; if (SYN_CAP_EXTENDED(priv->capabilities)) { switch (hw.w) { case 0 ... 1: if (SYN_CAP_MULTIFINGER(priv->capabilities)) num_fingers = hw.w + 2; break; case 2: if (SYN_MODEL_PEN(priv->model_id)) ; /* Nothing, treat a pen as a single finger */ break; case 4 ... 
15: if (SYN_CAP_PALMDETECT(priv->capabilities)) finger_width = hw.w; break; } } } else { num_fingers = 0; finger_width = 0; } /* Post events * BTN_TOUCH has to be first as mousedev relies on it when doing * absolute -> relative conversion */ if (hw.z > 30) input_report_key(dev, BTN_TOUCH, 1); if (hw.z < 25) input_report_key(dev, BTN_TOUCH, 0); if (hw.z > 0) { input_report_abs(dev, ABS_X, hw.x); input_report_abs(dev, ABS_Y, YMAX_NOMINAL + YMIN_NOMINAL - hw.y); } input_report_abs(dev, ABS_PRESSURE, hw.z); input_report_abs(dev, ABS_TOOL_WIDTH, finger_width); input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1); input_report_key(dev, BTN_LEFT, hw.left); input_report_key(dev, BTN_RIGHT, hw.right); if (SYN_CAP_MULTIFINGER(priv->capabilities)) { input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2); input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3); } if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) input_report_key(dev, BTN_MIDDLE, hw.middle); if (SYN_CAP_FOUR_BUTTON(priv->capabilities)) { input_report_key(dev, BTN_FORWARD, hw.up); input_report_key(dev, BTN_BACK, hw.down); } for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) input_report_key(dev, BTN_0 + i, hw.ext_buttons & (1 << i)); input_sync(dev); } static int synaptics_validate_byte(unsigned char packet[], int idx, unsigned char pkt_type) { static const unsigned char newabs_mask[] = { 0xC8, 0x00, 0x00, 0xC8, 0x00 }; static const unsigned char newabs_rel_mask[] = { 0xC0, 0x00, 0x00, 0xC0, 0x00 }; static const unsigned char newabs_rslt[] = { 0x80, 0x00, 0x00, 0xC0, 0x00 }; static const unsigned char oldabs_mask[] = { 0xC0, 0x60, 0x00, 0xC0, 0x60 }; static const unsigned char oldabs_rslt[] = { 0xC0, 0x00, 0x00, 0x80, 0x00 }; if (idx < 0 || idx > 4) return 0; switch (pkt_type) { case SYN_NEWABS: case SYN_NEWABS_RELAXED: return (packet[idx] & newabs_rel_mask[idx]) == newabs_rslt[idx]; case SYN_NEWABS_STRICT: return (packet[idx] & newabs_mask[idx]) == newabs_rslt[idx]; case SYN_OLDABS: return 
(packet[idx] & oldabs_mask[idx]) == oldabs_rslt[idx]; default: printk(KERN_ERR "synaptics: unknown packet type %d\n", pkt_type); return 0; } } static unsigned char synaptics_detect_pkt_type(struct psmouse *psmouse) { int i; for (i = 0; i < 5; i++) if (!synaptics_validate_byte(psmouse->packet, i, SYN_NEWABS_STRICT)) { printk(KERN_INFO "synaptics: using relaxed packet validation\n"); return SYN_NEWABS_RELAXED; } return SYN_NEWABS_STRICT; } static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; if (psmouse->pktcnt >= 6) { /* Full packet received */ if (unlikely(priv->pkt_type == SYN_NEWABS)) priv->pkt_type = synaptics_detect_pkt_type(psmouse); if (SYN_CAP_PASS_THROUGH(priv->capabilities) && synaptics_is_pt_packet(psmouse->packet)) { if (psmouse->ps2dev.serio->child) synaptics_pass_pt_packet(psmouse->ps2dev.serio->child, psmouse->packet); } else synaptics_process_packet(psmouse); return PSMOUSE_FULL_PACKET; } return synaptics_validate_byte(psmouse->packet, psmouse->pktcnt - 1, priv->pkt_type) ? 
PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA; } /***************************************************************************** * Driver initialization/cleanup functions ****************************************************************************/ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv) { int i; __set_bit(EV_ABS, dev->evbit); input_set_abs_params(dev, ABS_X, XMIN_NOMINAL, XMAX_NOMINAL, 0, 0); input_set_abs_params(dev, ABS_Y, YMIN_NOMINAL, YMAX_NOMINAL, 0, 0); input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0); __set_bit(ABS_TOOL_WIDTH, dev->absbit); __set_bit(EV_KEY, dev->evbit); __set_bit(BTN_TOUCH, dev->keybit); __set_bit(BTN_TOOL_FINGER, dev->keybit); __set_bit(BTN_LEFT, dev->keybit); __set_bit(BTN_RIGHT, dev->keybit); if (SYN_CAP_MULTIFINGER(priv->capabilities)) { __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit); __set_bit(BTN_TOOL_TRIPLETAP, dev->keybit); } if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) __set_bit(BTN_MIDDLE, dev->keybit); if (SYN_CAP_FOUR_BUTTON(priv->capabilities) || SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) { __set_bit(BTN_FORWARD, dev->keybit); __set_bit(BTN_BACK, dev->keybit); } for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) __set_bit(BTN_0 + i, dev->keybit); __clear_bit(EV_REL, dev->evbit); __clear_bit(REL_X, dev->relbit); __clear_bit(REL_Y, dev->relbit); dev->absres[ABS_X] = priv->x_res; dev->absres[ABS_Y] = priv->y_res; } static void synaptics_disconnect(struct psmouse *psmouse) { synaptics_reset(psmouse); kfree(psmouse->private); psmouse->private = NULL; } static int synaptics_reconnect(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; struct synaptics_data old_priv = *priv; psmouse_reset(psmouse); if (synaptics_detect(psmouse, 0)) return -1; if (synaptics_query_hardware(psmouse)) { printk(KERN_ERR "Unable to query Synaptics hardware.\n"); return -1; } if (old_priv.identity != priv->identity || old_priv.model_id != priv->model_id || old_priv.capabilities != 
priv->capabilities || old_priv.ext_cap != priv->ext_cap) return -1; if (synaptics_set_absolute_mode(psmouse)) { printk(KERN_ERR "Unable to initialize Synaptics hardware.\n"); return -1; } return 0; } #if defined(__i386__) #include <linux/dmi.h> static const struct dmi_system_id toshiba_dmi_table[] = { { .ident = "Toshiba Satellite", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"), }, }, { .ident = "Toshiba Dynabook", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "dynabook"), }, }, { .ident = "Toshiba Portege M300", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"), }, }, { .ident = "Toshiba Portege M300", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"), DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"), }, }, { } }; #endif int synaptics_init(struct psmouse *psmouse) { struct synaptics_data *priv; psmouse->private = priv = kzalloc(sizeof(struct synaptics_data), GFP_KERNEL); if (!priv) return -1; psmouse_reset(psmouse); if (synaptics_query_hardware(psmouse)) { printk(KERN_ERR "Unable to query Synaptics hardware.\n"); goto init_fail; } if (synaptics_set_absolute_mode(psmouse)) { printk(KERN_ERR "Unable to initialize Synaptics hardware.\n"); goto init_fail; } priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS; printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx\n", SYN_ID_MODEL(priv->identity), SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), priv->model_id, priv->capabilities, priv->ext_cap); set_input_params(psmouse->dev, priv); /* * Encode touchpad model so that it can be used to set * input device->id.version and be visible to userspace. * Because version is __u16 we have to drop something. * Hardware info bits seem to be good candidates as they * are documented to be for Synaptics corp. internal use. 
*/ psmouse->model = ((priv->model_id & 0x00ff0000) >> 8) | (priv->model_id & 0x000000ff); psmouse->protocol_handler = synaptics_process_byte; psmouse->set_rate = synaptics_set_rate; psmouse->disconnect = synaptics_disconnect; psmouse->reconnect = synaptics_reconnect; psmouse->cleanup = synaptics_reset; psmouse->pktsize = 6; /* Synaptics can usually stay in sync without extra help */ psmouse->resync_time = 0; if (SYN_CAP_PASS_THROUGH(priv->capabilities)) synaptics_pt_create(psmouse); #if defined(__i386__) /* * Toshiba's KBC seems to have trouble handling data from * Synaptics as full rate, switch to lower rate which is roughly * thye same as rate of standard PS/2 mouse. */ if (psmouse->rate >= 80 && dmi_check_system(toshiba_dmi_table)) { printk(KERN_INFO "synaptics: Toshiba %s detected, limiting rate to 40pps.\n", dmi_get_system_info(DMI_PRODUCT_NAME)); psmouse->rate = 40; } #endif return 0; init_fail: kfree(priv); return -1; } #else /* CONFIG_MOUSE_PS2_SYNAPTICS */ int synaptics_init(struct psmouse *psmouse) { return -ENOSYS; } #endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
gpl-2.0
meimz/linux
arch/x86/um/sys_call_table_64.c
619
1585
/* * System call table for UML/x86-64, copied from arch/x86/kernel/syscall_*.c * with some changes for UML. */ #include <linux/linkage.h> #include <linux/sys.h> #include <linux/cache.h> #include <generated/user_constants.h> #define __NO_STUBS /* * Below you can see, in terms of #define's, the differences between the x86-64 * and the UML syscall table. */ /* Not going to be implemented by UML, since we have no hardware. */ #define sys_iopl sys_ni_syscall #define sys_ioperm sys_ni_syscall /* * The UML TLS problem. Note that x86_64 does not implement this, so the below * is needed only for the ia32 compatibility. */ /* On UML we call it this way ("old" means it's not mmap2) */ #define sys_mmap old_mmap #define stub_clone sys_clone #define stub_fork sys_fork #define stub_vfork sys_vfork #define stub_execve sys_execve #define stub_execveat sys_execveat #define stub_rt_sigreturn sys_rt_sigreturn #define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat) #define __SYSCALL_X32(nr, sym, compat) /* Not supported */ #define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; #include <asm/syscalls_64.h> #undef __SYSCALL_64 #define __SYSCALL_64(nr, sym, compat) [ nr ] = sym, typedef void (*sys_call_ptr_t)(void); extern void sys_ni_syscall(void); const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { /* * Smells like a compiler bug -- it doesn't work * when the & below is removed. */ [0 ... __NR_syscall_max] = &sys_ni_syscall, #include <asm/syscalls_64.h> }; int syscall_table_size = sizeof(sys_call_table);
gpl-2.0
CyanogenMod/htc-kernel-incrediblec
crypto/md5.c
619
7405
/* * Cryptographic API. * * MD5 Message Digest Algorithm (RFC1321). * * Derived from cryptoapi implementation, originally based on the * public domain implementation written by Colin Plumb in 1993. * * Copyright (c) Cryptoapi developers. * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <asm/byteorder.h> #define MD5_DIGEST_SIZE 16 #define MD5_HMAC_BLOCK_SIZE 64 #define MD5_BLOCK_WORDS 16 #define MD5_HASH_WORDS 4 #define F1(x, y, z) (z ^ (x & (y ^ z))) #define F2(x, y, z) F1(z, x, y) #define F3(x, y, z) (x ^ y ^ z) #define F4(x, y, z) (y ^ (x | ~z)) #define MD5STEP(f, w, x, y, z, in, s) \ (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) struct md5_ctx { u32 hash[MD5_HASH_WORDS]; u32 block[MD5_BLOCK_WORDS]; u64 byte_count; }; static void md5_transform(u32 *hash, u32 const *in) { u32 a, b, c, d; a = hash[0]; b = hash[1]; c = hash[2]; d = hash[3]; MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 
17); MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); MD5STEP(F4, b, c, d, a, in[1] + 
0x85845dd1, 21); MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); hash[0] += a; hash[1] += b; hash[2] += c; hash[3] += d; } /* XXX: this stuff can be optimized */ static inline void le32_to_cpu_array(u32 *buf, unsigned int words) { while (words--) { __le32_to_cpus(buf); buf++; } } static inline void cpu_to_le32_array(u32 *buf, unsigned int words) { while (words--) { __cpu_to_le32s(buf); buf++; } } static inline void md5_transform_helper(struct md5_ctx *ctx) { le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); md5_transform(ctx->hash, ctx->block); } static int md5_init(struct shash_desc *desc) { struct md5_ctx *mctx = shash_desc_ctx(desc); mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; mctx->hash[2] = 0x98badcfe; mctx->hash[3] = 0x10325476; mctx->byte_count = 0; return 0; } static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_ctx *mctx = shash_desc_ctx(desc); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; if (avail > len) { memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, len); return 0; } memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, avail); md5_transform_helper(mctx); data += avail; len -= avail; while (len >= sizeof(mctx->block)) { memcpy(mctx->block, data, sizeof(mctx->block)); md5_transform_helper(mctx); data += sizeof(mctx->block); len -= sizeof(mctx->block); } memcpy(mctx->block, data, len); return 0; } static int md5_final(struct shash_desc *desc, u8 *out) { struct md5_ctx *mctx = shash_desc_ctx(desc); const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char 
*)mctx->block + offset; int padding = 56 - (offset + 1); *p++ = 0x80; if (padding < 0) { memset(p, 0x00, padding + sizeof (u64)); md5_transform_helper(mctx); p = (char *)mctx->block; padding = 56; } memset(p, 0, padding); mctx->block[14] = mctx->byte_count << 3; mctx->block[15] = mctx->byte_count >> 29; le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - sizeof(u64)) / sizeof(u32)); md5_transform(mctx->hash, mctx->block); cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32)); memcpy(out, mctx->hash, sizeof(mctx->hash)); memset(mctx, 0, sizeof(*mctx)); return 0; } static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = md5_init, .update = md5_update, .final = md5_final, .descsize = sizeof(struct md5_ctx), .base = { .cra_name = "md5", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init md5_mod_init(void) { return crypto_register_shash(&alg); } static void __exit md5_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(md5_mod_init); module_exit(md5_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
gpl-2.0
espenfjo/android_kernel_samsung_n8000
drivers/media/tdmb/tcc3170/src/tcpal_linux/tcpal_queue.c
619
4917
/* * * File name : tcpal_queue.c * * Description : tdmb driver * * Copyright (C) (2012, Telechips. ) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "tcpal_os.h" #include "tcpal_debug.h" #include "tcbd_feature.h" #include "tcpal_queue.h" s32 tcbd_queue_is_full(struct tcbd_queue *_queue) { if (_queue->front == ((_queue->rear+1)%_queue->qsize)) return 1; return 0; } s32 tcbd_queue_is_empty(struct tcbd_queue *_queue) { if (_queue->front == _queue->rear) return 1; return 0; } void tcbd_init_queue( struct tcbd_queue *_queue, u8* _buffer, s32 _buff_size) { memset((void *)_queue->q, 0, sizeof(struct tcbd_queue_item) * TCBD_QUEUE_SIZE); _queue->front = 0; _queue->rear = 0; _queue->qsize = TCBD_QUEUE_SIZE; _queue->buff_size = _buff_size; _queue->global_buffer = _buffer; _queue->pointer = 0; tcpal_create_lock(&_queue->sem, "TcbdQueue", 0); } void tcbd_deinit_queue(struct tcbd_queue *_queue) { _queue->front = 0; _queue->rear = 0; _queue->qsize = 0; _queue->buff_size = 0; tcpal_destroy_lock(&_queue->sem); } void tcbd_reset_queue(struct tcbd_queue *_queue) { _queue->front = 0; _queue->rear = 0; _queue->pointer = 0; memset(_queue->q, 0, sizeof(struct tcbd_queue_item)*_queue->qsize); } s32 tcbd_enqueue( struct tcbd_queue *_queue, u8 *_chunk, s32 _size, u8 _subch_id, s32 _type) { if (_chunk == NULL || _size <= 0) { tcbd_debug(DEBUG_ERROR, "Invalid argument!!\n"); return -1; } tcpal_lock(&_queue->sem); if (tcbd_queue_is_full(_queue)) { tcbd_debug(DEBUG_ERROR, "Queue Full!!\n"); _queue->pointer = 0; } if (_queue->q[_queue->rear].buffer < _queue->q[_queue->front].buffer) { u32 
next_pos_rear = (u32)_queue->q[_queue->rear].buffer + _size; u32 curr_pos_front = (u32)_queue->q[_queue->front].buffer; if (next_pos_rear > curr_pos_front) { tcbd_debug(DEBUG_ERROR, "Buffer overflow!!\n"); tcbd_reset_queue(_queue); tcpal_unlock(&_queue->sem); return -1; } } _queue->q[_queue->rear].buffer = _queue->global_buffer + _queue->pointer; if (_queue->pointer + _size >= _queue->buff_size) _queue->pointer = 0; else _queue->pointer += _size; memcpy(_queue->q[_queue->rear].buffer, _chunk, _size); _queue->q[_queue->rear].size = _size; _queue->q[_queue->rear].type = _type; _queue->q[_queue->rear].subch_id = _subch_id; _queue->rear = (_queue->rear + 1) % _queue->qsize; tcpal_unlock(&_queue->sem); return 0; } s32 tcbd_dequeue( struct tcbd_queue *_queue, u8 *_chunk, s32 *_size, u8 *_subch_id, s32 *_type) { tcpal_lock(&_queue->sem); if (tcbd_queue_is_empty(_queue)) { tcbd_debug(0, "Queue Empty!!\n"); tcpal_unlock(&_queue->sem); return -1; } if (_queue->q[_queue->front].size > *_size) { tcbd_debug(DEBUG_ERROR, "insufficient buffer!! size:%d, qsize:%d\n", *_size, _queue->q[_queue->front].size); tcpal_unlock(&_queue->sem); return -1; } memcpy(_chunk, _queue->q[_queue->front].buffer, _queue->q[_queue->front].size); *_size = _queue->q[_queue->front].size; if (_type) *_type = _queue->q[_queue->front].type; if (_subch_id) *_subch_id = _queue->q[_queue->front].subch_id; _queue->front = (_queue->front + 1) % _queue->qsize; tcbd_debug(0, "pos:%d, size:%d\n", _queue->pointer, *_size); tcpal_unlock(&_queue->sem); return 0; } s32 tcbd_dequeue_ptr( struct tcbd_queue *_queue, u8 **_chunk, s32 *_size, s32 *_type) { tcpal_lock(&_queue->sem); if (tcbd_queue_is_empty(_queue)) { tcbd_debug(0, "Queue Empty!!\n"); tcpal_unlock(&_queue->sem); return -1; } if (_queue->q[_queue->front].size > *_size) { tcbd_debug(DEBUG_ERROR, "insufficient buffer!! 
size:%d, qsize:%d\n", *_size, _queue->q[_queue->front].size); tcpal_unlock(&_queue->sem); return -1; } *_chunk = _queue->q[_queue->front].buffer; *_size = _queue->q[_queue->front].size; if (_type) *_type = _queue->q[_queue->front].type; _queue->front = (_queue->front + 1) % _queue->qsize; tcbd_debug(0, "pos:%d, size:%d\n", _queue->pointer, *_size); tcpal_unlock(&_queue->sem); return 0; } s32 tcbd_get_first_queue_ptr( struct tcbd_queue *_queue, u8 **_chunk, s32 *_size, s32 *_type) { tcpal_lock(&_queue->sem); if (tcbd_queue_is_empty(_queue)) { tcbd_debug(0, "Queue Empty!!\n"); tcpal_unlock(&_queue->sem); return -1; } *_size = _queue->q[_queue->front].size; *_chunk = _queue->q[_queue->front].buffer; if (_type) *_type = _queue->q[_queue->front].type; tcbd_debug(0, "pos:%d, size:%d\n", _queue->pointer, *_size); tcpal_unlock(&_queue->sem); return 0; }
gpl-2.0
VanirAOSP/kernel_samsung_skomer
arch/x86/xen/setup.c
875
12657
/* * Machine specific setup for xen * * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 */ #include <linux/module.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/pm.h> #include <linux/memblock.h> #include <asm/elf.h> #include <asm/vdso.h> #include <asm/e820.h> #include <asm/setup.h> #include <asm/acpi.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <xen/xen.h> #include <xen/page.h> #include <xen/interface/callback.h> #include <xen/interface/memory.h> #include <xen/interface/physdev.h> #include <xen/features.h> #include "xen-ops.h" #include "vdso.h" /* These are code, but not functions. Defined in entry.S */ extern const char xen_hypervisor_callback[]; extern const char xen_failsafe_callback[]; extern void xen_sysenter_target(void); extern void xen_syscall_target(void); extern void xen_syscall32_target(void); /* Amount of extra memory space we add to the e820 ranges */ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; /* * The maximum amount of extra memory compared to the base size. The * main scaling factor is the size of struct page. At extreme ratios * of base:extra, all the base memory can be filled with page * structures for the extra memory, leaving no space for anything * else. * * 10x seems like a reasonable balance between scaling flexibility and * leaving a practically usable system. 
*/ #define EXTRA_MEM_RATIO (10) static void __init xen_add_extra_mem(unsigned long pages) { unsigned long pfn; u64 size = (u64)pages * PAGE_SIZE; u64 extra_start = xen_extra_mem_start + xen_extra_mem_size; if (!pages) return; e820_add_region(extra_start, size, E820_RAM); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA"); xen_extra_mem_size += size; xen_max_p2m_pfn = PFN_DOWN(extra_start + size); for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++) __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } static unsigned long __init xen_release_chunk(phys_addr_t start_addr, phys_addr_t end_addr) { struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; unsigned long start, end; unsigned long len = 0; unsigned long pfn; int ret; start = PFN_UP(start_addr); end = PFN_DOWN(end_addr); if (end <= start) return 0; printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ", start, end); for(pfn = start; pfn < end; pfn++) { unsigned long mfn = pfn_to_mfn(pfn); /* Make sure pfn exists to start with */ if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn) continue; set_xen_guest_handle(reservation.extent_start, &mfn); reservation.nr_extents = 1; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", start, end, ret); if (ret == 1) { __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); len++; } } printk(KERN_CONT "%ld pages freed\n", len); return len; } static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, const struct e820map *e820) { phys_addr_t max_addr = PFN_PHYS(max_pfn); phys_addr_t last_end = ISA_END_ADDRESS; unsigned long released = 0; int i; /* Free any unused memory above the low 1Mbyte. 
*/ for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { phys_addr_t end = e820->map[i].addr; end = min(max_addr, end); if (last_end < end) released += xen_release_chunk(last_end, end); last_end = max(last_end, e820->map[i].addr + e820->map[i].size); } if (last_end < max_addr) released += xen_release_chunk(last_end, max_addr); printk(KERN_INFO "released %ld pages of unused memory\n", released); return released; } static unsigned long __init xen_set_identity(const struct e820entry *list, ssize_t map_size) { phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS; phys_addr_t start_pci = last; const struct e820entry *entry; unsigned long identity = 0; int i; for (i = 0, entry = list; i < map_size; i++, entry++) { phys_addr_t start = entry->addr; phys_addr_t end = start + entry->size; if (start < last) start = last; if (end <= start) continue; /* Skip over the 1MB region. */ if (last > end) continue; if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) { if (start > start_pci) identity += set_phys_range_identity( PFN_UP(start_pci), PFN_DOWN(start)); /* Without saving 'last' we would gooble RAM too * at the end of the loop. */ last = end; start_pci = end; continue; } start_pci = min(start, start_pci); last = end; } if (last > start_pci) identity += set_phys_range_identity( PFN_UP(start_pci), PFN_DOWN(last)); return identity; } static unsigned long __init xen_get_max_pages(void) { unsigned long max_pages = MAX_DOMAIN_PAGES; domid_t domid = DOMID_SELF; int ret; /* * For the initial domain we use the maximum reservation as * the maximum page. * * For guest domains the current maximum reservation reflects * the current maximum rather than the static maximum. In this * case the e820 map provided to us will cover the static * maximum region. 
*/ if (xen_initial_domain()) { ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid); if (ret > 0) max_pages = ret; } return min(max_pages, MAX_DOMAIN_PAGES); } /** * machine_specific_memory_setup - Hook for machine specific memory setup. **/ char * __init xen_memory_setup(void) { static struct e820entry map[E820MAX] __initdata; static struct e820entry map_raw[E820MAX] __initdata; unsigned long max_pfn = xen_start_info->nr_pages; unsigned long long mem_end; int rc; struct xen_memory_map memmap; unsigned long extra_pages = 0; unsigned long extra_limit; unsigned long identity_pages = 0; int i; int op; max_pfn = min(MAX_DOMAIN_PAGES, max_pfn); mem_end = PFN_PHYS(max_pfn); memmap.nr_entries = E820MAX; set_xen_guest_handle(memmap.buffer, map); op = xen_initial_domain() ? XENMEM_machine_memory_map : XENMEM_memory_map; rc = HYPERVISOR_memory_op(op, &memmap); if (rc == -ENOSYS) { BUG_ON(xen_initial_domain()); memmap.nr_entries = 1; map[0].addr = 0ULL; map[0].size = mem_end; /* 8MB slack (to balance backend allocations). */ map[0].size += 8ULL << 20; map[0].type = E820_RAM; rc = 0; } BUG_ON(rc); memcpy(map_raw, map, sizeof(map)); e820.nr_map = 0; xen_extra_mem_start = mem_end; for (i = 0; i < memmap.nr_entries; i++) { unsigned long long end; /* Guard against non-page aligned E820 entries. */ if (map[i].type == E820_RAM) map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE; end = map[i].addr + map[i].size; if (map[i].type == E820_RAM && end > mem_end) { /* RAM off the end - may be partially included */ u64 delta = min(map[i].size, end - mem_end); map[i].size -= delta; end -= delta; extra_pages += PFN_DOWN(delta); /* * Set RAM below 4GB that is not for us to be unusable. * This prevents "System RAM" address space from being * used as potential resource for I/O address (happens * when 'allocate_resource' is called). 
*/ if (delta && (xen_initial_domain() && end < 0x100000000ULL)) e820_add_region(end, delta, E820_UNUSABLE); } if (map[i].size > 0 && end > xen_extra_mem_start) xen_extra_mem_start = end; /* Add region if any remains */ if (map[i].size > 0) e820_add_region(map[i].addr, map[i].size, map[i].type); } /* Align the balloon area so that max_low_pfn does not get set * to be at the _end_ of the PCI gap at the far end (fee01000). * Note that xen_extra_mem_start gets set in the loop above to be * past the last E820 region. */ if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32))) xen_extra_mem_start = (1ULL<<32); /* * In domU, the ISA region is normal, usable memory, but we * reserve ISA memory anyway because too many things poke * about in there. * * In Dom0, the host E820 information can leave gaps in the * ISA range, which would cause us to release those pages. To * avoid this, we unconditionally reserve them here. */ e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RESERVED); /* * Reserve Xen bits: * - mfn_list * - xen_start_info * See comment above "struct start_info" in <xen/interface/xen.h> */ memblock_x86_reserve_range(__pa(xen_start_info->mfn_list), __pa(xen_start_info->pt_base), "XEN START INFO"); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); extra_limit = xen_get_max_pages(); if (max_pfn + extra_pages > extra_limit) { if (extra_limit > max_pfn) extra_pages = extra_limit - max_pfn; else extra_pages = 0; } extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); /* * Clamp the amount of extra memory to a EXTRA_MEM_RATIO * factor the base size. On non-highmem systems, the base * size is the full initial memory allocation; on highmem it * is limited to the max size of lowmem, so that it doesn't * get completely filled. * * In principle there could be a problem in lowmem systems if * the initial memory is also very large with respect to * lowmem, but we won't try to deal with that here. 
*/ extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), max_pfn + extra_pages); if (extra_limit >= max_pfn) extra_pages = extra_limit - max_pfn; else extra_pages = 0; xen_add_extra_mem(extra_pages); /* * Set P2M for all non-RAM pages and E820 gaps to be identity * type PFNs. We supply it with the non-sanitized version * of the E820. */ identity_pages = xen_set_identity(map_raw, memmap.nr_entries); printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages); return "Xen"; } /* * Set the bit indicating "nosegneg" library variants should be used. * We only need to bother in pure 32-bit mode; compat 32-bit processes * can have un-truncated segments, so wrapping around is allowed. */ static void __init fiddle_vdso(void) { #ifdef CONFIG_X86_32 u32 *mask; mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK); *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT; mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK); *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT; #endif } static int __cpuinit register_callback(unsigned type, const void *func) { struct callback_register callback = { .type = type, .address = XEN_CALLBACK(__KERNEL_CS, func), .flags = CALLBACKF_mask_events, }; return HYPERVISOR_callback_op(CALLBACKOP_register, &callback); } void __cpuinit xen_enable_sysenter(void) { int ret; unsigned sysenter_feature; #ifdef CONFIG_X86_32 sysenter_feature = X86_FEATURE_SEP; #else sysenter_feature = X86_FEATURE_SYSENTER32; #endif if (!boot_cpu_has(sysenter_feature)) return; ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target); if(ret != 0) setup_clear_cpu_cap(sysenter_feature); } void __cpuinit xen_enable_syscall(void) { #ifdef CONFIG_X86_64 int ret; ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target); if (ret != 0) { printk(KERN_ERR "Failed to set syscall callback: %d\n", ret); /* Pretty fatal; 64-bit userspace has no other mechanism for syscalls. 
*/ } if (boot_cpu_has(X86_FEATURE_SYSCALL32)) { ret = register_callback(CALLBACKTYPE_syscall32, xen_syscall32_target); if (ret != 0) setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); } #endif /* CONFIG_X86_64 */ } void __init xen_arch_setup(void) { xen_panic_handler_init(); HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables); if (!xen_feature(XENFEAT_auto_translated_physmap)) HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3); if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) || register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback)) BUG(); xen_enable_sysenter(); xen_enable_syscall(); #ifdef CONFIG_ACPI if (!(xen_start_info->flags & SIF_INITDOMAIN)) { printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); disable_acpi(); } #endif memcpy(boot_command_line, xen_start_info->cmd_line, MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ? COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE); /* Set up idle, making sure it calls safe_halt() pvop */ #ifdef CONFIG_X86_32 boot_cpu_data.hlt_works_ok = 1; #endif pm_idle = default_idle; boot_option_idle_override = IDLE_HALT; fiddle_vdso(); }
gpl-2.0
burstlam/zte-turies-35
fs/bfs/file.c
1387
4856
/* * fs/bfs/file.c * BFS file operations. * Copyright (C) 1999,2000 Tigran Aivazian <tigran@veritas.com> * * Make the file block allocation algorithm understand the size * of the underlying block device. * Copyright (C) 2007 Dmitri Vorobiev <dmitri.vorobiev@gmail.com> * */ #include <linux/fs.h> #include <linux/buffer_head.h> #include "bfs.h" #undef DEBUG #ifdef DEBUG #define dprintf(x...) printf(x) #else #define dprintf(x...) #endif const struct file_operations bfs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .aio_read = generic_file_aio_read, .write = do_sync_write, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, .splice_read = generic_file_splice_read, }; static int bfs_move_block(unsigned long from, unsigned long to, struct super_block *sb) { struct buffer_head *bh, *new; bh = sb_bread(sb, from); if (!bh) return -EIO; new = sb_getblk(sb, to); memcpy(new->b_data, bh->b_data, bh->b_size); mark_buffer_dirty(new); bforget(bh); brelse(new); return 0; } static int bfs_move_blocks(struct super_block *sb, unsigned long start, unsigned long end, unsigned long where) { unsigned long i; dprintf("%08lx-%08lx->%08lx\n", start, end, where); for (i = start; i <= end; i++) if(bfs_move_block(i, where + i, sb)) { dprintf("failed to move block %08lx -> %08lx\n", i, where + i); return -EIO; } return 0; } static int bfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { unsigned long phys; int err; struct super_block *sb = inode->i_sb; struct bfs_sb_info *info = BFS_SB(sb); struct bfs_inode_info *bi = BFS_I(inode); struct buffer_head *sbh = info->si_sbh; phys = bi->i_sblock + block; if (!create) { if (phys <= bi->i_eblock) { dprintf("c=%d, b=%08lx, phys=%09lx (granted)\n", create, (unsigned long)block, phys); map_bh(bh_result, sb, phys); } return 0; } /* * If the file is not empty and the requested block is within the * range of blocks allocated for this file, we can grant it. 
*/ if (bi->i_sblock && (phys <= bi->i_eblock)) { dprintf("c=%d, b=%08lx, phys=%08lx (interim block granted)\n", create, (unsigned long)block, phys); map_bh(bh_result, sb, phys); return 0; } /* The file will be extended, so let's see if there is enough space. */ if (phys >= info->si_blocks) return -ENOSPC; /* The rest has to be protected against itself. */ mutex_lock(&info->bfs_lock); /* * If the last data block for this file is the last allocated * block, we can extend the file trivially, without moving it * anywhere. */ if (bi->i_eblock == info->si_lf_eblk) { dprintf("c=%d, b=%08lx, phys=%08lx (simple extension)\n", create, (unsigned long)block, phys); map_bh(bh_result, sb, phys); info->si_freeb -= phys - bi->i_eblock; info->si_lf_eblk = bi->i_eblock = phys; mark_inode_dirty(inode); mark_buffer_dirty(sbh); err = 0; goto out; } /* Ok, we have to move this entire file to the next free block. */ phys = info->si_lf_eblk + 1; if (phys + block >= info->si_blocks) { err = -ENOSPC; goto out; } if (bi->i_sblock) { err = bfs_move_blocks(inode->i_sb, bi->i_sblock, bi->i_eblock, phys); if (err) { dprintf("failed to move ino=%08lx -> fs corruption\n", inode->i_ino); goto out; } } else err = 0; dprintf("c=%d, b=%08lx, phys=%08lx (moved)\n", create, (unsigned long)block, phys); bi->i_sblock = phys; phys += block; info->si_lf_eblk = bi->i_eblock = phys; /* * This assumes nothing can write the inode back while we are here * and thus update inode->i_blocks! 
(XXX) */ info->si_freeb -= bi->i_eblock - bi->i_sblock + 1 - inode->i_blocks; mark_inode_dirty(inode); mark_buffer_dirty(sbh); map_bh(bh_result, sb, phys); out: mutex_unlock(&info->bfs_lock); return err; } static int bfs_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, bfs_get_block, wbc); } static int bfs_readpage(struct file *file, struct page *page) { return block_read_full_page(page, bfs_get_block); } static int bfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { *pagep = NULL; return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, bfs_get_block); } static sector_t bfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, bfs_get_block); } const struct address_space_operations bfs_aops = { .readpage = bfs_readpage, .writepage = bfs_writepage, .sync_page = block_sync_page, .write_begin = bfs_write_begin, .write_end = generic_write_end, .bmap = bfs_bmap, }; const struct inode_operations bfs_file_inops;
gpl-2.0
bju2000/android_kernel_lenovo_k30t
drivers/video/console/fbcon.c
1643
90115
/* * linux/drivers/video/fbcon.c -- Low level frame buffer based console driver * * Copyright (C) 1995 Geert Uytterhoeven * * * This file is based on the original Amiga console driver (amicon.c): * * Copyright (C) 1993 Hamish Macdonald * Greg Harp * Copyright (C) 1994 David Carter [carter@compsci.bristol.ac.uk] * * with work by William Rucklidge (wjr@cs.cornell.edu) * Geert Uytterhoeven * Jes Sorensen (jds@kom.auc.dk) * Martin Apel * * and on the original Atari console driver (atacon.c): * * Copyright (C) 1993 Bjoern Brauel * Roman Hodek * * with work by Guenther Kelleter * Martin Schaller * Andreas Schwab * * Hardware cursor support added by Emmanuel Marty (core@ggi-project.org) * Smart redraw scrolling, arbitrary font width support, 512char font support * and software scrollback added by * Jakub Jelinek (jj@ultra.linux.cz) * * Random hacking by Martin Mares <mj@ucw.cz> * * 2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * The low level operations for the various display memory organizations are * now in separate source files. * * Currently the following organizations are supported: * * o afb Amiga bitplanes * o cfb{2,4,8,16,24,32} Packed pixels * o ilbm Amiga interleaved bitplanes * o iplan2p[248] Atari interleaved bitplanes * o mfb Monochrome * o vga VGA characters/attributes * * To do: * * - Implement 16 plane mode (iplan2p16) * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. 
*/ #undef FBCONDEBUG #include <linux/module.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/delay.h> /* MSch: for IRQ probe */ #include <linux/console.h> #include <linux/string.h> #include <linux/kd.h> #include <linux/slab.h> #include <linux/fb.h> #include <linux/vt_kern.h> #include <linux/selection.h> #include <linux/font.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/crc32.h> /* For counting font checksums */ #include <asm/fb.h> #include <asm/irq.h> #include "fbcon.h" #ifdef FBCONDEBUG # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args) #else # define DPRINTK(fmt, args...) #endif enum { FBCON_LOGO_CANSHOW = -1, /* the logo can be shown */ FBCON_LOGO_DRAW = -2, /* draw the logo to a console */ FBCON_LOGO_DONTSHOW = -3 /* do not show the logo */ }; static struct display fb_display[MAX_NR_CONSOLES]; static signed char con2fb_map[MAX_NR_CONSOLES]; static signed char con2fb_map_boot[MAX_NR_CONSOLES]; static int logo_lines; /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO enums. 
*/ static int logo_shown = FBCON_LOGO_CANSHOW; /* Software scrollback */ static int fbcon_softback_size = 32768; static unsigned long softback_buf, softback_curr; static unsigned long softback_in; static unsigned long softback_top, softback_end; static int softback_lines; /* console mappings */ static int first_fb_vc; static int last_fb_vc = MAX_NR_CONSOLES - 1; static int fbcon_is_default = 1; static int fbcon_has_exited; static int primary_device = -1; static int fbcon_has_console_bind; #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY static int map_override; static inline void fbcon_map_override(void) { map_override = 1; } #else static inline void fbcon_map_override(void) { } #endif /* CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY */ /* font data */ static char fontname[40]; /* current fb_info */ static int info_idx = -1; /* console rotation */ static int initial_rotation; static int fbcon_has_sysfs; static const struct consw fb_con; #define CM_SOFTBACK (8) #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row) static int fbcon_set_origin(struct vc_data *); #define CURSOR_DRAW_DELAY (1) static int vbl_cursor_cnt; static int fbcon_cursor_noblink; #define divides(a, b) ((!(a) || (b)%(a)) ? 
0 : 1) /* * Interface used by the world */ static const char *fbcon_startup(void); static void fbcon_init(struct vc_data *vc, int init); static void fbcon_deinit(struct vc_data *vc); static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, int width); static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos); static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos); static void fbcon_clear_margins(struct vc_data *vc, int bottom_only); static void fbcon_cursor(struct vc_data *vc, int mode); static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, int count); static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, int height, int width); static int fbcon_switch(struct vc_data *vc); static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch); static int fbcon_set_palette(struct vc_data *vc, unsigned char *table); static int fbcon_scrolldelta(struct vc_data *vc, int lines); /* * Internal routines */ static __inline__ void ywrap_up(struct vc_data *vc, int count); static __inline__ void ywrap_down(struct vc_data *vc, int count); static __inline__ void ypan_up(struct vc_data *vc, int count); static __inline__ void ypan_down(struct vc_data *vc, int count); static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx, int dy, int dx, int height, int width, u_int y_break); static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, int unit); static void fbcon_redraw_move(struct vc_data *vc, struct display *p, int line, int count, int dy); static void fbcon_modechanged(struct fb_info *info); static void fbcon_set_all_vcs(struct fb_info *info); static void fbcon_start(void); static void fbcon_exit(void); static struct device *fbcon_device; #ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION static inline void fbcon_set_rotation(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; if (!(info->flags & 
FBINFO_MISC_TILEBLITTING) && ops->p->con_rotate < 4) ops->rotate = ops->p->con_rotate; else ops->rotate = 0; } static void fbcon_rotate(struct fb_info *info, u32 rotate) { struct fbcon_ops *ops= info->fbcon_par; struct fb_info *fb_info; if (!ops || ops->currcon == -1) return; fb_info = registered_fb[con2fb_map[ops->currcon]]; if (info == fb_info) { struct display *p = &fb_display[ops->currcon]; if (rotate < 4) p->con_rotate = rotate; else p->con_rotate = 0; fbcon_modechanged(info); } } static void fbcon_rotate_all(struct fb_info *info, u32 rotate) { struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc; struct display *p; int i; if (!ops || ops->currcon < 0 || rotate > 3) return; for (i = first_fb_vc; i <= last_fb_vc; i++) { vc = vc_cons[i].d; if (!vc || vc->vc_mode != KD_TEXT || registered_fb[con2fb_map[i]] != info) continue; p = &fb_display[vc->vc_num]; p->con_rotate = rotate; } fbcon_set_all_vcs(info); } #else static inline void fbcon_set_rotation(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; ops->rotate = FB_ROTATE_UR; } static void fbcon_rotate(struct fb_info *info, u32 rotate) { return; } static void fbcon_rotate_all(struct fb_info *info, u32 rotate) { return; } #endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */ static int fbcon_get_rotate(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; return (ops) ? ops->rotate : 0; } static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; return (info->state != FBINFO_STATE_RUNNING || vc->vc_mode != KD_TEXT || ops->graphics) && !vt_force_oops_output(vc); } static int get_color(struct vc_data *vc, struct fb_info *info, u16 c, int is_fg) { int depth = fb_get_color_depth(&info->var, &info->fix); int color = 0; if (console_blanked) { unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; c = vc->vc_video_erase_char & charmask; } if (depth != 1) color = (is_fg) ? attr_fgcol((vc->vc_hi_font_mask) ? 
9 : 8, c) : attr_bgcol((vc->vc_hi_font_mask) ? 13 : 12, c); switch (depth) { case 1: { int col = mono_col(info); /* 0 or 1 */ int fg = (info->fix.visual != FB_VISUAL_MONO01) ? col : 0; int bg = (info->fix.visual != FB_VISUAL_MONO01) ? 0 : col; if (console_blanked) fg = bg; color = (is_fg) ? fg : bg; break; } case 2: /* * Scale down 16-colors to 4 colors. Default 4-color palette * is grayscale. However, simply dividing the values by 4 * will not work, as colors 1, 2 and 3 will be scaled-down * to zero rendering them invisible. So empirically convert * colors to a sane 4-level grayscale. */ switch (color) { case 0: color = 0; /* black */ break; case 1 ... 6: color = 2; /* white */ break; case 7 ... 8: color = 1; /* gray */ break; default: color = 3; /* intense white */ break; } break; case 3: /* * Last 8 entries of default 16-color palette is a more intense * version of the first 8 (i.e., same chrominance, different * luminance). */ color &= 7; break; } return color; } static void fbcon_update_softback(struct vc_data *vc) { int l = fbcon_softback_size / vc->vc_size_row; if (l > 5) softback_end = softback_buf + l * vc->vc_size_row; else /* Smaller scrollback makes no sense, and 0 would screw the operation totally */ softback_top = 0; } static void fb_flashcursor(struct work_struct *work) { struct fb_info *info = container_of(work, struct fb_info, queue); struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc = NULL; int c; int mode; int ret; /* FIXME: we should sort out the unbind locking instead */ /* instead we just fail to flash the cursor if we can't get * the lock instead of blocking fbcon deinit */ ret = console_trylock(); if (ret == 0) return; if (ops && ops->currcon != -1) vc = vc_cons[ops->currcon].d; if (!vc || !CON_IS_VISIBLE(vc) || registered_fb[con2fb_map[vc->vc_num]] != info || vc->vc_deccm != 1) { console_unlock(); return; } c = scr_readw((u16 *) vc->vc_pos); mode = (!ops->cursor_flash || ops->cursor_state.enable) ? 
CM_ERASE : CM_DRAW; ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); console_unlock(); } static void cursor_timer_handler(unsigned long dev_addr) { struct fb_info *info = (struct fb_info *) dev_addr; struct fbcon_ops *ops = info->fbcon_par; schedule_work(&info->queue); mod_timer(&ops->cursor_timer, jiffies + HZ/5); } static void fbcon_add_cursor_timer(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; if ((!info->queue.func || info->queue.func == fb_flashcursor) && !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) && !fbcon_cursor_noblink) { if (!info->queue.func) INIT_WORK(&info->queue, fb_flashcursor); init_timer(&ops->cursor_timer); ops->cursor_timer.function = cursor_timer_handler; ops->cursor_timer.expires = jiffies + HZ / 5; ops->cursor_timer.data = (unsigned long ) info; add_timer(&ops->cursor_timer); ops->flags |= FBCON_FLAGS_CURSOR_TIMER; } } static void fbcon_del_cursor_timer(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; if (info->queue.func == fb_flashcursor && ops->flags & FBCON_FLAGS_CURSOR_TIMER) { del_timer_sync(&ops->cursor_timer); ops->flags &= ~FBCON_FLAGS_CURSOR_TIMER; } } #ifndef MODULE static int __init fb_console_setup(char *this_opt) { char *options; int i, j; if (!this_opt || !*this_opt) return 1; while ((options = strsep(&this_opt, ",")) != NULL) { if (!strncmp(options, "font:", 5)) strlcpy(fontname, options + 5, sizeof(fontname)); if (!strncmp(options, "scrollback:", 11)) { options += 11; if (*options) { fbcon_softback_size = simple_strtoul(options, &options, 0); if (*options == 'k' || *options == 'K') { fbcon_softback_size *= 1024; options++; } if (*options != ',') return 1; options++; } else return 1; } if (!strncmp(options, "map:", 4)) { options += 4; if (*options) { for (i = 0, j = 0; i < MAX_NR_CONSOLES; i++) { if (!options[j]) j = 0; con2fb_map_boot[i] = (options[j++]-'0') % FB_MAX; } fbcon_map_override(); } return 1; } if (!strncmp(options, "vc:", 3)) { 
options += 3; if (*options) first_fb_vc = simple_strtoul(options, &options, 10) - 1; if (first_fb_vc < 0) first_fb_vc = 0; if (*options++ == '-') last_fb_vc = simple_strtoul(options, &options, 10) - 1; fbcon_is_default = 0; } if (!strncmp(options, "rotate:", 7)) { options += 7; if (*options) initial_rotation = simple_strtoul(options, &options, 0); if (initial_rotation > 3) initial_rotation = 0; } } return 1; } __setup("fbcon=", fb_console_setup); #endif static int search_fb_in_map(int idx) { int i, retval = 0; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == idx) retval = 1; } return retval; } static int search_for_mapped_con(void) { int i, retval = 0; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] != -1) retval = 1; } return retval; } static int do_fbcon_takeover(int show_logo) { int err, i; if (!num_registered_fb) return -ENODEV; if (!show_logo) logo_shown = FBCON_LOGO_DONTSHOW; for (i = first_fb_vc; i <= last_fb_vc; i++) con2fb_map[i] = info_idx; err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc, fbcon_is_default); if (err) { for (i = first_fb_vc; i <= last_fb_vc; i++) con2fb_map[i] = -1; info_idx = -1; } else { fbcon_has_console_bind = 1; } return err; } static int fbcon_takeover(int show_logo) { int err, i; if (!num_registered_fb) return -ENODEV; if (!show_logo) logo_shown = FBCON_LOGO_DONTSHOW; for (i = first_fb_vc; i <= last_fb_vc; i++) con2fb_map[i] = info_idx; err = take_over_console(&fb_con, first_fb_vc, last_fb_vc, fbcon_is_default); if (err) { for (i = first_fb_vc; i <= last_fb_vc; i++) { con2fb_map[i] = -1; } info_idx = -1; } else { fbcon_has_console_bind = 1; } return err; } #ifdef MODULE static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, int cols, int rows, int new_cols, int new_rows) { logo_shown = FBCON_LOGO_DONTSHOW; } #else static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, int cols, int rows, int new_cols, int new_rows) { /* Need to make room for the logo */ 
struct fbcon_ops *ops = info->fbcon_par; int cnt, erase = vc->vc_video_erase_char, step; unsigned short *save = NULL, *r, *q; int logo_height; if (info->flags & FBINFO_MODULE) { logo_shown = FBCON_LOGO_DONTSHOW; return; } /* * remove underline attribute from erase character * if black and white framebuffer. */ if (fb_get_color_depth(&info->var, &info->fix) == 1) erase &= ~0x400; logo_height = fb_prepare_logo(info, ops->rotate); logo_lines = DIV_ROUND_UP(logo_height, vc->vc_font.height); q = (unsigned short *) (vc->vc_origin + vc->vc_size_row * rows); step = logo_lines * cols; for (r = q - logo_lines * cols; r < q; r++) if (scr_readw(r) != vc->vc_video_erase_char) break; if (r != q && new_rows >= rows + logo_lines) { save = kmalloc(logo_lines * new_cols * 2, GFP_KERNEL); if (save) { int i = cols < new_cols ? cols : new_cols; scr_memsetw(save, erase, logo_lines * new_cols * 2); r = q - step; for (cnt = 0; cnt < logo_lines; cnt++, r += i) scr_memcpyw(save + cnt * new_cols, r, 2 * i); r = q; } } if (r == q) { /* We can scroll screen down */ r = q - step - cols; for (cnt = rows - logo_lines; cnt > 0; cnt--) { scr_memcpyw(r + step, r, vc->vc_size_row); r -= cols; } if (!save) { int lines; if (vc->vc_y + logo_lines >= rows) lines = rows - vc->vc_y - 1; else lines = logo_lines; vc->vc_y += lines; vc->vc_pos += lines * vc->vc_size_row; } } scr_memsetw((unsigned short *) vc->vc_origin, erase, vc->vc_size_row * logo_lines); if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) { fbcon_clear_margins(vc, 0); update_screen(vc); } if (save) { q = (unsigned short *) (vc->vc_origin + vc->vc_size_row * rows); scr_memcpyw(q, save, logo_lines * new_cols * 2); vc->vc_y += logo_lines; vc->vc_pos += logo_lines * vc->vc_size_row; kfree(save); } if (logo_lines > vc->vc_bottom) { logo_shown = FBCON_LOGO_CANSHOW; printk(KERN_INFO "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n"); } else if (logo_shown != FBCON_LOGO_DONTSHOW) { logo_shown = FBCON_LOGO_DRAW; vc->vc_top = 
logo_lines; } } #endif /* MODULE */ #ifdef CONFIG_FB_TILEBLITTING static void set_blitting_type(struct vc_data *vc, struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; ops->p = &fb_display[vc->vc_num]; if ((info->flags & FBINFO_MISC_TILEBLITTING)) fbcon_set_tileops(vc, info); else { fbcon_set_rotation(info); fbcon_set_bitops(ops); } } static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) { int err = 0; if (info->flags & FBINFO_MISC_TILEBLITTING && info->tileops->fb_get_tilemax(info) < charcount) err = 1; return err; } #else static void set_blitting_type(struct vc_data *vc, struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; info->flags &= ~FBINFO_MISC_TILEBLITTING; ops->p = &fb_display[vc->vc_num]; fbcon_set_rotation(info); fbcon_set_bitops(ops); } static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) { return 0; } #endif /* CONFIG_MISC_TILEBLITTING */ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info, int unit, int oldidx) { struct fbcon_ops *ops = NULL; int err = 0; if (!try_module_get(info->fbops->owner)) err = -ENODEV; if (!err && info->fbops->fb_open && info->fbops->fb_open(info, 0)) err = -ENODEV; if (!err) { ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); if (!ops) err = -ENOMEM; } if (!err) { info->fbcon_par = ops; if (vc) set_blitting_type(vc, info); } if (err) { con2fb_map[unit] = oldidx; module_put(info->fbops->owner); } return err; } static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo, struct fb_info *newinfo, int unit, int oldidx, int found) { struct fbcon_ops *ops = oldinfo->fbcon_par; int err = 0, ret; if (oldinfo->fbops->fb_release && oldinfo->fbops->fb_release(oldinfo, 0)) { con2fb_map[unit] = oldidx; if (!found && newinfo->fbops->fb_release) newinfo->fbops->fb_release(newinfo, 0); if (!found) module_put(newinfo->fbops->owner); err = -ENODEV; } if (!err) { fbcon_del_cursor_timer(oldinfo); kfree(ops->cursor_state.mask); 
kfree(ops->cursor_data); kfree(ops->fontbuffer); kfree(oldinfo->fbcon_par); oldinfo->fbcon_par = NULL; module_put(oldinfo->fbops->owner); /* If oldinfo and newinfo are driving the same hardware, the fb_release() method of oldinfo may attempt to restore the hardware state. This will leave the newinfo in an undefined state. Thus, a call to fb_set_par() may be needed for the newinfo. */ if (newinfo->fbops->fb_set_par) { ret = newinfo->fbops->fb_set_par(newinfo); if (ret) printk(KERN_ERR "con2fb_release_oldinfo: " "detected unhandled fb_set_par error, " "error code %d\n", ret); } } return err; } static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, int unit, int show_logo) { struct fbcon_ops *ops = info->fbcon_par; int ret; ops->currcon = fg_console; if (info->fbops->fb_set_par && !(ops->flags & FBCON_FLAGS_INIT)) { ret = info->fbops->fb_set_par(info); if (ret) printk(KERN_ERR "con2fb_init_display: detected " "unhandled fb_set_par error, " "error code %d\n", ret); } ops->flags |= FBCON_FLAGS_INIT; ops->graphics = 0; fbcon_set_disp(info, &info->var, unit); if (show_logo) { struct vc_data *fg_vc = vc_cons[fg_console].d; struct fb_info *fg_info = registered_fb[con2fb_map[fg_console]]; fbcon_prepare_logo(fg_vc, fg_info, fg_vc->vc_cols, fg_vc->vc_rows, fg_vc->vc_cols, fg_vc->vc_rows); } update_screen(vc_cons[fg_console].d); } /** * set_con2fb_map - map console to frame buffer device * @unit: virtual console number to map * @newidx: frame buffer index to map virtual console to * @user: user request * * Maps a virtual console @unit to a frame buffer device * @newidx. * * This should be called with the console lock held. 
*/ static int set_con2fb_map(int unit, int newidx, int user) { struct vc_data *vc = vc_cons[unit].d; int oldidx = con2fb_map[unit]; struct fb_info *info = registered_fb[newidx]; struct fb_info *oldinfo = NULL; int found, err = 0; if (oldidx == newidx) return 0; if (!info) return -EINVAL; if (!search_for_mapped_con() || !con_is_bound(&fb_con)) { info_idx = newidx; return do_fbcon_takeover(0); } if (oldidx != -1) oldinfo = registered_fb[oldidx]; found = search_fb_in_map(newidx); con2fb_map[unit] = newidx; if (!err && !found) err = con2fb_acquire_newinfo(vc, info, unit, oldidx); /* * If old fb is not mapped to any of the consoles, * fbcon should release it. */ if (!err && oldinfo && !search_fb_in_map(oldidx)) err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx, found); if (!err) { int show_logo = (fg_console == 0 && !user && logo_shown != FBCON_LOGO_DONTSHOW); if (!found) fbcon_add_cursor_timer(info); con2fb_map_boot[unit] = newidx; con2fb_init_display(vc, info, unit, show_logo); } if (!search_fb_in_map(info_idx)) info_idx = newidx; return err; } /* * Low Level Operations */ /* NOTE: fbcon cannot be __init: it may be called from take_over_console later */ static int var_to_display(struct display *disp, struct fb_var_screeninfo *var, struct fb_info *info) { disp->xres_virtual = var->xres_virtual; disp->yres_virtual = var->yres_virtual; disp->bits_per_pixel = var->bits_per_pixel; disp->grayscale = var->grayscale; disp->nonstd = var->nonstd; disp->accel_flags = var->accel_flags; disp->height = var->height; disp->width = var->width; disp->red = var->red; disp->green = var->green; disp->blue = var->blue; disp->transp = var->transp; disp->rotate = var->rotate; disp->mode = fb_match_mode(var, &info->modelist); if (disp->mode == NULL) /* This should not happen */ return -EINVAL; return 0; } static void display_to_var(struct fb_var_screeninfo *var, struct display *disp) { fb_videomode_to_var(var, disp->mode); var->xres_virtual = disp->xres_virtual; var->yres_virtual = 
disp->yres_virtual; var->bits_per_pixel = disp->bits_per_pixel; var->grayscale = disp->grayscale; var->nonstd = disp->nonstd; var->accel_flags = disp->accel_flags; var->height = disp->height; var->width = disp->width; var->red = disp->red; var->green = disp->green; var->blue = disp->blue; var->transp = disp->transp; var->rotate = disp->rotate; } static const char *fbcon_startup(void) { const char *display_desc = "frame buffer device"; struct display *p = &fb_display[fg_console]; struct vc_data *vc = vc_cons[fg_console].d; const struct font_desc *font = NULL; struct module *owner; struct fb_info *info = NULL; struct fbcon_ops *ops; int rows, cols; /* * If num_registered_fb is zero, this is a call for the dummy part. * The frame buffer devices weren't initialized yet. */ if (!num_registered_fb || info_idx == -1) return display_desc; /* * Instead of blindly using registered_fb[0], we use info_idx, set by * fb_console_init(); */ info = registered_fb[info_idx]; if (!info) return NULL; owner = info->fbops->owner; if (!try_module_get(owner)) return NULL; if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) { module_put(owner); return NULL; } ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); if (!ops) { module_put(owner); return NULL; } ops->currcon = -1; ops->graphics = 1; ops->cur_rotate = -1; info->fbcon_par = ops; p->con_rotate = initial_rotation; set_blitting_type(vc, info); if (info->fix.type != FB_TYPE_TEXT) { if (fbcon_softback_size) { if (!softback_buf) { softback_buf = (unsigned long) kmalloc(fbcon_softback_size, GFP_KERNEL); if (!softback_buf) { fbcon_softback_size = 0; softback_top = 0; } } } else { if (softback_buf) { kfree((void *) softback_buf); softback_buf = 0; softback_top = 0; } } if (softback_buf) softback_in = softback_top = softback_curr = softback_buf; softback_lines = 0; } /* Setup default font */ if (!p->fontdata && !vc->vc_font.data) { if (!fontname[0] || !(font = find_font(fontname))) font = get_default_font(info->var.xres, 
info->var.yres, info->pixmap.blit_x, info->pixmap.blit_y); vc->vc_font.width = font->width; vc->vc_font.height = font->height; vc->vc_font.data = (void *)(p->fontdata = font->data); vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */ } else { p->fontdata = vc->vc_font.data; } cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); DPRINTK("mode: %s\n", info->fix.id); DPRINTK("visual: %d\n", info->fix.visual); DPRINTK("res: %dx%d-%d\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); fbcon_add_cursor_timer(info); fbcon_has_exited = 0; return display_desc; } static void fbcon_init(struct vc_data *vc, int init) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops; struct vc_data **default_mode = vc->vc_display_fg; struct vc_data *svc = *default_mode; struct display *t, *p = &fb_display[vc->vc_num]; int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256; int cap, ret; if (info_idx == -1 || info == NULL) return; cap = info->flags; if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW || (info->fix.type == FB_TYPE_TEXT)) logo = 0; if (var_to_display(p, &info->var, info)) return; if (!info->fbcon_par) con2fb_acquire_newinfo(vc, info, vc->vc_num, -1); /* If we are not the first console on this fb, copy the font from that console */ t = &fb_display[fg_console]; if (!p->fontdata) { if (t->fontdata) { struct vc_data *fvc = vc_cons[fg_console].d; vc->vc_font.data = (void *)(p->fontdata = fvc->vc_font.data); vc->vc_font.width = fvc->vc_font.width; vc->vc_font.height = fvc->vc_font.height; p->userfont = t->userfont; if (p->userfont) REFCOUNT(p->fontdata)++; } else { const struct font_desc *font = NULL; if (!fontname[0] || !(font = find_font(fontname))) font = get_default_font(info->var.xres, info->var.yres, info->pixmap.blit_x, info->pixmap.blit_y); vc->vc_font.width 
= font->width; vc->vc_font.height = font->height; vc->vc_font.data = (void *)(p->fontdata = font->data); vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */ } } if (p->userfont) charcnt = FNTCHARCNT(p->fontdata); vc->vc_panic_force_write = !!(info->flags & FBINFO_CAN_FORCE_OUTPUT); vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1); vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; if (charcnt == 256) { vc->vc_hi_font_mask = 0; } else { vc->vc_hi_font_mask = 0x100; if (vc->vc_can_do_color) vc->vc_complement_mask <<= 1; } if (!*svc->vc_uni_pagedir_loc) con_set_default_unimap(svc); if (!*vc->vc_uni_pagedir_loc) con_copy_unimap(vc, svc); ops = info->fbcon_par; p->con_rotate = initial_rotation; set_blitting_type(vc, info); cols = vc->vc_cols; rows = vc->vc_rows; new_cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); new_cols /= vc->vc_font.width; new_rows /= vc->vc_font.height; /* * We must always set the mode. The mode of the previous console * driver could be in the same resolution but we are using different * hardware so we have to initialize the hardware. * * We need to do it in fbcon_init() to prevent screen corruption. */ if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) { if (info->fbops->fb_set_par && !(ops->flags & FBCON_FLAGS_INIT)) { ret = info->fbops->fb_set_par(info); if (ret) printk(KERN_ERR "fbcon_init: detected " "unhandled fb_set_par error, " "error code %d\n", ret); } ops->flags |= FBCON_FLAGS_INIT; } ops->graphics = 0; if ((cap & FBINFO_HWACCEL_COPYAREA) && !(cap & FBINFO_HWACCEL_DISABLED)) p->scrollmode = SCROLL_MOVE; else /* default to something safe */ p->scrollmode = SCROLL_REDRAW; /* * ++guenther: console.c:vc_allocate() relies on initializing * vc_{cols,rows}, but we must not set those if we are only * resizing the console. 
*/ if (init) { vc->vc_cols = new_cols; vc->vc_rows = new_rows; } else vc_resize(vc, new_cols, new_rows); if (logo) fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); if (vc == svc && softback_buf) fbcon_update_softback(vc); if (ops->rotate_font && ops->rotate_font(info, vc)) { ops->rotate = FB_ROTATE_UR; set_blitting_type(vc, info); } ops->p = &fb_display[fg_console]; } static void fbcon_free_font(struct display *p, bool freefont) { if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int)); p->fontdata = NULL; p->userfont = 0; } static void fbcon_deinit(struct vc_data *vc) { struct display *p = &fb_display[vc->vc_num]; struct fb_info *info; struct fbcon_ops *ops; int idx; bool free_font = true; idx = con2fb_map[vc->vc_num]; if (idx == -1) goto finished; info = registered_fb[idx]; if (!info) goto finished; if (info->flags & FBINFO_MISC_FIRMWARE) free_font = false; ops = info->fbcon_par; if (!ops) goto finished; if (CON_IS_VISIBLE(vc)) fbcon_del_cursor_timer(info); ops->flags &= ~FBCON_FLAGS_INIT; finished: fbcon_free_font(p, free_font); if (free_font) vc->vc_font.data = NULL; if (!con_is_bound(&fb_con)) fbcon_exit(); return; } /* ====================================================================== */ /* fbcon_XXX routines - interface used by the world * * This system is now divided into two levels because of complications * caused by hardware scrolling. Top level functions: * * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins() * * handles y values in range [0, scr_height-1] that correspond to real * screen positions. y_wrap shift means that first line of bitmap may be * anywhere on this display. These functions convert lineoffsets to * bitmap offsets and deal with the wrap-around case by splitting blits. * * fbcon_bmove_physical_8() -- These functions fast implementations * fbcon_clear_physical_8() -- of original fbcon_XXX fns. 
* fbcon_putc_physical_8() -- (font width != 8) may be added later * * WARNING: * * At the moment fbcon_putc() cannot blit across vertical wrap boundary * Implies should only really hardware scroll in rows. Only reason for * restriction is simplicity & efficiency at the moment. */ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, int width) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; struct display *p = &fb_display[vc->vc_num]; u_int y_break; if (fbcon_is_inactive(vc, info)) return; if (!height || !width) return; if (sy < vc->vc_top && vc->vc_top == logo_lines) { vc->vc_top = 0; /* * If the font dimensions are not an integral of the display * dimensions then the ops->clear below won't end up clearing * the margins. Call clear_margins here in case the logo * bitmap stretched into the margin area. */ fbcon_clear_margins(vc, 0); } /* Split blits that cross physical y_wrap boundary */ y_break = p->vrows - p->yscroll; if (sy < y_break && sy + height - 1 >= y_break) { u_int b = y_break - sy; ops->clear(vc, info, real_y(p, sy), sx, b, width); ops->clear(vc, info, real_y(p, sy + b), sx, height - b, width); } else ops->clear(vc, info, real_y(p, sy), sx, height, width); } static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; if (!fbcon_is_inactive(vc, info)) ops->putcs(vc, info, s, count, real_y(p, ypos), xpos, get_color(vc, info, scr_readw(s), 1), get_color(vc, info, scr_readw(s), 0)); } static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos) { unsigned short chr; scr_writew(c, &chr); fbcon_putcs(vc, &chr, 1, ypos, xpos); } static void fbcon_clear_margins(struct vc_data *vc, int bottom_only) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = 
info->fbcon_par; if (!fbcon_is_inactive(vc, info)) ops->clear_margins(vc, info, bottom_only); } static void fbcon_cursor(struct vc_data *vc, int mode) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; int y; int c = scr_readw((u16 *) vc->vc_pos); if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) return; if (vc->vc_cursor_type & 0x10) fbcon_del_cursor_timer(info); else fbcon_add_cursor_timer(info); ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1; if (mode & CM_SOFTBACK) { mode &= ~CM_SOFTBACK; y = softback_lines; } else { if (softback_lines) fbcon_set_origin(vc); y = 0; } ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); vbl_cursor_cnt = CURSOR_DRAW_DELAY; } static int scrollback_phys_max = 0; static int scrollback_max = 0; static int scrollback_current = 0; static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, int unit) { struct display *p, *t; struct vc_data **default_mode, *vc; struct vc_data *svc; struct fbcon_ops *ops = info->fbcon_par; int rows, cols, charcnt = 256; p = &fb_display[unit]; if (var_to_display(p, var, info)) return; vc = vc_cons[unit].d; if (!vc) return; default_mode = vc->vc_display_fg; svc = *default_mode; t = &fb_display[svc->vc_num]; if (!vc->vc_font.data) { vc->vc_font.data = (void *)(p->fontdata = t->fontdata); vc->vc_font.width = (*default_mode)->vc_font.width; vc->vc_font.height = (*default_mode)->vc_font.height; p->userfont = t->userfont; if (p->userfont) REFCOUNT(p->fontdata)++; } if (p->userfont) charcnt = FNTCHARCNT(p->fontdata); var->activate = FB_ACTIVATE_NOW; info->var.activate = var->activate; var->yoffset = info->var.yoffset; var->xoffset = info->var.xoffset; fb_set_var(info, var); ops->var = info->var; vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1); vc->vc_complement_mask = vc->vc_can_do_color ? 
0x7700 : 0x0800; if (charcnt == 256) { vc->vc_hi_font_mask = 0; } else { vc->vc_hi_font_mask = 0x100; if (vc->vc_can_do_color) vc->vc_complement_mask <<= 1; } if (!*svc->vc_uni_pagedir_loc) con_set_default_unimap(svc); if (!*vc->vc_uni_pagedir_loc) con_copy_unimap(vc, svc); cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); if (CON_IS_VISIBLE(vc)) { update_screen(vc); if (softback_buf) fbcon_update_softback(vc); } } static __inline__ void ywrap_up(struct vc_data *vc, int count) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; struct display *p = &fb_display[vc->vc_num]; p->yscroll += count; if (p->yscroll >= p->vrows) /* Deal with wrap */ p->yscroll -= p->vrows; ops->var.xoffset = 0; ops->var.yoffset = p->yscroll * vc->vc_font.height; ops->var.vmode |= FB_VMODE_YWRAP; ops->update_start(info); scrollback_max += count; if (scrollback_max > scrollback_phys_max) scrollback_max = scrollback_phys_max; scrollback_current = 0; } static __inline__ void ywrap_down(struct vc_data *vc, int count) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; struct display *p = &fb_display[vc->vc_num]; p->yscroll -= count; if (p->yscroll < 0) /* Deal with wrap */ p->yscroll += p->vrows; ops->var.xoffset = 0; ops->var.yoffset = p->yscroll * vc->vc_font.height; ops->var.vmode |= FB_VMODE_YWRAP; ops->update_start(info); scrollback_max -= count; if (scrollback_max < 0) scrollback_max = 0; scrollback_current = 0; } static __inline__ void ypan_up(struct vc_data *vc, int count) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; p->yscroll += count; if (p->yscroll > p->vrows - vc->vc_rows) { ops->bmove(vc, info, p->vrows - vc->vc_rows, 
0, 0, 0, vc->vc_rows, vc->vc_cols);
		p->yscroll -= p->vrows - vc->vc_rows;
	}

	ops->var.xoffset = 0;
	ops->var.yoffset = p->yscroll * vc->vc_font.height;
	ops->var.vmode &= ~FB_VMODE_YWRAP;
	ops->update_start(info);
	fbcon_clear_margins(vc, 1);
	scrollback_max += count;
	if (scrollback_max > scrollback_phys_max)
		scrollback_max = scrollback_phys_max;
	scrollback_current = 0;
}

/*
 * Like ypan_up(), but the wrapped-around region is repainted in software
 * via fbcon_redraw_move() instead of the accelerated bmove.
 */
static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct fbcon_ops *ops = info->fbcon_par;
	struct display *p = &fb_display[vc->vc_num];

	p->yscroll += count;
	if (p->yscroll > p->vrows - vc->vc_rows) {
		p->yscroll -= p->vrows - vc->vc_rows;
		fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
	}

	ops->var.xoffset = 0;
	ops->var.yoffset = p->yscroll * vc->vc_font.height;
	ops->var.vmode &= ~FB_VMODE_YWRAP;
	ops->update_start(info);
	fbcon_clear_margins(vc, 1);
	scrollback_max += count;
	if (scrollback_max > scrollback_phys_max)
		scrollback_max = scrollback_phys_max;
	scrollback_current = 0;
}

/* Pan the display down by 'count' rows; wrap back via bmove when needed. */
static __inline__ void ypan_down(struct vc_data *vc, int count)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct display *p = &fb_display[vc->vc_num];
	struct fbcon_ops *ops = info->fbcon_par;

	p->yscroll -= count;
	if (p->yscroll < 0) {
		ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
			   0, vc->vc_rows, vc->vc_cols);
		p->yscroll += p->vrows - vc->vc_rows;
	}

	ops->var.xoffset = 0;
	ops->var.yoffset = p->yscroll * vc->vc_font.height;
	ops->var.vmode &= ~FB_VMODE_YWRAP;
	ops->update_start(info);
	fbcon_clear_margins(vc, 1);
	scrollback_max -= count;
	if (scrollback_max < 0)
		scrollback_max = 0;
	scrollback_current = 0;
}

/* Software-redraw variant of ypan_down(); see ypan_up_redraw(). */
static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct fbcon_ops *ops = info->fbcon_par;
	struct display *p = &fb_display[vc->vc_num];

	p->yscroll -= count;
	if (p->yscroll < 0) {
		p->yscroll += p->vrows - vc->vc_rows;
fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
	}

	ops->var.xoffset = 0;
	ops->var.yoffset = p->yscroll * vc->vc_font.height;
	ops->var.vmode &= ~FB_VMODE_YWRAP;
	ops->update_start(info);
	fbcon_clear_margins(vc, 1);
	scrollback_max -= count;
	if (scrollback_max < 0)
		scrollback_max = 0;
	scrollback_current = 0;
}

/*
 * Repaint the screen after moving 'delta' lines within the scrollback
 * (softback) ring buffer.  Both the source pointer 's' and the
 * on-screen comparison pointer 'd' wrap at softback_end and switch to
 * vc_origin on reaching softback_in, because the viewed region may span
 * the ring boundary.  Only cells that differ from what is already
 * displayed are repainted, batched per attribute run.
 */
static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
				  long delta)
{
	int count = vc->vc_rows;
	unsigned short *d, *s;
	unsigned long n;
	int line = 0;

	d = (u16 *) softback_curr;
	if (d == (u16 *) softback_in)
		d = (u16 *) vc->vc_origin;
	n = softback_curr + delta * vc->vc_size_row;
	softback_lines -= delta;
	if (delta < 0) {
		/* Scrolling back: clamp to the oldest kept line (softback_top). */
		if (softback_curr < softback_top && n < softback_buf) {
			n += softback_end - softback_buf;
			if (n < softback_top) {
				softback_lines -=
				    (softback_top - n) / vc->vc_size_row;
				n = softback_top;
			}
		} else if (softback_curr >= softback_top
			   && n < softback_top) {
			softback_lines -=
			    (softback_top - n) / vc->vc_size_row;
			n = softback_top;
		}
	} else {
		/* Scrolling forward: clamp to the newest line (softback_in). */
		if (softback_curr > softback_in && n >= softback_end) {
			n += softback_buf - softback_end;
			if (n > softback_in) {
				n = softback_in;
				softback_lines = 0;
			}
		} else if (softback_curr <= softback_in && n > softback_in) {
			n = softback_in;
			softback_lines = 0;
		}
	}
	if (n == softback_curr)
		return;
	softback_curr = n;
	s = (u16 *) softback_curr;
	if (s == (u16 *) softback_in)
		s = (u16 *) vc->vc_origin;
	while (count--) {
		unsigned short *start;
		unsigned short *le;
		unsigned short c;
		int x = 0;
		unsigned short attr = 1;

		start = s;
		le = advance_row(s, 1);
		do {
			c = scr_readw(s);
			if (attr != (c & 0xff00)) {
				/* Attribute changed: flush the pending run. */
				attr = c & 0xff00;
				if (s > start) {
					fbcon_putcs(vc, start, s - start,
						    line, x);
					x += s - start;
					start = s;
				}
			}
			if (c == scr_readw(d)) {
				/* Cell already on screen: skip it. */
				if (s > start) {
					fbcon_putcs(vc, start, s - start,
						    line, x);
					x += s - start + 1;
					start = s + 1;
				} else {
					x++;
					start++;
				}
			}
			s++;
			d++;
		} while (s < le);
		if (s > start)
			fbcon_putcs(vc, start, s - start, line, x);
		line++;
		if (d == (u16 *) softback_end)
			d = (u16 *) softback_buf;
if (d == (u16 *) softback_in)
			d = (u16 *) vc->vc_origin;
		if (s == (u16 *) softback_end)
			s = (u16 *) softback_buf;
		if (s == (u16 *) softback_in)
			s = (u16 *) vc->vc_origin;
	}
}

/*
 * Repaint 'count' rows starting at buffer row 'line' onto display row
 * 'dy' with fbcon_putcs(), batching runs of cells that share the same
 * attribute byte into a single putcs call.
 */
static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
			      int line, int count, int dy)
{
	unsigned short *s = (unsigned short *)
		(vc->vc_origin + vc->vc_size_row * line);

	while (count--) {
		unsigned short *start = s;
		unsigned short *le = advance_row(s, 1);
		unsigned short c;
		int x = 0;
		unsigned short attr = 1;

		do {
			c = scr_readw(s);
			if (attr != (c & 0xff00)) {
				attr = c & 0xff00;
				if (s > start) {
					fbcon_putcs(vc, start, s - start,
						    dy, x);
					x += s - start;
					start = s;
				}
			}
			console_conditional_schedule();
			s++;
		} while (s < le);
		if (s > start)
			fbcon_putcs(vc, start, s - start, dy, x);
		console_conditional_schedule();
		dy++;
	}
}

/*
 * Scroll by 'ycount' rows using the accelerated bmove, copying only the
 * runs of cells that actually differ from what is already on screen.
 * Walks rows forward for upward scrolls (ycount > 0) and backward for
 * downward scrolls, updating the screen buffer (scr_writew) as it goes.
 */
static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
			struct display *p, int line, int count, int ycount)
{
	int offset = ycount * vc->vc_cols;
	unsigned short *d = (unsigned short *)
	    (vc->vc_origin + vc->vc_size_row * line);
	unsigned short *s = d + offset;
	struct fbcon_ops *ops = info->fbcon_par;

	while (count--) {
		unsigned short *start = s;
		unsigned short *le = advance_row(s, 1);
		unsigned short c;
		int x = 0;

		do {
			c = scr_readw(s);

			if (c == scr_readw(d)) {
				/* Destination already matches: blit the run
				   accumulated so far and skip this cell. */
				if (s > start) {
					ops->bmove(vc, info, line + ycount, x,
						   line, x, 1, s-start);
					x += s - start + 1;
					start = s + 1;
				} else {
					x++;
					start++;
				}
			}

			scr_writew(c, d);
			console_conditional_schedule();
			s++;
			d++;
		} while (s < le);
		if (s > start)
			ops->bmove(vc, info, line + ycount, x, line, x, 1,
				   s-start);
		console_conditional_schedule();
		if (ycount > 0)
			line++;
		else {
			line--;
			/* NOTE: We subtract two lines from these pointers */
			s -= vc->vc_size_row;
			d -= vc->vc_size_row;
		}
	}
}

/*
 * Pure software scroll: redraw 'count' rows, taking the new contents
 * from 'offset' cells away, repainting only cells that changed.
 */
static void fbcon_redraw(struct vc_data *vc, struct display *p,
			 int line, int count, int offset)
{
	unsigned short *d = (unsigned short *)
	    (vc->vc_origin + vc->vc_size_row * line);
	unsigned short *s = d + offset;

	while (count--) {
		unsigned short
*start = s;
		unsigned short *le = advance_row(s, 1);
		unsigned short c;
		int x = 0;
		unsigned short attr = 1;

		do {
			c = scr_readw(s);
			if (attr != (c & 0xff00)) {
				attr = c & 0xff00;
				if (s > start) {
					fbcon_putcs(vc, start, s - start,
						    line, x);
					x += s - start;
					start = s;
				}
			}
			if (c == scr_readw(d)) {
				if (s > start) {
					fbcon_putcs(vc, start, s - start,
						    line, x);
					x += s - start + 1;
					start = s + 1;
				} else {
					x++;
					start++;
				}
			}
			scr_writew(c, d);
			console_conditional_schedule();
			s++;
			d++;
		} while (s < le);
		if (s > start)
			fbcon_putcs(vc, start, s - start, line, x);
		console_conditional_schedule();
		if (offset > 0)
			line++;
		else {
			line--;
			/* NOTE: We subtract two lines from these pointers */
			s -= vc->vc_size_row;
			d -= vc->vc_size_row;
		}
	}
}

/*
 * Save the 'count' rows that are about to scroll off (starting at row
 * 't') into the softback ring buffer so they stay reachable through
 * scrollback.  Advances softback_in, pushing softback_top forward when
 * the ring is full (overwriting the oldest lines).  Only done for the
 * foreground console.
 */
static inline void fbcon_softback_note(struct vc_data *vc, int t,
				       int count)
{
	unsigned short *p;

	if (vc->vc_num != fg_console)
		return;
	p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);

	while (count) {
		scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row);
		count--;
		p = advance_row(p, 1);
		softback_in += vc->vc_size_row;
		if (softback_in == softback_end)
			softback_in = softback_buf;
		if (softback_in == softback_top) {
			softback_top += vc->vc_size_row;
			if (softback_top == softback_end)
				softback_top = softback_buf;
		}
	}
	softback_curr = softback_in;
}

/*
 * Console-layer scroll entry point.  Picks a strategy from the
 * display's scrollmode; returns 1 when the scroll (including clearing
 * the vacated rows) was fully handled here, 0 to let the caller finish.
 */
static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
			int count)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct display *p = &fb_display[vc->vc_num];
	int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;

	if (fbcon_is_inactive(vc, info))
		return -EINVAL;

	fbcon_cursor(vc, CM_ERASE);

	/*
	 * ++Geert: Only use ywrap/ypan if the console is in text mode
	 * ++Andrew: Only use ypan on hardware text mode when scrolling the
	 *           whole screen (prevents flicker).
*/

	switch (dir) {
	case SM_UP:
		if (count > vc->vc_rows)	/* Maximum realistic size */
			count = vc->vc_rows;
		if (softback_top)
			fbcon_softback_note(vc, t, count);
		if (logo_shown >= 0)
			goto redraw_up;
		switch (p->scrollmode) {
		case SCROLL_MOVE:
			fbcon_redraw_blit(vc, info, p, t, b - t - count,
				     count);
			fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
			scr_memsetw((unsigned short *) (vc->vc_origin +
							vc->vc_size_row *
							(b - count)),
				    vc->vc_video_erase_char,
				    vc->vc_size_row * count);
			return 1;
			break;

		case SCROLL_WRAP_MOVE:
			/* Only ywrap when most of the region scrolls,
			   otherwise fall back to bmove or full redraw. */
			if (b - t - count > 3 * vc->vc_rows >> 2) {
				if (t > 0)
					fbcon_bmove(vc, 0, 0, count, 0, t,
						    vc->vc_cols);
				ywrap_up(vc, count);
				if (vc->vc_rows - b > 0)
					fbcon_bmove(vc, b - count, 0, b, 0,
						    vc->vc_rows - b,
						    vc->vc_cols);
			} else if (info->flags & FBINFO_READS_FAST)
				fbcon_bmove(vc, t + count, 0, t, 0,
					    b - t - count, vc->vc_cols);
			else
				goto redraw_up;
			fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
			break;

		case SCROLL_PAN_REDRAW:
			if ((p->yscroll + count <=
			     2 * (p->vrows - vc->vc_rows))
			    && ((!scroll_partial && (b - t == vc->vc_rows))
				|| (scroll_partial
				    && (b - t - count >
					3 * vc->vc_rows >> 2)))) {
				if (t > 0)
					fbcon_redraw_move(vc, p, 0, t, count);
				ypan_up_redraw(vc, t, count);
				if (vc->vc_rows - b > 0)
					fbcon_redraw_move(vc, p, b,
							  vc->vc_rows - b, b);
			} else
				fbcon_redraw_move(vc, p, t + count, b - t - count, t);
			fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
			break;

		case SCROLL_PAN_MOVE:
			if ((p->yscroll + count <=
			     2 * (p->vrows - vc->vc_rows))
			    && ((!scroll_partial && (b - t == vc->vc_rows))
				|| (scroll_partial
				    && (b - t - count >
					3 * vc->vc_rows >> 2)))) {
				if (t > 0)
					fbcon_bmove(vc, 0, 0, count, 0, t,
						    vc->vc_cols);
				ypan_up(vc, count);
				if (vc->vc_rows - b > 0)
					fbcon_bmove(vc, b - count, 0, b, 0,
						    vc->vc_rows - b,
						    vc->vc_cols);
			} else if (info->flags & FBINFO_READS_FAST)
				fbcon_bmove(vc, t + count, 0, t, 0,
					    b - t - count, vc->vc_cols);
			else
				goto redraw_up;
			fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
			break;

		case SCROLL_REDRAW:
		      redraw_up:
			fbcon_redraw(vc, p, t, b
- t - count, count * vc->vc_cols);
			fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
			scr_memsetw((unsigned short *) (vc->vc_origin +
							vc->vc_size_row *
							(b - count)),
				    vc->vc_video_erase_char,
				    vc->vc_size_row * count);
			return 1;
		}
		break;

	case SM_DOWN:
		if (count > vc->vc_rows)	/* Maximum realistic size */
			count = vc->vc_rows;
		if (logo_shown >= 0)
			goto redraw_down;
		switch (p->scrollmode) {
		case SCROLL_MOVE:
			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
				     -count);
			fbcon_clear(vc, t, 0, count, vc->vc_cols);
			scr_memsetw((unsigned short *) (vc->vc_origin +
							vc->vc_size_row *
							t),
				    vc->vc_video_erase_char,
				    vc->vc_size_row * count);
			return 1;
			break;

		case SCROLL_WRAP_MOVE:
			if (b - t - count > 3 * vc->vc_rows >> 2) {
				if (vc->vc_rows - b > 0)
					fbcon_bmove(vc, b, 0, b - count, 0,
						    vc->vc_rows - b,
						    vc->vc_cols);
				ywrap_down(vc, count);
				if (t > 0)
					fbcon_bmove(vc, count, 0, 0, 0, t,
						    vc->vc_cols);
			} else if (info->flags & FBINFO_READS_FAST)
				fbcon_bmove(vc, t, 0, t + count, 0,
					    b - t - count, vc->vc_cols);
			else
				goto redraw_down;
			fbcon_clear(vc, t, 0, count, vc->vc_cols);
			break;

		case SCROLL_PAN_MOVE:
			if ((count - p->yscroll <= p->vrows - vc->vc_rows)
			    && ((!scroll_partial && (b - t == vc->vc_rows))
				|| (scroll_partial
				    && (b - t - count >
					3 * vc->vc_rows >> 2)))) {
				if (vc->vc_rows - b > 0)
					fbcon_bmove(vc, b, 0, b - count, 0,
						    vc->vc_rows - b,
						    vc->vc_cols);
				ypan_down(vc, count);
				if (t > 0)
					fbcon_bmove(vc, count, 0, 0, 0, t,
						    vc->vc_cols);
			} else if (info->flags & FBINFO_READS_FAST)
				fbcon_bmove(vc, t, 0, t + count, 0,
					    b - t - count, vc->vc_cols);
			else
				goto redraw_down;
			fbcon_clear(vc, t, 0, count, vc->vc_cols);
			break;

		case SCROLL_PAN_REDRAW:
			if ((count - p->yscroll <= p->vrows - vc->vc_rows)
			    && ((!scroll_partial && (b - t == vc->vc_rows))
				|| (scroll_partial
				    && (b - t - count >
					3 * vc->vc_rows >> 2)))) {
				if (vc->vc_rows - b > 0)
					fbcon_redraw_move(vc, p, b, vc->vc_rows - b,
							  b - count);
				ypan_down_redraw(vc, t, count);
				if (t > 0)
					fbcon_redraw_move(vc, p, count, t, 0);
			} else
				fbcon_redraw_move(vc,
p, t, b - t - count, t + count);
			fbcon_clear(vc, t, 0, count, vc->vc_cols);
			break;

		case SCROLL_REDRAW:
		      redraw_down:
			fbcon_redraw(vc, p, b - 1, b - t - count,
				     -count * vc->vc_cols);
			fbcon_clear(vc, t, 0, count, vc->vc_cols);
			scr_memsetw((unsigned short *) (vc->vc_origin +
							vc->vc_size_row *
							t),
				    vc->vc_video_erase_char,
				    vc->vc_size_row * count);
			return 1;
		}
	}
	return 0;
}

/*
 * Move a rectangular block of character cells.  The real work happens
 * in fbcon_bmove_rec(), which splits moves crossing the physical y-wrap
 * boundary.
 */
static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
			int height, int width)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct display *p = &fb_display[vc->vc_num];

	if (fbcon_is_inactive(vc, info))
		return;

	if (!width || !height)
		return;

	/*  Split blits that cross physical y_wrap case.
	 *  Pathological case involves 4 blits, better to use recursive
	 *  code rather than unrolled case
	 *
	 *  Recursive invocations don't need to erase the cursor over and
	 *  over again, so we use fbcon_bmove_rec()
	 */
	fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
			p->vrows - p->yscroll);
}

/*
 * Recursive helper for fbcon_bmove(): splits the rectangle at 'y_break'
 * (the physical wrap line) for both source and destination, ordering
 * the two sub-moves so an overlapping copy never trashes itself.
 */
static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
			    int dy, int dx, int height, int width, u_int y_break)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct fbcon_ops *ops = info->fbcon_par;
	u_int b;

	if (sy < y_break && sy + height > y_break) {
		b = y_break - sy;
		if (dy < sy) {	/* Avoid trashing self */
			fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
					y_break);
			fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
					height - b, width, y_break);
		} else {
			fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
					height - b, width, y_break);
			fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
					y_break);
		}
		return;
	}

	if (dy < y_break && dy + height > y_break) {
		b = y_break - dy;
		if (dy < sy) {	/* Avoid trashing self */
			fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
					y_break);
			fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
					height - b, width, y_break);
		} else {
			fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
					height - b, width, y_break);
			fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b,
width, y_break);
		}
		return;
	}
	ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
		   height, width);
}

/*
 * Re-evaluate the scrolling strategy for this display, based on the
 * hardware's panning/wrapping capabilities, the acceleration flags and
 * whether the font height divides the (virtual) resolution.  Also
 * recomputes p->vrows, the number of virtual character rows.
 */
static void updatescrollmode(struct display *p,
					struct fb_info *info,
					struct vc_data *vc)
{
	struct fbcon_ops *ops = info->fbcon_par;
	int fh = vc->vc_font.height;
	int cap = info->flags;
	u16 t = 0;
	int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
				  info->fix.xpanstep);
	int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
	int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
	int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
				   info->var.xres_virtual);
	int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
		divides(ypan, vc->vc_font.height) && vyres > yres;
	int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
		divides(ywrap, vc->vc_font.height) &&
		divides(vc->vc_font.height, vyres) &&
		divides(vc->vc_font.height, yres);
	int reading_fast = cap & FBINFO_READS_FAST;
	int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
		!(cap & FBINFO_HWACCEL_DISABLED);
	int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) &&
		!(cap & FBINFO_HWACCEL_DISABLED);

	p->vrows = vyres/fh;
	if (yres > (fh * (vc->vc_rows + 1)))
		p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
	if ((yres % fh) && (vyres % fh < yres % fh))
		p->vrows--;

	if (good_wrap || good_pan) {
		if (reading_fast || fast_copyarea)
			p->scrollmode = good_wrap ?
				SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
		else
			p->scrollmode = good_wrap ?
SCROLL_REDRAW : SCROLL_PAN_REDRAW;
	} else {
		if (reading_fast || (fast_copyarea && !fast_imageblit))
			p->scrollmode = SCROLL_MOVE;
		else
			p->scrollmode = SCROLL_REDRAW;
	}
}

/*
 * Resize the console to width x height characters.  If the new size
 * does not fit the current video mode, search the modelist for a better
 * mode and (for the visible console) program it immediately.
 */
static int fbcon_resize(struct vc_data *vc, unsigned int width, 
			unsigned int height, unsigned int user)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct fbcon_ops *ops = info->fbcon_par;
	struct display *p = &fb_display[vc->vc_num];
	struct fb_var_screeninfo var = info->var;
	int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;

	virt_w = FBCON_SWAP(ops->rotate, width, height);
	virt_h = FBCON_SWAP(ops->rotate, height, width);
	virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
				 vc->vc_font.height);
	virt_fh = FBCON_SWAP(ops->rotate, vc->vc_font.height,
				 vc->vc_font.width);
	var.xres = virt_w * virt_fw;
	var.yres = virt_h * virt_fh;
	x_diff = info->var.xres - var.xres;
	y_diff = info->var.yres - var.yres;
	if (x_diff < 0 || x_diff > virt_fw ||
	    y_diff < 0 || y_diff > virt_fh) {
		const struct fb_videomode *mode;

		DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
		mode = fb_find_best_mode(&var, &info->modelist);
		if (mode == NULL)
			return -EINVAL;
		display_to_var(&var, p);
		fb_videomode_to_var(&var, mode);

		if (virt_w > var.xres/virt_fw || virt_h > var.yres/virt_fh)
			return -EINVAL;

		DPRINTK("resize now %ix%i\n", var.xres, var.yres);
		if (CON_IS_VISIBLE(vc)) {
			var.activate = FB_ACTIVATE_NOW |
				FB_ACTIVATE_FORCE;
			fb_set_var(info, &var);
		}
		var_to_display(p, &info->var, info);
		ops->var = info->var;
	}
	updatescrollmode(p, info, vc);
	return 0;
}

/*
 * Make 'vc' the foreground console on its framebuffer: flush and reset
 * the softback scrollback state, reprogram the video mode, and refresh
 * palette, cursor and blitting operations for the new console.
 */
static int fbcon_switch(struct vc_data *vc)
{
	struct fb_info *info, *old_info = NULL;
	struct fbcon_ops *ops;
	struct display *p = &fb_display[vc->vc_num];
	struct fb_var_screeninfo var;
	int i, ret, prev_console, charcnt = 256;

	info = registered_fb[con2fb_map[vc->vc_num]];
	ops = info->fbcon_par;

	if (softback_top) {
		/* Leave scrollback mode and empty the softback ring. */
		if (softback_lines)
			fbcon_set_origin(vc);
		softback_top = softback_curr = softback_in = softback_buf;
		softback_lines = 0;
		fbcon_update_softback(vc);
	}
if (logo_shown >= 0) {
		struct vc_data *conp2 = vc_cons[logo_shown].d;

		/* Release the rows reserved for the boot logo. */
		if (conp2->vc_top == logo_lines
		    && conp2->vc_bottom == conp2->vc_rows)
			conp2->vc_top = 0;
		logo_shown = FBCON_LOGO_CANSHOW;
	}

	prev_console = ops->currcon;
	if (prev_console != -1)
		old_info = registered_fb[con2fb_map[prev_console]];
	/*
	 * FIXME: If we have multiple fbdev's loaded, we need to
	 * update all info->currcon.  Perhaps, we can place this
	 * in a centralized structure, but this might break some
	 * drivers.
	 *
	 * info->currcon = vc->vc_num;
	 */
	for (i = 0; i < FB_MAX; i++) {
		if (registered_fb[i] != NULL && registered_fb[i]->fbcon_par) {
			struct fbcon_ops *o = registered_fb[i]->fbcon_par;

			o->currcon = vc->vc_num;
		}
	}
	memset(&var, 0, sizeof(struct fb_var_screeninfo));
	display_to_var(&var, p);
	var.activate = FB_ACTIVATE_NOW;

	/*
	 * make sure we don't unnecessarily trip the memcmp()
	 * in fb_set_var()
	 */
	info->var.activate = var.activate;
	var.vmode |= info->var.vmode & ~FB_VMODE_MASK;
	fb_set_var(info, &var);
	ops->var = info->var;

	if (old_info != NULL && (old_info != info ||
				 info->flags & FBINFO_MISC_ALWAYS_SETPAR)) {
		if (info->fbops->fb_set_par) {
			ret = info->fbops->fb_set_par(info);

			if (ret)
				printk(KERN_ERR "fbcon_switch: detected "
					"unhandled fb_set_par error, "
					"error code %d\n", ret);
		}

		if (old_info != info)
			fbcon_del_cursor_timer(old_info);
	}

	if (fbcon_is_inactive(vc, info) ||
	    ops->blank_state != FB_BLANK_UNBLANK)
		fbcon_del_cursor_timer(info);
	else
		fbcon_add_cursor_timer(info);

	set_blitting_type(vc, info);
	ops->cursor_reset = 1;

	if (ops->rotate_font && ops->rotate_font(info, vc)) {
		/* Rotated font could not be prepared: fall back to upright. */
		ops->rotate = FB_ROTATE_UR;
		set_blitting_type(vc, info);
	}

	vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1);
	vc->vc_complement_mask = vc->vc_can_do_color ?
0x7700 : 0x0800;

	if (p->userfont)
		charcnt = FNTCHARCNT(vc->vc_font.data);

	if (charcnt > 256)
		vc->vc_complement_mask <<= 1;

	updatescrollmode(p, info, vc);

	/* Derive how many rows of hardware scrollback are available. */
	switch (p->scrollmode) {
	case SCROLL_WRAP_MOVE:
		scrollback_phys_max = p->vrows - vc->vc_rows;
		break;
	case SCROLL_PAN_MOVE:
	case SCROLL_PAN_REDRAW:
		scrollback_phys_max = p->vrows - 2 * vc->vc_rows;
		if (scrollback_phys_max < 0)
			scrollback_phys_max = 0;
		break;
	default:
		scrollback_phys_max = 0;
		break;
	}

	scrollback_max = 0;
	scrollback_current = 0;

	if (!fbcon_is_inactive(vc, info)) {
		ops->var.xoffset = ops->var.yoffset = p->yscroll = 0;
		ops->update_start(info);
	}

	fbcon_set_palette(vc, color_table);
	fbcon_clear_margins(vc, 0);

	if (logo_shown == FBCON_LOGO_DRAW) {

		logo_shown = fg_console;
		/* This is protected above by initmem_freed */
		fb_show_logo(info, ops->rotate);
		update_region(vc,
			      vc->vc_origin + vc->vc_size_row * vc->vc_top,
			      vc->vc_size_row * (vc->vc_bottom -
						 vc->vc_top) / 2);
		return 0;
	}
	return 1;
}

/*
 * Software blank fallback (used when the driver's fb_blank fails):
 * blanking is simulated by clearing the screen with the erase
 * character.  An FB_EVENT_CONBLANK notification is sent in both
 * directions.
 */
static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
				int blank)
{
	struct fb_event event;

	if (blank) {
		unsigned short charmask = vc->vc_hi_font_mask ?
			0x1ff : 0xff;
		unsigned short oldc;

		oldc = vc->vc_video_erase_char;
		vc->vc_video_erase_char &= charmask;
		fbcon_clear(vc, 0, 0, vc->vc_rows, vc->vc_cols);
		vc->vc_video_erase_char = oldc;
	}

	if (!lock_fb_info(info))
		return;
	event.info = info;
	event.data = &blank;
	fb_notifier_call_chain(FB_EVENT_CONBLANK, &event);
	unlock_fb_info(info);
}

/*
 * Console blanking hook.  'mode_switch' marks a KD_TEXT/KD_GRAPHICS
 * transition, which forces the var to be reprogrammed on unblank.
 */
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct fbcon_ops *ops = info->fbcon_par;

	if (mode_switch) {
		struct fb_var_screeninfo var = info->var;

		ops->graphics = 1;

		if (!blank) {
			var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
			fb_set_var(info, &var);
			ops->graphics = 0;
			ops->var = info->var;
		}
	}

	if (!fbcon_is_inactive(vc, info)) {
		if (ops->blank_state != blank) {
			ops->blank_state = blank;
			fbcon_cursor(vc, blank ?
CM_ERASE : CM_DRAW);
			ops->cursor_flash = (!blank);

			if (!(info->flags & FBINFO_MISC_USEREVENT))
				if (fb_blank(info, blank))
					/* Driver could not blank: emulate. */
					fbcon_generic_blank(vc, info, blank);
		}

		if (!blank)
			update_screen(vc);
	}

	if (mode_switch || fbcon_is_inactive(vc, info) ||
	    ops->blank_state != FB_BLANK_UNBLANK)
		fbcon_del_cursor_timer(info);
	else
		fbcon_add_cursor_timer(info);

	return 0;
}

/*
 * Debugger entry hook: force text blitting (graphics off), let the
 * driver prepare (fb_debug_enter) and restore the default palette.
 */
static int fbcon_debug_enter(struct vc_data *vc)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct fbcon_ops *ops = info->fbcon_par;

	ops->save_graphics = ops->graphics;
	ops->graphics = 0;
	if (info->fbops->fb_debug_enter)
		info->fbops->fb_debug_enter(info);
	fbcon_set_palette(vc, color_table);
	return 0;
}

/* Debugger leave hook: restore the graphics state saved on entry. */
static int fbcon_debug_leave(struct vc_data *vc)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct fbcon_ops *ops = info->fbcon_par;

	ops->graphics = ops->save_graphics;
	if (info->fbops->fb_debug_leave)
		info->fbops->fb_debug_leave(info);
	return 0;
}

/*
 * Export the current console font.  Glyph data is copied into the
 * user-visible layout of 32/64/128 bytes per character (depending on
 * width), zero-padding rows beyond the actual font height.
 */
static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
{
	u8 *fontdata = vc->vc_font.data;
	u8 *data = font->data;
	int i, j;

	font->width = vc->vc_font.width;
	font->height = vc->vc_font.height;
	font->charcount = vc->vc_hi_font_mask ?
512 : 256;
	if (!font->data)
		return 0;

	if (font->width <= 8) {
		j = vc->vc_font.height;
		for (i = 0; i < font->charcount; i++) {
			memcpy(data, fontdata, j);
			memset(data + j, 0, 32 - j);
			data += 32;
			fontdata += j;
		}
	} else if (font->width <= 16) {
		j = vc->vc_font.height * 2;
		for (i = 0; i < font->charcount; i++) {
			memcpy(data, fontdata, j);
			memset(data + j, 0, 64 - j);
			data += 64;
			fontdata += j;
		}
	} else if (font->width <= 24) {
		for (i = 0; i < font->charcount; i++) {
			for (j = 0; j < vc->vc_font.height; j++) {
				/* Source rows are stored one per 32-bit
				   word; only 3 bytes carry glyph data. */
				*data++ = fontdata[0];
				*data++ = fontdata[1];
				*data++ = fontdata[2];
				fontdata += sizeof(u32);
			}
			memset(data, 0, 3 * (32 - j));
			data += 3 * (32 - j);
		}
	} else {
		j = vc->vc_font.height * 4;
		for (i = 0; i < font->charcount; i++) {
			memcpy(data, fontdata, j);
			memset(data + j, 0, 128 - j);
			data += 128;
			fontdata += j;
		}
	}
	return 0;
}

/*
 * Install new font data on this console.  Handles refcounting of user
 * fonts, the 256<->512 glyph transitions (which change vc_hi_font_mask
 * and require reordering the attribute bits of every character cell
 * already on screen), and resizes the console when the glyph dimensions
 * changed.
 */
static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
			     const u8 * data, int userfont)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	struct fbcon_ops *ops = info->fbcon_par;
	struct display *p = &fb_display[vc->vc_num];
	int resize;
	int cnt;
	char *old_data = NULL;

	if (CON_IS_VISIBLE(vc) && softback_lines)
		fbcon_set_origin(vc);

	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
	if (p->userfont)
		old_data = vc->vc_font.data;
	if (userfont)
		cnt = FNTCHARCNT(data);
	else
		cnt = 256;
	vc->vc_font.data = (void *)(p->fontdata = data);
	if ((p->userfont = userfont))
		REFCOUNT(data)++;
	vc->vc_font.width = w;
	vc->vc_font.height = h;
	if (vc->vc_hi_font_mask && cnt == 256) {
		/* Switching from a 512- to a 256-glyph font. */
		vc->vc_hi_font_mask = 0;
		if (vc->vc_can_do_color) {
			vc->vc_complement_mask >>= 1;
			vc->vc_s_complement_mask >>= 1;
		}

		/* ++Edmund: reorder the attribute bits */
		if (vc->vc_can_do_color) {
			unsigned short *cp =
			    (unsigned short *) vc->vc_origin;
			int count = vc->vc_screenbuf_size / 2;
			unsigned short c;
			for (; count > 0; count--, cp++) {
				c = scr_readw(cp);
				scr_writew(((c & 0xfe00) >> 1) | (c & 0xff),
					   cp);
			}
			c = vc->vc_video_erase_char;
			vc->vc_video_erase_char =
((c & 0xfe00) >> 1) | (c & 0xff);
			vc->vc_attr >>= 1;
		}
	} else if (!vc->vc_hi_font_mask && cnt == 512) {
		/* Switching from a 256- to a 512-glyph font. */
		vc->vc_hi_font_mask = 0x100;
		if (vc->vc_can_do_color) {
			vc->vc_complement_mask <<= 1;
			vc->vc_s_complement_mask <<= 1;
		}

		/* ++Edmund: reorder the attribute bits */
		{
			unsigned short *cp =
			    (unsigned short *) vc->vc_origin;
			int count = vc->vc_screenbuf_size / 2;
			unsigned short c;
			for (; count > 0; count--, cp++) {
				unsigned short newc;
				c = scr_readw(cp);
				if (vc->vc_can_do_color)
					newc =
					    ((c & 0xff00) << 1) | (c &
								   0xff);
				else
					newc = c & ~0x100;
				scr_writew(newc, cp);
			}
			c = vc->vc_video_erase_char;
			if (vc->vc_can_do_color) {
				vc->vc_video_erase_char =
				    ((c & 0xff00) << 1) | (c & 0xff);
				vc->vc_attr <<= 1;
			} else
				vc->vc_video_erase_char = c & ~0x100;
		}
	}

	if (resize) {
		int cols, rows;

		cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
		cols /= w;
		rows /= h;
		vc_resize(vc, cols, rows);
		if (CON_IS_VISIBLE(vc) && softback_buf)
			fbcon_update_softback(vc);
	} else if (CON_IS_VISIBLE(vc)
		   && vc->vc_mode == KD_TEXT) {
		fbcon_clear_margins(vc, 0);
		update_screen(vc);
	}

	/* Drop the last reference to the previous user font. */
	if (old_data && (--REFCOUNT(old_data) == 0))
		kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
	return 0;
}

/* Share console 'con's font with 'vc' (no-op when already identical). */
static int fbcon_copy_font(struct vc_data *vc, int con)
{
	struct display *od = &fb_display[con];
	struct console_font *f = &vc->vc_font;

	if (od->fontdata == f->data)
		return 0;	/* already the same font... */
	return fbcon_do_set_font(vc, f->width, f->height, od->fontdata, od->userfont);
}

/*
 *  User asked to set font; we are guaranteed that
 *	a) width and height are in range 1..32
 *	b) charcount does not exceed 512
 *  but lets not assume that, since someone might someday want to use larger
 *  fonts. And charcount of 512 is small for unicode support.
 *
 *  However, user space gives the font in 32 rows , regardless of
 *  actual font height. So a new API is needed if support for larger fonts
 *  is ever implemented.
*/

static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
			  unsigned flags)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	unsigned charcount = font->charcount;
	int w = font->width;
	int h = font->height;
	int size;
	int i, csum;
	u8 *new_data, *data = font->data;
	int pitch = (font->width+7) >> 3;

	/* Is there a reason why fbconsole couldn't handle any charcount >256?
	 * If not this check should be changed to charcount < 256 */
	if (charcount != 256 && charcount != 512)
		return -EINVAL;

	/* Make sure drawing engine can handle the font */
	if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
	    !(info->pixmap.blit_y & (1 << (font->height - 1))))
		return -EINVAL;

	/* Make sure driver can handle the font length */
	if (fbcon_invalid_charcount(info, charcount))
		return -EINVAL;

	size = h * pitch * charcount;

	new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);

	if (!new_data)
		return -ENOMEM;

	/* Glyph payload starts after FONT_EXTRA_WORDS words of metadata. */
	new_data += FONT_EXTRA_WORDS * sizeof(int);
	FNTSIZE(new_data) = size;
	FNTCHARCNT(new_data) = charcount;
	REFCOUNT(new_data) = 0;	/* usage counter */
	/* Repack: user space provides 32 rows per glyph, keep only 'h'. */
	for (i=0; i< charcount; i++) {
		memcpy(new_data + i*h*pitch, data + i*32*pitch, h*pitch);
	}

	/* Since linux has a nice crc32 function use it for counting font
	 * checksums.
*/
	csum = crc32(0, new_data, size);

	FNTSUM(new_data) = csum;
	/* Check if the same font is on some other console already */
	for (i = first_fb_vc; i <= last_fb_vc; i++) {
		struct vc_data *tmp = vc_cons[i].d;
		
		if (fb_display[i].userfont &&
		    fb_display[i].fontdata &&
		    FNTSUM(fb_display[i].fontdata) == csum &&
		    FNTSIZE(fb_display[i].fontdata) == size &&
		    tmp->vc_font.width == w &&
		    !memcmp(fb_display[i].fontdata, new_data, size)) {
			/* Identical font already loaded: share it. */
			kfree(new_data - FONT_EXTRA_WORDS * sizeof(int));
			new_data = (u8 *)fb_display[i].fontdata;
			break;
		}
	}
	return fbcon_do_set_font(vc, font->width, font->height, new_data, 1);
}

/* Load a named built-in kernel font, or a size-matched default one. */
static int fbcon_set_def_font(struct vc_data *vc,
			      struct console_font *font, char *name)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	const struct font_desc *f;

	if (!name)
		f = get_default_font(info->var.xres, info->var.yres,
				     info->pixmap.blit_x, info->pixmap.blit_y);
	else if (!(f = find_font(name)))
		return -ENOENT;

	font->width = f->width;
	font->height = f->height;
	return fbcon_do_set_font(vc, f->width, f->height, f->data, 0);
}

/* Scratch colormap shared by fbcon_set_palette(). */
static u16 palette_red[16];
static u16 palette_green[16];
static u16 palette_blue[16];

static struct fb_cmap palette_cmap = {
	0, 16, palette_red, palette_green, palette_blue, NULL
};

/*
 * Program the framebuffer colormap from the console palette.  For
 * depths above 3 bits the 16 console colors are expanded to 16-bit
 * components; shallower visuals fall back to fbcon's default colormap.
 */
static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
{
	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
	int i, j, k, depth;
	u8 val;

	if (fbcon_is_inactive(vc, info))
		return -EINVAL;

	if (!CON_IS_VISIBLE(vc))
		return 0;

	depth = fb_get_color_depth(&info->var, &info->fix);
	if (depth > 3) {
		for (i = j = 0; i < 16; i++) {
			k = table[i];
			val = vc->vc_palette[j++];
			palette_red[k] = (val << 8) | val;
			val = vc->vc_palette[j++];
			palette_green[k] = (val << 8) | val;
			val = vc->vc_palette[j++];
			palette_blue[k] = (val << 8) | val;
		}
		palette_cmap.len = 16;
		palette_cmap.start = 0;
	/*
	 * If framebuffer is capable of less than 16 colors,
	 * use default palette of fbcon.
*/
	} else
		fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);

	return fb_set_cmap(&palette_cmap, info);
}

/*
 * Translate a byte offset within the visible screen to its actual
 * buffer address, taking an active scrollback (softback) view into
 * account: the first softback_lines rows come from the ring buffer.
 */
static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
{
	unsigned long p;
	int line;
	
	if (vc->vc_num != fg_console || !softback_lines)
		return (u16 *) (vc->vc_origin + offset);
	line = offset / vc->vc_size_row;
	if (line >= softback_lines)
		return (u16 *) (vc->vc_origin + offset -
				softback_lines * vc->vc_size_row);
	p = softback_curr + offset;
	if (p >= softback_end)
		p += softback_buf - softback_end;	/* wrap in the ring */
	return (u16 *) p;
}

/*
 * Map a screen-buffer (or softback) position to x/y character
 * coordinates and return the address of the start of the next row.
 */
static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
				 int *px, int *py)
{
	unsigned long ret;
	int x, y;

	if (pos >= vc->vc_origin && pos < vc->vc_scr_end) {
		unsigned long offset = (pos - vc->vc_origin) / 2;

		x = offset % vc->vc_cols;
		y = offset / vc->vc_cols;
		if (vc->vc_num == fg_console)
			y += softback_lines;
		ret = pos + (vc->vc_cols - x) * 2;
	} else if (vc->vc_num == fg_console && softback_lines) {
		unsigned long offset = pos - softback_curr;

		if (pos < softback_curr)
			offset += softback_end - softback_buf;
		offset /= 2;
		x = offset % vc->vc_cols;
		y = offset / vc->vc_cols;
		ret = pos + (vc->vc_cols - x) * 2;
		if (ret == softback_end)
			ret = softback_buf;
		if (ret == softback_in)
			ret = vc->vc_origin;
	} else {
		/* Should not happen */
		x = y = 0;
		ret = vc->vc_origin;
	}
	if (px)
		*px = x;
	if (py)
		*py = y;
	return ret;
}

/* As we might be inside of softback, we may work with non-contiguous buffer,
   that's why we have to use a separate routine.
*/
static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
{
	while (cnt--) {
		u16 a = scr_readw(p);
		if (!vc->vc_can_do_color)
			a ^= 0x0800;	/* mono: toggle the reverse bit */
		else if (vc->vc_hi_font_mask == 0x100)
			a = ((a) & 0x11ff) | (((a) & 0xe000) >> 4) |
			    (((a) & 0x0e00) << 4);
		else
			a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) |
			    (((a) & 0x0700) << 4);
		scr_writew(a, p++);
		/* Follow the softback ring when the pointer wraps. */
		if (p == (u16 *) softback_end)
			p = (u16 *) softback_buf;
		if (p == (u16 *) softback_in)
			p = (u16 *) vc->vc_origin;
	}
}

/*
 * Scroll the scrollback view by 'lines' (positive = back in history).
 * With a softback buffer the view is redrawn in software; otherwise the
 * hardware pan offset is moved within scrollback_phys_max.
 */
static int fbcon_scrolldelta(struct vc_data *vc, int lines)
{
	struct fb_info *info = registered_fb[con2fb_map[fg_console]];
	struct fbcon_ops *ops = info->fbcon_par;
	struct display *disp = &fb_display[fg_console];
	int offset, limit, scrollback_old;

	if (softback_top) {
		if (vc->vc_num != fg_console)
			return 0;
		if (vc->vc_mode != KD_TEXT || !lines)
			return 0;
		if (logo_shown >= 0) {
			struct vc_data *conp2 = vc_cons[logo_shown].d;

			if (conp2->vc_top == logo_lines
			    && conp2->vc_bottom == conp2->vc_rows)
				conp2->vc_top = 0;
			if (logo_shown == vc->vc_num) {
				unsigned long p, q;
				int i;

				/* Copy the lines hidden behind the logo back
				   from the softback ring into the screen. */
				p = softback_in;
				q = vc->vc_origin +
				    logo_lines * vc->vc_size_row;
				for (i = 0; i < logo_lines; i++) {
					if (p == softback_top)
						break;
					if (p == softback_buf)
						p = softback_end;
					p -= vc->vc_size_row;
					q -= vc->vc_size_row;
					scr_memcpyw((u16 *) q, (u16 *) p,
						    vc->vc_size_row);
				}
				softback_in = softback_curr = p;
				update_region(vc, vc->vc_origin,
					      logo_lines * vc->vc_cols);
			}
			logo_shown = FBCON_LOGO_CANSHOW;
		}
		fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
		fbcon_redraw_softback(vc, disp, lines);
		fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
		return 0;
	}

	if (!scrollback_phys_max)
		return -ENOSYS;

	scrollback_old = scrollback_current;
	scrollback_current -= lines;
	if (scrollback_current < 0)
		scrollback_current = 0;
	else if (scrollback_current > scrollback_max)
		scrollback_current = scrollback_max;
	if (scrollback_current == scrollback_old)
		return 0;

	if (fbcon_is_inactive(vc, info))
		return 0;

	fbcon_cursor(vc, CM_ERASE);

	offset = disp->yscroll - scrollback_current;
	limit =
disp->vrows; switch (disp->scrollmode) { case SCROLL_WRAP_MOVE: info->var.vmode |= FB_VMODE_YWRAP; break; case SCROLL_PAN_MOVE: case SCROLL_PAN_REDRAW: limit -= vc->vc_rows; info->var.vmode &= ~FB_VMODE_YWRAP; break; } if (offset < 0) offset += limit; else if (offset >= limit) offset -= limit; ops->var.xoffset = 0; ops->var.yoffset = offset * vc->vc_font.height; ops->update_start(info); if (!scrollback_current) fbcon_cursor(vc, CM_DRAW); return 0; } static int fbcon_set_origin(struct vc_data *vc) { if (softback_lines) fbcon_scrolldelta(vc, softback_lines); return 0; } static void fbcon_suspended(struct fb_info *info) { struct vc_data *vc = NULL; struct fbcon_ops *ops = info->fbcon_par; if (!ops || ops->currcon < 0) return; vc = vc_cons[ops->currcon].d; /* Clear cursor, restore saved data */ fbcon_cursor(vc, CM_ERASE); } static void fbcon_resumed(struct fb_info *info) { struct vc_data *vc; struct fbcon_ops *ops = info->fbcon_par; if (!ops || ops->currcon < 0) return; vc = vc_cons[ops->currcon].d; update_screen(vc); } static void fbcon_modechanged(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc; struct display *p; int rows, cols; if (!ops || ops->currcon < 0) return; vc = vc_cons[ops->currcon].d; if (vc->vc_mode != KD_TEXT || registered_fb[con2fb_map[ops->currcon]] != info) return; p = &fb_display[vc->vc_num]; set_blitting_type(vc, info); if (CON_IS_VISIBLE(vc)) { var_to_display(p, &info->var, info); cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); updatescrollmode(p, info, vc); scrollback_max = 0; scrollback_current = 0; if (!fbcon_is_inactive(vc, info)) { ops->var.xoffset = ops->var.yoffset = p->yscroll = 0; ops->update_start(info); } fbcon_set_palette(vc, color_table); update_screen(vc); if (softback_buf) fbcon_update_softback(vc); } } static void 
fbcon_set_all_vcs(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc; struct display *p; int i, rows, cols, fg = -1; if (!ops || ops->currcon < 0) return; for (i = first_fb_vc; i <= last_fb_vc; i++) { vc = vc_cons[i].d; if (!vc || vc->vc_mode != KD_TEXT || registered_fb[con2fb_map[i]] != info) continue; if (CON_IS_VISIBLE(vc)) { fg = i; continue; } p = &fb_display[vc->vc_num]; set_blitting_type(vc, info); var_to_display(p, &info->var, info); cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); } if (fg != -1) fbcon_modechanged(info); } static int fbcon_mode_deleted(struct fb_info *info, struct fb_videomode *mode) { struct fb_info *fb_info; struct display *p; int i, j, found = 0; /* before deletion, ensure that mode is not in use */ for (i = first_fb_vc; i <= last_fb_vc; i++) { j = con2fb_map[i]; if (j == -1) continue; fb_info = registered_fb[j]; if (fb_info != info) continue; p = &fb_display[i]; if (!p || !p->mode) continue; if (fb_mode_is_equal(p->mode, mode)) { found = 1; break; } } return found; } #ifdef CONFIG_VT_HW_CONSOLE_BINDING static int fbcon_unbind(void) { int ret; ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, fbcon_is_default); if (!ret) fbcon_has_console_bind = 0; return ret; } #else static inline int fbcon_unbind(void) { return -EINVAL; } #endif /* CONFIG_VT_HW_CONSOLE_BINDING */ /* called with console_lock held */ static int fbcon_fb_unbind(int idx) { int i, new_idx = -1, ret = 0; if (!fbcon_has_console_bind) return 0; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] != idx && con2fb_map[i] != -1) { new_idx = i; break; } } if (new_idx != -1) { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == idx) set_con2fb_map(i, new_idx, 0); } } else ret = fbcon_unbind(); return ret; } /* called with console_lock held */ static int 
fbcon_fb_unregistered(struct fb_info *info) { int i, idx; idx = info->node; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == idx) con2fb_map[i] = -1; } if (idx == info_idx) { info_idx = -1; for (i = 0; i < FB_MAX; i++) { if (registered_fb[i] != NULL) { info_idx = i; break; } } } if (info_idx != -1) { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == -1) con2fb_map[i] = info_idx; } } if (primary_device == idx) primary_device = -1; if (!num_registered_fb) do_unregister_con_driver(&fb_con); return 0; } /* called with console_lock held */ static void fbcon_remap_all(int idx) { int i; for (i = first_fb_vc; i <= last_fb_vc; i++) set_con2fb_map(i, idx, 0); if (con_is_bound(&fb_con)) { printk(KERN_INFO "fbcon: Remapping primary device, " "fb%i, to tty %i-%i\n", idx, first_fb_vc + 1, last_fb_vc + 1); info_idx = idx; } } #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY static void fbcon_select_primary(struct fb_info *info) { if (!map_override && primary_device == -1 && fb_is_primary_device(info)) { int i; printk(KERN_INFO "fbcon: %s (fb%i) is primary device\n", info->fix.id, info->node); primary_device = info->node; for (i = first_fb_vc; i <= last_fb_vc; i++) con2fb_map_boot[i] = primary_device; if (con_is_bound(&fb_con)) { printk(KERN_INFO "fbcon: Remapping primary device, " "fb%i, to tty %i-%i\n", info->node, first_fb_vc + 1, last_fb_vc + 1); info_idx = primary_device; } } } #else static inline void fbcon_select_primary(struct fb_info *info) { return; } #endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */ /* called with console_lock held */ static int fbcon_fb_registered(struct fb_info *info) { int ret = 0, i, idx; idx = info->node; fbcon_select_primary(info); if (info_idx == -1) { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map_boot[i] == idx) { info_idx = idx; break; } } if (info_idx != -1) ret = do_fbcon_takeover(1); } else { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map_boot[i] == idx) set_con2fb_map(i, idx, 0); 
} } return ret; } static void fbcon_fb_blanked(struct fb_info *info, int blank) { struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc; if (!ops || ops->currcon < 0) return; vc = vc_cons[ops->currcon].d; if (vc->vc_mode != KD_TEXT || registered_fb[con2fb_map[ops->currcon]] != info) return; if (CON_IS_VISIBLE(vc)) { if (blank) do_blank_screen(0); else do_unblank_screen(0); } ops->blank_state = blank; } static void fbcon_new_modelist(struct fb_info *info) { int i; struct vc_data *vc; struct fb_var_screeninfo var; const struct fb_videomode *mode; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (registered_fb[con2fb_map[i]] != info) continue; if (!fb_display[i].mode) continue; vc = vc_cons[i].d; display_to_var(&var, &fb_display[i]); mode = fb_find_nearest_mode(fb_display[i].mode, &info->modelist); fb_videomode_to_var(&var, mode); fbcon_set_disp(info, &var, vc->vc_num); } } static void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps) { struct vc_data *vc; struct display *p; if (caps->flags) { int i, charcnt; for (i = first_fb_vc; i <= last_fb_vc; i++) { vc = vc_cons[i].d; if (vc && vc->vc_mode == KD_TEXT && info->node == con2fb_map[i]) { p = &fb_display[i]; caps->x |= 1 << (vc->vc_font.width - 1); caps->y |= 1 << (vc->vc_font.height - 1); charcnt = (p->userfont) ? FNTCHARCNT(p->fontdata) : 256; if (caps->len < charcnt) caps->len = charcnt; } } } else { vc = vc_cons[fg_console].d; if (vc && vc->vc_mode == KD_TEXT && info->node == con2fb_map[fg_console]) { p = &fb_display[fg_console]; caps->x = 1 << (vc->vc_font.width - 1); caps->y = 1 << (vc->vc_font.height - 1); caps->len = (p->userfont) ? 
FNTCHARCNT(p->fontdata) : 256; } } } static int fbcon_event_notify(struct notifier_block *self, unsigned long action, void *data) { struct fb_event *event = data; struct fb_info *info = event->info; struct fb_videomode *mode; struct fb_con2fbmap *con2fb; struct fb_blit_caps *caps; int idx, ret = 0; /* * ignore all events except driver registration and deregistration * if fbcon is not active */ if (fbcon_has_exited && !(action == FB_EVENT_FB_REGISTERED || action == FB_EVENT_FB_UNREGISTERED)) goto done; switch(action) { case FB_EVENT_SUSPEND: fbcon_suspended(info); break; case FB_EVENT_RESUME: fbcon_resumed(info); break; case FB_EVENT_MODE_CHANGE: fbcon_modechanged(info); break; case FB_EVENT_MODE_CHANGE_ALL: fbcon_set_all_vcs(info); break; case FB_EVENT_MODE_DELETE: mode = event->data; ret = fbcon_mode_deleted(info, mode); break; case FB_EVENT_FB_UNBIND: idx = info->node; ret = fbcon_fb_unbind(idx); break; case FB_EVENT_FB_REGISTERED: ret = fbcon_fb_registered(info); break; case FB_EVENT_FB_UNREGISTERED: ret = fbcon_fb_unregistered(info); break; case FB_EVENT_SET_CONSOLE_MAP: /* called with console lock held */ con2fb = event->data; ret = set_con2fb_map(con2fb->console - 1, con2fb->framebuffer, 1); break; case FB_EVENT_GET_CONSOLE_MAP: con2fb = event->data; con2fb->framebuffer = con2fb_map[con2fb->console - 1]; break; case FB_EVENT_BLANK: fbcon_fb_blanked(info, *(int *)event->data); break; case FB_EVENT_NEW_MODELIST: fbcon_new_modelist(info); break; case FB_EVENT_GET_REQ: caps = event->data; fbcon_get_requirement(info, caps); break; case FB_EVENT_REMAP_ALL_CONSOLE: idx = info->node; fbcon_remap_all(idx); break; } done: return ret; } /* * The console `switch' structure for the frame buffer based console */ static const struct consw fb_con = { .owner = THIS_MODULE, .con_startup = fbcon_startup, .con_init = fbcon_init, .con_deinit = fbcon_deinit, .con_clear = fbcon_clear, .con_putc = fbcon_putc, .con_putcs = fbcon_putcs, .con_cursor = fbcon_cursor, .con_scroll = 
fbcon_scroll, .con_bmove = fbcon_bmove, .con_switch = fbcon_switch, .con_blank = fbcon_blank, .con_font_set = fbcon_set_font, .con_font_get = fbcon_get_font, .con_font_default = fbcon_set_def_font, .con_font_copy = fbcon_copy_font, .con_set_palette = fbcon_set_palette, .con_scrolldelta = fbcon_scrolldelta, .con_set_origin = fbcon_set_origin, .con_invert_region = fbcon_invert_region, .con_screen_pos = fbcon_screen_pos, .con_getxy = fbcon_getxy, .con_resize = fbcon_resize, .con_debug_enter = fbcon_debug_enter, .con_debug_leave = fbcon_debug_leave, }; static struct notifier_block fbcon_event_notifier = { .notifier_call = fbcon_event_notify, }; static ssize_t store_rotate(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *info; int rotate, idx; char **last = NULL; if (fbcon_has_exited) return count; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || registered_fb[idx] == NULL) goto err; info = registered_fb[idx]; rotate = simple_strtoul(buf, last, 0); fbcon_rotate(info, rotate); err: console_unlock(); return count; } static ssize_t store_rotate_all(struct device *device, struct device_attribute *attr,const char *buf, size_t count) { struct fb_info *info; int rotate, idx; char **last = NULL; if (fbcon_has_exited) return count; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || registered_fb[idx] == NULL) goto err; info = registered_fb[idx]; rotate = simple_strtoul(buf, last, 0); fbcon_rotate_all(info, rotate); err: console_unlock(); return count; } static ssize_t show_rotate(struct device *device, struct device_attribute *attr,char *buf) { struct fb_info *info; int rotate = 0, idx; if (fbcon_has_exited) return 0; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || registered_fb[idx] == NULL) goto err; info = registered_fb[idx]; rotate = fbcon_get_rotate(info); err: console_unlock(); return snprintf(buf, PAGE_SIZE, "%d\n", rotate); } static ssize_t show_cursor_blink(struct device 
*device, struct device_attribute *attr, char *buf) { struct fb_info *info; struct fbcon_ops *ops; int idx, blink = -1; if (fbcon_has_exited) return 0; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || registered_fb[idx] == NULL) goto err; info = registered_fb[idx]; ops = info->fbcon_par; if (!ops) goto err; blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0; err: console_unlock(); return snprintf(buf, PAGE_SIZE, "%d\n", blink); } static ssize_t store_cursor_blink(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *info; int blink, idx; char **last = NULL; if (fbcon_has_exited) return count; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || registered_fb[idx] == NULL) goto err; info = registered_fb[idx]; if (!info->fbcon_par) goto err; blink = simple_strtoul(buf, last, 0); if (blink) { fbcon_cursor_noblink = 0; fbcon_add_cursor_timer(info); } else { fbcon_cursor_noblink = 1; fbcon_del_cursor_timer(info); } err: console_unlock(); return count; } static struct device_attribute device_attrs[] = { __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate), __ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all), __ATTR(cursor_blink, S_IRUGO|S_IWUSR, show_cursor_blink, store_cursor_blink), }; static int fbcon_init_device(void) { int i, error = 0; fbcon_has_sysfs = 1; for (i = 0; i < ARRAY_SIZE(device_attrs); i++) { error = device_create_file(fbcon_device, &device_attrs[i]); if (error) break; } if (error) { while (--i >= 0) device_remove_file(fbcon_device, &device_attrs[i]); fbcon_has_sysfs = 0; } return 0; } static void fbcon_start(void) { if (num_registered_fb) { int i; console_lock(); for (i = 0; i < FB_MAX; i++) { if (registered_fb[i] != NULL) { info_idx = i; break; } } console_unlock(); fbcon_takeover(0); } } static void fbcon_exit(void) { struct fb_info *info; int i, j, mapped; if (fbcon_has_exited) return; kfree((void *)softback_buf); softback_buf = 0UL; for (i = 0; i < FB_MAX; i++) { int 
pending = 0; mapped = 0; info = registered_fb[i]; if (info == NULL) continue; if (info->queue.func) pending = cancel_work_sync(&info->queue); DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" : "no")); for (j = first_fb_vc; j <= last_fb_vc; j++) { if (con2fb_map[j] == i) mapped = 1; } if (mapped) { if (info->fbops->fb_release) info->fbops->fb_release(info, 0); module_put(info->fbops->owner); if (info->fbcon_par) { struct fbcon_ops *ops = info->fbcon_par; fbcon_del_cursor_timer(info); kfree(ops->cursor_src); kfree(info->fbcon_par); info->fbcon_par = NULL; } if (info->queue.func == fb_flashcursor) info->queue.func = NULL; } } fbcon_has_exited = 1; } static int __init fb_console_init(void) { int i; console_lock(); fb_register_client(&fbcon_event_notifier); fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL, "fbcon"); if (IS_ERR(fbcon_device)) { printk(KERN_WARNING "Unable to create device " "for fbcon; errno = %ld\n", PTR_ERR(fbcon_device)); fbcon_device = NULL; } else fbcon_init_device(); for (i = 0; i < MAX_NR_CONSOLES; i++) con2fb_map[i] = -1; console_unlock(); fbcon_start(); return 0; } module_init(fb_console_init); #ifdef MODULE static void __exit fbcon_deinit_device(void) { int i; if (fbcon_has_sysfs) { for (i = 0; i < ARRAY_SIZE(device_attrs); i++) device_remove_file(fbcon_device, &device_attrs[i]); fbcon_has_sysfs = 0; } } static void __exit fb_console_exit(void) { console_lock(); fb_unregister_client(&fbcon_event_notifier); fbcon_deinit_device(); device_destroy(fb_class, MKDEV(0, 0)); fbcon_exit(); console_unlock(); unregister_con_driver(&fb_con); } module_exit(fb_console_exit); #endif MODULE_LICENSE("GPL");
gpl-2.0
xapp-le/kernel
drivers/net/wireless/iwlwifi/mvm/scan.c
1643
13797
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/etherdevice.h> #include <net/mac80211.h> #include "mvm.h" #include "iwl-eeprom-parse.h" #include "fw-api-scan.h" #define IWL_PLCP_QUIET_THRESH 1 #define IWL_ACTIVE_QUIET_TIME 10 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) { u16 rx_chain; u8 rx_ant = iwl_fw_valid_rx_ant(mvm->fw); rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS; return cpu_to_le16(rx_chain); } static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif) { if (vif->bss_conf.assoc) return cpu_to_le32(200 * 1024); else return 0; } static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif) { if (vif->bss_conf.assoc) return cpu_to_le32(vif->bss_conf.beacon_int); else return 0; } static inline __le32 iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req) { if (req->channels[0]->band == IEEE80211_BAND_2GHZ) return cpu_to_le32(PHY_BAND_24); else return cpu_to_le32(PHY_BAND_5); } static inline __le32 iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum 
ieee80211_band band, bool no_cck) { u32 tx_ant; mvm->scan_last_antenna_idx = iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw), mvm->scan_last_antenna_idx); tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; if (band == IEEE80211_BAND_2GHZ && !no_cck) return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | tx_ant); else return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); } /* * We insert the SSIDs in an inverted order, because the FW will * invert it back. The most prioritized SSID, which is first in the * request list, is not copied here, but inserted directly to the probe * request. */ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd, struct cfg80211_scan_request *req) { int fw_idx, req_idx; for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0; req_idx--, fw_idx++) { cmd->direct_scan[fw_idx].id = WLAN_EID_SSID; cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len; memcpy(cmd->direct_scan[fw_idx].ssid, req->ssids[req_idx].ssid, req->ssids[req_idx].ssid_len); } } /* * If req->n_ssids > 0, it means we should do an active scan. * In case of active scan w/o directed scan, we receive a zero-length SSID * just to notify that this scan is active and not passive. * In order to notify the FW of the number of SSIDs we wish to scan (including * the zero-length one), we need to set the corresponding bits in chan->type, * one for each SSID, and set the active bit (first). */ static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) { if (band == IEEE80211_BAND_2GHZ) return 30 + 3 * (n_ssids + 1); return 20 + 2 * (n_ssids + 1); } static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) { return band == IEEE80211_BAND_2GHZ ? 
100 + 20 : 100 + 10; } static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd, struct cfg80211_scan_request *req) { u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band); u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band, req->n_ssids); struct iwl_scan_channel *chan = (struct iwl_scan_channel *) (cmd->data + le16_to_cpu(cmd->tx_cmd.len)); int i; for (i = 0; i < cmd->channel_count; i++) { chan->channel = cpu_to_le16(req->channels[i]->hw_value); chan->type = cpu_to_le32(BIT(req->n_ssids) - 1); if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE); chan->active_dwell = cpu_to_le16(active_dwell); chan->passive_dwell = cpu_to_le16(passive_dwell); chan->iteration_count = cpu_to_le16(1); chan++; } } /* * Fill in probe request with the following parameters: * TA is our vif HW address, which mac80211 ensures we have. * Packet is broadcasted, so this is both SA and DA. * The probe request IE is made out of two: first comes the most prioritized * SSID if a directed scan is requested. Second comes whatever extra * information was given to us as the scan request IE. 
*/ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta, int n_ssids, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, int left) { int len = 0; u8 *pos = NULL; /* Make sure there is enough space for the probe request, * two mandatory IEs and the data */ left -= 24; if (left < 0) return 0; frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); eth_broadcast_addr(frame->da); memcpy(frame->sa, ta, ETH_ALEN); eth_broadcast_addr(frame->bssid); frame->seq_ctrl = 0; len += 24; /* for passive scans, no need to fill anything */ if (n_ssids == 0) return (u16)len; /* points to the payload of the request */ pos = &frame->u.probe_req.variable[0]; /* fill in our SSID IE */ left -= ssid_len + 2; if (left < 0) return 0; *pos++ = WLAN_EID_SSID; *pos++ = ssid_len; if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */ memcpy(pos, ssid, ssid_len); pos += ssid_len; } len += ssid_len + 2; if (WARN_ON(left < ie_len)) return len; if (ie && ie_len) { memcpy(pos, ie, ie_len); len += ie_len; } return (u16)len; } int iwl_mvm_scan_request(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req) { struct iwl_host_cmd hcmd = { .id = SCAN_REQUEST_CMD, .len = { 0, }, .data = { mvm->scan_cmd, }, .flags = CMD_SYNC, .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; struct iwl_scan_cmd *cmd = mvm->scan_cmd; int ret; u32 status; int ssid_len = 0; u8 *ssid = NULL; lockdep_assert_held(&mvm->mutex); BUG_ON(mvm->scan_cmd == NULL); IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n"); mvm->scan_status = IWL_MVM_SCAN_OS; memset(cmd, 0, sizeof(struct iwl_scan_cmd) + mvm->fw->ucode_capa.max_probe_length + (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel))); cmd->channel_count = (u8)req->n_channels; cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm); cmd->max_out_time = iwl_mvm_scan_max_out_time(vif); 
cmd->suspend_time = iwl_mvm_scan_suspend_time(vif); cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req); cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | MAC_FILTER_IN_BEACON); if (vif->type == NL80211_IFTYPE_P2P_DEVICE) cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED); else cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); /* * TODO: This is a WA due to a bug in the FW AUX framework that does not * properly handle time events that fail to be scheduled */ cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); cmd->repeats = cpu_to_le32(1); /* * If the user asked for passive scan, don't change to active scan if * you see any activity on the channel - remain passive. */ if (req->n_ssids > 0) { cmd->passive2active = cpu_to_le16(1); ssid = req->ssids[0].ssid; ssid_len = req->ssids[0].ssid_len; } else { cmd->passive2active = 0; } iwl_mvm_scan_fill_ssids(cmd, req); cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_BT_DIS); cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); cmd->tx_cmd.rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band, req->no_cck); cmd->tx_cmd.len = cpu_to_le16(iwl_mvm_fill_probe_req( (struct ieee80211_mgmt *)cmd->data, vif->addr, req->n_ssids, ssid, ssid_len, req->ie, req->ie_len, mvm->fw->ucode_capa.max_probe_length)); iwl_mvm_scan_fill_channels(cmd, req); cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) + le16_to_cpu(cmd->tx_cmd.len) + (cmd->channel_count * sizeof(struct iwl_scan_channel))); hcmd.len[0] = le16_to_cpu(cmd->len); status = SCAN_RESPONSE_OK; ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status); if (!ret && status == SCAN_RESPONSE_OK) { IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); } else { /* * If the scan failed, it usually means that the FW was unable * to allocate the time events. Warn on it, but maybe we * should try to send the command again with different params. */ IWL_ERR(mvm, "Scan failed! 
status 0x%x ret %d\n", status, ret); mvm->scan_status = IWL_MVM_SCAN_NONE; ret = -EIO; } return ret; } int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_cmd_response *resp = (void *)pkt->data; IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n", le32_to_cpu(resp->status)); return 0; } int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_scan_complete_notif *notif = (void *)pkt->data; IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n", notif->status, notif->scanned_channels); mvm->scan_status = IWL_MVM_SCAN_NONE; ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK); return 0; } static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_scan_complete_notif *notif; u32 *resp; switch (pkt->hdr.cmd) { case SCAN_ABORT_CMD: resp = (void *)pkt->data; if (*resp == CAN_ABORT_STATUS) { IWL_DEBUG_SCAN(mvm, "Scan can be aborted, wait until completion\n"); return false; } IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n", *resp); return true; case SCAN_COMPLETE_NOTIFICATION: notif = (void *)pkt->data; IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n", notif->status); return true; default: WARN_ON(1); return false; }; } void iwl_mvm_cancel_scan(struct iwl_mvm *mvm) { struct iwl_notification_wait wait_scan_abort; static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD, SCAN_COMPLETE_NOTIFICATION }; int ret; iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, scan_abort_notif, ARRAY_SIZE(scan_abort_notif), iwl_mvm_scan_abort_notif, NULL); ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL); if (ret) { IWL_ERR(mvm, "Couldn't send 
SCAN_ABORT_CMD: %d\n", ret); goto out_remove_notif; } ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ); if (ret) IWL_ERR(mvm, "%s - failed on timeout\n", __func__); return; out_remove_notif: iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort); }
gpl-2.0
Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary
sound/soc/samsung/s3c2412-i2s.c
2155
5312
/* sound/soc/samsung/s3c2412-i2s.c * * ALSA Soc Audio Layer - S3C2412 I2S driver * * Copyright (c) 2006 Wolfson Microelectronics PLC. * Graeme Gregory graeme.gregory@wolfsonmicro.com * linux@wolfsonmicro.com * * Copyright (c) 2007, 2004-2005 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/delay.h> #include <linux/gpio.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/module.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include <mach/dma.h> #include "dma.h" #include "regs-i2s-v2.h" #include "s3c2412-i2s.h" static struct s3c2410_dma_client s3c2412_dma_client_out = { .name = "I2S PCM Stereo out" }; static struct s3c2410_dma_client s3c2412_dma_client_in = { .name = "I2S PCM Stereo in" }; static struct s3c_dma_params s3c2412_i2s_pcm_stereo_out = { .client = &s3c2412_dma_client_out, .channel = DMACH_I2S_OUT, .dma_addr = S3C2410_PA_IIS + S3C2412_IISTXD, .dma_size = 4, }; static struct s3c_dma_params s3c2412_i2s_pcm_stereo_in = { .client = &s3c2412_dma_client_in, .channel = DMACH_I2S_IN, .dma_addr = S3C2410_PA_IIS + S3C2412_IISRXD, .dma_size = 4, }; static struct s3c_i2sv2_info s3c2412_i2s; static int s3c2412_i2s_probe(struct snd_soc_dai *dai) { int ret; pr_debug("Entered %s\n", __func__); ret = s3c_i2sv2_probe(dai, &s3c2412_i2s, S3C2410_PA_IIS); if (ret) return ret; s3c2412_i2s.dma_capture = &s3c2412_i2s_pcm_stereo_in; s3c2412_i2s.dma_playback = &s3c2412_i2s_pcm_stereo_out; s3c2412_i2s.iis_cclk = clk_get(dai->dev, "i2sclk"); if (IS_ERR(s3c2412_i2s.iis_cclk)) { pr_err("failed to get i2sclk clock\n"); iounmap(s3c2412_i2s.regs); return PTR_ERR(s3c2412_i2s.iis_cclk); } /* Set MPLL as the source for IIS CLK */ clk_set_parent(s3c2412_i2s.iis_cclk, 
clk_get(NULL, "mpll")); clk_enable(s3c2412_i2s.iis_cclk); s3c2412_i2s.iis_cclk = s3c2412_i2s.iis_pclk; /* Configure the I2S pins (GPE0...GPE4) in correct mode */ s3c_gpio_cfgall_range(S3C2410_GPE(0), 5, S3C_GPIO_SFN(2), S3C_GPIO_PULL_NONE); return 0; } static int s3c2412_i2s_remove(struct snd_soc_dai *dai) { clk_disable(s3c2412_i2s.iis_cclk); clk_put(s3c2412_i2s.iis_cclk); iounmap(s3c2412_i2s.regs); return 0; } static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(cpu_dai); struct s3c_dma_params *dma_data; u32 iismod; pr_debug("Entered %s\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dma_data = i2s->dma_playback; else dma_data = i2s->dma_capture; snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); iismod = readl(i2s->regs + S3C2412_IISMOD); pr_debug("%s: r: IISMOD: %x\n", __func__, iismod); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: iismod |= S3C2412_IISMOD_8BIT; break; case SNDRV_PCM_FORMAT_S16_LE: iismod &= ~S3C2412_IISMOD_8BIT; break; } writel(iismod, i2s->regs + S3C2412_IISMOD); pr_debug("%s: w: IISMOD: %x\n", __func__, iismod); return 0; } #define S3C2412_I2S_RATES \ (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \ SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) static const struct snd_soc_dai_ops s3c2412_i2s_dai_ops = { .hw_params = s3c2412_i2s_hw_params, }; static struct snd_soc_dai_driver s3c2412_i2s_dai = { .probe = s3c2412_i2s_probe, .remove = s3c2412_i2s_remove, .playback = { .channels_min = 2, .channels_max = 2, .rates = S3C2412_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .channels_min = 2, .channels_max = 2, .rates = S3C2412_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE, }, .ops = &s3c2412_i2s_dai_ops, }; 
static const struct snd_soc_component_driver s3c2412_i2s_component = { .name = "s3c2412-i2s", }; static int s3c2412_iis_dev_probe(struct platform_device *pdev) { int ret = 0; ret = s3c_i2sv2_register_component(&pdev->dev, -1, &s3c2412_i2s_component, &s3c2412_i2s_dai); if (ret) { pr_err("failed to register the dai\n"); return ret; } ret = asoc_dma_platform_register(&pdev->dev); if (ret) { pr_err("failed to register the DMA: %d\n", ret); goto err; } return 0; err: snd_soc_unregister_component(&pdev->dev); return ret; } static int s3c2412_iis_dev_remove(struct platform_device *pdev) { asoc_dma_platform_unregister(&pdev->dev); snd_soc_unregister_component(&pdev->dev); return 0; } static struct platform_driver s3c2412_iis_driver = { .probe = s3c2412_iis_dev_probe, .remove = s3c2412_iis_dev_remove, .driver = { .name = "s3c2412-iis", .owner = THIS_MODULE, }, }; module_platform_driver(s3c2412_iis_driver); /* Module information */ MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); MODULE_DESCRIPTION("S3C2412 I2S SoC Interface"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:s3c2412-iis");
gpl-2.0
jthatch12/STi
drivers/net/ftmac100.c
2923
31217
/* * Faraday FTMAC100 10/100 Ethernet * * (C) Copyright 2009-2011 Faraday Technology * Po-Yu Chuang <ratbert@faraday-tech.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/init.h> #include <linux/io.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include "ftmac100.h" #define DRV_NAME "ftmac100" #define DRV_VERSION "0.2" #define RX_QUEUE_ENTRIES 128 /* must be power of 2 */ #define TX_QUEUE_ENTRIES 16 /* must be power of 2 */ #define MAX_PKT_SIZE 1518 #define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */ #if MAX_PKT_SIZE > 0x7ff #error invalid MAX_PKT_SIZE #endif #if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE #error invalid RX_BUF_SIZE #endif /****************************************************************************** * private data *****************************************************************************/ struct ftmac100_descs { struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES]; struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES]; }; struct ftmac100 { struct resource *res; void __iomem *base; int irq; struct ftmac100_descs *descs; dma_addr_t descs_dma_addr; unsigned int rx_pointer; unsigned int tx_clean_pointer; 
unsigned int tx_pointer; unsigned int tx_pending; spinlock_t tx_lock; struct net_device *netdev; struct device *dev; struct napi_struct napi; struct mii_if_info mii; }; static int ftmac100_alloc_rx_page(struct ftmac100 *priv, struct ftmac100_rxdes *rxdes, gfp_t gfp); /****************************************************************************** * internal functions (hardware register access) *****************************************************************************/ #define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \ FTMAC100_INT_NORXBUF | \ FTMAC100_INT_XPKT_OK | \ FTMAC100_INT_XPKT_LOST | \ FTMAC100_INT_RPKT_LOST | \ FTMAC100_INT_AHB_ERR | \ FTMAC100_INT_PHYSTS_CHG) #define INT_MASK_ALL_DISABLED 0 static void ftmac100_enable_all_int(struct ftmac100 *priv) { iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR); } static void ftmac100_disable_all_int(struct ftmac100 *priv) { iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR); } static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr) { iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR); } static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr) { iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR); } static void ftmac100_txdma_start_polling(struct ftmac100 *priv) { iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD); } static int ftmac100_reset(struct ftmac100 *priv) { struct net_device *netdev = priv->netdev; int i; /* NOTE: reset clears all registers */ iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR); for (i = 0; i < 5; i++) { unsigned int maccr; maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR); if (!(maccr & FTMAC100_MACCR_SW_RST)) { /* * FTMAC100_MACCR_SW_RST cleared does not indicate * that hardware reset completed (what the f*ck). * We still need to wait for a while. 
*/ udelay(500); return 0; } udelay(1000); } netdev_err(netdev, "software reset failed\n"); return -EIO; } static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac) { unsigned int maddr = mac[0] << 8 | mac[1]; unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR); iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR); } #define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \ FTMAC100_MACCR_RCV_EN | \ FTMAC100_MACCR_XDMA_EN | \ FTMAC100_MACCR_RDMA_EN | \ FTMAC100_MACCR_CRC_APD | \ FTMAC100_MACCR_FULLDUP | \ FTMAC100_MACCR_RX_RUNT | \ FTMAC100_MACCR_RX_BROADPKT) static int ftmac100_start_hw(struct ftmac100 *priv) { struct net_device *netdev = priv->netdev; if (ftmac100_reset(priv)) return -EIO; /* setup ring buffer base registers */ ftmac100_set_rx_ring_base(priv, priv->descs_dma_addr + offsetof(struct ftmac100_descs, rxdes)); ftmac100_set_tx_ring_base(priv, priv->descs_dma_addr + offsetof(struct ftmac100_descs, txdes)); iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC); ftmac100_set_mac(priv, netdev->dev_addr); iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR); return 0; } static void ftmac100_stop_hw(struct ftmac100 *priv) { iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR); } /****************************************************************************** * internal functions (receive descriptor) *****************************************************************************/ static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes) { return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS); } static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes) { return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS); } static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes) { return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); } static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes) { /* 
clear status bits */ rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); } static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes) { return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR); } static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes) { return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR); } static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes) { return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL); } static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes) { return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT); } static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes) { return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB); } static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes) { return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL; } static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes) { return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST); } static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes, unsigned int size) { rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR); rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size)); } static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes) { rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR); } static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes, dma_addr_t addr) { rxdes->rxdes2 = cpu_to_le32(addr); } static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes) { return le32_to_cpu(rxdes->rxdes2); } /* * rxdes3 is not used by hardware. We use it to keep track of page. * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). 
*/ static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page) { rxdes->rxdes3 = (unsigned int)page; } static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes) { return (struct page *)rxdes->rxdes3; } /****************************************************************************** * internal functions (receive) *****************************************************************************/ static int ftmac100_next_rx_pointer(int pointer) { return (pointer + 1) & (RX_QUEUE_ENTRIES - 1); } static void ftmac100_rx_pointer_advance(struct ftmac100 *priv) { priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer); } static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv) { return &priv->descs->rxdes[priv->rx_pointer]; } static struct ftmac100_rxdes * ftmac100_rx_locate_first_segment(struct ftmac100 *priv) { struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); while (!ftmac100_rxdes_owned_by_dma(rxdes)) { if (ftmac100_rxdes_first_segment(rxdes)) return rxdes; ftmac100_rxdes_set_dma_own(rxdes); ftmac100_rx_pointer_advance(priv); rxdes = ftmac100_current_rxdes(priv); } return NULL; } static bool ftmac100_rx_packet_error(struct ftmac100 *priv, struct ftmac100_rxdes *rxdes) { struct net_device *netdev = priv->netdev; bool error = false; if (unlikely(ftmac100_rxdes_rx_error(rxdes))) { if (net_ratelimit()) netdev_info(netdev, "rx err\n"); netdev->stats.rx_errors++; error = true; } if (unlikely(ftmac100_rxdes_crc_error(rxdes))) { if (net_ratelimit()) netdev_info(netdev, "rx crc err\n"); netdev->stats.rx_crc_errors++; error = true; } if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) { if (net_ratelimit()) netdev_info(netdev, "rx frame too long\n"); netdev->stats.rx_length_errors++; error = true; } else if (unlikely(ftmac100_rxdes_runt(rxdes))) { if (net_ratelimit()) netdev_info(netdev, "rx runt\n"); netdev->stats.rx_length_errors++; error = true; } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) { if 
(net_ratelimit()) netdev_info(netdev, "rx odd nibble\n"); netdev->stats.rx_length_errors++; error = true; } return error; } static void ftmac100_rx_drop_packet(struct ftmac100 *priv) { struct net_device *netdev = priv->netdev; struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); bool done = false; if (net_ratelimit()) netdev_dbg(netdev, "drop packet %p\n", rxdes); do { if (ftmac100_rxdes_last_segment(rxdes)) done = true; ftmac100_rxdes_set_dma_own(rxdes); ftmac100_rx_pointer_advance(priv); rxdes = ftmac100_current_rxdes(priv); } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes)); netdev->stats.rx_dropped++; } static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed) { struct net_device *netdev = priv->netdev; struct ftmac100_rxdes *rxdes; struct sk_buff *skb; struct page *page; dma_addr_t map; int length; rxdes = ftmac100_rx_locate_first_segment(priv); if (!rxdes) return false; if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) { ftmac100_rx_drop_packet(priv); return true; } /* * It is impossible to get multi-segment packets * because we always provide big enough receive buffers. 
*/ if (unlikely(!ftmac100_rxdes_last_segment(rxdes))) BUG(); /* start processing */ skb = netdev_alloc_skb_ip_align(netdev, 128); if (unlikely(!skb)) { if (net_ratelimit()) netdev_err(netdev, "rx skb alloc failed\n"); ftmac100_rx_drop_packet(priv); return true; } if (unlikely(ftmac100_rxdes_multicast(rxdes))) netdev->stats.multicast++; map = ftmac100_rxdes_get_dma_addr(rxdes); dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); length = ftmac100_rxdes_frame_length(rxdes); page = ftmac100_rxdes_get_page(rxdes); skb_fill_page_desc(skb, 0, page, 0, length); skb->len += length; skb->data_len += length; skb->truesize += length; __pskb_pull_tail(skb, min(length, 64)); ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC); ftmac100_rx_pointer_advance(priv); skb->protocol = eth_type_trans(skb, netdev); netdev->stats.rx_packets++; netdev->stats.rx_bytes += skb->len; /* push packet to protocol stack */ netif_receive_skb(skb); (*processed)++; return true; } /****************************************************************************** * internal functions (transmit descriptor) *****************************************************************************/ static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes) { /* clear all except end of ring bit */ txdes->txdes0 = 0; txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR); txdes->txdes2 = 0; txdes->txdes3 = 0; } static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes) { return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); } static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes) { /* * Make sure dma own bit will not be set before any other * descriptor fields. 
*/ wmb(); txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); } static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes) { return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL); } static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes) { return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL); } static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes) { txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR); } static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes) { txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS); } static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes) { txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS); } static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes) { txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC); } static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes, unsigned int len) { txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len)); } static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes, dma_addr_t addr) { txdes->txdes2 = cpu_to_le32(addr); } static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes) { return le32_to_cpu(txdes->txdes2); } /* * txdes3 is not used by hardware. We use it to keep track of socket buffer. * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). 
*/ static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb) { txdes->txdes3 = (unsigned int)skb; } static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes) { return (struct sk_buff *)txdes->txdes3; } /****************************************************************************** * internal functions (transmit) *****************************************************************************/ static int ftmac100_next_tx_pointer(int pointer) { return (pointer + 1) & (TX_QUEUE_ENTRIES - 1); } static void ftmac100_tx_pointer_advance(struct ftmac100 *priv) { priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer); } static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv) { priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer); } static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv) { return &priv->descs->txdes[priv->tx_pointer]; } static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv) { return &priv->descs->txdes[priv->tx_clean_pointer]; } static bool ftmac100_tx_complete_packet(struct ftmac100 *priv) { struct net_device *netdev = priv->netdev; struct ftmac100_txdes *txdes; struct sk_buff *skb; dma_addr_t map; if (priv->tx_pending == 0) return false; txdes = ftmac100_current_clean_txdes(priv); if (ftmac100_txdes_owned_by_dma(txdes)) return false; skb = ftmac100_txdes_get_skb(txdes); map = ftmac100_txdes_get_dma_addr(txdes); if (unlikely(ftmac100_txdes_excessive_collision(txdes) || ftmac100_txdes_late_collision(txdes))) { /* * packet transmitted to ethernet lost due to late collision * or excessive collision */ netdev->stats.tx_aborted_errors++; } else { netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; } dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); dev_kfree_skb(skb); ftmac100_txdes_reset(txdes); ftmac100_tx_clean_pointer_advance(priv); spin_lock(&priv->tx_lock); priv->tx_pending--; 
spin_unlock(&priv->tx_lock); netif_wake_queue(netdev); return true; } static void ftmac100_tx_complete(struct ftmac100 *priv) { while (ftmac100_tx_complete_packet(priv)) ; } static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, dma_addr_t map) { struct net_device *netdev = priv->netdev; struct ftmac100_txdes *txdes; unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; txdes = ftmac100_current_txdes(priv); ftmac100_tx_pointer_advance(priv); /* setup TX descriptor */ ftmac100_txdes_set_skb(txdes, skb); ftmac100_txdes_set_dma_addr(txdes, map); ftmac100_txdes_set_first_segment(txdes); ftmac100_txdes_set_last_segment(txdes); ftmac100_txdes_set_txint(txdes); ftmac100_txdes_set_buffer_size(txdes, len); spin_lock(&priv->tx_lock); priv->tx_pending++; if (priv->tx_pending == TX_QUEUE_ENTRIES) netif_stop_queue(netdev); /* start transmit */ ftmac100_txdes_set_dma_own(txdes); spin_unlock(&priv->tx_lock); ftmac100_txdma_start_polling(priv); return NETDEV_TX_OK; } /****************************************************************************** * internal functions (buffer) *****************************************************************************/ static int ftmac100_alloc_rx_page(struct ftmac100 *priv, struct ftmac100_rxdes *rxdes, gfp_t gfp) { struct net_device *netdev = priv->netdev; struct page *page; dma_addr_t map; page = alloc_page(gfp); if (!page) { if (net_ratelimit()) netdev_err(netdev, "failed to allocate rx page\n"); return -ENOMEM; } map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(priv->dev, map))) { if (net_ratelimit()) netdev_err(netdev, "failed to map rx page\n"); __free_page(page); return -ENOMEM; } ftmac100_rxdes_set_page(rxdes, page); ftmac100_rxdes_set_dma_addr(rxdes, map); ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE); ftmac100_rxdes_set_dma_own(rxdes); return 0; } static void ftmac100_free_buffers(struct ftmac100 *priv) { int i; for (i = 0; i < RX_QUEUE_ENTRIES; i++) { 
struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; struct page *page = ftmac100_rxdes_get_page(rxdes); dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes); if (!page) continue; dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); __free_page(page); } for (i = 0; i < TX_QUEUE_ENTRIES; i++) { struct ftmac100_txdes *txdes = &priv->descs->txdes[i]; struct sk_buff *skb = ftmac100_txdes_get_skb(txdes); dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes); if (!skb) continue; dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); dev_kfree_skb(skb); } dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs), priv->descs, priv->descs_dma_addr); } static int ftmac100_alloc_buffers(struct ftmac100 *priv) { int i; priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs), &priv->descs_dma_addr, GFP_KERNEL); if (!priv->descs) return -ENOMEM; memset(priv->descs, 0, sizeof(struct ftmac100_descs)); /* initialize RX ring */ ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); for (i = 0; i < RX_QUEUE_ENTRIES; i++) { struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL)) goto err; } /* initialize TX ring */ ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]); return 0; err: ftmac100_free_buffers(priv); return -ENOMEM; } /****************************************************************************** * struct mii_if_info functions *****************************************************************************/ static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg) { struct ftmac100 *priv = netdev_priv(netdev); unsigned int phycr; int i; phycr = FTMAC100_PHYCR_PHYAD(phy_id) | FTMAC100_PHYCR_REGAD(reg) | FTMAC100_PHYCR_MIIRD; iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); for (i = 0; i < 10; i++) { phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); if ((phycr & FTMAC100_PHYCR_MIIRD) == 0) return phycr & 
FTMAC100_PHYCR_MIIRDATA; udelay(100); } netdev_err(netdev, "mdio read timed out\n"); return 0; } static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg, int data) { struct ftmac100 *priv = netdev_priv(netdev); unsigned int phycr; int i; phycr = FTMAC100_PHYCR_PHYAD(phy_id) | FTMAC100_PHYCR_REGAD(reg) | FTMAC100_PHYCR_MIIWR; data = FTMAC100_PHYWDATA_MIIWDATA(data); iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA); iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); for (i = 0; i < 10; i++) { phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); if ((phycr & FTMAC100_PHYCR_MIIWR) == 0) return; udelay(100); } netdev_err(netdev, "mdio write timed out\n"); } /****************************************************************************** * struct ethtool_ops functions *****************************************************************************/ static void ftmac100_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); strcpy(info->bus_info, dev_name(&netdev->dev)); } static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { struct ftmac100 *priv = netdev_priv(netdev); return mii_ethtool_gset(&priv->mii, cmd); } static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { struct ftmac100 *priv = netdev_priv(netdev); return mii_ethtool_sset(&priv->mii, cmd); } static int ftmac100_nway_reset(struct net_device *netdev) { struct ftmac100 *priv = netdev_priv(netdev); return mii_nway_restart(&priv->mii); } static u32 ftmac100_get_link(struct net_device *netdev) { struct ftmac100 *priv = netdev_priv(netdev); return mii_link_ok(&priv->mii); } static const struct ethtool_ops ftmac100_ethtool_ops = { .set_settings = ftmac100_set_settings, .get_settings = ftmac100_get_settings, .get_drvinfo = ftmac100_get_drvinfo, .nway_reset = ftmac100_nway_reset, .get_link = ftmac100_get_link, }; 
/****************************************************************************** * interrupt handler *****************************************************************************/ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id) { struct net_device *netdev = dev_id; struct ftmac100 *priv = netdev_priv(netdev); if (likely(netif_running(netdev))) { /* Disable interrupts for polling */ ftmac100_disable_all_int(priv); napi_schedule(&priv->napi); } return IRQ_HANDLED; } /****************************************************************************** * struct napi_struct functions *****************************************************************************/ static int ftmac100_poll(struct napi_struct *napi, int budget) { struct ftmac100 *priv = container_of(napi, struct ftmac100, napi); struct net_device *netdev = priv->netdev; unsigned int status; bool completed = true; int rx = 0; status = ioread32(priv->base + FTMAC100_OFFSET_ISR); if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) { /* * FTMAC100_INT_RPKT_FINISH: * RX DMA has received packets into RX buffer successfully * * FTMAC100_INT_NORXBUF: * RX buffer unavailable */ bool retry; do { retry = ftmac100_rx_packet(priv, &rx); } while (retry && rx < budget); if (retry && rx == budget) completed = false; } if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) { /* * FTMAC100_INT_XPKT_OK: * packet transmitted to ethernet successfully * * FTMAC100_INT_XPKT_LOST: * packet transmitted to ethernet lost due to late * collision or excessive collision */ ftmac100_tx_complete(priv); } if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST | FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) { if (net_ratelimit()) netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status, status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "", status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "", status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "", status & FTMAC100_INT_PHYSTS_CHG ? 
"PHYSTS_CHG" : ""); if (status & FTMAC100_INT_NORXBUF) { /* RX buffer unavailable */ netdev->stats.rx_over_errors++; } if (status & FTMAC100_INT_RPKT_LOST) { /* received packet lost due to RX FIFO full */ netdev->stats.rx_fifo_errors++; } if (status & FTMAC100_INT_PHYSTS_CHG) { /* PHY link status change */ mii_check_link(&priv->mii); } } if (completed) { /* stop polling */ napi_complete(napi); ftmac100_enable_all_int(priv); } return rx; } /****************************************************************************** * struct net_device_ops functions *****************************************************************************/ static int ftmac100_open(struct net_device *netdev) { struct ftmac100 *priv = netdev_priv(netdev); int err; err = ftmac100_alloc_buffers(priv); if (err) { netdev_err(netdev, "failed to allocate buffers\n"); goto err_alloc; } err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev); if (err) { netdev_err(netdev, "failed to request irq %d\n", priv->irq); goto err_irq; } priv->rx_pointer = 0; priv->tx_clean_pointer = 0; priv->tx_pointer = 0; priv->tx_pending = 0; err = ftmac100_start_hw(priv); if (err) goto err_hw; napi_enable(&priv->napi); netif_start_queue(netdev); ftmac100_enable_all_int(priv); return 0; err_hw: free_irq(priv->irq, netdev); err_irq: ftmac100_free_buffers(priv); err_alloc: return err; } static int ftmac100_stop(struct net_device *netdev) { struct ftmac100 *priv = netdev_priv(netdev); ftmac100_disable_all_int(priv); netif_stop_queue(netdev); napi_disable(&priv->napi); ftmac100_stop_hw(priv); free_irq(priv->irq, netdev); ftmac100_free_buffers(priv); return 0; } static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ftmac100 *priv = netdev_priv(netdev); dma_addr_t map; if (unlikely(skb->len > MAX_PKT_SIZE)) { if (net_ratelimit()) netdev_dbg(netdev, "tx packet too big\n"); netdev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } map = dma_map_single(priv->dev, 
skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(priv->dev, map))) { /* drop packet */ if (net_ratelimit()) netdev_err(netdev, "map socket buffer failed\n"); netdev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } return ftmac100_xmit(priv, skb, map); } /* optional */ static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct ftmac100 *priv = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(ifr); return generic_mii_ioctl(&priv->mii, data, cmd, NULL); } static const struct net_device_ops ftmac100_netdev_ops = { .ndo_open = ftmac100_open, .ndo_stop = ftmac100_stop, .ndo_start_xmit = ftmac100_hard_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = ftmac100_do_ioctl, }; /****************************************************************************** * struct platform_driver functions *****************************************************************************/ static int ftmac100_probe(struct platform_device *pdev) { struct resource *res; int irq; struct net_device *netdev; struct ftmac100 *priv; int err; if (!pdev) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; /* setup net_device */ netdev = alloc_etherdev(sizeof(*priv)); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } SET_NETDEV_DEV(netdev, &pdev->dev); SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops); netdev->netdev_ops = &ftmac100_netdev_ops; platform_set_drvdata(pdev, netdev); /* setup private data */ priv = netdev_priv(netdev); priv->netdev = netdev; priv->dev = &pdev->dev; spin_lock_init(&priv->tx_lock); /* initialize NAPI */ netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64); /* map io memory */ priv->res = request_mem_region(res->start, resource_size(res), dev_name(&pdev->dev)); if (!priv->res) { dev_err(&pdev->dev, "Could not reserve memory region\n"); err 
= -ENOMEM; goto err_req_mem; } priv->base = ioremap(res->start, resource_size(res)); if (!priv->base) { dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); err = -EIO; goto err_ioremap; } priv->irq = irq; /* initialize struct mii_if_info */ priv->mii.phy_id = 0; priv->mii.phy_id_mask = 0x1f; priv->mii.reg_num_mask = 0x1f; priv->mii.dev = netdev; priv->mii.mdio_read = ftmac100_mdio_read; priv->mii.mdio_write = ftmac100_mdio_write; /* register network device */ err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); goto err_register_netdev; } netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base); if (!is_valid_ether_addr(netdev->dev_addr)) { random_ether_addr(netdev->dev_addr); netdev_info(netdev, "generated random MAC address %pM\n", netdev->dev_addr); } return 0; err_register_netdev: iounmap(priv->base); err_ioremap: release_resource(priv->res); err_req_mem: netif_napi_del(&priv->napi); platform_set_drvdata(pdev, NULL); free_netdev(netdev); err_alloc_etherdev: return err; } static int __exit ftmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftmac100 *priv; netdev = platform_get_drvdata(pdev); priv = netdev_priv(netdev); unregister_netdev(netdev); iounmap(priv->base); release_resource(priv->res); netif_napi_del(&priv->napi); platform_set_drvdata(pdev, NULL); free_netdev(netdev); return 0; } static struct platform_driver ftmac100_driver = { .probe = ftmac100_probe, .remove = __exit_p(ftmac100_remove), .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; /****************************************************************************** * initialization / finalization *****************************************************************************/ static int __init ftmac100_init(void) { pr_info("Loading version " DRV_VERSION " ...\n"); return platform_driver_register(&ftmac100_driver); } static void __exit ftmac100_exit(void) { platform_driver_unregister(&ftmac100_driver); 
} module_init(ftmac100_init); module_exit(ftmac100_exit); MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); MODULE_DESCRIPTION("FTMAC100 driver"); MODULE_LICENSE("GPL");
gpl-2.0
mericon/Xperia-S-msm8660
drivers/staging/et131x/et1310_eeprom.c
2923
12463
/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_eeprom.c - Code used to access the device's EEPROM
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et131x_adapter.h"
#include "et131x.h"

/*
 * EEPROM Defines
 */

/* LBCIF Register Groups (addressed via 32-bit offsets) */
#define LBCIF_DWORD0_GROUP		0xAC
#define LBCIF_DWORD1_GROUP		0xB0

/* LBCIF Registers (addressed via 8-bit offsets) */
#define LBCIF_ADDRESS_REGISTER		0xAC
#define LBCIF_DATA_REGISTER		0xB0
#define LBCIF_CONTROL_REGISTER		0xB1
#define LBCIF_STATUS_REGISTER		0xB2

/* LBCIF Control Register Bits */
#define LBCIF_CONTROL_SEQUENTIAL_READ	0x01
#define LBCIF_CONTROL_PAGE_WRITE	0x02
#define LBCIF_CONTROL_EEPROM_RELOAD	0x08
#define LBCIF_CONTROL_TWO_BYTE_ADDR	0x20
#define LBCIF_CONTROL_I2C_WRITE		0x40
#define LBCIF_CONTROL_LBCIF_ENABLE	0x80

/* LBCIF Status Register Bits */
#define LBCIF_STATUS_PHY_QUEUE_AVAIL	0x01
#define LBCIF_STATUS_I2C_IDLE		0x02
#define LBCIF_STATUS_ACK_ERROR		0x04
#define LBCIF_STATUS_GENERAL_ERROR	0x08
#define LBCIF_STATUS_CHECKSUM_ERROR	0x40
#define LBCIF_STATUS_EEPROM_PRESENT	0x80

/* Miscellaneous Constraints */
#define MAX_NUM_REGISTER_POLLS		1000
#define MAX_NUM_WRITE_RETRIES		2

/**
 * eeprom_wait_ready - poll the LBCIF until the EEPROM interface is idle
 * @pdev: our PCI device, whose config space holds the LBCIF registers
 * @status: optional out-parameter; receives the raw DWORD1 register group
 *
 * Returns the low data byte of the DWORD1 group (a NON-NEGATIVE value,
 * possibly non-zero) once the interface reports ready, -EIO if the config
 * space read fails, or -ETIMEDOUT if the poll limit is exhausted.
 * Callers must therefore check for "err < 0", not merely "err".
 */
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	/*
	 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 *    bits 7,1:0 both equal to 1, at least once after reset.
	 *    Subsequent operations need only to check that bits 1:0 are equal
	 *    to 1 prior to starting a single byte read/write
	 */
	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @etdev: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 for a successful write, a negative errno otherwise.
 */
static int eeprom_write(struct et131x_adapter *etdev, u32 addr, u8 data)
{
	struct pci_dev *pdev = etdev->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/*
	 * For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition.  The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */
	err = eeprom_wait_ready(pdev, NULL);
	/*
	 * eeprom_wait_ready() returns a non-negative data byte on success;
	 * only a negative value indicates failure.
	 */
	if (err < 0)
		return err;

	/*
	 * 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0.  Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/*
		 * Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/*
		 * Monitor bit 1:0 of the LBCIF Status Register.  When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting from both
		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		/*
		 * NOTE(review): returning 0 (success) when the wait fails
		 * looks intentional as part of the "blind write" workaround
		 * below, but deserves confirmation against hardware docs.
		 */
		if (err < 0)
			return 0;

		/*
		 * Check bit 3 of the LBCIF Status Register.  If equal to 1,
		 * an error has occurred.  Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
		    && etdev->pdev->revision == 0)
			break;

		/*
		 * Check bit 2 of the LBCIF Status Register.  If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write.  This write operation was ignored and must
		 * be repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/*
			 * This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write.  This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/*
	 * Set bit 6 of the LBCIF Control Register = 0.
	 */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
					LBCIF_DATA_REGISTER, &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		/* Give up after a bounded number of polls */
		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @etdev: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read, a negative errno otherwise.
 */
static int eeprom_read(struct et131x_adapter *etdev, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = etdev->pdev;
	int err;
	u32 status;

	/*
	 * A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */
	err = eeprom_wait_ready(pdev, NULL);
	/*
	 * eeprom_wait_ready() returns a non-negative data byte on success;
	 * only a negative value indicates failure.
	 */
	if (err < 0)
		return err;
	/*
	 * Write to the LBCIF Control Register:  bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0.  Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/*
	 * Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/*
	 * Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/*
	 * Regardless of error status, read data byte from LBCIF Data
	 * Register.
	 */
	*pdata = err;
	/*
	 * Check bit 2 of the LBCIF Status Register.  If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

/**
 * et131x_init_eeprom - validate the on-board EEPROM and read the LED config
 * @etdev: pointer to our private adapter structure
 *
 * Returns 0 on success (EEPROM usable and LED bytes cached in
 * etdev->eeprom_data), -EIO on a fatal EEPROM status error.
 */
int et131x_init_eeprom(struct et131x_adapter *etdev)
{
	struct pci_dev *pdev = etdev->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
				      &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I tried also a msleep before this
	 * function, because I thought there could be some time conditions
	 * but it didn't work. Call the whole function twice also work.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
		       "Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;
		if (pdev->revision == 0x01) {
			int	i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(etdev, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision  != 0x01 || write_failed) {
			dev_err(&pdev->dev,
			    "Fatal EEPROM Status Error - 0x%04x\n", eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			etdev->has_eeprom = 0;
			return -EIO;
		}
	}
	etdev->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(etdev, 0x70, &etdev->eeprom_data[0]);
	eeprom_read(etdev, 0x71, &etdev->eeprom_data[1]);

	if (etdev->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		etdev->eeprom_data[1] = 0x00;

	return 0;
}
gpl-2.0
AndyLavr/htc_kernel_oxp
drivers/net/ibm_newemac/mal.c
2923
20267
/*
 * drivers/net/ibm_newemac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Softare Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "core.h"
#include <asm/dcr-regs.h>

/* Number of MAL instances probed so far; used to assign mal->index. */
static int mal_count;

/*
 * Register a commac (EMAC channel client) with this MAL instance.
 * Fails with -EBUSY if any of its TX/RX channels is already claimed.
 * NAPI is enabled when the first commac is added.
 */
int __devinit mal_register_commac(struct mal_instance	*mal,
				  struct mal_commac	*commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}

/*
 * Unregister a commac; NAPI is disabled when the last commac is removed.
 */
void mal_unregister_commac(struct mal_instance	*mal,
		struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/*
 * Program the RX channel buffer size.  The hardware register takes the
 * size in 16-byte units, hence the 16-byte alignment check and ">> 4".
 */
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

/* Index of the first TX buffer descriptor belonging to @channel. */
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}

/* Index of the first RX BD for @channel; RX BDs follow all TX BDs. */
int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);

	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	/* MAL_TXCARR is a write-only "reset" register, so no RMW/lock
	 * is needed here, unlike the enable path above. */
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
	 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
	 * for the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
	 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
	 * for the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

/* Add a commac to the NAPI poll list; it starts in the disabled state. */
void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}

/*
 * System error interrupt: decode and report PLB/OPB errors; descriptor
 * errors are ignored here because TXDE/RXDE will fire separately.
 */
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore Descriptor error,
			 * TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or
			 * incorrect physical address in BD (i.e. bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}

/* Kick NAPI; EOB interrupts stay masked until mal_poll() re-enables them. */
static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		mal_disable_eob_irq(mal);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}

/* TX end-of-buffer interrupt. */
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}

/* RX end-of-buffer interrupt. */
static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}

/* TX descriptor error interrupt: report only, no recovery here. */
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}

/*
 * RX descriptor error interrupt: mark the affected commacs as RX-stopped,
 * notify them via ops->rxde(), then schedule a poll to recover.
 */
static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

/*
 * Combined interrupt handler used when MAL_FTR_COMMON_ERR_INT is set
 * (e.g. 405EZ): demultiplexes SERR/TXDE/RXDE from the shared line.
 */
static irqreturn_t mal_int(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	if (esr & MAL_ESR_EVB) {
		/* descriptor error */
		if (esr & MAL_ESR_DE) {
			if (esr & MAL_ESR_CIDT)
				return mal_rxde(irq, dev_instance);
			else
				return mal_txde(irq, dev_instance);
		} else { /* SERR */
			return mal_serr(irq, dev_instance);
		}
	}
	return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller disable poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}

/*
 * NAPI poll callback: services TX completions, RX packets (up to @budget),
 * then re-enables the EOB interrupt and re-checks for packets that raced in
 * ("rotting" packets) before returning.
 */
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something more smart here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget);
		if (n) {
			received += n;
			budget -= n;
			if (budget <= 0)
				goto more_work; // XXX What if this is the last one ?
		}
	}

	/* We need to disable IRQs to protect from RXDE IRQ here */
	spin_lock_irqsave(&mal->lock, flags);
	__napi_complete(napi);
	mal_enable_eob_irq(mal);
	spin_unlock_irqrestore(&mal->lock, flags);

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (napi_reschedule(napi))
				mal_disable_eob_irq(mal);
			else
				MAL_DBG2(mal, "already in poll list" NL);

			if (budget > 0)
				goto again;
			else
				goto more_work;
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}

/*
 * Soft-reset the MAL.  Busy-waits (bounded, no delay) for the SR bit to
 * self-clear; per the comment below the reset takes one system clock.
 */
static void mal_reset(struct mal_instance *mal)
{
	int n = 10;

	MAL_DBG(mal, "reset" NL);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

/* Size of the ethtool register dump produced by mal_dump_regs(). */
int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct mal_regs);
}

/* Fill @buf with a subheader plus a snapshot of all MAL DCR registers. */
void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));
	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}

/*
 * Probe one MAL instance from the device tree: read channel counts, map
 * DCRs and interrupts, allocate the BD rings, hook up IRQ handlers and
 * enable SERR/EOB interrupts.  Errors unwind via the goto chain.
 */
static int __devinit mal_probe(struct platform_device *ofdev)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;
	unsigned long irqflags;
	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating MAL structure!\n",
		       index);
		return -ENOMEM;
	}
	mal->index = index;
	mal->ofdev = ofdev;
	mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs !\n", index);
		err = -ENODEV;
		goto fail;
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
		defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
				MAL_FTR_COMMON_ERR_INT);
#else
		printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
				ofdev->dev.of_node->full_name);
		err = -ENODEV;
		goto fail;
#endif
	}

	mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		/* 405EZ routes SERR/TXDE/RXDE onto one shared line */
		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
	} else {
		mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
		mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
	}

	if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
	    mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
	    mal->rxde_irq == NO_IRQ) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts !\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	init_dummy_netdev(&mal->dummy_dev);

	netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
		       CONFIG_IBM_NEW_EMAC_POLL_WEIGHT);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with priority being non-0, it can
	 * deadlock, fix it up here
	 */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt =
		dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
				   GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating RX/TX descriptors!\n",
		       index);
		err = -ENOMEM;
		goto fail_unmap;
	}
	memset(mal->bd_virt, 0, bd_size);

	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		irqflags = IRQF_SHARED;
		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
	} else {
		irqflags = 0;
		hdlr_serr = mal_serr;
		hdlr_txde = mal_txde;
		hdlr_rxde = mal_rxde;
	}

	err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	if (mal->version == 2)
		set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
	else
		set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %s, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->dev.of_node->full_name,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	dev_set_drvdata(&ofdev->dev, mal);

	mal_dbg_register(mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, 0x100);
 fail:
	kfree(mal);

	return err;
}

/*
 * Tear down a MAL instance: quiesce NAPI, release IRQs, reset the
 * hardware and free the BD ring memory.
 */
static int __devexit mal_remove(struct platform_device *ofdev)
{
	struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list)) {
		/* This is *very* bad */
		printk(KERN_EMERG
		       "mal%d: commac list is not empty on remove!\n",
		       mal->index);
		WARN_ON(1);
	}

	dev_set_drvdata(&ofdev->dev, NULL);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_unregister(mal);

	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);

	kfree(mal);

	return 0;
}

static struct of_device_id mal_platform_match[] =
{
	{
		.compatible	= "ibm,mcmal",
	},
	{
		.compatible	= "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal",
	},
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal2",
	},
	{},
};

static struct platform_driver mal_of_driver = {
	.driver = {
		.name = "mcmal",
		.owner = THIS_MODULE,
		.of_match_table = mal_platform_match,
	},
	.probe = mal_probe,
	.remove = mal_remove,
};

int __init mal_init(void)
{
	return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
	platform_driver_unregister(&mal_of_driver);
}
gpl-2.0
NikitaProAndroid/android_kernel_lge_msm8x26
net/netfilter/nf_conntrack_proto_sctp.c
4459
24359
/* * Connection tracking protocol helper module for SCTP. * * SCTP is defined in RFC 2960. References to various sections in this code * are to this RFC. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/timer.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/sctp.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_ecache.h> /* FIXME: Examine ipfilter's timeouts and conntrack transitions more closely. They're more complex. --RR And so for me for SCTP :D -Kiran */ static const char *const sctp_conntrack_names[] = { "NONE", "CLOSED", "COOKIE_WAIT", "COOKIE_ECHOED", "ESTABLISHED", "SHUTDOWN_SENT", "SHUTDOWN_RECD", "SHUTDOWN_ACK_SENT", }; #define SECS * HZ #define MINS * 60 SECS #define HOURS * 60 MINS #define DAYS * 24 HOURS static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = { [SCTP_CONNTRACK_CLOSED] = 10 SECS, [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS, [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, }; #define sNO SCTP_CONNTRACK_NONE #define sCL SCTP_CONNTRACK_CLOSED #define sCW SCTP_CONNTRACK_COOKIE_WAIT #define sCE SCTP_CONNTRACK_COOKIE_ECHOED #define sES SCTP_CONNTRACK_ESTABLISHED #define sSS SCTP_CONNTRACK_SHUTDOWN_SENT #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT #define sIV SCTP_CONNTRACK_MAX /* These are the descriptions of the states: NOTE: These state names are tantalizingly similar to the states of an 
SCTP endpoint. But the interpretation of the states is a little different, considering that these are the states of the connection and not of an end point. Please note the subtleties. -Kiran NONE - Nothing so far. COOKIE WAIT - We have seen an INIT chunk in the original direction, or also an INIT_ACK chunk in the reply direction. COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction. ESTABLISHED - We have seen a COOKIE_ACK in the reply direction. SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction. SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply directoin. SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite to that of the SHUTDOWN chunk. CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of the SHUTDOWN chunk. Connection is closed. */ /* TODO - I have assumed that the first INIT is in the original direction. This messes things when an INIT comes in the reply direction in CLOSED state. - Check the error type in the reply dir before transitioning from cookie echoed to closed. - Sec 5.2.4 of RFC 2960 - Multi Homing support. 
*/ /* SCTP conntrack state transitions */ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */ /* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA}, /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA}, /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA}, /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA}, /* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/ /* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */ /* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */ /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL} }, { /* REPLY */ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */ /* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */ /* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA}, /* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, /* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA}, /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA}, /* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA}, /* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */ /* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA}, /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL} } }; static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct nf_conntrack_tuple *tuple) { const struct sctphdr *hp; struct sctphdr _hdr; /* Actually only need first 8 bytes. 
*/ hp = skb_header_pointer(skb, dataoff, 8, &_hdr); if (hp == NULL) return false; tuple->src.u.sctp.port = hp->source; tuple->dst.u.sctp.port = hp->dest; return true; } static bool sctp_invert_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig) { tuple->src.u.sctp.port = orig->dst.u.sctp.port; tuple->dst.u.sctp.port = orig->src.u.sctp.port; return true; } /* Print out the per-protocol part of the tuple. */ static int sctp_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { return seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.sctp.port), ntohs(tuple->dst.u.sctp.port)); } /* Print out the private part of the conntrack. */ static int sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) { enum sctp_conntrack state; spin_lock_bh(&ct->lock); state = ct->proto.sctp.state; spin_unlock_bh(&ct->lock); return seq_printf(s, "%s ", sctp_conntrack_names[state]); } #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \ for ((offset) = (dataoff) + sizeof(sctp_sctphdr_t), (count) = 0; \ (offset) < (skb)->len && \ ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))); \ (offset) += (ntohs((sch)->length) + 3) & ~3, (count)++) /* Some validity checks to make sure the chunks are fine */ static int do_basic_checks(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned long *map) { u_int32_t offset, count; sctp_chunkhdr_t _sch, *sch; int flag; flag = 0; for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { pr_debug("Chunk Num: %d Type: %d\n", count, sch->type); if (sch->type == SCTP_CID_INIT || sch->type == SCTP_CID_INIT_ACK || sch->type == SCTP_CID_SHUTDOWN_COMPLETE) flag = 1; /* * Cookie Ack/Echo chunks not the first OR * Init / Init Ack / Shutdown compl chunks not the only chunks * OR zero-length. 
*/ if (((sch->type == SCTP_CID_COOKIE_ACK || sch->type == SCTP_CID_COOKIE_ECHO || flag) && count != 0) || !sch->length) { pr_debug("Basic checks failed\n"); return 1; } if (map) set_bit(sch->type, map); } pr_debug("Basic checks passed\n"); return count == 0; } static int sctp_new_state(enum ip_conntrack_dir dir, enum sctp_conntrack cur_state, int chunk_type) { int i; pr_debug("Chunk type: %d\n", chunk_type); switch (chunk_type) { case SCTP_CID_INIT: pr_debug("SCTP_CID_INIT\n"); i = 0; break; case SCTP_CID_INIT_ACK: pr_debug("SCTP_CID_INIT_ACK\n"); i = 1; break; case SCTP_CID_ABORT: pr_debug("SCTP_CID_ABORT\n"); i = 2; break; case SCTP_CID_SHUTDOWN: pr_debug("SCTP_CID_SHUTDOWN\n"); i = 3; break; case SCTP_CID_SHUTDOWN_ACK: pr_debug("SCTP_CID_SHUTDOWN_ACK\n"); i = 4; break; case SCTP_CID_ERROR: pr_debug("SCTP_CID_ERROR\n"); i = 5; break; case SCTP_CID_COOKIE_ECHO: pr_debug("SCTP_CID_COOKIE_ECHO\n"); i = 6; break; case SCTP_CID_COOKIE_ACK: pr_debug("SCTP_CID_COOKIE_ACK\n"); i = 7; break; case SCTP_CID_SHUTDOWN_COMPLETE: pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n"); i = 8; break; default: /* Other chunks like DATA, SACK, HEARTBEAT and its ACK do not cause a change in state */ pr_debug("Unknown chunk type, Will stay in %s\n", sctp_conntrack_names[cur_state]); return cur_state; } pr_debug("dir: %d cur_state: %s chunk_type: %d new_state: %s\n", dir, sctp_conntrack_names[cur_state], chunk_type, sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]); return sctp_conntracks[dir][i][cur_state]; } static unsigned int *sctp_get_timeouts(struct net *net) { return sctp_timeouts; } /* Returns verdict for packet, or -NF_ACCEPT for invalid. 
*/ static int sctp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) { enum sctp_conntrack new_state, old_state; enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); const struct sctphdr *sh; struct sctphdr _sctph; const struct sctp_chunkhdr *sch; struct sctp_chunkhdr _sch; u_int32_t offset, count; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); if (sh == NULL) goto out; if (do_basic_checks(ct, skb, dataoff, map) != 0) goto out; /* Check the verification tag (Sec 8.5) */ if (!test_bit(SCTP_CID_INIT, map) && !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) && !test_bit(SCTP_CID_COOKIE_ECHO, map) && !test_bit(SCTP_CID_ABORT, map) && !test_bit(SCTP_CID_SHUTDOWN_ACK, map) && sh->vtag != ct->proto.sctp.vtag[dir]) { pr_debug("Verification tag check failed\n"); goto out; } old_state = new_state = SCTP_CONNTRACK_NONE; spin_lock_bh(&ct->lock); for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { /* Special cases of Verification tag check (Sec 8.5.1) */ if (sch->type == SCTP_CID_INIT) { /* Sec 8.5.1 (A) */ if (sh->vtag != 0) goto out_unlock; } else if (sch->type == SCTP_CID_ABORT) { /* Sec 8.5.1 (B) */ if (sh->vtag != ct->proto.sctp.vtag[dir] && sh->vtag != ct->proto.sctp.vtag[!dir]) goto out_unlock; } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { /* Sec 8.5.1 (C) */ if (sh->vtag != ct->proto.sctp.vtag[dir] && sh->vtag != ct->proto.sctp.vtag[!dir] && sch->flags & SCTP_CHUNK_FLAG_T) goto out_unlock; } else if (sch->type == SCTP_CID_COOKIE_ECHO) { /* Sec 8.5.1 (D) */ if (sh->vtag != ct->proto.sctp.vtag[dir]) goto out_unlock; } old_state = ct->proto.sctp.state; new_state = sctp_new_state(dir, old_state, sch->type); /* Invalid */ if (new_state == SCTP_CONNTRACK_MAX) { pr_debug("nf_conntrack_sctp: Invalid dir=%i ctype=%u " "conntrack=%u\n", dir, sch->type, old_state); goto out_unlock; } /* 
If it is an INIT or an INIT ACK note down the vtag */ if (sch->type == SCTP_CID_INIT || sch->type == SCTP_CID_INIT_ACK) { sctp_inithdr_t _inithdr, *ih; ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), sizeof(_inithdr), &_inithdr); if (ih == NULL) goto out_unlock; pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir); ct->proto.sctp.vtag[!dir] = ih->init_tag; } ct->proto.sctp.state = new_state; if (old_state != new_state) nf_conntrack_event_cache(IPCT_PROTOINFO, ct); } spin_unlock_bh(&ct->lock); nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED && dir == IP_CT_DIR_REPLY && new_state == SCTP_CONNTRACK_ESTABLISHED) { pr_debug("Setting assured bit\n"); set_bit(IPS_ASSURED_BIT, &ct->status); nf_conntrack_event_cache(IPCT_ASSURED, ct); } return NF_ACCEPT; out_unlock: spin_unlock_bh(&ct->lock); out: return -NF_ACCEPT; } /* Called when a new connection for this protocol found. */ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { enum sctp_conntrack new_state; const struct sctphdr *sh; struct sctphdr _sctph; const struct sctp_chunkhdr *sch; struct sctp_chunkhdr _sch; u_int32_t offset, count; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); if (sh == NULL) return false; if (do_basic_checks(ct, skb, dataoff, map) != 0) return false; /* If an OOTB packet has any of these chunks discard (Sec 8.4) */ if (test_bit(SCTP_CID_ABORT, map) || test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) || test_bit(SCTP_CID_COOKIE_ACK, map)) return false; memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp)); new_state = SCTP_CONNTRACK_MAX; for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { /* Don't need lock here: this conntrack not in circulation yet */ new_state = sctp_new_state(IP_CT_DIR_ORIGINAL, SCTP_CONNTRACK_NONE, sch->type); /* Invalid: delete conntrack */ if (new_state == 
SCTP_CONNTRACK_NONE || new_state == SCTP_CONNTRACK_MAX) { pr_debug("nf_conntrack_sctp: invalid new deleting.\n"); return false; } /* Copy the vtag into the state info */ if (sch->type == SCTP_CID_INIT) { if (sh->vtag == 0) { sctp_inithdr_t _inithdr, *ih; ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), sizeof(_inithdr), &_inithdr); if (ih == NULL) return false; pr_debug("Setting vtag %x for new conn\n", ih->init_tag); ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag; } else { /* Sec 8.5.1 (A) */ return false; } } /* If it is a shutdown ack OOTB packet, we expect a return shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ else { pr_debug("Setting vtag %x for new conn OOTB\n", sh->vtag); ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; } ct->proto.sctp.state = new_state; } return true; } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct) { struct nlattr *nest_parms; spin_lock_bh(&ct->lock); nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state); NLA_PUT_BE32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]); NLA_PUT_BE32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY, ct->proto.sctp.vtag[IP_CT_DIR_REPLY]); spin_unlock_bh(&ct->lock); nla_nest_end(skb, nest_parms); return 0; nla_put_failure: spin_unlock_bh(&ct->lock); return -1; } static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = { [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 }, [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 }, [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 }, }; static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) { struct nlattr *attr = cda[CTA_PROTOINFO_SCTP]; struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1]; int err; /* updates may 
not contain the internal protocol info, skip parsing */ if (!attr) return 0; err = nla_parse_nested(tb, CTA_PROTOINFO_SCTP_MAX, attr, sctp_nla_policy); if (err < 0) return err; if (!tb[CTA_PROTOINFO_SCTP_STATE] || !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] || !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]) return -EINVAL; spin_lock_bh(&ct->lock); ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]); ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]); ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]); spin_unlock_bh(&ct->lock); return 0; } static int sctp_nlattr_size(void) { return nla_total_size(0) /* CTA_PROTOINFO_SCTP */ + nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1); } #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) { unsigned int *timeouts = data; int i; /* set default SCTP timeouts. */ for (i=0; i<SCTP_CONNTRACK_MAX; i++) timeouts[i] = sctp_timeouts[i]; /* there's a 1:1 mapping between attributes and protocol states. 
*/ for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) { if (tb[i]) { timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; } } return 0; } static int sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; int i; for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ)); return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = { [CTA_TIMEOUT_SCTP_CLOSED] = { .type = NLA_U32 }, [CTA_TIMEOUT_SCTP_COOKIE_WAIT] = { .type = NLA_U32 }, [CTA_TIMEOUT_SCTP_COOKIE_ECHOED] = { .type = NLA_U32 }, [CTA_TIMEOUT_SCTP_ESTABLISHED] = { .type = NLA_U32 }, [CTA_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .type = NLA_U32 }, [CTA_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .type = NLA_U32 }, [CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #ifdef CONFIG_SYSCTL static unsigned int sctp_sysctl_table_users; static struct ctl_table_header *sctp_sysctl_header; static struct ctl_table sctp_sysctl_table[] = { { .procname = "nf_conntrack_sctp_timeout_closed", .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_sctp_timeout_cookie_wait", .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_sctp_timeout_cookie_echoed", .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_sctp_timeout_established", .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_sctp_timeout_shutdown_sent", .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT], .maxlen = 
sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_sctp_timeout_shutdown_recd", .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent", .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { } }; #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT static struct ctl_table sctp_compat_sysctl_table[] = { { .procname = "ip_conntrack_sctp_timeout_closed", .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ip_conntrack_sctp_timeout_cookie_wait", .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ip_conntrack_sctp_timeout_cookie_echoed", .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ip_conntrack_sctp_timeout_established", .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ip_conntrack_sctp_timeout_shutdown_sent", .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ip_conntrack_sctp_timeout_shutdown_recd", .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent", .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT], .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { } }; #endif /* 
CONFIG_NF_CONNTRACK_PROC_COMPAT */ #endif static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { .l3proto = PF_INET, .l4proto = IPPROTO_SCTP, .name = "sctp", .pkt_to_tuple = sctp_pkt_to_tuple, .invert_tuple = sctp_invert_tuple, .print_tuple = sctp_print_tuple, .print_conntrack = sctp_print_conntrack, .packet = sctp_packet, .get_timeouts = sctp_get_timeouts, .new = sctp_new, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = sctp_to_nlattr, .nlattr_size = sctp_nlattr_size, .from_nlattr = nlattr_to_sctp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = sctp_timeout_nlattr_to_obj, .obj_to_nlattr = sctp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_SCTP_MAX, .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, .nla_policy = sctp_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #ifdef CONFIG_SYSCTL .ctl_table_users = &sctp_sysctl_table_users, .ctl_table_header = &sctp_sysctl_header, .ctl_table = sctp_sysctl_table, #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT .ctl_compat_table = sctp_compat_sysctl_table, #endif #endif }; static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { .l3proto = PF_INET6, .l4proto = IPPROTO_SCTP, .name = "sctp", .pkt_to_tuple = sctp_pkt_to_tuple, .invert_tuple = sctp_invert_tuple, .print_tuple = sctp_print_tuple, .print_conntrack = sctp_print_conntrack, .packet = sctp_packet, .get_timeouts = sctp_get_timeouts, .new = sctp_new, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = sctp_to_nlattr, .nlattr_size = sctp_nlattr_size, .from_nlattr = nlattr_to_sctp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = 
nf_ct_port_nla_policy, #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = sctp_timeout_nlattr_to_obj, .obj_to_nlattr = sctp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_SCTP_MAX, .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, .nla_policy = sctp_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #endif #ifdef CONFIG_SYSCTL .ctl_table_users = &sctp_sysctl_table_users, .ctl_table_header = &sctp_sysctl_header, .ctl_table = sctp_sysctl_table, #endif }; static int __init nf_conntrack_proto_sctp_init(void) { int ret; ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4); if (ret) { pr_err("nf_conntrack_l4proto_sctp4: protocol register failed\n"); goto out; } ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6); if (ret) { pr_err("nf_conntrack_l4proto_sctp6: protocol register failed\n"); goto cleanup_sctp4; } return ret; cleanup_sctp4: nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); out: return ret; } static void __exit nf_conntrack_proto_sctp_fini(void) { nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6); nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); } module_init(nf_conntrack_proto_sctp_init); module_exit(nf_conntrack_proto_sctp_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kiran Kumar Immidi"); MODULE_DESCRIPTION("Netfilter connection tracking protocol helper for SCTP"); MODULE_ALIAS("ip_conntrack_proto_sctp");
gpl-2.0
raden/kencana-kernel
drivers/net/irda/nsc-ircc.c
5227
60907
/********************************************************************* * * Filename: nsc-ircc.c * Version: 1.0 * Description: Driver for the NSC PC'108 and PC'338 IrDA chipsets * Status: Stable. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sat Nov 7 21:43:15 1998 * Modified at: Wed Mar 1 11:29:34 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no> * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com> * Copyright (c) 1998 Actisys Corp., www.actisys.com * Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com> * All Rights Reserved * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * * Notice that all functions that needs to access the chip in _any_ * way, must save BSR register on entry, and restore it on exit. * It is _very_ important to follow this policy! * * __u8 bank; * * bank = inb(iobase+BSR); * * do_your_stuff_here(); * * outb(bank, iobase+BSR); * * If you find bugs in this file, its very likely that the same bug * will also be in w83977af_ir.c since the implementations are quite * similar. 
* ********************************************************************/ #include <linux/module.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/rtnetlink.h> #include <linux/dma-mapping.h> #include <linux/pnp.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #include <net/irda/wrapper.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> #include "nsc-ircc.h" #define CHIP_IO_EXTENT 8 #define BROKEN_DONGLE_ID static char *driver_name = "nsc-ircc"; /* Power Management */ #define NSC_IRCC_DRIVER_NAME "nsc-ircc" static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state); static int nsc_ircc_resume(struct platform_device *dev); static struct platform_driver nsc_ircc_driver = { .suspend = nsc_ircc_suspend, .resume = nsc_ircc_resume, .driver = { .name = NSC_IRCC_DRIVER_NAME, }, }; /* Module parameters */ static int qos_mtt_bits = 0x07; /* 1 ms or more */ static int dongle_id; /* Use BIOS settions by default, but user may supply module parameters */ static unsigned int io[] = { ~0, ~0, ~0, ~0, ~0 }; static unsigned int irq[] = { 0, 0, 0, 0, 0 }; static unsigned int dma[] = { 0, 0, 0, 0, 0 }; static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info); #ifdef CONFIG_PNP static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id); #endif /* These are the known NSC chips */ static nsc_chip_t chips[] = { /* Name, {cfg registers}, chip id index reg, chip 
id expected value, revision mask */ { "PC87108", { 0x150, 0x398, 0xea }, 0x05, 0x10, 0xf0, nsc_ircc_probe_108, nsc_ircc_init_108 }, { "PC87338", { 0x398, 0x15c, 0x2e }, 0x08, 0xb0, 0xf8, nsc_ircc_probe_338, nsc_ircc_init_338 }, /* Contributed by Steffen Pingel - IBM X40 */ { "PC8738x", { 0x164e, 0x4e, 0x2e }, 0x20, 0xf4, 0xff, nsc_ircc_probe_39x, nsc_ircc_init_39x }, /* Contributed by Jan Frey - IBM A30/A31 */ { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff, nsc_ircc_probe_39x, nsc_ircc_init_39x }, /* IBM ThinkPads using PC8738x (T60/X60/Z60) */ { "IBM-PC8738x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff, nsc_ircc_probe_39x, nsc_ircc_init_39x }, /* IBM ThinkPads using PC8394T (T43/R52/?) */ { "IBM-PC8394T", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf9, 0xff, nsc_ircc_probe_39x, nsc_ircc_init_39x }, { NULL } }; static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL, NULL }; static char *dongle_types[] = { "Differential serial interface", "Differential serial interface", "Reserved", "Reserved", "Sharp RY5HD01", "Reserved", "Single-ended serial interface", "Consumer-IR only", "HP HSDL-2300, HP HSDL-3600/HSDL-3610", "IBM31T1100 or Temic TFDS6000/TFDS6500", "Reserved", "Reserved", "HP HSDL-1100/HSDL-2100", "HP HSDL-1100/HSDL-2100", "Supports SIR Mode only", "No dongle connected", }; /* PNP probing */ static chipio_t pnp_info; static const struct pnp_device_id nsc_ircc_pnp_table[] = { { .id = "NSC6001", .driver_data = 0 }, { .id = "HWPC224", .driver_data = 0 }, { .id = "IBM0071", .driver_data = NSC_FORCE_DONGLE_TYPE9 }, { } }; MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table); static struct pnp_driver nsc_ircc_pnp_driver = { #ifdef CONFIG_PNP .name = "nsc-ircc", .id_table = nsc_ircc_pnp_table, .probe = nsc_ircc_pnp_probe, #endif }; /* Some prototypes */ static int nsc_ircc_open(chipio_t *info); static int nsc_ircc_close(struct nsc_ircc_cb *self); static int nsc_ircc_setup(chipio_t *info); static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self); static int 
nsc_ircc_dma_receive(struct nsc_ircc_cb *self); static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase); static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev); static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size); static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase); static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 baud); static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self); static int nsc_ircc_read_dongle_id (int iobase); static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id); static int nsc_ircc_net_open(struct net_device *dev); static int nsc_ircc_net_close(struct net_device *dev); static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); /* Globals */ static int pnp_registered; static int pnp_succeeded; /* * Function nsc_ircc_init () * * Initialize chip. 
Just try to find out how many chips we are dealing with * and where they are */ static int __init nsc_ircc_init(void) { chipio_t info; nsc_chip_t *chip; int ret; int cfg_base; int cfg, id; int reg; int i = 0; ret = platform_driver_register(&nsc_ircc_driver); if (ret) { IRDA_ERROR("%s, Can't register driver!\n", driver_name); return ret; } /* Register with PnP subsystem to detect disable ports */ ret = pnp_register_driver(&nsc_ircc_pnp_driver); if (!ret) pnp_registered = 1; ret = -ENODEV; /* Probe for all the NSC chipsets we know about */ for (chip = chips; chip->name ; chip++) { IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name); /* Try all config registers for this chip */ for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) { cfg_base = chip->cfg[cfg]; if (!cfg_base) continue; /* Read index register */ reg = inb(cfg_base); if (reg == 0xff) { IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __func__, cfg_base); continue; } /* Read chip identification register */ outb(chip->cid_index, cfg_base); id = inb(cfg_base+1); if ((id & chip->cid_mask) == chip->cid_value) { IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n", __func__, chip->name, id & ~chip->cid_mask); /* * If we found a correct PnP setting, * we first try it. */ if (pnp_succeeded) { memset(&info, 0, sizeof(chipio_t)); info.cfg_base = cfg_base; info.fir_base = pnp_info.fir_base; info.dma = pnp_info.dma; info.irq = pnp_info.irq; if (info.fir_base < 0x2000) { IRDA_MESSAGE("%s, chip->init\n", driver_name); chip->init(chip, &info); } else chip->probe(chip, &info); if (nsc_ircc_open(&info) >= 0) ret = 0; } /* * Opening based on PnP values failed. * Let's fallback to user values, or probe * the chip. 
*/ if (ret) { IRDA_DEBUG(2, "%s, PnP init failed\n", driver_name); memset(&info, 0, sizeof(chipio_t)); info.cfg_base = cfg_base; info.fir_base = io[i]; info.dma = dma[i]; info.irq = irq[i]; /* * If the user supplies the base address, then * we init the chip, if not we probe the values * set by the BIOS */ if (io[i] < 0x2000) { chip->init(chip, &info); } else chip->probe(chip, &info); if (nsc_ircc_open(&info) >= 0) ret = 0; } i++; } else { IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __func__, id); } } } if (ret) { platform_driver_unregister(&nsc_ircc_driver); pnp_unregister_driver(&nsc_ircc_pnp_driver); pnp_registered = 0; } return ret; } /* * Function nsc_ircc_cleanup () * * Close all configured chips * */ static void __exit nsc_ircc_cleanup(void) { int i; for (i = 0; i < ARRAY_SIZE(dev_self); i++) { if (dev_self[i]) nsc_ircc_close(dev_self[i]); } platform_driver_unregister(&nsc_ircc_driver); if (pnp_registered) pnp_unregister_driver(&nsc_ircc_pnp_driver); pnp_registered = 0; } static const struct net_device_ops nsc_ircc_sir_ops = { .ndo_open = nsc_ircc_net_open, .ndo_stop = nsc_ircc_net_close, .ndo_start_xmit = nsc_ircc_hard_xmit_sir, .ndo_do_ioctl = nsc_ircc_net_ioctl, }; static const struct net_device_ops nsc_ircc_fir_ops = { .ndo_open = nsc_ircc_net_open, .ndo_stop = nsc_ircc_net_close, .ndo_start_xmit = nsc_ircc_hard_xmit_fir, .ndo_do_ioctl = nsc_ircc_net_ioctl, }; /* * Function nsc_ircc_open (iobase, irq) * * Open driver instance * */ static int __init nsc_ircc_open(chipio_t *info) { struct net_device *dev; struct nsc_ircc_cb *self; void *ret; int err, chip_index; IRDA_DEBUG(2, "%s()\n", __func__); for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) { if (!dev_self[chip_index]) break; } if (chip_index == ARRAY_SIZE(dev_self)) { IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__); return -ENOMEM; } IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, info->cfg_base); if ((nsc_ircc_setup(info)) == -1) return 
-1;

	IRDA_MESSAGE("%s, driver loaded (Dag Brattli)\n", driver_name);

	/* Allocate an IrDA-flavoured net_device with our control block
	 * appended as netdev private data. */
	dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
	if (dev == NULL) {
		IRDA_ERROR("%s(), can't allocate memory for "
			   "control block!\n", __func__);
		return -ENOMEM;
	}

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	/* Need to store self somewhere, so the interrupt/cleanup code can
	 * find this instance again by chip index. */
	dev_self[chip_index] = self;
	self->index = chip_index;

	/* Initialize IO addresses/IRQ/DMA from the probed chip info. */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;
	self->io.fifo_size = 32;

	/* Reserve the ioports that we need */
	ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
	if (!ret) {
		IRDA_WARNING("%s(), can't get iobase of 0x%03x\n",
			     __func__, self->io.fir_base);
		err = -ENODEV;
		goto out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baudrate */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000 |(IR_4000000 << 8);

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 14384;

	/* Allocate memory if needed.
	 * NOTE(review): dma_alloc_coherent() is called with a NULL struct
	 * device here — this relies on legacy behaviour of the DMA API and
	 * should be confirmed against the kernel version in use. */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto out2;
	}
	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto out3;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use.
	 * Start in SIR mode; nsc_ircc_change_speed() swaps in the FIR ops
	 * when the link speed goes above 115200. */
	dev->netdev_ops = &nsc_ircc_sir_ops;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
		goto out4;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Check if user has supplied a valid dongle id or not; if not,
	 * ask the hardware which dongle is attached. */
	if ((dongle_id <= 0) || (dongle_id >= ARRAY_SIZE(dongle_types))) {
		dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);

		IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name,
			     dongle_types[dongle_id]);
	} else {
		IRDA_MESSAGE("%s, Using dongle: %s\n", driver_name,
			     dongle_types[dongle_id]);
	}

	self->io.dongle_id = dongle_id;
	nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id);

	self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME,
						      self->index, NULL, 0);
	if (IS_ERR(self->pldev)) {
		err = PTR_ERR(self->pldev);
		goto out5;
	}
	platform_set_drvdata(self->pldev, self);

	return chip_index;

	/* Error unwinding: each label undoes exactly the steps that
	 * succeeded before the failure point, in reverse order. */
 out5:
	unregister_netdev(dev);
 out4:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 out3:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 out1:
	free_netdev(dev);
	dev_self[chip_index] = NULL;
	return err;
}

/*
 * Function nsc_ircc_close (self)
 *
 *    Close driver instance: tear down in the reverse order of
 *    nsc_ircc_open() — platform device, netdev, I/O region, DMA buffers.
 *
 */
static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
{
	int iobase;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);

	iobase = self->io.fir_base;

	platform_device_unregister(self->pldev);

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(4, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	dev_self[self->index] = NULL;
	free_netdev(self->netdev);

	return 0;
}

/*
 * Function nsc_ircc_init_108 (iobase, cfg_base, irq, dma)
 *
 *    Initialize the NSC '108 chip: program base address, IRQ and DMA
 *    routing through the Super-I/O configuration registers, then enable
 *    the device.
 *
 */
static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
{
	int cfg_base = info->cfg_base;
	__u8 temp=0;

	outb(2, cfg_base);	/* Mode Control Register (MCTL) */
	outb(0x00, cfg_base+1);	/* Disable device */

	/* Base Address and Interrupt Control Register (BAIC) */
	outb(CFG_108_BAIC, cfg_base);
	switch (info->fir_base) {
	case 0x3e8: outb(0x14, cfg_base+1); break;
	case 0x2e8: outb(0x15, cfg_base+1); break;
	case 0x3f8: outb(0x16, cfg_base+1); break;
	case 0x2f8: outb(0x17, cfg_base+1); break;
	/* NOTE(review): on an invalid base address the BAIC data byte is
	 * never written, yet initialization continues — confirm intended. */
	default: IRDA_ERROR("%s(), invalid base_address", __func__);
	}

	/* Control Signal Routing Register (CSRT) */
	switch (info->irq) {
	case 3:  temp = 0x01; break;
	case 4:  temp = 0x02; break;
	case 5:  temp = 0x03; break;
	case 7:  temp = 0x04; break;
	case 9:  temp = 0x05; break;
	case 11: temp = 0x06; break;
	case 15: temp = 0x07; break;
	/* NOTE(review): temp stays 0 (no IRQ routed) on invalid irq but the
	 * CSRT write below still happens — confirm intended. */
	default: IRDA_ERROR("%s(), invalid irq", __func__);
	}
	outb(CFG_108_CSRT, cfg_base);

	/* DMA channel select is combined with the IRQ code in CSRT data. */
	switch (info->dma) {
	case 0: outb(0x08+temp, cfg_base+1); break;
	case 1: outb(0x10+temp, cfg_base+1); break;
	case 3: outb(0x18+temp, cfg_base+1); break;
	default: IRDA_ERROR("%s(), invalid dma", __func__);
	}

	outb(CFG_108_MCTL, cfg_base);	/* Mode Control Register (MCTL) */
	outb(0x03, cfg_base+1);		/* Enable device */

	return 0;
}

/*
 * Function nsc_ircc_probe_108 (chip, info)
 *
 *    Read back the current '108 configuration (base address, IRQ, DMA,
 *    enabled/suspended state) into *info.
 *
 */
static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
{
	int cfg_base = info->cfg_base;
	int reg;

	/* Read address and interrupt control register (BAIC) */
	outb(CFG_108_BAIC, cfg_base);
	reg = inb(cfg_base+1);

	switch (reg & 0x03) {
	case 0:
		info->fir_base = 0x3e8;
		break;
	case 1:
		info->fir_base = 0x2e8;
		break;
	case 2:
		info->fir_base = 0x3f8;
		break;
	case 3:
		info->fir_base = 0x2f8;
		break;
	}
	info->sir_base = info->fir_base;
	IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__,
		   info->fir_base);

	/* Read control signals routing register (CSRT) */
	outb(CFG_108_CSRT, cfg_base);
	reg = inb(cfg_base+1);

	switch (reg & 0x07) {
	case 0:
		info->irq = -1;
		break;
	case 1:
		info->irq = 3;
		break;
	case 2:
		info->irq = 4;
		break;
	case 3:
		info->irq = 5;
		break;
	case 4:
		info->irq = 7;
		break;
	case 5:
		info->irq = 9;
		break;
	case 6:
		info->irq = 11;
		break;
	case 7:
		info->irq = 15;
		break;
	}
	IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);

	/* Currently we only read Rx DMA but it will also be used for Tx */
	switch ((reg >> 3) & 0x03) {
	case 0:
		info->dma = -1;
		break;
	case 1:
		info->dma = 0;
		break;
	case 2:
		info->dma = 1;
		break;
	case 3:
		info->dma = 3;
		break;
	}
	IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);

	/* Read mode control register (MCTL) */
	outb(CFG_108_MCTL, cfg_base);
	reg = inb(cfg_base+1);

	info->enabled = reg & 0x01;
	info->suspended = !((reg >> 1) & 0x01);

	return 0;
}

/*
 * Function nsc_ircc_init_338 (chip, info)
 *
 *    Initialize the NSC '338 chip. Remember that the 87338 needs two
 *    consecutive writes to the data registers while CPU interrupts are
 *    disabled. The 97338 does not require this, but shouldn't be any
 *    harm if we do it anyway.
*/
static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info)
{
	/* No init yet */
	return 0;
}

/*
 * Function nsc_ircc_probe_338 (chip, info)
 *
 *    Read back the current '338 configuration (PnP vs legacy base
 *    address decode, IRQ, DMA, enabled/suspended state) into *info.
 *
 */
static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info)
{
	int cfg_base = info->cfg_base;
	int reg, com = 0;
	int pnp;

	/* Read function enable register (FER) */
	outb(CFG_338_FER, cfg_base);
	reg = inb(cfg_base+1);

	info->enabled = (reg >> 2) & 0x01;

	/* Check if we are in Legacy or PnP mode */
	outb(CFG_338_PNP0, cfg_base);
	reg = inb(cfg_base+1);

	pnp = (reg >> 3) & 0x01;
	if (pnp) {
		IRDA_DEBUG(2, "(), Chip is in PnP mode\n");
		/* In PnP mode the base address is read directly from the
		 * PnP address registers 0x46/0x47 (low/high parts). */
		outb(0x46, cfg_base);
		reg = (inb(cfg_base+1) & 0xfe) << 2;

		outb(0x47, cfg_base);
		reg |= ((inb(cfg_base+1) & 0xfc) << 8);
		info->fir_base = reg;
	} else {
		/* Read function address register (FAR) */
		outb(CFG_338_FAR, cfg_base);
		reg = inb(cfg_base+1);

		switch ((reg >> 4) & 0x03) {
		case 0:
			info->fir_base = 0x3f8;
			break;
		case 1:
			info->fir_base = 0x2f8;
			break;
		case 2:
			com = 3;
			break;
		case 3:
			com = 4;
			break;
		}

		/* COM3/COM4 need a second decode step from FAR bits 6-7. */
		if (com) {
			switch ((reg >> 6) & 0x03) {
			case 0:
				if (com == 3)
					info->fir_base = 0x3e8;
				else
					info->fir_base = 0x2e8;
				break;
			case 1:
				if (com == 3)
					info->fir_base = 0x338;
				else
					info->fir_base = 0x238;
				break;
			case 2:
				if (com == 3)
					info->fir_base = 0x2e8;
				else
					info->fir_base = 0x2e0;
				break;
			case 3:
				if (com == 3)
					info->fir_base = 0x220;
				else
					info->fir_base = 0x228;
				break;
			}
		}
	}
	info->sir_base = info->fir_base;

	/* Read PnP register 1 (PNP1) */
	outb(CFG_338_PNP1, cfg_base);
	reg = inb(cfg_base+1);

	info->irq = reg >> 4;

	/* Read PnP register 3 (PNP3) */
	outb(CFG_338_PNP3, cfg_base);
	reg = inb(cfg_base+1);

	info->dma = (reg & 0x07) - 1;

	/* Read power and test register (PTR) */
	outb(CFG_338_PTR, cfg_base);
	reg = inb(cfg_base+1);

	info->suspended = reg & 0x01;

	return 0;
}

/*
 * Function nsc_ircc_init_39x (chip, info)
 *
 *    Now that we know it's a '39x (see probe below), we need to
 *    configure it so we can use it.
*
 * The NSC '338 chip is a Super I/O chip with a "bank" architecture,
 * the configuration of the different functionality (serial, parallel,
 * floppy...) are each in a different bank (Logical Device Number).
 * The base address, irq and dma configuration registers are common
 * to all functionalities (index 0x30 to 0x7F).
 * There is only one configuration register specific to the
 * serial port, CFG_39X_SPC.
 * JeanII
 *
 * Note : this code was written by Jan Frey <janfrey@web.de>
 */
static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
{
	int cfg_base = info->cfg_base;
	int enabled;

	/* User is sure about his config... accept it. */
	IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
		   "io=0x%04x, irq=%d, dma=%d\n",
		   __func__, info->fir_base, info->irq, info->dma);

	/* Access bank for SP2 */
	outb(CFG_39X_LDN, cfg_base);
	outb(0x02, cfg_base+1);

	/* Configure SP2 */

	/* We want to enable the device if not enabled */
	outb(CFG_39X_ACT, cfg_base);
	enabled = inb(cfg_base+1) & 0x01;

	if (!enabled) {
		/* Enable the device */
		outb(CFG_39X_SIOCF1, cfg_base);
		outb(0x01, cfg_base+1);
		/* May want to update info->enabled. Jean II */
	}

	/* Enable UART bank switching (bit 7) ; Sets the chip to normal
	 * power mode (wake up from sleep mode) (bit 1) */
	outb(CFG_39X_SPC, cfg_base);
	outb(0x82, cfg_base+1);

	return 0;
}

/*
 * Function nsc_ircc_probe_39x (chip, info)
 *
 *    Test if we really have a '39x chip at the given address, and read
 *    back its base address / IRQ / DMA configuration into *info.
 *
 * Note : this code was written by Jan Frey <janfrey@web.de>
 */
static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
{
	int cfg_base = info->cfg_base;
	int reg1, reg2, irq, irqt, dma1, dma2;
	int enabled, susp;

	IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n",
		   __func__, cfg_base);

	/* This function should be executed with irq off to avoid
	 * another driver messing with the Super I/O bank - Jean II */

	/* Access bank for SP2 */
	outb(CFG_39X_LDN, cfg_base);
	outb(0x02, cfg_base+1);

	/* Read infos about SP2 ; store in info struct */
	outb(CFG_39X_BASEH, cfg_base);
	reg1 = inb(cfg_base+1);
	outb(CFG_39X_BASEL, cfg_base);
	reg2 = inb(cfg_base+1);
	info->fir_base = (reg1 << 8) | reg2;

	outb(CFG_39X_IRQNUM, cfg_base);
	irq = inb(cfg_base+1);
	outb(CFG_39X_IRQSEL, cfg_base);
	irqt = inb(cfg_base+1);
	info->irq = irq;

	outb(CFG_39X_DMA0, cfg_base);
	dma1 = inb(cfg_base+1);
	outb(CFG_39X_DMA1, cfg_base);
	dma2 = inb(cfg_base+1);
	info->dma = dma1 -1;

	outb(CFG_39X_ACT, cfg_base);
	info->enabled = enabled = inb(cfg_base+1) & 0x01;
	outb(CFG_39X_SPC, cfg_base);
	susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);

	IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __func__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);

	/* Configure SP2 */

	/* We want to enable the device if not enabled */
	outb(CFG_39X_ACT, cfg_base);
	enabled = inb(cfg_base+1) & 0x01;

	if (!enabled) {
		/* Enable the device */
		outb(CFG_39X_SIOCF1, cfg_base);
		outb(0x01, cfg_base+1);
		/* May want to update info->enabled. Jean II */
	}

	/* Enable UART bank switching (bit 7) ; Sets the chip to normal
	 * power mode (wake up from sleep mode) (bit 1) */
	outb(CFG_39X_SPC, cfg_base);
	outb(0x82, cfg_base+1);

	return 0;
}

#ifdef CONFIG_PNP
/* PNP probing: fill the module-global pnp_info from the PnP resources
 * and record success/failure in pnp_succeeded. */
static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
	memset(&pnp_info, 0, sizeof(chipio_t));
	pnp_info.irq = -1;
	pnp_info.dma = -1;
	pnp_succeeded = 1;

	if (id->driver_data & NSC_FORCE_DONGLE_TYPE9)
		dongle_id = 0x9;

	/* There doesn't seem to be any way of getting the cfg_base.
	 * On my box, cfg_base is in the PnP descriptor of the
	 * motherboard. Oh well... Jean II */

	if (pnp_port_valid(dev, 0) &&
		!(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED))
		pnp_info.fir_base = pnp_port_start(dev, 0);

	if (pnp_irq_valid(dev, 0) &&
		!(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED))
		pnp_info.irq = pnp_irq(dev, 0);

	if (pnp_dma_valid(dev, 0) &&
		!(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED))
		pnp_info.dma = pnp_dma(dev, 0);

	IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
		   __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);

	if((pnp_info.fir_base == 0) ||
	   (pnp_info.irq == -1) || (pnp_info.dma == -1)) {
		/* Returning an error will disable the device. Yuck ! */
		//return -EINVAL;
		pnp_succeeded = 0;
	}

	return 0;
}
#endif

/*
 * Function nsc_ircc_setup (info)
 *
 *    Verify the chip version and program the UART/FIFO defaults.
 *    Returns non-negative on success.
 *
 */
static int nsc_ircc_setup(chipio_t *info)
{
	int version;
	int iobase = info->fir_base;

	/* Read the Module ID */
	switch_bank(iobase, BANK3);
	version = inb(iobase+MID);

	IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
		   __func__, driver_name, version);

	/* Should be 0x2? */
	if (0x20 != (version & 0xf0)) {
		IRDA_ERROR("%s, Wrong chip version %02x\n",
			   driver_name, version);
		return -1;
	}

	/* Switch to advanced mode */
	switch_bank(iobase, BANK2);
	outb(ECR1_EXT_SL, iobase+ECR1);
	switch_bank(iobase, BANK0);

	/* Set FIFO threshold to TX17, RX16, reset and enable FIFO's */
	switch_bank(iobase, BANK0);
	outb(FCR_RXTH|FCR_TXTH|FCR_TXSR|FCR_RXSR|FCR_FIFO_EN, iobase+FCR);

	outb(0x03, iobase+LCR);		/* 8 bit word length */
	outb(MCR_SIR, iobase+MCR);	/* Start at SIR-mode, also clears LSR*/

	/* Set FIFO size to 32 */
	switch_bank(iobase, BANK2);
	outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);

	/* IRCR2: FEND_MD is not set */
	switch_bank(iobase, BANK5);
	outb(0x02, iobase+4);

	/* Make sure that some defaults are OK */
	switch_bank(iobase, BANK6);
	outb(0x20, iobase+0); /* Set 32 bits FIR CRC */
	outb(0x0a, iobase+1); /* Set MIR pulse width */
	outb(0x0d, iobase+2); /* Set SIR pulse width to 1.6us */
	outb(0x2a, iobase+4); /* Set beginning frag, and preamble length */

	/* Enable receive interrupts */
	switch_bank(iobase, BANK0);
	outb(IER_RXHDL_IE, iobase+IER);

	return 0;
}

/*
 * Function nsc_ircc_read_dongle_id (void)
 *
 * Try to read dongle identification. This procedure needs to be executed
 * once after power-on/reset. It also needs to be used whenever you suspect
 * that the user may have plugged/unplugged the IrDA Dongle.
*/
static int nsc_ircc_read_dongle_id (int iobase)
{
	int dongle_id;
	__u8 bank;

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Select Bank 7 */
	switch_bank(iobase, BANK7);

	/* IRCFG4: IRSL0_DS and IRSL21_DS are cleared */
	outb(0x00, iobase+7);

	/* ID0, 1, and 2 are pulled up/down very slowly */
	udelay(50);

	/* IRCFG1: read the ID bits */
	dongle_id = inb(iobase+4) & 0x0f;

#ifdef BROKEN_DONGLE_ID
	if (dongle_id == 0x0a)
		dongle_id = 0x09;
#endif
	/* Go back to bank 0 before returning */
	switch_bank(iobase, BANK0);

	/* Restore the caller's bank selection. */
	outb(bank, iobase+BSR);

	return dongle_id;
}

/*
 * Function nsc_ircc_init_dongle_interface (iobase, dongle_id)
 *
 *     This function initializes the dongle for the transceiver that is
 *     used. This procedure needs to be executed once after
 *     power-on/reset. It also needs to be used whenever you suspect that
 *     the dongle is changed.
 */
static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
{
	int bank;

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Select Bank 7 */
	switch_bank(iobase, BANK7);

	/* IRCFG4: set according to dongle_id */
	switch (dongle_id) {
	case 0x00: /* same as */
	case 0x01: /* Differential serial interface */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x02: /* same as */
	case 0x03: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x04: /* Sharp RY5HD01 */
		break;
	case 0x05: /* Reserved, but this is what the Thinkpad reports */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x06: /* Single-ended serial interface */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x07: /* Consumer-IR only */
		IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		IRDA_DEBUG(0, "%s(), %s\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		outb(0x28, iobase+7); /* Set irsl[0-2] as output */
		break;
	case 0x0A: /* same as */
	case 0x0B: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x0C: /* same as */
	case 0x0D: /* HP HSDL-1100/HSDL-2100 */
		/*
		 * Set irsl0 as input, irsl[1-2] as output, and separate
		 * inputs are used for SIR and MIR/FIR
		 */
		outb(0x48, iobase+7);
		break;
	case 0x0E: /* Supports SIR Mode only */
		outb(0x28, iobase+7); /* Set irsl[0-2] as output */
		break;
	case 0x0F: /* No dongle connected */
		IRDA_DEBUG(0, "%s(), %s\n",
			   __func__, dongle_types[dongle_id]);

		switch_bank(iobase, BANK0);
		outb(0x62, iobase+MCR);
		break;
	default:
		IRDA_DEBUG(0, "%s(), invalid dongle_id %#x", __func__, dongle_id);
	}

	/* IRCFG1: IRSL1 and 2 are set to IrDA mode */
	outb(0x00, iobase+4);

	/* Restore bank register */
	outb(bank, iobase+BSR);

} /* set_up_dongle_interface */

/*
 * Function nsc_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *
 *    Change speed of the attached dongle
 *
 */
static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
{
	__u8 bank;

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Select Bank 7 */
	switch_bank(iobase, BANK7);

	/* IRCFG1: set according to dongle_id */
	switch (dongle_id) {
	case 0x00: /* same as */
	case 0x01: /* Differential serial interface */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x02: /* same as */
	case 0x03: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x04: /* Sharp RY5HD01 */
		break;
	case 0x05: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x06: /* Single-ended serial interface */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x07: /* Consumer-IR only */
		IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		IRDA_DEBUG(0, "%s(), %s\n", __func__, dongle_types[dongle_id]);
		outb(0x00, iobase+4);
		if (speed > 115200)
			outb(0x01, iobase+4);
		break;
	case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		outb(0x01, iobase+4);

		if (speed == 4000000) {
			/* There was a cli() there, but we now are already
			 * under spin_lock_irqsave() - JeanII */
			outb(0x81, iobase+4);
			outb(0x80, iobase+4);
		} else
			outb(0x00, iobase+4);
		break;
	case 0x0A: /* same as */
	case 0x0B: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x0C: /* same as */
	case 0x0D: /* HP HSDL-1100/HSDL-2100 */
		break;
	case 0x0E: /* Supports SIR Mode only */
		break;
	case 0x0F: /* No dongle connected */
		IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
			   __func__, dongle_types[dongle_id]);

		switch_bank(iobase, BANK0);
		outb(0x62, iobase+MCR);
		break;
	default:
		IRDA_DEBUG(0, "%s(), invalid data_rate\n", __func__);
	}
	/* Restore bank register */
	outb(bank, iobase+BSR);
}

/*
 * Function nsc_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device
 *
 * This function *must* be called with irq off and spin-lock.
*/
static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	__u8 mcr = MCR_SIR;
	int iobase;
	__u8 bank;
	__u8 ier;  /* Interrupt enable register */

	IRDA_DEBUG(2, "%s(), speed=%d\n", __func__, speed);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Disable interrupts */
	switch_bank(iobase, BANK0);
	outb(0, iobase+IER);

	/* Select Bank 2 */
	switch_bank(iobase, BANK2);

	/* Program the baud generator divisor (BGDH:BGDL) for SIR rates;
	 * MIR/FIR rates select a mode via MCR instead. */
	outb(0x00, iobase+BGDH);
	switch (speed) {
	case 9600:   outb(0x0c, iobase+BGDL); break;
	case 19200:  outb(0x06, iobase+BGDL); break;
	case 38400:  outb(0x03, iobase+BGDL); break;
	case 57600:  outb(0x02, iobase+BGDL); break;
	case 115200: outb(0x01, iobase+BGDL); break;
	case 576000:
		switch_bank(iobase, BANK5);

		/* IRCR2: MDRS is set */
		outb(inb(iobase+4) | 0x04, iobase+4);

		mcr = MCR_MIR;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
		break;
	case 1152000:
		mcr = MCR_MIR;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
		break;
	case 4000000:
		mcr = MCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
		break;
	default:
		mcr = MCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n",
			   __func__, speed);
		break;
	}

	/* Set appropriate speed mode */
	switch_bank(iobase, BANK0);
	outb(mcr | MCR_TX_DFR, iobase+MCR);

	/* Give some hints to the transceiver */
	nsc_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, BANK0);
	outb(0x00, iobase+FCR);
	outb(FCR_FIFO_EN, iobase+FCR);
	outb(FCR_RXTH|     /* Set Rx FIFO threshold */
	     FCR_TXTH|     /* Set Tx FIFO threshold */
	     FCR_TXSR|     /* Reset Tx FIFO */
	     FCR_RXSR|     /* Reset Rx FIFO */
	     FCR_FIFO_EN,  /* Enable FIFOs */
	     iobase+FCR);

	/* Set FIFO size to 32 */
	switch_bank(iobase, BANK2);
	outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, BANK0);
	if (speed > 115200) {
		/* Install FIR xmit handler */
		dev->netdev_ops = &nsc_ircc_fir_ops;
		ier = IER_SFIF_IE;
		nsc_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &nsc_ircc_sir_ops;
		ier = IER_RXHDL_IE;
	}
	/* Set our current interrupt mask */
	outb(ier, iobase+IER);

	/* Restore BSR */
	outb(bank, iobase+BSR);

	/* Make sure interrupt handlers keep the proper interrupt mask */
	return ier;
}

/*
 * Function nsc_ircc_hard_xmit (skb, dev)
 *
 *    Transmit the frame! (SIR flavour: PIO via the UART Tx FIFO.)
 *
 */
static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct nsc_ircc_cb *self;
	unsigned long flags;
	int iobase;
	__s32 speed;
	__u8 bank;

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);

	iobase = self->io.fir_base;

	netif_stop_queue(dev);

	/* Make sure tests & speed change are atomic */
	spin_lock_irqsave(&self->lock, flags);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame. */
		if (!skb->len) {
			/* If we just sent a frame, we get called before
			 * the last bytes get out (because of the SIR FIFO).
			 * If this is the case, let interrupt handler change
			 * the speed itself... Jean II */
			if (self->io.direction == IO_RECV) {
				nsc_ircc_change_speed(self, speed);
				/* TODO : For SIR->SIR, the next packet
				 * may get corrupted - Jean II */
				netif_wake_queue(dev);
			} else {
				self->new_speed = speed;
				/* Queue will be restarted after speed change
				 * to make sure packets gets through the
				 * proper xmit handler - Jean II */
			}
			dev->trans_start = jiffies;
			spin_unlock_irqrestore(&self->lock, flags);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Async-wrap the frame into the Tx buffer; the TXLDL interrupt
	 * handler (nsc_ircc_sir_interrupt) drains it into the FIFO. */
	self->tx_buff.data = self->tx_buff.head;

	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
					   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;

	/* Add interrupt on tx low level (will fire immediately) */
	switch_bank(iobase, BANK0);
	outb(IER_TXLDL_IE, iobase+IER);

	/* Restore bank register */
	outb(bank, iobase+BSR);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/*
 * FIR flavour of hard_xmit: queue frames into the DMA Tx window and
 * kick off a DMA transfer, honouring the IrDA minimum turn time.
 */
static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct nsc_ircc_cb *self;
	unsigned long flags;
	int iobase;
	__s32 speed;
	__u8 bank;
	int mtt, diff;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);

	/* Make sure tests & speed change are atomic */
	spin_lock_irqsave(&self->lock, flags);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame. */
		if (!skb->len) {
			/* If we are currently transmitting, defer to
			 * interrupt handler. - Jean II */
			if(self->tx_fifo.len == 0) {
				nsc_ircc_change_speed(self, speed);
				netif_wake_queue(dev);
			} else {
				self->new_speed = speed;
				/* Keep queue stopped :
				 * the speed change operation may change the
				 * xmit handler, and we want to make sure
				 * the next packet get through the proper
				 * Tx path, so block the Tx queue until
				 * the speed change has been done.
				 * Jean II */
			}
			dev->trans_start = jiffies;
			spin_unlock_irqrestore(&self->lock, flags);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else {
			/* Change speed after current frame */
			self->new_speed = speed;
		}
	}

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Register and copy this frame to DMA memory */
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
	self->tx_fifo.tail += skb->len;

	dev->stats.tx_bytes += skb->len;

	skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
		      skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;

	/* Start transmit only if there is currently no transmit going on */
	if (self->tx_fifo.len == 1) {
		/* Check if we must wait the min turn time or not */
		mtt = irda_get_mtt(skb);
		if (mtt) {
			/* Check how much time we have used already */
			do_gettimeofday(&self->now);
			diff = self->now.tv_usec - self->stamp.tv_usec;
			/* self->stamp was taken when the last frame was
			 * received; handle tv_usec wraparound. */
			if (diff < 0)
				diff += 1000000;

			/* Check if the mtt is larger than the time we have
			 * already used by all the protocol processing
			 */
			if (mtt > diff) {
				mtt -= diff;

				/*
				 * Use timer if delay larger than 125 us, and
				 * use udelay for smaller values which should
				 * be acceptable
				 */
				if (mtt > 125) {
					/* Adjust for timer resolution */
					mtt = mtt / 125;

					/* Setup timer */
					switch_bank(iobase, BANK4);
					outb(mtt & 0xff, iobase+TMRL);
					outb((mtt >> 8) & 0x0f, iobase+TMRH);

					/* Start timer */
					outb(IRCR1_TMR_EN, iobase+IRCR1);
					self->io.direction = IO_XMIT;

					/* Enable timer interrupt */
					switch_bank(iobase, BANK0);
					outb(IER_TMR_IE, iobase+IER);

					/* Timer will take care of the rest */
					goto out;
				} else
					udelay(mtt);
			}
		}
		/* Enable DMA interrupt */
		switch_bank(iobase, BANK0);
		outb(IER_DMA_IE, iobase+IER);

		/* Transmit frame */
		nsc_ircc_dma_xmit(self, iobase);
	}
 out:
	/* Not busy transmitting anymore if window is not full,
	 * and if we don't need to change speed */
	if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0))
		netif_wake_queue(self->netdev);

	/* Restore bank register */
	outb(bank, iobase+BSR);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/*
 * Function nsc_ircc_dma_xmit (self, iobase)
 *
 *    Transmit data using DMA
 *
 */
static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase)
{
	int bsr;

	/* Save current bank */
	bsr = inb(iobase+BSR);

	/* Disable DMA */
	switch_bank(iobase, BANK0);
	outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);

	self->io.direction = IO_XMIT;

	/* Choose transmit DMA channel */
	switch_bank(iobase, BANK2);
	outb(ECR1_DMASWP|ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);

	/* DMA address = bus address of the Tx buffer plus the offset of
	 * the current frame within it. */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len,
		       DMA_TX_MODE);

	/* Enable DMA and SIR interaction pulse */
	switch_bank(iobase, BANK0);
	outb(inb(iobase+MCR)|MCR_TX_DFR|MCR_DMA_EN|MCR_IR_PLS, iobase+MCR);

	/* Restore bank register */
	outb(bsr, iobase+BSR);
}

/*
 * Function nsc_ircc_pio_xmit (self, iobase)
 *
 *    Transmit data using PIO. Returns the number of bytes that actually
 *    got transferred
 *
 */
static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
	int actual = 0;
	__u8 bank;

	IRDA_DEBUG(4, "%s()\n", __func__);

	/* Save current bank */
	bank = inb(iobase+BSR);

	switch_bank(iobase, BANK0);
	if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
		IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n",
			   __func__);

		/* FIFO may still be filled to the Tx interrupt threshold */
		fifo_size -= 17;
	}

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase+TXD);
	}

	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
		   __func__, fifo_size, actual, len);

	/* Restore bank */
	outb(bank, iobase+BSR);

	return actual;
}

/*
 * Function nsc_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished.
This function will only be called
 *    by the interrupt handler.
 *
 */
static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
{
	int fir_base = self->io.fir_base;
	__u8 bsr_save;
	int all_done = TRUE;

	IRDA_DEBUG(2, "%s()\n", __func__);

	/* Remember which bank the chip was left in so it can be restored
	 * before returning. */
	bsr_save = inb(fir_base+BSR);

	/* Stop the DMA engine before touching the Tx bookkeeping. */
	switch_bank(fir_base, BANK0);
	outb(inb(fir_base+MCR) & ~MCR_DMA_EN, fir_base+MCR);

	/* Did the transmitter underrun? The chip flags it in ASCR and the
	 * bit is cleared by writing a 1 back into it. */
	if (inb(fir_base+ASCR) & ASCR_TXUR) {
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		outb(ASCR_TXUR, fir_base+ASCR);
	} else {
		self->netdev->stats.tx_packets++;
	}

	/* This frame is done with; advance the Tx window. */
	self->tx_fifo.ptr++;
	self->tx_fifo.len--;

	if (self->tx_fifo.len) {
		/* More frames queued: send the next one back-to-back. */
		nsc_ircc_dma_xmit(self, fir_base);
		all_done = FALSE;
	} else {
		/* Window fully drained: rewind the Tx FIFO bookkeeping. */
		self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
		self->tx_fifo.tail = self->tx_buff.head;
	}

	/* Let the network layer feed us more frames, but only while the
	 * window has room and no speed change is pending. */
	if (self->tx_fifo.free < MAX_TX_WINDOW && self->new_speed == 0)
		netif_wake_queue(self->netdev);

	/* Put the bank register back the way we found it. */
	outb(bsr_save, fir_base+BSR);

	return all_done;
}

/*
 * Function nsc_ircc_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
*
 */
static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self)
{
	int iobase;
	__u8 bsr;

	iobase = self->io.fir_base;

	/* Reset Tx FIFO info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Save current bank */
	bsr = inb(iobase+BSR);

	/* Disable DMA */
	switch_bank(iobase, BANK0);
	outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, BANK2);
	outb(ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Rx FIFO. This will also flush the ST_FIFO */
	switch_bank(iobase, BANK0);
	outb(FCR_RXSR|FCR_FIFO_EN, iobase+FCR);

	/* Clear our shadow copy of the status FIFO. */
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_RX_MODE);

	/* Enable DMA */
	switch_bank(iobase, BANK0);
	outb(inb(iobase+MCR)|MCR_DMA_EN, iobase+MCR);

	/* Restore bank register */
	outb(bsr, iobase+BSR);

	return 0;
}

/*
 * Function nsc_ircc_dma_receive_complete (self)
 *
 *    Finished with receiving frames: drain the chip's status FIFO into
 *    our shadow copy, then deliver each completed frame up the stack.
 *    Returns TRUE when all entries were processed, FALSE when a retry
 *    has been scheduled via the chip timer.
 *
 */
static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	__u8 status;
	__u8 bank;
	int len;

	st_fifo = &self->st_fifo;

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Read all entries in status FIFO */
	switch_bank(iobase, BANK5);
	while ((status = inb(iobase+FRM_ST)) & FRM_ST_VLD) {
		/* We must empty the status FIFO no matter what */
		len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);

		if (st_fifo->tail >= MAX_RX_WINDOW) {
			/* Shadow window is full: the entry is read (and thus
			 * drained from the chip) but dropped. */
			IRDA_DEBUG(0, "%s(), window is full!\n", __func__);
			continue;
		}

		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
	}
	/* Try to process all entries in status FIFO */
	while (st_fifo->len > 0) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->pending_bytes -= len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FRM_ST_ERR_MSK) {
			if (status & FRM_ST_LOST_FR) {
				/* Add number of lost frames to stats */
				self->netdev->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->netdev->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FRM_ST_MAX_LEN)
					self->netdev->stats.rx_length_errors++;

				if (status & FRM_ST_PHY_ERR)
					self->netdev->stats.rx_frame_errors++;

				if (status & FRM_ST_BAD_CRC)
					self->netdev->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FRM_ST_OVR1)
				self->netdev->stats.rx_fifo_errors++;

			if (status & FRM_ST_OVR2)
				self->netdev->stats.rx_fifo_errors++;
		} else {
			/*
			 * First we must make sure that the frame we
			 * want to deliver is all in main memory. If we
			 * cannot tell, then we check if the Rx FIFO is
			 * empty. If not then we will have to take a nap
			 * and try again later.
			 */
			if (st_fifo->pending_bytes < self->io.fifo_size) {
				switch_bank(iobase, BANK0);
				if (inb(iobase+LSR) & LSR_RXDA) {
					/* Put this entry back in fifo */
					st_fifo->head--;
					st_fifo->len++;
					st_fifo->pending_bytes += len;
					st_fifo->entries[st_fifo->head].status = status;
					st_fifo->entries[st_fifo->head].len = len;
					/*
					 * DMA not finished yet, so try again
					 * later, set timer value, resolution
					 * 125 us
					 */
					switch_bank(iobase, BANK4);
					outb(0x02, iobase+TMRL); /* x 125 us */
					outb(0x00, iobase+TMRH);

					/* Start timer */
					outb(IRCR1_TMR_EN, iobase+IRCR1);

					/* Restore bank register */
					outb(bank, iobase+BSR);

					return FALSE; /* I'll be back! */
				}
			}

			/*
			 * Remember the time we received this frame, so we can
			 * reduce the min turn time a bit since we will know
			 * how much time we have used for protocol processing
			 */
			do_gettimeofday(&self->stamp);

			skb = dev_alloc_skb(len+1);
			if (skb == NULL)  {
				IRDA_WARNING("%s(), memory squeeze, "
					     "dropping frame.\n",
					     __func__);
				self->netdev->stats.rx_dropped++;

				/* Restore bank register */
				outb(bank, iobase+BSR);

				return FALSE;
			}

			/* Make sure IP header gets aligned */
			skb_reserve(skb, 1);

			/* Copy frame without CRC: MIR and below carry a
			 * 16-bit CRC, FIR (4 Mb/s) a 32-bit CRC. */
			if (self->io.speed < 4000000) {
				skb_put(skb, len-2);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 2);
			} else {
				skb_put(skb, len-4);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 4);
			}

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
	}
	/* Restore bank register */
	outb(bank, iobase+BSR);

	return TRUE;
}

/*
 * Function nsc_ircc_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self)
{
	__u8 byte;
	int iobase;

	iobase = self->io.fir_base;

	/* Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RXD);
		async_unwrap_char(self->netdev, &self->netdev->stats,
				  &self->rx_buff, byte);
	} while (inb(iobase+LSR) & LSR_RXDA); /* Data available */
}

/*
 * Function nsc_ircc_sir_interrupt (self, eir)
 *
 *    Handle SIR interrupt: Tx FIFO refill, Tx completion / speed change
 *    turnaround, and Rx. Updates self->ier with the next interrupt mask
 *    for nsc_ircc_interrupt() to program.
 *
 */
static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
{
	int actual;

	/* Check if transmit FIFO is low on data */
	if (eir & EIR_TXLDL_EV) {
		/* Write data left in transmit buffer */
		actual = nsc_ircc_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);
		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0)
			self->ier = IER_TXLDL_IE;
		else {
			self->netdev->stats.tx_packets++;
			netif_wake_queue(self->netdev);
			self->ier = IER_TXEMP_IE;
		}

	}
	/* Check if transmission has completed */
	if (eir & EIR_TXEMP_EV) {
		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		self->ier = IER_RXHDL_IE;
		/* Check if we need to change the speed?
		 * Need to be after self->io.direction to avoid race with
		 * nsc_ircc_hard_xmit_sir() - Jean II */
		if (self->new_speed) {
			IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__);
			self->ier = nsc_ircc_change_speed(self,
							  self->new_speed);
			self->new_speed = 0;
			netif_wake_queue(self->netdev);

			/* Check if we are going to FIR */
			if (self->io.speed > 115200) {
				/* No need to do anymore SIR stuff */
				return;
			}
		}
	}

	/* Rx FIFO threshold or timeout */
	if (eir & EIR_RXHDL_EV) {
		nsc_ircc_pio_receive(self);

		/* Keep receiving */
		self->ier = IER_RXHDL_IE;
	}
}

/*
 * Function nsc_ircc_fir_interrupt (self, eir)
 *
 *    Handle MIR/FIR interrupt: status-FIFO Rx completion, mtt/Rx-retry
 *    timer expiry, DMA Tx completion, and the Tx-empty event used for
 *    deferred speed changes. Updates self->ier like the SIR handler.
 *
 */
static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
				   int eir)
{
	__u8 bank;

	bank = inb(iobase+BSR);

	/* Status FIFO event*/
	if (eir & EIR_SFIF_EV) {
		/* Check if DMA has finished */
		if (nsc_ircc_dma_receive_complete(self, iobase)) {
			/* Wait for next status FIFO interrupt */
			self->ier = IER_SFIF_IE;
		} else {
			self->ier = IER_SFIF_IE | IER_TMR_IE;
		}
	} else if (eir & EIR_TMR_EV) { /* Timer finished */
		/* Disable timer */
		switch_bank(iobase, BANK4);
		outb(0, iobase+IRCR1);

		/* Clear timer event */
		switch_bank(iobase, BANK0);
		outb(ASCR_CTE, iobase+ASCR);

		/* Check if this is a Tx timer interrupt */
		if (self->io.direction == IO_XMIT) {
			nsc_ircc_dma_xmit(self, iobase);

			/* Interrupt on DMA */
			self->ier = IER_DMA_IE;
		} else {
			/* Check (again) if DMA has finished */
			if (nsc_ircc_dma_receive_complete(self, iobase)) {
				self->ier = IER_SFIF_IE;
			} else {
				self->ier = IER_SFIF_IE | IER_TMR_IE;
			}
		}
	} else if (eir & EIR_DMA_EV) {
		/* Finished with all transmissions? */
		if (nsc_ircc_dma_xmit_complete(self)) {
			if(self->new_speed != 0) {
				/* As we stop the Tx queue, the speed change
				 * need to be done when the Tx fifo is
				 * empty. Ask for a Tx done interrupt */
				self->ier = IER_TXEMP_IE;
			} else {
				/* Check if there are more frames to be
				 * transmitted */
				if (irda_device_txqueue_empty(self->netdev)) {
					/* Prepare for receive */
					nsc_ircc_dma_receive(self);
					self->ier = IER_SFIF_IE;
				} else
					IRDA_WARNING("%s(), potential "
						     "Tx queue lockup !\n",
						     __func__);
			}
		} else {
			/* Not finished yet, so interrupt on DMA again */
			self->ier = IER_DMA_IE;
		}
	} else if (eir & EIR_TXEMP_EV) {
		/* The Tx FIFO has totally drained out, so now we can change
		 * the speed... - Jean II */
		self->ier = nsc_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
		netif_wake_queue(self->netdev);
		/* Note : nsc_ircc_change_speed() restarted Rx fifo */
	}

	outb(bank, iobase+BSR);
}

/*
 * Function nsc_ircc_interrupt (irq, dev_id, regs)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t nsc_ircc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct nsc_ircc_cb *self;
	__u8 bsr, eir;
	int iobase;

	self = netdev_priv(dev);

	spin_lock(&self->lock);

	iobase = self->io.fir_base;

	bsr = inb(iobase+BSR);	/* Save current bank */

	switch_bank(iobase, BANK0);
	self->ier = inb(iobase+IER);
	eir = inb(iobase+EIR) & self->ier; /* Mask out the interesting ones */

	outb(0, iobase+IER); /* Disable interrupts */

	if (eir) {
		/* Dispatch interrupt handler for the current speed; the
		 * handlers update self->ier with the next mask to use. */
		if (self->io.speed > 115200)
			nsc_ircc_fir_interrupt(self, iobase, eir);
		else
			nsc_ircc_sir_interrupt(self, eir);
	}

	outb(self->ier, iobase+IER); /* Restore interrupts */
	outb(bsr, iobase+BSR);	     /* Restore bank register */

	spin_unlock(&self->lock);
	return IRQ_RETVAL(eir);
}

/*
 * Function nsc_ircc_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self)
{
	unsigned long flags;
	int status = FALSE;
int iobase; __u8 bank; IRDA_ASSERT(self != NULL, return FALSE;); spin_lock_irqsave(&self->lock, flags); if (self->io.speed > 115200) { iobase = self->io.fir_base; /* Check if rx FIFO is not empty */ bank = inb(iobase+BSR); switch_bank(iobase, BANK2); if ((inb(iobase+RXFLV) & 0x3f) != 0) { /* We are receiving something */ status = TRUE; } outb(bank, iobase+BSR); } else status = (self->rx_buff.state != OUTSIDE_FRAME); spin_unlock_irqrestore(&self->lock, flags); return status; } /* * Function nsc_ircc_net_open (dev) * * Start the device * */ static int nsc_ircc_net_open(struct net_device *dev) { struct nsc_ircc_cb *self; int iobase; char hwname[32]; __u8 bank; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return 0;); iobase = self->io.fir_base; if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) { IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name, self->io.irq); return -EAGAIN; } /* * Always allocate the DMA channel after the IRQ, and clean up on * failure. */ if (request_dma(self->io.dma, dev->name)) { IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name, self->io.dma); free_irq(self->io.irq, dev); return -EAGAIN; } /* Save current bank */ bank = inb(iobase+BSR); /* turn on interrupts */ switch_bank(iobase, BANK0); outb(IER_LS_IE | IER_RXHDL_IE, iobase+IER); /* Restore bank register */ outb(bank, iobase+BSR); /* Ready to play! 
*/ netif_start_queue(dev); /* Give self a hardware name */ sprintf(hwname, "NSC-FIR @ 0x%03x", self->io.fir_base); /* * Open new IrLAP layer instance, now that everything should be * initialized properly */ self->irlap = irlap_open(dev, &self->qos, hwname); return 0; } /* * Function nsc_ircc_net_close (dev) * * Stop the device * */ static int nsc_ircc_net_close(struct net_device *dev) { struct nsc_ircc_cb *self; int iobase; __u8 bank; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return 0;); /* Stop device */ netif_stop_queue(dev); /* Stop and remove instance of IrLAP */ if (self->irlap) irlap_close(self->irlap); self->irlap = NULL; iobase = self->io.fir_base; disable_dma(self->io.dma); /* Save current bank */ bank = inb(iobase+BSR); /* Disable interrupts */ switch_bank(iobase, BANK0); outb(0, iobase+IER); free_irq(self->io.irq, dev); free_dma(self->io.dma); /* Restore bank register */ outb(bank, iobase+BSR); return 0; } /* * Function nsc_ircc_net_ioctl (dev, rq, cmd) * * Process IOCTL commands for this device * */ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct if_irda_req *irq = (struct if_irda_req *) rq; struct nsc_ircc_cb *self; unsigned long flags; int ret = 0; IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return -1;); IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } spin_lock_irqsave(&self->lock, flags); nsc_ircc_change_speed(self, irq->ifr_baudrate); spin_unlock_irqrestore(&self->lock, flags); break; case SIOCSMEDIABUSY: /* Set media busy */ if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } irda_device_set_media_busy(self->netdev, TRUE); break; case SIOCGRECEIVING: /* Check if we are receiving right now */ /* This is already protected */ irq->ifr_receiving = 
nsc_ircc_is_receiving(self); break; default: ret = -EOPNOTSUPP; } return ret; } static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state) { struct nsc_ircc_cb *self = platform_get_drvdata(dev); int bank; unsigned long flags; int iobase = self->io.fir_base; if (self->io.suspended) return 0; IRDA_DEBUG(1, "%s, Suspending\n", driver_name); rtnl_lock(); if (netif_running(self->netdev)) { netif_device_detach(self->netdev); spin_lock_irqsave(&self->lock, flags); /* Save current bank */ bank = inb(iobase+BSR); /* Disable interrupts */ switch_bank(iobase, BANK0); outb(0, iobase+IER); /* Restore bank register */ outb(bank, iobase+BSR); spin_unlock_irqrestore(&self->lock, flags); free_irq(self->io.irq, self->netdev); disable_dma(self->io.dma); } self->io.suspended = 1; rtnl_unlock(); return 0; } static int nsc_ircc_resume(struct platform_device *dev) { struct nsc_ircc_cb *self = platform_get_drvdata(dev); unsigned long flags; if (!self->io.suspended) return 0; IRDA_DEBUG(1, "%s, Waking up\n", driver_name); rtnl_lock(); nsc_ircc_setup(&self->io); nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id); if (netif_running(self->netdev)) { if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, self->netdev->name, self->netdev)) { IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name, self->io.irq); /* * Don't fail resume process, just kill this * network interface */ unregister_netdevice(self->netdev); } else { spin_lock_irqsave(&self->lock, flags); nsc_ircc_change_speed(self, self->io.speed); spin_unlock_irqrestore(&self->lock, flags); netif_device_attach(self->netdev); } } else { spin_lock_irqsave(&self->lock, flags); nsc_ircc_change_speed(self, 9600); spin_unlock_irqrestore(&self->lock, flags); } self->io.suspended = 0; rtnl_unlock(); return 0; } MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); MODULE_DESCRIPTION("NSC IrDA Device Driver"); MODULE_LICENSE("GPL"); module_param(qos_mtt_bits, int, 0); MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn 
Time"); module_param_array(io, int, NULL, 0); MODULE_PARM_DESC(io, "Base I/O addresses"); module_param_array(irq, int, NULL, 0); MODULE_PARM_DESC(irq, "IRQ lines"); module_param_array(dma, int, NULL, 0); MODULE_PARM_DESC(dma, "DMA channels"); module_param(dongle_id, int, 0); MODULE_PARM_DESC(dongle_id, "Type-id of used dongle"); module_init(nsc_ircc_init); module_exit(nsc_ircc_cleanup);
gpl-2.0
AscendG630-DEV/android_kernel_huawei_g630
arch/powerpc/platforms/ps3/spu.c
7019
15518
/*
 * PS3 Platform spu routines.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>

#include "../cell/spufs/spufs.h"
#include "platform.h"

/* spu_management_ops */

/**
 * enum spe_type - Type of spe to create.
 * @spe_type_logical: Standard logical spe.
 *
 * For use with lv1_construct_logical_spe().  The current HV does not support
 * any types other than those listed.
 */
enum spe_type {
    SPE_TYPE_LOGICAL = 0,
};

/**
 * struct spe_shadow - logical spe shadow register area.
 *
 * Read-only shadow of spe registers.  The padding arrays place each
 * register at its fixed hardware offset (noted in the trailing comments).
 */
struct spe_shadow {
    u8 padding_0140[0x0140];
    u64 int_status_class0_RW;       /* 0x0140 */
    u64 int_status_class1_RW;       /* 0x0148 */
    u64 int_status_class2_RW;       /* 0x0150 */
    u8 padding_0158[0x0610-0x0158];
    u64 mfc_dsisr_RW;               /* 0x0610 */
    u8 padding_0618[0x0620-0x0618];
    u64 mfc_dar_RW;                 /* 0x0620 */
    u8 padding_0628[0x0800-0x0628];
    u64 mfc_dsipr_R;                /* 0x0800 */
    u8 padding_0808[0x0810-0x0808];
    u64 mfc_lscrr_R;                /* 0x0810 */
    u8 padding_0818[0x0c00-0x0818];
    u64 mfc_cer_R;                  /* 0x0c00 */
    u8 padding_0c08[0x0f00-0x0c08];
    u64 spe_execution_status;       /* 0x0f00 */
    u8 padding_0f08[0x1000-0x0f08];
};

/**
 * enum spe_ex_state - Logical spe execution state.
 * @spe_ex_state_unexecutable: Uninitialized.
 * @spe_ex_state_executable: Enabled, not ready.
 * @spe_ex_state_executed: Ready for use.
 *
 * The execution state (status) of the logical spe as reported in
 * struct spe_shadow:spe_execution_status.
 */
enum spe_ex_state {
    SPE_EX_STATE_UNEXECUTABLE = 0,
    SPE_EX_STATE_EXECUTABLE = 2,
    SPE_EX_STATE_EXECUTED = 3,
};

/**
 * struct priv1_cache - Cached values of priv1 registers.
 * @masks[]: Array of cached spe interrupt masks, indexed by class.
 * @sr1: Cached mfc_sr1 register.
 * @tclass_id: Cached mfc_tclass_id register.
 *
 * The HV gives no direct read access to these registers, so the last
 * value written is cached here and returned by the *_get accessors.
 */
struct priv1_cache {
    u64 masks[3];
    u64 sr1;
    u64 tclass_id;
};

/**
 * struct spu_pdata - Platform state variables.
 * @spe_id: HV spe id returned by lv1_construct_logical_spe().
 * @resource_id: HV spe resource id returned by
 *  ps3_repository_read_spe_resource_id().
 * @priv2_addr: lpar address of spe priv2 area returned by
 *  lv1_construct_logical_spe().
 * @shadow_addr: lpar address of spe register shadow area returned by
 *  lv1_construct_logical_spe().
 * @shadow: Virtual (ioremap) address of spe register shadow area.
 * @cache: Cached values of priv1 registers.
 */
struct spu_pdata {
    u64 spe_id;
    u64 resource_id;
    u64 priv2_addr;
    u64 shadow_addr;
    struct spe_shadow __iomem *shadow;
    struct priv1_cache cache;
};

/* Accessor for the platform-private data hung off the generic spu. */
static struct spu_pdata *spu_pdata(struct spu *spu)
{
    return spu->pdata;
}

/* Debug helper: log the addresses of all mapped spe areas. */
#define dump_areas(_a, _b, _c, _d, _e) \
    _dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)
static void _dump_areas(unsigned int spe_id, unsigned long priv2,
    unsigned long problem, unsigned long ls, unsigned long shadow,
    const char* func, int line)
{
    pr_debug("%s:%d: spe_id: %xh (%u)\n", func, line, spe_id, spe_id);
    pr_debug("%s:%d: priv2: %lxh\n", func, line, priv2);
    pr_debug("%s:%d: problem: %lxh\n", func, line, problem);
    pr_debug("%s:%d: ls: %lxh\n", func, line, ls);
    pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);
}

/* Exported getter: HV spe id for a struct spu (passed as void *). */
inline u64 ps3_get_spe_id(void *arg)
{
    return spu_pdata(arg)->spe_id;
}
EXPORT_SYMBOL_GPL(ps3_get_spe_id);

/* Return the HV virtual address space id of this logical PPE. */
static unsigned long get_vas_id(void)
{
    u64 id;

    lv1_get_logical_ppe_id(&id);
    /* NOTE: 'id' is reused: first the ppe id (input), then the vas id. */
    lv1_get_virtual_address_space_id_of_ppe(&id);

    return id;
}

/*
 * Ask the HV to construct a logical spe; fills in spe_id, priv2/shadow
 * lpar addresses and the problem/local-store physical addresses.
 * Returns the lv1 result code (0 on success).
 */
static int __init construct_spu(struct spu *spu)
{
    int result;
    u64 unused;
    u64 problem_phys;
    u64 local_store_phys;

    result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
        PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
        &spu_pdata(spu)->priv2_addr, &problem_phys,
        &local_store_phys, &unused,
        &spu_pdata(spu)->shadow_addr,
        &spu_pdata(spu)->spe_id);
    /* Copied out even on failure; only used after a success return. */
    spu->problem_phys = problem_phys;
    spu->local_store_phys = local_store_phys;

    if (result) {
        pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
            __func__, __LINE__, ps3_result(result));
        return result;
    }

    return result;
}

/*
 * Unmap all four spe areas.  Safe to call on partially-mapped state:
 * iounmap() of a NULL/unset pointer is tolerated on this platform
 * (used as the common error path of setup_areas()).
 */
static void spu_unmap(struct spu *spu)
{
    iounmap(spu->priv2);
    iounmap(spu->problem);
    iounmap((__force u8 __iomem *)spu->local_store);
    iounmap(spu_pdata(spu)->shadow);
}

/**
 * setup_areas - Map the spu regions into the address space.
 *
 * The current HV requires the spu shadow regs to be mapped with the
 * PTE page protection bits set as read-only (PP=3).  This implementation
 * uses the low level __ioremap() to bypass the page protection settings
 * enforced by ioremap_prot() to get the needed PTE bits set for the
 * shadow regs.
 */
static int __init setup_areas(struct spu *spu)
{
    /* NOTE(review): 'struct table' is declared but never used here. */
    struct table {char* name; unsigned long addr; unsigned long size;};
    /* PP=3 (read-only) plus no-cache, see the comment above. */
    static const unsigned long shadow_flags = _PAGE_NO_CACHE | 3;

    spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
        sizeof(struct spe_shadow),
        shadow_flags);
    if (!spu_pdata(spu)->shadow) {
        pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
        goto fail_ioremap;
    }

    spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
        LS_SIZE, _PAGE_NO_CACHE);

    if (!spu->local_store) {
        pr_debug("%s:%d: ioremap local_store failed\n",
            __func__, __LINE__);
        goto fail_ioremap;
    }

    spu->problem = ioremap(spu->problem_phys,
        sizeof(struct spu_problem));

    if (!spu->problem) {
        pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
        goto fail_ioremap;
    }

    spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
        sizeof(struct spu_priv2));

    if (!spu->priv2) {
        pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
        goto fail_ioremap;
    }

    dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
        spu->problem_phys, spu->local_store_phys,
        spu_pdata(spu)->shadow_addr);
    dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
        (unsigned long)spu->problem,
        (unsigned long)spu->local_store,
        (unsigned long)spu_pdata(spu)->shadow);

    return 0;

fail_ioremap:
    spu_unmap(spu);

    return -ENOMEM;
}

/*
 * Set up the three per-class spe interrupt lines (class 0, 1, 2).
 * On failure, already-created lines are destroyed in reverse order and
 * all three irq slots are reset to NO_IRQ.
 */
static int __init setup_interrupts(struct spu *spu)
{
    int result;

    result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
        0, &spu->irqs[0]);

    if (result)
        goto fail_alloc_0;

    result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
        1, &spu->irqs[1]);

    if (result)
        goto fail_alloc_1;

    result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
        2, &spu->irqs[2]);

    if (result)
        goto fail_alloc_2;

    return result;

fail_alloc_2:
    ps3_spe_irq_destroy(spu->irqs[1]);
fail_alloc_1:
    ps3_spe_irq_destroy(spu->irqs[0]);
fail_alloc_0:
    spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
    return result;
}

/*
 * Enable the logical spe, then map its areas and wire up interrupts.
 * Each failure path unwinds exactly the steps that succeeded.
 */
static int __init enable_spu(struct spu *spu)
{
    int result;

    result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
        spu_pdata(spu)->resource_id);

    if (result) {
        pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
            __func__, __LINE__, ps3_result(result));
        goto fail_enable;
    }

    result = setup_areas(spu);

    if (result)
        goto fail_areas;

    result = setup_interrupts(spu);

    if (result)
        goto fail_interrupts;

    return 0;

fail_interrupts:
    spu_unmap(spu);
fail_areas:
    lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
fail_enable:
    return result;
}

/*
 * Full teardown of a logical spe: disable, release irqs, unmap areas,
 * destruct the HV object and free the platform data.  lv1 failures
 * here are treated as fatal (BUG_ON).
 */
static int ps3_destroy_spu(struct spu *spu)
{
    int result;

    pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);

    result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
    BUG_ON(result);

    ps3_spe_irq_destroy(spu->irqs[2]);
    ps3_spe_irq_destroy(spu->irqs[1]);
    ps3_spe_irq_destroy(spu->irqs[0]);

    spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;

    spu_unmap(spu);

    result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
    BUG_ON(result);

    kfree(spu->pdata);
    spu->pdata = NULL;

    return 0;
}

/*
 * Create and enable one logical spe.  @data carries the HV resource id
 * (as a cast integer, see ps3_enumerate_spus()).  On any failure the
 * partially-created spe is destroyed via ps3_destroy_spu().
 */
static int __init ps3_create_spu(struct spu *spu, void *data)
{
    int result;

    pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);

    spu->pdata = kzalloc(sizeof(struct spu_pdata),
        GFP_KERNEL);

    if (!spu->pdata) {
        result = -ENOMEM;
        goto fail_malloc;
    }

    spu_pdata(spu)->resource_id = (unsigned long)data;

    /* Init cached reg values to HV defaults. */
    spu_pdata(spu)->cache.sr1 = 0x33;

    result = construct_spu(spu);

    if (result)
        goto fail_construct;

    /* For now, just go ahead and enable it. */
    result = enable_spu(spu);

    if (result)
        goto fail_enable;

    /* Make sure the spu is in SPE_EX_STATE_EXECUTED. */

    /* need something better here!!! */
    /* NOTE(review): busy-waits with no timeout; hangs if the spe never
     * reaches SPE_EX_STATE_EXECUTED. */
    while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
        != SPE_EX_STATE_EXECUTED)
        (void)0;

    return result;

fail_enable:
fail_construct:
    ps3_destroy_spu(spu);
fail_malloc:
    return result;
}

/*
 * Walk the repository's spu resource ids and call @fn for each
 * exclusive spe resource.  Returns the count of resource ids on
 * success, or the first error from the repository or @fn.
 */
static int __init ps3_enumerate_spus(int (*fn)(void *data))
{
    int result;
    unsigned int num_resource_id;
    unsigned int i;

    result = ps3_repository_read_num_spu_resource_id(&num_resource_id);

    pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,
        num_resource_id);

    /*
     * For now, just create logical spus equal to the number
     * of physical spus reserved for the partition.
     */

    for (i = 0; i < num_resource_id; i++) {
        enum ps3_spu_resource_type resource_type;
        unsigned int resource_id;

        result = ps3_repository_read_spu_resource_id(i,
            &resource_type, &resource_id);

        if (result)
            break;

        if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {
            /* The id travels to ps3_create_spu() cast into void *. */
            result = fn((void*)(unsigned long)resource_id);

            if (result)
                break;
        }
    }

    if (result) {
        printk(KERN_WARNING "%s:%d: Error initializing spus\n",
            __func__, __LINE__);
        return result;
    }

    return num_resource_id;
}

/* No affinity support on PS3. */
static int ps3_init_affinity(void)
{
    return 0;
}

/**
 * ps3_enable_spu - Enable SPU run control.
 *
 * An outstanding enhancement for the PS3 would be to add a guard to check
 * for incorrect access to the spu problem state when the spu context is
 * disabled.  This check could be implemented with a flag added to the spu
 * context that would inhibit mapping problem state pages, and a routine
 * to unmap spu problem state pages.  When the spu is enabled with
 * ps3_enable_spu() the flag would be set allowing pages to be mapped,
 * and when the spu is disabled with ps3_disable_spu() the flag would be
 * cleared and the mapped problem state pages would be unmapped.
 */
static void ps3_enable_spu(struct spu_context *ctx)
{
}

static void ps3_disable_spu(struct spu_context *ctx)
{
    ctx->ops->runcntl_stop(ctx);
}

const struct spu_management_ops spu_management_ps3_ops = {
    .enumerate_spus = ps3_enumerate_spus,
    .create_spu = ps3_create_spu,
    .destroy_spu = ps3_destroy_spu,
    .enable_spu = ps3_enable_spu,
    .disable_spu = ps3_disable_spu,
    .init_affinity = ps3_init_affinity,
};

/* spu_priv1_ops */

static void int_mask_and(struct spu *spu, int class, u64 mask)
{
    u64 old_mask;

    /* are these serialized by caller??? */
    old_mask = spu_int_mask_get(spu, class);
    spu_int_mask_set(spu, class, old_mask & mask);
}

static void int_mask_or(struct spu *spu, int class, u64 mask)
{
    u64 old_mask;

    old_mask = spu_int_mask_get(spu, class);
    spu_int_mask_set(spu, class, old_mask | mask);
}

/* Write the interrupt mask to the HV and remember it in the cache. */
static void int_mask_set(struct spu *spu, int class, u64 mask)
{
    spu_pdata(spu)->cache.masks[class] = mask;
    lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
        spu_pdata(spu)->cache.masks[class]);
}

/* Masks are write-only via the HV; return the cached value. */
static u64 int_mask_get(struct spu *spu, int class)
{
    return spu_pdata(spu)->cache.masks[class];
}

static void int_stat_clear(struct spu *spu, int class, u64 stat)
{
    /* Note that MFC_DSISR will be cleared when class1[MF] is set. */
    lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
        stat, 0);
}

static u64 int_stat_get(struct spu *spu, int class)
{
    u64 stat;

    lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
    return stat;
}

static void cpu_affinity_set(struct spu *spu, int cpu)
{
    /* No support. */
}

/* MFC DAR is read through the read-only shadow area. */
static u64 mfc_dar_get(struct spu *spu)
{
    return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
}

static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
    /* Nothing to do, cleared in int_stat_clear(). */
}

/* MFC DSISR is read through the read-only shadow area. */
static u64 mfc_dsisr_get(struct spu *spu)
{
    return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
}

static void mfc_sdr_setup(struct spu *spu)
{
    /* Nothing to do. */
}

static void mfc_sr1_set(struct spu *spu, u64 sr1)
{
    /* Check bits allowed by HV. */
    static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
        | MFC_STATE1_PROBLEM_STATE_MASK);

    /* Only the two masked-out bits may differ from the cached value. */
    BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));

    spu_pdata(spu)->cache.sr1 = sr1;
    lv1_set_spe_privilege_state_area_1_register(
        spu_pdata(spu)->spe_id,
        offsetof(struct spu_priv1, mfc_sr1_RW),
        spu_pdata(spu)->cache.sr1);
}

/* sr1 is write-only via the HV; return the cached value. */
static u64 mfc_sr1_get(struct spu *spu)
{
    return spu_pdata(spu)->cache.sr1;
}

static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
    spu_pdata(spu)->cache.tclass_id = tclass_id;
    lv1_set_spe_privilege_state_area_1_register(
        spu_pdata(spu)->spe_id,
        offsetof(struct spu_priv1, mfc_tclass_id_RW),
        spu_pdata(spu)->cache.tclass_id);
}

/* tclass_id is write-only via the HV; return the cached value. */
static u64 mfc_tclass_id_get(struct spu *spu)
{
    return spu_pdata(spu)->cache.tclass_id;
}

static void tlb_invalidate(struct spu *spu)
{
    /* Nothing to do. */
}

static void resource_allocation_groupID_set(struct spu *spu, u64 id)
{
    /* No support. */
}

static u64 resource_allocation_groupID_get(struct spu *spu)
{
    return 0; /* No support. */
}

static void resource_allocation_enable_set(struct spu *spu, u64 enable)
{
    /* No support. */
}

static u64 resource_allocation_enable_get(struct spu *spu)
{
    return 0; /* No support. */
}

const struct spu_priv1_ops spu_priv1_ps3_ops = {
    .int_mask_and = int_mask_and,
    .int_mask_or = int_mask_or,
    .int_mask_set = int_mask_set,
    .int_mask_get = int_mask_get,
    .int_stat_clear = int_stat_clear,
    .int_stat_get = int_stat_get,
    .cpu_affinity_set = cpu_affinity_set,
    .mfc_dar_get = mfc_dar_get,
    .mfc_dsisr_set = mfc_dsisr_set,
    .mfc_dsisr_get = mfc_dsisr_get,
    .mfc_sdr_setup = mfc_sdr_setup,
    .mfc_sr1_set = mfc_sr1_set,
    .mfc_sr1_get = mfc_sr1_get,
    .mfc_tclass_id_set = mfc_tclass_id_set,
    .mfc_tclass_id_get = mfc_tclass_id_get,
    .tlb_invalidate = tlb_invalidate,
    .resource_allocation_groupID_set = resource_allocation_groupID_set,
    .resource_allocation_groupID_get = resource_allocation_groupID_get,
    .resource_allocation_enable_set = resource_allocation_enable_set,
    .resource_allocation_enable_get = resource_allocation_enable_get,
};

/* Install the PS3 implementations of the generic spu op tables. */
void ps3_spu_set_platform(void)
{
    spu_priv1_ops = &spu_priv1_ps3_ops;
    spu_management_ops = &spu_management_ps3_ops;
}
gpl-2.0
drewx2/android_kernel_htc_dlx
net/ipv4/xfrm4_tunnel.c
7275
2765
/* xfrm4_tunnel.c: Generic IP tunnel transformer. * * Copyright (C) 2003 David S. Miller (davem@redhat.com) */ #define pr_fmt(fmt) "IPsec: " fmt #include <linux/skbuff.h> #include <linux/module.h> #include <linux/mutex.h> #include <net/xfrm.h> #include <net/ip.h> #include <net/protocol.h> static int ipip_output(struct xfrm_state *x, struct sk_buff *skb) { skb_push(skb, -skb_network_offset(skb)); return 0; } static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb) { return ip_hdr(skb)->protocol; } static int ipip_init_state(struct xfrm_state *x) { if (x->props.mode != XFRM_MODE_TUNNEL) return -EINVAL; if (x->encap) return -EINVAL; x->props.header_len = sizeof(struct iphdr); return 0; } static void ipip_destroy(struct xfrm_state *x) { } static const struct xfrm_type ipip_type = { .description = "IPIP", .owner = THIS_MODULE, .proto = IPPROTO_IPIP, .init_state = ipip_init_state, .destructor = ipip_destroy, .input = ipip_xfrm_rcv, .output = ipip_output }; static int xfrm_tunnel_rcv(struct sk_buff *skb) { return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr); } static int xfrm_tunnel_err(struct sk_buff *skb, u32 info) { return -ENOENT; } static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = { .handler = xfrm_tunnel_rcv, .err_handler = xfrm_tunnel_err, .priority = 2, }; #if IS_ENABLED(CONFIG_IPV6) static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = { .handler = xfrm_tunnel_rcv, .err_handler = xfrm_tunnel_err, .priority = 2, }; #endif static int __init ipip_init(void) { if (xfrm_register_type(&ipip_type, AF_INET) < 0) { pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) { pr_info("%s: can't add xfrm handler for AF_INET\n", __func__); xfrm_unregister_type(&ipip_type, AF_INET); return -EAGAIN; } #if IS_ENABLED(CONFIG_IPV6) if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) { pr_info("%s: can't add xfrm handler for AF_INET6\n", __func__); 
xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET); xfrm_unregister_type(&ipip_type, AF_INET); return -EAGAIN; } #endif return 0; } static void __exit ipip_fini(void) { #if IS_ENABLED(CONFIG_IPV6) if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6)) pr_info("%s: can't remove xfrm handler for AF_INET6\n", __func__); #endif if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET)) pr_info("%s: can't remove xfrm handler for AF_INET\n", __func__); if (xfrm_unregister_type(&ipip_type, AF_INET) < 0) pr_info("%s: can't remove xfrm type\n", __func__); } module_init(ipip_init); module_exit(ipip_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_IPIP);
gpl-2.0
ResurrectionRemix-Devices/android_kernel_samsung_smdk4412
drivers/media/video/saa7164/saa7164-api.c
8043
44788
/* * Driver for the NXP SAA7164 PCIe bridge * * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/wait.h> #include <linux/slab.h> #include "saa7164.h" int saa7164_api_get_load_info(struct saa7164_dev *dev, struct tmFwInfoStruct *i) { int ret; if (!(saa_debug & DBGLVL_CPU)) return 0; dprintk(DBGLVL_API, "%s()\n", __func__); i->deviceinst = 0; i->devicespec = 0; i->mode = 0; i->status = 0; ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_FW_STATUS_CONTROL, sizeof(struct tmFwInfoStruct), i); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); printk(KERN_INFO "saa7164[%d]-CPU: %d percent", dev->nr, i->CPULoad); return ret; } int saa7164_api_collect_debug(struct saa7164_dev *dev) { struct tmComResDebugGetData d; u8 more = 255; int ret; dprintk(DBGLVL_API, "%s()\n", __func__); while (more--) { memset(&d, 0, sizeof(d)); ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_DEBUG_DATA_CONTROL, sizeof(d), &d); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); if (d.dwResult != SAA_OK) break; printk(KERN_INFO "saa7164[%d]-FWMSG: %s", dev->nr, d.ucDebugData); } return 0; } int saa7164_api_set_debug(struct saa7164_dev *dev, u8 level) { struct tmComResDebugSetLevel lvl; int ret; dprintk(DBGLVL_API, "%s(level=%d)\n", __func__, level); /* 
Retrieve current state */ ret = saa7164_cmd_send(dev, 0, GET_CUR, SET_DEBUG_LEVEL_CONTROL, sizeof(lvl), &lvl); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_API, "%s() Was %d\n", __func__, lvl.dwDebugLevel); lvl.dwDebugLevel = level; /* set new state */ ret = saa7164_cmd_send(dev, 0, SET_CUR, SET_DEBUG_LEVEL_CONTROL, sizeof(lvl), &lvl); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_set_vbi_format(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResProbeCommit fmt, rsp; int ret; dprintk(DBGLVL_API, "%s(nr=%d, unitid=0x%x)\n", __func__, port->nr, port->hwcfg.unitid); fmt.bmHint = 0; fmt.bFormatIndex = 1; fmt.bFrameIndex = 1; /* Probe, see if it can support this format */ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR, SAA_PROBE_CONTROL, sizeof(fmt), &fmt); if (ret != SAA_OK) printk(KERN_ERR "%s() set error, ret = 0x%x\n", __func__, ret); /* See of the format change was successful */ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, GET_CUR, SAA_PROBE_CONTROL, sizeof(rsp), &rsp); if (ret != SAA_OK) { printk(KERN_ERR "%s() get error, ret = 0x%x\n", __func__, ret); } else { /* Compare requested vs received, should be same */ if (memcmp(&fmt, &rsp, sizeof(rsp)) == 0) { dprintk(DBGLVL_API, "SET/PROBE Verified\n"); /* Ask the device to select the negotiated format */ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR, SAA_COMMIT_CONTROL, sizeof(fmt), &fmt); if (ret != SAA_OK) printk(KERN_ERR "%s() commit error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, GET_CUR, SAA_COMMIT_CONTROL, sizeof(rsp), &rsp); if (ret != SAA_OK) printk(KERN_ERR "%s() GET commit error, ret = 0x%x\n", __func__, ret); if (memcmp(&fmt, &rsp, sizeof(rsp)) != 0) { printk(KERN_ERR "%s() memcmp error, ret = 0x%x\n", __func__, ret); } else dprintk(DBGLVL_API, "SET/COMMIT Verified\n"); 
dprintk(DBGLVL_API, "rsp.bmHint = 0x%x\n", rsp.bmHint); dprintk(DBGLVL_API, "rsp.bFormatIndex = 0x%x\n", rsp.bFormatIndex); dprintk(DBGLVL_API, "rsp.bFrameIndex = 0x%x\n", rsp.bFrameIndex); } else printk(KERN_ERR "%s() compare failed\n", __func__); } if (ret == SAA_OK) dprintk(DBGLVL_API, "%s(nr=%d) Success\n", __func__, port->nr); return ret; } int saa7164_api_set_gop_size(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResEncVideoGopStructure gs; int ret; dprintk(DBGLVL_ENC, "%s()\n", __func__); gs.ucRefFrameDist = port->encoder_params.refdist; gs.ucGOPSize = port->encoder_params.gop_size; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_VIDEO_GOP_STRUCTURE_CONTROL, sizeof(gs), &gs); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_set_encoder(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResEncVideoBitRate vb; struct tmComResEncAudioBitRate ab; int ret; dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__, port->hwcfg.sourceid); if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) port->encoder_profile = EU_PROFILE_PS_DVD; else port->encoder_profile = EU_PROFILE_TS_HQ; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Resolution */ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Establish video bitrates */ if (port->encoder_params.bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_CONSTANT; else vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_VARIABLE_PEAK; vb.dwVideoBitRate = port->encoder_params.bitrate; vb.dwVideoBitRatePeak = 
port->encoder_params.bitrate_peak; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_VIDEO_BIT_RATE_CONTROL, sizeof(struct tmComResEncVideoBitRate), &vb); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Establish audio bitrates */ ab.ucAudioBitRateMode = 0; ab.dwAudioBitRate = 384000; ab.dwAudioBitRatePeak = ab.dwAudioBitRate; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_AUDIO_BIT_RATE_CONTROL, sizeof(struct tmComResEncAudioBitRate), &ab); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); saa7164_api_set_aspect_ratio(port); saa7164_api_set_gop_size(port); return ret; } int saa7164_api_get_encoder(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResEncVideoBitRate v; struct tmComResEncAudioBitRate a; struct tmComResEncVideoInputAspectRatio ar; int ret; dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__, port->hwcfg.sourceid); port->encoder_profile = 0; port->video_format = 0; port->video_resolution = 0; port->audio_format = 0; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_VIDEO_RESOLUTION_CONTROL, sizeof(u8), &port->video_resolution); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_VIDEO_FORMAT_CONTROL, sizeof(u8), &port->video_format); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_VIDEO_BIT_RATE_CONTROL, sizeof(v), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_AUDIO_FORMAT_CONTROL, sizeof(u8), &port->audio_format); 
if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_AUDIO_BIT_RATE_CONTROL, sizeof(a), &a); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Aspect Ratio */ ar.width = 0; ar.height = 0; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_VIDEO_INPUT_ASPECT_CONTROL, sizeof(struct tmComResEncVideoInputAspectRatio), &ar); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_ENC, "encoder_profile = %d\n", port->encoder_profile); dprintk(DBGLVL_ENC, "video_format = %d\n", port->video_format); dprintk(DBGLVL_ENC, "audio_format = %d\n", port->audio_format); dprintk(DBGLVL_ENC, "video_resolution= %d\n", port->video_resolution); dprintk(DBGLVL_ENC, "v.ucVideoBitRateMode = %d\n", v.ucVideoBitRateMode); dprintk(DBGLVL_ENC, "v.dwVideoBitRate = %d\n", v.dwVideoBitRate); dprintk(DBGLVL_ENC, "v.dwVideoBitRatePeak = %d\n", v.dwVideoBitRatePeak); dprintk(DBGLVL_ENC, "a.ucVideoBitRateMode = %d\n", a.ucAudioBitRateMode); dprintk(DBGLVL_ENC, "a.dwVideoBitRate = %d\n", a.dwAudioBitRate); dprintk(DBGLVL_ENC, "a.dwVideoBitRatePeak = %d\n", a.dwAudioBitRatePeak); dprintk(DBGLVL_ENC, "aspect.width / height = %d:%d\n", ar.width, ar.height); return ret; } int saa7164_api_set_aspect_ratio(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResEncVideoInputAspectRatio ar; int ret; dprintk(DBGLVL_ENC, "%s(%d)\n", __func__, port->encoder_params.ctl_aspect); switch (port->encoder_params.ctl_aspect) { case V4L2_MPEG_VIDEO_ASPECT_1x1: ar.width = 1; ar.height = 1; break; case V4L2_MPEG_VIDEO_ASPECT_4x3: ar.width = 4; ar.height = 3; break; case V4L2_MPEG_VIDEO_ASPECT_16x9: ar.width = 16; ar.height = 9; break; case V4L2_MPEG_VIDEO_ASPECT_221x100: ar.width = 221; ar.height = 100; break; default: BUG(); } dprintk(DBGLVL_ENC, "%s(%d) now %d:%d\n", __func__, port->encoder_params.ctl_aspect, 
ar.width, ar.height); /* Aspect Ratio */ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_VIDEO_INPUT_ASPECT_CONTROL, sizeof(struct tmComResEncVideoInputAspectRatio), &ar); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_set_usercontrol(struct saa7164_port *port, u8 ctl) { struct saa7164_dev *dev = port->dev; int ret; u16 val; if (ctl == PU_BRIGHTNESS_CONTROL) val = port->ctl_brightness; else if (ctl == PU_CONTRAST_CONTROL) val = port->ctl_contrast; else if (ctl == PU_HUE_CONTROL) val = port->ctl_hue; else if (ctl == PU_SATURATION_CONTROL) val = port->ctl_saturation; else if (ctl == PU_SHARPNESS_CONTROL) val = port->ctl_sharpness; else return -EINVAL; dprintk(DBGLVL_ENC, "%s() unitid=0x%x ctl=%d, val=%d\n", __func__, port->encunit.vsourceid, ctl, val); ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, SET_CUR, ctl, sizeof(u16), &val); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_get_usercontrol(struct saa7164_port *port, u8 ctl) { struct saa7164_dev *dev = port->dev; int ret; u16 val; ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, GET_CUR, ctl, sizeof(u16), &val); if (ret != SAA_OK) { printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } dprintk(DBGLVL_ENC, "%s() ctl=%d, val=%d\n", __func__, ctl, val); if (ctl == PU_BRIGHTNESS_CONTROL) port->ctl_brightness = val; else if (ctl == PU_CONTRAST_CONTROL) port->ctl_contrast = val; else if (ctl == PU_HUE_CONTROL) port->ctl_hue = val; else if (ctl == PU_SATURATION_CONTROL) port->ctl_saturation = val; else if (ctl == PU_SHARPNESS_CONTROL) port->ctl_sharpness = val; return ret; } int saa7164_api_set_videomux(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; u8 inputs[] = { 1, 2, 2, 2, 5, 5, 5 }; int ret; dprintk(DBGLVL_ENC, "%s() v_mux=%d a_mux=%d\n", __func__, port->mux_input, inputs[port->mux_input - 1]); /* Audio Mute */ ret 
= saa7164_api_audio_mute(port, 1); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Video Mux */ ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, SET_CUR, SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Audio Mux */ ret = saa7164_cmd_send(port->dev, port->audfeat.sourceid, SET_CUR, SU_INPUT_SELECT_CONTROL, sizeof(u8), &inputs[port->mux_input - 1]); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Audio UnMute */ ret = saa7164_api_audio_mute(port, 0); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_audio_mute(struct saa7164_port *port, int mute) { struct saa7164_dev *dev = port->dev; u8 v = mute; int ret; dprintk(DBGLVL_API, "%s(%d)\n", __func__, mute); ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR, MUTE_CONTROL, sizeof(u8), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } /* 0 = silence, 0xff = full */ int saa7164_api_set_audio_volume(struct saa7164_port *port, s8 level) { struct saa7164_dev *dev = port->dev; s16 v, min, max; int ret; dprintk(DBGLVL_API, "%s(%d)\n", __func__, level); /* Obtain the min/max ranges */ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MIN, VOLUME_CONTROL, sizeof(u16), &min); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MAX, VOLUME_CONTROL, sizeof(u16), &max); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR, (0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__, level, min, max, v); v = level; if (v < min) v = min; if (v 
> max) v = max; /* Left */ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR, (0x01 << 8) | VOLUME_CONTROL, sizeof(s16), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Right */ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR, (0x02 << 8) | VOLUME_CONTROL, sizeof(s16), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR, (0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__, level, min, max, v); return ret; } int saa7164_api_set_audio_std(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResAudioDefaults lvl; struct tmComResTunerStandard tvaudio; int ret; dprintk(DBGLVL_API, "%s()\n", __func__); /* Establish default levels */ lvl.ucDecoderLevel = TMHW_LEV_ADJ_DECLEV_DEFAULT; lvl.ucDecoderFM_Level = TMHW_LEV_ADJ_DECLEV_DEFAULT; lvl.ucMonoLevel = TMHW_LEV_ADJ_MONOLEV_DEFAULT; lvl.ucNICAM_Level = TMHW_LEV_ADJ_NICLEV_DEFAULT; lvl.ucSAP_Level = TMHW_LEV_ADJ_SAPLEV_DEFAULT; lvl.ucADC_Level = TMHW_LEV_ADJ_ADCLEV_DEFAULT; ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR, AUDIO_DEFAULT_CONTROL, sizeof(struct tmComResAudioDefaults), &lvl); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Manually select the appropriate TV audio standard */ if (port->encodernorm.id & V4L2_STD_NTSC) { tvaudio.std = TU_STANDARD_NTSC_M; tvaudio.country = 1; } else { tvaudio.std = TU_STANDARD_PAL_I; tvaudio.country = 44; } ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR, TU_STANDARD_CONTROL, sizeof(tvaudio), &tvaudio); if (ret != SAA_OK) printk(KERN_ERR "%s() TU_STANDARD_CONTROL error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_set_audio_detection(struct saa7164_port *port, int autodetect) { 
struct saa7164_dev *dev = port->dev; struct tmComResTunerStandardAuto p; int ret; dprintk(DBGLVL_API, "%s(%d)\n", __func__, autodetect); /* Disable TV Audio autodetect if not already set (buggy) */ if (autodetect) p.mode = TU_STANDARD_AUTO; else p.mode = TU_STANDARD_MANUAL; ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR, TU_STANDARD_AUTO_CONTROL, sizeof(p), &p); if (ret != SAA_OK) printk(KERN_ERR "%s() TU_STANDARD_AUTO_CONTROL error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_get_videomux(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, GET_CUR, SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_ENC, "%s() v_mux=%d\n", __func__, port->mux_input); return ret; } int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val) { struct saa7164_dev *dev = port->dev; u16 len = 0; u8 buf[256]; int ret; u8 mas; dprintk(DBGLVL_API, "%s(nr=%d type=%d val=%x)\n", __func__, port->nr, port->type, val); if (port->nr == 0) mas = 0xd0; else mas = 0xe0; memset(buf, 0, sizeof(buf)); buf[0x00] = 0x04; buf[0x01] = 0x00; buf[0x02] = 0x00; buf[0x03] = 0x00; buf[0x04] = 0x04; buf[0x05] = 0x00; buf[0x06] = 0x00; buf[0x07] = 0x00; buf[0x08] = reg; buf[0x09] = 0x26; buf[0x0a] = mas; buf[0x0b] = 0xb0; buf[0x0c] = val; buf[0x0d] = 0x00; buf[0x0e] = 0x00; buf[0x0f] = 0x00; ret = saa7164_cmd_send(dev, port->ifunit.unitid, GET_LEN, EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len); if (ret != SAA_OK) { printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret); return -EIO; } ret = saa7164_cmd_send(dev, port->ifunit.unitid, SET_CUR, EXU_REGISTER_ACCESS_CONTROL, len, &buf); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret); #if 0 saa7164_dumphex16(dev, buf, 16); #endif return ret == SAA_OK ? 
0 : -EIO;
}

/* Disable the IF block AGC controls */
/* Program the DIF video standard register for the given V4L2 standard
 * mask, set the AGC behaviour accordingly (AGC output disabled for DTV),
 * then toggle the DIF 'active' bit to latch the new configuration.
 */
int saa7164_api_configure_dif(struct saa7164_port *port, u32 std)
{
	struct saa7164_dev *dev = port->dev;
	int ret = 0;
	u8 agc_disable;

	dprintk(DBGLVL_API, "%s(nr=%d, 0x%x)\n", __func__, port->nr, std);

	if (std & V4L2_STD_NTSC) {
		dprintk(DBGLVL_API, " NTSC\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_I) {
		dprintk(DBGLVL_API, " PAL-I\n");
		saa7164_api_set_dif(port, 0x00, 0x08); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_M) {
		dprintk(DBGLVL_API, " PAL-M\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_N) {
		dprintk(DBGLVL_API, " PAL-N\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_Nc) {
		dprintk(DBGLVL_API, " PAL-Nc\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_B) {
		dprintk(DBGLVL_API, " PAL-B\n");
		saa7164_api_set_dif(port, 0x00, 0x02); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_DK) {
		dprintk(DBGLVL_API, " PAL-DK\n");
		saa7164_api_set_dif(port, 0x00, 0x10); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_SECAM_L) {
		dprintk(DBGLVL_API, " SECAM-L\n");
		saa7164_api_set_dif(port, 0x00, 0x20); /* Video Standard */
		agc_disable = 0;
	} else {
		/* Unknown standard, assume DTV */
		dprintk(DBGLVL_API, " Unknown (assuming DTV)\n");
		/* Undefinded Video Standard */
		saa7164_api_set_dif(port, 0x00, 0x80);
		agc_disable = 1;
	}

	saa7164_api_set_dif(port, 0x48, 0xa0); /* AGC Functions 1 */
	saa7164_api_set_dif(port, 0xc0, agc_disable); /* AGC Output Disable */
	saa7164_api_set_dif(port, 0x7c, 0x04); /* CVBS EQ */
	/* Pulse the active bit so the DIF picks up the new settings */
	saa7164_api_set_dif(port, 0x04, 0x01); /* Active */
	msleep(100);
	saa7164_api_set_dif(port, 0x04, 0x00); /* Active (again) */
	msleep(100);

	return ret;
}

/* Ensure the dif is in the correct state for the operating mode *
(analog / dtv). We only configure the diff through the analog encoder * so when we're in digital mode we need to find the appropriate encoder * and use it to configure the DIF. */ int saa7164_api_initialize_dif(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_port *p = NULL; int ret = -EINVAL; u32 std = 0; dprintk(DBGLVL_API, "%s(nr=%d type=%d)\n", __func__, port->nr, port->type); if (port->type == SAA7164_MPEG_ENCODER) { /* Pick any analog standard to init the diff. * we'll come back during encoder_init' * and set the correct standard if requried. */ std = V4L2_STD_NTSC; } else if (port->type == SAA7164_MPEG_DVB) { if (port->nr == SAA7164_PORT_TS1) p = &dev->ports[SAA7164_PORT_ENC1]; else p = &dev->ports[SAA7164_PORT_ENC2]; } else if (port->type == SAA7164_MPEG_VBI) { std = V4L2_STD_NTSC; if (port->nr == SAA7164_PORT_VBI1) p = &dev->ports[SAA7164_PORT_ENC1]; else p = &dev->ports[SAA7164_PORT_ENC2]; } else BUG(); if (p) ret = saa7164_api_configure_dif(p, std); return ret; } int saa7164_api_transition_port(struct saa7164_port *port, u8 mode) { struct saa7164_dev *dev = port->dev; int ret; dprintk(DBGLVL_API, "%s(nr=%d unitid=0x%x,%d)\n", __func__, port->nr, port->hwcfg.unitid, mode); ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR, SAA_STATE_CONTROL, sizeof(mode), &mode); if (ret != SAA_OK) printk(KERN_ERR "%s(portnr %d unitid 0x%x) error, ret = 0x%x\n", __func__, port->nr, port->hwcfg.unitid, ret); return ret; } int saa7164_api_get_fw_version(struct saa7164_dev *dev, u32 *version) { int ret; ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_FW_VERSION_CONTROL, sizeof(u32), version); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen) { u8 reg[] = { 0x0f, 0x00 }; if (buflen < 128) return -ENOMEM; /* Assumption: Hauppauge eeprom is at 0xa0 on on bus 0 */ /* TODO: Pull the details from the boards struct */ return 
saa7164_api_i2c_read(&dev->i2c_bus[0], 0xa0 >> 1, sizeof(reg), &reg[0], 128, buf); } int saa7164_api_configure_port_vbi(struct saa7164_dev *dev, struct saa7164_port *port) { struct tmComResVBIFormatDescrHeader *fmt = &port->vbi_fmt_ntsc; dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex); dprintk(DBGLVL_API, " VideoStandard = 0x%x\n", fmt->VideoStandard); dprintk(DBGLVL_API, " StartLine = %d\n", fmt->StartLine); dprintk(DBGLVL_API, " EndLine = %d\n", fmt->EndLine); dprintk(DBGLVL_API, " FieldRate = %d\n", fmt->FieldRate); dprintk(DBGLVL_API, " bNumLines = %d\n", fmt->bNumLines); /* Cache the hardware configuration in the port */ port->bufcounter = port->hwcfg.BARLocation; port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32)); port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32)); port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32)); port->bufptr32l = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32); port->bufptr32h = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); port->bufptr64 = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n", port->hwcfg.BARLocation); dprintk(DBGLVL_API, " = VS_FORMAT_VBI (becomes dev->en[%d])\n", port->nr); return 0; } int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev, struct saa7164_port *port, struct tmComResTSFormatDescrHeader *tsfmt) { dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", tsfmt->bFormatIndex); dprintk(DBGLVL_API, " bDataOffset = 0x%x\n", tsfmt->bDataOffset); dprintk(DBGLVL_API, " bPacketLength= 0x%x\n", tsfmt->bPacketLength); dprintk(DBGLVL_API, " bStrideLength= 0x%x\n", tsfmt->bStrideLength); dprintk(DBGLVL_API, " bguid = (....)\n"); /* Cache the hardware configuration in the port */ port->bufcounter = port->hwcfg.BARLocation; port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32)); port->bufsize = 
port->hwcfg.BARLocation + (3 * sizeof(u32)); port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32)); port->bufptr32l = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32); port->bufptr32h = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); port->bufptr64 = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n", port->hwcfg.BARLocation); dprintk(DBGLVL_API, " = VS_FORMAT_MPEGTS (becomes dev->ts[%d])\n", port->nr); return 0; } int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev, struct saa7164_port *port, struct tmComResPSFormatDescrHeader *fmt) { dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex); dprintk(DBGLVL_API, " wPacketLength= 0x%x\n", fmt->wPacketLength); dprintk(DBGLVL_API, " wPackLength= 0x%x\n", fmt->wPackLength); dprintk(DBGLVL_API, " bPackDataType= 0x%x\n", fmt->bPackDataType); /* Cache the hardware configuration in the port */ /* TODO: CHECK THIS in the port config */ port->bufcounter = port->hwcfg.BARLocation; port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32)); port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32)); port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32)); port->bufptr32l = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32); port->bufptr32h = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); port->bufptr64 = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n", port->hwcfg.BARLocation); dprintk(DBGLVL_API, " = VS_FORMAT_MPEGPS (becomes dev->enc[%d])\n", port->nr); return 0; } int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len) { struct saa7164_port *tsport = NULL; struct saa7164_port *encport = NULL; struct saa7164_port 
*vbiport = NULL; u32 idx, next_offset; int i; struct tmComResDescrHeader *hdr, *t; struct tmComResExtDevDescrHeader *exthdr; struct tmComResPathDescrHeader *pathhdr; struct tmComResAntTermDescrHeader *anttermhdr; struct tmComResTunerDescrHeader *tunerunithdr; struct tmComResDMATermDescrHeader *vcoutputtermhdr; struct tmComResTSFormatDescrHeader *tsfmt; struct tmComResPSFormatDescrHeader *psfmt; struct tmComResSelDescrHeader *psel; struct tmComResProcDescrHeader *pdh; struct tmComResAFeatureDescrHeader *afd; struct tmComResEncoderDescrHeader *edh; struct tmComResVBIFormatDescrHeader *vbifmt; u32 currpath = 0; dprintk(DBGLVL_API, "%s(?,?,%d) sizeof(struct tmComResDescrHeader) = %d bytes\n", __func__, len, (u32)sizeof(struct tmComResDescrHeader)); for (idx = 0; idx < (len - sizeof(struct tmComResDescrHeader));) { hdr = (struct tmComResDescrHeader *)(buf + idx); if (hdr->type != CS_INTERFACE) return SAA_ERR_NOT_SUPPORTED; dprintk(DBGLVL_API, "@ 0x%x =\n", idx); switch (hdr->subtype) { case GENERAL_REQUEST: dprintk(DBGLVL_API, " GENERAL_REQUEST\n"); break; case VC_TUNER_PATH: dprintk(DBGLVL_API, " VC_TUNER_PATH\n"); pathhdr = (struct tmComResPathDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " pathid = 0x%x\n", pathhdr->pathid); currpath = pathhdr->pathid; break; case VC_INPUT_TERMINAL: dprintk(DBGLVL_API, " VC_INPUT_TERMINAL\n"); anttermhdr = (struct tmComResAntTermDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " terminalid = 0x%x\n", anttermhdr->terminalid); dprintk(DBGLVL_API, " terminaltype = 0x%x\n", anttermhdr->terminaltype); switch (anttermhdr->terminaltype) { case ITT_ANTENNA: dprintk(DBGLVL_API, " = ITT_ANTENNA\n"); break; case LINE_CONNECTOR: dprintk(DBGLVL_API, " = LINE_CONNECTOR\n"); break; case SPDIF_CONNECTOR: dprintk(DBGLVL_API, " = SPDIF_CONNECTOR\n"); break; case COMPOSITE_CONNECTOR: dprintk(DBGLVL_API, " = COMPOSITE_CONNECTOR\n"); break; case SVIDEO_CONNECTOR: dprintk(DBGLVL_API, " = SVIDEO_CONNECTOR\n"); break; case COMPONENT_CONNECTOR: 
dprintk(DBGLVL_API, " = COMPONENT_CONNECTOR\n"); break; case STANDARD_DMA: dprintk(DBGLVL_API, " = STANDARD_DMA\n"); break; default: dprintk(DBGLVL_API, " = undefined (0x%x)\n", anttermhdr->terminaltype); } dprintk(DBGLVL_API, " assocterminal= 0x%x\n", anttermhdr->assocterminal); dprintk(DBGLVL_API, " iterminal = 0x%x\n", anttermhdr->iterminal); dprintk(DBGLVL_API, " controlsize = 0x%x\n", anttermhdr->controlsize); break; case VC_OUTPUT_TERMINAL: dprintk(DBGLVL_API, " VC_OUTPUT_TERMINAL\n"); vcoutputtermhdr = (struct tmComResDMATermDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " unitid = 0x%x\n", vcoutputtermhdr->unitid); dprintk(DBGLVL_API, " terminaltype = 0x%x\n", vcoutputtermhdr->terminaltype); switch (vcoutputtermhdr->terminaltype) { case ITT_ANTENNA: dprintk(DBGLVL_API, " = ITT_ANTENNA\n"); break; case LINE_CONNECTOR: dprintk(DBGLVL_API, " = LINE_CONNECTOR\n"); break; case SPDIF_CONNECTOR: dprintk(DBGLVL_API, " = SPDIF_CONNECTOR\n"); break; case COMPOSITE_CONNECTOR: dprintk(DBGLVL_API, " = COMPOSITE_CONNECTOR\n"); break; case SVIDEO_CONNECTOR: dprintk(DBGLVL_API, " = SVIDEO_CONNECTOR\n"); break; case COMPONENT_CONNECTOR: dprintk(DBGLVL_API, " = COMPONENT_CONNECTOR\n"); break; case STANDARD_DMA: dprintk(DBGLVL_API, " = STANDARD_DMA\n"); break; default: dprintk(DBGLVL_API, " = undefined (0x%x)\n", vcoutputtermhdr->terminaltype); } dprintk(DBGLVL_API, " assocterminal= 0x%x\n", vcoutputtermhdr->assocterminal); dprintk(DBGLVL_API, " sourceid = 0x%x\n", vcoutputtermhdr->sourceid); dprintk(DBGLVL_API, " iterminal = 0x%x\n", vcoutputtermhdr->iterminal); dprintk(DBGLVL_API, " BARLocation = 0x%x\n", vcoutputtermhdr->BARLocation); dprintk(DBGLVL_API, " flags = 0x%x\n", vcoutputtermhdr->flags); dprintk(DBGLVL_API, " interruptid = 0x%x\n", vcoutputtermhdr->interruptid); dprintk(DBGLVL_API, " buffercount = 0x%x\n", vcoutputtermhdr->buffercount); dprintk(DBGLVL_API, " metadatasize = 0x%x\n", vcoutputtermhdr->metadatasize); dprintk(DBGLVL_API, " controlsize = 0x%x\n", 
vcoutputtermhdr->controlsize); dprintk(DBGLVL_API, " numformats = 0x%x\n", vcoutputtermhdr->numformats); t = (struct tmComResDescrHeader *) ((struct tmComResDMATermDescrHeader *)(buf + idx)); next_offset = idx + (vcoutputtermhdr->len); for (i = 0; i < vcoutputtermhdr->numformats; i++) { t = (struct tmComResDescrHeader *) (buf + next_offset); switch (t->subtype) { case VS_FORMAT_MPEG2TS: tsfmt = (struct tmComResTSFormatDescrHeader *)t; if (currpath == 1) tsport = &dev->ports[SAA7164_PORT_TS1]; else tsport = &dev->ports[SAA7164_PORT_TS2]; memcpy(&tsport->hwcfg, vcoutputtermhdr, sizeof(*vcoutputtermhdr)); saa7164_api_configure_port_mpeg2ts(dev, tsport, tsfmt); break; case VS_FORMAT_MPEG2PS: psfmt = (struct tmComResPSFormatDescrHeader *)t; if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->hwcfg, vcoutputtermhdr, sizeof(*vcoutputtermhdr)); saa7164_api_configure_port_mpeg2ps(dev, encport, psfmt); break; case VS_FORMAT_VBI: vbifmt = (struct tmComResVBIFormatDescrHeader *)t; if (currpath == 1) vbiport = &dev->ports[SAA7164_PORT_VBI1]; else vbiport = &dev->ports[SAA7164_PORT_VBI2]; memcpy(&vbiport->hwcfg, vcoutputtermhdr, sizeof(*vcoutputtermhdr)); memcpy(&vbiport->vbi_fmt_ntsc, vbifmt, sizeof(*vbifmt)); saa7164_api_configure_port_vbi(dev, vbiport); break; case VS_FORMAT_RDS: dprintk(DBGLVL_API, " = VS_FORMAT_RDS\n"); break; case VS_FORMAT_UNCOMPRESSED: dprintk(DBGLVL_API, " = VS_FORMAT_UNCOMPRESSED\n"); break; case VS_FORMAT_TYPE: dprintk(DBGLVL_API, " = VS_FORMAT_TYPE\n"); break; default: dprintk(DBGLVL_API, " = undefined (0x%x)\n", t->subtype); } next_offset += t->len; } break; case TUNER_UNIT: dprintk(DBGLVL_API, " TUNER_UNIT\n"); tunerunithdr = (struct tmComResTunerDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " unitid = 0x%x\n", tunerunithdr->unitid); dprintk(DBGLVL_API, " sourceid = 0x%x\n", tunerunithdr->sourceid); dprintk(DBGLVL_API, " iunit = 0x%x\n", tunerunithdr->iunit); 
dprintk(DBGLVL_API, " tuningstandards = 0x%x\n", tunerunithdr->tuningstandards); dprintk(DBGLVL_API, " controlsize = 0x%x\n", tunerunithdr->controlsize); dprintk(DBGLVL_API, " controls = 0x%x\n", tunerunithdr->controls); if (tunerunithdr->unitid == tunerunithdr->iunit) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->tunerunit, tunerunithdr, sizeof(struct tmComResTunerDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d] tuner)\n", encport->nr); } break; case VC_SELECTOR_UNIT: psel = (struct tmComResSelDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " VC_SELECTOR_UNIT\n"); dprintk(DBGLVL_API, " unitid = 0x%x\n", psel->unitid); dprintk(DBGLVL_API, " nrinpins = 0x%x\n", psel->nrinpins); dprintk(DBGLVL_API, " sourceid = 0x%x\n", psel->sourceid); break; case VC_PROCESSING_UNIT: pdh = (struct tmComResProcDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " VC_PROCESSING_UNIT\n"); dprintk(DBGLVL_API, " unitid = 0x%x\n", pdh->unitid); dprintk(DBGLVL_API, " sourceid = 0x%x\n", pdh->sourceid); dprintk(DBGLVL_API, " controlsize = 0x%x\n", pdh->controlsize); if (pdh->controlsize == 0x04) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->vidproc, pdh, sizeof(struct tmComResProcDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); } break; case FEATURE_UNIT: afd = (struct tmComResAFeatureDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " FEATURE_UNIT\n"); dprintk(DBGLVL_API, " unitid = 0x%x\n", afd->unitid); dprintk(DBGLVL_API, " sourceid = 0x%x\n", afd->sourceid); dprintk(DBGLVL_API, " controlsize = 0x%x\n", afd->controlsize); if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->audfeat, afd, sizeof(struct tmComResAFeatureDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); break; case ENCODER_UNIT: edh = 
(struct tmComResEncoderDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " ENCODER_UNIT\n"); dprintk(DBGLVL_API, " subtype = 0x%x\n", edh->subtype); dprintk(DBGLVL_API, " unitid = 0x%x\n", edh->unitid); dprintk(DBGLVL_API, " vsourceid = 0x%x\n", edh->vsourceid); dprintk(DBGLVL_API, " asourceid = 0x%x\n", edh->asourceid); dprintk(DBGLVL_API, " iunit = 0x%x\n", edh->iunit); if (edh->iunit == edh->unitid) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->encunit, edh, sizeof(struct tmComResEncoderDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); } break; case EXTENSION_UNIT: dprintk(DBGLVL_API, " EXTENSION_UNIT\n"); exthdr = (struct tmComResExtDevDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " unitid = 0x%x\n", exthdr->unitid); dprintk(DBGLVL_API, " deviceid = 0x%x\n", exthdr->deviceid); dprintk(DBGLVL_API, " devicetype = 0x%x\n", exthdr->devicetype); if (exthdr->devicetype & 0x1) dprintk(DBGLVL_API, " = Decoder Device\n"); if (exthdr->devicetype & 0x2) dprintk(DBGLVL_API, " = GPIO Source\n"); if (exthdr->devicetype & 0x4) dprintk(DBGLVL_API, " = Video Decoder\n"); if (exthdr->devicetype & 0x8) dprintk(DBGLVL_API, " = Audio Decoder\n"); if (exthdr->devicetype & 0x20) dprintk(DBGLVL_API, " = Crossbar\n"); if (exthdr->devicetype & 0x40) dprintk(DBGLVL_API, " = Tuner\n"); if (exthdr->devicetype & 0x80) dprintk(DBGLVL_API, " = IF PLL\n"); if (exthdr->devicetype & 0x100) dprintk(DBGLVL_API, " = Demodulator\n"); if (exthdr->devicetype & 0x200) dprintk(DBGLVL_API, " = RDS Decoder\n"); if (exthdr->devicetype & 0x400) dprintk(DBGLVL_API, " = Encoder\n"); if (exthdr->devicetype & 0x800) dprintk(DBGLVL_API, " = IR Decoder\n"); if (exthdr->devicetype & 0x1000) dprintk(DBGLVL_API, " = EEPROM\n"); if (exthdr->devicetype & 0x2000) dprintk(DBGLVL_API, " = VBI Decoder\n"); if (exthdr->devicetype & 0x10000) dprintk(DBGLVL_API, " = Streaming Device\n"); if (exthdr->devicetype & 
0x20000) dprintk(DBGLVL_API, " = DRM Device\n"); if (exthdr->devicetype & 0x40000000) dprintk(DBGLVL_API, " = Generic Device\n"); if (exthdr->devicetype & 0x80000000) dprintk(DBGLVL_API, " = Config Space Device\n"); dprintk(DBGLVL_API, " numgpiopins = 0x%x\n", exthdr->numgpiopins); dprintk(DBGLVL_API, " numgpiogroups = 0x%x\n", exthdr->numgpiogroups); dprintk(DBGLVL_API, " controlsize = 0x%x\n", exthdr->controlsize); if (exthdr->devicetype & 0x80) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->ifunit, exthdr, sizeof(struct tmComResExtDevDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); } break; case PVC_INFRARED_UNIT: dprintk(DBGLVL_API, " PVC_INFRARED_UNIT\n"); break; case DRM_UNIT: dprintk(DBGLVL_API, " DRM_UNIT\n"); break; default: dprintk(DBGLVL_API, "default %d\n", hdr->subtype); } dprintk(DBGLVL_API, " 1.%x\n", hdr->len); dprintk(DBGLVL_API, " 2.%x\n", hdr->type); dprintk(DBGLVL_API, " 3.%x\n", hdr->subtype); dprintk(DBGLVL_API, " 4.%x\n", hdr->unitid); idx += hdr->len; } return 0; } int saa7164_api_enum_subdevs(struct saa7164_dev *dev) { int ret; u32 buflen = 0; u8 *buf; dprintk(DBGLVL_API, "%s()\n", __func__); /* Get the total descriptor length */ ret = saa7164_cmd_send(dev, 0, GET_LEN, GET_DESCRIPTORS_CONTROL, sizeof(buflen), &buflen); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_API, "%s() total descriptor size = %d bytes.\n", __func__, buflen); /* Allocate enough storage for all of the descs */ buf = kzalloc(buflen, GFP_KERNEL); if (!buf) return SAA_ERR_NO_RESOURCES; /* Retrieve them */ ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_DESCRIPTORS_CONTROL, buflen, buf); if (ret != SAA_OK) { printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); goto out; } if (saa_debug & DBGLVL_API) saa7164_dumphex16(dev, buf, (buflen/16)*16); saa7164_api_dump_subdevs(dev, buf, buflen); out: kfree(buf); return ret; } 
/*
 * Read @datalen bytes from the i2c client at 8-bit address @addr, starting
 * at the @reglen-byte register offset pointed to by @reg.  The transaction
 * is tunnelled through the bridge firmware via EXU_REGISTER_ACCESS_CONTROL.
 *
 * Returns 0 on success, -EIO on any failure.
 */
int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg,
	u32 datalen, u8 *data)
{
	struct saa7164_dev *dev = bus->dev;
	u16 len = 0;
	int unitid;
	u32 regval;
	u8 buf[256];
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	if (reglen > 4)
		return -EIO;

	/*
	 * Pack the register address big-endian into regval.
	 *
	 * BUG FIX: the two-byte case used the logical '||' operator, which
	 * collapses the value to 0 or 1, instead of bitwise '|'.
	 *
	 * NOTE(review): regval is not referenced after this point; it appears
	 * to be a debugging aid only — confirm before removing.
	 */
	if (reglen == 1)
		regval = *(reg);
	else if (reglen == 2)
		regval = ((*(reg) << 8) | *(reg + 1));
	else if (reglen == 3)
		regval = ((*(reg) << 16) | (*(reg + 1) << 8) | *(reg + 2));
	else if (reglen == 4)
		regval = ((*(reg) << 24) | (*(reg + 1) << 16) |
			(*(reg + 2) << 8) | *(reg + 3));

	/* Prepare the send buffer */
	/* Bytes 00-03 source register length
	 *       04-07 source bytes to read
	 *       08... register address
	 */
	memset(buf, 0, sizeof(buf));
	memcpy((buf + 2 * sizeof(u32) + 0), reg, reglen);
	*((u32 *)(buf + 0 * sizeof(u32))) = reglen;
	*((u32 *)(buf + 1 * sizeof(u32))) = datalen;

	unitid = saa7164_i2caddr_to_unitid(bus, addr);
	if (unitid < 0) {
		printk(KERN_ERR
			"%s() error, cannot translate regaddr 0x%x to unitid\n",
			__func__, addr);
		return -EIO;
	}

	/* Ask the firmware how large the register-access payload is. */
	ret = saa7164_cmd_send(bus->dev, unitid, GET_LEN,
		EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
		return -EIO;
	}

	dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len);

	if (saa_debug & DBGLVL_I2C)
		saa7164_dumphex16(dev, buf, 2 * 16);

	ret = saa7164_cmd_send(bus->dev, unitid, GET_CUR,
		EXU_REGISTER_ACCESS_CONTROL, len, &buf);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);
	else {
		if (saa_debug & DBGLVL_I2C)
			saa7164_dumphex16(dev, buf, sizeof(buf));
		/*
		 * The response mirrors the request layout: the data follows
		 * the two u32 header words and the register address.
		 * NOTE(review): datalen is not bounds-checked against
		 * sizeof(buf) here — callers must keep it small; verify.
		 */
		memcpy(data, (buf + 2 * sizeof(u32) + reglen), datalen);
	}

	return ret == SAA_OK ? 0 : -EIO;
}

/*
 * For a given 8-bit i2c address device, write @datalen bytes from @data.
 * The register address is expected to be the leading bytes of @data (its
 * length is looked up per-device via saa7164_i2caddr_to_reglen()).
 *
 * Returns 0 on success, -EIO on any failure.
 */
int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen,
	u8 *data)
{
	struct saa7164_dev *dev = bus->dev;
	u16 len = 0;
	int unitid;
	int reglen;
	u8 buf[256];
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	/* 232 = 256 minus the 8-byte header and worst-case slack. */
	if ((datalen == 0) || (datalen > 232))
		return -EIO;

	memset(buf, 0, sizeof(buf));

	unitid = saa7164_i2caddr_to_unitid(bus, addr);
	if (unitid < 0) {
		printk(KERN_ERR
			"%s() error, cannot translate regaddr 0x%x to unitid\n",
			__func__, addr);
		return -EIO;
	}

	reglen = saa7164_i2caddr_to_reglen(bus, addr);
	if (reglen < 0) {
		printk(KERN_ERR
			"%s() error, cannot translate regaddr to reglen\n",
			__func__);
		return -EIO;
	}

	ret = saa7164_cmd_send(bus->dev, unitid, GET_LEN,
		EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
		return -EIO;
	}

	dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len);

	/* Prepare the send buffer */
	/* Bytes 00-03 dest register length
	 *       04-07 dest bytes to write
	 *       08... register address
	 */
	*((u32 *)(buf + 0 * sizeof(u32))) = reglen;
	*((u32 *)(buf + 1 * sizeof(u32))) = datalen - reglen;
	memcpy((buf + 2 * sizeof(u32)), data, datalen);

	if (saa_debug & DBGLVL_I2C)
		saa7164_dumphex16(dev, buf, sizeof(buf));

	ret = saa7164_cmd_send(bus->dev, unitid, SET_CUR,
		EXU_REGISTER_ACCESS_CONTROL, len, &buf);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);

	return ret == SAA_OK ? 0 : -EIO;
}

/*
 * Drive a single GPIO pin on unit @unitid.  @pin is 0-7; @state is 0, 1
 * or 2 (the meaning of 2 is firmware-defined — not visible from here).
 * Returns SAA_OK on success, a SAA_ERR_* code otherwise.
 */
int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
	u8 pin, u8 state)
{
	int ret;
	struct tmComResGPIO t;

	dprintk(DBGLVL_API, "%s(0x%x, %d, %d)\n",
		__func__, unitid, pin, state);

	if ((pin > 7) || (state > 2))
		return SAA_ERR_BAD_PARAMETER;

	/* NOTE(review): only .pin and .state are initialized; any other
	 * fields/padding of tmComResGPIO are sent as-is — confirm the
	 * struct has no other members the firmware reads. */
	t.pin = pin;
	t.state = state;

	ret = saa7164_cmd_send(dev, unitid, SET_CUR,
		EXU_GPIO_CONTROL, sizeof(t), &t);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n",
			__func__, ret);

	return ret;
}

/* Convenience wrapper: drive @pin high on @unitid. */
int saa7164_api_set_gpiobit(struct saa7164_dev *dev, u8 unitid, u8 pin)
{
	return saa7164_api_modify_gpio(dev, unitid, pin, 1);
}

/* Convenience wrapper: drive @pin low on @unitid. */
int saa7164_api_clear_gpiobit(struct saa7164_dev *dev, u8 unitid, u8 pin)
{
	return saa7164_api_modify_gpio(dev, unitid, pin, 0);
}
gpl-2.0
delaemon/linux
arch/powerpc/kernel/rtas-rtc.c
11883
2749
#include <linux/kernel.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/rtc.h> #include <linux/delay.h> #include <linux/ratelimit.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/time.h> #define MAX_RTC_WAIT 5000 /* 5 sec */ #define RTAS_CLOCK_BUSY (-2) unsigned long __init rtas_get_boot_time(void) { int ret[8]; int error; unsigned int wait_time; u64 max_wait_tb; max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT; do { error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret); wait_time = rtas_busy_delay_time(error); if (wait_time) { /* This is boot time so we spin. */ udelay(wait_time*1000); } } while (wait_time && (get_tb() < max_wait_tb)); if (error != 0) { printk_ratelimited(KERN_WARNING "error: reading the clock failed (%d)\n", error); return 0; } return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]); } /* NOTE: get_rtc_time will get an error if executed in interrupt context * and if a delay is needed to read the clock. In this case we just * silently return without updating rtc_tm. 
*/ void rtas_get_rtc_time(struct rtc_time *rtc_tm) { int ret[8]; int error; unsigned int wait_time; u64 max_wait_tb; max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT; do { error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret); wait_time = rtas_busy_delay_time(error); if (wait_time) { if (in_interrupt()) { memset(rtc_tm, 0, sizeof(struct rtc_time)); printk_ratelimited(KERN_WARNING "error: reading clock " "would delay interrupt\n"); return; /* delay not allowed */ } msleep(wait_time); } } while (wait_time && (get_tb() < max_wait_tb)); if (error != 0) { printk_ratelimited(KERN_WARNING "error: reading the clock failed (%d)\n", error); return; } rtc_tm->tm_sec = ret[5]; rtc_tm->tm_min = ret[4]; rtc_tm->tm_hour = ret[3]; rtc_tm->tm_mday = ret[2]; rtc_tm->tm_mon = ret[1] - 1; rtc_tm->tm_year = ret[0] - 1900; } int rtas_set_rtc_time(struct rtc_time *tm) { int error, wait_time; u64 max_wait_tb; max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT; do { error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec, 0); wait_time = rtas_busy_delay_time(error); if (wait_time) { if (in_interrupt()) return 1; /* probably decrementer */ msleep(wait_time); } } while (wait_time && (get_tb() < max_wait_tb)); if (error != 0) printk_ratelimited(KERN_WARNING "error: setting the clock failed (%d)\n", error); return 0; }
gpl-2.0
pstovk/android_kernel_acer_a100
arch/tile/lib/memmove.c
12395
1452
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> void *memmove(void *dest, const void *src, size_t n) { if ((const char *)src >= (char *)dest + n || (char *)dest >= (const char *)src + n) { /* We found no overlap, so let memcpy do all the heavy * lifting (prefetching, etc.) */ return memcpy(dest, src, n); } if (n != 0) { const uint8_t *in; uint8_t x; uint8_t *out; int stride; if (src < dest) { /* copy backwards */ in = (const uint8_t *)src + n - 1; out = (uint8_t *)dest + n - 1; stride = -1; } else { /* copy forwards */ in = (const uint8_t *)src; out = (uint8_t *)dest; stride = 1; } /* Manually software-pipeline this loop. */ x = *in; in += stride; while (--n != 0) { *out = x; out += stride; x = *in; in += stride; } *out = x; } return dest; } EXPORT_SYMBOL(memmove);
gpl-2.0
yank555-lu/SGS3-Sourcedrops
fs/ntfs/bitmap.c
14443
5603
/*
 * bitmap.c - NTFS kernel bitmap handling.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2004-2005 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef NTFS_RW

#include <linux/pagemap.h>

#include "bitmap.h"
#include "debug.h"
#include "aops.h"
#include "ntfs.h"

/**
 * __ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
 * @vi:			vfs inode describing the bitmap
 * @start_bit:		first bit to set
 * @count:		number of bits to set
 * @value:		value to set the bits to (i.e. 0 or 1)
 * @is_rollback:	if 'true' this is a rollback operation
 *
 * Set @count bits starting at bit @start_bit in the bitmap described by the
 * vfs inode @vi to @value, where @value is either 0 or 1.
 *
 * The run is processed page by page through the inode's page cache: a
 * partial leading byte bit-by-bit, then whole bytes via memset(), then a
 * partial trailing byte bit-by-bit.  If mapping a subsequent page fails
 * midway, the bits already modified are flipped back by calling this
 * function recursively with the inverse @value and @is_rollback set.
 *
 * @is_rollback should always be 'false', it is for internal use to rollback
 * errors.  You probably want to use ntfs_bitmap_set_bits_in_run() instead.
 *
 * Return 0 on success and -errno on error.
 */
int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
		const s64 count, const u8 value, const bool is_rollback)
{
	s64 cnt = count;	/* bits still to be modified */
	pgoff_t index, end_index;
	struct address_space *mapping;
	struct page *page;
	u8 *kaddr;
	int pos, len;
	u8 bit;

	BUG_ON(!vi);
	ntfs_debug("Entering for i_ino 0x%lx, start_bit 0x%llx, count 0x%llx, "
			"value %u.%s", vi->i_ino, (unsigned long long)start_bit,
			(unsigned long long)cnt, (unsigned int)value,
			is_rollback ? " (rollback)" : "");
	BUG_ON(start_bit < 0);
	BUG_ON(cnt < 0);
	BUG_ON(value > 1);
	/*
	 * Calculate the indices for the pages containing the first and last
	 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
	 * Each page holds PAGE_CACHE_SIZE * 8 bits, hence the "+ 3" shift.
	 */
	index = start_bit >> (3 + PAGE_CACHE_SHIFT);
	end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT);
	/* Get the page containing the first bit (@start_bit). */
	mapping = vi->i_mapping;
	page = ntfs_map_page(mapping, index);
	if (IS_ERR(page)) {
		/* During rollback, stay silent: the original caller logs. */
		if (!is_rollback)
			ntfs_error(vi->i_sb, "Failed to map first page (error "
					"%li), aborting.", PTR_ERR(page));
		return PTR_ERR(page);
	}
	kaddr = page_address(page);
	/* Set @pos to the position of the byte containing @start_bit. */
	pos = (start_bit >> 3) & ~PAGE_CACHE_MASK;
	/* Calculate the position of @start_bit in the first byte. */
	bit = start_bit & 7;
	/* If the first byte is partial, modify the appropriate bits in it. */
	if (bit) {
		u8 *byte = kaddr + pos;
		/* Loop ends when @bit wraps past 7 (byte done) or @cnt hits
		 * zero (run done), whichever comes first. */
		while ((bit & 7) && cnt) {
			cnt--;
			if (value)
				*byte |= 1 << bit++;
			else
				*byte &= ~(1 << bit++);
		}
		/* If we are done, unmap the page and return success. */
		if (!cnt)
			goto done;
		/* Update @pos to the new position. */
		pos++;
	}
	/*
	 * Depending on @value, modify all remaining whole bytes in the page up
	 * to @cnt.
	 */
	len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos);
	memset(kaddr + pos, value ? 0xff : 0, len);
	cnt -= len << 3;
	/* Update @len to point to the first not-done byte in the page. */
	if (cnt < 8)
		len += pos;
	/* If we are not in the last page, deal with all subsequent pages. */
	while (index < end_index) {
		BUG_ON(cnt <= 0);
		/* Flush/dirty the finished page before moving on, so that a
		 * later rollback sees consistent on-disk ordering. */
		/* Update @index and get the next page. */
		flush_dcache_page(page);
		set_page_dirty(page);
		ntfs_unmap_page(page);
		page = ntfs_map_page(mapping, ++index);
		if (IS_ERR(page))
			goto rollback;
		kaddr = page_address(page);
		/*
		 * Depending on @value, modify all remaining whole bytes in the
		 * page up to @cnt.
		 */
		len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE);
		memset(kaddr, value ? 0xff : 0, len);
		cnt -= len << 3;
	}
	/*
	 * The currently mapped page is the last one.  If the last byte is
	 * partial, modify the appropriate bits in it.  Note, @len is the
	 * position of the last byte inside the page.
	 */
	if (cnt) {
		u8 *byte;

		BUG_ON(cnt > 7);

		bit = cnt;
		byte = kaddr + len;
		/* Set/clear the low @cnt bits of the trailing byte. */
		while (bit--) {
			if (value)
				*byte |= 1 << bit;
			else
				*byte &= ~(1 << bit);
		}
	}
done:
	/* We are done.  Unmap the page and return success. */
	flush_dcache_page(page);
	set_page_dirty(page);
	ntfs_unmap_page(page);
	ntfs_debug("Done.");
	return 0;
rollback:
	/*
	 * Current state:
	 *	- no pages are mapped
	 *	- @count - @cnt is the number of bits that have been modified
	 */
	if (is_rollback)
		return PTR_ERR(page);
	/* Undo the already-modified prefix by writing the inverse value;
	 * @pos is reused here as the rollback error code (0 = success). */
	if (count != cnt)
		pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt,
				value ? 0 : 1, true);
	else
		pos = 0;
	if (!pos) {
		/* Rollback was successful. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li), aborting.", PTR_ERR(page));
	} else {
		/* Rollback failed. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li) and rollback failed (error %i). "
				"Aborting and leaving inconsistent metadata. "
				"Unmount and run chkdsk.", PTR_ERR(page), pos);
		NVolSetErrors(NTFS_SB(vi->i_sb));
	}
	return PTR_ERR(page);
}

#endif /* NTFS_RW */
gpl-2.0
astarasikov/iconia-gnu-kernel
fs/squashfs/super.c
108
13132
/* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@lougher.demon.co.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * super.c */ /* * This file implements code to read the superblock, read and initialise * in-memory structures at mount time, and all the VFS glue code to register * the filesystem. 
*/ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/module.h> #include <linux/magic.h> #include <linux/xattr.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs_fs_i.h" #include "squashfs.h" #include "decompressor.h" #include "xattr.h" static struct file_system_type squashfs_fs_type; static const struct super_operations squashfs_super_ops; static const struct squashfs_decompressor *supported_squashfs_filesystem(short major, short minor, short id) { const struct squashfs_decompressor *decompressor; if (major < SQUASHFS_MAJOR) { ERROR("Major/Minor mismatch, older Squashfs %d.%d " "filesystems are unsupported\n", major, minor); return NULL; } else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) { ERROR("Major/Minor mismatch, trying to mount newer " "%d.%d filesystem\n", major, minor); ERROR("Please update your kernel\n"); return NULL; } decompressor = squashfs_lookup_decompressor(id); if (!decompressor->supported) { ERROR("Filesystem uses \"%s\" compression. 
This is not " "supported\n", decompressor->name); return NULL; } return decompressor; } static int squashfs_fill_super(struct super_block *sb, void *data, int silent) { struct squashfs_sb_info *msblk; struct squashfs_super_block *sblk = NULL; char b[BDEVNAME_SIZE]; struct inode *root; long long root_inode; unsigned short flags; unsigned int fragments; u64 lookup_table_start, xattr_id_table_start; int err; TRACE("Entered squashfs_fill_superblock\n"); sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL); if (sb->s_fs_info == NULL) { ERROR("Failed to allocate squashfs_sb_info\n"); return -ENOMEM; } msblk = sb->s_fs_info; sblk = kzalloc(sizeof(*sblk), GFP_KERNEL); if (sblk == NULL) { ERROR("Failed to allocate squashfs_super_block\n"); goto failure; } msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE); msblk->devblksize_log2 = ffz(~msblk->devblksize); mutex_init(&msblk->read_data_mutex); mutex_init(&msblk->meta_index_mutex); /* * msblk->bytes_used is checked in squashfs_read_table to ensure reads * are not beyond filesystem end. 
But as we're using * squashfs_read_table here to read the superblock (including the value * of bytes_used) we need to set it to an initial sensible dummy value */ msblk->bytes_used = sizeof(*sblk); err = squashfs_read_table(sb, sblk, SQUASHFS_START, sizeof(*sblk)); if (err < 0) { ERROR("unable to read squashfs_super_block\n"); goto failed_mount; } err = -EINVAL; /* Check it is a SQUASHFS superblock */ sb->s_magic = le32_to_cpu(sblk->s_magic); if (sb->s_magic != SQUASHFS_MAGIC) { if (!silent) ERROR("Can't find a SQUASHFS superblock on %s\n", bdevname(sb->s_bdev, b)); goto failed_mount; } /* Check the MAJOR & MINOR versions and lookup compression type */ msblk->decompressor = supported_squashfs_filesystem( le16_to_cpu(sblk->s_major), le16_to_cpu(sblk->s_minor), le16_to_cpu(sblk->compression)); if (msblk->decompressor == NULL) goto failed_mount; /* Check the filesystem does not extend beyond the end of the block device */ msblk->bytes_used = le64_to_cpu(sblk->bytes_used); if (msblk->bytes_used < 0 || msblk->bytes_used > i_size_read(sb->s_bdev->bd_inode)) goto failed_mount; /* Check block size for sanity */ msblk->block_size = le32_to_cpu(sblk->block_size); if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE) goto failed_mount; /* * Check the system page size is not larger than the filesystem * block size (by default 128K). This is currently not supported. */ if (PAGE_CACHE_SIZE > msblk->block_size) { ERROR("Page size > filesystem block size (%d). 
This is " "currently not supported!\n", msblk->block_size); goto failed_mount; } msblk->block_log = le16_to_cpu(sblk->block_log); if (msblk->block_log > SQUASHFS_FILE_MAX_LOG) goto failed_mount; /* Check the root inode for sanity */ root_inode = le64_to_cpu(sblk->root_inode); if (SQUASHFS_INODE_OFFSET(root_inode) > SQUASHFS_METADATA_SIZE) goto failed_mount; msblk->inode_table = le64_to_cpu(sblk->inode_table_start); msblk->directory_table = le64_to_cpu(sblk->directory_table_start); msblk->inodes = le32_to_cpu(sblk->inodes); flags = le16_to_cpu(sblk->flags); TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b)); TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(flags) ? "un" : ""); TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(flags) ? "un" : ""); TRACE("Filesystem size %lld bytes\n", msblk->bytes_used); TRACE("Block size %d\n", msblk->block_size); TRACE("Number of inodes %d\n", msblk->inodes); TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments)); TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); TRACE("sblk->fragment_table_start %llx\n", (u64) le64_to_cpu(sblk->fragment_table_start)); TRACE("sblk->id_table_start %llx\n", (u64) le64_to_cpu(sblk->id_table_start)); sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_flags |= MS_RDONLY; sb->s_op = &squashfs_super_ops; err = -ENOMEM; msblk->stream = squashfs_decompressor_init(msblk); if (msblk->stream == NULL) goto failed_mount; msblk->block_cache = squashfs_cache_init("metadata", SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE); if (msblk->block_cache == NULL) goto failed_mount; /* Allocate read_page block */ msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size); if (msblk->read_page == NULL) { ERROR("Failed to allocate read_page block\n"); goto failed_mount; } /* Allocate and read id index table */ msblk->id_table = 
squashfs_read_id_index_table(sb, le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids)); if (IS_ERR(msblk->id_table)) { err = PTR_ERR(msblk->id_table); msblk->id_table = NULL; goto failed_mount; } fragments = le32_to_cpu(sblk->fragments); if (fragments == 0) goto allocate_lookup_table; msblk->fragment_cache = squashfs_cache_init("fragment", SQUASHFS_CACHED_FRAGMENTS, msblk->block_size); if (msblk->fragment_cache == NULL) { err = -ENOMEM; goto failed_mount; } /* Allocate and read fragment index table */ msblk->fragment_index = squashfs_read_fragment_index_table(sb, le64_to_cpu(sblk->fragment_table_start), fragments); if (IS_ERR(msblk->fragment_index)) { err = PTR_ERR(msblk->fragment_index); msblk->fragment_index = NULL; goto failed_mount; } allocate_lookup_table: lookup_table_start = le64_to_cpu(sblk->lookup_table_start); if (lookup_table_start == SQUASHFS_INVALID_BLK) goto allocate_xattr_table; /* Allocate and read inode lookup table */ msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb, lookup_table_start, msblk->inodes); if (IS_ERR(msblk->inode_lookup_table)) { err = PTR_ERR(msblk->inode_lookup_table); msblk->inode_lookup_table = NULL; goto failed_mount; } sb->s_export_op = &squashfs_export_ops; allocate_xattr_table: sb->s_xattr = squashfs_xattr_handlers; xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start); if (xattr_id_table_start == SQUASHFS_INVALID_BLK) goto allocate_root; /* Allocate and read xattr id lookup table */ msblk->xattr_id_table = squashfs_read_xattr_id_table(sb, xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids); if (IS_ERR(msblk->xattr_id_table)) { err = PTR_ERR(msblk->xattr_id_table); msblk->xattr_id_table = NULL; if (err != -ENOTSUPP) goto failed_mount; } allocate_root: root = new_inode(sb); if (!root) { err = -ENOMEM; goto failed_mount; } err = squashfs_read_inode(root, root_inode); if (err) { make_bad_inode(root); iput(root); goto failed_mount; } insert_inode_hash(root); sb->s_root = 
d_alloc_root(root); if (sb->s_root == NULL) { ERROR("Root inode create failed\n"); err = -ENOMEM; iput(root); goto failed_mount; } TRACE("Leaving squashfs_fill_super\n"); kfree(sblk); return 0; failed_mount: squashfs_cache_delete(msblk->block_cache); squashfs_cache_delete(msblk->fragment_cache); squashfs_cache_delete(msblk->read_page); squashfs_decompressor_free(msblk, msblk->stream); kfree(msblk->inode_lookup_table); kfree(msblk->fragment_index); kfree(msblk->id_table); kfree(msblk->xattr_id_table); kfree(sb->s_fs_info); sb->s_fs_info = NULL; kfree(sblk); return err; failure: kfree(sb->s_fs_info); sb->s_fs_info = NULL; return -ENOMEM; } static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info; u64 id = huge_encode_dev(dentry->d_sb->s_bdev->bd_dev); TRACE("Entered squashfs_statfs\n"); buf->f_type = SQUASHFS_MAGIC; buf->f_bsize = msblk->block_size; buf->f_blocks = ((msblk->bytes_used - 1) >> msblk->block_log) + 1; buf->f_bfree = buf->f_bavail = 0; buf->f_files = msblk->inodes; buf->f_ffree = 0; buf->f_namelen = SQUASHFS_NAME_LEN; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); return 0; } static int squashfs_remount(struct super_block *sb, int *flags, char *data) { *flags |= MS_RDONLY; return 0; } static void squashfs_put_super(struct super_block *sb) { if (sb->s_fs_info) { struct squashfs_sb_info *sbi = sb->s_fs_info; squashfs_cache_delete(sbi->block_cache); squashfs_cache_delete(sbi->fragment_cache); squashfs_cache_delete(sbi->read_page); squashfs_decompressor_free(sbi, sbi->stream); kfree(sbi->id_table); kfree(sbi->fragment_index); kfree(sbi->meta_index); kfree(sbi->inode_lookup_table); kfree(sbi->xattr_id_table); kfree(sb->s_fs_info); sb->s_fs_info = NULL; } } static struct dentry *squashfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, squashfs_fill_super); } static struct 
kmem_cache *squashfs_inode_cachep; static void init_once(void *foo) { struct squashfs_inode_info *ei = foo; inode_init_once(&ei->vfs_inode); } static int __init init_inodecache(void) { squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache", sizeof(struct squashfs_inode_info), 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, init_once); return squashfs_inode_cachep ? 0 : -ENOMEM; } static void destroy_inodecache(void) { kmem_cache_destroy(squashfs_inode_cachep); } static int __init init_squashfs_fs(void) { int err = init_inodecache(); if (err) return err; err = register_filesystem(&squashfs_fs_type); if (err) { destroy_inodecache(); return err; } printk(KERN_INFO "squashfs: version 4.0 (2009/01/31) " "Phillip Lougher\n"); return 0; } static void __exit exit_squashfs_fs(void) { unregister_filesystem(&squashfs_fs_type); destroy_inodecache(); } static struct inode *squashfs_alloc_inode(struct super_block *sb) { struct squashfs_inode_info *ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL); return ei ? &ei->vfs_inode : NULL; } static void squashfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode)); } static void squashfs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, squashfs_i_callback); } static struct file_system_type squashfs_fs_type = { .owner = THIS_MODULE, .name = "squashfs", .mount = squashfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV }; static const struct super_operations squashfs_super_ops = { .alloc_inode = squashfs_alloc_inode, .destroy_inode = squashfs_destroy_inode, .statfs = squashfs_statfs, .put_super = squashfs_put_super, .remount_fs = squashfs_remount }; module_init(init_squashfs_fs); module_exit(exit_squashfs_fs); MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem"); MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>"); MODULE_LICENSE("GPL");
gpl-2.0
pombredanne/bcm11351
drivers/md/persistent-data/dm-btree.c
1388
19533
/* * Copyright (C) 2011 Red Hat, Inc. * * This file is released under the GPL. */ #include "dm-btree-internal.h" #include "dm-space-map.h" #include "dm-transaction-manager.h" #include <linux/export.h> #include <linux/device-mapper.h> #define DM_MSG_PREFIX "btree" /*---------------------------------------------------------------- * Array manipulation *--------------------------------------------------------------*/ static void memcpy_disk(void *dest, const void *src, size_t len) __dm_written_to_disk(src) { memcpy(dest, src, len); __dm_unbless_for_disk(src); } static void array_insert(void *base, size_t elt_size, unsigned nr_elts, unsigned index, void *elt) __dm_written_to_disk(elt) { if (index < nr_elts) memmove(base + (elt_size * (index + 1)), base + (elt_size * index), (nr_elts - index) * elt_size); memcpy_disk(base + (elt_size * index), elt, elt_size); } /*----------------------------------------------------------------*/ /* makes the assumption that no two keys are the same. */ static int bsearch(struct btree_node *n, uint64_t key, int want_hi) { int lo = -1, hi = le32_to_cpu(n->header.nr_entries); while (hi - lo > 1) { int mid = lo + ((hi - lo) / 2); uint64_t mid_key = le64_to_cpu(n->keys[mid]); if (mid_key == key) return mid; if (mid_key < key) lo = mid; else hi = mid; } return want_hi ? 
hi : lo; } int lower_bound(struct btree_node *n, uint64_t key) { return bsearch(n, key, 0); } void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, struct dm_btree_value_type *vt) { unsigned i; uint32_t nr_entries = le32_to_cpu(n->header.nr_entries); if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) for (i = 0; i < nr_entries; i++) dm_tm_inc(tm, value64(n, i)); else if (vt->inc) for (i = 0; i < nr_entries; i++) vt->inc(vt->context, value_ptr(n, i)); } static int insert_at(size_t value_size, struct btree_node *node, unsigned index, uint64_t key, void *value) __dm_written_to_disk(value) { uint32_t nr_entries = le32_to_cpu(node->header.nr_entries); __le64 key_le = cpu_to_le64(key); if (index > nr_entries || index >= le32_to_cpu(node->header.max_entries)) { DMERR("too many entries in btree node for insert"); __dm_unbless_for_disk(value); return -ENOMEM; } __dm_bless_for_disk(&key_le); array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le); array_insert(value_base(node), value_size, nr_entries, index, value); node->header.nr_entries = cpu_to_le32(nr_entries + 1); return 0; } /*----------------------------------------------------------------*/ /* * We want 3n entries (for some n). This works more nicely for repeated * insert remove loops than (2n + 1). 
*/ static uint32_t calc_max_entries(size_t value_size, size_t block_size) { uint32_t total, n; size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */ block_size -= sizeof(struct node_header); total = block_size / elt_size; n = total / 3; /* rounds down */ return 3 * n; } int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root) { int r; struct dm_block *b; struct btree_node *n; size_t block_size; uint32_t max_entries; r = new_block(info, &b); if (r < 0) return r; block_size = dm_bm_block_size(dm_tm_get_bm(info->tm)); max_entries = calc_max_entries(info->value_type.size, block_size); n = dm_block_data(b); memset(n, 0, block_size); n->header.flags = cpu_to_le32(LEAF_NODE); n->header.nr_entries = cpu_to_le32(0); n->header.max_entries = cpu_to_le32(max_entries); n->header.value_size = cpu_to_le32(info->value_type.size); *root = dm_block_location(b); return unlock_block(info, b); } EXPORT_SYMBOL_GPL(dm_btree_empty); /*----------------------------------------------------------------*/ /* * Deletion uses a recursive algorithm, since we have limited stack space * we explicitly manage our own stack on the heap. 
*/ #define MAX_SPINE_DEPTH 64 struct frame { struct dm_block *b; struct btree_node *n; unsigned level; unsigned nr_children; unsigned current_child; }; struct del_stack { struct dm_transaction_manager *tm; int top; struct frame spine[MAX_SPINE_DEPTH]; }; static int top_frame(struct del_stack *s, struct frame **f) { if (s->top < 0) { DMERR("btree deletion stack empty"); return -EINVAL; } *f = s->spine + s->top; return 0; } static int unprocessed_frames(struct del_stack *s) { return s->top >= 0; } static int push_frame(struct del_stack *s, dm_block_t b, unsigned level) { int r; uint32_t ref_count; if (s->top >= MAX_SPINE_DEPTH - 1) { DMERR("btree deletion stack out of memory"); return -ENOMEM; } r = dm_tm_ref(s->tm, b, &ref_count); if (r) return r; if (ref_count > 1) /* * This is a shared node, so we can just decrement it's * reference counter and leave the children. */ dm_tm_dec(s->tm, b); else { struct frame *f = s->spine + ++s->top; r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b); if (r) { s->top--; return r; } f->n = dm_block_data(f->b); f->level = level; f->nr_children = le32_to_cpu(f->n->header.nr_entries); f->current_child = 0; } return 0; } static void pop_frame(struct del_stack *s) { struct frame *f = s->spine + s->top--; dm_tm_dec(s->tm, dm_block_location(f->b)); dm_tm_unlock(s->tm, f->b); } static bool is_internal_level(struct dm_btree_info *info, struct frame *f) { return f->level < (info->levels - 1); } int dm_btree_del(struct dm_btree_info *info, dm_block_t root) { int r; struct del_stack *s; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; s->tm = info->tm; s->top = -1; r = push_frame(s, root, 0); if (r) goto out; while (unprocessed_frames(s)) { uint32_t flags; struct frame *f; dm_block_t b; r = top_frame(s, &f); if (r) goto out; if (f->current_child >= f->nr_children) { pop_frame(s); continue; } flags = le32_to_cpu(f->n->header.flags); if (flags & INTERNAL_NODE) { b = value64(f->n, f->current_child); f->current_child++; r = 
push_frame(s, b, f->level); if (r) goto out; } else if (is_internal_level(info, f)) { b = value64(f->n, f->current_child); f->current_child++; r = push_frame(s, b, f->level + 1); if (r) goto out; } else { if (info->value_type.dec) { unsigned i; for (i = 0; i < f->nr_children; i++) info->value_type.dec(info->value_type.context, value_ptr(f->n, i)); } f->current_child = f->nr_children; } } out: kfree(s); return r; } EXPORT_SYMBOL_GPL(dm_btree_del); /*----------------------------------------------------------------*/ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key, int (*search_fn)(struct btree_node *, uint64_t), uint64_t *result_key, void *v, size_t value_size) { int i, r; uint32_t flags, nr_entries; do { r = ro_step(s, block); if (r < 0) return r; i = search_fn(ro_node(s), key); flags = le32_to_cpu(ro_node(s)->header.flags); nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries); if (i < 0 || i >= nr_entries) return -ENODATA; if (flags & INTERNAL_NODE) block = value64(ro_node(s), i); } while (!(flags & LEAF_NODE)); *result_key = le64_to_cpu(ro_node(s)->keys[i]); memcpy(v, value_ptr(ro_node(s), i), value_size); return 0; } int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, void *value_le) { unsigned level, last_level = info->levels - 1; int r = -ENODATA; uint64_t rkey; __le64 internal_value_le; struct ro_spine spine; init_ro_spine(&spine, info); for (level = 0; level < info->levels; level++) { size_t size; void *value_p; if (level == last_level) { value_p = value_le; size = info->value_type.size; } else { value_p = &internal_value_le; size = sizeof(uint64_t); } r = btree_lookup_raw(&spine, root, keys[level], lower_bound, &rkey, value_p, size); if (!r) { if (rkey != keys[level]) { exit_ro_spine(&spine); return -ENODATA; } } else { exit_ro_spine(&spine); return r; } root = le64_to_cpu(internal_value_le); } exit_ro_spine(&spine); return r; } EXPORT_SYMBOL_GPL(dm_btree_lookup); /* * Splits a node by creating a 
sibling node and shifting half the nodes * contents across. Assumes there is a parent node, and it has room for * another child. * * Before: * +--------+ * | Parent | * +--------+ * | * v * +----------+ * | A ++++++ | * +----------+ * * * After: * +--------+ * | Parent | * +--------+ * | | * v +------+ * +---------+ | * | A* +++ | v * +---------+ +-------+ * | B +++ | * +-------+ * * Where A* is a shadow of A. */ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root, unsigned parent_index, uint64_t key) { int r; size_t size; unsigned nr_left, nr_right; struct dm_block *left, *right, *parent; struct btree_node *ln, *rn, *pn; __le64 location; left = shadow_current(s); r = new_block(s->info, &right); if (r < 0) return r; ln = dm_block_data(left); rn = dm_block_data(right); nr_left = le32_to_cpu(ln->header.nr_entries) / 2; nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left; ln->header.nr_entries = cpu_to_le32(nr_left); rn->header.flags = ln->header.flags; rn->header.nr_entries = cpu_to_le32(nr_right); rn->header.max_entries = ln->header.max_entries; rn->header.value_size = ln->header.value_size; memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0])); size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ? 
sizeof(uint64_t) : s->info->value_type.size; memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left), size * nr_right); /* * Patch up the parent */ parent = shadow_parent(s); pn = dm_block_data(parent); location = cpu_to_le64(dm_block_location(left)); __dm_bless_for_disk(&location); memcpy_disk(value_ptr(pn, parent_index), &location, sizeof(__le64)); location = cpu_to_le64(dm_block_location(right)); __dm_bless_for_disk(&location); r = insert_at(sizeof(__le64), pn, parent_index + 1, le64_to_cpu(rn->keys[0]), &location); if (r) return r; if (key < le64_to_cpu(rn->keys[0])) { unlock_block(s->info, right); s->nodes[1] = left; } else { unlock_block(s->info, left); s->nodes[1] = right; } return 0; } /* * Splits a node by creating two new children beneath the given node. * * Before: * +----------+ * | A ++++++ | * +----------+ * * * After: * +------------+ * | A (shadow) | * +------------+ * | | * +------+ +----+ * | | * v v * +-------+ +-------+ * | B +++ | | C +++ | * +-------+ +-------+ */ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) { int r; size_t size; unsigned nr_left, nr_right; struct dm_block *left, *right, *new_parent; struct btree_node *pn, *ln, *rn; __le64 val; new_parent = shadow_current(s); r = new_block(s->info, &left); if (r < 0) return r; r = new_block(s->info, &right); if (r < 0) { /* FIXME: put left */ return r; } pn = dm_block_data(new_parent); ln = dm_block_data(left); rn = dm_block_data(right); nr_left = le32_to_cpu(pn->header.nr_entries) / 2; nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; ln->header.flags = pn->header.flags; ln->header.nr_entries = cpu_to_le32(nr_left); ln->header.max_entries = pn->header.max_entries; ln->header.value_size = pn->header.value_size; rn->header.flags = pn->header.flags; rn->header.nr_entries = cpu_to_le32(nr_right); rn->header.max_entries = pn->header.max_entries; rn->header.value_size = pn->header.value_size; memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); memcpy(rn->keys, pn->keys 
+ nr_left, nr_right * sizeof(pn->keys[0])); size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? sizeof(__le64) : s->info->value_type.size; memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size); memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), nr_right * size); /* new_parent should just point to l and r now */ pn->header.flags = cpu_to_le32(INTERNAL_NODE); pn->header.nr_entries = cpu_to_le32(2); pn->header.max_entries = cpu_to_le32( calc_max_entries(sizeof(__le64), dm_bm_block_size( dm_tm_get_bm(s->info->tm)))); pn->header.value_size = cpu_to_le32(sizeof(__le64)); val = cpu_to_le64(dm_block_location(left)); __dm_bless_for_disk(&val); pn->keys[0] = ln->keys[0]; memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64)); val = cpu_to_le64(dm_block_location(right)); __dm_bless_for_disk(&val); pn->keys[1] = rn->keys[0]; memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); /* * rejig the spine. This is ugly, since it knows too * much about the spine */ if (s->nodes[0] != new_parent) { unlock_block(s->info, s->nodes[0]); s->nodes[0] = new_parent; } if (key < le64_to_cpu(rn->keys[0])) { unlock_block(s->info, right); s->nodes[1] = left; } else { unlock_block(s->info, left); s->nodes[1] = right; } s->count = 2; return 0; } static int btree_insert_raw(struct shadow_spine *s, dm_block_t root, struct dm_btree_value_type *vt, uint64_t key, unsigned *index) { int r, i = *index, top = 1; struct btree_node *node; for (;;) { r = shadow_step(s, root, vt); if (r < 0) return r; node = dm_block_data(shadow_current(s)); /* * We have to patch up the parent node, ugly, but I don't * see a way to do this automatically as part of the spine * op. */ if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unness. 
*/ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s))); __dm_bless_for_disk(&location); memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i), &location, sizeof(__le64)); } node = dm_block_data(shadow_current(s)); if (node->header.nr_entries == node->header.max_entries) { if (top) r = btree_split_beneath(s, key); else r = btree_split_sibling(s, root, i, key); if (r < 0) return r; } node = dm_block_data(shadow_current(s)); i = lower_bound(node, key); if (le32_to_cpu(node->header.flags) & LEAF_NODE) break; if (i < 0) { /* change the bounds on the lowest key */ node->keys[0] = cpu_to_le64(key); i = 0; } root = value64(node, i); top = 0; } if (i < 0 || le64_to_cpu(node->keys[i]) != key) i++; *index = i; return 0; } static int insert(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, void *value, dm_block_t *new_root, int *inserted) __dm_written_to_disk(value) { int r, need_insert; unsigned level, index = -1, last_level = info->levels - 1; dm_block_t block = root; struct shadow_spine spine; struct btree_node *n; struct dm_btree_value_type le64_type; le64_type.context = NULL; le64_type.size = sizeof(__le64); le64_type.inc = NULL; le64_type.dec = NULL; le64_type.equal = NULL; init_shadow_spine(&spine, info); for (level = 0; level < (info->levels - 1); level++) { r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index); if (r < 0) goto bad; n = dm_block_data(shadow_current(&spine)); need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) || (le64_to_cpu(n->keys[index]) != keys[level])); if (need_insert) { dm_block_t new_tree; __le64 new_le; r = dm_btree_empty(info, &new_tree); if (r < 0) goto bad; new_le = cpu_to_le64(new_tree); __dm_bless_for_disk(&new_le); r = insert_at(sizeof(uint64_t), n, index, keys[level], &new_le); if (r) goto bad; } if (level < last_level) block = value64(n, index); } r = btree_insert_raw(&spine, block, &info->value_type, keys[level], &index); if (r < 0) goto bad; n = 
dm_block_data(shadow_current(&spine)); need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) || (le64_to_cpu(n->keys[index]) != keys[level])); if (need_insert) { if (inserted) *inserted = 1; r = insert_at(info->value_type.size, n, index, keys[level], value); if (r) goto bad_unblessed; } else { if (inserted) *inserted = 0; if (info->value_type.dec && (!info->value_type.equal || !info->value_type.equal( info->value_type.context, value_ptr(n, index), value))) { info->value_type.dec(info->value_type.context, value_ptr(n, index)); } memcpy_disk(value_ptr(n, index), value, info->value_type.size); } *new_root = shadow_root(&spine); exit_shadow_spine(&spine); return 0; bad: __dm_unbless_for_disk(value); bad_unblessed: exit_shadow_spine(&spine); return r; } int dm_btree_insert(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, void *value, dm_block_t *new_root) __dm_written_to_disk(value) { return insert(info, root, keys, value, new_root, NULL); } EXPORT_SYMBOL_GPL(dm_btree_insert); int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, void *value, dm_block_t *new_root, int *inserted) __dm_written_to_disk(value) { return insert(info, root, keys, value, new_root, inserted); } EXPORT_SYMBOL_GPL(dm_btree_insert_notify); /*----------------------------------------------------------------*/ static int find_highest_key(struct ro_spine *s, dm_block_t block, uint64_t *result_key, dm_block_t *next_block) { int i, r; uint32_t flags; do { r = ro_step(s, block); if (r < 0) return r; flags = le32_to_cpu(ro_node(s)->header.flags); i = le32_to_cpu(ro_node(s)->header.nr_entries); if (!i) return -ENODATA; else i--; *result_key = le64_to_cpu(ro_node(s)->keys[i]); if (next_block || flags & INTERNAL_NODE) block = value64(ro_node(s), i); } while (flags & INTERNAL_NODE); if (next_block) *next_block = block; return 0; } int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root, uint64_t *result_keys) { int r = 0, count = 0, level; 
struct ro_spine spine; init_ro_spine(&spine, info); for (level = 0; level < info->levels; level++) { r = find_highest_key(&spine, root, result_keys + level, level == info->levels - 1 ? NULL : &root); if (r == -ENODATA) { r = 0; break; } else if (r) break; count++; } exit_ro_spine(&spine); return r ? r : count; } EXPORT_SYMBOL_GPL(dm_btree_find_highest_key); /* * FIXME: We shouldn't use a recursive algorithm when we have limited stack * space. Also this only works for single level trees. */ static int walk_node(struct ro_spine *s, dm_block_t block, int (*fn)(void *context, uint64_t *keys, void *leaf), void *context) { int r; unsigned i, nr; struct btree_node *n; uint64_t keys; r = ro_step(s, block); n = ro_node(s); nr = le32_to_cpu(n->header.nr_entries); for (i = 0; i < nr; i++) { if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) { r = walk_node(s, value64(n, i), fn, context); if (r) goto out; } else { keys = le64_to_cpu(*key_ptr(n, i)); r = fn(context, &keys, value_ptr(n, i)); if (r) goto out; } } out: ro_pop(s); return r; } int dm_btree_walk(struct dm_btree_info *info, dm_block_t root, int (*fn)(void *context, uint64_t *keys, void *leaf), void *context) { int r; struct ro_spine spine; BUG_ON(info->levels > 1); init_ro_spine(&spine, info); r = walk_node(&spine, root, fn, context); exit_ro_spine(&spine); return r; } EXPORT_SYMBOL_GPL(dm_btree_walk);
gpl-2.0
taogb/linux
arch/arm/mach-clps711x/board-p720t.c
1388
11996
/* * linux/arch/arm/mach-clps711x/p720t.c * * Copyright (C) 2000-2001 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/sizes.h> #include <linux/backlight.h> #include <linux/basic_mmio_gpio.h> #include <linux/platform_device.h> #include <linux/mtd/partitions.h> #include <linux/mtd/nand-gpio.h> #include <mach/hardware.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <video/platform_lcd.h> #include "common.h" #include "devices.h" #define P720T_USERLED CLPS711X_GPIO(3, 0) #define P720T_NAND_CLE CLPS711X_GPIO(4, 0) #define P720T_NAND_ALE CLPS711X_GPIO(4, 1) #define P720T_NAND_NCE CLPS711X_GPIO(4, 2) #define P720T_NAND_BASE (CLPS711X_SDRAM1_BASE) #define P720T_MMGPIO_BASE (CLPS711X_NR_GPIO) #define SYSPLD_PHYS_BASE IOMEM(CS1_PHYS_BASE) #define PLD_INT (SYSPLD_PHYS_BASE + 0x000000) #define PLD_INT_MMGPIO_BASE (P720T_MMGPIO_BASE + 0) #define PLD_INT_PENIRQ (PLD_INT_MMGPIO_BASE + 5) #define PLD_INT_UCB_IRQ (PLD_INT_MMGPIO_BASE + 1) #define PLD_INT_KBD_ATN 
(PLD_INT_MMGPIO_BASE + 0) /* EINT1 */ #define PLD_PWR (SYSPLD_PHYS_BASE + 0x000004) #define PLD_PWR_MMGPIO_BASE (P720T_MMGPIO_BASE + 8) #define PLD_PWR_EXT (PLD_PWR_MMGPIO_BASE + 5) #define PLD_PWR_MODE (PLD_PWR_MMGPIO_BASE + 4) /* 1 = PWM, 0 = PFM */ #define PLD_S4_ON (PLD_PWR_MMGPIO_BASE + 3) /* LCD bias voltage enable */ #define PLD_S3_ON (PLD_PWR_MMGPIO_BASE + 2) /* LCD backlight enable */ #define PLD_S2_ON (PLD_PWR_MMGPIO_BASE + 1) /* LCD 3V3 supply enable */ #define PLD_S1_ON (PLD_PWR_MMGPIO_BASE + 0) /* LCD 3V supply enable */ #define PLD_KBD (SYSPLD_PHYS_BASE + 0x000008) #define PLD_KBD_MMGPIO_BASE (P720T_MMGPIO_BASE + 16) #define PLD_KBD_WAKE (PLD_KBD_MMGPIO_BASE + 1) #define PLD_KBD_EN (PLD_KBD_MMGPIO_BASE + 0) #define PLD_SPI (SYSPLD_PHYS_BASE + 0x00000c) #define PLD_SPI_MMGPIO_BASE (P720T_MMGPIO_BASE + 24) #define PLD_SPI_EN (PLD_SPI_MMGPIO_BASE + 0) #define PLD_IO (SYSPLD_PHYS_BASE + 0x000010) #define PLD_IO_MMGPIO_BASE (P720T_MMGPIO_BASE + 32) #define PLD_IO_BOOTSEL (PLD_IO_MMGPIO_BASE + 6) /* Boot sel switch */ #define PLD_IO_USER (PLD_IO_MMGPIO_BASE + 5) /* User defined switch */ #define PLD_IO_LED3 (PLD_IO_MMGPIO_BASE + 4) #define PLD_IO_LED2 (PLD_IO_MMGPIO_BASE + 3) #define PLD_IO_LED1 (PLD_IO_MMGPIO_BASE + 2) #define PLD_IO_LED0 (PLD_IO_MMGPIO_BASE + 1) #define PLD_IO_LEDEN (PLD_IO_MMGPIO_BASE + 0) #define PLD_IRDA (SYSPLD_PHYS_BASE + 0x000014) #define PLD_IRDA_MMGPIO_BASE (P720T_MMGPIO_BASE + 40) #define PLD_IRDA_EN (PLD_IRDA_MMGPIO_BASE + 0) #define PLD_COM2 (SYSPLD_PHYS_BASE + 0x000018) #define PLD_COM2_MMGPIO_BASE (P720T_MMGPIO_BASE + 48) #define PLD_COM2_EN (PLD_COM2_MMGPIO_BASE + 0) #define PLD_COM1 (SYSPLD_PHYS_BASE + 0x00001c) #define PLD_COM1_MMGPIO_BASE (P720T_MMGPIO_BASE + 56) #define PLD_COM1_EN (PLD_COM1_MMGPIO_BASE + 0) #define PLD_AUD (SYSPLD_PHYS_BASE + 0x000020) #define PLD_AUD_MMGPIO_BASE (P720T_MMGPIO_BASE + 64) #define PLD_AUD_DIV1 (PLD_AUD_MMGPIO_BASE + 6) #define PLD_AUD_DIV0 (PLD_AUD_MMGPIO_BASE + 5) #define 
PLD_AUD_CLK_SEL1 (PLD_AUD_MMGPIO_BASE + 4) #define PLD_AUD_CLK_SEL0 (PLD_AUD_MMGPIO_BASE + 3) #define PLD_AUD_MIC_PWR (PLD_AUD_MMGPIO_BASE + 2) #define PLD_AUD_MIC_GAIN (PLD_AUD_MMGPIO_BASE + 1) #define PLD_AUD_CODEC_EN (PLD_AUD_MMGPIO_BASE + 0) #define PLD_CF (SYSPLD_PHYS_BASE + 0x000024) #define PLD_CF_MMGPIO_BASE (P720T_MMGPIO_BASE + 72) #define PLD_CF2_SLEEP (PLD_CF_MMGPIO_BASE + 5) #define PLD_CF1_SLEEP (PLD_CF_MMGPIO_BASE + 4) #define PLD_CF2_nPDREQ (PLD_CF_MMGPIO_BASE + 3) #define PLD_CF1_nPDREQ (PLD_CF_MMGPIO_BASE + 2) #define PLD_CF2_nIRQ (PLD_CF_MMGPIO_BASE + 1) #define PLD_CF1_nIRQ (PLD_CF_MMGPIO_BASE + 0) #define PLD_SDC (SYSPLD_PHYS_BASE + 0x000028) #define PLD_SDC_MMGPIO_BASE (P720T_MMGPIO_BASE + 80) #define PLD_SDC_INT_EN (PLD_SDC_MMGPIO_BASE + 2) #define PLD_SDC_WP (PLD_SDC_MMGPIO_BASE + 1) #define PLD_SDC_CD (PLD_SDC_MMGPIO_BASE + 0) #define PLD_CODEC (SYSPLD_PHYS_BASE + 0x400000) #define PLD_CODEC_MMGPIO_BASE (P720T_MMGPIO_BASE + 88) #define PLD_CODEC_IRQ3 (PLD_CODEC_MMGPIO_BASE + 4) #define PLD_CODEC_IRQ2 (PLD_CODEC_MMGPIO_BASE + 3) #define PLD_CODEC_IRQ1 (PLD_CODEC_MMGPIO_BASE + 2) #define PLD_CODEC_EN (PLD_CODEC_MMGPIO_BASE + 0) #define PLD_BRITE (SYSPLD_PHYS_BASE + 0x400004) #define PLD_BRITE_MMGPIO_BASE (P720T_MMGPIO_BASE + 96) #define PLD_BRITE_UP (PLD_BRITE_MMGPIO_BASE + 1) #define PLD_BRITE_DN (PLD_BRITE_MMGPIO_BASE + 0) #define PLD_LCDEN (SYSPLD_PHYS_BASE + 0x400008) #define PLD_LCDEN_MMGPIO_BASE (P720T_MMGPIO_BASE + 104) #define PLD_LCDEN_EN (PLD_LCDEN_MMGPIO_BASE + 0) #define PLD_TCH (SYSPLD_PHYS_BASE + 0x400010) #define PLD_TCH_MMGPIO_BASE (P720T_MMGPIO_BASE + 112) #define PLD_TCH_PENIRQ (PLD_TCH_MMGPIO_BASE + 1) #define PLD_TCH_EN (PLD_TCH_MMGPIO_BASE + 0) #define PLD_GPIO (SYSPLD_PHYS_BASE + 0x400014) #define PLD_GPIO_MMGPIO_BASE (P720T_MMGPIO_BASE + 120) #define PLD_GPIO2 (PLD_GPIO_MMGPIO_BASE + 2) #define PLD_GPIO1 (PLD_GPIO_MMGPIO_BASE + 1) #define PLD_GPIO0 (PLD_GPIO_MMGPIO_BASE + 0) static struct gpio p720t_gpios[] __initconst = 
{ { PLD_S1_ON, GPIOF_OUT_INIT_LOW, "PLD_S1_ON" }, { PLD_S2_ON, GPIOF_OUT_INIT_LOW, "PLD_S2_ON" }, { PLD_S3_ON, GPIOF_OUT_INIT_LOW, "PLD_S3_ON" }, { PLD_S4_ON, GPIOF_OUT_INIT_LOW, "PLD_S4_ON" }, { PLD_KBD_EN, GPIOF_OUT_INIT_LOW, "PLD_KBD_EN" }, { PLD_SPI_EN, GPIOF_OUT_INIT_LOW, "PLD_SPI_EN" }, { PLD_IO_USER, GPIOF_OUT_INIT_LOW, "PLD_IO_USER" }, { PLD_IO_LED0, GPIOF_OUT_INIT_LOW, "PLD_IO_LED0" }, { PLD_IO_LED1, GPIOF_OUT_INIT_LOW, "PLD_IO_LED1" }, { PLD_IO_LED2, GPIOF_OUT_INIT_LOW, "PLD_IO_LED2" }, { PLD_IO_LED3, GPIOF_OUT_INIT_LOW, "PLD_IO_LED3" }, { PLD_IO_LEDEN, GPIOF_OUT_INIT_LOW, "PLD_IO_LEDEN" }, { PLD_IRDA_EN, GPIOF_OUT_INIT_LOW, "PLD_IRDA_EN" }, { PLD_COM1_EN, GPIOF_OUT_INIT_HIGH, "PLD_COM1_EN" }, { PLD_COM2_EN, GPIOF_OUT_INIT_HIGH, "PLD_COM2_EN" }, { PLD_CODEC_EN, GPIOF_OUT_INIT_LOW, "PLD_CODEC_EN" }, { PLD_LCDEN_EN, GPIOF_OUT_INIT_LOW, "PLD_LCDEN_EN" }, { PLD_TCH_EN, GPIOF_OUT_INIT_LOW, "PLD_TCH_EN" }, { P720T_USERLED,GPIOF_OUT_INIT_LOW, "USER_LED" }, }; static struct resource p720t_mmgpio_resource[] __initdata = { DEFINE_RES_MEM_NAMED(0, 4, "dat"), }; static struct bgpio_pdata p720t_mmgpio_pdata = { .ngpio = 8, }; static struct platform_device p720t_mmgpio __initdata = { .name = "basic-mmio-gpio", .id = -1, .resource = p720t_mmgpio_resource, .num_resources = ARRAY_SIZE(p720t_mmgpio_resource), .dev = { .platform_data = &p720t_mmgpio_pdata, }, }; static void __init p720t_mmgpio_init(void __iomem *addrbase, int gpiobase) { p720t_mmgpio_resource[0].start = (unsigned long)addrbase; p720t_mmgpio_pdata.base = gpiobase; platform_device_register(&p720t_mmgpio); } static struct { void __iomem *addrbase; int gpiobase; } mmgpios[] __initconst = { { PLD_INT, PLD_INT_MMGPIO_BASE }, { PLD_PWR, PLD_PWR_MMGPIO_BASE }, { PLD_KBD, PLD_KBD_MMGPIO_BASE }, { PLD_SPI, PLD_SPI_MMGPIO_BASE }, { PLD_IO, PLD_IO_MMGPIO_BASE }, { PLD_IRDA, PLD_IRDA_MMGPIO_BASE }, { PLD_COM2, PLD_COM2_MMGPIO_BASE }, { PLD_COM1, PLD_COM1_MMGPIO_BASE }, { PLD_AUD, PLD_AUD_MMGPIO_BASE }, { PLD_CF, 
PLD_CF_MMGPIO_BASE }, { PLD_SDC, PLD_SDC_MMGPIO_BASE }, { PLD_CODEC, PLD_CODEC_MMGPIO_BASE }, { PLD_BRITE, PLD_BRITE_MMGPIO_BASE }, { PLD_LCDEN, PLD_LCDEN_MMGPIO_BASE }, { PLD_TCH, PLD_TCH_MMGPIO_BASE }, { PLD_GPIO, PLD_GPIO_MMGPIO_BASE }, }; static struct resource p720t_nand_resource[] __initdata = { DEFINE_RES_MEM(P720T_NAND_BASE, SZ_4), }; static struct mtd_partition p720t_nand_parts[] __initdata = { { .name = "Flash partition 1", .offset = 0, .size = SZ_2M, }, { .name = "Flash partition 2", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct gpio_nand_platdata p720t_nand_pdata __initdata = { .gpio_rdy = -1, .gpio_nce = P720T_NAND_NCE, .gpio_ale = P720T_NAND_ALE, .gpio_cle = P720T_NAND_CLE, .gpio_nwp = -1, .chip_delay = 15, .parts = p720t_nand_parts, .num_parts = ARRAY_SIZE(p720t_nand_parts), }; static struct platform_device p720t_nand_pdev __initdata = { .name = "gpio-nand", .id = -1, .resource = p720t_nand_resource, .num_resources = ARRAY_SIZE(p720t_nand_resource), .dev = { .platform_data = &p720t_nand_pdata, }, }; static void p720t_lcd_power_set(struct plat_lcd_data *pd, unsigned int power) { if (power) { gpio_set_value(PLD_LCDEN_EN, 1); gpio_set_value(PLD_S1_ON, 1); gpio_set_value(PLD_S2_ON, 1); gpio_set_value(PLD_S4_ON, 1); } else { gpio_set_value(PLD_S1_ON, 0); gpio_set_value(PLD_S2_ON, 0); gpio_set_value(PLD_S4_ON, 0); gpio_set_value(PLD_LCDEN_EN, 0); } } static struct plat_lcd_data p720t_lcd_power_pdata = { .set_power = p720t_lcd_power_set, }; static void p720t_lcd_backlight_set_intensity(int intensity) { gpio_set_value(PLD_S3_ON, intensity); } static struct generic_bl_info p720t_lcd_backlight_pdata = { .name = "lcd-backlight.0", .default_intensity = 0x01, .max_intensity = 0x01, .set_bl_intensity = p720t_lcd_backlight_set_intensity, }; static void __init fixup_p720t(struct tag *tag, char **cmdline) { /* * Our bootloader doesn't setup any tags (yet). 
*/ if (tag->hdr.tag != ATAG_CORE) { tag->hdr.tag = ATAG_CORE; tag->hdr.size = tag_size(tag_core); tag->u.core.flags = 0; tag->u.core.pagesize = PAGE_SIZE; tag->u.core.rootdev = 0x0100; tag = tag_next(tag); tag->hdr.tag = ATAG_MEM; tag->hdr.size = tag_size(tag_mem32); tag->u.mem.size = 4096; tag->u.mem.start = PHYS_OFFSET; tag = tag_next(tag); tag->hdr.tag = ATAG_NONE; tag->hdr.size = 0; } } static struct gpio_led p720t_gpio_leds[] = { { .name = "User LED", .default_trigger = "heartbeat", .gpio = P720T_USERLED, }, }; static struct gpio_led_platform_data p720t_gpio_led_pdata __initdata = { .leds = p720t_gpio_leds, .num_leds = ARRAY_SIZE(p720t_gpio_leds), }; static void __init p720t_init(void) { int i; clps711x_devices_init(); for (i = 0; i < ARRAY_SIZE(mmgpios); i++) p720t_mmgpio_init(mmgpios[i].addrbase, mmgpios[i].gpiobase); platform_device_register(&p720t_nand_pdev); } static void __init p720t_init_late(void) { WARN_ON(gpio_request_array(p720t_gpios, ARRAY_SIZE(p720t_gpios))); platform_device_register_data(NULL, "platform-lcd", 0, &p720t_lcd_power_pdata, sizeof(p720t_lcd_power_pdata)); platform_device_register_data(NULL, "generic-bl", 0, &p720t_lcd_backlight_pdata, sizeof(p720t_lcd_backlight_pdata)); platform_device_register_simple("video-clps711x", 0, NULL, 0); platform_device_register_data(NULL, "leds-gpio", 0, &p720t_gpio_led_pdata, sizeof(p720t_gpio_led_pdata)); } MACHINE_START(P720T, "ARM-Prospector720T") /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ .atag_offset = 0x100, .fixup = fixup_p720t, .map_io = clps711x_map_io, .init_irq = clps711x_init_irq, .init_time = clps711x_timer_init, .init_machine = p720t_init, .init_late = p720t_init_late, .restart = clps711x_restart, MACHINE_END
gpl-2.0
schqiushui/kernel_kk444_sense_a51
net/netfilter/nfnetlink_cthelper.c
1644
16783
/* * (C) 2012 Pablo Neira Ayuso <pablo@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation (or any later at your option). * * This software has been sponsored by Vyatta Inc. <http://www.vyatta.com> */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/rculist.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/list.h> #include <linux/errno.h> #include <net/netlink.h> #include <net/sock.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> #include <linux/netfilter/nfnetlink_cthelper.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers"); static int nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { const struct nf_conn_help *help; struct nf_conntrack_helper *helper; help = nfct_help(ct); if (help == NULL) return NF_DROP; /* rcu_read_lock()ed by nf_hook_slow */ helper = rcu_dereference(help->helper); if (helper == NULL) return NF_DROP; /* This is an user-space helper not yet configured, skip. */ if ((helper->flags & (NF_CT_HELPER_F_USERSPACE | NF_CT_HELPER_F_CONFIGURED)) == NF_CT_HELPER_F_USERSPACE) return NF_ACCEPT; /* If the user-space helper is not available, don't block traffic. 
*/ return NF_QUEUE_NR(helper->queue_num) | NF_VERDICT_FLAG_QUEUE_BYPASS; } static const struct nla_policy nfnl_cthelper_tuple_pol[NFCTH_TUPLE_MAX+1] = { [NFCTH_TUPLE_L3PROTONUM] = { .type = NLA_U16, }, [NFCTH_TUPLE_L4PROTONUM] = { .type = NLA_U8, }, }; static int nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, const struct nlattr *attr) { struct nlattr *tb[NFCTH_TUPLE_MAX+1]; nla_parse_nested(tb, NFCTH_TUPLE_MAX, attr, nfnl_cthelper_tuple_pol); if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) return -EINVAL; tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); return 0; } static int nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) { const struct nf_conn_help *help = nfct_help(ct); if (attr == NULL) return -EINVAL; if (help->helper->data_len == 0) return -EINVAL; memcpy(&help->data, nla_data(attr), help->helper->data_len); return 0; } static int nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct) { const struct nf_conn_help *help = nfct_help(ct); if (help->helper->data_len && nla_put(skb, CTA_HELP_INFO, help->helper->data_len, &help->data)) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy nfnl_cthelper_expect_pol[NFCTH_POLICY_MAX+1] = { [NFCTH_POLICY_NAME] = { .type = NLA_NUL_STRING, .len = NF_CT_HELPER_NAME_LEN-1 }, [NFCTH_POLICY_EXPECT_MAX] = { .type = NLA_U32, }, [NFCTH_POLICY_EXPECT_TIMEOUT] = { .type = NLA_U32, }, }; static int nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy, const struct nlattr *attr) { struct nlattr *tb[NFCTH_POLICY_MAX+1]; nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, nfnl_cthelper_expect_pol); if (!tb[NFCTH_POLICY_NAME] || !tb[NFCTH_POLICY_EXPECT_MAX] || !tb[NFCTH_POLICY_EXPECT_TIMEOUT]) return -EINVAL; strncpy(expect_policy->name, nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN); expect_policy->max_expected = 
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); expect_policy->timeout = ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT])); return 0; } static const struct nla_policy nfnl_cthelper_expect_policy_set[NFCTH_POLICY_SET_MAX+1] = { [NFCTH_POLICY_SET_NUM] = { .type = NLA_U32, }, }; static int nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper, const struct nlattr *attr) { int i, ret; struct nf_conntrack_expect_policy *expect_policy; struct nlattr *tb[NFCTH_POLICY_SET_MAX+1]; nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, nfnl_cthelper_expect_policy_set); if (!tb[NFCTH_POLICY_SET_NUM]) return -EINVAL; helper->expect_class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); if (helper->expect_class_max != 0 && helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES) return -EOVERFLOW; expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) * helper->expect_class_max, GFP_KERNEL); if (expect_policy == NULL) return -ENOMEM; for (i=0; i<helper->expect_class_max; i++) { if (!tb[NFCTH_POLICY_SET+i]) goto err; ret = nfnl_cthelper_expect_policy(&expect_policy[i], tb[NFCTH_POLICY_SET+i]); if (ret < 0) goto err; } helper->expect_policy = expect_policy; return 0; err: kfree(expect_policy); return -EINVAL; } static int nfnl_cthelper_create(const struct nlattr * const tb[], struct nf_conntrack_tuple *tuple) { struct nf_conntrack_helper *helper; int ret; if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) return -EINVAL; helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL); if (helper == NULL) return -ENOMEM; ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); if (ret < 0) goto err; strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); helper->flags |= NF_CT_HELPER_F_USERSPACE; memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple)); helper->me = THIS_MODULE; helper->help = nfnl_userspace_cthelper; helper->from_nlattr 
= nfnl_cthelper_from_nlattr; helper->to_nlattr = nfnl_cthelper_to_nlattr; /* Default to queue number zero, this can be updated at any time. */ if (tb[NFCTH_QUEUE_NUM]) helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM])); if (tb[NFCTH_STATUS]) { int status = ntohl(nla_get_be32(tb[NFCTH_STATUS])); switch(status) { case NFCT_HELPER_STATUS_ENABLED: helper->flags |= NF_CT_HELPER_F_CONFIGURED; break; case NFCT_HELPER_STATUS_DISABLED: helper->flags &= ~NF_CT_HELPER_F_CONFIGURED; break; } } ret = nf_conntrack_helper_register(helper); if (ret < 0) goto err; return 0; err: kfree(helper); return ret; } static int nfnl_cthelper_update(const struct nlattr * const tb[], struct nf_conntrack_helper *helper) { int ret; if (tb[NFCTH_PRIV_DATA_LEN]) return -EBUSY; if (tb[NFCTH_POLICY]) { ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); if (ret < 0) return ret; } if (tb[NFCTH_QUEUE_NUM]) helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM])); if (tb[NFCTH_STATUS]) { int status = ntohl(nla_get_be32(tb[NFCTH_STATUS])); switch(status) { case NFCT_HELPER_STATUS_ENABLED: helper->flags |= NF_CT_HELPER_F_CONFIGURED; break; case NFCT_HELPER_STATUS_DISABLED: helper->flags &= ~NF_CT_HELPER_F_CONFIGURED; break; } } return 0; } static int nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) { const char *helper_name; struct nf_conntrack_helper *cur, *helper = NULL; struct nf_conntrack_tuple tuple; int ret = 0, i; if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) return -EINVAL; helper_name = nla_data(tb[NFCTH_NAME]); ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]); if (ret < 0) return ret; rcu_read_lock(); for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { /* skip non-userspace conntrack helpers. 
*/ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) continue; if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN) != 0) continue; if ((tuple.src.l3num != cur->tuple.src.l3num || tuple.dst.protonum != cur->tuple.dst.protonum)) continue; if (nlh->nlmsg_flags & NLM_F_EXCL) { ret = -EEXIST; goto err; } helper = cur; break; } } rcu_read_unlock(); if (helper == NULL) ret = nfnl_cthelper_create(tb, &tuple); else ret = nfnl_cthelper_update(tb, helper); return ret; err: rcu_read_unlock(); return ret; } static int nfnl_cthelper_dump_tuple(struct sk_buff *skb, struct nf_conntrack_helper *helper) { struct nlattr *nest_parms; nest_parms = nla_nest_start(skb, NFCTH_TUPLE | NLA_F_NESTED); if (nest_parms == NULL) goto nla_put_failure; if (nla_put_be16(skb, NFCTH_TUPLE_L3PROTONUM, htons(helper->tuple.src.l3num))) goto nla_put_failure; if (nla_put_u8(skb, NFCTH_TUPLE_L4PROTONUM, helper->tuple.dst.protonum)) goto nla_put_failure; nla_nest_end(skb, nest_parms); return 0; nla_put_failure: return -1; } static int nfnl_cthelper_dump_policy(struct sk_buff *skb, struct nf_conntrack_helper *helper) { int i; struct nlattr *nest_parms1, *nest_parms2; nest_parms1 = nla_nest_start(skb, NFCTH_POLICY | NLA_F_NESTED); if (nest_parms1 == NULL) goto nla_put_failure; if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM, htonl(helper->expect_class_max))) goto nla_put_failure; for (i=0; i<helper->expect_class_max; i++) { nest_parms2 = nla_nest_start(skb, (NFCTH_POLICY_SET+i) | NLA_F_NESTED); if (nest_parms2 == NULL) goto nla_put_failure; if (nla_put_string(skb, NFCTH_POLICY_NAME, helper->expect_policy[i].name)) goto nla_put_failure; if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_MAX, htonl(helper->expect_policy[i].max_expected))) goto nla_put_failure; if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_TIMEOUT, htonl(helper->expect_policy[i].timeout))) goto nla_put_failure; nla_nest_end(skb, nest_parms2); } nla_nest_end(skb, nest_parms1); return 0; nla_put_failure: return -1; } static int nfnl_cthelper_fill_info(struct 
sk_buff *skb, u32 portid, u32 seq, u32 type, int event, struct nf_conntrack_helper *helper) { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; unsigned int flags = portid ? NLM_F_MULTI : 0; int status; event |= NFNL_SUBSYS_CTHELPER << 8; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = AF_UNSPEC; nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; if (nla_put_string(skb, NFCTH_NAME, helper->name)) goto nla_put_failure; if (nla_put_be32(skb, NFCTH_QUEUE_NUM, htonl(helper->queue_num))) goto nla_put_failure; if (nfnl_cthelper_dump_tuple(skb, helper) < 0) goto nla_put_failure; if (nfnl_cthelper_dump_policy(skb, helper) < 0) goto nla_put_failure; if (nla_put_be32(skb, NFCTH_PRIV_DATA_LEN, htonl(helper->data_len))) goto nla_put_failure; if (helper->flags & NF_CT_HELPER_F_CONFIGURED) status = NFCT_HELPER_STATUS_ENABLED; else status = NFCT_HELPER_STATUS_DISABLED; if (nla_put_be32(skb, NFCTH_STATUS, htonl(status))) goto nla_put_failure; nlmsg_end(skb, nlh); return skb->len; nlmsg_failure: nla_put_failure: nlmsg_cancel(skb, nlh); return -1; } static int nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { struct nf_conntrack_helper *cur, *last; rcu_read_lock(); last = (struct nf_conntrack_helper *)cb->args[1]; for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) { restart: hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[cb->args[0]], hnode) { /* skip non-userspace conntrack helpers. 
*/ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) continue; if (cb->args[1]) { if (cur != last) continue; cb->args[1] = 0; } if (nfnl_cthelper_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFNL_MSG_TYPE(cb->nlh->nlmsg_type), NFNL_MSG_CTHELPER_NEW, cur) < 0) { cb->args[1] = (unsigned long)cur; goto out; } } } if (cb->args[1]) { cb->args[1] = 0; goto restart; } out: rcu_read_unlock(); return skb->len; } static int nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) { int ret = -ENOENT, i; struct nf_conntrack_helper *cur; struct sk_buff *skb2; char *helper_name = NULL; struct nf_conntrack_tuple tuple; bool tuple_set = false; if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = nfnl_cthelper_dump_table, }; return netlink_dump_start(nfnl, skb, nlh, &c); } if (tb[NFCTH_NAME]) helper_name = nla_data(tb[NFCTH_NAME]); if (tb[NFCTH_TUPLE]) { ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]); if (ret < 0) return ret; tuple_set = true; } for (i = 0; i < nf_ct_helper_hsize; i++) { hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { /* skip non-userspace conntrack helpers. */ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) continue; if (helper_name && strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN) != 0) { continue; } if (tuple_set && (tuple.src.l3num != cur->tuple.src.l3num || tuple.dst.protonum != cur->tuple.dst.protonum)) continue; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (skb2 == NULL) { ret = -ENOMEM; break; } ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NFNL_MSG_TYPE(nlh->nlmsg_type), NFNL_MSG_CTHELPER_NEW, cur); if (ret <= 0) { kfree_skb(skb2); break; } ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT); if (ret > 0) ret = 0; /* this avoids a loop in nfnetlink. */ return ret == -EAGAIN ? 
-ENOBUFS : ret; } } return ret; } static int nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) { char *helper_name = NULL; struct nf_conntrack_helper *cur; struct hlist_node *tmp; struct nf_conntrack_tuple tuple; bool tuple_set = false, found = false; int i, j = 0, ret; if (tb[NFCTH_NAME]) helper_name = nla_data(tb[NFCTH_NAME]); if (tb[NFCTH_TUPLE]) { ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]); if (ret < 0) return ret; tuple_set = true; } for (i = 0; i < nf_ct_helper_hsize; i++) { hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], hnode) { /* skip non-userspace conntrack helpers. */ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) continue; j++; if (helper_name && strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN) != 0) { continue; } if (tuple_set && (tuple.src.l3num != cur->tuple.src.l3num || tuple.dst.protonum != cur->tuple.dst.protonum)) continue; found = true; nf_conntrack_helper_unregister(cur); } } /* Make sure we return success if we flush and there is no helpers */ return (found || j == 0) ? 
0 : -ENOENT; } static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = { [NFCTH_NAME] = { .type = NLA_NUL_STRING, .len = NF_CT_HELPER_NAME_LEN-1 }, [NFCTH_QUEUE_NUM] = { .type = NLA_U32, }, }; static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = { [NFNL_MSG_CTHELPER_NEW] = { .call = nfnl_cthelper_new, .attr_count = NFCTH_MAX, .policy = nfnl_cthelper_policy }, [NFNL_MSG_CTHELPER_GET] = { .call = nfnl_cthelper_get, .attr_count = NFCTH_MAX, .policy = nfnl_cthelper_policy }, [NFNL_MSG_CTHELPER_DEL] = { .call = nfnl_cthelper_del, .attr_count = NFCTH_MAX, .policy = nfnl_cthelper_policy }, }; static const struct nfnetlink_subsystem nfnl_cthelper_subsys = { .name = "cthelper", .subsys_id = NFNL_SUBSYS_CTHELPER, .cb_count = NFNL_MSG_CTHELPER_MAX, .cb = nfnl_cthelper_cb, }; MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTHELPER); static int __init nfnl_cthelper_init(void) { int ret; ret = nfnetlink_subsys_register(&nfnl_cthelper_subsys); if (ret < 0) { pr_err("nfnl_cthelper: cannot register with nfnetlink.\n"); goto err_out; } return 0; err_out: return ret; } static void __exit nfnl_cthelper_exit(void) { struct nf_conntrack_helper *cur; struct hlist_node *tmp; int i; nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); for (i=0; i<nf_ct_helper_hsize; i++) { hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], hnode) { /* skip non-userspace conntrack helpers. */ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) continue; nf_conntrack_helper_unregister(cur); } } } module_init(nfnl_cthelper_init); module_exit(nfnl_cthelper_exit);
gpl-2.0
kostoulhs/android_kernel_samsung_loganrelte
drivers/media/video/msm/vfe/msm_vfe31.c
1644
113827
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/io.h> #include <mach/irqs.h> #include <mach/camera.h> #include <asm/atomic.h> #include "msm_vfe31.h" #include "msm_vpe1.h" atomic_t irq_cnt; static struct vfe31_ctrl_type *vfe31_ctrl; static struct msm_camera_io_clk camio_clk; static void *vfe_syncdata; static void vfe31_send_msg_no_payload(enum VFE31_MESSAGE_ID id); static void vfe31_reset_hist_cfg(void); struct vfe31_isr_queue_cmd { struct list_head list; uint32_t vfeInterruptStatus0; uint32_t vfeInterruptStatus1; uint32_t vfePingPongStatus; struct vfe_frame_asf_info vfeAsfFrameInfo; struct vfe_frame_bpc_info vfeBpcFrameInfo; struct vfe_msg_camif_status vfeCamifStatusLocal; }; static struct vfe31_cmd_type vfe31_cmd[] = { /* 0*/ {V31_DUMMY_0}, {V31_SET_CLK}, {V31_RESET}, {V31_START}, {V31_TEST_GEN_START}, /* 5*/ {V31_OPERATION_CFG, V31_OPERATION_CFG_LEN}, {V31_AXI_OUT_CFG, V31_AXI_OUT_LEN, V31_AXI_OUT_OFF, 0xFF}, {V31_CAMIF_CFG, V31_CAMIF_LEN, V31_CAMIF_OFF, 0xFF}, {V31_AXI_INPUT_CFG}, {V31_BLACK_LEVEL_CFG, V31_BLACK_LEVEL_LEN, V31_BLACK_LEVEL_OFF, 0xFF}, /*10*/ {V31_ROLL_OFF_CFG, V31_ROLL_OFF_CFG_LEN, V31_ROLL_OFF_CFG_OFF, 0xFF}, {V31_DEMUX_CFG, V31_DEMUX_LEN, V31_DEMUX_OFF, 0xFF}, {V31_DEMOSAIC_0_CFG, V31_DEMOSAIC_0_LEN, V31_DEMOSAIC_0_OFF, 0xFF}, {V31_DEMOSAIC_1_CFG, V31_DEMOSAIC_1_LEN, V31_DEMOSAIC_1_OFF, 0xFF}, {V31_DEMOSAIC_2_CFG, V31_DEMOSAIC_2_LEN, V31_DEMOSAIC_2_OFF, 0xFF}, /*15*/ {V31_FOV_CFG, 
V31_FOV_LEN, V31_FOV_OFF, 0xFF}, {V31_MAIN_SCALER_CFG, V31_MAIN_SCALER_LEN, V31_MAIN_SCALER_OFF, 0xFF}, {V31_WB_CFG, V31_WB_LEN, V31_WB_OFF, 0xFF}, {V31_COLOR_COR_CFG, V31_COLOR_COR_LEN, V31_COLOR_COR_OFF, 0xFF}, {V31_RGB_G_CFG, V31_RGB_G_LEN, V31_RGB_G_OFF, 0xFF}, /*20*/ {V31_LA_CFG, V31_LA_LEN, V31_LA_OFF, 0xFF }, {V31_CHROMA_EN_CFG, V31_CHROMA_EN_LEN, V31_CHROMA_EN_OFF, 0xFF}, {V31_CHROMA_SUP_CFG, V31_CHROMA_SUP_LEN, V31_CHROMA_SUP_OFF, 0xFF}, {V31_MCE_CFG, V31_MCE_LEN, V31_MCE_OFF, 0xFF}, {V31_SK_ENHAN_CFG, V31_SCE_LEN, V31_SCE_OFF, 0xFF}, /*25*/ {V31_ASF_CFG, V31_ASF_LEN, V31_ASF_OFF, 0xFF}, {V31_S2Y_CFG, V31_S2Y_LEN, V31_S2Y_OFF, 0xFF}, {V31_S2CbCr_CFG, V31_S2CbCr_LEN, V31_S2CbCr_OFF, 0xFF}, {V31_CHROMA_SUBS_CFG, V31_CHROMA_SUBS_LEN, V31_CHROMA_SUBS_OFF, 0xFF}, {V31_OUT_CLAMP_CFG, V31_OUT_CLAMP_LEN, V31_OUT_CLAMP_OFF, 0xFF}, /*30*/ {V31_FRAME_SKIP_CFG, V31_FRAME_SKIP_LEN, V31_FRAME_SKIP_OFF, 0xFF}, {V31_DUMMY_1}, {V31_DUMMY_2}, {V31_DUMMY_3}, {V31_UPDATE}, /*35*/ {V31_BL_LVL_UPDATE, V31_BLACK_LEVEL_LEN, V31_BLACK_LEVEL_OFF, 0xFF}, {V31_DEMUX_UPDATE, V31_DEMUX_LEN, V31_DEMUX_OFF, 0xFF}, {V31_DEMOSAIC_1_UPDATE, V31_DEMOSAIC_1_LEN, V31_DEMOSAIC_1_OFF, 0xFF}, {V31_DEMOSAIC_2_UPDATE, V31_DEMOSAIC_2_LEN, V31_DEMOSAIC_2_OFF, 0xFF}, {V31_FOV_UPDATE, V31_FOV_LEN, V31_FOV_OFF, 0xFF}, /*40*/ {V31_MAIN_SCALER_UPDATE, V31_MAIN_SCALER_LEN, V31_MAIN_SCALER_OFF, 0xFF}, {V31_WB_UPDATE, V31_WB_LEN, V31_WB_OFF, 0xFF}, {V31_COLOR_COR_UPDATE, V31_COLOR_COR_LEN, V31_COLOR_COR_OFF, 0xFF}, {V31_RGB_G_UPDATE, V31_RGB_G_LEN, V31_CHROMA_EN_OFF, 0xFF}, {V31_LA_UPDATE, V31_LA_LEN, V31_LA_OFF, 0xFF }, /*45*/ {V31_CHROMA_EN_UPDATE, V31_CHROMA_EN_LEN, V31_CHROMA_EN_OFF, 0xFF}, {V31_CHROMA_SUP_UPDATE, V31_CHROMA_SUP_LEN, V31_CHROMA_SUP_OFF, 0xFF}, {V31_MCE_UPDATE, V31_MCE_LEN, V31_MCE_OFF, 0xFF}, {V31_SK_ENHAN_UPDATE, V31_SCE_LEN, V31_SCE_OFF, 0xFF}, {V31_S2CbCr_UPDATE, V31_S2CbCr_LEN, V31_S2CbCr_OFF, 0xFF}, /*50*/ {V31_S2Y_UPDATE, V31_S2Y_LEN, V31_S2Y_OFF, 0xFF}, {V31_ASF_UPDATE, 
V31_ASF_UPDATE_LEN, V31_ASF_OFF, 0xFF}, {V31_FRAME_SKIP_UPDATE}, {V31_CAMIF_FRAME_UPDATE}, {V31_STATS_AF_UPDATE, V31_STATS_AF_LEN, V31_STATS_AF_OFF}, /*55*/ {V31_STATS_AE_UPDATE, V31_STATS_AE_LEN, V31_STATS_AE_OFF}, {V31_STATS_AWB_UPDATE, V31_STATS_AWB_LEN, V31_STATS_AWB_OFF}, {V31_STATS_RS_UPDATE, V31_STATS_RS_LEN, V31_STATS_RS_OFF}, {V31_STATS_CS_UPDATE, V31_STATS_CS_LEN, V31_STATS_CS_OFF}, {V31_STATS_SKIN_UPDATE}, /*60*/ {V31_STATS_IHIST_UPDATE, V31_STATS_IHIST_LEN, V31_STATS_IHIST_OFF}, {V31_DUMMY_4}, {V31_EPOCH1_ACK}, {V31_EPOCH2_ACK}, {V31_START_RECORDING}, /*65*/ {V31_STOP_RECORDING}, {V31_DUMMY_5}, {V31_DUMMY_6}, {V31_CAPTURE, V31_CAPTURE_LEN, 0xFF}, {V31_DUMMY_7}, /*70*/ {V31_STOP}, {V31_GET_HW_VERSION}, {V31_GET_FRAME_SKIP_COUNTS}, {V31_OUTPUT1_BUFFER_ENQ}, {V31_OUTPUT2_BUFFER_ENQ}, /*75*/ {V31_OUTPUT3_BUFFER_ENQ}, {V31_JPEG_OUT_BUF_ENQ}, {V31_RAW_OUT_BUF_ENQ}, {V31_RAW_IN_BUF_ENQ}, {V31_STATS_AF_ENQ}, /*80*/ {V31_STATS_AE_ENQ}, {V31_STATS_AWB_ENQ}, {V31_STATS_RS_ENQ}, {V31_STATS_CS_ENQ}, {V31_STATS_SKIN_ENQ}, /*85*/ {V31_STATS_IHIST_ENQ}, {V31_DUMMY_8}, {V31_JPEG_ENC_CFG}, {V31_DUMMY_9}, {V31_STATS_AF_START, V31_STATS_AF_LEN, V31_STATS_AF_OFF}, /*90*/ {V31_STATS_AF_STOP}, {V31_STATS_AE_START, V31_STATS_AE_LEN, V31_STATS_AE_OFF}, {V31_STATS_AE_STOP}, {V31_STATS_AWB_START, V31_STATS_AWB_LEN, V31_STATS_AWB_OFF}, {V31_STATS_AWB_STOP}, /*95*/ {V31_STATS_RS_START, V31_STATS_RS_LEN, V31_STATS_RS_OFF}, {V31_STATS_RS_STOP}, {V31_STATS_CS_START, V31_STATS_CS_LEN, V31_STATS_CS_OFF}, {V31_STATS_CS_STOP}, {V31_STATS_SKIN_START}, /*100*/ {V31_STATS_SKIN_STOP}, {V31_STATS_IHIST_START, V31_STATS_IHIST_LEN, V31_STATS_IHIST_OFF}, {V31_STATS_IHIST_STOP}, {V31_DUMMY_10}, {V31_SYNC_TIMER_SETTING, V31_SYNC_TIMER_LEN, V31_SYNC_TIMER_OFF}, /*105*/ {V31_ASYNC_TIMER_SETTING, V31_ASYNC_TIMER_LEN, V31_ASYNC_TIMER_OFF}, {V31_LIVESHOT}, {V31_ZSL, V31_CAPTURE_LEN, 0xFF}, {V31_STEREOCAM}, {V31_LA_SETUP}, /*110*/ {V31_XBAR_CFG, V31_XBAR_CFG_LEN, V31_XBAR_CFG_OFF}, /*111*/ 
{V31_EZTUNE_CFG, V31_EZTUNE_CFG_LEN, V31_EZTUNE_CFG_OFF}, }; uint32_t vfe31_AXI_WM_CFG[] = { 0x0000004C, 0x00000064, 0x0000007C, 0x00000094, 0x000000AC, 0x000000C4, 0x000000DC, }; static const char *vfe31_general_cmd[] = { "DUMMY_0", /* 0 */ "SET_CLK", "RESET", "START", "TEST_GEN_START", "OPERATION_CFG", /* 5 */ "AXI_OUT_CFG", "CAMIF_CFG", "AXI_INPUT_CFG", "BLACK_LEVEL_CFG", "ROLL_OFF_CFG", /* 10 */ "DEMUX_CFG", "DEMOSAIC_0_CFG", /* general */ "DEMOSAIC_1_CFG", /* ABF */ "DEMOSAIC_2_CFG", /* BPC */ "FOV_CFG", /* 15 */ "MAIN_SCALER_CFG", "WB_CFG", "COLOR_COR_CFG", "RGB_G_CFG", "LA_CFG", /* 20 */ "CHROMA_EN_CFG", "CHROMA_SUP_CFG", "MCE_CFG", "SK_ENHAN_CFG", "ASF_CFG", /* 25 */ "S2Y_CFG", "S2CbCr_CFG", "CHROMA_SUBS_CFG", "OUT_CLAMP_CFG", "FRAME_SKIP_CFG", /* 30 */ "DUMMY_1", "DUMMY_2", "DUMMY_3", "UPDATE", "BL_LVL_UPDATE", /* 35 */ "DEMUX_UPDATE", "DEMOSAIC_1_UPDATE", /* BPC */ "DEMOSAIC_2_UPDATE", /* ABF */ "FOV_UPDATE", "MAIN_SCALER_UPDATE", /* 40 */ "WB_UPDATE", "COLOR_COR_UPDATE", "RGB_G_UPDATE", "LA_UPDATE", "CHROMA_EN_UPDATE", /* 45 */ "CHROMA_SUP_UPDATE", "MCE_UPDATE", "SK_ENHAN_UPDATE", "S2CbCr_UPDATE", "S2Y_UPDATE", /* 50 */ "ASF_UPDATE", "FRAME_SKIP_UPDATE", "CAMIF_FRAME_UPDATE", "STATS_AF_UPDATE", "STATS_AE_UPDATE", /* 55 */ "STATS_AWB_UPDATE", "STATS_RS_UPDATE", "STATS_CS_UPDATE", "STATS_SKIN_UPDATE", "STATS_IHIST_UPDATE", /* 60 */ "DUMMY_4", "EPOCH1_ACK", "EPOCH2_ACK", "START_RECORDING", "STOP_RECORDING", /* 65 */ "DUMMY_5", "DUMMY_6", "CAPTURE", "DUMMY_7", "STOP", /* 70 */ "GET_HW_VERSION", "GET_FRAME_SKIP_COUNTS", "OUTPUT1_BUFFER_ENQ", "OUTPUT2_BUFFER_ENQ", "OUTPUT3_BUFFER_ENQ", /* 75 */ "JPEG_OUT_BUF_ENQ", "RAW_OUT_BUF_ENQ", "RAW_IN_BUF_ENQ", "STATS_AF_ENQ", "STATS_AE_ENQ", /* 80 */ "STATS_AWB_ENQ", "STATS_RS_ENQ", "STATS_CS_ENQ", "STATS_SKIN_ENQ", "STATS_IHIST_ENQ", /* 85 */ "DUMMY_8", "JPEG_ENC_CFG", "DUMMY_9", "STATS_AF_START", "STATS_AF_STOP", /* 90 */ "STATS_AE_START", "STATS_AE_STOP", "STATS_AWB_START", "STATS_AWB_STOP", "STATS_RS_START", /* 95 
*/ "STATS_RS_STOP", "STATS_CS_START", "STATS_CS_STOP", "STATS_SKIN_START", "STATS_SKIN_STOP", /* 100 */ "STATS_IHIST_START", "STATS_IHIST_STOP", "DUMMY_10", "SYNC_TIMER_SETTING", "ASYNC_TIMER_SETTING", /* 105 */ "V31_LIVESHOT", "V31_ZSL", "V31_STEREOCAM", "V31_LA_SETUP", "V31_XBAR_CFG", }; static void vfe_addr_convert(struct msm_vfe_phy_info *pinfo, enum vfe_resp_msg type, void *data, void **ext, int32_t *elen) { uint8_t outid; switch (type) { case VFE_MSG_OUTPUT_T: case VFE_MSG_OUTPUT_P: case VFE_MSG_OUTPUT_S: case VFE_MSG_OUTPUT_V: { pinfo->output_id = ((struct vfe_message *)data)->_u.msgOut.output_id; switch (type) { case VFE_MSG_OUTPUT_P: outid = OUTPUT_TYPE_P; break; case VFE_MSG_OUTPUT_V: outid = OUTPUT_TYPE_V; break; case VFE_MSG_OUTPUT_T: outid = OUTPUT_TYPE_T; break; case VFE_MSG_OUTPUT_S: outid = OUTPUT_TYPE_S; break; default: outid = 0xff; break; } pinfo->output_id = outid; pinfo->p0_phy = ((struct vfe_message *)data)->_u.msgOut.p0_addr; pinfo->p1_phy = ((struct vfe_message *)data)->_u.msgOut.p1_addr; pinfo->p2_phy = ((struct vfe_message *)data)->_u.msgOut.p2_addr; pinfo->frame_id = ((struct vfe_message *)data)->_u.msgOut.frameCounter; ((struct vfe_msg_output *)(vfe31_ctrl->extdata))->bpcInfo = ((struct vfe_message *)data)->_u.msgOut.bpcInfo; ((struct vfe_msg_output *)(vfe31_ctrl->extdata))->asfInfo = ((struct vfe_message *)data)->_u.msgOut.asfInfo; ((struct vfe_msg_output *)(vfe31_ctrl->extdata))->frameCounter = ((struct vfe_message *)data)->_u.msgOut.frameCounter; *ext = vfe31_ctrl->extdata; *elen = vfe31_ctrl->extlen; } break; default: break; } /* switch */ } static void vfe31_proc_ops(enum VFE31_MESSAGE_ID id, void *msg, size_t len) { struct msm_vfe_resp *rp; rp = vfe31_ctrl->resp->vfe_alloc(sizeof(struct msm_vfe_resp), vfe31_ctrl->syncdata, GFP_ATOMIC); if (!rp) { CDBG("rp: cannot allocate buffer\n"); return; } CDBG("vfe31_proc_ops, msgId = %d\n", id); rp->evt_msg.type = MSM_CAMERA_MSG; rp->evt_msg.msg_id = id; rp->evt_msg.len = len; 
rp->evt_msg.data = msg; switch (rp->evt_msg.msg_id) { case MSG_ID_SNAPSHOT_DONE: rp->type = VFE_MSG_SNAPSHOT; break; case MSG_ID_OUTPUT_P: rp->type = VFE_MSG_OUTPUT_P; vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_P, rp->evt_msg.data, &(rp->extdata), &(rp->extlen)); break; case MSG_ID_OUTPUT_T: rp->type = VFE_MSG_OUTPUT_T; vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_T, rp->evt_msg.data, &(rp->extdata), &(rp->extlen)); break; case MSG_ID_OUTPUT_S: rp->type = VFE_MSG_OUTPUT_S; vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_S, rp->evt_msg.data, &(rp->extdata), &(rp->extlen)); break; case MSG_ID_OUTPUT_V: rp->type = VFE_MSG_OUTPUT_V; vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_V, rp->evt_msg.data, &(rp->extdata), &(rp->extlen)); break; case MSG_ID_COMMON: rp->type = VFE_MSG_COMMON; rp->stats_msg.status_bits = ((struct vfe_message *) rp->evt_msg.data)->_u.msgStats.status_bits; rp->stats_msg.frame_id = ((struct vfe_message *) rp->evt_msg.data)->_u.msgStats.frameCounter; rp->stats_msg.aec_buff = ((struct vfe_message *) rp->evt_msg.data)->_u.msgStats.buff.aec; rp->stats_msg.awb_buff = ((struct vfe_message *) rp->evt_msg.data)->_u.msgStats.buff.awb; rp->stats_msg.af_buff = ((struct vfe_message *) rp->evt_msg.data)->_u.msgStats.buff.af; rp->stats_msg.ihist_buff = ((struct vfe_message *) rp->evt_msg.data)->_u.msgStats.buff.ihist; rp->stats_msg.rs_buff = ((struct vfe_message *) rp->evt_msg.data)->_u.msgStats.buff.rs; rp->stats_msg.cs_buff = ((struct vfe_message *) rp->evt_msg.data)->_u.msgStats.buff.cs; rp->stats_msg.awb_ymin = ((struct vfe_message *) rp->evt_msg.data)->_u.msgStats.buff.awb_ymin; break; case MSG_ID_SYNC_TIMER0_DONE: rp->type = VFE_MSG_SYNC_TIMER0; break; case MSG_ID_SYNC_TIMER1_DONE: rp->type = VFE_MSG_SYNC_TIMER1; break; case MSG_ID_SYNC_TIMER2_DONE: rp->type = VFE_MSG_SYNC_TIMER2; break; default: rp->type = VFE_MSG_GENERAL; break; } /* save the frame id.*/ rp->evt_msg.frame_id = rp->phy.frame_id; vfe31_ctrl->resp->vfe_resp(rp, MSM_CAM_Q_VFE_MSG, 
vfe31_ctrl->syncdata, GFP_ATOMIC); } static void vfe_send_outmsg(uint8_t msgid, uint32_t p0_addr, uint32_t p1_addr, uint32_t p2_addr) { struct vfe_message msg; uint8_t outid; msg._d = msgid; /* now the output mode is redundnat. */ msg._u.msgOut.frameCounter = vfe31_ctrl->vfeFrameId; switch (msgid) { case MSG_ID_OUTPUT_P: outid = OUTPUT_TYPE_P; break; case MSG_ID_OUTPUT_V: outid = OUTPUT_TYPE_V; break; case MSG_ID_OUTPUT_T: outid = OUTPUT_TYPE_T; break; case MSG_ID_OUTPUT_S: outid = OUTPUT_TYPE_S; break; default: outid = 0xff; /* -1 for error condition.*/ break; } msg._u.msgOut.output_id = msgid; msg._u.msgOut.p0_addr = p0_addr; msg._u.msgOut.p1_addr = p1_addr; msg._u.msgOut.p2_addr = p2_addr; CDBG("%s p2_addr = 0x%x\n", __func__, p2_addr); vfe31_proc_ops(msgid, &msg, sizeof(struct vfe_message)); return; } static int vfe31_enable(struct camera_enable_cmd *enable) { return 0; } static void vfe31_stop(void) { atomic_set(&vfe31_ctrl->vstate, 0); atomic_set(&vfe31_ctrl->stop_ack_pending, 1); /* in either continuous or snapshot mode, stop command can be issued * at any time. stop camif immediately. */ msm_camera_io_w_mb(CAMIF_COMMAND_STOP_IMMEDIATELY, vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND); /* disable all interrupts. */ msm_camera_io_w(VFE_DISABLE_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_MASK_0); msm_camera_io_w(VFE_DISABLE_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_MASK_1); /* clear all pending interrupts*/ msm_camera_io_w(VFE_CLEAR_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_0); msm_camera_io_w(VFE_CLEAR_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_1); /* Ensure the write order while writing to the command register using the barrier */ msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_IRQ_CMD); /* now enable only halt_irq & reset_irq */ msm_camera_io_w(0xf0000000, /* this is for async timer. */ vfe31_ctrl->vfebase + VFE_IRQ_MASK_0); msm_camera_io_w(VFE_IMASK_AXI_HALT, vfe31_ctrl->vfebase + VFE_IRQ_MASK_1); /* then apply axi halt command. 
*/ msm_camera_io_w_mb(AXI_HALT, vfe31_ctrl->vfebase + VFE_AXI_CMD); } static int vfe31_disable(struct camera_enable_cmd *enable, struct platform_device *dev) { msm_camio_set_perf_lvl(S_EXIT); msm_camio_disable(dev); return 0; } static int vfe31_add_free_buf2(struct vfe31_output_ch *outch, uint32_t paddr, uint32_t p0_off, uint32_t p1_off, uint32_t p2_off) { struct vfe31_free_buf *free_buf = NULL; unsigned long flags = 0; free_buf = kmalloc(sizeof(struct vfe31_free_buf), GFP_KERNEL); if (!free_buf) return -ENOMEM; spin_lock_irqsave(&outch->free_buf_lock, flags); free_buf->paddr = paddr; free_buf->planar0_off = p0_off; free_buf->planar1_off = p1_off; free_buf->planar2_off = p2_off; list_add_tail(&free_buf->node, &outch->free_buf_head); CDBG("%s: free_buf paddr = 0x%x, y_off = %d, cbcr_off = %d\n", __func__, free_buf->paddr, free_buf->planar0_off, free_buf->planar1_off); spin_unlock_irqrestore(&outch->free_buf_lock, flags); return 0; } #define vfe31_add_free_buf(outch, regptr) \ vfe31_add_free_buf2(outch, regptr->paddr, \ regptr->info.planar0_off, \ regptr->info.planar1_off, \ regptr->info.planar2_off) #define vfe31_free_buf_available(outch) \ (!list_empty(&outch.free_buf_head)) static inline struct vfe31_free_buf *vfe31_get_free_buf( struct vfe31_output_ch *outch) { unsigned long flags = 0; struct vfe31_free_buf *free_buf = NULL; spin_lock_irqsave(&outch->free_buf_lock, flags); if (!list_empty(&outch->free_buf_head)) { free_buf = list_first_entry(&outch->free_buf_head, struct vfe31_free_buf, node); if (free_buf) list_del_init(&free_buf->node); } spin_unlock_irqrestore(&outch->free_buf_lock, flags); return free_buf; } static inline void vfe31_reset_free_buf_queue( struct vfe31_output_ch *outch) { unsigned long flags = 0; struct vfe31_free_buf *free_buf = NULL; spin_lock_irqsave(&outch->free_buf_lock, flags); while (!list_empty(&outch->free_buf_head)) { free_buf = list_first_entry(&outch->free_buf_head, struct vfe31_free_buf, node); if (free_buf) { 
list_del_init(&free_buf->node); kfree(free_buf); } } spin_unlock_irqrestore(&outch->free_buf_lock, flags); } #define vfe31_init_free_buf_queue() do { \ INIT_LIST_HEAD(&vfe31_ctrl->outpath.out0.free_buf_head); \ INIT_LIST_HEAD(&vfe31_ctrl->outpath.out1.free_buf_head); \ INIT_LIST_HEAD(&vfe31_ctrl->outpath.out2.free_buf_head); \ spin_lock_init(&vfe31_ctrl->outpath.out0.free_buf_lock); \ spin_lock_init(&vfe31_ctrl->outpath.out1.free_buf_lock); \ spin_lock_init(&vfe31_ctrl->outpath.out2.free_buf_lock); \ } while (0) #define vfe31_reset_free_buf_queue_all() do { \ vfe31_reset_free_buf_queue(&vfe31_ctrl->outpath.out0); \ vfe31_reset_free_buf_queue(&vfe31_ctrl->outpath.out1); \ vfe31_reset_free_buf_queue(&vfe31_ctrl->outpath.out2); \ } while (0) static int vfe31_config_axi(int mode, struct axidata *ad, uint32_t *ao) { int i; uint32_t *p, *p1, *p2, *p3; int32_t *ch_info; struct vfe31_output_ch *outp1, *outp2, *outp3; struct msm_pmem_region *regp1 = NULL; struct msm_pmem_region *regp2 = NULL; struct msm_pmem_region *regp3 = NULL; int ret; struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata; outp1 = NULL; outp2 = NULL; outp3 = NULL; p = ao + 2; /* Update the corresponding write masters for each output*/ ch_info = ao + V31_AXI_CFG_LEN; vfe31_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info; vfe31_ctrl->outpath.out0.ch1 = 0x0000FFFF & (*ch_info++ >> 16); vfe31_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info++; vfe31_ctrl->outpath.out1.ch0 = 0x0000FFFF & *ch_info; vfe31_ctrl->outpath.out1.ch1 = 0x0000FFFF & (*ch_info++ >> 16); vfe31_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info++; vfe31_ctrl->outpath.out2.ch0 = 0x0000FFFF & *ch_info; vfe31_ctrl->outpath.out2.ch1 = 0x0000FFFF & (*ch_info++ >> 16); vfe31_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++; CDBG("vfe31_config_axi: mode = %d, bufnum1 = %d, bufnum2 = %d" "bufnum3 = %d", mode, ad->bufnum1, ad->bufnum2, ad->bufnum3); switch (mode) { case OUTPUT_2: { if (ad->bufnum2 != 3) return -EINVAL; regp1 = 
&(ad->region[ad->bufnum1]); outp1 = &(vfe31_ctrl->outpath.out0); vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_PT; for (i = 0; i < 2; i++) { p1 = ao + 6 + i; /* wm0 for y */ *p1 = (regp1->paddr + regp1->info.planar0_off); p1 = ao + 12 + i; /* wm1 for cbcr */ *p1 = (regp1->paddr + regp1->info.planar1_off); regp1++; } ret = vfe31_add_free_buf(outp1, regp1); if (ret < 0) return ret; } break; case OUTPUT_1_AND_2: /* use wm0& 4 for thumbnail, wm1&5 for main image.*/ if ((ad->bufnum1 < 1) || (ad->bufnum2 < 1)) return -EINVAL; vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_S; /* main image.*/ vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_PT; /* thumbnail. */ /* this is thumbnail buffer. */ regp1 = &(ad->region[ad->bufnum1-1]); /* this is main image buffer. */ regp2 = &(ad->region[ad->bufnum1+ad->bufnum2-1]); outp1 = &(vfe31_ctrl->outpath.out0); outp2 = &(vfe31_ctrl->outpath.out1); /* snapshot */ /* Parse the buffers!!! */ if (ad->bufnum2 == 1) { /* assuming bufnum1 = bufnum2 */ p1 = ao + 6; /* wm0 ping */ *p1++ = (regp1->paddr + regp1->info.planar0_off); /* this is to duplicate ping address to pong.*/ *p1 = (regp1->paddr + regp1->info.planar0_off); p1 = ao + 30; /* wm4 ping */ *p1++ = (regp1->paddr + regp1->info.planar1_off); CDBG("%s: regp1->info.cbcr_off = 0x%x\n", __func__, regp1->info.planar1_off); /* this is to duplicate ping address to pong.*/ *p1 = (regp1->paddr + regp1->info.planar1_off); p1 = ao + 12; /* wm1 ping */ *p1++ = (regp2->paddr + regp2->info.planar0_off); /* pong = ping,*/ *p1 = (regp2->paddr + regp2->info.planar0_off); p1 = ao + 36; /* wm5 */ *p1++ = (regp2->paddr + regp2->info.planar1_off); CDBG("%s: regp2->info.cbcr_off = 0x%x\n", __func__, regp2->info.planar1_off); /* pong = ping,*/ *p1 = (regp2->paddr + regp2->info.planar1_off); } else { /* more than one snapshot */ /* first fill ping & pong */ for (i = 0; i < 2; i++) { p1 = ao + 6 + i; /* wm0 for y */ *p1 = (regp1->paddr + regp1->info.planar0_off); p1 = ao + 30 + i; /* wm4 for 
cbcr */ *p1 = (regp1->paddr + regp1->info.planar1_off); regp1--; } for (i = 0; i < 2; i++) { p2 = ao + 12 + i; /* wm1 for y */ *p2 = (regp2->paddr + regp2->info.planar0_off); p2 = ao + 36 + i; /* wm5 for cbcr */ *p2 = (regp2->paddr + regp2->info.planar1_off); regp2--; } for (i = 2; i < ad->bufnum1; i++) { ret = vfe31_add_free_buf(outp1, regp1); if (ret < 0) return ret; regp1--; } for (i = 2; i < ad->bufnum2; i++) { ret = vfe31_add_free_buf(outp2, regp2); if (ret < 0) return ret; regp2--; } } break; case OUTPUT_1_2_AND_3: CDBG("%s: OUTPUT_1_2_AND_3", __func__); CDBG("%s: %d %d %d", __func__, ad->bufnum1, ad->bufnum2, ad->bufnum3); /* use wm0& 4 for postview, wm1&5 for preview.*/ /* use wm2& 6 for main img */ if ((ad->bufnum1 < 1) || (ad->bufnum2 < 1) || (ad->bufnum3 < 1)) return -EINVAL; vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_S; /* main image.*/ vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_P; /* preview. */ vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_T; /* thumbnail. */ /* this is preview buffer. */ regp1 = &(ad->region[0]); /* this is thumbnail buffer. */ regp2 = &(ad->region[ad->bufnum1]); /* this is main image buffer. */ regp3 = &(ad->region[ad->bufnum1+ad->bufnum2]); outp1 = &(vfe31_ctrl->outpath.out0); outp2 = &(vfe31_ctrl->outpath.out1); outp3 = &(vfe31_ctrl->outpath.out2); /* Parse the buffers!!! 
*/ /* first fill ping & pong */ for (i = 0; i < 2; i++) { p1 = ao + 6 + i; /* wm0 for y */ *p1 = (regp1->paddr + regp1->info.planar0_off); p1 = ao + 30 + i; /* wm4 for cbcr */ *p1 = (regp1->paddr + regp1->info.planar1_off); regp1++; } for (i = 0; i < 2; i++) { p2 = ao + 12 + i; /* wm1 for y */ *p2 = (regp2->paddr + regp2->info.planar0_off); p2 = ao + 36 + i; /* wm5 for cbcr */ *p2 = (regp2->paddr + regp2->info.planar1_off); regp2++; } for (i = 0; i < 2; i++) { p3 = ao + 18 + i; /* wm2 for y */ *p3 = (regp3->paddr + regp3->info.planar0_off); p3 = ao + 42 + i; /* wm6 for cbcr */ *p3 = (regp3->paddr + regp3->info.planar1_off); regp3++; } for (i = 2; i < ad->bufnum1; i++) { ret = vfe31_add_free_buf(outp1, regp1); if (ret < 0) return ret; regp1++; } for (i = 2; i < ad->bufnum2; i++) { ret = vfe31_add_free_buf(outp2, regp2); if (ret < 0) return ret; regp2++; } for (i = 2; i < ad->bufnum3; i++) { ret = vfe31_add_free_buf(outp3, regp3); if (ret < 0) return ret; regp3++; } break; case OUTPUT_ZSL_ALL_CHNLS: CDBG("%s: OUTPUT_ZSL_ALL_CHNLS", __func__); CDBG("%s: %d %d %d", __func__, ad->bufnum1, ad->bufnum2, ad->bufnum3); /* use wm0& 4 for postview, wm1&5 for preview.*/ /* use wm2& 6 for main img */ if ((ad->bufnum1 < 1) || (ad->bufnum2 < 1) || (ad->bufnum3 < 1)) return -EINVAL; vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_S; /* main image.*/ vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_P_ALL_CHNLS; /* preview. */ vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_T; /* thumbnail. */ /* this is preview buffer. */ regp1 = &(ad->region[0]); /* this is thumbnail buffer. */ regp2 = &(ad->region[ad->bufnum1]); /* this is main image buffer. */ regp3 = &(ad->region[ad->bufnum1+ad->bufnum2]); outp1 = &(vfe31_ctrl->outpath.out0); outp2 = &(vfe31_ctrl->outpath.out1); outp3 = &(vfe31_ctrl->outpath.out2); /* Parse the buffers!!! 
*/ /* first fill ping & pong */ for (i = 0; i < 2; i++) { p1 = ao + 6 + i; /* wm0 for y */ *p1 = (regp2->paddr + regp2->info.planar0_off); p1 = ao + 12 + i; /* wm1 for cbcr */ *p1 = (regp2->paddr + regp2->info.planar1_off); regp2++; } for (i = 0; i < 2; i++) { p2 = ao + 30 + i; /* wm4 for y */ *p2 = (regp1->paddr + regp1->info.planar0_off); p2 = ao + 36 + i; /* wm5 for cbcr */ *p2 = (regp1->paddr + regp1->info.planar1_off); p2 = ao + 42 + i; /* wm5 for cbcr */ *p2 = (regp1->paddr + regp1->info.planar2_off); regp1++; } for (i = 0; i < 2; i++) { p3 = ao + 18 + i; /* wm2 for y */ *p3 = (regp3->paddr + regp3->info.planar0_off); p3 = ao + 24 + i; /* wm3 for cbcr */ *p3 = (regp3->paddr + regp3->info.planar1_off); regp3++; } for (i = 2; i < ad->bufnum1; i++) { ret = vfe31_add_free_buf(outp1, regp1); if (ret < 0) return ret; regp1++; } for (i = 2; i < ad->bufnum2; i++) { ret = vfe31_add_free_buf(outp2, regp2); if (ret < 0) return ret; regp2++; } for (i = 2; i < ad->bufnum3; i++) { ret = vfe31_add_free_buf(outp3, regp3); if (ret < 0) return ret; regp3++; } break; case OUTPUT_1_AND_3: { /* use wm0&4 for preview, wm1&5 for video.*/ if ((ad->bufnum1 < 2) || (ad->bufnum2 < 2)) return -EINVAL; #ifdef CONFIG_MSM_CAMERA_V4L2 *p++ = 0x1; /* xbar cfg0 */ *p = 0x1a03; /* xbar cfg1 */ #endif vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_V; /* video*/ vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_PT; /* preview */ regp1 = &(ad->region[0]); /* this is preview buffer. */ regp2 = &(ad->region[ad->bufnum1]);/* this is video buffer. 
*/ outp1 = &(vfe31_ctrl->outpath.out0); /* preview */ outp2 = &(vfe31_ctrl->outpath.out2); /* video */ for (i = 0; i < 2; i++) { p1 = ao + 6 + i; /* wm0 for y */ *p1 = (regp1->paddr + regp1->info.planar0_off); p1 = ao + 30 + i; /* wm4 for cbcr */ *p1 = (regp1->paddr + regp1->info.planar1_off); regp1++; } for (i = 0; i < 2; i++) { p2 = ao + 12 + i; /* wm1 for y */ *p2 = (regp2->paddr + regp2->info.planar0_off); p2 = ao + 36 + i; /* wm5 for cbcr */ *p2 = (regp2->paddr + regp2->info.planar1_off); regp2++; } for (i = 2; i < ad->bufnum1; i++) { ret = vfe31_add_free_buf(outp1, regp1); if (ret < 0) return ret; regp1++; } for (i = 2; i < ad->bufnum2; i++) { ret = vfe31_add_free_buf(outp2, regp2); if (ret < 0) return ret; regp2++; } } break; case OUTPUT_VIDEO_ALL_CHNLS: { /* use wm0&4 for preview, wm1&5 for video.*/ vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_V; /* video*/ vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_P_ALL_CHNLS; regp1 = &(ad->region[0]); regp2 = &(ad->region[ad->bufnum1]); outp1 = &(vfe31_ctrl->outpath.out0); outp2 = &(vfe31_ctrl->outpath.out2); for (i = 0; i < 2; i++) { p1 = ao + 6 + i; /* wm0 for y */ *p1 = (regp1->paddr + regp1->info.planar0_off); p1 = ao + 12 + i; /* wm1 for cbcr */ *p1 = (regp1->paddr + regp1->info.planar1_off); p1 = ao + 18 + i; /* wm2 for cbcr */ *p1 = (regp1->paddr + regp1->info.planar2_off); regp1++; } for (i = 0; i < 2; i++) { p2 = ao + 30 + i; /* wm4 for y */ *p2 = (regp2->paddr + regp2->info.planar0_off); p2 = ao + 36 + i; /* wm5 for cbcr */ *p2 = (regp2->paddr + regp2->info.planar1_off); regp2++; } for (i = 2; i < ad->bufnum1; i++) { ret = vfe31_add_free_buf(outp1, regp1); if (ret < 0) return ret; regp1++; } for (i = 2; i < ad->bufnum2; i++) { ret = vfe31_add_free_buf(outp2, regp2); if (ret < 0) return ret; regp2++; } } break; case CAMIF_TO_AXI_VIA_OUTPUT_2: { /* use wm0 only */ if (ad->bufnum2 < 1) return -EINVAL; CDBG("config axi for raw snapshot.\n"); vfe31_ctrl->outpath.out1.ch0 = 0; /* raw */ regp1 = 
&(ad->region[ad->bufnum1]); vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_S; p1 = ao + 6; /* wm0 for y */ *p1 = (regp1->paddr + regp1->info.planar0_off); if (p_sync->stereocam_enabled) p_sync->stereo_state = STEREO_RAW_SNAP_IDLE; } break; default: break; } msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[V31_AXI_OUT_CFG].offset, ao, vfe31_cmd[V31_AXI_OUT_CFG].length - V31_AXI_CH_INF_LEN); return 0; }
/*
 * Reset all software-side bookkeeping of the VFE31 driver to its
 * power-on defaults: pending-ack flags, IRQ/frame counters, operation
 * mode, frame-skip patterns and the per-stats control structures.
 * Touches no hardware registers.
 */
static void vfe31_reset_internal_variables(void) { unsigned long flags; vfe31_ctrl->vfeImaskCompositePacked = 0; /* state control variables */ vfe31_ctrl->start_ack_pending = FALSE; atomic_set(&irq_cnt, 0); spin_lock_irqsave(&vfe31_ctrl->xbar_lock, flags); vfe31_ctrl->xbar_update_pending = 0; spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags); atomic_set(&vfe31_ctrl->stop_ack_pending, 0); atomic_set(&vfe31_ctrl->vstate, 0); vfe31_ctrl->aec_ack_pending = FALSE; vfe31_ctrl->af_ack_pending = FALSE; vfe31_ctrl->awb_ack_pending = FALSE; vfe31_ctrl->ihist_ack_pending = FALSE; vfe31_ctrl->rs_ack_pending = FALSE; vfe31_ctrl->cs_ack_pending = FALSE; vfe31_ctrl->reset_ack_pending = FALSE; spin_lock_irqsave(&vfe31_ctrl->update_ack_lock, flags); vfe31_ctrl->update_ack_pending = FALSE; spin_unlock_irqrestore(&vfe31_ctrl->update_ack_lock, flags); vfe31_ctrl->recording_state = VFE_REC_STATE_IDLE; /* 0 for continuous mode, 1 for snapshot mode */ vfe31_ctrl->operation_mode = VFE_MODE_OF_OPERATION_CONTINUOUS; vfe31_ctrl->outpath.output_mode = 0; vfe31_ctrl->vfe_capture_count = 0; /* this is unsigned 32 bit integer. */ vfe31_ctrl->vfeFrameId = 0; vfe31_ctrl->output1Pattern = 0xffffffff; vfe31_ctrl->output1Period = 31; vfe31_ctrl->output2Pattern = 0xffffffff; vfe31_ctrl->output2Period = 31; vfe31_ctrl->vfeFrameSkipCount = 0; vfe31_ctrl->vfeFrameSkipPeriod = 31; /* Stats control variables. */ memset(&(vfe31_ctrl->afStatsControl), 0, sizeof(struct vfe_stats_control)); memset(&(vfe31_ctrl->awbStatsControl), 0, sizeof(struct vfe_stats_control)); memset(&(vfe31_ctrl->aecStatsControl), 0, sizeof(struct vfe_stats_control)); memset(&(vfe31_ctrl->ihistStatsControl), 0, sizeof(struct vfe_stats_control)); memset(&(vfe31_ctrl->rsStatsControl), 0, sizeof(struct vfe_stats_control)); memset(&(vfe31_ctrl->csStatsControl), 0, sizeof(struct vfe_stats_control)); }
/*
 * Full VFE reset: drop all free-buffer queues and software state, zero
 * the histogram DMI RAM, then mask/clear every IRQ and kick the
 * hardware global reset.  Completion is signalled asynchronously by the
 * reset-ack interrupt that is re-enabled just before the reset command.
 */
static void vfe31_reset(void) { uint32_t vfe_version; vfe31_reset_free_buf_queue_all(); vfe31_reset_internal_variables(); vfe31_reset_hist_cfg(); vfe_version = msm_camera_io_r(vfe31_ctrl->vfebase); CDBG("vfe_version = 0x%x\n", vfe_version); /* disable all interrupts. vfeImaskLocal is also reset to 0 * to begin with. */ msm_camera_io_w(VFE_DISABLE_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_MASK_0); msm_camera_io_w(VFE_DISABLE_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_MASK_1); /* clear all pending interrupts*/ msm_camera_io_w(VFE_CLEAR_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_0); msm_camera_io_w(VFE_CLEAR_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_1); /* Ensure the write order while writing to the command register using the barrier */ msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_IRQ_CMD); /* enable reset_ack interrupt. */ msm_camera_io_w(VFE_IMASK_RESET, vfe31_ctrl->vfebase + VFE_IRQ_MASK_1); /* Write to VFE_GLOBAL_RESET_CMD to reset the vfe hardware. Once reset * is done, hardware interrupt will be generated. VFE ist processes * the interrupt to complete the function call. Note that the reset * function is synchronous. */ /* Ensure the write order while writing to the command register using the barrier */ msm_camera_io_w_mb(VFE_RESET_UPON_RESET_CMD, vfe31_ctrl->vfebase + VFE_GLOBAL_RESET); }
/*
 * Apply the userspace operation-config block: *cmd is consumed as a
 * fixed-order uint32_t array (operation mode, stats composite flag,
 * HFR mode, then five raw register values written in sequence).
 * The caller guarantees the buffer is V31_OPERATION_CFG_LEN bytes.
 */
static int vfe31_operation_config(uint32_t *cmd) { uint32_t *p = cmd; vfe31_ctrl->operation_mode = *p; vpe_ctrl->pad_2k_bool = (vfe31_ctrl->operation_mode & 1) ?
FALSE : TRUE; vfe31_ctrl->stats_comp = *(++p); vfe31_ctrl->hfr_mode = *(++p); msm_camera_io_w(*(++p), vfe31_ctrl->vfebase + VFE_CFG_OFF); msm_camera_io_w(*(++p), vfe31_ctrl->vfebase + VFE_MODULE_CFG); msm_camera_io_w(*(++p), vfe31_ctrl->vfebase + VFE_REALIGN_BUF); msm_camera_io_w(*(++p), vfe31_ctrl->vfebase + VFE_CHROMA_UP); msm_camera_io_w(*(++p), vfe31_ctrl->vfebase + VFE_STATS_CFG); wmb(); return 0; }
/*
 * The vfe_stats_*_buf_init() helpers below all follow one pattern:
 * program statsBuf[0]/statsBuf[1] as the ping/pong DMA addresses for
 * the corresponding stats write master and remember statsBuf[2] as the
 * buffer to hand to the hardware on the next frame.  They always
 * return 0.
 */
static uint32_t vfe_stats_awb_buf_init(struct vfe_cmd_stats_buf *in) { uint32_t *ptr = in->statsBuf; uint32_t addr; addr = ptr[0]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR); addr = ptr[1]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR); vfe31_ctrl->awbStatsControl.nextFrameAddrBuf = in->statsBuf[2]; return 0; }
static uint32_t vfe_stats_aec_buf_init(struct vfe_cmd_stats_buf *in) { uint32_t *ptr = in->statsBuf; uint32_t addr; addr = ptr[0]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AEC_WR_PING_ADDR); addr = ptr[1]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AEC_WR_PONG_ADDR); vfe31_ctrl->aecStatsControl.nextFrameAddrBuf = in->statsBuf[2]; return 0; }
static uint32_t vfe_stats_af_buf_init(struct vfe_cmd_stats_buf *in) { uint32_t *ptr = in->statsBuf; uint32_t addr; addr = ptr[0]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR); addr = ptr[1]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR); vfe31_ctrl->afStatsControl.nextFrameAddrBuf = in->statsBuf[2]; return 0; }
static uint32_t vfe_stats_ihist_buf_init(struct vfe_cmd_stats_buf *in) { uint32_t *ptr = in->statsBuf; uint32_t addr; addr = ptr[0]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PING_ADDR); addr = ptr[1]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PONG_ADDR); vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf = in->statsBuf[2]; return 0; }
static uint32_t vfe_stats_rs_buf_init(struct vfe_cmd_stats_buf *in) { uint32_t *ptr = in->statsBuf; uint32_t addr; addr = ptr[0]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PING_ADDR); addr = ptr[1]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PONG_ADDR); vfe31_ctrl->rsStatsControl.nextFrameAddrBuf = in->statsBuf[2]; return 0; }
static uint32_t vfe_stats_cs_buf_init(struct vfe_cmd_stats_buf *in) { uint32_t *ptr = in->statsBuf; uint32_t addr; addr = ptr[0]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PING_ADDR); addr = ptr[1]; msm_camera_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PONG_ADDR); vfe31_ctrl->csStatsControl.nextFrameAddrBuf = in->statsBuf[2]; return 0; }
/*
 * Common start sequence shared by preview/video/snapshot/ZSL start
 * paths: build and program the IRQ masks (composite-stats vs AF-stats
 * variant chosen by stats_comp), enable AXI out-of-order and the bus
 * performance monitor, then issue reg-update and the CAMIF start
 * command.  Marks vstate active; start-ack is expected via IRQ.
 */
static void vfe31_start_common(void) { uint32_t irq_mask = 0x00E00021; vfe31_ctrl->start_ack_pending = TRUE; CDBG("VFE opertaion mode = 0x%x, output mode = 0x%x\n", vfe31_ctrl->operation_mode, vfe31_ctrl->outpath.output_mode); /* Enable IRQ for comp stats, Image master, SOF & Reg Update*/ if (vfe31_ctrl->stats_comp) irq_mask |= 0x01000000; else /* Enable IRQ for Image masters, AF stats, SOF & Reg Update */ irq_mask |= 0x00004000; /* Enable EOF for video mode */ if (VFE_MODE_OF_OPERATION_VIDEO == vfe31_ctrl->operation_mode) irq_mask |= 0x4; msm_camera_io_w(irq_mask, vfe31_ctrl->vfebase + VFE_IRQ_MASK_0); msm_camera_io_w(VFE_IMASK_RESET, vfe31_ctrl->vfebase + VFE_IRQ_MASK_1); /* enable out of order option */ msm_camera_io_w(0x80000000, vfe31_ctrl->vfebase + VFE_AXI_CFG); /* enable performance monitor */ msm_camera_io_w(1, vfe31_ctrl->vfebase + VFE_BUS_PM_CFG); msm_camera_io_w(1, vfe31_ctrl->vfebase + VFE_BUS_PM_CMD); msm_camera_io_dump(vfe31_ctrl->vfebase, 0x600); /* Ensure the write order while writing to the command register using the barrier */ msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD); msm_camera_io_w(1, vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND); wmb(); atomic_set(&vfe31_ctrl->vstate, 1); }
/*
 * Request video recording start: raise the AXI clock level, then ask
 * for a reg-update; the actual state change completes in the reg-update
 * IRQ handler once recording_state is seen as START_REQUESTED.
 */
static int vfe31_start_recording(void) { msm_camio_set_perf_lvl(S_VIDEO); usleep(1000);
vfe31_ctrl->recording_state = VFE_REC_STATE_START_REQUESTED; msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD); return 0; }
/*
 * Request recording stop (mirror of vfe31_start_recording); perf level
 * is dropped back to preview immediately, the state machine completes
 * on the next reg-update interrupt.
 */
static int vfe31_stop_recording(void) { vfe31_ctrl->recording_state = VFE_REC_STATE_STOP_REQUESTED; msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD); msm_camio_set_perf_lvl(S_PREVIEW); return 0; }
/* Flag liveshot capture on the shared sync object (NULL-safe). */
static void vfe31_liveshot(void) { struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata; if (p_sync) p_sync->liveshot_enabled = true; }
/* Enable/disable stereo camera mode on the shared sync object (NULL-safe). */
static void vfe31_stereocam(uint32_t enable) { struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata; if (p_sync) { CDBG("%s: Enable StereoCam %d!!!\n", __func__, enable); p_sync->stereocam_enabled = enable; } }
/*
 * Start ZSL (zero shutter lag) operation: build the composite IRQ mask
 * for whichever preview/thumbnail/snapshot paths are active, enable the
 * corresponding AXI write masters, then run the common start sequence
 * at the ZSL performance level.  The two raw writes at 0x18C/0x188 are
 * tagged "for debug" upstream; their exact meaning is not visible here.
 */
static int vfe31_zsl(void) { uint32_t irq_comp_mask = 0; /* capture command is valid for both idle and active state. */ irq_comp_mask = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK); CDBG("%s:op mode %d O/P Mode %d\n", __func__, vfe31_ctrl->operation_mode, vfe31_ctrl->outpath.output_mode); if ((vfe31_ctrl->operation_mode == VFE_MODE_OF_OPERATION_ZSL)) { if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P) { irq_comp_mask |= ((0x1 << (vfe31_ctrl->outpath.out0.ch0)) | (0x1 << (vfe31_ctrl->outpath.out0.ch1))); } else if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P_ALL_CHNLS) { irq_comp_mask |= (0x1 << vfe31_ctrl->outpath.out0.ch0 | 0x1 << vfe31_ctrl->outpath.out0.ch1 | 0x1 << vfe31_ctrl->outpath.out0.ch2); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_T) { irq_comp_mask |= ((0x1 << (vfe31_ctrl->outpath.out1.ch0 + 8)) | (0x1 << (vfe31_ctrl->outpath.out1.ch1 + 8))); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) { irq_comp_mask |= ((0x1 << (vfe31_ctrl->outpath.out2.ch0 + 8)) | (0x1 << (vfe31_ctrl->outpath.out2.ch1 + 8))); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P) { msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch0]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch1]); } else if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P_ALL_CHNLS) { msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch0]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch1]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch2]); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_T) { msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch0]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch1]); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) { msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch0]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch1]); } } msm_camera_io_w(irq_comp_mask, vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK); vfe31_start_common(); msm_camio_set_perf_lvl(S_ZSL); usleep(1000); /* for debug */ msm_camera_io_w(1, vfe31_ctrl->vfebase + 0x18C); msm_camera_io_w(1, vfe31_ctrl->vfebase + 0x188); return 0; }
/*
 * Arm a (raw) snapshot capture of num_frames_capture frames: set the
 * snap/thumb counters, program the composite IRQ mask and enable the
 * write masters for the active PT/S paths, then start the VFE.
 */
static int vfe31_capture(uint32_t num_frames_capture) { uint32_t irq_comp_mask = 0; struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata; /* capture command is valid for both idle and active state.
*/ vfe31_ctrl->vfe_capture_count = num_frames_capture; if (p_sync) { p_sync->snap_count = num_frames_capture; p_sync->thumb_count = num_frames_capture; } irq_comp_mask = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK); if ((vfe31_ctrl->operation_mode == VFE_MODE_OF_OPERATION_SNAPSHOT) || (vfe31_ctrl->operation_mode == VFE_MODE_OF_OPERATION_ZSL)){ if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_PT) { irq_comp_mask |= ((0x1 << (vfe31_ctrl->outpath.out0.ch0 + 8)) | (0x1 << (vfe31_ctrl->outpath.out0.ch1 + 8))); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) { irq_comp_mask |= ((0x1 << (vfe31_ctrl->outpath.out1.ch0 + 8)) | (0x1 << (vfe31_ctrl->outpath.out1.ch1 + 8))); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_PT) { msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch0]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch1]); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) { msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch0]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch1]); } } else { /* this is raw snapshot mode. */ CDBG("config the comp imask for raw snapshot mode.\n"); if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) { irq_comp_mask |= (0x1 << (vfe31_ctrl->outpath.out1.ch0 + 8)); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch0]); } } msm_camera_io_w(irq_comp_mask, vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK); /* NOTE(review): p_sync was NULL-checked above but is dereferenced * unconditionally here; confirm vfe_syncdata can never be NULL on * this path. */ if (p_sync->stereocam_enabled) msm_camio_set_perf_lvl(S_STEREO_CAPTURE); else msm_camio_set_perf_lvl(S_CAPTURE); usleep(1000); vfe31_start_common(); return 0; }
/*
 * Start continuous (preview/video) operation: program the composite
 * IRQ masks and AXI write masters for the active preview path (PT or
 * all-channel) and the video path, pick the perf level (stereo-aware),
 * and run the common start sequence.  A no-op for non-continuous modes.
 * NOTE(review): p_sync is dereferenced below without a NULL check,
 * unlike vfe31_liveshot()/vfe31_stereocam() — confirm it is always set.
 */
static int vfe31_start(void) { uint32_t irq_comp_mask = 0; struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata; /* start command now is only good for continuous mode. */ if ((vfe31_ctrl->operation_mode != VFE_MODE_OF_OPERATION_CONTINUOUS) && (vfe31_ctrl->operation_mode != VFE_MODE_OF_OPERATION_VIDEO)) return 0; irq_comp_mask = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK); if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_PT) { irq_comp_mask |= (0x1 << vfe31_ctrl->outpath.out0.ch0 | 0x1 << vfe31_ctrl->outpath.out0.ch1); if (vfe31_ctrl->outpath.out0.ch2 >= 0) irq_comp_mask |= (0x1 << vfe31_ctrl->outpath.out0.ch0 | 0x1 << vfe31_ctrl->outpath.out0.ch1 | 0x1 << vfe31_ctrl->outpath.out0.ch2); } else if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P_ALL_CHNLS) { irq_comp_mask |= (0x1 << vfe31_ctrl->outpath.out0.ch0 | 0x1 << vfe31_ctrl->outpath.out0.ch1 | 0x1 << vfe31_ctrl->outpath.out0.ch2); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_V) { irq_comp_mask |= (0x1 << (vfe31_ctrl->outpath.out2.ch0 + 16)| 0x1 << (vfe31_ctrl->outpath.out2.ch1 + 16)); } msm_camera_io_w(irq_comp_mask, vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK); if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_PT) { msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch0]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch1]); if (vfe31_ctrl->outpath.out0.ch2 >= 0) msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch2]); } else if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P_ALL_CHNLS) { msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch0]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch1]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch2]); } if (p_sync->stereocam_enabled) msm_camio_set_perf_lvl(S_STEREO_VIDEO); else msm_camio_set_perf_lvl(S_PREVIEW); usleep(1000); vfe31_start_common(); return 0; }
/*
 * Commit pending table updates: flip the gamma and/or luma-adaptation
 * bank-select registers if an update was staged, mark update-ack
 * pending under the lock, then request a hardware reg-update.
 */
static void vfe31_update(void) { unsigned long flags; CDBG("vfe31_update\n"); if (vfe31_ctrl->update_gamma) { if
(!msm_camera_io_r(vfe31_ctrl->vfebase + V31_GAMMA_CFG_OFF)) msm_camera_io_w(7, vfe31_ctrl->vfebase+V31_GAMMA_CFG_OFF); else msm_camera_io_w(0, vfe31_ctrl->vfebase+V31_GAMMA_CFG_OFF); vfe31_ctrl->update_gamma = false; } if (vfe31_ctrl->update_luma) { if (!msm_camera_io_r(vfe31_ctrl->vfebase + V31_LUMA_CFG_OFF)) msm_camera_io_w(1, vfe31_ctrl->vfebase + V31_LUMA_CFG_OFF); else msm_camera_io_w(0, vfe31_ctrl->vfebase + V31_LUMA_CFG_OFF); vfe31_ctrl->update_luma = false; } spin_lock_irqsave(&vfe31_ctrl->update_ack_lock, flags); vfe31_ctrl->update_ack_pending = TRUE; spin_unlock_irqrestore(&vfe31_ctrl->update_ack_lock, flags); /* Ensure the write order while writing to the command register using the barrier */ msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD); return; }
/*
 * Stop the currently selected sync timer by writing its stop bit
 * (bit 16/17/18 for timer 0/1/2) to the sync-timer command register.
 */
static void vfe31_sync_timer_stop(void) { uint32_t value = 0; vfe31_ctrl->sync_timer_state = 0; if (vfe31_ctrl->sync_timer_number == 0) value = 0x10000; else if (vfe31_ctrl->sync_timer_number == 1) value = 0x20000; else if (vfe31_ctrl->sync_timer_number == 2) value = 0x40000; /* Timer Stop */ msm_camera_io_w_mb(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF); }
/*
 * Program and start one of the three flash sync timers from a
 * userspace table laid out as: state, repeat count, timer number,
 * line start, pixel start, pixel duration (converted from time to VFE
 * clock cycles via camio_clk.vfe_clk_rate), output polarity.
 */
static void vfe31_sync_timer_start(const uint32_t *tbl) { /* 'value' becomes the per-timer start bit selected below. */ uint32_t value = 1; uint32_t val; vfe31_ctrl->sync_timer_state = *tbl++; vfe31_ctrl->sync_timer_repeat_count = *tbl++; vfe31_ctrl->sync_timer_number = *tbl++; CDBG("%s timer_state %d, repeat_cnt %d timer number %d\n", __func__, vfe31_ctrl->sync_timer_state, vfe31_ctrl->sync_timer_repeat_count, vfe31_ctrl->sync_timer_number); if (vfe31_ctrl->sync_timer_state) { /* Start Timer */ value = value << vfe31_ctrl->sync_timer_number; } else { /* Stop Timer */ CDBG("Failed to Start timer\n"); return; } /* Timer Start */ msm_camera_io_w(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF); /* Sync Timer Line Start */ value = *tbl++; msm_camera_io_w(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF + 4 + ((vfe31_ctrl->sync_timer_number) * 12)); /* Sync Timer Pixel Start */ value = *tbl++; msm_camera_io_w(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF + 8 + ((vfe31_ctrl->sync_timer_number) * 12)); /* Sync Timer Pixel Duration */ value = *tbl++; val = camio_clk.vfe_clk_rate / 10000; val = 10000000 / val; val = value * 10000 / val; CDBG("%s: Pixel Clk Cycles!!! %d\n", __func__, val); msm_camera_io_w(val, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF + 12 + ((vfe31_ctrl->sync_timer_number) * 12)); /* Timer0 Active High/LOW */ value = *tbl++; msm_camera_io_w(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_POLARITY_OFF); /* Selects sync timer 0 output to drive onto timer1 port */ value = 0; msm_camera_io_w(value, vfe31_ctrl->vfebase + V31_TIMER_SELECT_OFF); wmb(); }
/*
 * Select a DMI RAM bank for subsequent VFE_DMI_DATA_LO accesses and
 * reset the DMI address pointer to 0.
 */
static void vfe31_program_dmi_cfg(enum VFE31_DMI_RAM_SEL bankSel) { /* set bit 8 for auto increment. */ uint32_t value = VFE_DMI_CFG_DEFAULT; value += (uint32_t)bankSel; msm_camera_io_w_mb(value, vfe31_ctrl->vfebase + VFE_DMI_CFG); /* by default, always starts with offset 0.*/ msm_camera_io_w(0, vfe31_ctrl->vfebase + VFE_DMI_ADDR); wmb(); }
/*
 * Write the gamma LUT: each 32-bit table word carries two 16-bit
 * entries which are pushed to the auto-incrementing DMI data port.
 */
static void vfe31_write_gamma_cfg(enum VFE31_DMI_RAM_SEL channel_sel, const uint32_t *tbl) { int i; uint32_t value, value1, value2; vfe31_program_dmi_cfg(channel_sel); /* for loop for extracting init table. */ for (i = 0 ; i < (VFE31_GAMMA_NUM_ENTRIES/2) ; i++) { value = *tbl++; value1 = value & 0x0000FFFF; value2 = (value & 0xFFFF0000)>>16; msm_camera_io_w((value1), vfe31_ctrl->vfebase + VFE_DMI_DATA_LO); msm_camera_io_w((value2), vfe31_ctrl->vfebase + VFE_DMI_DATA_LO); } vfe31_program_dmi_cfg(NO_MEM_SELECTED); }
/* Zero the statistics histogram DMI RAM.
 * NOTE(review): empty parameter list '()' should be '(void)' in C. */
static void vfe31_reset_hist_cfg() { uint32_t i; uint32_t value = 0; vfe31_program_dmi_cfg(STATS_HIST_RAM); for (i = 0 ; i < VFE31_HIST_TABLE_LENGTH ; i++) msm_camera_io_w(value, vfe31_ctrl->vfebase + VFE_DMI_DATA_LO); vfe31_program_dmi_cfg(NO_MEM_SELECTED); }
/*
 * Write the luma-adaptation LUT into the selected DMI bank; same
 * two-16-bit-entries-per-word packing as the gamma table.
 */
static void vfe31_write_la_cfg(enum VFE31_DMI_RAM_SEL channel_sel, const uint32_t *tbl) { uint32_t i; uint32_t value, value1, value2; vfe31_program_dmi_cfg(channel_sel); /* for loop for extracting init table.
*/ for (i = 0 ; i < (VFE31_LA_TABLE_LENGTH/2) ; i++) { value = *tbl++; value1 = value & 0x0000FFFF; value2 = (value & 0xFFFF0000)>>16; msm_camera_io_w((value1), vfe31_ctrl->vfebase + VFE_DMI_DATA_LO); msm_camera_io_w((value2), vfe31_ctrl->vfebase + VFE_DMI_DATA_LO); } vfe31_program_dmi_cfg(NO_MEM_SELECTED); }
/*
 * Central dispatcher for "general" VFE commands from userspace.  For
 * most commands the payload is copied in with copy_from_user() into a
 * kmalloc'd scratch buffer (cmdp, freed at proc_general_done) and then
 * written to the hardware block given by vfe31_cmd[cmd->id].  Several
 * cases additionally merge the new value with live register contents
 * (MCE, demosaic) or stream tables into DMI RAM (roll-off, LA, gamma).
 * Returns 0 on success or a negative errno.
 */
static int vfe31_proc_general(struct msm_vfe31_cmd *cmd) { int i , rc = 0; uint32_t old_val = 0 , new_val = 0; uint32_t *cmdp = NULL; uint32_t *cmdp_local = NULL; uint32_t snapshot_cnt = 0; uint32_t stereo_cam_enable = 0; struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata; CDBG("vfe31_proc_general: cmdID = %s, length = %d\n", vfe31_general_cmd[cmd->id], cmd->length); switch (cmd->id) {
case V31_RESET: pr_info("vfe31_proc_general: cmdID = %s\n", vfe31_general_cmd[cmd->id]); vfe31_reset(); break;
case V31_START: pr_info("vfe31_proc_general: cmdID = %s\n", vfe31_general_cmd[cmd->id]); rc = vfe31_start(); break;
case V31_UPDATE: vfe31_update(); break;
case V31_ZSL: pr_info("vfe31_proc_general: cmdID = %s\n", vfe31_general_cmd[cmd->id]); vfe31_zsl(); break;
case V31_CAPTURE: pr_info("vfe31_proc_general: cmdID = %s\n", vfe31_general_cmd[cmd->id]); if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value), sizeof(uint32_t))) { rc = -EFAULT; goto proc_general_done; } rc = vfe31_capture(snapshot_cnt); break;
case V31_START_RECORDING: pr_info("vfe31_proc_general: cmdID = %s\n", vfe31_general_cmd[cmd->id]); rc = vfe31_start_recording(); if (p_sync->stereocam_enabled) p_sync->stereo_state = STEREO_VIDEO_ACTIVE; break;
case V31_STOP_RECORDING: pr_info("vfe31_proc_general: cmdID = %s\n", vfe31_general_cmd[cmd->id]); rc = vfe31_stop_recording(); if (p_sync->stereocam_enabled) p_sync->stereo_state = STEREO_VIDEO_IDLE; break;
case V31_OPERATION_CFG: { if (cmd->length != V31_OPERATION_CFG_LEN) { rc = -EINVAL; goto proc_general_done; } cmdp = kmalloc(V31_OPERATION_CFG_LEN, GFP_ATOMIC); /* NOTE(review): unlike every other case below, this kmalloc result * is not NULL-checked before copy_from_user(). */ if (copy_from_user(cmdp, (void __user *)(cmd->value), V31_OPERATION_CFG_LEN)) { rc = -EFAULT; goto proc_general_done; } rc = vfe31_operation_config(cmdp); } break;
case V31_STATS_AE_START: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val |= AE_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp, (vfe31_cmd[cmd->id].length)); } break;
case V31_STATS_AF_START: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val |= AF_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp, (vfe31_cmd[cmd->id].length)); } break;
case V31_STATS_AWB_START: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val |= AWB_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp, (vfe31_cmd[cmd->id].length)); } break;
case V31_STATS_IHIST_START: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val |= IHIST_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp, (vfe31_cmd[cmd->id].length)); } break;
case V31_XBAR_CFG: { unsigned long flags = 0; spin_lock_irqsave(&vfe31_ctrl->xbar_lock, flags); if ((cmd->length != V31_XBAR_CFG_LEN) || vfe31_ctrl->xbar_update_pending) { rc = -EINVAL; spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags); goto proc_general_done; } spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags); cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } spin_lock_irqsave(&vfe31_ctrl->xbar_lock, flags); vfe31_ctrl->xbar_cfg[0] = *cmdp; vfe31_ctrl->xbar_cfg[1] = *(cmdp+1); vfe31_ctrl->xbar_update_pending = 1; spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags); CDBG("%s: xbar0 0x%x xbar1 0x%x", __func__, vfe31_ctrl->xbar_cfg[0], vfe31_ctrl->xbar_cfg[1]); } break;
case V31_STATS_RS_START: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp, (vfe31_cmd[cmd->id].length)); } break;
case V31_STATS_CS_START: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp, (vfe31_cmd[cmd->id].length)); } break;
case V31_MCE_UPDATE: case V31_MCE_CFG:{ cmdp = kmalloc(cmd->length, GFP_ATOMIC); /* Incrementing with 4 so as to point to the 2nd Register as the 2nd register has the mce_enable bit */ old_val = msm_camera_io_r(vfe31_ctrl->vfebase + V31_CHROMA_SUP_OFF + 4); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } cmdp_local = cmdp; new_val = *cmdp_local; old_val &= MCE_EN_MASK; new_val = new_val | old_val; msm_camera_io_memcpy( vfe31_ctrl->vfebase + V31_CHROMA_SUP_OFF + 4, &new_val, 4); cmdp_local += 1; old_val = msm_camera_io_r(vfe31_ctrl->vfebase + V31_CHROMA_SUP_OFF + 8); new_val = *cmdp_local; old_val &= MCE_Q_K_MASK; new_val = new_val | old_val; msm_camera_io_memcpy( vfe31_ctrl->vfebase + V31_CHROMA_SUP_OFF + 8, &new_val, 4); cmdp_local += 1; msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp_local, (vfe31_cmd[cmd->id].length)); } break;
case V31_DEMOSAIC_2_UPDATE: /* 38 BPC update */ case V31_DEMOSAIC_2_CFG: { /* 14 BPC config */ cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } cmdp_local = cmdp; new_val = *cmdp_local; old_val = msm_camera_io_r( vfe31_ctrl->vfebase + V31_DEMOSAIC_0_OFF); old_val &= BPC_MASK; new_val = new_val | old_val; *cmdp_local = new_val; msm_camera_io_memcpy(vfe31_ctrl->vfebase + V31_DEMOSAIC_0_OFF, cmdp_local, 4); cmdp_local += 1; msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp_local, (vfe31_cmd[cmd->id].length)); } break;
case V31_DEMOSAIC_1_UPDATE:/* 37 ABF update */ case V31_DEMOSAIC_1_CFG: { /* 13 ABF config */ cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } cmdp_local = cmdp; new_val = *cmdp_local; old_val = msm_camera_io_r( vfe31_ctrl->vfebase + V31_DEMOSAIC_0_OFF); old_val &= ABF_MASK; new_val = new_val | old_val; *cmdp_local = new_val; msm_camera_io_memcpy(vfe31_ctrl->vfebase + V31_DEMOSAIC_0_OFF, cmdp_local, 4); cmdp_local += 1; msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp_local, (vfe31_cmd[cmd->id].length)); } break;
case V31_ROLL_OFF_CFG: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value) , cmd->length)) { rc = -EFAULT; goto proc_general_done; } cmdp_local = cmdp; msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp_local, 16); cmdp_local += 4; vfe31_program_dmi_cfg(ROLLOFF_RAM); /* for loop for extrcting init table. */ for (i = 0 ; i < (VFE31_ROLL_OFF_INIT_TABLE_SIZE * 2) ; i++) { msm_camera_io_w(*cmdp_local , vfe31_ctrl->vfebase + VFE_DMI_DATA_LO); cmdp_local++; } CDBG("done writing init table\n"); /* by default, always starts with offset 0. */ msm_camera_io_w(LENS_ROLL_OFF_DELTA_TABLE_OFFSET, vfe31_ctrl->vfebase + VFE_DMI_ADDR); /* for loop for extracting delta table. */ for (i = 0 ; i < (VFE31_ROLL_OFF_DELTA_TABLE_SIZE * 2) ; i++) { msm_camera_io_w(*cmdp_local, vfe31_ctrl->vfebase + VFE_DMI_DATA_LO); cmdp_local++; } vfe31_program_dmi_cfg(NO_MEM_SELECTED); } break;
case V31_LA_CFG:{ cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } /* Select Bank 0*/ *cmdp = 0; msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp, (vfe31_cmd[cmd->id].length)); cmdp += 1; vfe31_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0 , cmdp); cmdp -= 1; } break;
case V31_LA_UPDATE: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } old_val = msm_camera_io_r( vfe31_ctrl->vfebase + V31_LUMA_CFG_OFF); cmdp += 1; /* write the new LUT into the inactive bank; vfe31_update() flips it */ if (old_val != 0x0) vfe31_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0 , cmdp); else vfe31_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK1 , cmdp); vfe31_ctrl->update_luma = true; cmdp -= 1; } break;
case V31_SK_ENHAN_CFG: case V31_SK_ENHAN_UPDATE:{ cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } msm_camera_io_memcpy(vfe31_ctrl->vfebase + V31_SCE_OFF, cmdp, V31_SCE_LEN); } break;
case V31_LIVESHOT: vfe31_liveshot(); break;
case V31_STEREOCAM: if (copy_from_user(&stereo_cam_enable, (void __user *)(cmd->value), sizeof(uint32_t))) { rc = -EFAULT; goto proc_general_done; } vfe31_stereocam(stereo_cam_enable); break;
case V31_RGB_G_CFG: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } /* Select Bank 0*/ *cmdp = 0; msm_camera_io_memcpy(vfe31_ctrl->vfebase + V31_RGB_G_OFF, cmdp, 4); cmdp += 1; vfe31_write_gamma_cfg(RGBLUT_CHX_BANK0, cmdp); cmdp -= 1; } break;
case V31_RGB_G_UPDATE: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } old_val = msm_camera_io_r( vfe31_ctrl->vfebase + V31_GAMMA_CFG_OFF); cmdp += 1; /* write the new LUT into the inactive bank; vfe31_update() flips it */ if (!old_val) { vfe31_write_gamma_cfg(RGBLUT_CHX_BANK1, cmdp); } else { vfe31_write_gamma_cfg(RGBLUT_CHX_BANK0, cmdp); } vfe31_ctrl->update_gamma = true; cmdp -= 1; } break;
case V31_STATS_AWB_STOP: { old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val &= ~AWB_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); } break;
case V31_STATS_AE_STOP: { old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val &= ~AE_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); } break;
case V31_STATS_AF_STOP: { old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val &= ~AF_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); } break;
case V31_STATS_IHIST_STOP: { old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val &= ~IHIST_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); } break;
case V31_STATS_RS_STOP: { old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val &= ~RS_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); } break;
case V31_STATS_CS_STOP: { old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val &= ~CS_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); } break;
case V31_STOP: pr_info("vfe31_proc_general: cmdID = %s\n", vfe31_general_cmd[cmd->id]); vfe31_stop(); break;
case V31_SYNC_TIMER_SETTING: cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } vfe31_sync_timer_start(cmdp); break;
case V31_EZTUNE_CFG: { cmdp = kmalloc(cmd->length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)(cmd->value), cmd->length)) { rc = -EFAULT; goto proc_general_done; } /* preserve the live stats-enable bits, take the rest from userspace */ *cmdp &= ~STATS_ENABLE_MASK; old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val &= STATS_ENABLE_MASK; *cmdp |= old_val; msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp, (vfe31_cmd[cmd->id].length)); } break;
default: { /* generic path: copy the payload straight into the block's registers. * NOTE(review): this 'return' bypasses proc_general_done; harmless * today because cmdp is still NULL here, but fragile. */ if (cmd->length != vfe31_cmd[cmd->id].length) return -EINVAL; cmdp = kmalloc(vfe31_cmd[cmd->id].length, GFP_ATOMIC); if (!cmdp) { rc = -ENOMEM; goto proc_general_done; } if (copy_from_user(cmdp, (void __user *)cmd->value, cmd->length)) { rc = -EFAULT; pr_err("%s copy from user failed for cmd %d", __func__, cmd->id); goto proc_general_done; } msm_camera_io_memcpy( vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset, cmdp, (vfe31_cmd[cmd->id].length)); } break; } proc_general_done: kfree(cmdp); return rc; } static
/* Userspace ack for AF stats: latch the next AF buffer address for the
 * ISR to hand to the hardware, and clear the ack-pending flag. */
void vfe31_stats_af_ack(struct vfe_cmd_stats_ack *pAck)
{
	vfe31_ctrl->afStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
	vfe31_ctrl->af_ack_pending = FALSE;
}

/* Userspace ack for AWB stats; same pattern as vfe31_stats_af_ack(). */
static void vfe31_stats_awb_ack(struct vfe_cmd_stats_ack *pAck)
{
	vfe31_ctrl->awbStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
	vfe31_ctrl->awb_ack_pending = FALSE;
}

/* Userspace ack for AEC stats. */
static void vfe31_stats_aec_ack(struct vfe_cmd_stats_ack *pAck)
{
	vfe31_ctrl->aecStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
	vfe31_ctrl->aec_ack_pending = FALSE;
}

/* Userspace ack for IHIST stats. */
static void vfe31_stats_ihist_ack(struct vfe_cmd_stats_ack *pAck)
{
	vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
	vfe31_ctrl->ihist_ack_pending = FALSE;
}

/* Userspace ack for RS stats. */
static void vfe31_stats_rs_ack(struct vfe_cmd_stats_ack *pAck)
{
	vfe31_ctrl->rsStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
	vfe31_ctrl->rs_ack_pending = FALSE;
}

/* Userspace ack for CS stats. */
static void vfe31_stats_cs_ack(struct vfe_cmd_stats_ack *pAck)
{
	vfe31_ctrl->csStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
	vfe31_ctrl->cs_ack_pending = FALSE;
}

/*
 * vfe31_config - top-level dispatcher for VFE31 configuration commands.
 *
 * @cmd:  command header (cmd_type selects the operation; cmd->value is a
 *        userspace pointer for commands that carry a payload).
 * @data: kernel-side argument whose meaning depends on cmd_type (axidata
 *        for AXI/stats-enable commands, buffer address for releases).
 *
 * Buffer-release commands carry no msm_vfe31_cmd payload; every other
 * command first copies the command header in from userspace.  Scratch
 * allocations (scfg for stats-enable, sack for stats-release) are freed
 * at the common vfe31_config_done exit.  Returns 0 or a negative errno.
 */
static int vfe31_config(struct msm_vfe_cfg_cmd *cmd, void *data)
{
	struct msm_vfe31_cmd vfecmd;
	long rc = 0;
	uint32_t i = 0;
	struct vfe_cmd_stats_buf *scfg = NULL;
	struct msm_pmem_region *regptr = NULL;
	struct vfe_cmd_stats_ack *sack = NULL;
	if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
		cmd->cmd_type != CMD_SNAP_BUF_RELEASE &&
		cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
		cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
		cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
		cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
		cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
		cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
		if (copy_from_user(&vfecmd,
				(void __user *)(cmd->value),
				sizeof(vfecmd))) {
			pr_err("%s %d: copy_from_user failed\n", __func__,
				__LINE__);
			return -EFAULT;
		}
	} else {
		/* here either stats release or frame release. */
		if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
			cmd->cmd_type != CMD_SNAP_BUF_RELEASE) {
			/* then must be stats release.
*/ if (!data) return -EFAULT; sack = kmalloc(sizeof(struct vfe_cmd_stats_ack), GFP_ATOMIC); if (!sack) return -ENOMEM; sack->nextStatsBuf = *(uint32_t *)data; } } CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type); if ((cmd->cmd_type == CMD_STATS_AF_ENABLE) || (cmd->cmd_type == CMD_STATS_AWB_ENABLE) || (cmd->cmd_type == CMD_STATS_IHIST_ENABLE) || (cmd->cmd_type == CMD_STATS_RS_ENABLE) || (cmd->cmd_type == CMD_STATS_CS_ENABLE) || (cmd->cmd_type == CMD_STATS_AEC_ENABLE)) { struct axidata *axid; axid = data; if (!axid) { rc = -EFAULT; goto vfe31_config_done; } scfg = kmalloc(sizeof(struct vfe_cmd_stats_buf), GFP_ATOMIC); if (!scfg) { rc = -ENOMEM; goto vfe31_config_done; } regptr = axid->region; if (axid->bufnum1 > 0) { for (i = 0; i < axid->bufnum1; i++) { scfg->statsBuf[i] = (uint32_t)(regptr->paddr); regptr++; } } /* individual */ switch (cmd->cmd_type) { case CMD_STATS_AEC_ENABLE: rc = vfe_stats_aec_buf_init(scfg); break; case CMD_STATS_AF_ENABLE: rc = vfe_stats_af_buf_init(scfg); break; case CMD_STATS_AWB_ENABLE: rc = vfe_stats_awb_buf_init(scfg); break; case CMD_STATS_IHIST_ENABLE: rc = vfe_stats_ihist_buf_init(scfg); break; case CMD_STATS_RS_ENABLE: rc = vfe_stats_rs_buf_init(scfg); break; case CMD_STATS_CS_ENABLE: rc = vfe_stats_cs_buf_init(scfg); break; } } switch (cmd->cmd_type) { case CMD_GENERAL: rc = vfe31_proc_general(&vfecmd); break; case CMD_FRAME_BUF_RELEASE: { struct msm_frame *b; unsigned long p; int ret; struct vfe31_output_ch *outch = NULL; if (!data) { rc = -EFAULT; break; } b = (struct msm_frame *)(cmd->value); p = *(unsigned long *)data; CDBG("CMD_FRAME_BUF_RELEASE b->path = %d\n", b->path); if (b->path & OUTPUT_TYPE_P) { CDBG("CMD_FRAME_BUF_RELEASE got free buffer\n"); outch = &vfe31_ctrl->outpath.out0; } else if (b->path & OUTPUT_TYPE_S) { outch = &vfe31_ctrl->outpath.out1; } else if (b->path & OUTPUT_TYPE_V) { outch = &vfe31_ctrl->outpath.out2; } else { rc = -EFAULT; break; } ret = vfe31_add_free_buf2(outch, p, b->planar0_off, 
b->planar1_off, b->planar2_off); if (ret < 0) return ret; break; } case CMD_SNAP_BUF_RELEASE: { struct msm_frame *b; unsigned long p; int ret; struct vfe31_output_ch *outch = NULL; if (!data) return -EFAULT; b = (struct msm_frame *)(cmd->value); p = *(unsigned long *)data; CDBG("CMD_PIC_BUF_RELEASE b->path = %d\n", b->path); if (b->path & OUTPUT_TYPE_T) { CDBG("CMD_FRAME_BUF_RELEASE got free buffer\n"); outch = &vfe31_ctrl->outpath.out1; } else if (b->path & OUTPUT_TYPE_S) { outch = &vfe31_ctrl->outpath.out2; } else return -EFAULT; ret = vfe31_add_free_buf2(outch, p, b->planar0_off, b->planar1_off, b->planar2_off); if (ret < 0) return ret; break; } case CMD_STATS_AEC_BUF_RELEASE: vfe31_stats_aec_ack(sack); break; case CMD_STATS_AF_BUF_RELEASE: vfe31_stats_af_ack(sack); break; case CMD_STATS_AWB_BUF_RELEASE: vfe31_stats_awb_ack(sack); break; case CMD_STATS_IHIST_BUF_RELEASE: vfe31_stats_ihist_ack(sack); break; case CMD_STATS_RS_BUF_RELEASE: vfe31_stats_rs_ack(sack); break; case CMD_STATS_CS_BUF_RELEASE: vfe31_stats_cs_ack(sack); break; case CMD_AXI_CFG_PREVIEW: { struct axidata *axid; uint32_t *axio = NULL; axid = data; if (!axid) { rc = -EFAULT; break; } axio = kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length, GFP_ATOMIC); if (!axio) { rc = -ENOMEM; break; } if (copy_from_user(axio, (void __user *)(vfecmd.value), vfe31_cmd[V31_AXI_OUT_CFG].length)) { kfree(axio); rc = -EFAULT; break; } vfe31_config_axi(OUTPUT_2, axid, axio); kfree(axio); break; } case CMD_RAW_PICT_AXI_CFG: { struct axidata *axid; uint32_t *axio = NULL; axid = data; if (!axid) { rc = -EFAULT; break; } axio = kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length, GFP_ATOMIC); if (!axio) { rc = -ENOMEM; break; } if (copy_from_user(axio, (void __user *)(vfecmd.value), vfe31_cmd[V31_AXI_OUT_CFG].length)) { kfree(axio); rc = -EFAULT; break; } vfe31_config_axi(CAMIF_TO_AXI_VIA_OUTPUT_2, axid, axio); kfree(axio); break; } case CMD_AXI_CFG_SNAP: { struct axidata *axid; uint32_t *axio = NULL; CDBG("%s, CMD_AXI_CFG_SNAP\n", 
__func__); axid = data; if (!axid) return -EFAULT; axio = kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length, GFP_ATOMIC); if (!axio) { rc = -ENOMEM; break; } if (copy_from_user(axio, (void __user *)(vfecmd.value), vfe31_cmd[V31_AXI_OUT_CFG].length)) { kfree(axio); rc = -EFAULT; break; } vfe31_config_axi(OUTPUT_1_AND_2, axid, axio); kfree(axio); break; } case CMD_AXI_CFG_ZSL: { struct axidata *axid; uint32_t *axio = NULL; CDBG("%s, CMD_AXI_CFG_ZSL\n", __func__); axid = data; if (!axid) return -EFAULT; axio = kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length, GFP_ATOMIC); if (!axio) { rc = -ENOMEM; break; } if (copy_from_user(axio, (void __user *)(vfecmd.value), vfe31_cmd[V31_AXI_OUT_CFG].length)) { kfree(axio); rc = -EFAULT; break; } vfe31_config_axi(OUTPUT_1_2_AND_3, axid, axio); kfree(axio); } break; case CMD_AXI_CFG_ZSL_ALL_CHNLS: { struct axidata *axid; uint32_t *axio; CDBG("%s, CMD_AXI_CFG_ZSL\n", __func__); axid = data; if (!axid) return -EFAULT; axio = kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length, GFP_ATOMIC); if (!axio) { rc = -ENOMEM; break; } if (copy_from_user(axio, (void __user *)(vfecmd.value), vfe31_cmd[V31_AXI_OUT_CFG].length)) { kfree(axio); rc = -EFAULT; break; } vfe31_config_axi(OUTPUT_ZSL_ALL_CHNLS, axid, axio); kfree(axio); } break; case CMD_AXI_CFG_VIDEO: { struct axidata *axid; uint32_t *axio = NULL; axid = data; if (!axid) { rc = -EFAULT; break; } axio = kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length, GFP_ATOMIC); if (!axio) { rc = -ENOMEM; break; } if (copy_from_user(axio, (void __user *)(vfecmd.value), vfe31_cmd[V31_AXI_OUT_CFG].length)) { kfree(axio); rc = -EFAULT; break; } vfe31_config_axi(OUTPUT_1_AND_3, axid, axio); kfree(axio); break; } case CMD_AXI_CFG_VIDEO_ALL_CHNLS: { struct axidata *axid; uint32_t *axio = NULL; axid = data; if (!axid) { rc = -EFAULT; break; } axio = kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length, GFP_ATOMIC); if (!axio) { rc = -ENOMEM; break; } if (copy_from_user(axio, (void __user *)(vfecmd.value), vfe31_cmd[V31_AXI_OUT_CFG].length)) { 
kfree(axio); rc = -EFAULT; break; } vfe31_config_axi(OUTPUT_VIDEO_ALL_CHNLS, axid, axio); kfree(axio); break; } default: break; } vfe31_config_done: kfree(scfg); kfree(sack); CDBG("%s done: rc = %d\n", __func__, (int) rc); return rc; } static void vfe31_send_msg_no_payload(enum VFE31_MESSAGE_ID id) { struct vfe_message msg; CDBG("vfe31_send_msg_no_payload\n"); msg._d = id; vfe31_proc_ops(id, &msg, 0); } static void vfe31_process_reg_update_irq(void) { uint32_t temp, old_val; unsigned long flags; if (vfe31_ctrl->recording_state == VFE_REC_STATE_START_REQUESTED) { if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_V) { msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch0]); msm_camera_io_w(1, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch1]); } vfe31_ctrl->recording_state = VFE_REC_STATE_STARTED; if (vpe_ctrl->dis_en) { old_val = msm_camera_io_r( vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val |= RS_CS_ENABLE_MASK; msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); } msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD); CDBG("start video triggered .\n"); } else if (vfe31_ctrl->recording_state == VFE_REC_STATE_STOP_REQUESTED) { if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_V) { msm_camera_io_w(0, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch0]); msm_camera_io_w(0, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch1]); } /*disable rs& cs when stop recording. 
*/ old_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG); old_val &= (~RS_CS_ENABLE_MASK); msm_camera_io_w(old_val, vfe31_ctrl->vfebase + VFE_MODULE_CFG); CDBG("stop video triggered\n"); } if (vfe31_ctrl->start_ack_pending == TRUE) { vfe31_send_msg_no_payload(MSG_ID_START_ACK); vfe31_ctrl->start_ack_pending = FALSE; } else { if (vfe31_ctrl->recording_state == VFE_REC_STATE_STOP_REQUESTED) { vfe31_ctrl->recording_state = VFE_REC_STATE_STOPPED; msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD); } else if (vfe31_ctrl->recording_state == VFE_REC_STATE_STOPPED) { CDBG("sent stop video rec ACK"); vfe31_send_msg_no_payload(MSG_ID_STOP_REC_ACK); vfe31_ctrl->recording_state = VFE_REC_STATE_IDLE; } spin_lock_irqsave(&vfe31_ctrl->update_ack_lock, flags); if (vfe31_ctrl->update_ack_pending == TRUE) { vfe31_ctrl->update_ack_pending = FALSE; spin_unlock_irqrestore( &vfe31_ctrl->update_ack_lock, flags); vfe31_send_msg_no_payload(MSG_ID_UPDATE_ACK); } else { spin_unlock_irqrestore( &vfe31_ctrl->update_ack_lock, flags); } } /* in snapshot mode */ if (vfe31_ctrl->operation_mode == VFE_MODE_OF_OPERATION_SNAPSHOT) { /* later we need to add check for live snapshot mode. 
*/ if (vfe31_ctrl->vfe_capture_count) vfe31_ctrl->vfe_capture_count--; /* if last frame to be captured: */ if (vfe31_ctrl->vfe_capture_count == 0) { /* stop the bus output: write master enable = 0*/ if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_PT) { msm_camera_io_w(0, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[ vfe31_ctrl->outpath.out0.ch0]); msm_camera_io_w(0, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl-> outpath.out0.ch1]); } if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) { msm_camera_io_w(0, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl-> outpath.out1.ch0]); msm_camera_io_w(0, vfe31_ctrl->vfebase + vfe31_AXI_WM_CFG[vfe31_ctrl-> outpath.out1.ch1]); } /* Ensure the write order while writing to the command register using the barrier */ msm_camera_io_w_mb(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY, vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND); /* Ensure the read order while reading to the command register using the barrier */ temp = msm_camera_io_r_mb(vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND); } /* then do reg_update. */ msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD); } /* if snapshot mode. 
 */
}

/*
 * vfe31_set_default_reg_values - program a known-good default register
 * set: demux gains, CGC override, frame-drop period/pattern for both
 * encoder and viewfinder paths, output clamp limits, and the stats bus
 * UB configuration.  Called from the reset-IRQ path below.
 * NOTE(review): the hex constants are hardware-specific defaults taken
 * as-is; their bit-field meanings are not visible in this file.
 */
static void vfe31_set_default_reg_values(void)
{
	msm_camera_io_w(0x800080, vfe31_ctrl->vfebase + VFE_DEMUX_GAIN_0);
	msm_camera_io_w(0x800080, vfe31_ctrl->vfebase + VFE_DEMUX_GAIN_1);
	msm_camera_io_w(0xFFFFF, vfe31_ctrl->vfebase + VFE_CGC_OVERRIDE);
	/* default frame drop period and pattern */
	msm_camera_io_w(0x1f, vfe31_ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_CFG);
	msm_camera_io_w(0x1f,
		vfe31_ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_CFG);
	msm_camera_io_w(0xFFFFFFFF,
		vfe31_ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_PATTERN);
	msm_camera_io_w(0xFFFFFFFF,
		vfe31_ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_PATTERN);
	msm_camera_io_w(0x1f, vfe31_ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y);
	msm_camera_io_w(0x1f, vfe31_ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR);
	msm_camera_io_w(0xFFFFFFFF,
		vfe31_ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y_PATTERN);
	msm_camera_io_w(0xFFFFFFFF,
		vfe31_ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR_PATTERN);
	msm_camera_io_w(0, vfe31_ctrl->vfebase + VFE_CLAMP_MIN);
	msm_camera_io_w(0xFFFFFF, vfe31_ctrl->vfebase + VFE_CLAMP_MAX);
	/* stats UB config */
	msm_camera_io_w(0x3980007,
		vfe31_ctrl->vfebase + VFE_BUS_STATS_AEC_UB_CFG);
	msm_camera_io_w(0x3A00007,
		vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_UB_CFG);
	msm_camera_io_w(0x3A8000F,
		vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_UB_CFG);
	msm_camera_io_w(0x3B80007,
		vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_UB_CFG);
	msm_camera_io_w(0x3C0001F,
		vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_UB_CFG);
	msm_camera_io_w(0x3E0001F,
		vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_UB_CFG);
}

/*
 * vfe31_process_reset_irq - handle the VFE reset-complete interrupt.
 * Marks the VFE stopped; if a stop command is pending, completes it
 * with MSG_ID_STOP_ACK.  Otherwise (plain reset) reprograms default
 * register values, reloads all bus write masters and sends
 * MSG_ID_RESET_ACK.
 */
static void vfe31_process_reset_irq(void)
{
	atomic_set(&vfe31_ctrl->vstate, 0);
	vfe31_ctrl->while_stopping_mask = VFE_IMASK_WHILE_STOPPING_1;
	if (atomic_read(&vfe31_ctrl->stop_ack_pending)) {
		/* this is from the stop command. */
		atomic_set(&vfe31_ctrl->stop_ack_pending, 0);
		vfe31_send_msg_no_payload(MSG_ID_STOP_ACK);
	} else {
		/* this is from reset command. */
		vfe31_set_default_reg_values();
		/* reload all write masters. (frame & line)*/
		msm_camera_io_w_mb(0x7FFF, vfe31_ctrl->vfebase + VFE_BUS_CMD);
		vfe31_send_msg_no_payload(MSG_ID_RESET_ACK);
	}
}

/*
 * vfe31_process_axi_halt_irq - handle AXI halt completion.
 * Clears the halt, masks and clears every VFE interrupt, re-enables
 * only the halt/reset interrupt group, then issues the
 * reset-upon-stop command to the VFE core.  The barriered writes
 * (msm_camera_io_w_mb) pin the ordering of the command-register
 * accesses.
 */
static void vfe31_process_axi_halt_irq(void)
{
	/* Ensure the write order while writing to
	 * the command register using the barrier. */
	msm_camera_io_w_mb(AXI_HALT_CLEAR,
		vfe31_ctrl->vfebase + VFE_AXI_CMD);
	vfe31_ctrl->while_stopping_mask = VFE_IMASK_RESET;
	/* disable all interrupts. */
	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
		vfe31_ctrl->vfebase + VFE_IRQ_MASK_0);
	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
		vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
	/* clear all pending interrupts */
	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
		vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_0);
	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
		vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_1);
	/* Ensure the write order while writing to
	 * the command register using the barrier. */
	msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_IRQ_CMD);
	/* now enable only halt_irq & reset_irq */
	msm_camera_io_w(0xf0000000, /* this is for async timer. */
		vfe31_ctrl->vfebase + VFE_IRQ_MASK_0);
	msm_camera_io_w(VFE_IMASK_RESET,
		vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
	/* Ensure the write order while writing to
	 * the command register using the barrier. */
	CDBG("%s: about to reset vfe...\n", __func__);
	msm_camera_io_w_mb(VFE_RESET_UPON_STOP_CMD,
		vfe31_ctrl->vfebase + VFE_GLOBAL_RESET);
}

/*
 * vfe31_process_camif_sof_irq - start-of-frame interrupt handling.
 * In raw-snapshot mode it acks a pending start, counts down the
 * remaining captures, and stops CAMIF at the frame boundary when the
 * last frame has been taken (continues past this chunk boundary).
 */
static void vfe31_process_camif_sof_irq(void)
{
	uint32_t temp;
	/* in raw snapshot mode */
	if (vfe31_ctrl->operation_mode ==
		VFE_MODE_OF_OPERATION_RAW_SNAPSHOT) {
		if (vfe31_ctrl->start_ack_pending) {
			vfe31_send_msg_no_payload(MSG_ID_START_ACK);
			vfe31_ctrl->start_ack_pending = FALSE;
		}
		if (vfe31_ctrl->vfe_capture_count)
			vfe31_ctrl->vfe_capture_count--;
		/* if last frame to be captured: */
		if (vfe31_ctrl->vfe_capture_count == 0) {
			/* Ensure the write order while writing to
			 * the command register using the barrier. */
			msm_camera_io_w_mb(
				CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
				vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND);
			temp = msm_camera_io_r_mb(vfe31_ctrl->vfebase +
VFE_CAMIF_COMMAND); } } /* if raw snapshot mode. */ if ((vfe31_ctrl->hfr_mode != HFR_MODE_OFF) && (vfe31_ctrl->operation_mode == VFE_MODE_OF_OPERATION_VIDEO) && (vfe31_ctrl->vfeFrameId % vfe31_ctrl->hfr_mode != 0)) { vfe31_ctrl->vfeFrameId++; CDBG("Skip the SOF notification when HFR enabled\n"); return; } vfe31_send_msg_no_payload(MSG_ID_SOF_ACK); vfe31_ctrl->vfeFrameId++; CDBG("camif_sof_irq, frameId = %d\n", vfe31_ctrl->vfeFrameId); if (vfe31_ctrl->sync_timer_state) { if (vfe31_ctrl->sync_timer_repeat_count == 0) vfe31_sync_timer_stop(); else vfe31_ctrl->sync_timer_repeat_count--; } } static void vfe31_process_error_irq(uint32_t errStatus) { uint32_t camifStatus, read_val; uint32_t *temp; if (errStatus & VFE31_IMASK_CAMIF_ERROR) { pr_err("vfe31_irq: camif errors\n"); temp = (uint32_t *)(vfe31_ctrl->vfebase + VFE_CAMIF_STATUS); camifStatus = msm_camera_io_r(temp); pr_err("camifStatus = 0x%x\n", camifStatus); vfe31_send_msg_no_payload(MSG_ID_CAMIF_ERROR); } if (errStatus & VFE31_IMASK_STATS_CS_OVWR) pr_err("vfe31_irq: stats cs overwrite\n"); if (errStatus & VFE31_IMASK_STATS_IHIST_OVWR) pr_err("vfe31_irq: stats ihist overwrite\n"); if (errStatus & VFE31_IMASK_REALIGN_BUF_Y_OVFL) pr_err("vfe31_irq: realign bug Y overflow\n"); if (errStatus & VFE31_IMASK_REALIGN_BUF_CB_OVFL) pr_err("vfe31_irq: realign bug CB overflow\n"); if (errStatus & VFE31_IMASK_REALIGN_BUF_CR_OVFL) pr_err("vfe31_irq: realign bug CR overflow\n"); if (errStatus & VFE31_IMASK_VIOLATION) pr_err("vfe31_irq: violation interrupt\n"); if (errStatus & VFE31_IMASK_IMG_MAST_0_BUS_OVFL) pr_err("vfe31_irq: image master 0 bus overflow\n"); if (errStatus & VFE31_IMASK_IMG_MAST_1_BUS_OVFL) pr_err("vfe31_irq: image master 1 bus overflow\n"); if (errStatus & VFE31_IMASK_IMG_MAST_2_BUS_OVFL) pr_err("vfe31_irq: image master 2 bus overflow\n"); if (errStatus & VFE31_IMASK_IMG_MAST_3_BUS_OVFL) pr_err("vfe31_irq: image master 3 bus overflow\n"); if (errStatus & VFE31_IMASK_IMG_MAST_4_BUS_OVFL) pr_err("vfe31_irq: image 
master 4 bus overflow\n"); if (errStatus & VFE31_IMASK_IMG_MAST_5_BUS_OVFL) pr_err("vfe31_irq: image master 5 bus overflow\n"); if (errStatus & VFE31_IMASK_IMG_MAST_6_BUS_OVFL) pr_err("vfe31_irq: image master 6 bus overflow\n"); if (errStatus & VFE31_IMASK_STATS_AE_BUS_OVFL) pr_err("vfe31_irq: ae stats bus overflow\n"); if (errStatus & VFE31_IMASK_STATS_AF_BUS_OVFL) pr_err("vfe31_irq: af stats bus overflow\n"); if (errStatus & VFE31_IMASK_STATS_AWB_BUS_OVFL) pr_err("vfe31_irq: awb stats bus overflow\n"); if (errStatus & VFE31_IMASK_STATS_RS_BUS_OVFL) pr_err("vfe31_irq: rs stats bus overflow\n"); if (errStatus & VFE31_IMASK_STATS_CS_BUS_OVFL) pr_err("vfe31_irq: cs stats bus overflow\n"); if (errStatus & VFE31_IMASK_STATS_IHIST_BUS_OVFL) pr_err("vfe31_irq: ihist stats bus overflow\n"); if (errStatus & VFE31_IMASK_STATS_SKIN_BUS_OVFL) pr_err("vfe31_irq: skin stats bus overflow\n"); if (errStatus & VFE31_IMASK_AXI_ERROR) { pr_err("vfe31_irq: axi error\n"); /* read status too when overflow happens.*/ read_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_BUS_PING_PONG_STATUS); pr_debug("VFE_BUS_PING_PONG_STATUS = 0x%x\n", read_val); read_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_BUS_OPERATION_STATUS); pr_debug("VFE_BUS_OPERATION_STATUS = 0x%x\n", read_val); read_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_0); pr_debug("VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_0 = 0x%x\n", read_val); read_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_1); pr_debug("VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_1 = 0x%x\n", read_val); read_val = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_AXI_STATUS); pr_debug("VFE_AXI_STATUS = 0x%x\n", read_val); } } #define VFE31_AXI_OFFSET 0x0050 #define vfe31_get_ch_ping_addr(chn) \ (msm_camera_io_r(vfe31_ctrl->vfebase + 0x0050 + 0x18 * (chn))) #define vfe31_get_ch_pong_addr(chn) \ (msm_camera_io_r(vfe31_ctrl->vfebase + 0x0050 + 0x18 * (chn) + 4)) #define vfe31_get_ch_addr(ping_pong, chn) \ 
(((ping_pong) & (1 << (chn))) == 0 ? \ vfe31_get_ch_pong_addr(chn) : vfe31_get_ch_ping_addr(chn)) #define vfe31_put_ch_ping_addr(chn, addr) \ (msm_camera_io_w((addr), vfe31_ctrl->vfebase + 0x0050 + 0x18 * (chn))) #define vfe31_put_ch_pong_addr(chn, addr) \ (msm_camera_io_w((addr), \ vfe31_ctrl->vfebase + 0x0050 + 0x18 * (chn) + 4)) #define vfe31_put_ch_addr(ping_pong, chn, addr) \ (((ping_pong) & (1 << (chn))) == 0 ? \ vfe31_put_ch_pong_addr((chn), (addr)) : \ vfe31_put_ch_ping_addr((chn), (addr))) static void vfe31_process_output_path_irq_0(uint32_t ping_pong) { uint32_t p0_addr, p1_addr, p2_addr; #ifdef CONFIG_MSM_CAMERA_V4L2 uint32_t pyaddr_ping, pcbcraddr_ping, pyaddr_pong, pcbcraddr_pong; #endif struct vfe31_free_buf *free_buf = NULL; /* we render frames in the following conditions: 1. Continuous mode and the free buffer is avaialable. */ if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P_ALL_CHNLS) { if (!(((ping_pong & PINGPONG_LOWER) == PINGPONG_LOWER) || ((ping_pong & PINGPONG_LOWER) == 0x0))) { pr_err(" Irq_2 - skip the frame pp_status is not proper" "PP_status = 0x%x\n", ping_pong); return; } } free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out0); if (free_buf) { /* Y channel */ p0_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out0.ch0); /* Chroma channel */ p1_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out0.ch1); if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P_ALL_CHNLS) { p2_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out0.ch2); } else { p2_addr = p0_addr; } CDBG("Output path 0, p0_addr = 0x%x, p1_addr = 0x%x," "p2_addr = 0x%x\n", p0_addr, p1_addr, p2_addr); /* Y channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out0.ch0, free_buf->paddr + free_buf->planar0_off); /* Chroma channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out0.ch1, free_buf->paddr + free_buf->planar1_off); if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P_ALL_CHNLS) vfe31_put_ch_addr(ping_pong, 
vfe31_ctrl->outpath.out0.ch2, free_buf->paddr + free_buf->planar2_off); kfree(free_buf); /* if continuous mode, for display. (preview) */ vfe_send_outmsg(MSG_ID_OUTPUT_P, p0_addr, p1_addr, p2_addr); } else { vfe31_ctrl->outpath.out0.frame_drop_cnt++; pr_warning("path_irq_0 - no free buffer!\n"); #ifdef CONFIG_MSM_CAMERA_V4L2 pr_info("Swapping ping and pong\n"); /*get addresses*/ /* Y channel */ pyaddr_ping = vfe31_get_ch_ping_addr( vfe31_ctrl->outpath.out0.ch0); /* Chroma channel */ pcbcraddr_ping = vfe31_get_ch_ping_addr( vfe31_ctrl->outpath.out0.ch1); /* Y channel */ pyaddr_pong = vfe31_get_ch_pong_addr( vfe31_ctrl->outpath.out0.ch0); /* Chroma channel */ pcbcraddr_pong = vfe31_get_ch_pong_addr( vfe31_ctrl->outpath.out0.ch1); CDBG("ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_ping, (void *)pyaddr_pong); CDBG("ping_cbcr = 0x%p, pong_cbcr = 0x%p\n", (void *)pcbcraddr_ping, (void *)pcbcraddr_pong); /*put addresses*/ /* SWAP y channel*/ vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out0.ch0, pyaddr_pong); vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out0.ch0, pyaddr_ping); /* SWAP chroma channel*/ vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out0.ch1, pcbcraddr_pong); vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out0.ch1, pcbcraddr_ping); CDBG("after swap: ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_pong, (void *)pyaddr_ping); #endif } } static void vfe31_process_snapshot_frame(uint32_t ping_pong) { uint32_t p0_addr, p1_addr; struct vfe31_free_buf *free_buf = NULL; /* Y channel- Main Image */ p0_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch0); /* Chroma channel - TN Image */ p1_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch1); free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out1); CDBG("%s: snapshot main, p0_addr = 0x%x, p1_addr = 0x%x\n", __func__, p0_addr, p1_addr); if (free_buf) { /* Y channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch0, free_buf->paddr + free_buf->planar0_off); /* Chroma channel */ 
vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch1, free_buf->paddr + free_buf->planar1_off); kfree(free_buf); } vfe_send_outmsg(MSG_ID_OUTPUT_S, p0_addr, p1_addr, p0_addr); /* Y channel- TN Image */ p0_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out0.ch0); /* Chroma channel - TN Image */ p1_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out0.ch1); free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out0); CDBG("%s: snapshot TN, p0_addr = 0x%x, p1_addr = 0x%x\n", __func__, p0_addr, p1_addr); if (free_buf) { /* Y channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out0.ch0, free_buf->paddr + free_buf->planar0_off); /* Chroma channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out0.ch1, free_buf->paddr + free_buf->planar1_off); kfree(free_buf); } vfe_send_outmsg(MSG_ID_OUTPUT_T, p0_addr, p1_addr, p0_addr); /* in snapshot mode if done then send snapshot done message */ if (vfe31_ctrl->vfe_capture_count == 0) { vfe31_send_msg_no_payload(MSG_ID_SNAPSHOT_DONE); /* Ensure the write order while writing to the cmd register using barrier */ msm_camera_io_w_mb(CAMIF_COMMAND_STOP_IMMEDIATELY, vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND); } } static void vfe31_process_raw_snapshot_frame(uint32_t ping_pong) { uint32_t pyaddr, pcbcraddr; struct vfe31_free_buf *free_buf = NULL; struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata; if (p_sync->stereocam_enabled) p_sync->stereo_state = STEREO_RAW_SNAP_STARTED; /* Y channel- Main Image */ pyaddr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch0); /* Chroma channel - Main Image */ pcbcraddr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch1); free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out1); CDBG("%s: snapshot raw, pyaddr = 0x%x, pcbcraddr = 0x%x\n", __func__, pyaddr, pcbcraddr); if (free_buf) { /* Y channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch0, free_buf->paddr + free_buf->planar0_off); /* Chroma channel */ vfe31_put_ch_addr(ping_pong, 
vfe31_ctrl->outpath.out1.ch1, free_buf->paddr + free_buf->planar1_off); kfree(free_buf); } vfe_send_outmsg(MSG_ID_OUTPUT_S, pyaddr, pcbcraddr, 0); /* in snapshot mode if done then send snapshot done message */ if (vfe31_ctrl->vfe_capture_count == 0) { vfe31_send_msg_no_payload(MSG_ID_SNAPSHOT_DONE); /* Ensure the write order while writing to the cmd register using barrier */ msm_camera_io_w_mb(CAMIF_COMMAND_STOP_IMMEDIATELY, vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND); } } static void vfe31_process_zsl_frame(uint32_t ping_pong) { uint32_t p0_addr, p1_addr; struct vfe31_free_buf *free_buf = NULL; /* Y channel- Main Image */ p0_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out2.ch0); /* Chroma channel - Main Image */ p1_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out2.ch1); free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out2); CDBG("%s: snapshot main, pyaddr = 0x%x, pcbcraddr = 0x%x\n", __func__, p0_addr, p1_addr); if (free_buf) { /* Y channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out2.ch0, free_buf->paddr + free_buf->planar0_off); /* Chroma channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out2.ch1, free_buf->paddr + free_buf->planar1_off); kfree(free_buf); } vfe_send_outmsg(MSG_ID_OUTPUT_S, p0_addr, p1_addr, p0_addr); /* Y channel- TN Image */ p0_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch0); /* Chroma channel - TN Image */ p1_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch1); free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out1); CDBG("%s: snapshot TN, pyaddr = 0x%x, pcbcraddr = 0x%x\n", __func__, p0_addr, p1_addr); if (free_buf) { /* Y channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch0, free_buf->paddr + free_buf->planar0_off); /* Chroma channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out1.ch1, free_buf->paddr + free_buf->planar1_off); kfree(free_buf); } vfe_send_outmsg(MSG_ID_OUTPUT_T, p0_addr, p1_addr, p0_addr); } static void 
vfe31_process_output_path_irq_1(uint32_t ping_pong) { #ifdef CONFIG_MSM_CAMERA_V4L2 uint32_t pyaddr_ping, pcbcraddr_ping, pyaddr_pong, pcbcraddr_pong; #endif CDBG("%s, operation_mode = %d, cap_cnt = %d\n", __func__, vfe31_ctrl->operation_mode, vfe31_ctrl->vfe_capture_count); /* In Snapshot mode */ if ((VFE_MODE_OF_OPERATION_SNAPSHOT == vfe31_ctrl->operation_mode) && ((vfe31_ctrl->vfe_capture_count <= 1) || (vfe31_free_buf_available(vfe31_ctrl->outpath.out0) && vfe31_free_buf_available(vfe31_ctrl->outpath.out1)))) { vfe31_process_snapshot_frame(ping_pong); } else if ((VFE_MODE_OF_OPERATION_RAW_SNAPSHOT == vfe31_ctrl->operation_mode) && ((vfe31_ctrl->vfe_capture_count <= 1) || vfe31_free_buf_available(vfe31_ctrl->outpath.out1))) { vfe31_process_raw_snapshot_frame(ping_pong); } else if ((VFE_MODE_OF_OPERATION_ZSL == vfe31_ctrl->operation_mode) && (vfe31_free_buf_available(vfe31_ctrl->outpath.out1) && vfe31_free_buf_available(vfe31_ctrl->outpath.out2))) { vfe31_process_zsl_frame(ping_pong); } else { vfe31_ctrl->outpath.out1.frame_drop_cnt++; pr_info("path_irq_1 - no free buffer!\n"); #ifdef CONFIG_MSM_CAMERA_V4L2 pr_info("Swapping ping and pong\n"); /*get addresses*/ /* Y channel */ pyaddr_ping = vfe31_get_ch_ping_addr( vfe31_ctrl->outpath.out1.ch0); /* Chroma channel */ pcbcraddr_ping = vfe31_get_ch_ping_addr( vfe31_ctrl->outpath.out1.ch1); /* Y channel */ pyaddr_pong = vfe31_get_ch_pong_addr( vfe31_ctrl->outpath.out1.ch0); /* Chroma channel */ pcbcraddr_pong = vfe31_get_ch_pong_addr( vfe31_ctrl->outpath.out1.ch1); CDBG("ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_ping, (void *)pyaddr_pong); CDBG("ping_cbcr = 0x%p, pong_cbcr = 0x%p\n", (void *)pcbcraddr_ping, (void *)pcbcraddr_pong); /*put addresses*/ /* SWAP y channel*/ vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out1.ch0, pyaddr_pong); vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out1.ch0, pyaddr_ping); /* SWAP chroma channel*/ vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out1.ch1, pcbcraddr_pong); 
vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out1.ch1, pcbcraddr_ping); CDBG("after swap: ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_pong, (void *)pyaddr_ping); #endif } } static void vfe31_process_output_path_irq_2(uint32_t ping_pong) { uint32_t p0_addr, p1_addr, p2_addr; struct vfe31_free_buf *free_buf = NULL; #ifdef CONFIG_MSM_CAMERA_V4L2 uint32_t pyaddr_ping, pcbcraddr_ping, pyaddr_pong, pcbcraddr_pong; #endif /* we render frames in the following conditions: 1. Continuous mode and the free buffer is avaialable. */ CDBG("%s, operation_mode = %d, state %d\n", __func__, vfe31_ctrl->operation_mode, vfe31_ctrl->recording_state); /* Ensure that both wm1 and wm5 ping and pong buffers are active*/ if (!(((ping_pong & 0x22) == 0x22) || ((ping_pong & 0x22) == 0x0))) { pr_err(" Irq_2 - skip the frame pp_status is not proper" "PP_status = 0x%x\n", ping_pong); return; } if ((vfe31_ctrl->recording_state == VFE_REC_STATE_STOP_REQUESTED) || (vfe31_ctrl->recording_state == VFE_REC_STATE_STOPPED)) { vfe31_ctrl->outpath.out2.frame_drop_cnt++; pr_warning("path_irq_2 - recording stopped\n"); return; } free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out2); if (free_buf) { /* Y channel */ p0_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out2.ch0); /* Chroma channel */ p1_addr = vfe31_get_ch_addr(ping_pong, vfe31_ctrl->outpath.out2.ch1); p2_addr = p0_addr; CDBG("video output, pyaddr = 0x%x, pcbcraddr = 0x%x\n", p0_addr, p1_addr); /* Y channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out2.ch0, free_buf->paddr + free_buf->planar0_off); /* Chroma channel */ vfe31_put_ch_addr(ping_pong, vfe31_ctrl->outpath.out2.ch1, free_buf->paddr + free_buf->planar1_off); kfree(free_buf); vfe_send_outmsg(MSG_ID_OUTPUT_V, p0_addr, p1_addr, p2_addr); } else { vfe31_ctrl->outpath.out2.frame_drop_cnt++; pr_warning("path_irq_2 - no free buffer!\n"); #ifdef CONFIG_MSM_CAMERA_V4L2 pr_info("Swapping ping and pong\n"); /*get addresses*/ /* Y channel */ pyaddr_ping = vfe31_get_ch_ping_addr( 
			vfe31_ctrl->outpath.out2.ch0);
		/* Chroma channel */
		pcbcraddr_ping = vfe31_get_ch_ping_addr(
			vfe31_ctrl->outpath.out2.ch1);
		/* Y channel */
		pyaddr_pong = vfe31_get_ch_pong_addr(
			vfe31_ctrl->outpath.out2.ch0);
		/* Chroma channel */
		pcbcraddr_pong = vfe31_get_ch_pong_addr(
			vfe31_ctrl->outpath.out2.ch1);
		CDBG("ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_ping,
			(void *)pyaddr_pong);
		CDBG("ping_cbcr = 0x%p, pong_cbcr = 0x%p\n",
			(void *)pcbcraddr_ping, (void *)pcbcraddr_pong);
		/* put addresses: swap ping and pong so the hardware keeps
		 * writing into valid memory while no free buffer exists. */
		/* SWAP y channel */
		vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out2.ch0,
			pyaddr_pong);
		vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out2.ch0,
			pyaddr_ping);
		/* SWAP chroma channel */
		vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out2.ch1,
			pcbcraddr_pong);
		vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out2.ch1,
			pcbcraddr_ping);
		CDBG("after swap: ping = 0x%p, pong = 0x%p\n",
			(void *)pyaddr_pong, (void *)pyaddr_ping);
#endif
	}
}

/*
 * vfe31_process_stats_irq_common - rotate the stats ping/pong buffer
 * for one stats stream.
 *
 * @statsNum: index of the stats stream (its ping/pong status bit is
 *            bit (statsNum + 7) of VFE_BUS_PING_PONG_STATUS).
 * @newAddr:  physical address of the next buffer to hand to hardware.
 *
 * Reads which of ping/pong the hardware is currently using, writes
 * @newAddr into the *inactive* address slot (each stream owns a
 * 3-word register group starting at VFE_BUS_STATS_PING_PONG_BASE),
 * and returns the address previously held there - i.e. the buffer the
 * hardware just finished filling.
 *
 * NOTE(review): the pointer<->uint32_t round-trips assume a 32-bit
 * kernel address space - confirm before porting to 64-bit.
 */
static uint32_t vfe31_process_stats_irq_common(uint32_t statsNum,
	uint32_t newAddr)
{
	uint32_t pingpongStatus;
	uint32_t returnAddr;
	uint32_t pingpongAddr;
	/* must be 0=ping, 1=pong */
	pingpongStatus =
		((msm_camera_io_r(vfe31_ctrl->vfebase +
		VFE_BUS_PING_PONG_STATUS))
		& ((uint32_t)(1<<(statsNum + 7)))) >> (statsNum + 7);
	/* stats bits starts at 7 */
	CDBG("statsNum %d, pingpongStatus %d\n", statsNum, pingpongStatus);
	pingpongAddr =
		((uint32_t)(vfe31_ctrl->vfebase +
		VFE_BUS_STATS_PING_PONG_BASE)) +
		(3*statsNum)*4 + (1-pingpongStatus)*4;
	returnAddr = msm_camera_io_r((uint32_t *)pingpongAddr);
	msm_camera_io_w(newAddr, (uint32_t *)pingpongAddr);
	return returnAddr;
}

/*
 * vfe_send_stats_msg - bundle all rendered stats buffers (AEC, AWB,
 * AF, IHIST, RS, CS), the frame counter, the status bits and the AWB
 * ymin value read from VFE_STATS_AWB_SGW_CFG into one MSG_ID_COMMON
 * message for the upper layer (continues past this chunk boundary).
 */
static void vfe_send_stats_msg(void)
{
	struct vfe_message msg;
	uint32_t temp;
	/* fill message with right content.
*/ msg._u.msgStats.frameCounter = vfe31_ctrl->vfeFrameId; msg._u.msgStats.status_bits = vfe31_ctrl->status_bits; msg._d = MSG_ID_COMMON; msg._u.msgStats.buff.aec = vfe31_ctrl->aecStatsControl.bufToRender; msg._u.msgStats.buff.awb = vfe31_ctrl->awbStatsControl.bufToRender; msg._u.msgStats.buff.af = vfe31_ctrl->afStatsControl.bufToRender; msg._u.msgStats.buff.ihist = vfe31_ctrl->ihistStatsControl.bufToRender; msg._u.msgStats.buff.rs = vfe31_ctrl->rsStatsControl.bufToRender; msg._u.msgStats.buff.cs = vfe31_ctrl->csStatsControl.bufToRender; temp = msm_camera_io_r(vfe31_ctrl->vfebase + VFE_STATS_AWB_SGW_CFG); msg._u.msgStats.buff.awb_ymin = (0xFF00 & temp) >> 8; vfe31_proc_ops(msg._d, &msg, sizeof(struct vfe_message)); return; } static void vfe31_process_stats(void) { int32_t process_stats = false; CDBG("%s, stats = 0x%x\n", __func__, vfe31_ctrl->status_bits); if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_AEC) { if (!vfe31_ctrl->aec_ack_pending) { vfe31_ctrl->aec_ack_pending = TRUE; vfe31_ctrl->aecStatsControl.bufToRender = vfe31_process_stats_irq_common(statsAeNum, vfe31_ctrl->aecStatsControl.nextFrameAddrBuf); process_stats = true; } else{ vfe31_ctrl->aecStatsControl.bufToRender = 0; vfe31_ctrl->aecStatsControl.droppedStatsFrameCount++; } } else { vfe31_ctrl->aecStatsControl.bufToRender = 0; } if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_AWB) { if (!vfe31_ctrl->awb_ack_pending) { vfe31_ctrl->awb_ack_pending = TRUE; vfe31_ctrl->awbStatsControl.bufToRender = vfe31_process_stats_irq_common(statsAwbNum, vfe31_ctrl->awbStatsControl.nextFrameAddrBuf); process_stats = true; } else{ vfe31_ctrl->awbStatsControl.droppedStatsFrameCount++; vfe31_ctrl->awbStatsControl.bufToRender = 0; } } else { vfe31_ctrl->awbStatsControl.bufToRender = 0; } if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_AF) { if (!vfe31_ctrl->af_ack_pending) { vfe31_ctrl->af_ack_pending = TRUE; vfe31_ctrl->afStatsControl.bufToRender = vfe31_process_stats_irq_common(statsAfNum, 
vfe31_ctrl->afStatsControl.nextFrameAddrBuf); process_stats = true; } else { vfe31_ctrl->afStatsControl.bufToRender = 0; vfe31_ctrl->afStatsControl.droppedStatsFrameCount++; } } else { vfe31_ctrl->afStatsControl.bufToRender = 0; } if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_IHIST) { if (!vfe31_ctrl->ihist_ack_pending) { vfe31_ctrl->ihist_ack_pending = TRUE; vfe31_ctrl->ihistStatsControl.bufToRender = vfe31_process_stats_irq_common(statsIhistNum, vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf); process_stats = true; } else { vfe31_ctrl->ihistStatsControl.droppedStatsFrameCount++; vfe31_ctrl->ihistStatsControl.bufToRender = 0; } } else { vfe31_ctrl->ihistStatsControl.bufToRender = 0; } if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_RS) { if (!vfe31_ctrl->rs_ack_pending) { vfe31_ctrl->rs_ack_pending = TRUE; vfe31_ctrl->rsStatsControl.bufToRender = vfe31_process_stats_irq_common(statsRsNum, vfe31_ctrl->rsStatsControl.nextFrameAddrBuf); process_stats = true; } else { vfe31_ctrl->rsStatsControl.droppedStatsFrameCount++; vfe31_ctrl->rsStatsControl.bufToRender = 0; } } else { vfe31_ctrl->rsStatsControl.bufToRender = 0; } if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_CS) { if (!vfe31_ctrl->cs_ack_pending) { vfe31_ctrl->cs_ack_pending = TRUE; vfe31_ctrl->csStatsControl.bufToRender = vfe31_process_stats_irq_common(statsCsNum, vfe31_ctrl->csStatsControl.nextFrameAddrBuf); process_stats = true; } else { vfe31_ctrl->csStatsControl.droppedStatsFrameCount++; vfe31_ctrl->csStatsControl.bufToRender = 0; } } else { vfe31_ctrl->csStatsControl.bufToRender = 0; } if (process_stats) vfe_send_stats_msg(); return; } static void vfe31_process_stats_irq(uint32_t *irqstatus) { /* Subsample the stats according to the hfr speed*/ if ((vfe31_ctrl->hfr_mode != HFR_MODE_OFF) && (vfe31_ctrl->vfeFrameId % vfe31_ctrl->hfr_mode != 0)) { CDBG("Skip the stats when HFR enabled\n"); return; } vfe31_ctrl->status_bits = VFE_COM_STATUS & *irqstatus; vfe31_process_stats(); return; } static 
void vfe31_do_tasklet(unsigned long data) { unsigned long flags; struct vfe31_isr_queue_cmd *qcmd = NULL; CDBG("=== vfe31_do_tasklet start === \n"); while (atomic_read(&irq_cnt)) { spin_lock_irqsave(&vfe31_ctrl->tasklet_lock, flags); qcmd = list_first_entry(&vfe31_ctrl->tasklet_q, struct vfe31_isr_queue_cmd, list); atomic_sub(1, &irq_cnt); if (!qcmd) { spin_unlock_irqrestore(&vfe31_ctrl->tasklet_lock, flags); return; } list_del(&qcmd->list); spin_unlock_irqrestore(&vfe31_ctrl->tasklet_lock, flags); /* interrupt to be processed, *qcmd has the payload. */ if (qcmd->vfeInterruptStatus0 & VFE_IRQ_STATUS0_REG_UPDATE_MASK) { CDBG("irq regUpdateIrq\n"); vfe31_process_reg_update_irq(); } if (qcmd->vfeInterruptStatus1 & VFE_IMASK_RESET) { CDBG("irq resetAckIrq\n"); vfe31_process_reset_irq(); } if (qcmd->vfeInterruptStatus1 & VFE_IMASK_AXI_HALT) { CDBG("irq axi halt irq\n"); vfe31_process_axi_halt_irq(); } if (atomic_read(&vfe31_ctrl->vstate)) { if (qcmd->vfeInterruptStatus1 & VFE31_IMASK_ERROR_ONLY_1) { pr_err("irq errorIrq\n"); vfe31_process_error_irq( qcmd->vfeInterruptStatus1 & VFE31_IMASK_ERROR_ONLY_1); } /* irqs below are only valid when in active state. */ /* next, check output path related interrupts. */ if (qcmd->vfeInterruptStatus0 & VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK) { CDBG("Image composite done 0 irq occured.\n"); vfe31_process_output_path_irq_0( qcmd->vfePingPongStatus); } if (qcmd->vfeInterruptStatus0 & VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK) { CDBG("Image composite done 1 irq occured.\n"); vfe31_process_output_path_irq_1( qcmd->vfePingPongStatus); } if (qcmd->vfeInterruptStatus0 & VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE2_MASK) { CDBG("Image composite done 2 irq occured.\n"); vfe31_process_output_path_irq_2( qcmd->vfePingPongStatus); } /* then process stats irq. */ if (vfe31_ctrl->stats_comp) { /* process stats comb interrupt. 
*/ if (qcmd->vfeInterruptStatus0 & VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK) { CDBG("Stats composite irq occured.\n"); vfe31_process_stats_irq( &qcmd->vfeInterruptStatus0); } } else { /* process individual stats interrupt. */ if (qcmd->vfeInterruptStatus0 & VFE_COM_STATUS) { CDBG("VFE stats occured.\n"); vfe31_process_stats_irq( &qcmd->vfeInterruptStatus0); } if (qcmd->vfeInterruptStatus0 & VFE_IRQ_STATUS0_SYNC_TIMER0) { CDBG("SYNC_TIMER 0 irq occured.\n"); vfe31_send_msg_no_payload( MSG_ID_SYNC_TIMER0_DONE); } if (qcmd->vfeInterruptStatus0 & VFE_IRQ_STATUS0_SYNC_TIMER1) { CDBG("SYNC_TIMER 1 irq occured.\n"); vfe31_send_msg_no_payload( MSG_ID_SYNC_TIMER1_DONE); } if (qcmd->vfeInterruptStatus0 & VFE_IRQ_STATUS0_SYNC_TIMER2) { CDBG("SYNC_TIMER 2 irq occured.\n"); vfe31_send_msg_no_payload( MSG_ID_SYNC_TIMER2_DONE); } } } if (qcmd->vfeInterruptStatus0 & VFE_IRQ_STATUS0_CAMIF_SOF_MASK) { CDBG("irq camifSofIrq\n"); vfe31_process_camif_sof_irq(); } kfree(qcmd); } CDBG("=== vfe31_do_tasklet end === \n"); } DECLARE_TASKLET(vfe31_tasklet, vfe31_do_tasklet, 0); static irqreturn_t vfe31_parse_irq(int irq_num, void *data) { unsigned long flags; struct vfe31_irq_status irq; struct vfe31_isr_queue_cmd *qcmd; uint32_t *val; CDBG("vfe_parse_irq\n"); memset(&irq, 0, sizeof(struct vfe31_irq_status)); val = (uint32_t *)(vfe31_ctrl->vfebase + VFE_IRQ_STATUS_0); irq.vfeIrqStatus0 = msm_camera_io_r(val); val = (uint32_t *)(vfe31_ctrl->vfebase + VFE_IRQ_STATUS_1); irq.vfeIrqStatus1 = msm_camera_io_r(val); if (irq.vfeIrqStatus1 & VFE_IMASK_AXI_HALT) { msm_camera_io_w(VFE_IMASK_RESET, vfe31_ctrl->vfebase + VFE_IRQ_MASK_1); msm_camera_io_w_mb(AXI_HALT_CLEAR, vfe31_ctrl->vfebase + VFE_AXI_CMD); } val = (uint32_t *)(vfe31_ctrl->vfebase + VFE_CAMIF_STATUS); irq.camifStatus = msm_camera_io_r(val); CDBG("camifStatus = 0x%x\n", irq.camifStatus); val = (uint32_t *)(vfe31_ctrl->vfebase + VFE_BUS_PING_PONG_STATUS); irq.vfePingPongStatus = msm_camera_io_r(val); /* clear the pending interrupt of the same 
kind.*/ msm_camera_io_w(irq.vfeIrqStatus0, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_0); msm_camera_io_w(irq.vfeIrqStatus1, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_1); /* Ensure the write order while writing to the command register using the barrier */ msm_camera_io_w_mb(1, vfe31_ctrl->vfebase + VFE_IRQ_CMD); if ((irq.vfeIrqStatus0 == 0) && (irq.vfeIrqStatus1 == 0)) { CDBG("vfe_parse_irq: vfeIrqStatus0 & 1 are both 0!\n"); return IRQ_HANDLED; } qcmd = kzalloc(sizeof(struct vfe31_isr_queue_cmd), GFP_ATOMIC); if (!qcmd) { pr_err("vfe_parse_irq: qcmd malloc failed!\n"); return IRQ_HANDLED; } if (atomic_read(&vfe31_ctrl->stop_ack_pending)) { irq.vfeIrqStatus0 &= VFE_IMASK_WHILE_STOPPING_0; irq.vfeIrqStatus1 &= vfe31_ctrl->while_stopping_mask; } spin_lock_irqsave(&vfe31_ctrl->xbar_lock, flags); if ((irq.vfeIrqStatus0 & VFE_IRQ_STATUS0_CAMIF_EOF_MASK) && vfe31_ctrl->xbar_update_pending) { CDBG("irq camifEofIrq\n"); msm_camera_io_memcpy(vfe31_ctrl->vfebase + V31_XBAR_CFG_OFF, (void *)vfe31_ctrl->xbar_cfg, V31_XBAR_CFG_LEN); vfe31_ctrl->xbar_update_pending = 0; } spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags); CDBG("vfe_parse_irq: Irq_status0 = 0x%x, Irq_status1 = 0x%x.\n", irq.vfeIrqStatus0, irq.vfeIrqStatus1); qcmd->vfeInterruptStatus0 = irq.vfeIrqStatus0; qcmd->vfeInterruptStatus1 = irq.vfeIrqStatus1; qcmd->vfePingPongStatus = irq.vfePingPongStatus; spin_lock_irqsave(&vfe31_ctrl->tasklet_lock, flags); list_add_tail(&qcmd->list, &vfe31_ctrl->tasklet_q); atomic_add(1, &irq_cnt); spin_unlock_irqrestore(&vfe31_ctrl->tasklet_lock, flags); tasklet_schedule(&vfe31_tasklet); return IRQ_HANDLED; } static void vfe31_release(struct platform_device *pdev) { struct resource *vfemem, *vfeio; vfe31_reset_free_buf_queue_all(); CDBG("%s, free_irq\n", __func__); free_irq(vfe31_ctrl->vfeirq, 0); tasklet_kill(&vfe31_tasklet); if (atomic_read(&irq_cnt)) pr_warning("%s, Warning IRQ Count not ZERO\n", __func__); vfemem = vfe31_ctrl->vfemem; vfeio = vfe31_ctrl->vfeio; msm_vpe_release(); 
kfree(vfe31_ctrl->extdata); iounmap(vfe31_ctrl->vfebase); kfree(vfe31_ctrl); vfe31_ctrl = NULL; release_mem_region(vfemem->start, (vfemem->end - vfemem->start) + 1); CDBG("%s, msm_camio_disable\n", __func__); msm_camio_disable(pdev); msm_camio_set_perf_lvl(S_EXIT); vfe_syncdata = NULL; } static int vfe31_resource_init(struct msm_vfe_callback *presp, struct platform_device *pdev, void *sdata) { struct resource *vfemem, *vfeirq, *vfeio; int rc; struct msm_camera_sensor_info *s_info; s_info = pdev->dev.platform_data; pdev->resource = s_info->resource; pdev->num_resources = s_info->num_resources; vfemem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!vfemem) { pr_err("%s: no mem resource?\n", __func__); return -ENODEV; } vfeirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!vfeirq) { pr_err("%s: no irq resource?\n", __func__); return -ENODEV; } vfeio = request_mem_region(vfemem->start, resource_size(vfemem), pdev->name); if (!vfeio) { pr_err("%s: VFE region already claimed\n", __func__); return -EBUSY; } vfe31_ctrl = kzalloc(sizeof(struct vfe31_ctrl_type), GFP_KERNEL); if (!vfe31_ctrl) { rc = -ENOMEM; goto cmd_init_failed1; } vfe31_ctrl->vfeirq = vfeirq->start; vfe31_ctrl->vfebase = ioremap(vfemem->start, (vfemem->end - vfemem->start) + 1); if (!vfe31_ctrl->vfebase) { rc = -ENOMEM; pr_err("%s: vfe ioremap failed\n", __func__); goto cmd_init_failed2; } if (presp && presp->vfe_resp) vfe31_ctrl->resp = presp; else { rc = -EINVAL; goto cmd_init_failed3; } vfe31_ctrl->extdata = kmalloc(sizeof(struct vfe31_frame_extra), GFP_KERNEL); if (!vfe31_ctrl->extdata) { rc = -ENOMEM; goto cmd_init_failed3; } vfe31_ctrl->extlen = sizeof(struct vfe31_frame_extra); spin_lock_init(&vfe31_ctrl->io_lock); spin_lock_init(&vfe31_ctrl->update_ack_lock); spin_lock_init(&vfe31_ctrl->tasklet_lock); spin_lock_init(&vfe31_ctrl->xbar_lock); INIT_LIST_HEAD(&vfe31_ctrl->tasklet_q); vfe31_init_free_buf_queue(); vfe31_ctrl->syncdata = sdata; vfe31_ctrl->vfemem = vfemem; vfe31_ctrl->vfeio 
= vfeio; vfe31_ctrl->update_gamma = false; vfe31_ctrl->update_luma = false; vfe31_ctrl->s_info = s_info; vfe31_ctrl->stats_comp = 0; vfe31_ctrl->hfr_mode = HFR_MODE_OFF; return 0; cmd_init_failed3: free_irq(vfe31_ctrl->vfeirq, 0); iounmap(vfe31_ctrl->vfebase); cmd_init_failed2: kfree(vfe31_ctrl); cmd_init_failed1: release_mem_region(vfemem->start, (vfemem->end - vfemem->start) + 1); return rc; } static int vfe31_init(struct msm_vfe_callback *presp, struct platform_device *pdev) { int rc = 0; struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; struct msm_camera_device_platform_data *camdev = sinfo->pdata; camio_clk = camdev->ioclk; rc = vfe31_resource_init(presp, pdev, vfe_syncdata); if (rc < 0) return rc; /* Bring up all the required GPIOs and Clocks */ rc = msm_camio_enable(pdev); msm_camio_set_perf_lvl(S_INIT); if (msm_vpe_open() < 0) CDBG("%s: vpe_open failed\n", __func__); /* TO DO: Need to release the VFE resources */ rc = request_irq(vfe31_ctrl->vfeirq, vfe31_parse_irq, IRQF_TRIGGER_RISING, "vfe", 0); return rc; } void msm_camvfe_fn_init(struct msm_camvfe_fn *fptr, void *data) { fptr->vfe_init = vfe31_init; fptr->vfe_enable = vfe31_enable; fptr->vfe_config = vfe31_config; fptr->vfe_disable = vfe31_disable; fptr->vfe_release = vfe31_release; fptr->vfe_stop = vfe31_stop; vfe_syncdata = data; } void msm_camvpe_fn_init(struct msm_camvpe_fn *fptr, void *data) { fptr->vpe_reg = msm_vpe_reg; fptr->send_frame_to_vpe = msm_send_frame_to_vpe; fptr->vpe_config = msm_vpe_config; fptr->vpe_cfg_update = msm_vpe_cfg_update; fptr->dis = &(vpe_ctrl->dis_en); fptr->vpe_cfg_offset = msm_vpe_offset_update; vpe_ctrl->syncdata = data; }
gpl-2.0
fxs007/linux
drivers/hid/hid-tmff.c
4460
7195
/* * Force feedback support for various HID compliant devices by ThrustMaster: * ThrustMaster FireStorm Dual Power 2 * and possibly others whose device ids haven't been added. * * Modified to support ThrustMaster devices by Zinx Verituse * on 2003-01-25 from the Logitech force feedback driver, * which is by Johann Deneux. * * Copyright (c) 2003 Zinx Verituse <zinx@epicsol.org> * Copyright (c) 2002 Johann Deneux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/hid.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/module.h> #include "hid-ids.h" static const signed short ff_rumble[] = { FF_RUMBLE, -1 }; static const signed short ff_joystick[] = { FF_CONSTANT, -1 }; #ifdef CONFIG_THRUSTMASTER_FF /* Usages for thrustmaster devices I know about */ #define THRUSTMASTER_USAGE_FF (HID_UP_GENDESK | 0xbb) struct tmff_device { struct hid_report *report; struct hid_field *ff_field; }; /* Changes values from 0 to 0xffff into values from minimum to maximum */ static inline int tmff_scale_u16(unsigned int in, int minimum, int maximum) { int ret; ret = (in * (maximum - minimum) / 0xffff) + minimum; if (ret < minimum) return minimum; if (ret > maximum) return maximum; return ret; } /* Changes values from -0x80 to 0x7f into values from minimum to maximum */ static inline int tmff_scale_s8(int in, int 
minimum, int maximum) { int ret; ret = (((in + 0x80) * (maximum - minimum)) / 0xff) + minimum; if (ret < minimum) return minimum; if (ret > maximum) return maximum; return ret; } static int tmff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct tmff_device *tmff = data; struct hid_field *ff_field = tmff->ff_field; int x, y; int left, right; /* Rumbling */ switch (effect->type) { case FF_CONSTANT: x = tmff_scale_s8(effect->u.ramp.start_level, ff_field->logical_minimum, ff_field->logical_maximum); y = tmff_scale_s8(effect->u.ramp.end_level, ff_field->logical_minimum, ff_field->logical_maximum); dbg_hid("(x, y)=(%04x, %04x)\n", x, y); ff_field->value[0] = x; ff_field->value[1] = y; hid_hw_request(hid, tmff->report, HID_REQ_SET_REPORT); break; case FF_RUMBLE: left = tmff_scale_u16(effect->u.rumble.weak_magnitude, ff_field->logical_minimum, ff_field->logical_maximum); right = tmff_scale_u16(effect->u.rumble.strong_magnitude, ff_field->logical_minimum, ff_field->logical_maximum); dbg_hid("(left,right)=(%08x, %08x)\n", left, right); ff_field->value[0] = left; ff_field->value[1] = right; hid_hw_request(hid, tmff->report, HID_REQ_SET_REPORT); break; } return 0; } static int tmff_init(struct hid_device *hid, const signed short *ff_bits) { struct tmff_device *tmff; struct hid_report *report; struct list_head *report_list; struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); struct input_dev *input_dev = hidinput->input; int error; int i; tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL); if (!tmff) return -ENOMEM; /* Find the report to use */ report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; list_for_each_entry(report, report_list, list) { int fieldnum; for (fieldnum = 0; fieldnum < report->maxfield; ++fieldnum) { struct hid_field *field = report->field[fieldnum]; if (field->maxusage <= 0) continue; switch (field->usage[0].hid) { case 
THRUSTMASTER_USAGE_FF: if (field->report_count < 2) { hid_warn(hid, "ignoring FF field with report_count < 2\n"); continue; } if (field->logical_maximum == field->logical_minimum) { hid_warn(hid, "ignoring FF field with logical_maximum == logical_minimum\n"); continue; } if (tmff->report && tmff->report != report) { hid_warn(hid, "ignoring FF field in other report\n"); continue; } if (tmff->ff_field && tmff->ff_field != field) { hid_warn(hid, "ignoring duplicate FF field\n"); continue; } tmff->report = report; tmff->ff_field = field; for (i = 0; ff_bits[i] >= 0; i++) set_bit(ff_bits[i], input_dev->ffbit); break; default: hid_warn(hid, "ignoring unknown output usage %08x\n", field->usage[0].hid); continue; } } } if (!tmff->report) { hid_err(hid, "can't find FF field in output reports\n"); error = -ENODEV; goto fail; } error = input_ff_create_memless(input_dev, tmff, tmff_play); if (error) goto fail; hid_info(hid, "force feedback for ThrustMaster devices by Zinx Verituse <zinx@epicsol.org>\n"); return 0; fail: kfree(tmff); return error; } #else static inline int tmff_init(struct hid_device *hid, const signed short *ff_bits) { return 0; } #endif static int tm_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } tmff_init(hdev, (void *)id->driver_data); return 0; err: return ret; } static const struct hid_device_id tm_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300), .driver_data = (unsigned long)ff_rumble }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */ .driver_data = (unsigned long)ff_rumble }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */ .driver_data = (unsigned long)ff_rumble }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual 
Trigger 3-in-1 (PS3 Mode) */ .driver_data = (unsigned long)ff_rumble }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651), /* FGT Rumble Force Wheel */ .driver_data = (unsigned long)ff_rumble }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653), /* RGT Force Feedback CLUTCH Raging Wheel */ .driver_data = (unsigned long)ff_joystick }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654), /* FGT Force Feedback Wheel */ .driver_data = (unsigned long)ff_joystick }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a), /* F430 Force Feedback Wheel */ .driver_data = (unsigned long)ff_joystick }, { } }; MODULE_DEVICE_TABLE(hid, tm_devices); static struct hid_driver tm_driver = { .name = "thrustmaster", .id_table = tm_devices, .probe = tm_probe, }; module_hid_driver(tm_driver); MODULE_LICENSE("GPL");
gpl-2.0
tdm/kernel_huawei_msm8928
drivers/video/acornfb.c
5228
35846
/* * linux/drivers/video/acornfb.c * * Copyright (C) 1998-2001 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Frame buffer code for Acorn platforms * * NOTE: Most of the modes with X!=640 will disappear shortly. * NOTE: Startup setting of HS & VS polarity not supported. * (do we need to support it if we're coming up in 640x480?) * * FIXME: (things broken by the "new improved" FBCON API) * - Blanking 8bpp displays with VIDC */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/fb.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/gfp.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <asm/pgtable.h> #include "acornfb.h" /* * VIDC machines can't do 16 or 32BPP modes. */ #ifdef HAS_VIDC #undef FBCON_HAS_CFB16 #undef FBCON_HAS_CFB32 #endif /* * Default resolution. * NOTE that it has to be supported in the table towards * the end of this file. */ #define DEFAULT_XRES 640 #define DEFAULT_YRES 480 #define DEFAULT_BPP 4 /* * define this to debug the video mode selection */ #undef DEBUG_MODE_SELECTION /* * Translation from RISC OS monitor types to actual * HSYNC and VSYNC frequency ranges. These are * probably not right, but they're the best info I * have. Allow 1% either way on the nominal for TVs. 
*/ #define NR_MONTYPES 6 static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = { { /* TV */ .hfmin = 15469, .hfmax = 15781, .vfmin = 49, .vfmax = 51, }, { /* Multi Freq */ .hfmin = 0, .hfmax = 99999, .vfmin = 0, .vfmax = 199, }, { /* Hi-res mono */ .hfmin = 58608, .hfmax = 58608, .vfmin = 64, .vfmax = 64, }, { /* VGA */ .hfmin = 30000, .hfmax = 70000, .vfmin = 60, .vfmax = 60, }, { /* SVGA */ .hfmin = 30000, .hfmax = 70000, .vfmin = 56, .vfmax = 75, }, { .hfmin = 30000, .hfmax = 70000, .vfmin = 60, .vfmax = 60, } }; static struct fb_info fb_info; static struct acornfb_par current_par; static struct vidc_timing current_vidc; extern unsigned int vram_size; /* set by setup.c */ #ifdef HAS_VIDC #define MAX_SIZE 480*1024 /* CTL VIDC Actual * 24.000 0 8.000 * 25.175 0 8.392 * 36.000 0 12.000 * 24.000 1 12.000 * 25.175 1 12.588 * 24.000 2 16.000 * 25.175 2 16.783 * 36.000 1 18.000 * 24.000 3 24.000 * 36.000 2 24.000 * 25.175 3 25.175 * 36.000 3 36.000 */ struct pixclock { u_long min_clock; u_long max_clock; u_int vidc_ctl; u_int vid_ctl; }; static struct pixclock arc_clocks[] = { /* we allow +/-1% on these */ { 123750, 126250, VIDC_CTRL_DIV3, VID_CTL_24MHz }, /* 8.000MHz */ { 82500, 84167, VIDC_CTRL_DIV2, VID_CTL_24MHz }, /* 12.000MHz */ { 61875, 63125, VIDC_CTRL_DIV1_5, VID_CTL_24MHz }, /* 16.000MHz */ { 41250, 42083, VIDC_CTRL_DIV1, VID_CTL_24MHz }, /* 24.000MHz */ }; static struct pixclock * acornfb_valid_pixrate(struct fb_var_screeninfo *var) { u_long pixclock = var->pixclock; u_int i; if (!var->pixclock) return NULL; for (i = 0; i < ARRAY_SIZE(arc_clocks); i++) if (pixclock > arc_clocks[i].min_clock && pixclock < arc_clocks[i].max_clock) return arc_clocks + i; return NULL; } /* VIDC Rules: * hcr : must be even (interlace, hcr/2 must be even) * hswr : must be even * hdsr : must be odd * hder : must be odd * * vcr : must be odd * vswr : >= 1 * vdsr : >= 1 * vder : >= vdsr * if interlaced, then hcr/2 must be even */ static void acornfb_set_timing(struct 
fb_var_screeninfo *var) { struct pixclock *pclk; struct vidc_timing vidc; u_int horiz_correction; u_int sync_len, display_start, display_end, cycle; u_int is_interlaced; u_int vid_ctl, vidc_ctl; u_int bandwidth; memset(&vidc, 0, sizeof(vidc)); pclk = acornfb_valid_pixrate(var); vidc_ctl = pclk->vidc_ctl; vid_ctl = pclk->vid_ctl; bandwidth = var->pixclock * 8 / var->bits_per_pixel; /* 25.175, 4bpp = 79.444ns per byte, 317.776ns per word: fifo = 2,6 */ if (bandwidth > 143500) vidc_ctl |= VIDC_CTRL_FIFO_3_7; else if (bandwidth > 71750) vidc_ctl |= VIDC_CTRL_FIFO_2_6; else if (bandwidth > 35875) vidc_ctl |= VIDC_CTRL_FIFO_1_5; else vidc_ctl |= VIDC_CTRL_FIFO_0_4; switch (var->bits_per_pixel) { case 1: horiz_correction = 19; vidc_ctl |= VIDC_CTRL_1BPP; break; case 2: horiz_correction = 11; vidc_ctl |= VIDC_CTRL_2BPP; break; case 4: horiz_correction = 7; vidc_ctl |= VIDC_CTRL_4BPP; break; default: case 8: horiz_correction = 5; vidc_ctl |= VIDC_CTRL_8BPP; break; } if (var->sync & FB_SYNC_COMP_HIGH_ACT) /* should be FB_SYNC_COMP */ vidc_ctl |= VIDC_CTRL_CSYNC; else { if (!(var->sync & FB_SYNC_HOR_HIGH_ACT)) vid_ctl |= VID_CTL_HS_NHSYNC; if (!(var->sync & FB_SYNC_VERT_HIGH_ACT)) vid_ctl |= VID_CTL_VS_NVSYNC; } sync_len = var->hsync_len; display_start = sync_len + var->left_margin; display_end = display_start + var->xres; cycle = display_end + var->right_margin; /* if interlaced, then hcr/2 must be even */ is_interlaced = (var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED; if (is_interlaced) { vidc_ctl |= VIDC_CTRL_INTERLACE; if (cycle & 2) { cycle += 2; var->right_margin += 2; } } vidc.h_cycle = (cycle - 2) / 2; vidc.h_sync_width = (sync_len - 2) / 2; vidc.h_border_start = (display_start - 1) / 2; vidc.h_display_start = (display_start - horiz_correction) / 2; vidc.h_display_end = (display_end - horiz_correction) / 2; vidc.h_border_end = (display_end - 1) / 2; vidc.h_interlace = (vidc.h_cycle + 1) / 2; sync_len = var->vsync_len; display_start = sync_len + var->upper_margin; 
display_end = display_start + var->yres; cycle = display_end + var->lower_margin; if (is_interlaced) cycle = (cycle - 3) / 2; else cycle = cycle - 1; vidc.v_cycle = cycle; vidc.v_sync_width = sync_len - 1; vidc.v_border_start = display_start - 1; vidc.v_display_start = vidc.v_border_start; vidc.v_display_end = display_end - 1; vidc.v_border_end = vidc.v_display_end; if (machine_is_a5k()) __raw_writeb(vid_ctl, IOEB_VID_CTL); if (memcmp(&current_vidc, &vidc, sizeof(vidc))) { current_vidc = vidc; vidc_writel(0xe0000000 | vidc_ctl); vidc_writel(0x80000000 | (vidc.h_cycle << 14)); vidc_writel(0x84000000 | (vidc.h_sync_width << 14)); vidc_writel(0x88000000 | (vidc.h_border_start << 14)); vidc_writel(0x8c000000 | (vidc.h_display_start << 14)); vidc_writel(0x90000000 | (vidc.h_display_end << 14)); vidc_writel(0x94000000 | (vidc.h_border_end << 14)); vidc_writel(0x98000000); vidc_writel(0x9c000000 | (vidc.h_interlace << 14)); vidc_writel(0xa0000000 | (vidc.v_cycle << 14)); vidc_writel(0xa4000000 | (vidc.v_sync_width << 14)); vidc_writel(0xa8000000 | (vidc.v_border_start << 14)); vidc_writel(0xac000000 | (vidc.v_display_start << 14)); vidc_writel(0xb0000000 | (vidc.v_display_end << 14)); vidc_writel(0xb4000000 | (vidc.v_border_end << 14)); vidc_writel(0xb8000000); vidc_writel(0xbc000000); } #ifdef DEBUG_MODE_SELECTION printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres, var->yres, var->bits_per_pixel); printk(KERN_DEBUG " H-cycle : %d\n", vidc.h_cycle); printk(KERN_DEBUG " H-sync-width : %d\n", vidc.h_sync_width); printk(KERN_DEBUG " H-border-start : %d\n", vidc.h_border_start); printk(KERN_DEBUG " H-display-start : %d\n", vidc.h_display_start); printk(KERN_DEBUG " H-display-end : %d\n", vidc.h_display_end); printk(KERN_DEBUG " H-border-end : %d\n", vidc.h_border_end); printk(KERN_DEBUG " H-interlace : %d\n", vidc.h_interlace); printk(KERN_DEBUG " V-cycle : %d\n", vidc.v_cycle); printk(KERN_DEBUG " V-sync-width : %d\n", vidc.v_sync_width); printk(KERN_DEBUG " 
V-border-start : %d\n", vidc.v_border_start); printk(KERN_DEBUG " V-display-start : %d\n", vidc.v_display_start); printk(KERN_DEBUG " V-display-end : %d\n", vidc.v_display_end); printk(KERN_DEBUG " V-border-end : %d\n", vidc.v_border_end); printk(KERN_DEBUG " VIDC Ctrl (E) : 0x%08X\n", vidc_ctl); printk(KERN_DEBUG " IOEB Ctrl : 0x%08X\n", vid_ctl); #endif } static int acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int trans, struct fb_info *info) { union palette pal; if (regno >= current_par.palette_size) return 1; pal.p = 0; pal.vidc.reg = regno; pal.vidc.red = red >> 12; pal.vidc.green = green >> 12; pal.vidc.blue = blue >> 12; current_par.palette[regno] = pal; vidc_writel(pal.p); return 0; } #endif #ifdef HAS_VIDC20 #include <mach/acornfb.h> #define MAX_SIZE 2*1024*1024 /* VIDC20 has a different set of rules from the VIDC: * hcr : must be multiple of 4 * hswr : must be even * hdsr : must be even * hder : must be even * vcr : >= 2, (interlace, must be odd) * vswr : >= 1 * vdsr : >= 1 * vder : >= vdsr */ static void acornfb_set_timing(struct fb_info *info) { struct fb_var_screeninfo *var = &info->var; struct vidc_timing vidc; u_int vcr, fsize; u_int ext_ctl, dat_ctl; u_int words_per_line; memset(&vidc, 0, sizeof(vidc)); vidc.h_sync_width = var->hsync_len - 8; vidc.h_border_start = vidc.h_sync_width + var->left_margin + 8 - 12; vidc.h_display_start = vidc.h_border_start + 12 - 18; vidc.h_display_end = vidc.h_display_start + var->xres; vidc.h_border_end = vidc.h_display_end + 18 - 12; vidc.h_cycle = vidc.h_border_end + var->right_margin + 12 - 8; vidc.h_interlace = vidc.h_cycle / 2; vidc.v_sync_width = var->vsync_len - 1; vidc.v_border_start = vidc.v_sync_width + var->upper_margin; vidc.v_display_start = vidc.v_border_start; vidc.v_display_end = vidc.v_display_start + var->yres; vidc.v_border_end = vidc.v_display_end; vidc.control = acornfb_default_control(); vcr = var->vsync_len + var->upper_margin + var->yres + var->lower_margin; if 
((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { vidc.v_cycle = (vcr - 3) / 2; vidc.control |= VIDC20_CTRL_INT; } else vidc.v_cycle = vcr - 2; switch (var->bits_per_pixel) { case 1: vidc.control |= VIDC20_CTRL_1BPP; break; case 2: vidc.control |= VIDC20_CTRL_2BPP; break; case 4: vidc.control |= VIDC20_CTRL_4BPP; break; default: case 8: vidc.control |= VIDC20_CTRL_8BPP; break; case 16: vidc.control |= VIDC20_CTRL_16BPP; break; case 32: vidc.control |= VIDC20_CTRL_32BPP; break; } acornfb_vidc20_find_rates(&vidc, var); fsize = var->vsync_len + var->upper_margin + var->lower_margin - 1; if (memcmp(&current_vidc, &vidc, sizeof(vidc))) { current_vidc = vidc; vidc_writel(VIDC20_CTRL| vidc.control); vidc_writel(0xd0000000 | vidc.pll_ctl); vidc_writel(0x80000000 | vidc.h_cycle); vidc_writel(0x81000000 | vidc.h_sync_width); vidc_writel(0x82000000 | vidc.h_border_start); vidc_writel(0x83000000 | vidc.h_display_start); vidc_writel(0x84000000 | vidc.h_display_end); vidc_writel(0x85000000 | vidc.h_border_end); vidc_writel(0x86000000); vidc_writel(0x87000000 | vidc.h_interlace); vidc_writel(0x90000000 | vidc.v_cycle); vidc_writel(0x91000000 | vidc.v_sync_width); vidc_writel(0x92000000 | vidc.v_border_start); vidc_writel(0x93000000 | vidc.v_display_start); vidc_writel(0x94000000 | vidc.v_display_end); vidc_writel(0x95000000 | vidc.v_border_end); vidc_writel(0x96000000); vidc_writel(0x97000000); } iomd_writel(fsize, IOMD_FSIZE); ext_ctl = acornfb_default_econtrol(); if (var->sync & FB_SYNC_COMP_HIGH_ACT) /* should be FB_SYNC_COMP */ ext_ctl |= VIDC20_ECTL_HS_NCSYNC | VIDC20_ECTL_VS_NCSYNC; else { if (var->sync & FB_SYNC_HOR_HIGH_ACT) ext_ctl |= VIDC20_ECTL_HS_HSYNC; else ext_ctl |= VIDC20_ECTL_HS_NHSYNC; if (var->sync & FB_SYNC_VERT_HIGH_ACT) ext_ctl |= VIDC20_ECTL_VS_VSYNC; else ext_ctl |= VIDC20_ECTL_VS_NVSYNC; } vidc_writel(VIDC20_ECTL | ext_ctl); words_per_line = var->xres * var->bits_per_pixel / 32; if (current_par.using_vram && info->fix.smem_len == 2048*1024) 
words_per_line /= 2; /* RiscPC doesn't use the VIDC's VRAM control. */ dat_ctl = VIDC20_DCTL_VRAM_DIS | VIDC20_DCTL_SNA | words_per_line; /* The data bus width is dependent on both the type * and amount of video memory. * DRAM 32bit low * 1MB VRAM 32bit * 2MB VRAM 64bit */ if (current_par.using_vram && current_par.vram_half_sam == 2048) dat_ctl |= VIDC20_DCTL_BUS_D63_0; else dat_ctl |= VIDC20_DCTL_BUS_D31_0; vidc_writel(VIDC20_DCTL | dat_ctl); #ifdef DEBUG_MODE_SELECTION printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres, var->yres, var->bits_per_pixel); printk(KERN_DEBUG " H-cycle : %d\n", vidc.h_cycle); printk(KERN_DEBUG " H-sync-width : %d\n", vidc.h_sync_width); printk(KERN_DEBUG " H-border-start : %d\n", vidc.h_border_start); printk(KERN_DEBUG " H-display-start : %d\n", vidc.h_display_start); printk(KERN_DEBUG " H-display-end : %d\n", vidc.h_display_end); printk(KERN_DEBUG " H-border-end : %d\n", vidc.h_border_end); printk(KERN_DEBUG " H-interlace : %d\n", vidc.h_interlace); printk(KERN_DEBUG " V-cycle : %d\n", vidc.v_cycle); printk(KERN_DEBUG " V-sync-width : %d\n", vidc.v_sync_width); printk(KERN_DEBUG " V-border-start : %d\n", vidc.v_border_start); printk(KERN_DEBUG " V-display-start : %d\n", vidc.v_display_start); printk(KERN_DEBUG " V-display-end : %d\n", vidc.v_display_end); printk(KERN_DEBUG " V-border-end : %d\n", vidc.v_border_end); printk(KERN_DEBUG " Ext Ctrl (C) : 0x%08X\n", ext_ctl); printk(KERN_DEBUG " PLL Ctrl (D) : 0x%08X\n", vidc.pll_ctl); printk(KERN_DEBUG " Ctrl (E) : 0x%08X\n", vidc.control); printk(KERN_DEBUG " Data Ctrl (F) : 0x%08X\n", dat_ctl); printk(KERN_DEBUG " Fsize : 0x%08X\n", fsize); #endif } /* * We have to take note of the VIDC20's 16-bit palette here. 
 * The VIDC20 looks up a 16 bit pixel as follows:
 *
 *   bits      111111
 *             5432109876543210
 *   red             ++++++++  (8 bits,  7 to  0)
 *   green       ++++++++      (8 bits, 11 to  4)
 *   blue    ++++++++          (8 bits, 15 to  8)
 *
 * We use a pixel which looks like:
 *
 *   bits      111111
 *             5432109876543210
 *   red                +++++  (5 bits,  4 to  0)
 *   green         +++++       (5 bits,  9 to  5)
 *   blue     +++++            (5 bits, 14 to 10)
 */
static int
acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
		  u_int trans, struct fb_info *info)
{
	union palette pal;

	if (regno >= current_par.palette_size)
		return 1;

	if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
		u32 pseudo_val;

		/* Build the packed pseudo-palette entry used by the
		 * generic drawing routines for direct-colour modes. */
		pseudo_val = regno << info->var.red.offset;
		pseudo_val |= regno << info->var.green.offset;
		pseudo_val |= regno << info->var.blue.offset;

		((u32 *)info->pseudo_palette)[regno] = pseudo_val;
	}

	/* Hardware palette entries take the top 8 bits of each
	 * 16-bit colour component. */
	pal.p = 0;
	pal.vidc20.red = red >> 8;
	pal.vidc20.green = green >> 8;
	pal.vidc20.blue = blue >> 8;

	/* Remember the value in our shadow copy so the 16bpp reload
	 * below can rebuild the full register file. */
	current_par.palette[regno] = pal;

	if (info->var.bits_per_pixel == 16) {
		int i;

		/*
		 * For 16bpp we rewrite all 256 VIDC20 palette registers,
		 * replicating the 32 stored entries with the shifted
		 * indices that match the red/green/blue lookup scheme
		 * described in the comment above.
		 */
		pal.p = 0;
		vidc_writel(0x10000000);
		for (i = 0; i < 256; i += 1) {
			pal.vidc20.red = current_par.palette[i & 31].vidc20.red;
			pal.vidc20.green = current_par.palette[(i >> 1) & 31].vidc20.green;
			pal.vidc20.blue = current_par.palette[(i >> 2) & 31].vidc20.blue;
			vidc_writel(pal.p);
			/* Palette register pointer auto-increments */
		}
	} else {
		/* Select the palette register, then write the entry. */
		vidc_writel(0x10000000 | regno);
		vidc_writel(pal.p);
	}

	return 0;
}
#endif

/*
 * Before selecting the timing parameters, adjust
 * the resolution to fit the rules.
 */
static int
acornfb_adjust_timing(struct fb_info *info, struct fb_var_screeninfo *var, u_int fontht)
{
	u_int font_line_len, sam_size, min_size, size, nr_y;

	/* xres must be even */
	var->xres = (var->xres + 1) & ~1;

	/*
	 * We don't allow xres_virtual to differ from xres
	 */
	var->xres_virtual = var->xres;
	var->xoffset = 0;

	/* sam_size is the video memory alignment granularity:
	 * twice the VRAM half-SAM when using VRAM, else 16 bytes. */
	if (current_par.using_vram)
		sam_size = current_par.vram_half_sam * 2;
	else
		sam_size = 16;

	/*
	 * Now, find a value for yres_virtual which allows
	 * us to do ywrap scrolling.
The value of * yres_virtual must be such that the end of the * displayable frame buffer must be aligned with * the start of a font line. */ font_line_len = var->xres * var->bits_per_pixel * fontht / 8; min_size = var->xres * var->yres * var->bits_per_pixel / 8; /* * If minimum screen size is greater than that we have * available, reject it. */ if (min_size > info->fix.smem_len) return -EINVAL; /* Find int 'y', such that y * fll == s * sam < maxsize * y = s * sam / fll; s = maxsize / sam */ for (size = info->fix.smem_len; nr_y = size / font_line_len, min_size <= size; size -= sam_size) { if (nr_y * font_line_len == size) break; } nr_y *= fontht; if (var->accel_flags & FB_ACCELF_TEXT) { if (min_size > size) { /* * failed, use ypan */ size = info->fix.smem_len; var->yres_virtual = size / (font_line_len / fontht); } else var->yres_virtual = nr_y; } else if (var->yres_virtual > nr_y) var->yres_virtual = nr_y; current_par.screen_end = info->fix.smem_start + size; /* * Fix yres & yoffset if needed. 
*/ if (var->yres > var->yres_virtual) var->yres = var->yres_virtual; if (var->vmode & FB_VMODE_YWRAP) { if (var->yoffset > var->yres_virtual) var->yoffset = var->yres_virtual; } else { if (var->yoffset + var->yres > var->yres_virtual) var->yoffset = var->yres_virtual - var->yres; } /* hsync_len must be even */ var->hsync_len = (var->hsync_len + 1) & ~1; #ifdef HAS_VIDC /* left_margin must be odd */ if ((var->left_margin & 1) == 0) { var->left_margin -= 1; var->right_margin += 1; } /* right_margin must be odd */ var->right_margin |= 1; #elif defined(HAS_VIDC20) /* left_margin must be even */ if (var->left_margin & 1) { var->left_margin += 1; var->right_margin -= 1; } /* right_margin must be even */ if (var->right_margin & 1) var->right_margin += 1; #endif if (var->vsync_len < 1) var->vsync_len = 1; return 0; } static int acornfb_validate_timing(struct fb_var_screeninfo *var, struct fb_monspecs *monspecs) { unsigned long hs, vs; /* * hs(Hz) = 10^12 / (pixclock * xtotal) * vs(Hz) = hs(Hz) / ytotal * * No need to do long long divisions or anything * like that if you factor it correctly */ hs = 1953125000 / var->pixclock; hs = hs * 512 / (var->xres + var->left_margin + var->right_margin + var->hsync_len); vs = hs / (var->yres + var->upper_margin + var->lower_margin + var->vsync_len); return (vs >= monspecs->vfmin && vs <= monspecs->vfmax && hs >= monspecs->hfmin && hs <= monspecs->hfmax) ? 
0 : -EINVAL; } static inline void acornfb_update_dma(struct fb_info *info, struct fb_var_screeninfo *var) { u_int off = var->yoffset * info->fix.line_length; #if defined(HAS_MEMC) memc_write(VDMA_INIT, off >> 2); #elif defined(HAS_IOMD) iomd_writel(info->fix.smem_start + off, IOMD_VIDINIT); #endif } static int acornfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { u_int fontht; int err; /* * FIXME: Find the font height */ fontht = 8; var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.msb_right = 0; switch (var->bits_per_pixel) { case 1: case 2: case 4: case 8: var->red.offset = 0; var->red.length = var->bits_per_pixel; var->green = var->red; var->blue = var->red; var->transp.offset = 0; var->transp.length = 0; break; #ifdef HAS_VIDC20 case 16: var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 10; var->blue.length = 5; var->transp.offset = 15; var->transp.length = 1; break; case 32: var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 4; break; #endif default: return -EINVAL; } /* * Check to see if the pixel rate is valid. */ if (!acornfb_valid_pixrate(var)) return -EINVAL; /* * Validate and adjust the resolution to * match the video generator hardware. */ err = acornfb_adjust_timing(info, var, fontht); if (err) return err; /* * Validate the timing against the * monitor hardware. 
	 */
	return acornfb_validate_timing(var, &info->monspecs);
}

/*
 * Apply the mode in info->var to the hardware: choose the palette
 * size and visual for the requested depth, set the line length, and
 * (re)program the video DMA window and timing registers.
 */
static int acornfb_set_par(struct fb_info *info)
{
	switch (info->var.bits_per_pixel) {
	case 1:
		current_par.palette_size = 2;
		info->fix.visual = FB_VISUAL_MONO10;
		break;
	case 2:
		current_par.palette_size = 4;
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
		break;
	case 4:
		current_par.palette_size = 16;
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
		break;
	case 8:
		current_par.palette_size = VIDC_PALETTE_SIZE;
#ifdef HAS_VIDC
		info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
#else
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
#endif
		break;
#ifdef HAS_VIDC20
	case 16:
		current_par.palette_size = 32;
		info->fix.visual = FB_VISUAL_DIRECTCOLOR;
		break;
	case 32:
		current_par.palette_size = VIDC_PALETTE_SIZE;
		info->fix.visual = FB_VISUAL_DIRECTCOLOR;
		break;
#endif
	default:
		/* acornfb_check_var() rejects other depths, so this
		 * cannot be reached with a validated var. */
		BUG();
	}

	info->fix.line_length = (info->var.xres * info->var.bits_per_pixel) / 8;

#if defined(HAS_MEMC)
	{
		/* MEMC-based machines: DMA window is in words, and the
		 * transfer size is reserved at the end of the buffer. */
		unsigned long size = info->fix.smem_len - VDMA_XFERSIZE;

		memc_write(VDMA_START, 0);
		memc_write(VDMA_END, size >> 2);
	}
#elif defined(HAS_IOMD)
	{
		unsigned long start, size;
		u_int control;

		start = info->fix.smem_start;
		size = current_par.screen_end;

		/* The DMA end pointer must be backed off by the burst
		 * length: one half-SAM for VRAM, 16 bytes for DRAM. */
		if (current_par.using_vram) {
			size -= current_par.vram_half_sam;
			control = DMA_CR_E | (current_par.vram_half_sam / 256);
		} else {
			size -= 16;
			control = DMA_CR_E | DMA_CR_D | 16;
		}

		iomd_writel(start, IOMD_VIDSTART);
		iomd_writel(size, IOMD_VIDEND);
		iomd_writel(control, IOMD_VIDCR);
	}
#endif

	acornfb_update_dma(info, &info->var);
	acornfb_set_timing(info);

	return 0;
}

/*
 * Pan/wrap the display by reprogramming the video DMA start address.
 * Only vertical panning is possible (xpanstep is 0).
 */
static int
acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
	u_int y_bottom = var->yoffset;

	/* Without YWRAP the whole visible screen must fit below
	 * yoffset inside the virtual resolution. */
	if (!(var->vmode & FB_VMODE_YWRAP))
		y_bottom += info->var.yres;

	if (y_bottom > info->var.yres_virtual)
		return -EINVAL;

	acornfb_update_dma(info, var);

	return 0;
}

static struct fb_ops acornfb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= acornfb_check_var,
	.fb_set_par	= acornfb_set_par,
	.fb_setcolreg	= acornfb_setcolreg,
	.fb_pan_display	=
acornfb_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Everything after here is initialisation!!! */ static struct fb_videomode modedb[] __devinitdata = { { /* 320x256 @ 50Hz */ NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 640x250 @ 50Hz, 15.6 kHz hsync */ NULL, 50, 640, 250, 62500, 185, 123, 38, 21, 76, 3, 0, FB_VMODE_NONINTERLACED }, { /* 640x256 @ 50Hz, 15.6 kHz hsync */ NULL, 50, 640, 256, 62500, 185, 123, 35, 18, 76, 3, 0, FB_VMODE_NONINTERLACED }, { /* 640x512 @ 50Hz, 26.8 kHz hsync */ NULL, 50, 640, 512, 41667, 113, 87, 18, 1, 56, 3, 0, FB_VMODE_NONINTERLACED }, { /* 640x250 @ 70Hz, 31.5 kHz hsync */ NULL, 70, 640, 250, 39722, 48, 16, 109, 88, 96, 2, 0, FB_VMODE_NONINTERLACED }, { /* 640x256 @ 70Hz, 31.5 kHz hsync */ NULL, 70, 640, 256, 39722, 48, 16, 106, 85, 96, 2, 0, FB_VMODE_NONINTERLACED }, { /* 640x352 @ 70Hz, 31.5 kHz hsync */ NULL, 70, 640, 352, 39722, 48, 16, 58, 37, 96, 2, 0, FB_VMODE_NONINTERLACED }, { /* 640x480 @ 60Hz, 31.5 kHz hsync */ NULL, 60, 640, 480, 39722, 48, 16, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED }, { /* 800x600 @ 56Hz, 35.2 kHz hsync */ NULL, 56, 800, 600, 27778, 101, 23, 22, 1, 100, 2, 0, FB_VMODE_NONINTERLACED }, { /* 896x352 @ 60Hz, 21.8 kHz hsync */ NULL, 60, 896, 352, 41667, 59, 27, 9, 0, 118, 3, 0, FB_VMODE_NONINTERLACED }, { /* 1024x 768 @ 60Hz, 48.4 kHz hsync */ NULL, 60, 1024, 768, 15385, 160, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED }, { /* 1280x1024 @ 60Hz, 63.8 kHz hsync */ NULL, 60, 1280, 1024, 9090, 186, 96, 38, 1, 160, 3, 0, FB_VMODE_NONINTERLACED } }; static struct fb_videomode acornfb_default_mode __devinitdata = { .name = NULL, .refresh = 60, .xres = 640, .yres = 480, .pixclock = 39722, .left_margin = 56, .right_margin = 16, .upper_margin = 34, .lower_margin = 9, .hsync_len = 88, .vsync_len = 2, .sync = 0, .vmode = FB_VMODE_NONINTERLACED }; static void __devinit acornfb_init_fbinfo(void) 
{ static int first = 1; if (!first) return; first = 0; fb_info.fbops = &acornfb_ops; fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; fb_info.pseudo_palette = current_par.pseudo_palette; strcpy(fb_info.fix.id, "Acorn"); fb_info.fix.type = FB_TYPE_PACKED_PIXELS; fb_info.fix.type_aux = 0; fb_info.fix.xpanstep = 0; fb_info.fix.ypanstep = 1; fb_info.fix.ywrapstep = 1; fb_info.fix.line_length = 0; fb_info.fix.accel = FB_ACCEL_NONE; /* * setup initial parameters */ memset(&fb_info.var, 0, sizeof(fb_info.var)); #if defined(HAS_VIDC20) fb_info.var.red.length = 8; fb_info.var.transp.length = 4; #elif defined(HAS_VIDC) fb_info.var.red.length = 4; fb_info.var.transp.length = 1; #endif fb_info.var.green = fb_info.var.red; fb_info.var.blue = fb_info.var.red; fb_info.var.nonstd = 0; fb_info.var.activate = FB_ACTIVATE_NOW; fb_info.var.height = -1; fb_info.var.width = -1; fb_info.var.vmode = FB_VMODE_NONINTERLACED; fb_info.var.accel_flags = FB_ACCELF_TEXT; current_par.dram_size = 0; current_par.montype = -1; current_par.dpms = 0; } /* * setup acornfb options: * * mon:hmin-hmax:vmin-vmax:dpms:width:height * Set monitor parameters: * hmin = horizontal minimum frequency (Hz) * hmax = horizontal maximum frequency (Hz) (optional) * vmin = vertical minimum frequency (Hz) * vmax = vertical maximum frequency (Hz) (optional) * dpms = DPMS supported? (optional) * width = width of picture in mm. (optional) * height = height of picture in mm. (optional) * * montype:type * Set RISC-OS style monitor type: * 0 (or tv) - TV frequency * 1 (or multi) - Multi frequency * 2 (or hires) - Hi-res monochrome * 3 (or vga) - VGA * 4 (or svga) - SVGA * auto, or option missing * - try hardware detect * * dram:size * Set the amount of DRAM to use for the frame buffer * (even if you have VRAM). * size can optionally be followed by 'M' or 'K' for * MB or KB respectively. 
 */
/*
 * Parse the "mon:hmin-hmax:vmin-vmax:dpms:width:height" option string
 * described above into fb_info.monspecs / fb_info.var.  On any parse
 * or range failure the montype falls back to -1 (hardware detect).
 */
static void __devinit acornfb_parse_mon(char *opt)
{
	char *p = opt;

	/* -2 marks "explicitly configured" until validation passes. */
	current_par.montype = -2;

	fb_info.monspecs.hfmin = simple_strtoul(p, &p, 0);
	if (*p == '-')
		fb_info.monspecs.hfmax = simple_strtoul(p + 1, &p, 0);
	else
		fb_info.monspecs.hfmax = fb_info.monspecs.hfmin;

	if (*p != ':')
		goto bad;

	fb_info.monspecs.vfmin = simple_strtoul(p + 1, &p, 0);
	if (*p == '-')
		fb_info.monspecs.vfmax = simple_strtoul(p + 1, &p, 0);
	else
		fb_info.monspecs.vfmax = fb_info.monspecs.vfmin;

	/* The remaining fields are optional. */
	if (*p != ':')
		goto check_values;

	fb_info.monspecs.dpms = simple_strtoul(p + 1, &p, 0);

	if (*p != ':')
		goto check_values;

	fb_info.var.width = simple_strtoul(p + 1, &p, 0);

	if (*p != ':')
		goto check_values;

	fb_info.var.height = simple_strtoul(p + 1, NULL, 0);

check_values:
	/* Reject inverted ranges. */
	if (fb_info.monspecs.hfmax < fb_info.monspecs.hfmin ||
	    fb_info.monspecs.vfmax < fb_info.monspecs.vfmin)
		goto bad;
	return;

bad:
	printk(KERN_ERR "Acornfb: bad monitor settings: %s\n", opt);
	current_par.montype = -1;
}

/*
 * Parse the "montype:<name-or-number>[,dpms]" option into
 * current_par.montype (see the table in the comment above).
 */
static void __devinit acornfb_parse_montype(char *opt)
{
	current_par.montype = -2;

	if (strncmp(opt, "tv", 2) == 0) {
		opt += 2;
		current_par.montype = 0;
	} else if (strncmp(opt, "multi", 5) == 0) {
		opt += 5;
		current_par.montype = 1;
	} else if (strncmp(opt, "hires", 5) == 0) {
		opt += 5;
		current_par.montype = 2;
	} else if (strncmp(opt, "vga", 3) == 0) {
		opt += 3;
		current_par.montype = 3;
	} else if (strncmp(opt, "svga", 4) == 0) {
		opt += 4;
		current_par.montype = 4;
	} else if (strncmp(opt, "auto", 4) == 0) {
		opt += 4;
		current_par.montype = -1;
	} else if (isdigit(*opt))
		current_par.montype = simple_strtoul(opt, &opt, 0);

	/*
	 * NOTE(review): '>' allows montype == NR_MONTYPES through; if
	 * NR_MONTYPES is the number of entries in the monspecs table,
	 * this looks like it should be '>=' -- confirm against the
	 * table's definition before changing.
	 */
	if (current_par.montype == -2 ||
	    current_par.montype > NR_MONTYPES) {
		printk(KERN_ERR "acornfb: unknown monitor type: %s\n",
			opt);
		current_par.montype = -1;
	} else if (opt && *opt) {
		/* Anything left after the type must be the ",dpms" flag. */
		if (strcmp(opt, ",dpms") == 0)
			current_par.dpms = 1;
		else
			printk(KERN_ERR
			       "acornfb: unknown monitor option: %s\n",
			       opt);
	}
}

/*
 * Parse the "dram:size[K|M]" option into current_par.dram_size.
 */
static void __devinit acornfb_parse_dram(char *opt)
{
	unsigned int size;

	size = simple_strtoul(opt,
&opt, 0); if (opt) { switch (*opt) { case 'M': case 'm': size *= 1024; case 'K': case 'k': size *= 1024; default: break; } } current_par.dram_size = size; } static struct options { char *name; void (*parse)(char *opt); } opt_table[] __devinitdata = { { "mon", acornfb_parse_mon }, { "montype", acornfb_parse_montype }, { "dram", acornfb_parse_dram }, { NULL, NULL } }; static int __devinit acornfb_setup(char *options) { struct options *optp; char *opt; if (!options || !*options) return 0; acornfb_init_fbinfo(); while ((opt = strsep(&options, ",")) != NULL) { if (!*opt) continue; for (optp = opt_table; optp->name; optp++) { int optlen; optlen = strlen(optp->name); if (strncmp(opt, optp->name, optlen) == 0 && opt[optlen] == ':') { optp->parse(opt + optlen + 1); break; } } if (!optp->name) printk(KERN_ERR "acornfb: unknown parameter: %s\n", opt); } return 0; } /* * Detect type of monitor connected * For now, we just assume SVGA */ static int __devinit acornfb_detect_monitortype(void) { return 4; } /* * This enables the unused memory to be freed on older Acorn machines. * We are freeing memory on behalf of the architecture initialisation * code here. */ static inline void free_unused_pages(unsigned int virtual_start, unsigned int virtual_end) { int mb_freed = 0; /* * Align addresses */ virtual_start = PAGE_ALIGN(virtual_start); virtual_end = PAGE_ALIGN(virtual_end); while (virtual_start < virtual_end) { struct page *page; /* * Clear page reserved bit, * set count to 1, and free * the page. 
*/ page = virt_to_page(virtual_start); ClearPageReserved(page); init_page_count(page); free_page(virtual_start); virtual_start += PAGE_SIZE; mb_freed += PAGE_SIZE / 1024; } printk("acornfb: freed %dK memory\n", mb_freed); } static int __devinit acornfb_probe(struct platform_device *dev) { unsigned long size; u_int h_sync, v_sync; int rc, i; char *option = NULL; if (fb_get_options("acornfb", &option)) return -ENODEV; acornfb_setup(option); acornfb_init_fbinfo(); current_par.dev = &dev->dev; if (current_par.montype == -1) current_par.montype = acornfb_detect_monitortype(); if (current_par.montype == -1 || current_par.montype > NR_MONTYPES) current_par.montype = 4; if (current_par.montype >= 0) { fb_info.monspecs = monspecs[current_par.montype]; fb_info.monspecs.dpms = current_par.dpms; } /* * Try to select a suitable default mode */ for (i = 0; i < ARRAY_SIZE(modedb); i++) { unsigned long hs; hs = modedb[i].refresh * (modedb[i].yres + modedb[i].upper_margin + modedb[i].lower_margin + modedb[i].vsync_len); if (modedb[i].xres == DEFAULT_XRES && modedb[i].yres == DEFAULT_YRES && modedb[i].refresh >= fb_info.monspecs.vfmin && modedb[i].refresh <= fb_info.monspecs.vfmax && hs >= fb_info.monspecs.hfmin && hs <= fb_info.monspecs.hfmax) { acornfb_default_mode = modedb[i]; break; } } fb_info.screen_base = (char *)SCREEN_BASE; fb_info.fix.smem_start = SCREEN_START; current_par.using_vram = 0; /* * If vram_size is set, we are using VRAM in * a Risc PC. However, if the user has specified * an amount of DRAM then use that instead. */ if (vram_size && !current_par.dram_size) { size = vram_size; current_par.vram_half_sam = vram_size / 1024; current_par.using_vram = 1; } else if (current_par.dram_size) size = current_par.dram_size; else size = MAX_SIZE; /* * Limit maximum screen size. 
*/ if (size > MAX_SIZE) size = MAX_SIZE; size = PAGE_ALIGN(size); #if defined(HAS_VIDC20) if (!current_par.using_vram) { dma_addr_t handle; void *base; /* * RiscPC needs to allocate the DRAM memory * for the framebuffer if we are not using * VRAM. */ base = dma_alloc_writecombine(current_par.dev, size, &handle, GFP_KERNEL); if (base == NULL) { printk(KERN_ERR "acornfb: unable to allocate screen " "memory\n"); return -ENOMEM; } fb_info.screen_base = base; fb_info.fix.smem_start = handle; } #endif #if defined(HAS_VIDC) /* * Archimedes/A5000 machines use a fixed address for their * framebuffers. Free unused pages */ free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE); #endif fb_info.fix.smem_len = size; current_par.palette_size = VIDC_PALETTE_SIZE; /* * Lookup the timing for this resolution. If we can't * find it, then we can't restore it if we change * the resolution, so we disable this feature. */ do { rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb, ARRAY_SIZE(modedb), &acornfb_default_mode, DEFAULT_BPP); /* * If we found an exact match, all ok. */ if (rc == 1) break; rc = fb_find_mode(&fb_info.var, &fb_info, NULL, NULL, 0, &acornfb_default_mode, DEFAULT_BPP); /* * If we found an exact match, all ok. */ if (rc == 1) break; rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb, ARRAY_SIZE(modedb), &acornfb_default_mode, DEFAULT_BPP); if (rc) break; rc = fb_find_mode(&fb_info.var, &fb_info, NULL, NULL, 0, &acornfb_default_mode, DEFAULT_BPP); } while (0); /* * If we didn't find an exact match, try the * generic database. 
*/ if (rc == 0) { printk("Acornfb: no valid mode found\n"); return -EINVAL; } h_sync = 1953125000 / fb_info.var.pixclock; h_sync = h_sync * 512 / (fb_info.var.xres + fb_info.var.left_margin + fb_info.var.right_margin + fb_info.var.hsync_len); v_sync = h_sync / (fb_info.var.yres + fb_info.var.upper_margin + fb_info.var.lower_margin + fb_info.var.vsync_len); printk(KERN_INFO "Acornfb: %dkB %cRAM, %s, using %dx%d, " "%d.%03dkHz, %dHz\n", fb_info.fix.smem_len / 1024, current_par.using_vram ? 'V' : 'D', VIDC_NAME, fb_info.var.xres, fb_info.var.yres, h_sync / 1000, h_sync % 1000, v_sync); printk(KERN_INFO "Acornfb: Monitor: %d.%03d-%d.%03dkHz, %d-%dHz%s\n", fb_info.monspecs.hfmin / 1000, fb_info.monspecs.hfmin % 1000, fb_info.monspecs.hfmax / 1000, fb_info.monspecs.hfmax % 1000, fb_info.monspecs.vfmin, fb_info.monspecs.vfmax, fb_info.monspecs.dpms ? ", DPMS" : ""); if (fb_set_var(&fb_info, &fb_info.var)) printk(KERN_ERR "Acornfb: unable to set display parameters\n"); if (register_framebuffer(&fb_info) < 0) return -EINVAL; return 0; } static struct platform_driver acornfb_driver = { .probe = acornfb_probe, .driver = { .name = "acornfb", }, }; static int __init acornfb_init(void) { return platform_driver_register(&acornfb_driver); } module_init(acornfb_init); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("VIDC 1/1a/20 framebuffer driver"); MODULE_LICENSE("GPL");
gpl-2.0
allwinner-zh/linux-3.4-sunxi
drivers/scsi/scsi_devinfo.c
7276
29725
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>
#include "scsi_priv.h"

/*
 * scsi_dev_info_list: structure to hold black/white listed devices.
 * One entry per (vendor, model) pair; short strings are space padded
 * (or NUL terminated for "compatible" static entries) when copied in.
 */
struct scsi_dev_info_list {
	struct list_head dev_info_list;	/* link on a per-table list */
	char vendor[8];			/* vendor match string */
	char model[16];			/* model (product) match string */
	unsigned flags;			/* BLIST_* flags for this device */
	unsigned compatible;		/* for use with scsi_static_device_list entries */
};

/*
 * One named list of device entries; all tables hang off a master list
 * and are looked up by their numeric key.
 */
struct scsi_dev_info_list_table {
	struct list_head node;	/* our node for being on the master list */
	struct list_head scsi_dev_info_list;	/* head of dev info list */
	const char *name;	/* name of list for /proc (NULL for global) */
	int key;		/* unique numeric identifier */
};

/* Pad source for space-filling short vendor/model strings. */
static const char spaces[] = "                ";	/* 16 of them */
static unsigned scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];

/*
 * scsi_static_device_list: deprecated list of devices that require
 * settings that differ from the default, includes black-listed (broken)
 * devices. The entries here are added to the tail of scsi_dev_info_list
 * via scsi_dev_info_list_init.
 *
 * Do not add to this list, use the command line or proc interface to add
 * to the scsi_dev_info_list. This table will eventually go away.
 */
static struct {
	char *vendor;
	char *model;
	char *revision;	/* revision known to be bad, unused */
	unsigned flags;
} scsi_static_device_list[] __initdata = {
	/*
	 * The following devices are known not to tolerate a lun != 0 scan
	 * for one reason or another. Some will respond to all luns,
	 * others will lock up.
*/ {"Aashima", "IMAGERY 2400SP", "1.03", BLIST_NOLUN}, /* locks up */ {"CHINON", "CD-ROM CDS-431", "H42", BLIST_NOLUN}, /* locks up */ {"CHINON", "CD-ROM CDS-535", "Q14", BLIST_NOLUN}, /* locks up */ {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */ {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */ {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */ {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */ {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */ {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "MXT-1240S", "I1.2", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-4170S", "B5A", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-8760S", "B7B", BLIST_NOLUN}, /* locks up */ {"MEDIAVIS", "RENO CD-ROMX2A", "2.03", BLIST_NOLUN}, /* responds to all lun */ {"MICROTEK", "ScanMakerIII", "2.30", BLIST_NOLUN}, /* responds to all lun */ {"NEC", "CD-ROM DRIVE:841", "1.0", BLIST_NOLUN},/* locks up */ {"PHILIPS", "PCA80SC", "V4-2", BLIST_NOLUN}, /* responds to all lun */ {"RODIME", "RO3000S", "2.33", BLIST_NOLUN}, /* locks up */ {"SUN", "SENA", NULL, BLIST_NOLUN}, /* responds to all luns */ /* * The following causes a failed REQUEST SENSE on lun 1 for * aha152x controller, which causes SCSI code to reset bus. */ {"SANYO", "CRD-250S", "1.20", BLIST_NOLUN}, /* * The following causes a failed REQUEST SENSE on lun 1 for * aha152x controller, which causes SCSI code to reset bus. 
*/ {"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN}, {"SEAGATE", "ST296", "921", BLIST_NOLUN}, /* responds to all lun */ {"SEAGATE", "ST1581", "6538", BLIST_NOLUN}, /* responds to all lun */ {"SONY", "CD-ROM CDU-541", "4.3d", BLIST_NOLUN}, {"SONY", "CD-ROM CDU-55S", "1.0i", BLIST_NOLUN}, {"SONY", "CD-ROM CDU-561", "1.7x", BLIST_NOLUN}, {"SONY", "CD-ROM CDU-8012", NULL, BLIST_NOLUN}, {"SONY", "SDT-5000", "3.17", BLIST_SELECT_NO_ATN}, {"TANDBERG", "TDC 3600", "U07", BLIST_NOLUN}, /* locks up */ {"TEAC", "CD-R55S", "1.0H", BLIST_NOLUN}, /* locks up */ /* * The following causes a failed REQUEST SENSE on lun 1 for * seagate controller, which causes SCSI code to reset bus. */ {"TEAC", "CD-ROM", "1.06", BLIST_NOLUN}, {"TEAC", "MT-2ST/45S2-27", "RV M", BLIST_NOLUN}, /* responds to all lun */ /* * The following causes a failed REQUEST SENSE on lun 1 for * seagate controller, which causes SCSI code to reset bus. */ {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */ {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */ {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */ {"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */ {"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */ {"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */ {"NEC", "D3856", "0009", BLIST_NOLUN}, {"QUANTUM", "LPS525S", "3110", BLIST_NOLUN}, /* locks up */ {"QUANTUM", "PD1225S", "3110", BLIST_NOLUN}, /* locks up */ {"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */ {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */ {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */ {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN}, {"transtec", "T5008", "0001", BLIST_NOREPORTLUN }, {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */ {"", 
"Scanner", "1.80", BLIST_NOLUN}, /* responds to all lun */ /* * Other types of devices that have special flags. * Note that all USB devices should have the BLIST_INQUIRY_36 flag. */ {"3PARdata", "VV", NULL, BLIST_REPORTLUN2}, {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN}, {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN}, {"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN}, {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN}, {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */ {"CNSI", "G7324", NULL, BLIST_SPARSELUN}, /* Chaparral G7324 RAID */ {"CNSi", "G8324", NULL, BLIST_SPARSELUN}, /* Chaparral G8324 RAID */ {"COMPAQ", "ARRAY CONTROLLER", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_MAX_512 | BLIST_REPORTLUN2}, /* Compaq RA4x00 */ {"COMPAQ", "LOGICAL VOLUME", NULL, BLIST_FORCELUN | BLIST_MAX_512}, /* Compaq RA4x00 */ {"COMPAQ", "CR3500", NULL, BLIST_FORCELUN}, {"COMPAQ", "MSA1000", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, {"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN}, {"DEC", "HSG80", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"DELL", "PV660F", NULL, BLIST_SPARSELUN}, {"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN}, {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */ {"DELL", "PV530F", NULL, BLIST_SPARSELUN}, {"DELL", "PERCRAID", NULL, BLIST_FORCELUN}, {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN}, 
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, {"easyRAID", "F8", NULL, BLIST_NOREPORTLUN}, {"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"HITACHI", "DF400", "*", BLIST_REPORTLUN2}, {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2}, {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */ {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"HP", "C1557A", NULL, BLIST_FORCELUN}, {"HP", "C3323-300", "4269", BLIST_NOTQ}, {"HP", "C5713A", NULL, BLIST_NOREPORTLUN}, {"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN}, {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"IBM", "2105", NULL, BLIST_RETRY_HWERROR}, {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN}, 
{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN}, {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, {"INSITE", "I325VM", NULL, BLIST_KEY}, {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN}, {"MegaRAID", "LD", NULL, BLIST_FORCELUN}, {"MICROP", "4110", NULL, BLIST_NOTQ}, {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2}, {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN}, {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NEC", "iStorage", NULL, BLIST_REPORTLUN2}, {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"Promise", "", NULL, BLIST_SPARSELUN}, {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, {"SEAGATE", "ST34555N", "0930", BLIST_NOTQ}, /* Chokes on tagged INQUIRY */ {"SEAGATE", "ST3390N", "9546", BLIST_NOTQ}, {"SGI", "RAID3", "*", BLIST_SPARSELUN}, {"SGI", "RAID5", "*", BLIST_SPARSELUN}, {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SUN", "Universal Xport", "*", 
BLIST_NO_ULD_ATTACH}, {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ {"ST650211", "CF", NULL, BLIST_RETRY_HWERROR}, {"SUN", "T300", "*", BLIST_SPARSELUN}, {"SUN", "T4", "*", BLIST_SPARSELUN}, {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN}, {"Tornado-", "F4", "*", BLIST_NOREPORTLUN}, {"TOSHIBA", "CDROM", NULL, BLIST_ISROM}, {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM}, {"Traxdata", "CDR4120", NULL, BLIST_NOLUN}, /* locks up */ {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, {"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN}, {"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN}, {"WangDAT", "Model 1300", "02.4", BLIST_SELECT_NO_ATN}, {"WDC WD25", "00JB-00FUA0", NULL, BLIST_NOREPORTLUN}, {"XYRATEX", "RS", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"Zzyzx", "RocketStor 500S", NULL, BLIST_SPARSELUN}, {"Zzyzx", "RocketStor 2000", NULL, BLIST_SPARSELUN}, { NULL, NULL, NULL, 0 }, }; static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key) { struct scsi_dev_info_list_table *devinfo_table; int found = 0; list_for_each_entry(devinfo_table, &scsi_dev_info_list, node) if (devinfo_table->key == key) { found = 1; break; } if (!found) return ERR_PTR(-EINVAL); return devinfo_table; } /* * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into * devinfo vendor and model strings. */ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length, char *from, int compatible) { size_t from_length; from_length = strlen(from); strncpy(to, from, min(to_length, from_length)); if (from_length < to_length) { if (compatible) { /* * NUL terminate the string if it is short. */ to[from_length] = '\0'; } else { /* * space pad the string if it is short. 
*/ strncpy(&to[from_length], spaces, to_length - from_length); } } if (from_length > to_length) printk(KERN_WARNING "%s: %s string '%s' is too long\n", __func__, name, from); } /** * scsi_dev_info_list_add - add one dev_info list entry. * @compatible: if true, null terminate short strings. Otherwise space pad. * @vendor: vendor string * @model: model (product) string * @strflags: integer string * @flags: if strflags NULL, use this flag value * * Description: * Create and add one dev_info entry for @vendor, @model, @strflags or * @flag. If @compatible, add to the tail of the list, do not space * pad, and set devinfo->compatible. The scsi_static_device_list entries * are added with @compatible 1 and @clfags NULL. * * Returns: 0 OK, -error on failure. **/ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, char *strflags, int flags) { return scsi_dev_info_list_add_keyed(compatible, vendor, model, strflags, flags, SCSI_DEVINFO_GLOBAL); } /** * scsi_dev_info_list_add_keyed - add one dev_info list entry. * @compatible: if true, null terminate short strings. Otherwise space pad. * @vendor: vendor string * @model: model (product) string * @strflags: integer string * @flags: if strflags NULL, use this flag value * @key: specify list to use * * Description: * Create and add one dev_info entry for @vendor, @model, * @strflags or @flag in list specified by @key. If @compatible, * add to the tail of the list, do not space pad, and set * devinfo->compatible. The scsi_static_device_list entries are * added with @compatible 1 and @clfags NULL. * * Returns: 0 OK, -error on failure. 
**/ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, char *strflags, int flags, int key) { struct scsi_dev_info_list *devinfo; struct scsi_dev_info_list_table *devinfo_table = scsi_devinfo_lookup_by_key(key); if (IS_ERR(devinfo_table)) return PTR_ERR(devinfo_table); devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); if (!devinfo) { printk(KERN_ERR "%s: no memory\n", __func__); return -ENOMEM; } scsi_strcpy_devinfo("vendor", devinfo->vendor, sizeof(devinfo->vendor), vendor, compatible); scsi_strcpy_devinfo("model", devinfo->model, sizeof(devinfo->model), model, compatible); if (strflags) devinfo->flags = simple_strtoul(strflags, NULL, 0); else devinfo->flags = flags; devinfo->compatible = compatible; if (compatible) list_add_tail(&devinfo->dev_info_list, &devinfo_table->scsi_dev_info_list); else list_add(&devinfo->dev_info_list, &devinfo_table->scsi_dev_info_list); return 0; } EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); /** * scsi_dev_info_list_del_keyed - remove one dev_info list entry. * @vendor: vendor string * @model: model (product) string * @key: specify list to use * * Description: * Remove and destroy one dev_info entry for @vendor, @model * in list specified by @key. * * Returns: 0 OK, -error on failure. **/ int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key) { struct scsi_dev_info_list *devinfo, *found = NULL; struct scsi_dev_info_list_table *devinfo_table = scsi_devinfo_lookup_by_key(key); if (IS_ERR(devinfo_table)) return PTR_ERR(devinfo_table); list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list, dev_info_list) { if (devinfo->compatible) { /* * Behave like the older version of get_device_flags. */ size_t max; /* * XXX why skip leading spaces? If an odd INQUIRY * value, that should have been part of the * scsi_static_device_list[] entry, such as " FOO" * rather than "FOO". Since this code is already * here, and we don't know what device it is * trying to work with, leave it as-is. 
*/ max = 8; /* max length of vendor */ while ((max > 0) && *vendor == ' ') { max--; vendor++; } /* * XXX removing the following strlen() would be * good, using it means that for a an entry not in * the list, we scan every byte of every vendor * listed in scsi_static_device_list[], and never match * a single one (and still have to compare at * least the first byte of each vendor). */ if (memcmp(devinfo->vendor, vendor, min(max, strlen(devinfo->vendor)))) continue; /* * Skip spaces again. */ max = 16; /* max length of model */ while ((max > 0) && *model == ' ') { max--; model++; } if (memcmp(devinfo->model, model, min(max, strlen(devinfo->model)))) continue; found = devinfo; } else { if (!memcmp(devinfo->vendor, vendor, sizeof(devinfo->vendor)) && !memcmp(devinfo->model, model, sizeof(devinfo->model))) found = devinfo; } if (found) break; } if (found) { list_del(&found->dev_info_list); kfree(found); return 0; } return -ENOENT; } EXPORT_SYMBOL(scsi_dev_info_list_del_keyed); /** * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. * @dev_list: string of device flags to add * * Description: * Parse dev_list, and add entries to the scsi_dev_info_list. * dev_list is of the form "vendor:product:flag,vendor:product:flag". * dev_list is modified via strsep. Can be called for command line * addition, for proc or mabye a sysfs interface. * * Returns: 0 if OK, -error on failure. **/ static int scsi_dev_info_list_add_str(char *dev_list) { char *vendor, *model, *strflags, *next; char *next_check; int res = 0; next = dev_list; if (next && next[0] == '"') { /* * Ignore both the leading and trailing quote. */ next++; next_check = ",\""; } else { next_check = ","; } /* * For the leading and trailing '"' case, the for loop comes * through the last time with vendor[0] == '\0'. 
*/ for (vendor = strsep(&next, ":"); vendor && (vendor[0] != '\0') && (res == 0); vendor = strsep(&next, ":")) { strflags = NULL; model = strsep(&next, ":"); if (model) strflags = strsep(&next, next_check); if (!model || !strflags) { printk(KERN_ERR "%s: bad dev info string '%s' '%s'" " '%s'\n", __func__, vendor, model, strflags); res = -EINVAL; } else res = scsi_dev_info_list_add(0 /* compatible */, vendor, model, strflags, 0); } return res; } /** * get_device_flags - get device specific flags from the dynamic device list. * @sdev: &scsi_device to get flags for * @vendor: vendor name * @model: model name * * Description: * Search the global scsi_dev_info_list (specified by list zero) * for an entry matching @vendor and @model, if found, return the * matching flags value, else return the host or global default * settings. Called during scan time. **/ int scsi_get_device_flags(struct scsi_device *sdev, const unsigned char *vendor, const unsigned char *model) { return scsi_get_device_flags_keyed(sdev, vendor, model, SCSI_DEVINFO_GLOBAL); } /** * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list * @sdev: &scsi_device to get flags for * @vendor: vendor name * @model: model name * @key: list to look up * * Description: * Search the scsi_dev_info_list specified by @key for an entry * matching @vendor and @model, if found, return the matching * flags value, else return the host or global default settings. * Called during scan time. **/ int scsi_get_device_flags_keyed(struct scsi_device *sdev, const unsigned char *vendor, const unsigned char *model, int key) { struct scsi_dev_info_list *devinfo; struct scsi_dev_info_list_table *devinfo_table; devinfo_table = scsi_devinfo_lookup_by_key(key); if (IS_ERR(devinfo_table)) return PTR_ERR(devinfo_table); list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list, dev_info_list) { if (devinfo->compatible) { /* * Behave like the older version of get_device_flags. 
*/ size_t max; /* * XXX why skip leading spaces? If an odd INQUIRY * value, that should have been part of the * scsi_static_device_list[] entry, such as " FOO" * rather than "FOO". Since this code is already * here, and we don't know what device it is * trying to work with, leave it as-is. */ max = 8; /* max length of vendor */ while ((max > 0) && *vendor == ' ') { max--; vendor++; } /* * XXX removing the following strlen() would be * good, using it means that for a an entry not in * the list, we scan every byte of every vendor * listed in scsi_static_device_list[], and never match * a single one (and still have to compare at * least the first byte of each vendor). */ if (memcmp(devinfo->vendor, vendor, min(max, strlen(devinfo->vendor)))) continue; /* * Skip spaces again. */ max = 16; /* max length of model */ while ((max > 0) && *model == ' ') { max--; model++; } if (memcmp(devinfo->model, model, min(max, strlen(devinfo->model)))) continue; return devinfo->flags; } else { if (!memcmp(devinfo->vendor, vendor, sizeof(devinfo->vendor)) && !memcmp(devinfo->model, model, sizeof(devinfo->model))) return devinfo->flags; } } /* nothing found, return nothing */ if (key != SCSI_DEVINFO_GLOBAL) return 0; /* except for the global list, where we have an exception */ if (sdev->sdev_bflags) return sdev->sdev_bflags; return scsi_default_dev_flags; } EXPORT_SYMBOL(scsi_get_device_flags_keyed); #ifdef CONFIG_SCSI_PROC_FS struct double_list { struct list_head *top; struct list_head *bottom; }; static int devinfo_seq_show(struct seq_file *m, void *v) { struct double_list *dl = v; struct scsi_dev_info_list_table *devinfo_table = list_entry(dl->top, struct scsi_dev_info_list_table, node); struct scsi_dev_info_list *devinfo = list_entry(dl->bottom, struct scsi_dev_info_list, dev_info_list); if (devinfo_table->scsi_dev_info_list.next == dl->bottom && devinfo_table->name) seq_printf(m, "[%s]:\n", devinfo_table->name); seq_printf(m, "'%.8s' '%.16s' 0x%x\n", devinfo->vendor, devinfo->model, 
devinfo->flags); return 0; } static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos) { struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL); loff_t pos = *ppos; if (!dl) return NULL; list_for_each(dl->top, &scsi_dev_info_list) { struct scsi_dev_info_list_table *devinfo_table = list_entry(dl->top, struct scsi_dev_info_list_table, node); list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list) if (pos-- == 0) return dl; } kfree(dl); return NULL; } static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos) { struct double_list *dl = v; struct scsi_dev_info_list_table *devinfo_table = list_entry(dl->top, struct scsi_dev_info_list_table, node); ++*ppos; dl->bottom = dl->bottom->next; while (&devinfo_table->scsi_dev_info_list == dl->bottom) { dl->top = dl->top->next; if (dl->top == &scsi_dev_info_list) { kfree(dl); return NULL; } devinfo_table = list_entry(dl->top, struct scsi_dev_info_list_table, node); dl->bottom = devinfo_table->scsi_dev_info_list.next; } return dl; } static void devinfo_seq_stop(struct seq_file *m, void *v) { kfree(v); } static const struct seq_operations scsi_devinfo_seq_ops = { .start = devinfo_seq_start, .next = devinfo_seq_next, .stop = devinfo_seq_stop, .show = devinfo_seq_show, }; static int proc_scsi_devinfo_open(struct inode *inode, struct file *file) { return seq_open(file, &scsi_devinfo_seq_ops); } /* * proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc. * * Description: Adds a black/white list entry for vendor and model with an * integer value of flag to the scsi device info list. 
* To use, echo "vendor:model:flag" > /proc/scsi/device_info */ static ssize_t proc_scsi_devinfo_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { char *buffer; ssize_t err = length; if (!buf || length>PAGE_SIZE) return -EINVAL; if (!(buffer = (char *) __get_free_page(GFP_KERNEL))) return -ENOMEM; if (copy_from_user(buffer, buf, length)) { err =-EFAULT; goto out; } if (length < PAGE_SIZE) buffer[length] = '\0'; else if (buffer[PAGE_SIZE-1]) { err = -EINVAL; goto out; } scsi_dev_info_list_add_str(buffer); out: free_page((unsigned long)buffer); return err; } static const struct file_operations scsi_devinfo_proc_fops = { .owner = THIS_MODULE, .open = proc_scsi_devinfo_open, .read = seq_read, .write = proc_scsi_devinfo_write, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_SCSI_PROC_FS */ module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0); MODULE_PARM_DESC(dev_flags, "Given scsi_dev_flags=vendor:model:flags[,v:m:f] add black/white" " list entries for vendor and model with an integer value of flags" " to the scsi device info list"); module_param_named(default_dev_flags, scsi_default_dev_flags, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(default_dev_flags, "scsi default device flag integer value"); /** * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list **/ void scsi_exit_devinfo(void) { #ifdef CONFIG_SCSI_PROC_FS remove_proc_entry("scsi/device_info", NULL); #endif scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL); } /** * scsi_dev_info_add_list - add a new devinfo list * @key: key of the list to add * @name: Name of the list to add (for /proc/scsi/device_info) * * Adds the requested list, returns zero on success, -EEXIST if the * key is already registered to a list, or other error on failure. 
*/ int scsi_dev_info_add_list(int key, const char *name) { struct scsi_dev_info_list_table *devinfo_table = scsi_devinfo_lookup_by_key(key); if (!IS_ERR(devinfo_table)) /* list already exists */ return -EEXIST; devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL); if (!devinfo_table) return -ENOMEM; INIT_LIST_HEAD(&devinfo_table->node); INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list); devinfo_table->name = name; devinfo_table->key = key; list_add_tail(&devinfo_table->node, &scsi_dev_info_list); return 0; } EXPORT_SYMBOL(scsi_dev_info_add_list); /** * scsi_dev_info_remove_list - destroy an added devinfo list * @key: key of the list to destroy * * Iterates over the entire list first, freeing all the values, then * frees the list itself. Returns 0 on success or -EINVAL if the key * can't be found. */ int scsi_dev_info_remove_list(int key) { struct list_head *lh, *lh_next; struct scsi_dev_info_list_table *devinfo_table = scsi_devinfo_lookup_by_key(key); if (IS_ERR(devinfo_table)) /* no such list */ return -EINVAL; /* remove from the master list */ list_del(&devinfo_table->node); list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) { struct scsi_dev_info_list *devinfo; devinfo = list_entry(lh, struct scsi_dev_info_list, dev_info_list); kfree(devinfo); } kfree(devinfo_table); return 0; } EXPORT_SYMBOL(scsi_dev_info_remove_list); /** * scsi_init_devinfo - set up the dynamic device list. * * Description: * Add command line entries from scsi_dev_flags, then add * scsi_static_device_list entries to the scsi device info list. 
*/ int __init scsi_init_devinfo(void) { #ifdef CONFIG_SCSI_PROC_FS struct proc_dir_entry *p; #endif int error, i; error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL); if (error) return error; error = scsi_dev_info_list_add_str(scsi_dev_flags); if (error) goto out; for (i = 0; scsi_static_device_list[i].vendor; i++) { error = scsi_dev_info_list_add(1 /* compatibile */, scsi_static_device_list[i].vendor, scsi_static_device_list[i].model, NULL, scsi_static_device_list[i].flags); if (error) goto out; } #ifdef CONFIG_SCSI_PROC_FS p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops); if (!p) { error = -ENOMEM; goto out; } #endif /* CONFIG_SCSI_PROC_FS */ out: if (error) scsi_exit_devinfo(); return error; }
gpl-2.0
DC07/spirit_ghost
arch/blackfin/mach-bf537/ints-priority.c
11116
6743
/* * Copyright 2005-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. * * Set up the interrupt priorities */ #include <linux/module.h> #include <linux/irq.h> #include <asm/blackfin.h> #include <asm/irq_handler.h> #include <asm/bfin5xx_spi.h> #include <asm/bfin_sport.h> #include <asm/bfin_can.h> #include <asm/bfin_dma.h> #include <asm/dpmc.h> void __init program_IAR(void) { /* Program the IAR0 Register with the configured priority */ bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) | ((CONFIG_IRQ_DMA_ERROR - 7) << IRQ_DMA_ERROR_POS) | ((CONFIG_IRQ_ERROR - 7) << IRQ_ERROR_POS) | ((CONFIG_IRQ_RTC - 7) << IRQ_RTC_POS) | ((CONFIG_IRQ_PPI - 7) << IRQ_PPI_POS) | ((CONFIG_IRQ_SPORT0_RX - 7) << IRQ_SPORT0_RX_POS) | ((CONFIG_IRQ_SPORT0_TX - 7) << IRQ_SPORT0_TX_POS) | ((CONFIG_IRQ_SPORT1_RX - 7) << IRQ_SPORT1_RX_POS)); bfin_write_SIC_IAR1(((CONFIG_IRQ_SPORT1_TX - 7) << IRQ_SPORT1_TX_POS) | ((CONFIG_IRQ_TWI - 7) << IRQ_TWI_POS) | ((CONFIG_IRQ_SPI - 7) << IRQ_SPI_POS) | ((CONFIG_IRQ_UART0_RX - 7) << IRQ_UART0_RX_POS) | ((CONFIG_IRQ_UART0_TX - 7) << IRQ_UART0_TX_POS) | ((CONFIG_IRQ_UART1_RX - 7) << IRQ_UART1_RX_POS) | ((CONFIG_IRQ_UART1_TX - 7) << IRQ_UART1_TX_POS) | ((CONFIG_IRQ_CAN_RX - 7) << IRQ_CAN_RX_POS)); bfin_write_SIC_IAR2(((CONFIG_IRQ_CAN_TX - 7) << IRQ_CAN_TX_POS) | ((CONFIG_IRQ_MAC_RX - 7) << IRQ_MAC_RX_POS) | ((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) | ((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) | ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) | ((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) | ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) | ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS)); bfin_write_SIC_IAR3(((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) | ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) | ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS) | ((CONFIG_IRQ_PROG_INTA - 7) << IRQ_PROG_INTA_POS) | ((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) | ((CONFIG_IRQ_MEM_DMA0 - 7) << IRQ_MEM_DMA0_POS) | ((CONFIG_IRQ_MEM_DMA1 - 7) << 
IRQ_MEM_DMA1_POS) | ((CONFIG_IRQ_WATCH - 7) << IRQ_WATCH_POS)); SSYNC(); } #define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE) /* SPI_STAT */ #define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORT_STAT */ #define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */ #define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */ #define UART_ERR_MASK (0x6) /* UART_IIR */ #define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */ static int error_int_mask; static void bf537_generic_error_mask_irq(struct irq_data *d) { error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR)); if (!error_int_mask) bfin_internal_mask_irq(IRQ_GENERIC_ERROR); } static void bf537_generic_error_unmask_irq(struct irq_data *d) { bfin_internal_unmask_irq(IRQ_GENERIC_ERROR); error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR); } static struct irq_chip bf537_generic_error_irqchip = { .name = "ERROR", .irq_ack = bfin_ack_noop, .irq_mask_ack = bf537_generic_error_mask_irq, .irq_mask = bf537_generic_error_mask_irq, .irq_unmask = bf537_generic_error_unmask_irq, }; static void bf537_demux_error_irq(unsigned int int_err_irq, struct irq_desc *inta_desc) { int irq = 0; #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK) irq = IRQ_MAC_ERROR; else #endif if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK) irq = IRQ_SPORT0_ERROR; else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK) irq = IRQ_SPORT1_ERROR; else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK) irq = IRQ_PPI_ERROR; else if (bfin_read_CAN_GIF() & CAN_ERR_MASK) irq = IRQ_CAN_ERROR; else if (bfin_read_SPI_STAT() & SPI_ERR_MASK) irq = IRQ_SPI_ERROR; else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK) irq = IRQ_UART0_ERROR; else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK) irq = IRQ_UART1_ERROR; if (irq) { if (error_int_mask & (1L << (irq - 
IRQ_PPI_ERROR))) bfin_handle_irq(irq); else { switch (irq) { case IRQ_PPI_ERROR: bfin_write_PPI_STATUS(PPI_ERR_MASK); break; #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) case IRQ_MAC_ERROR: bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK); break; #endif case IRQ_SPORT0_ERROR: bfin_write_SPORT0_STAT(SPORT_ERR_MASK); break; case IRQ_SPORT1_ERROR: bfin_write_SPORT1_STAT(SPORT_ERR_MASK); break; case IRQ_CAN_ERROR: bfin_write_CAN_GIS(CAN_ERR_MASK); break; case IRQ_SPI_ERROR: bfin_write_SPI_STAT(SPI_ERR_MASK); break; default: break; } pr_debug("IRQ %d:" " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n", irq); } } else pr_err("%s: IRQ ?: PERIPHERAL ERROR INTERRUPT ASSERTED BUT NO SOURCE FOUND\n", __func__); } #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static int mac_rx_int_mask; static void bf537_mac_rx_mask_irq(struct irq_data *d) { mac_rx_int_mask &= ~(1L << (d->irq - IRQ_MAC_RX)); if (!mac_rx_int_mask) bfin_internal_mask_irq(IRQ_PH_INTA_MAC_RX); } static void bf537_mac_rx_unmask_irq(struct irq_data *d) { bfin_internal_unmask_irq(IRQ_PH_INTA_MAC_RX); mac_rx_int_mask |= 1L << (d->irq - IRQ_MAC_RX); } static struct irq_chip bf537_mac_rx_irqchip = { .name = "ERROR", .irq_ack = bfin_ack_noop, .irq_mask_ack = bf537_mac_rx_mask_irq, .irq_mask = bf537_mac_rx_mask_irq, .irq_unmask = bf537_mac_rx_unmask_irq, }; static void bf537_demux_mac_rx_irq(unsigned int int_irq, struct irq_desc *desc) { if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR)) bfin_handle_irq(IRQ_MAC_RX); else bfin_demux_gpio_irq(int_irq, desc); } #endif void __init init_mach_irq(void) { int irq; #if defined(CONFIG_BF537) || defined(CONFIG_BF536) /* Clear EMAC Interrupt Status bits so we can demux it later */ bfin_write_EMAC_SYSTAT(-1); #endif irq_set_chained_handler(IRQ_GENERIC_ERROR, bf537_demux_error_irq); for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) irq_set_chip_and_handler(irq, &bf537_generic_error_irqchip, handle_level_irq); #if defined(CONFIG_BFIN_MAC) || 
defined(CONFIG_BFIN_MAC_MODULE) irq_set_chained_handler(IRQ_PH_INTA_MAC_RX, bf537_demux_mac_rx_irq); irq_set_chip_and_handler(IRQ_MAC_RX, &bf537_mac_rx_irqchip, handle_level_irq); irq_set_chip_and_handler(IRQ_PORTH_INTA, &bf537_mac_rx_irqchip, handle_level_irq); irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq); #endif }
gpl-2.0
rachitrawat/Vengeance-Kernel-MSM7x27-Nanhu
arch/blackfin/mach-bf537/ints-priority.c
11116
6743
/* * Copyright 2005-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. * * Set up the interrupt priorities */ #include <linux/module.h> #include <linux/irq.h> #include <asm/blackfin.h> #include <asm/irq_handler.h> #include <asm/bfin5xx_spi.h> #include <asm/bfin_sport.h> #include <asm/bfin_can.h> #include <asm/bfin_dma.h> #include <asm/dpmc.h> void __init program_IAR(void) { /* Program the IAR0 Register with the configured priority */ bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) | ((CONFIG_IRQ_DMA_ERROR - 7) << IRQ_DMA_ERROR_POS) | ((CONFIG_IRQ_ERROR - 7) << IRQ_ERROR_POS) | ((CONFIG_IRQ_RTC - 7) << IRQ_RTC_POS) | ((CONFIG_IRQ_PPI - 7) << IRQ_PPI_POS) | ((CONFIG_IRQ_SPORT0_RX - 7) << IRQ_SPORT0_RX_POS) | ((CONFIG_IRQ_SPORT0_TX - 7) << IRQ_SPORT0_TX_POS) | ((CONFIG_IRQ_SPORT1_RX - 7) << IRQ_SPORT1_RX_POS)); bfin_write_SIC_IAR1(((CONFIG_IRQ_SPORT1_TX - 7) << IRQ_SPORT1_TX_POS) | ((CONFIG_IRQ_TWI - 7) << IRQ_TWI_POS) | ((CONFIG_IRQ_SPI - 7) << IRQ_SPI_POS) | ((CONFIG_IRQ_UART0_RX - 7) << IRQ_UART0_RX_POS) | ((CONFIG_IRQ_UART0_TX - 7) << IRQ_UART0_TX_POS) | ((CONFIG_IRQ_UART1_RX - 7) << IRQ_UART1_RX_POS) | ((CONFIG_IRQ_UART1_TX - 7) << IRQ_UART1_TX_POS) | ((CONFIG_IRQ_CAN_RX - 7) << IRQ_CAN_RX_POS)); bfin_write_SIC_IAR2(((CONFIG_IRQ_CAN_TX - 7) << IRQ_CAN_TX_POS) | ((CONFIG_IRQ_MAC_RX - 7) << IRQ_MAC_RX_POS) | ((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) | ((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) | ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) | ((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) | ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) | ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS)); bfin_write_SIC_IAR3(((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) | ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) | ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS) | ((CONFIG_IRQ_PROG_INTA - 7) << IRQ_PROG_INTA_POS) | ((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) | ((CONFIG_IRQ_MEM_DMA0 - 7) << IRQ_MEM_DMA0_POS) | ((CONFIG_IRQ_MEM_DMA1 - 7) << 
IRQ_MEM_DMA1_POS) | ((CONFIG_IRQ_WATCH - 7) << IRQ_WATCH_POS)); SSYNC(); } #define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE) /* SPI_STAT */ #define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORT_STAT */ #define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */ #define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */ #define UART_ERR_MASK (0x6) /* UART_IIR */ #define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */ static int error_int_mask; static void bf537_generic_error_mask_irq(struct irq_data *d) { error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR)); if (!error_int_mask) bfin_internal_mask_irq(IRQ_GENERIC_ERROR); } static void bf537_generic_error_unmask_irq(struct irq_data *d) { bfin_internal_unmask_irq(IRQ_GENERIC_ERROR); error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR); } static struct irq_chip bf537_generic_error_irqchip = { .name = "ERROR", .irq_ack = bfin_ack_noop, .irq_mask_ack = bf537_generic_error_mask_irq, .irq_mask = bf537_generic_error_mask_irq, .irq_unmask = bf537_generic_error_unmask_irq, }; static void bf537_demux_error_irq(unsigned int int_err_irq, struct irq_desc *inta_desc) { int irq = 0; #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK) irq = IRQ_MAC_ERROR; else #endif if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK) irq = IRQ_SPORT0_ERROR; else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK) irq = IRQ_SPORT1_ERROR; else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK) irq = IRQ_PPI_ERROR; else if (bfin_read_CAN_GIF() & CAN_ERR_MASK) irq = IRQ_CAN_ERROR; else if (bfin_read_SPI_STAT() & SPI_ERR_MASK) irq = IRQ_SPI_ERROR; else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK) irq = IRQ_UART0_ERROR; else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK) irq = IRQ_UART1_ERROR; if (irq) { if (error_int_mask & (1L << (irq - 
IRQ_PPI_ERROR))) bfin_handle_irq(irq); else { switch (irq) { case IRQ_PPI_ERROR: bfin_write_PPI_STATUS(PPI_ERR_MASK); break; #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) case IRQ_MAC_ERROR: bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK); break; #endif case IRQ_SPORT0_ERROR: bfin_write_SPORT0_STAT(SPORT_ERR_MASK); break; case IRQ_SPORT1_ERROR: bfin_write_SPORT1_STAT(SPORT_ERR_MASK); break; case IRQ_CAN_ERROR: bfin_write_CAN_GIS(CAN_ERR_MASK); break; case IRQ_SPI_ERROR: bfin_write_SPI_STAT(SPI_ERR_MASK); break; default: break; } pr_debug("IRQ %d:" " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n", irq); } } else pr_err("%s: IRQ ?: PERIPHERAL ERROR INTERRUPT ASSERTED BUT NO SOURCE FOUND\n", __func__); } #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static int mac_rx_int_mask; static void bf537_mac_rx_mask_irq(struct irq_data *d) { mac_rx_int_mask &= ~(1L << (d->irq - IRQ_MAC_RX)); if (!mac_rx_int_mask) bfin_internal_mask_irq(IRQ_PH_INTA_MAC_RX); } static void bf537_mac_rx_unmask_irq(struct irq_data *d) { bfin_internal_unmask_irq(IRQ_PH_INTA_MAC_RX); mac_rx_int_mask |= 1L << (d->irq - IRQ_MAC_RX); } static struct irq_chip bf537_mac_rx_irqchip = { .name = "ERROR", .irq_ack = bfin_ack_noop, .irq_mask_ack = bf537_mac_rx_mask_irq, .irq_mask = bf537_mac_rx_mask_irq, .irq_unmask = bf537_mac_rx_unmask_irq, }; static void bf537_demux_mac_rx_irq(unsigned int int_irq, struct irq_desc *desc) { if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR)) bfin_handle_irq(IRQ_MAC_RX); else bfin_demux_gpio_irq(int_irq, desc); } #endif void __init init_mach_irq(void) { int irq; #if defined(CONFIG_BF537) || defined(CONFIG_BF536) /* Clear EMAC Interrupt Status bits so we can demux it later */ bfin_write_EMAC_SYSTAT(-1); #endif irq_set_chained_handler(IRQ_GENERIC_ERROR, bf537_demux_error_irq); for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) irq_set_chip_and_handler(irq, &bf537_generic_error_irqchip, handle_level_irq); #if defined(CONFIG_BFIN_MAC) || 
defined(CONFIG_BFIN_MAC_MODULE) irq_set_chained_handler(IRQ_PH_INTA_MAC_RX, bf537_demux_mac_rx_irq); irq_set_chip_and_handler(IRQ_MAC_RX, &bf537_mac_rx_irqchip, handle_level_irq); irq_set_chip_and_handler(IRQ_PORTH_INTA, &bf537_mac_rx_irqchip, handle_level_irq); irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq); #endif }
gpl-2.0
HandyMenny/android_kernel_sony_u8500
drivers/gpio/gpio-mc33880.c
109
4357
/* * MC33880 high-side/low-side switch GPIO driver * Copyright (c) 2009 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports: * Freescale MC33880 high-side/low-side switch */ #include <linux/init.h> #include <linux/mutex.h> #include <linux/spi/spi.h> #include <linux/spi/mc33880.h> #include <linux/gpio.h> #include <linux/slab.h> #define DRIVER_NAME "mc33880" /* * Pin configurations, see MAX7301 datasheet page 6 */ #define PIN_CONFIG_MASK 0x03 #define PIN_CONFIG_IN_PULLUP 0x03 #define PIN_CONFIG_IN_WO_PULLUP 0x02 #define PIN_CONFIG_OUT 0x01 #define PIN_NUMBER 8 /* * Some registers must be read back to modify. 
* To save time we cache them here in memory */ struct mc33880 { struct mutex lock; /* protect from simultaneous accesses */ u8 port_config; struct gpio_chip chip; struct spi_device *spi; }; static int mc33880_write_config(struct mc33880 *mc) { return spi_write(mc->spi, &mc->port_config, sizeof(mc->port_config)); } static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value) { if (value) mc->port_config |= 1 << offset; else mc->port_config &= ~(1 << offset); return mc33880_write_config(mc); } static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value) { struct mc33880 *mc = container_of(chip, struct mc33880, chip); mutex_lock(&mc->lock); __mc33880_set(mc, offset, value); mutex_unlock(&mc->lock); } static int __devinit mc33880_probe(struct spi_device *spi) { struct mc33880 *mc; struct mc33880_platform_data *pdata; int ret; pdata = spi->dev.platform_data; if (!pdata || !pdata->base) { dev_dbg(&spi->dev, "incorrect or missing platform data\n"); return -EINVAL; } /* * bits_per_word cannot be configured in platform data */ spi->bits_per_word = 8; ret = spi_setup(spi); if (ret < 0) return ret; mc = kzalloc(sizeof(struct mc33880), GFP_KERNEL); if (!mc) return -ENOMEM; mutex_init(&mc->lock); dev_set_drvdata(&spi->dev, mc); mc->spi = spi; mc->chip.label = DRIVER_NAME, mc->chip.set = mc33880_set; mc->chip.base = pdata->base; mc->chip.ngpio = PIN_NUMBER; mc->chip.can_sleep = 1; mc->chip.dev = &spi->dev; mc->chip.owner = THIS_MODULE; mc->port_config = 0x00; /* write twice, because during initialisation the first setting * is just for testing SPI communication, and the second is the * "real" configuration */ ret = mc33880_write_config(mc); mc->port_config = 0x00; if (!ret) ret = mc33880_write_config(mc); if (ret) { printk(KERN_ERR "Failed writing to " DRIVER_NAME ": %d\n", ret); goto exit_destroy; } ret = gpiochip_add(&mc->chip); if (ret) goto exit_destroy; return ret; exit_destroy: dev_set_drvdata(&spi->dev, NULL); mutex_destroy(&mc->lock); 
kfree(mc); return ret; } static int __devexit mc33880_remove(struct spi_device *spi) { struct mc33880 *mc; int ret; mc = dev_get_drvdata(&spi->dev); if (mc == NULL) return -ENODEV; dev_set_drvdata(&spi->dev, NULL); ret = gpiochip_remove(&mc->chip); if (!ret) { mutex_destroy(&mc->lock); kfree(mc); } else dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n", ret); return ret; } static struct spi_driver mc33880_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, .probe = mc33880_probe, .remove = __devexit_p(mc33880_remove), }; static int __init mc33880_init(void) { return spi_register_driver(&mc33880_driver); } /* register after spi postcore initcall and before * subsys initcalls that may rely on these GPIOs */ subsys_initcall(mc33880_init); static void __exit mc33880_exit(void) { spi_unregister_driver(&mc33880_driver); } module_exit(mc33880_exit); MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
wooshy1/kernel-olympus-3.1
drivers/misc/inv_mpu/slaveirq.c
109
6679
/* $License: Copyright (C) 2011 InvenSense Corporation, All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. $ */ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/stat.h> #include <linux/irq.h> #include <linux/signal.h> #include <linux/miscdevice.h> #include <linux/i2c.h> #include <linux/i2c-dev.h> #include <linux/poll.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/mpu.h> #include "slaveirq.h" #include "mldl_cfg.h" /* function which gets slave data and sends it to SLAVE */ struct slaveirq_dev_data { struct miscdevice dev; struct i2c_client *slave_client; struct mpuirq_data data; wait_queue_head_t slaveirq_wait; int irq; int pid; int data_ready; int timeout; }; /* The following depends on patch fa1f68db6ca7ebb6fc4487ac215bffba06c01c28 * drivers: misc: pass miscdevice pointer via file private data */ static int slaveirq_open(struct inode *inode, struct file *file) { /* Device node is availabe in the file->private_data, this is * exactly what we want so we leave it there */ struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); dev_dbg(data->dev.this_device, "%s current->pid 
%d\n", __func__, current->pid); data->pid = current->pid; return 0; } static int slaveirq_release(struct inode *inode, struct file *file) { struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); dev_dbg(data->dev.this_device, "slaveirq_release\n"); return 0; } /* read function called when from /dev/slaveirq is read */ static ssize_t slaveirq_read(struct file *file, char *buf, size_t count, loff_t *ppos) { int len, err; struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); if (!data->data_ready && data->timeout && !(file->f_flags & O_NONBLOCK)) { wait_event_interruptible_timeout(data->slaveirq_wait, data->data_ready, data->timeout); } if (data->data_ready && NULL != buf && count >= sizeof(data->data)) { err = copy_to_user(buf, &data->data, sizeof(data->data)); data->data.data_type = 0; } else { return 0; } if (err != 0) { dev_err(data->dev.this_device, "Copy to user returned %d\n", err); return -EFAULT; } data->data_ready = 0; len = sizeof(data->data); return len; } static unsigned int slaveirq_poll(struct file *file, struct poll_table_struct *poll) { int mask = 0; struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); poll_wait(file, &data->slaveirq_wait, poll); if (data->data_ready) mask |= POLLIN | POLLRDNORM; return mask; } /* ioctl - I/O control */ static long slaveirq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int retval = 0; int tmp; struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); switch (cmd) { case SLAVEIRQ_SET_TIMEOUT: data->timeout = arg; break; case SLAVEIRQ_GET_INTERRUPT_CNT: tmp = data->data.interruptcount - 1; if (data->data.interruptcount > 1) data->data.interruptcount = 1; if (copy_to_user((int *)arg, &tmp, sizeof(int))) return -EFAULT; break; case SLAVEIRQ_GET_IRQ_TIME: if (copy_to_user((int *)arg, &data->data.irqtime, sizeof(data->data.irqtime))) 
return -EFAULT; data->data.irqtime = 0; break; default: retval = -EINVAL; } return retval; } static irqreturn_t slaveirq_handler(int irq, void *dev_id) { struct slaveirq_dev_data *data = (struct slaveirq_dev_data *)dev_id; static int mycount; struct timeval irqtime; mycount++; data->data.interruptcount++; /* wake up (unblock) for reading data from userspace */ data->data_ready = 1; do_gettimeofday(&irqtime); data->data.irqtime = (((long long)irqtime.tv_sec) << 32); data->data.irqtime += irqtime.tv_usec; data->data.data_type |= 1; wake_up_interruptible(&data->slaveirq_wait); return IRQ_HANDLED; } /* define which file operations are supported */ static const struct file_operations slaveirq_fops = { .owner = THIS_MODULE, .read = slaveirq_read, .poll = slaveirq_poll, #if HAVE_COMPAT_IOCTL .compat_ioctl = slaveirq_ioctl, #endif #if HAVE_UNLOCKED_IOCTL .unlocked_ioctl = slaveirq_ioctl, #endif .open = slaveirq_open, .release = slaveirq_release, }; int slaveirq_init(struct i2c_adapter *slave_adapter, struct ext_slave_platform_data *pdata, char *name) { int res; struct slaveirq_dev_data *data; if (!pdata->irq) return -EINVAL; pdata->irq_data = kzalloc(sizeof(*data), GFP_KERNEL); data = (struct slaveirq_dev_data *)pdata->irq_data; if (!data) return -ENOMEM; data->dev.minor = MISC_DYNAMIC_MINOR; data->dev.name = name; data->dev.fops = &slaveirq_fops; data->irq = pdata->irq; data->pid = 0; data->data_ready = 0; data->timeout = 0; init_waitqueue_head(&data->slaveirq_wait); res = request_irq(data->irq, slaveirq_handler, IRQF_TRIGGER_RISING | IRQF_SHARED, data->dev.name, data); if (res) { dev_err(&slave_adapter->dev, "myirqtest: cannot register IRQ %d\n", data->irq); goto out_request_irq; } res = misc_register(&data->dev); if (res < 0) { dev_err(&slave_adapter->dev, "misc_register returned %d\n", res); goto out_misc_register; } return res; out_misc_register: free_irq(data->irq, data); out_request_irq: kfree(pdata->irq_data); pdata->irq_data = NULL; return res; } 
EXPORT_SYMBOL(slaveirq_init); void slaveirq_exit(struct ext_slave_platform_data *pdata) { struct slaveirq_dev_data *data = pdata->irq_data; if (!pdata->irq_data || data->irq <= 0) return; dev_info(data->dev.this_device, "Unregistering %s\n", data->dev.name); free_irq(data->irq, data); misc_deregister(&data->dev); kfree(pdata->irq_data); pdata->irq_data = NULL; } EXPORT_SYMBOL(slaveirq_exit);
gpl-2.0
JonnyH/pyra-kernel
fs/nilfs2/file.c
621
4403
/* * file.c - NILFS regular file handling primitives including fsync(). * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Amagai Yoshiji <amagai@osrg.net>, * Ryusuke Konishi <ryusuke@osrg.net> */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/writeback.h> #include "nilfs.h" #include "segment.h" int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { /* * Called from fsync() system call * This is the only entry point that can catch write and synch * timing for both data blocks and intermediate blocks. * * This function should be implemented when the writeback function * will be implemented. 
*/ struct the_nilfs *nilfs; struct inode *inode = file->f_mapping->host; int err = 0; if (nilfs_inode_dirty(inode)) { if (datasync) err = nilfs_construct_dsync_segment(inode->i_sb, inode, start, end); else err = nilfs_construct_segment(inode->i_sb); } nilfs = inode->i_sb->s_fs_info; if (!err) err = nilfs_flush_device(nilfs); return err; } static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct inode *inode = file_inode(vma->vm_file); struct nilfs_transaction_info ti; int ret = 0; if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info))) return VM_FAULT_SIGBUS; /* -ENOSPC */ sb_start_pagefault(inode->i_sb); lock_page(page); if (page->mapping != inode->i_mapping || page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) { unlock_page(page); ret = -EFAULT; /* make the VM retry the fault */ goto out; } /* * check to see if the page is mapped already (no holes) */ if (PageMappedToDisk(page)) goto mapped; if (page_has_buffers(page)) { struct buffer_head *bh, *head; int fully_mapped = 1; bh = head = page_buffers(page); do { if (!buffer_mapped(bh)) { fully_mapped = 0; break; } } while (bh = bh->b_this_page, bh != head); if (fully_mapped) { SetPageMappedToDisk(page); goto mapped; } } unlock_page(page); /* * fill hole blocks */ ret = nilfs_transaction_begin(inode->i_sb, &ti, 1); /* never returns -ENOMEM, but may return -ENOSPC */ if (unlikely(ret)) goto out; file_update_time(vma->vm_file); ret = __block_page_mkwrite(vma, vmf, nilfs_get_block); if (ret) { nilfs_transaction_abort(inode->i_sb); goto out; } nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits)); nilfs_transaction_commit(inode->i_sb); mapped: wait_for_stable_page(page); out: sb_end_pagefault(inode->i_sb); return block_page_mkwrite_return(ret); } static const struct vm_operations_struct nilfs_file_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = nilfs_page_mkwrite, }; static int 
nilfs_file_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); vma->vm_ops = &nilfs_file_vm_ops; return 0; } /* * We have mostly NULL's here: the current defaults are ok for * the nilfs filesystem. */ const struct file_operations nilfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl = nilfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = nilfs_compat_ioctl, #endif /* CONFIG_COMPAT */ .mmap = nilfs_file_mmap, .open = generic_file_open, /* .release = nilfs_release_file, */ .fsync = nilfs_sync_file, .splice_read = generic_file_splice_read, }; const struct inode_operations nilfs_file_inode_operations = { .setattr = nilfs_setattr, .permission = nilfs_permission, .fiemap = nilfs_fiemap, }; /* end of file */
gpl-2.0
DavionKnight/OK6410-linux-2.6.36
drivers/scsi/aacraid/dpcsup.c
1133
10194
/* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * Module Name: * dpcsup.c * * Abstract: All DPC processing routines for the cyclone board occur here. * * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/blkdev.h> #include <linux/semaphore.h> #include "aacraid.h" /** * aac_response_normal - Handle command replies * @q: Queue to read from * * This DPC routine will be run when the adapter interrupts us to let us * know there is a response on our normal priority queue. We will pull off * all QE there are and wake up all the waiters before exiting. We will * take a spinlock out on the queue before operating on it. */ unsigned int aac_response_normal(struct aac_queue * q) { struct aac_dev * dev = q->dev; struct aac_entry *entry; struct hw_fib * hwfib; struct fib * fib; int consumed = 0; unsigned long flags, mflags; spin_lock_irqsave(q->lock, flags); /* * Keep pulling response QEs off the response queue and waking * up the waiters until there are no more QEs. 
We then return * back to the system. If no response was requesed we just * deallocate the Fib here and continue. */ while(aac_consumer_get(dev, q, &entry)) { int fast; u32 index = le32_to_cpu(entry->addr); fast = index & 0x01; fib = &dev->fibs[index >> 2]; hwfib = fib->hw_fib_va; aac_consumer_free(dev, q, HostNormRespQueue); /* * Remove this fib from the Outstanding I/O queue. * But only if it has not already been timed out. * * If the fib has been timed out already, then just * continue. The caller has already been notified that * the fib timed out. */ dev->queues->queue[AdapNormCmdQueue].numpending--; if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { spin_unlock_irqrestore(q->lock, flags); aac_fib_complete(fib); aac_fib_free(fib); spin_lock_irqsave(q->lock, flags); continue; } spin_unlock_irqrestore(q->lock, flags); if (fast) { /* * Doctor the fib */ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); hwfib->header.XferState |= cpu_to_le32(AdapterProcessed); } FIB_COUNTER_INCREMENT(aac_config.FibRecved); if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) { __le32 *pstatus = (__le32 *)hwfib->data; if (*pstatus & cpu_to_le32(0xffff0000)) *pstatus = cpu_to_le32(ST_OK); } if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) { if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved); else FIB_COUNTER_INCREMENT(aac_config.AsyncRecved); /* * NOTE: we cannot touch the fib after this * call, because it may have been deallocated. 
*/ fib->flags = 0; fib->callback(fib->callback_data, fib); } else { unsigned long flagv; spin_lock_irqsave(&fib->event_lock, flagv); if (!fib->done) { fib->done = 1; up(&fib->event_wait); } spin_unlock_irqrestore(&fib->event_lock, flagv); spin_lock_irqsave(&dev->manage_lock, mflags); dev->management_fib_count--; spin_unlock_irqrestore(&dev->manage_lock, mflags); FIB_COUNTER_INCREMENT(aac_config.NormalRecved); if (fib->done == 2) { spin_lock_irqsave(&fib->event_lock, flagv); fib->done = 0; spin_unlock_irqrestore(&fib->event_lock, flagv); aac_fib_complete(fib); aac_fib_free(fib); } } consumed++; spin_lock_irqsave(q->lock, flags); } if (consumed > aac_config.peak_fibs) aac_config.peak_fibs = consumed; if (consumed == 0) aac_config.zero_fibs++; spin_unlock_irqrestore(q->lock, flags); return 0; } /** * aac_command_normal - handle commands * @q: queue to process * * This DPC routine will be queued when the adapter interrupts us to * let us know there is a command on our normal priority queue. We will * pull off all QE there are and wake up all the waiters before exiting. * We will take a spinlock out on the queue before operating on it. */ unsigned int aac_command_normal(struct aac_queue *q) { struct aac_dev * dev = q->dev; struct aac_entry *entry; unsigned long flags; spin_lock_irqsave(q->lock, flags); /* * Keep pulling response QEs off the response queue and waking * up the waiters until there are no more QEs. We then return * back to the system. */ while(aac_consumer_get(dev, q, &entry)) { struct fib fibctx; struct hw_fib * hw_fib; u32 index; struct fib *fib = &fibctx; index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib); hw_fib = &dev->aif_base_va[index]; /* * Allocate a FIB at all costs. For non queued stuff * we can just use the stack so we are happy. 
We need * a fib object in order to manage the linked lists */ if (dev->aif_thread) if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL) fib = &fibctx; memset(fib, 0, sizeof(struct fib)); INIT_LIST_HEAD(&fib->fiblink); fib->type = FSAFS_NTC_FIB_CONTEXT; fib->size = sizeof(struct fib); fib->hw_fib_va = hw_fib; fib->data = hw_fib->data; fib->dev = dev; if (dev->aif_thread && fib != &fibctx) { list_add_tail(&fib->fiblink, &q->cmdq); aac_consumer_free(dev, q, HostNormCmdQueue); wake_up_interruptible(&q->cmdready); } else { aac_consumer_free(dev, q, HostNormCmdQueue); spin_unlock_irqrestore(q->lock, flags); /* * Set the status of this FIB */ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); aac_fib_adapter_complete(fib, sizeof(u32)); spin_lock_irqsave(q->lock, flags); } } spin_unlock_irqrestore(q->lock, flags); return 0; } /** * aac_intr_normal - Handle command replies * @dev: Device * @index: completion reference * * This DPC routine will be run when the adapter interrupts us to let us * know there is a response on our normal priority queue. We will pull off * all QE there are and wake up all the waiters before exiting. */ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) { unsigned long mflags; dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); if ((index & 0x00000002L)) { struct hw_fib * hw_fib; struct fib * fib; struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; unsigned long flags; if (index == 0xFFFFFFFEL) /* Special Case */ return 0; /* Do nothing */ /* * Allocate a FIB. For non queued stuff we can just use * the stack so we are happy. We need a fib object in order to * manage the linked lists. 
*/ if ((!dev->aif_thread) || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC)))) return 1; if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) { kfree (fib); return 1; } memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + (index & ~0x00000002L)), sizeof(struct hw_fib)); INIT_LIST_HEAD(&fib->fiblink); fib->type = FSAFS_NTC_FIB_CONTEXT; fib->size = sizeof(struct fib); fib->hw_fib_va = hw_fib; fib->data = hw_fib->data; fib->dev = dev; spin_lock_irqsave(q->lock, flags); list_add_tail(&fib->fiblink, &q->cmdq); wake_up_interruptible(&q->cmdready); spin_unlock_irqrestore(q->lock, flags); return 1; } else { int fast = index & 0x01; struct fib * fib = &dev->fibs[index >> 2]; struct hw_fib * hwfib = fib->hw_fib_va; /* * Remove this fib from the Outstanding I/O queue. * But only if it has not already been timed out. * * If the fib has been timed out already, then just * continue. The caller has already been notified that * the fib timed out. */ dev->queues->queue[AdapNormCmdQueue].numpending--; if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { aac_fib_complete(fib); aac_fib_free(fib); return 0; } if (fast) { /* * Doctor the fib */ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); hwfib->header.XferState |= cpu_to_le32(AdapterProcessed); } FIB_COUNTER_INCREMENT(aac_config.FibRecved); if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) { __le32 *pstatus = (__le32 *)hwfib->data; if (*pstatus & cpu_to_le32(0xffff0000)) *pstatus = cpu_to_le32(ST_OK); } if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) { if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved); else FIB_COUNTER_INCREMENT(aac_config.AsyncRecved); /* * NOTE: we cannot touch the fib after this * call, because it may have been deallocated. 
*/ fib->flags = 0; fib->callback(fib->callback_data, fib); } else { unsigned long flagv; dprintk((KERN_INFO "event_wait up\n")); spin_lock_irqsave(&fib->event_lock, flagv); if (!fib->done) { fib->done = 1; up(&fib->event_wait); } spin_unlock_irqrestore(&fib->event_lock, flagv); spin_lock_irqsave(&dev->manage_lock, mflags); dev->management_fib_count--; spin_unlock_irqrestore(&dev->manage_lock, mflags); FIB_COUNTER_INCREMENT(aac_config.NormalRecved); if (fib->done == 2) { spin_lock_irqsave(&fib->event_lock, flagv); fib->done = 0; spin_unlock_irqrestore(&fib->event_lock, flagv); aac_fib_complete(fib); aac_fib_free(fib); } } return 0; } }
gpl-2.0
myjang0507/slte
drivers/hwmon/ibmpex.c
2157
15440
/* * A hwmon driver for the IBM PowerExecutive temperature/power sensors * Copyright (C) 2007 IBM * * Author: Darrick J. Wong <djwong@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/ipmi.h> #include <linux/module.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/err.h> #define REFRESH_INTERVAL (2 * HZ) #define DRVNAME "ibmpex" #define PEX_GET_VERSION 1 #define PEX_GET_SENSOR_COUNT 2 #define PEX_GET_SENSOR_NAME 3 #define PEX_RESET_HIGH_LOW 4 #define PEX_GET_SENSOR_DATA 6 #define PEX_NET_FUNCTION 0x3A #define PEX_COMMAND 0x3C static inline u16 extract_value(const char *data, int offset) { return be16_to_cpup((__be16 *)&data[offset]); } #define TEMP_SENSOR 1 #define POWER_SENSOR 2 #define PEX_SENSOR_TYPE_LEN 3 static u8 const power_sensor_sig[] = {0x70, 0x77, 0x72}; static u8 const temp_sensor_sig[] = {0x74, 0x65, 0x6D}; #define PEX_MULT_LEN 2 static u8 const watt_sensor_sig[] = {0x41, 0x43}; #define PEX_NUM_SENSOR_FUNCS 3 static char const * const power_sensor_name_templates[] = { "%s%d_average", "%s%d_average_lowest", "%s%d_average_highest" }; static char const * const temp_sensor_name_templates[] = { "%s%d_input", "%s%d_input_lowest", "%s%d_input_highest" }; static void ibmpex_msg_handler(struct 
ipmi_recv_msg *msg, void *user_msg_data); static void ibmpex_register_bmc(int iface, struct device *dev); static void ibmpex_bmc_gone(int iface); struct ibmpex_sensor_data { int in_use; s16 values[PEX_NUM_SENSOR_FUNCS]; int multiplier; struct sensor_device_attribute_2 attr[PEX_NUM_SENSOR_FUNCS]; }; struct ibmpex_bmc_data { struct list_head list; struct device *hwmon_dev; struct device *bmc_device; struct mutex lock; char valid; unsigned long last_updated; /* In jiffies */ struct ipmi_addr address; struct completion read_complete; ipmi_user_t user; int interface; struct kernel_ipmi_msg tx_message; unsigned char tx_msg_data[IPMI_MAX_MSG_LENGTH]; long tx_msgid; unsigned char rx_msg_data[IPMI_MAX_MSG_LENGTH]; unsigned long rx_msg_len; unsigned char rx_result; int rx_recv_type; unsigned char sensor_major; unsigned char sensor_minor; unsigned char num_sensors; struct ibmpex_sensor_data *sensors; }; struct ibmpex_driver_data { struct list_head bmc_data; struct ipmi_smi_watcher bmc_events; struct ipmi_user_hndl ipmi_hndlrs; }; static struct ibmpex_driver_data driver_data = { .bmc_data = LIST_HEAD_INIT(driver_data.bmc_data), .bmc_events = { .owner = THIS_MODULE, .new_smi = ibmpex_register_bmc, .smi_gone = ibmpex_bmc_gone, }, .ipmi_hndlrs = { .ipmi_recv_hndl = ibmpex_msg_handler, }, }; static int ibmpex_send_message(struct ibmpex_bmc_data *data) { int err; err = ipmi_validate_addr(&data->address, sizeof(data->address)); if (err) goto out; data->tx_msgid++; err = ipmi_request_settime(data->user, &data->address, data->tx_msgid, &data->tx_message, data, 0, 0, 0); if (err) goto out1; return 0; out1: dev_err(data->bmc_device, "request_settime=%x\n", err); return err; out: dev_err(data->bmc_device, "validate_addr=%x\n", err); return err; } static int ibmpex_ver_check(struct ibmpex_bmc_data *data) { data->tx_msg_data[0] = PEX_GET_VERSION; data->tx_message.data_len = 1; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len != 
6) return -ENOENT; data->sensor_major = data->rx_msg_data[0]; data->sensor_minor = data->rx_msg_data[1]; dev_info(data->bmc_device, "Found BMC with sensor interface v%d.%d %d-%02d-%02d on interface %d\n", data->sensor_major, data->sensor_minor, extract_value(data->rx_msg_data, 2), data->rx_msg_data[4], data->rx_msg_data[5], data->interface); return 0; } static int ibmpex_query_sensor_count(struct ibmpex_bmc_data *data) { data->tx_msg_data[0] = PEX_GET_SENSOR_COUNT; data->tx_message.data_len = 1; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len != 1) return -ENOENT; return data->rx_msg_data[0]; } static int ibmpex_query_sensor_name(struct ibmpex_bmc_data *data, int sensor) { data->tx_msg_data[0] = PEX_GET_SENSOR_NAME; data->tx_msg_data[1] = sensor; data->tx_message.data_len = 2; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len < 1) return -ENOENT; return 0; } static int ibmpex_query_sensor_data(struct ibmpex_bmc_data *data, int sensor) { data->tx_msg_data[0] = PEX_GET_SENSOR_DATA; data->tx_msg_data[1] = sensor; data->tx_message.data_len = 2; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len < 26) { dev_err(data->bmc_device, "Error reading sensor %d.\n", sensor); return -ENOENT; } return 0; } static int ibmpex_reset_high_low_data(struct ibmpex_bmc_data *data) { data->tx_msg_data[0] = PEX_RESET_HIGH_LOW; data->tx_message.data_len = 1; ibmpex_send_message(data); wait_for_completion(&data->read_complete); return 0; } static void ibmpex_update_device(struct ibmpex_bmc_data *data) { int i, err; mutex_lock(&data->lock); if (time_before(jiffies, data->last_updated + REFRESH_INTERVAL) && data->valid) goto out; for (i = 0; i < data->num_sensors; i++) { if (!data->sensors[i].in_use) continue; err = ibmpex_query_sensor_data(data, i); if (err) continue; data->sensors[i].values[0] = 
extract_value(data->rx_msg_data, 16); data->sensors[i].values[1] = extract_value(data->rx_msg_data, 18); data->sensors[i].values[2] = extract_value(data->rx_msg_data, 20); } data->last_updated = jiffies; data->valid = 1; out: mutex_unlock(&data->lock); } static struct ibmpex_bmc_data *get_bmc_data(int iface) { struct ibmpex_bmc_data *p, *next; list_for_each_entry_safe(p, next, &driver_data.bmc_data, list) if (p->interface == iface) return p; return NULL; } static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return sprintf(buf, "%s\n", DRVNAME); } static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); static ssize_t ibmpex_show_sensor(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr); struct ibmpex_bmc_data *data = dev_get_drvdata(dev); int mult = data->sensors[attr->index].multiplier; ibmpex_update_device(data); return sprintf(buf, "%d\n", data->sensors[attr->index].values[attr->nr] * mult); } static ssize_t ibmpex_reset_high_low(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct ibmpex_bmc_data *data = dev_get_drvdata(dev); ibmpex_reset_high_low_data(data); return count; } static SENSOR_DEVICE_ATTR(reset_high_low, S_IWUSR, NULL, ibmpex_reset_high_low, 0); static int is_power_sensor(const char *sensor_id, int len) { if (len < PEX_SENSOR_TYPE_LEN) return 0; if (!memcmp(sensor_id, power_sensor_sig, PEX_SENSOR_TYPE_LEN)) return 1; return 0; } static int is_temp_sensor(const char *sensor_id, int len) { if (len < PEX_SENSOR_TYPE_LEN) return 0; if (!memcmp(sensor_id, temp_sensor_sig, PEX_SENSOR_TYPE_LEN)) return 1; return 0; } static int power_sensor_multiplier(struct ibmpex_bmc_data *data, const char *sensor_id, int len) { int i; if (data->sensor_major == 2) return 1000000; for (i = PEX_SENSOR_TYPE_LEN; i < len - 1; i++) if (!memcmp(&sensor_id[i], watt_sensor_sig, PEX_MULT_LEN)) return 1000000; 
return 100000; } static int create_sensor(struct ibmpex_bmc_data *data, int type, int counter, int sensor, int func) { int err; char *n; n = kmalloc(32, GFP_KERNEL); if (!n) return -ENOMEM; if (type == TEMP_SENSOR) sprintf(n, temp_sensor_name_templates[func], "temp", counter); else if (type == POWER_SENSOR) sprintf(n, power_sensor_name_templates[func], "power", counter); sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr); data->sensors[sensor].attr[func].dev_attr.attr.name = n; data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO; data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor; data->sensors[sensor].attr[func].index = sensor; data->sensors[sensor].attr[func].nr = func; err = device_create_file(data->bmc_device, &data->sensors[sensor].attr[func].dev_attr); if (err) { data->sensors[sensor].attr[func].dev_attr.attr.name = NULL; kfree(n); return err; } return 0; } static int ibmpex_find_sensors(struct ibmpex_bmc_data *data) { int i, j, err; int sensor_type; int sensor_counter; int num_power = 0; int num_temp = 0; err = ibmpex_query_sensor_count(data); if (err <= 0) return -ENOENT; data->num_sensors = err; data->sensors = kzalloc(data->num_sensors * sizeof(*data->sensors), GFP_KERNEL); if (!data->sensors) return -ENOMEM; for (i = 0; i < data->num_sensors; i++) { err = ibmpex_query_sensor_name(data, i); if (err) continue; if (is_power_sensor(data->rx_msg_data, data->rx_msg_len)) { sensor_type = POWER_SENSOR; num_power++; sensor_counter = num_power; data->sensors[i].multiplier = power_sensor_multiplier(data, data->rx_msg_data, data->rx_msg_len); } else if (is_temp_sensor(data->rx_msg_data, data->rx_msg_len)) { sensor_type = TEMP_SENSOR; num_temp++; sensor_counter = num_temp; data->sensors[i].multiplier = 1000; } else continue; data->sensors[i].in_use = 1; /* Create attributes */ for (j = 0; j < PEX_NUM_SENSOR_FUNCS; j++) { err = create_sensor(data, sensor_type, sensor_counter, i, j); if (err) goto exit_remove; } } err = 
device_create_file(data->bmc_device, &sensor_dev_attr_reset_high_low.dev_attr); if (err) goto exit_remove; err = device_create_file(data->bmc_device, &sensor_dev_attr_name.dev_attr); if (err) goto exit_remove; return 0; exit_remove: device_remove_file(data->bmc_device, &sensor_dev_attr_reset_high_low.dev_attr); device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr); for (i = 0; i < data->num_sensors; i++) for (j = 0; j < PEX_NUM_SENSOR_FUNCS; j++) { if (!data->sensors[i].attr[j].dev_attr.attr.name) continue; device_remove_file(data->bmc_device, &data->sensors[i].attr[j].dev_attr); kfree(data->sensors[i].attr[j].dev_attr.attr.name); } kfree(data->sensors); return err; } static void ibmpex_register_bmc(int iface, struct device *dev) { struct ibmpex_bmc_data *data; int err; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { dev_err(dev, "Insufficient memory for BMC interface.\n"); return; } data->address.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; data->address.channel = IPMI_BMC_CHANNEL; data->address.data[0] = 0; data->interface = iface; data->bmc_device = dev; /* Create IPMI messaging interface user */ err = ipmi_create_user(data->interface, &driver_data.ipmi_hndlrs, data, &data->user); if (err < 0) { dev_err(dev, "Unable to register user with IPMI interface %d\n", data->interface); goto out; } mutex_init(&data->lock); /* Initialize message */ data->tx_msgid = 0; init_completion(&data->read_complete); data->tx_message.netfn = PEX_NET_FUNCTION; data->tx_message.cmd = PEX_COMMAND; data->tx_message.data = data->tx_msg_data; /* Does this BMC support PowerExecutive? 
*/ err = ibmpex_ver_check(data); if (err) goto out_user; /* Register the BMC as a HWMON class device */ data->hwmon_dev = hwmon_device_register(data->bmc_device); if (IS_ERR(data->hwmon_dev)) { dev_err(data->bmc_device, "Unable to register hwmon device for IPMI interface %d\n", data->interface); goto out_user; } /* finally add the new bmc data to the bmc data list */ dev_set_drvdata(dev, data); list_add_tail(&data->list, &driver_data.bmc_data); /* Now go find all the sensors */ err = ibmpex_find_sensors(data); if (err) { dev_err(data->bmc_device, "Error %d finding sensors\n", err); goto out_register; } return; out_register: hwmon_device_unregister(data->hwmon_dev); out_user: ipmi_destroy_user(data->user); out: kfree(data); } static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data) { int i, j; device_remove_file(data->bmc_device, &sensor_dev_attr_reset_high_low.dev_attr); device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr); for (i = 0; i < data->num_sensors; i++) for (j = 0; j < PEX_NUM_SENSOR_FUNCS; j++) { if (!data->sensors[i].attr[j].dev_attr.attr.name) continue; device_remove_file(data->bmc_device, &data->sensors[i].attr[j].dev_attr); kfree(data->sensors[i].attr[j].dev_attr.attr.name); } list_del(&data->list); dev_set_drvdata(data->bmc_device, NULL); hwmon_device_unregister(data->hwmon_dev); ipmi_destroy_user(data->user); kfree(data->sensors); kfree(data); } static void ibmpex_bmc_gone(int iface) { struct ibmpex_bmc_data *data = get_bmc_data(iface); if (!data) return; ibmpex_bmc_delete(data); } static void ibmpex_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) { struct ibmpex_bmc_data *data = (struct ibmpex_bmc_data *)user_msg_data; if (msg->msgid != data->tx_msgid) { dev_err(data->bmc_device, "Mismatch between received msgid (%02x) and transmitted msgid (%02x)!\n", (int)msg->msgid, (int)data->tx_msgid); ipmi_free_recv_msg(msg); return; } data->rx_recv_type = msg->recv_type; if (msg->msg.data_len > 0) data->rx_result = 
msg->msg.data[0]; else data->rx_result = IPMI_UNKNOWN_ERR_COMPLETION_CODE; if (msg->msg.data_len > 1) { data->rx_msg_len = msg->msg.data_len - 1; memcpy(data->rx_msg_data, msg->msg.data + 1, data->rx_msg_len); } else data->rx_msg_len = 0; ipmi_free_recv_msg(msg); complete(&data->read_complete); } static int __init ibmpex_init(void) { return ipmi_smi_watcher_register(&driver_data.bmc_events); } static void __exit ibmpex_exit(void) { struct ibmpex_bmc_data *p, *next; ipmi_smi_watcher_unregister(&driver_data.bmc_events); list_for_each_entry_safe(p, next, &driver_data.bmc_data, list) ibmpex_bmc_delete(p); } MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); MODULE_DESCRIPTION("IBM PowerExecutive power/temperature sensor driver"); MODULE_LICENSE("GPL"); module_init(ibmpex_init); module_exit(ibmpex_exit); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3350-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3655-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3755-*");
gpl-2.0
ElysiumRom/android_kernel_samsung_msm8660-common
arch/arm/mach-bcmring/dma.c
2413
63371
/***************************************************************************** * Copyright 2004 - 2008 Broadcom Corporation. All rights reserved. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available at * http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a * license other than the GPL, without Broadcom's express prior written * consent. *****************************************************************************/ /****************************************************************************/ /** * @file dma.c * * @brief Implements the DMA interface. */ /****************************************************************************/ /* ---- Include Files ---------------------------------------------------- */ #include <linux/module.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/irqreturn.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <mach/timer.h> #include <linux/mm.h> #include <linux/pfn.h> #include <asm/atomic.h> #include <mach/dma.h> /* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */ /* especially since dc4 doesn't use kmalloc'd memory. */ #define ALLOW_MAP_OF_KMALLOC_MEMORY 0 /* ---- Public Variables ------------------------------------------------- */ /* ---- Private Constants and Types -------------------------------------- */ #define MAKE_HANDLE(controllerIdx, channelIdx) (((controllerIdx) << 4) | (channelIdx)) #define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f) #define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f) #define DMA_MAP_DEBUG 0 #if DMA_MAP_DEBUG # define DMA_MAP_PRINT(fmt, args...) 
printk("%s: " fmt, __func__, ## args) #else # define DMA_MAP_PRINT(fmt, args...) #endif /* ---- Private Variables ------------------------------------------------ */ static DMA_Global_t gDMA; static struct proc_dir_entry *gDmaDir; static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0); static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0); static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0); static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0); #include "dma_device.c" /* ---- Private Function Prototypes -------------------------------------- */ /* ---- Functions ------------------------------------------------------- */ /****************************************************************************/ /** * Displays information for /proc/dma/mem-type */ /****************************************************************************/ static int dma_proc_read_mem_type(char *buf, char **start, off_t offset, int count, int *eof, void *data) { int len = 0; len += sprintf(buf + len, "dma_map_mem statistics\n"); len += sprintf(buf + len, "coherent: %d\n", atomic_read(&gDmaStatMemTypeCoherent)); len += sprintf(buf + len, "kmalloc: %d\n", atomic_read(&gDmaStatMemTypeKmalloc)); len += sprintf(buf + len, "vmalloc: %d\n", atomic_read(&gDmaStatMemTypeVmalloc)); len += sprintf(buf + len, "user: %d\n", atomic_read(&gDmaStatMemTypeUser)); return len; } /****************************************************************************/ /** * Displays information for /proc/dma/channels */ /****************************************************************************/ static int dma_proc_read_channels(char *buf, char **start, off_t offset, int count, int *eof, void *data) { int controllerIdx; int channelIdx; int limit = count - 200; int len = 0; DMA_Channel_t *channel; if (down_interruptible(&gDMA.lock) < 0) { return -ERESTARTSYS; } for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS; controllerIdx++) { for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; channelIdx++) { if (len 
>= limit) { break; } channel = &gDMA.controller[controllerIdx].channel[channelIdx]; len += sprintf(buf + len, "%d:%d ", controllerIdx, channelIdx); if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) != 0) { len += sprintf(buf + len, "Dedicated for %s ", DMA_gDeviceAttribute[channel-> devType].name); } else { len += sprintf(buf + len, "Shared "); } if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) != 0) { len += sprintf(buf + len, "No ISR "); } if ((channel->flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) { len += sprintf(buf + len, "Fifo: 128 "); } else { len += sprintf(buf + len, "Fifo: 64 "); } if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) { len += sprintf(buf + len, "InUse by %s", DMA_gDeviceAttribute[channel-> devType].name); #if (DMA_DEBUG_TRACK_RESERVATION) len += sprintf(buf + len, " (%s:%d)", channel->fileName, channel->lineNum); #endif } else { len += sprintf(buf + len, "Avail "); } if (channel->lastDevType != DMA_DEVICE_NONE) { len += sprintf(buf + len, "Last use: %s ", DMA_gDeviceAttribute[channel-> lastDevType]. 
name); } len += sprintf(buf + len, "\n"); } } up(&gDMA.lock); *eof = 1; return len; } /****************************************************************************/ /** * Displays information for /proc/dma/devices */ /****************************************************************************/ static int dma_proc_read_devices(char *buf, char **start, off_t offset, int count, int *eof, void *data) { int limit = count - 200; int len = 0; int devIdx; if (down_interruptible(&gDMA.lock) < 0) { return -ERESTARTSYS; } for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) { DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx]; if (devAttr->name == NULL) { continue; } if (len >= limit) { break; } len += sprintf(buf + len, "%-12s ", devAttr->name); if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) { len += sprintf(buf + len, "Dedicated %d:%d ", devAttr->dedicatedController, devAttr->dedicatedChannel); } else { len += sprintf(buf + len, "Shared DMA:"); if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA0) != 0) { len += sprintf(buf + len, "0"); } if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA1) != 0) { len += sprintf(buf + len, "1"); } len += sprintf(buf + len, " "); } if ((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) { len += sprintf(buf + len, "NoISR "); } if ((devAttr->flags & DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) != 0) { len += sprintf(buf + len, "Allow-128 "); } len += sprintf(buf + len, "Xfer #: %Lu Ticks: %Lu Bytes: %Lu DescLen: %u\n", devAttr->numTransfers, devAttr->transferTicks, devAttr->transferBytes, devAttr->ring.bytesAllocated); } up(&gDMA.lock); *eof = 1; return len; } /****************************************************************************/ /** * Determines if a DMA_Device_t is "valid". 
* * @return * TRUE - dma device is valid * FALSE - dma device isn't valid */ /****************************************************************************/ static inline int IsDeviceValid(DMA_Device_t device) { return (device >= 0) && (device < DMA_NUM_DEVICE_ENTRIES); } /****************************************************************************/ /** * Translates a DMA handle into a pointer to a channel. * * @return * non-NULL - pointer to DMA_Channel_t * NULL - DMA Handle was invalid */ /****************************************************************************/ static inline DMA_Channel_t *HandleToChannel(DMA_Handle_t handle) { int controllerIdx; int channelIdx; controllerIdx = CONTROLLER_FROM_HANDLE(handle); channelIdx = CHANNEL_FROM_HANDLE(handle); if ((controllerIdx > DMA_NUM_CONTROLLERS) || (channelIdx > DMA_NUM_CHANNELS)) { return NULL; } return &gDMA.controller[controllerIdx].channel[channelIdx]; } /****************************************************************************/ /** * Interrupt handler which is called to process DMA interrupts. 
*/ /****************************************************************************/ static irqreturn_t dma_interrupt_handler(int irq, void *dev_id) { DMA_Channel_t *channel; DMA_DeviceAttribute_t *devAttr; int irqStatus; channel = (DMA_Channel_t *) dev_id; /* Figure out why we were called, and knock down the interrupt */ irqStatus = dmacHw_getInterruptStatus(channel->dmacHwHandle); dmacHw_clearInterrupt(channel->dmacHwHandle); if ((channel->devType < 0) || (channel->devType > DMA_NUM_DEVICE_ENTRIES)) { printk(KERN_ERR "dma_interrupt_handler: Invalid devType: %d\n", channel->devType); return IRQ_NONE; } devAttr = &DMA_gDeviceAttribute[channel->devType]; /* Update stats */ if ((irqStatus & dmacHw_INTERRUPT_STATUS_TRANS) != 0) { devAttr->transferTicks += (timer_get_tick_count() - devAttr->transferStartTime); } if ((irqStatus & dmacHw_INTERRUPT_STATUS_ERROR) != 0) { printk(KERN_ERR "dma_interrupt_handler: devType :%d DMA error (%s)\n", channel->devType, devAttr->name); } else { devAttr->numTransfers++; devAttr->transferBytes += devAttr->numBytes; } /* Call any installed handler */ if (devAttr->devHandler != NULL) { devAttr->devHandler(channel->devType, irqStatus, devAttr->userData); } return IRQ_HANDLED; } /****************************************************************************/ /** * Allocates memory to hold a descriptor ring. The descriptor ring then * needs to be populated by making one or more calls to * dna_add_descriptors. * * The returned descriptor ring will be automatically initialized. * * @return * 0 Descriptor ring was allocated successfully * -EINVAL Invalid parameters passed in * -ENOMEM Unable to allocate memory for the desired number of descriptors. */ /****************************************************************************/ int dma_alloc_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to populate */ int numDescriptors /* Number of descriptors that need to be allocated. 
*/ ) { size_t bytesToAlloc = dmacHw_descriptorLen(numDescriptors); if ((ring == NULL) || (numDescriptors <= 0)) { return -EINVAL; } ring->physAddr = 0; ring->descriptorsAllocated = 0; ring->bytesAllocated = 0; ring->virtAddr = dma_alloc_writecombine(NULL, bytesToAlloc, &ring->physAddr, GFP_KERNEL); if (ring->virtAddr == NULL) { return -ENOMEM; } ring->bytesAllocated = bytesToAlloc; ring->descriptorsAllocated = numDescriptors; return dma_init_descriptor_ring(ring, numDescriptors); } EXPORT_SYMBOL(dma_alloc_descriptor_ring); /****************************************************************************/ /** * Releases the memory which was previously allocated for a descriptor ring. */ /****************************************************************************/ void dma_free_descriptor_ring(DMA_DescriptorRing_t *ring /* Descriptor to release */ ) { if (ring->virtAddr != NULL) { dma_free_writecombine(NULL, ring->bytesAllocated, ring->virtAddr, ring->physAddr); } ring->bytesAllocated = 0; ring->descriptorsAllocated = 0; ring->virtAddr = NULL; ring->physAddr = 0; } EXPORT_SYMBOL(dma_free_descriptor_ring); /****************************************************************************/ /** * Initializes a descriptor ring, so that descriptors can be added to it. * Once a descriptor ring has been allocated, it may be reinitialized for * use with additional/different regions of memory. * * Note that if 7 descriptors are allocated, it's perfectly acceptable to * initialize the ring with a smaller number of descriptors. The amount * of memory allocated for the descriptor ring will not be reduced, and * the descriptor ring may be reinitialized later * * @return * 0 Descriptor ring was initialized successfully * -ENOMEM The descriptor which was passed in has insufficient space * to hold the desired number of descriptors. 
*/ /****************************************************************************/ int dma_init_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to initialize */ int numDescriptors /* Number of descriptors to initialize. */ ) { if (ring->virtAddr == NULL) { return -EINVAL; } if (dmacHw_initDescriptor(ring->virtAddr, ring->physAddr, ring->bytesAllocated, numDescriptors) < 0) { printk(KERN_ERR "dma_init_descriptor_ring: dmacHw_initDescriptor failed\n"); return -ENOMEM; } return 0; } EXPORT_SYMBOL(dma_init_descriptor_ring); /****************************************************************************/ /** * Determines the number of descriptors which would be required for a * transfer of the indicated memory region. * * This function also needs to know which DMA device this transfer will * be destined for, so that the appropriate DMA configuration can be retrieved. * DMA parameters such as transfer width, and whether this is a memory-to-memory * or memory-to-peripheral, etc can all affect the actual number of descriptors * required. * * @return * > 0 Returns the number of descriptors required for the indicated transfer * -ENODEV - Device handed in is invalid. 
* -EINVAL Invalid parameters * -ENOMEM Memory exhausted */ /****************************************************************************/ int dma_calculate_descriptor_count(DMA_Device_t device, /* DMA Device that this will be associated with */ dma_addr_t srcData, /* Place to get data to write to device */ dma_addr_t dstData, /* Pointer to device data address */ size_t numBytes /* Number of bytes to transfer to the device */ ) { int numDescriptors; DMA_DeviceAttribute_t *devAttr; if (!IsDeviceValid(device)) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[device]; numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData, (void *)dstData, numBytes); if (numDescriptors < 0) { printk(KERN_ERR "dma_calculate_descriptor_count: dmacHw_calculateDescriptorCount failed\n"); return -EINVAL; } return numDescriptors; } EXPORT_SYMBOL(dma_calculate_descriptor_count); /****************************************************************************/ /** * Adds a region of memory to the descriptor ring. Note that it may take * multiple descriptors for each region of memory. It is the callers * responsibility to allocate a sufficiently large descriptor ring. * * @return * 0 Descriptors were added successfully * -ENODEV Device handed in is invalid. 
* -EINVAL Invalid parameters * -ENOMEM Memory exhausted */ /****************************************************************************/ int dma_add_descriptors(DMA_DescriptorRing_t *ring, /* Descriptor ring to add descriptors to */ DMA_Device_t device, /* DMA Device that descriptors are for */ dma_addr_t srcData, /* Place to get data (memory or device) */ dma_addr_t dstData, /* Place to put data (memory or device) */ size_t numBytes /* Number of bytes to transfer to the device */ ) { int rc; DMA_DeviceAttribute_t *devAttr; if (!IsDeviceValid(device)) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[device]; rc = dmacHw_setDataDescriptor(&devAttr->config, ring->virtAddr, (void *)srcData, (void *)dstData, numBytes); if (rc < 0) { printk(KERN_ERR "dma_add_descriptors: dmacHw_setDataDescriptor failed with code: %d\n", rc); return -ENOMEM; } return 0; } EXPORT_SYMBOL(dma_add_descriptors); /****************************************************************************/ /** * Sets the descriptor ring associated with a device. * * Once set, the descriptor ring will be associated with the device, even * across channel request/free calls. Passing in a NULL descriptor ring * will release any descriptor ring currently associated with the device. * * Note: If you call dma_transfer, or one of the other dma_alloc_ functions * the descriptor ring may be released and reallocated. * * Note: This function will release the descriptor memory for any current * descriptor ring associated with this device. * * @return * 0 Descriptors were added successfully * -ENODEV Device handed in is invalid. */ /****************************************************************************/ int dma_set_device_descriptor_ring(DMA_Device_t device, /* Device to update the descriptor ring for. 
*/ DMA_DescriptorRing_t *ring /* Descriptor ring to add descriptors to */ ) { DMA_DeviceAttribute_t *devAttr; if (!IsDeviceValid(device)) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[device]; /* Free the previously allocated descriptor ring */ dma_free_descriptor_ring(&devAttr->ring); if (ring != NULL) { /* Copy in the new one */ devAttr->ring = *ring; } /* Set things up so that if dma_transfer is called then this descriptor */ /* ring will get freed. */ devAttr->prevSrcData = 0; devAttr->prevDstData = 0; devAttr->prevNumBytes = 0; return 0; } EXPORT_SYMBOL(dma_set_device_descriptor_ring); /****************************************************************************/ /** * Retrieves the descriptor ring associated with a device. * * @return * 0 Descriptors were added successfully * -ENODEV Device handed in is invalid. */ /****************************************************************************/ int dma_get_device_descriptor_ring(DMA_Device_t device, /* Device to retrieve the descriptor ring for. */ DMA_DescriptorRing_t *ring /* Place to store retrieved ring */ ) { DMA_DeviceAttribute_t *devAttr; memset(ring, 0, sizeof(*ring)); if (!IsDeviceValid(device)) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[device]; *ring = devAttr->ring; return 0; } EXPORT_SYMBOL(dma_get_device_descriptor_ring); /****************************************************************************/ /** * Configures a DMA channel. * * @return * >= 0 - Initialization was successful. * * -EBUSY - Device is currently being used. * -ENODEV - Device handed in is invalid. 
*/ /****************************************************************************/ static int ConfigChannel(DMA_Handle_t handle) { DMA_Channel_t *channel; DMA_DeviceAttribute_t *devAttr; int controllerIdx; channel = HandleToChannel(handle); if (channel == NULL) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[channel->devType]; controllerIdx = CONTROLLER_FROM_HANDLE(handle); if ((devAttr->flags & DMA_DEVICE_FLAG_PORT_PER_DMAC) != 0) { if (devAttr->config.transferType == dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL) { devAttr->config.dstPeripheralPort = devAttr->dmacPort[controllerIdx]; } else if (devAttr->config.transferType == dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM) { devAttr->config.srcPeripheralPort = devAttr->dmacPort[controllerIdx]; } } if (dmacHw_configChannel(channel->dmacHwHandle, &devAttr->config) != 0) { printk(KERN_ERR "ConfigChannel: dmacHw_configChannel failed\n"); return -EIO; } return 0; } /****************************************************************************/ /** * Initializes all of the data structures associated with the DMA. * @return * >= 0 - Initialization was successful. * * -EBUSY - Device is currently being used. * -ENODEV - Device handed in is invalid. */ /****************************************************************************/ int dma_init(void) { int rc = 0; int controllerIdx; int channelIdx; DMA_Device_t devIdx; DMA_Channel_t *channel; DMA_Handle_t dedicatedHandle; memset(&gDMA, 0, sizeof(gDMA)); sema_init(&gDMA.lock, 0); init_waitqueue_head(&gDMA.freeChannelQ); /* Initialize the Hardware */ dmacHw_initDma(); /* Start off by marking all of the DMA channels as shared. 
*/ for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS; controllerIdx++) { for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; channelIdx++) { channel = &gDMA.controller[controllerIdx].channel[channelIdx]; channel->flags = 0; channel->devType = DMA_DEVICE_NONE; channel->lastDevType = DMA_DEVICE_NONE; #if (DMA_DEBUG_TRACK_RESERVATION) channel->fileName = ""; channel->lineNum = 0; #endif channel->dmacHwHandle = dmacHw_getChannelHandle(dmacHw_MAKE_CHANNEL_ID (controllerIdx, channelIdx)); dmacHw_initChannel(channel->dmacHwHandle); } } /* Record any special attributes that channels may have */ gDMA.controller[0].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; gDMA.controller[0].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; gDMA.controller[1].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; gDMA.controller[1].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; /* Now walk through and record the dedicated channels. */ for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) { DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx]; if (((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) && ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0)) { printk(KERN_ERR "DMA Device: %s Can only request NO_ISR for dedicated devices\n", devAttr->name); rc = -EINVAL; goto out; } if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) { /* This is a dedicated device. Mark the channel as being reserved. 
*/ if (devAttr->dedicatedController >= DMA_NUM_CONTROLLERS) { printk(KERN_ERR "DMA Device: %s DMA Controller %d is out of range\n", devAttr->name, devAttr->dedicatedController); rc = -EINVAL; goto out; } if (devAttr->dedicatedChannel >= DMA_NUM_CHANNELS) { printk(KERN_ERR "DMA Device: %s DMA Channel %d is out of range\n", devAttr->name, devAttr->dedicatedChannel); rc = -EINVAL; goto out; } dedicatedHandle = MAKE_HANDLE(devAttr->dedicatedController, devAttr->dedicatedChannel); channel = HandleToChannel(dedicatedHandle); if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) != 0) { printk ("DMA Device: %s attempting to use same DMA Controller:Channel (%d:%d) as %s\n", devAttr->name, devAttr->dedicatedController, devAttr->dedicatedChannel, DMA_gDeviceAttribute[channel->devType]. name); rc = -EBUSY; goto out; } channel->flags |= DMA_CHANNEL_FLAG_IS_DEDICATED; channel->devType = devIdx; if (devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) { channel->flags |= DMA_CHANNEL_FLAG_NO_ISR; } /* For dedicated channels, we can go ahead and configure the DMA channel now */ /* as well. */ ConfigChannel(dedicatedHandle); } } /* Go through and register the interrupt handlers */ for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS; controllerIdx++) { for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; channelIdx++) { channel = &gDMA.controller[controllerIdx].channel[channelIdx]; if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) == 0) { snprintf(channel->name, sizeof(channel->name), "dma %d:%d %s", controllerIdx, channelIdx, channel->devType == DMA_DEVICE_NONE ? "" : DMA_gDeviceAttribute[channel->devType]. 
name); rc = request_irq(IRQ_DMA0C0 + (controllerIdx * DMA_NUM_CHANNELS) + channelIdx, dma_interrupt_handler, IRQF_DISABLED, channel->name, channel); if (rc != 0) { printk(KERN_ERR "request_irq for IRQ_DMA%dC%d failed\n", controllerIdx, channelIdx); } } } } /* Create /proc/dma/channels and /proc/dma/devices */ gDmaDir = create_proc_entry("dma", S_IFDIR | S_IRUGO | S_IXUGO, NULL); if (gDmaDir == NULL) { printk(KERN_ERR "Unable to create /proc/dma\n"); } else { create_proc_read_entry("channels", 0, gDmaDir, dma_proc_read_channels, NULL); create_proc_read_entry("devices", 0, gDmaDir, dma_proc_read_devices, NULL); create_proc_read_entry("mem-type", 0, gDmaDir, dma_proc_read_mem_type, NULL); } out: up(&gDMA.lock); return rc; } /****************************************************************************/ /** * Reserves a channel for use with @a dev. If the device is setup to use * a shared channel, then this function will block until a free channel * becomes available. * * @return * >= 0 - A valid DMA Handle. * -EBUSY - Device is currently being used. * -ENODEV - Device handed in is invalid. 
*/ /****************************************************************************/ #if (DMA_DEBUG_TRACK_RESERVATION) DMA_Handle_t dma_request_channel_dbg (DMA_Device_t dev, const char *fileName, int lineNum) #else DMA_Handle_t dma_request_channel(DMA_Device_t dev) #endif { DMA_Handle_t handle; DMA_DeviceAttribute_t *devAttr; DMA_Channel_t *channel; int controllerIdx; int controllerIdx2; int channelIdx; if (down_interruptible(&gDMA.lock) < 0) { return -ERESTARTSYS; } if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) { handle = -ENODEV; goto out; } devAttr = &DMA_gDeviceAttribute[dev]; #if (DMA_DEBUG_TRACK_RESERVATION) { char *s; s = strrchr(fileName, '/'); if (s != NULL) { fileName = s + 1; } } #endif if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) { /* This device has already been requested and not been freed */ printk(KERN_ERR "%s: device %s is already requested\n", __func__, devAttr->name); handle = -EBUSY; goto out; } if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) { /* This device has a dedicated channel. */ channel = &gDMA.controller[devAttr->dedicatedController]. channel[devAttr->dedicatedChannel]; if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) { handle = -EBUSY; goto out; } channel->flags |= DMA_CHANNEL_FLAG_IN_USE; devAttr->flags |= DMA_DEVICE_FLAG_IN_USE; #if (DMA_DEBUG_TRACK_RESERVATION) channel->fileName = fileName; channel->lineNum = lineNum; #endif handle = MAKE_HANDLE(devAttr->dedicatedController, devAttr->dedicatedChannel); goto out; } /* This device needs to use one of the shared channels. */ handle = DMA_INVALID_HANDLE; while (handle == DMA_INVALID_HANDLE) { /* Scan through the shared channels and see if one is available */ for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS; controllerIdx2++) { /* Check to see if we should try on controller 1 first. 
*/ controllerIdx = controllerIdx2; if ((devAttr-> flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) { controllerIdx = 1 - controllerIdx; } /* See if the device is available on the controller being tested */ if ((devAttr-> flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx)) != 0) { for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; channelIdx++) { channel = &gDMA.controller[controllerIdx]. channel[channelIdx]; if (((channel-> flags & DMA_CHANNEL_FLAG_IS_DEDICATED) == 0) && ((channel-> flags & DMA_CHANNEL_FLAG_IN_USE) == 0)) { if (((channel-> flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) && ((devAttr-> flags & DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) == 0)) { /* This channel is a large fifo - don't tie it up */ /* with devices that we don't want using it. */ continue; } channel->flags |= DMA_CHANNEL_FLAG_IN_USE; channel->devType = dev; devAttr->flags |= DMA_DEVICE_FLAG_IN_USE; #if (DMA_DEBUG_TRACK_RESERVATION) channel->fileName = fileName; channel->lineNum = lineNum; #endif handle = MAKE_HANDLE(controllerIdx, channelIdx); /* Now that we've reserved the channel - we can go ahead and configure it */ if (ConfigChannel(handle) != 0) { handle = -EIO; printk(KERN_ERR "dma_request_channel: ConfigChannel failed\n"); } goto out; } } } } /* No channels are currently available. Let's wait for one to free up. */ { DEFINE_WAIT(wait); prepare_to_wait(&gDMA.freeChannelQ, &wait, TASK_INTERRUPTIBLE); up(&gDMA.lock); schedule(); finish_wait(&gDMA.freeChannelQ, &wait); if (signal_pending(current)) { /* We don't currently hold gDMA.lock, so we return directly */ return -ERESTARTSYS; } } if (down_interruptible(&gDMA.lock)) { return -ERESTARTSYS; } } out: up(&gDMA.lock); return handle; } /* Create both _dbg and non _dbg functions for modules. 
*/ #if (DMA_DEBUG_TRACK_RESERVATION) #undef dma_request_channel DMA_Handle_t dma_request_channel(DMA_Device_t dev) { return dma_request_channel_dbg(dev, __FILE__, __LINE__); } EXPORT_SYMBOL(dma_request_channel_dbg); #endif EXPORT_SYMBOL(dma_request_channel); /****************************************************************************/ /** * Frees a previously allocated DMA Handle. */ /****************************************************************************/ int dma_free_channel(DMA_Handle_t handle /* DMA handle. */ ) { int rc = 0; DMA_Channel_t *channel; DMA_DeviceAttribute_t *devAttr; if (down_interruptible(&gDMA.lock) < 0) { return -ERESTARTSYS; } channel = HandleToChannel(handle); if (channel == NULL) { rc = -EINVAL; goto out; } devAttr = &DMA_gDeviceAttribute[channel->devType]; if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) == 0) { channel->lastDevType = channel->devType; channel->devType = DMA_DEVICE_NONE; } channel->flags &= ~DMA_CHANNEL_FLAG_IN_USE; devAttr->flags &= ~DMA_DEVICE_FLAG_IN_USE; out: up(&gDMA.lock); wake_up_interruptible(&gDMA.freeChannelQ); return rc; } EXPORT_SYMBOL(dma_free_channel); /****************************************************************************/ /** * Determines if a given device has been configured as using a shared * channel. * * @return * 0 Device uses a dedicated channel * > zero Device uses a shared channel * < zero Error code */ /****************************************************************************/ int dma_device_is_channel_shared(DMA_Device_t device /* Device to check. */ ) { DMA_DeviceAttribute_t *devAttr; if (!IsDeviceValid(device)) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[device]; return ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0); } EXPORT_SYMBOL(dma_device_is_channel_shared); /****************************************************************************/ /** * Allocates buffers for the descriptors. 
This is normally done automatically * but needs to be done explicitly when initiating a dma from interrupt * context. * * @return * 0 Descriptors were allocated successfully * -EINVAL Invalid device type for this kind of transfer * (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM) * -ENOMEM Memory exhausted */ /****************************************************************************/ int dma_alloc_descriptors(DMA_Handle_t handle, /* DMA Handle */ dmacHw_TRANSFER_TYPE_e transferType, /* Type of transfer being performed */ dma_addr_t srcData, /* Place to get data to write to device */ dma_addr_t dstData, /* Pointer to device data address */ size_t numBytes /* Number of bytes to transfer to the device */ ) { DMA_Channel_t *channel; DMA_DeviceAttribute_t *devAttr; int numDescriptors; size_t ringBytesRequired; int rc = 0; channel = HandleToChannel(handle); if (channel == NULL) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[channel->devType]; if (devAttr->config.transferType != transferType) { return -EINVAL; } /* Figure out how many descriptors we need. */ /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */ /* srcData, dstData, numBytes); */ numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData, (void *)dstData, numBytes); if (numDescriptors < 0) { printk(KERN_ERR "%s: dmacHw_calculateDescriptorCount failed\n", __func__); return -EINVAL; } /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */ /* a new one. */ ringBytesRequired = dmacHw_descriptorLen(numDescriptors); /* printk("ringBytesRequired: %d\n", ringBytesRequired); */ if (ringBytesRequired > devAttr->ring.bytesAllocated) { /* Make sure that this code path is never taken from interrupt context. */ /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */ /* allocation needs to have already been done. */ might_sleep(); /* Free the old descriptor ring and allocate a new one. 
*/ dma_free_descriptor_ring(&devAttr->ring); /* And allocate a new one. */ rc = dma_alloc_descriptor_ring(&devAttr->ring, numDescriptors); if (rc < 0) { printk(KERN_ERR "%s: dma_alloc_descriptor_ring(%d) failed\n", __func__, numDescriptors); return rc; } /* Setup the descriptor for this transfer */ if (dmacHw_initDescriptor(devAttr->ring.virtAddr, devAttr->ring.physAddr, devAttr->ring.bytesAllocated, numDescriptors) < 0) { printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", __func__); return -EINVAL; } } else { /* We've already got enough ring buffer allocated. All we need to do is reset */ /* any control information, just in case the previous DMA was stopped. */ dmacHw_resetDescriptorControl(devAttr->ring.virtAddr); } /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */ /* as last time, then we don't need to call setDataDescriptor again. */ if (dmacHw_setDataDescriptor(&devAttr->config, devAttr->ring.virtAddr, (void *)srcData, (void *)dstData, numBytes) < 0) { printk(KERN_ERR "%s: dmacHw_setDataDescriptor failed\n", __func__); return -EINVAL; } /* Remember the critical information for this transfer so that we can eliminate */ /* another call to dma_alloc_descriptors if the caller reuses the same buffers */ devAttr->prevSrcData = srcData; devAttr->prevDstData = dstData; devAttr->prevNumBytes = numBytes; return 0; } EXPORT_SYMBOL(dma_alloc_descriptors); /****************************************************************************/ /** * Allocates and sets up descriptors for a double buffered circular buffer. * * This is primarily intended to be used for things like the ingress samples * from a microphone. * * @return * > 0 Number of descriptors actually allocated. * -EINVAL Invalid device type for this kind of transfer * (i.e. 
the device is _MEM_TO_DEV and not _DEV_TO_MEM) * -ENOMEM Memory exhausted */ /****************************************************************************/ int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */ dma_addr_t srcData, /* Physical address of source data */ dma_addr_t dstData1, /* Physical address of first destination buffer */ dma_addr_t dstData2, /* Physical address of second destination buffer */ size_t numBytes /* Number of bytes in each destination buffer */ ) { DMA_Channel_t *channel; DMA_DeviceAttribute_t *devAttr; int numDst1Descriptors; int numDst2Descriptors; int numDescriptors; size_t ringBytesRequired; int rc = 0; channel = HandleToChannel(handle); if (channel == NULL) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[channel->devType]; /* Figure out how many descriptors we need. */ /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */ /* srcData, dstData, numBytes); */ numDst1Descriptors = dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData, (void *)dstData1, numBytes); if (numDst1Descriptors < 0) { return -EINVAL; } numDst2Descriptors = dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData, (void *)dstData2, numBytes); if (numDst2Descriptors < 0) { return -EINVAL; } numDescriptors = numDst1Descriptors + numDst2Descriptors; /* printk("numDescriptors: %d\n", numDescriptors); */ /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */ /* a new one. */ ringBytesRequired = dmacHw_descriptorLen(numDescriptors); /* printk("ringBytesRequired: %d\n", ringBytesRequired); */ if (ringBytesRequired > devAttr->ring.bytesAllocated) { /* Make sure that this code path is never taken from interrupt context. */ /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */ /* allocation needs to have already been done. */ might_sleep(); /* Free the old descriptor ring and allocate a new one. 
*/ dma_free_descriptor_ring(&devAttr->ring); /* And allocate a new one. */ rc = dma_alloc_descriptor_ring(&devAttr->ring, numDescriptors); if (rc < 0) { printk(KERN_ERR "%s: dma_alloc_descriptor_ring(%d) failed\n", __func__, ringBytesRequired); return rc; } } /* Setup the descriptor for this transfer. Since this function is used with */ /* CONTINUOUS DMA operations, we need to reinitialize every time, otherwise */ /* setDataDescriptor will keep trying to append onto the end. */ if (dmacHw_initDescriptor(devAttr->ring.virtAddr, devAttr->ring.physAddr, devAttr->ring.bytesAllocated, numDescriptors) < 0) { printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", __func__); return -EINVAL; } /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */ /* as last time, then we don't need to call setDataDescriptor again. */ if (dmacHw_setDataDescriptor(&devAttr->config, devAttr->ring.virtAddr, (void *)srcData, (void *)dstData1, numBytes) < 0) { printk(KERN_ERR "%s: dmacHw_setDataDescriptor 1 failed\n", __func__); return -EINVAL; } if (dmacHw_setDataDescriptor(&devAttr->config, devAttr->ring.virtAddr, (void *)srcData, (void *)dstData2, numBytes) < 0) { printk(KERN_ERR "%s: dmacHw_setDataDescriptor 2 failed\n", __func__); return -EINVAL; } /* You should use dma_start_transfer rather than dma_transfer_xxx so we don't */ /* try to make the 'prev' variables right. */ devAttr->prevSrcData = 0; devAttr->prevDstData = 0; devAttr->prevNumBytes = 0; return numDescriptors; } EXPORT_SYMBOL(dma_alloc_double_dst_descriptors); /****************************************************************************/ /** * Initiates a transfer when the descriptors have already been setup. * * This is a special case, and normally, the dma_transfer_xxx functions should * be used. 
* * @return * 0 Transfer was started successfully * -ENODEV Invalid handle */ /****************************************************************************/ int dma_start_transfer(DMA_Handle_t handle) { DMA_Channel_t *channel; DMA_DeviceAttribute_t *devAttr; channel = HandleToChannel(handle); if (channel == NULL) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[channel->devType]; dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config, devAttr->ring.virtAddr); /* Since we got this far, everything went successfully */ return 0; } EXPORT_SYMBOL(dma_start_transfer); /****************************************************************************/ /** * Stops a previously started DMA transfer. * * @return * 0 Transfer was stopped successfully * -ENODEV Invalid handle */ /****************************************************************************/ int dma_stop_transfer(DMA_Handle_t handle) { DMA_Channel_t *channel; channel = HandleToChannel(handle); if (channel == NULL) { return -ENODEV; } dmacHw_stopTransfer(channel->dmacHwHandle); return 0; } EXPORT_SYMBOL(dma_stop_transfer); /****************************************************************************/ /** * Waits for a DMA to complete by polling. This function is only intended * to be used for testing. Interrupts should be used for most DMA operations. 
*/ /****************************************************************************/ int dma_wait_transfer_done(DMA_Handle_t handle) { DMA_Channel_t *channel; dmacHw_TRANSFER_STATUS_e status; channel = HandleToChannel(handle); if (channel == NULL) { return -ENODEV; } while ((status = dmacHw_transferCompleted(channel->dmacHwHandle)) == dmacHw_TRANSFER_STATUS_BUSY) { ; } if (status == dmacHw_TRANSFER_STATUS_ERROR) { printk(KERN_ERR "%s: DMA transfer failed\n", __func__); return -EIO; } return 0; } EXPORT_SYMBOL(dma_wait_transfer_done); /****************************************************************************/ /** * Initiates a DMA, allocating the descriptors as required. * * @return * 0 Transfer was started successfully * -EINVAL Invalid device type for this kind of transfer * (i.e. the device is _DEV_TO_MEM and not _MEM_TO_DEV) */ /****************************************************************************/ int dma_transfer(DMA_Handle_t handle, /* DMA Handle */ dmacHw_TRANSFER_TYPE_e transferType, /* Type of transfer being performed */ dma_addr_t srcData, /* Place to get data to write to device */ dma_addr_t dstData, /* Pointer to device data address */ size_t numBytes /* Number of bytes to transfer to the device */ ) { DMA_Channel_t *channel; DMA_DeviceAttribute_t *devAttr; int rc = 0; channel = HandleToChannel(handle); if (channel == NULL) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[channel->devType]; if (devAttr->config.transferType != transferType) { return -EINVAL; } /* We keep track of the information about the previous request for this */ /* device, and if the attributes match, then we can use the descriptors we setup */ /* the last time, and not have to reinitialize everything. 
*/ { rc = dma_alloc_descriptors(handle, transferType, srcData, dstData, numBytes); if (rc != 0) { return rc; } } /* And kick off the transfer */ devAttr->numBytes = numBytes; devAttr->transferStartTime = timer_get_tick_count(); dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config, devAttr->ring.virtAddr); /* Since we got this far, everything went successfully */ return 0; } EXPORT_SYMBOL(dma_transfer); /****************************************************************************/ /** * Set the callback function which will be called when a transfer completes. * If a NULL callback function is set, then no callback will occur. * * @note @a devHandler will be called from IRQ context. * * @return * 0 - Success * -ENODEV - Device handed in is invalid. */ /****************************************************************************/ int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for. */ DMA_DeviceHandler_t devHandler, /* Function to call when the DMA completes */ void *userData /* Pointer which will be passed to devHandler. */ ) { DMA_DeviceAttribute_t *devAttr; unsigned long flags; if (!IsDeviceValid(dev)) { return -ENODEV; } devAttr = &DMA_gDeviceAttribute[dev]; local_irq_save(flags); devAttr->userData = userData; devAttr->devHandler = devHandler; local_irq_restore(flags); return 0; } EXPORT_SYMBOL(dma_set_device_handler); /****************************************************************************/ /** * Initializes a memory mapping structure */ /****************************************************************************/ int dma_init_mem_map(DMA_MemMap_t *memMap) { memset(memMap, 0, sizeof(*memMap)); sema_init(&memMap->lock, 1); return 0; } EXPORT_SYMBOL(dma_init_mem_map); /****************************************************************************/ /** * Releases any memory currently being held by a memory mapping structure. 
*/ /****************************************************************************/ int dma_term_mem_map(DMA_MemMap_t *memMap) { down(&memMap->lock); /* Just being paranoid */ /* Free up any allocated memory */ up(&memMap->lock); memset(memMap, 0, sizeof(*memMap)); return 0; } EXPORT_SYMBOL(dma_term_mem_map); /****************************************************************************/ /** * Looks at a memory address and categorizes it. * * @return One of the values from the DMA_MemType_t enumeration. */ /****************************************************************************/ DMA_MemType_t dma_mem_type(void *addr) { unsigned long addrVal = (unsigned long)addr; if (addrVal >= VMALLOC_END) { /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */ /* dma_alloc_xxx pages are physically and virtually contiguous */ return DMA_MEM_TYPE_DMA; } /* Technically, we could add one more classification. Addresses between VMALLOC_END */ /* and the beginning of the DMA virtual address could be considered to be I/O space. */ /* Right now, nobody cares about this particular classification, so we ignore it. */ if (is_vmalloc_addr(addr)) { /* Address comes from the vmalloc'd region. Pages are virtually */ /* contiguous but NOT physically contiguous */ return DMA_MEM_TYPE_VMALLOC; } if (addrVal >= PAGE_OFFSET) { /* PAGE_OFFSET is typically 0xC0000000 */ /* kmalloc'd pages are physically contiguous */ return DMA_MEM_TYPE_KMALLOC; } return DMA_MEM_TYPE_USER; } EXPORT_SYMBOL(dma_mem_type); /****************************************************************************/ /** * Looks at a memory address and determines if we support DMA'ing to/from * that type of memory. 
* * @return boolean - * return value != 0 means dma supported * return value == 0 means dma not supported */ /****************************************************************************/ int dma_mem_supports_dma(void *addr) { DMA_MemType_t memType = dma_mem_type(addr); return (memType == DMA_MEM_TYPE_DMA) #if ALLOW_MAP_OF_KMALLOC_MEMORY || (memType == DMA_MEM_TYPE_KMALLOC) #endif || (memType == DMA_MEM_TYPE_USER); } EXPORT_SYMBOL(dma_mem_supports_dma); /****************************************************************************/ /** * Maps in a memory region such that it can be used for performing a DMA. * * @return */ /****************************************************************************/ int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */ enum dma_data_direction dir /* Direction that the mapping will be going */ ) { int rc; down(&memMap->lock); DMA_MAP_PRINT("memMap: %p\n", memMap); if (memMap->inUse) { printk(KERN_ERR "%s: memory map %p is already being used\n", __func__, memMap); rc = -EBUSY; goto out; } memMap->inUse = 1; memMap->dir = dir; memMap->numRegionsUsed = 0; rc = 0; out: DMA_MAP_PRINT("returning %d", rc); up(&memMap->lock); return rc; } EXPORT_SYMBOL(dma_map_start); /****************************************************************************/ /** * Adds a segment of memory to a memory map. Each segment is both * physically and virtually contiguous. * * @return 0 on success, error code otherwise. 
*/ /****************************************************************************/ static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */ DMA_Region_t *region, /* Region that the segment belongs to */ void *virtAddr, /* Virtual address of the segment being added */ dma_addr_t physAddr, /* Physical address of the segment being added */ size_t numBytes /* Number of bytes of the segment being added */ ) { DMA_Segment_t *segment; DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr, physAddr, numBytes); /* Sanity check */ if (((unsigned long)virtAddr < (unsigned long)region->virtAddr) || (((unsigned long)virtAddr + numBytes)) > ((unsigned long)region->virtAddr + region->numBytes)) { printk(KERN_ERR "%s: virtAddr %p is outside region @ %p len: %d\n", __func__, virtAddr, region->virtAddr, region->numBytes); return -EINVAL; } if (region->numSegmentsUsed > 0) { /* Check to see if this segment is physically contiguous with the previous one */ segment = &region->segment[region->numSegmentsUsed - 1]; if ((segment->physAddr + segment->numBytes) == physAddr) { /* It is - just add on to the end */ DMA_MAP_PRINT("appending %d bytes to last segment\n", numBytes); segment->numBytes += numBytes; return 0; } } /* Reallocate to hold more segments, if required. 
*/ if (region->numSegmentsUsed >= region->numSegmentsAllocated) { DMA_Segment_t *newSegment; size_t oldSize = region->numSegmentsAllocated * sizeof(*newSegment); int newAlloc = region->numSegmentsAllocated + 4; size_t newSize = newAlloc * sizeof(*newSegment); newSegment = kmalloc(newSize, GFP_KERNEL); if (newSegment == NULL) { return -ENOMEM; } memcpy(newSegment, region->segment, oldSize); memset(&((uint8_t *) newSegment)[oldSize], 0, newSize - oldSize); kfree(region->segment); region->numSegmentsAllocated = newAlloc; region->segment = newSegment; } segment = &region->segment[region->numSegmentsUsed]; region->numSegmentsUsed++; segment->virtAddr = virtAddr; segment->physAddr = physAddr; segment->numBytes = numBytes; DMA_MAP_PRINT("returning success\n"); return 0; } /****************************************************************************/ /** * Adds a region of memory to a memory map. Each region is virtually * contiguous, but not necessarily physically contiguous. * * @return 0 on success, error code otherwise. */ /****************************************************************************/ int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */ void *mem, /* Virtual address that we want to get a map of */ size_t numBytes /* Number of bytes being mapped */ ) { unsigned long addr = (unsigned long)mem; unsigned int offset; int rc = 0; DMA_Region_t *region; dma_addr_t physAddr; down(&memMap->lock); DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes); if (!memMap->inUse) { printk(KERN_ERR "%s: Make sure you call dma_map_start first\n", __func__); rc = -EINVAL; goto out; } /* Reallocate to hold more regions. 
*/ if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) { DMA_Region_t *newRegion; size_t oldSize = memMap->numRegionsAllocated * sizeof(*newRegion); int newAlloc = memMap->numRegionsAllocated + 4; size_t newSize = newAlloc * sizeof(*newRegion); newRegion = kmalloc(newSize, GFP_KERNEL); if (newRegion == NULL) { rc = -ENOMEM; goto out; } memcpy(newRegion, memMap->region, oldSize); memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize); kfree(memMap->region); memMap->numRegionsAllocated = newAlloc; memMap->region = newRegion; } region = &memMap->region[memMap->numRegionsUsed]; memMap->numRegionsUsed++; offset = addr & ~PAGE_MASK; region->memType = dma_mem_type(mem); region->virtAddr = mem; region->numBytes = numBytes; region->numSegmentsUsed = 0; region->numLockedPages = 0; region->lockedPages = NULL; switch (region->memType) { case DMA_MEM_TYPE_VMALLOC: { atomic_inc(&gDmaStatMemTypeVmalloc); /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */ /* vmalloc'd pages are not physically contiguous */ rc = -EINVAL; break; } case DMA_MEM_TYPE_KMALLOC: { atomic_inc(&gDmaStatMemTypeKmalloc); /* kmalloc'd pages are physically contiguous, so they'll have exactly */ /* one segment */ #if ALLOW_MAP_OF_KMALLOC_MEMORY physAddr = dma_map_single(NULL, mem, numBytes, memMap->dir); rc = dma_map_add_segment(memMap, region, mem, physAddr, numBytes); #else rc = -EINVAL; #endif break; } case DMA_MEM_TYPE_DMA: { /* dma_alloc_xxx pages are physically contiguous */ atomic_inc(&gDmaStatMemTypeCoherent); physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset; dma_sync_single_for_cpu(NULL, physAddr, numBytes, memMap->dir); rc = dma_map_add_segment(memMap, region, mem, physAddr, numBytes); break; } case DMA_MEM_TYPE_USER: { size_t firstPageOffset; size_t firstPageSize; struct page **pages; struct task_struct *userTask; atomic_inc(&gDmaStatMemTypeUser); #if 1 /* If the pages are user pages, then the dma_mem_map_set_user_task function */ /* must have been 
previously called. */ if (memMap->userTask == NULL) { printk(KERN_ERR "%s: must call dma_mem_map_set_user_task when using user-mode memory\n", __func__); return -EINVAL; } /* User pages need to be locked. */ firstPageOffset = (unsigned long)region->virtAddr & (PAGE_SIZE - 1); firstPageSize = PAGE_SIZE - firstPageOffset; region->numLockedPages = (firstPageOffset + region->numBytes + PAGE_SIZE - 1) / PAGE_SIZE; pages = kmalloc(region->numLockedPages * sizeof(struct page *), GFP_KERNEL); if (pages == NULL) { region->numLockedPages = 0; return -ENOMEM; } userTask = memMap->userTask; down_read(&userTask->mm->mmap_sem); rc = get_user_pages(userTask, /* task */ userTask->mm, /* mm */ (unsigned long)region->virtAddr, /* start */ region->numLockedPages, /* len */ memMap->dir == DMA_FROM_DEVICE, /* write */ 0, /* force */ pages, /* pages (array of pointers to page) */ NULL); /* vmas */ up_read(&userTask->mm->mmap_sem); if (rc != region->numLockedPages) { kfree(pages); region->numLockedPages = 0; if (rc >= 0) { rc = -EINVAL; } } else { uint8_t *virtAddr = region->virtAddr; size_t bytesRemaining; int pageIdx; rc = 0; /* Since get_user_pages returns +ve number */ region->lockedPages = pages; /* We've locked the user pages. Now we need to walk them and figure */ /* out the physical addresses. */ /* The first page may be partial */ dma_map_add_segment(memMap, region, virtAddr, PFN_PHYS(page_to_pfn (pages[0])) + firstPageOffset, firstPageSize); virtAddr += firstPageSize; bytesRemaining = region->numBytes - firstPageSize; for (pageIdx = 1; pageIdx < region->numLockedPages; pageIdx++) { size_t bytesThisPage = (bytesRemaining > PAGE_SIZE ? 
PAGE_SIZE : bytesRemaining); DMA_MAP_PRINT ("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n", pageIdx, pages[pageIdx], page_to_pfn(pages[pageIdx]), PFN_PHYS(page_to_pfn (pages[pageIdx]))); dma_map_add_segment(memMap, region, virtAddr, PFN_PHYS(page_to_pfn (pages [pageIdx])), bytesThisPage); virtAddr += bytesThisPage; bytesRemaining -= bytesThisPage; } } #else printk(KERN_ERR "%s: User mode pages are not yet supported\n", __func__); /* user pages are not physically contiguous */ rc = -EINVAL; #endif break; } default: { printk(KERN_ERR "%s: Unsupported memory type: %d\n", __func__, region->memType); rc = -EINVAL; break; } } if (rc != 0) { memMap->numRegionsUsed--; } out: DMA_MAP_PRINT("returning %d\n", rc); up(&memMap->lock); return rc; } EXPORT_SYMBOL(dma_map_add_segment); /****************************************************************************/ /** * Maps in a memory region such that it can be used for performing a DMA. * * @return 0 on success, error code otherwise. */ /****************************************************************************/ int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */ void *mem, /* Virtual address that we want to get a map of */ size_t numBytes, /* Number of bytes being mapped */ enum dma_data_direction dir /* Direction that the mapping will be going */ ) { int rc; rc = dma_map_start(memMap, dir); if (rc == 0) { rc = dma_map_add_region(memMap, mem, numBytes); if (rc < 0) { /* Since the add fails, this function will fail, and the caller won't */ /* call unmap, so we need to do it here. */ dma_unmap(memMap, 0); } } return rc; } EXPORT_SYMBOL(dma_map_mem); /****************************************************************************/ /** * Setup a descriptor ring for a given memory map. * * It is assumed that the descriptor ring has already been initialized, and * this routine will only reallocate a new descriptor ring if the existing * one is too small. * * @return 0 on success, error code otherwise. 
*/ /****************************************************************************/ int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */ DMA_MemMap_t *memMap, /* Memory map that will be used */ dma_addr_t devPhysAddr /* Physical address of device */ ) { int rc; int numDescriptors; DMA_DeviceAttribute_t *devAttr; DMA_Region_t *region; DMA_Segment_t *segment; dma_addr_t srcPhysAddr; dma_addr_t dstPhysAddr; int regionIdx; int segmentIdx; devAttr = &DMA_gDeviceAttribute[dev]; down(&memMap->lock); /* Figure out how many descriptors we need */ numDescriptors = 0; for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { region = &memMap->region[regionIdx]; for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed; segmentIdx++) { segment = &region->segment[segmentIdx]; if (memMap->dir == DMA_TO_DEVICE) { srcPhysAddr = segment->physAddr; dstPhysAddr = devPhysAddr; } else { srcPhysAddr = devPhysAddr; dstPhysAddr = segment->physAddr; } rc = dma_calculate_descriptor_count(dev, srcPhysAddr, dstPhysAddr, segment-> numBytes); if (rc < 0) { printk(KERN_ERR "%s: dma_calculate_descriptor_count failed: %d\n", __func__, rc); goto out; } numDescriptors += rc; } } /* Adjust the size of the ring, if it isn't big enough */ if (numDescriptors > devAttr->ring.descriptorsAllocated) { dma_free_descriptor_ring(&devAttr->ring); rc = dma_alloc_descriptor_ring(&devAttr->ring, numDescriptors); if (rc < 0) { printk(KERN_ERR "%s: dma_alloc_descriptor_ring failed: %d\n", __func__, rc); goto out; } } else { rc = dma_init_descriptor_ring(&devAttr->ring, numDescriptors); if (rc < 0) { printk(KERN_ERR "%s: dma_init_descriptor_ring failed: %d\n", __func__, rc); goto out; } } /* Populate the descriptors */ for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { region = &memMap->region[regionIdx]; for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed; segmentIdx++) { segment = &region->segment[segmentIdx]; if (memMap->dir == 
DMA_TO_DEVICE) { srcPhysAddr = segment->physAddr; dstPhysAddr = devPhysAddr; } else { srcPhysAddr = devPhysAddr; dstPhysAddr = segment->physAddr; } rc = dma_add_descriptors(&devAttr->ring, dev, srcPhysAddr, dstPhysAddr, segment->numBytes); if (rc < 0) { printk(KERN_ERR "%s: dma_add_descriptors failed: %d\n", __func__, rc); goto out; } } } rc = 0; out: up(&memMap->lock); return rc; } EXPORT_SYMBOL(dma_map_create_descriptor_ring); /****************************************************************************/ /** * Maps in a memory region such that it can be used for performing a DMA. * * @return */ /****************************************************************************/ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ int dirtied /* non-zero if any of the pages were modified */ ) { int rc = 0; int regionIdx; int segmentIdx; DMA_Region_t *region; DMA_Segment_t *segment; down(&memMap->lock); for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { region = &memMap->region[regionIdx]; for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed; segmentIdx++) { segment = &region->segment[segmentIdx]; switch (region->memType) { case DMA_MEM_TYPE_VMALLOC: { printk(KERN_ERR "%s: vmalloc'd pages are not yet supported\n", __func__); rc = -EINVAL; goto out; } case DMA_MEM_TYPE_KMALLOC: { #if ALLOW_MAP_OF_KMALLOC_MEMORY dma_unmap_single(NULL, segment->physAddr, segment->numBytes, memMap->dir); #endif break; } case DMA_MEM_TYPE_DMA: { dma_sync_single_for_cpu(NULL, segment-> physAddr, segment-> numBytes, memMap->dir); break; } case DMA_MEM_TYPE_USER: { /* Nothing to do here. */ break; } default: { printk(KERN_ERR "%s: Unsupported memory type: %d\n", __func__, region->memType); rc = -EINVAL; goto out; } } segment->virtAddr = NULL; segment->physAddr = 0; segment->numBytes = 0; } if (region->numLockedPages > 0) { int pageIdx; /* Some user pages were locked. We need to go and unlock them now. 
*/ for (pageIdx = 0; pageIdx < region->numLockedPages; pageIdx++) { struct page *page = region->lockedPages[pageIdx]; if (memMap->dir == DMA_FROM_DEVICE) { SetPageDirty(page); } page_cache_release(page); } kfree(region->lockedPages); region->numLockedPages = 0; region->lockedPages = NULL; } region->memType = DMA_MEM_TYPE_NONE; region->virtAddr = NULL; region->numBytes = 0; region->numSegmentsUsed = 0; } memMap->userTask = NULL; memMap->numRegionsUsed = 0; memMap->inUse = 0; out: up(&memMap->lock); return rc; } EXPORT_SYMBOL(dma_unmap);
gpl-2.0
MrHyde03/android_kernel_samsung_espressovzw-jb
arch/arm/mach-orion5x/ts78xx-setup.c
2669
15540
/* * arch/arm/mach-orion5x/ts78xx-setup.c * * Maintainer: Alexander Clouter <alex@digriz.org.uk> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sysfs.h> #include <linux/platform_device.h> #include <linux/mv643xx_eth.h> #include <linux/ata_platform.h> #include <linux/m48t86.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/timeriomem-rng.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" #include "ts78xx-fpga.h" /***************************************************************************** * TS-78xx Info ****************************************************************************/ /* * FPGA - lives where the PCI bus would be at ORION5X_PCI_MEM_PHYS_BASE */ #define TS78XX_FPGA_REGS_PHYS_BASE 0xe8000000 #define TS78XX_FPGA_REGS_VIRT_BASE 0xff900000 #define TS78XX_FPGA_REGS_SIZE SZ_1M static struct ts78xx_fpga_data ts78xx_fpga = { .id = 0, .state = 1, /* .supports = ... 
- populated by ts78xx_fpga_supports() */ }; /***************************************************************************** * I/O Address Mapping ****************************************************************************/ static struct map_desc ts78xx_io_desc[] __initdata = { { .virtual = TS78XX_FPGA_REGS_VIRT_BASE, .pfn = __phys_to_pfn(TS78XX_FPGA_REGS_PHYS_BASE), .length = TS78XX_FPGA_REGS_SIZE, .type = MT_DEVICE, }, }; void __init ts78xx_map_io(void) { orion5x_map_io(); iotable_init(ts78xx_io_desc, ARRAY_SIZE(ts78xx_io_desc)); } /***************************************************************************** * Ethernet ****************************************************************************/ static struct mv643xx_eth_platform_data ts78xx_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(0), }; /***************************************************************************** * SATA ****************************************************************************/ static struct mv_sata_platform_data ts78xx_sata_data = { .n_ports = 2, }; /***************************************************************************** * RTC M48T86 - nicked^Wborrowed from arch/arm/mach-ep93xx/ts72xx.c ****************************************************************************/ #define TS_RTC_CTRL (TS78XX_FPGA_REGS_VIRT_BASE | 0x808) #define TS_RTC_DATA (TS78XX_FPGA_REGS_VIRT_BASE | 0x80c) static unsigned char ts78xx_ts_rtc_readbyte(unsigned long addr) { writeb(addr, TS_RTC_CTRL); return readb(TS_RTC_DATA); } static void ts78xx_ts_rtc_writebyte(unsigned char value, unsigned long addr) { writeb(addr, TS_RTC_CTRL); writeb(value, TS_RTC_DATA); } static struct m48t86_ops ts78xx_ts_rtc_ops = { .readbyte = ts78xx_ts_rtc_readbyte, .writebyte = ts78xx_ts_rtc_writebyte, }; static struct platform_device ts78xx_ts_rtc_device = { .name = "rtc-m48t86", .id = -1, .dev = { .platform_data = &ts78xx_ts_rtc_ops, }, .num_resources = 0, }; /* * TS uses some of the user storage space on the RTC chip so see if it is 
* present; as it's an optional feature at purchase time and not all boards * will have it present * * I've used the method TS use in their rtc7800.c example for the detection * * TODO: track down a guinea pig without an RTC to see if we can work out a * better RTC detection routine */ static int ts78xx_ts_rtc_load(void) { int rc; unsigned char tmp_rtc0, tmp_rtc1; tmp_rtc0 = ts78xx_ts_rtc_readbyte(126); tmp_rtc1 = ts78xx_ts_rtc_readbyte(127); ts78xx_ts_rtc_writebyte(0x00, 126); ts78xx_ts_rtc_writebyte(0x55, 127); if (ts78xx_ts_rtc_readbyte(127) == 0x55) { ts78xx_ts_rtc_writebyte(0xaa, 127); if (ts78xx_ts_rtc_readbyte(127) == 0xaa && ts78xx_ts_rtc_readbyte(126) == 0x00) { ts78xx_ts_rtc_writebyte(tmp_rtc0, 126); ts78xx_ts_rtc_writebyte(tmp_rtc1, 127); if (ts78xx_fpga.supports.ts_rtc.init == 0) { rc = platform_device_register(&ts78xx_ts_rtc_device); if (!rc) ts78xx_fpga.supports.ts_rtc.init = 1; } else rc = platform_device_add(&ts78xx_ts_rtc_device); return rc; } } return -ENODEV; }; static void ts78xx_ts_rtc_unload(void) { platform_device_del(&ts78xx_ts_rtc_device); } /***************************************************************************** * NAND Flash ****************************************************************************/ #define TS_NAND_CTRL (TS78XX_FPGA_REGS_VIRT_BASE | 0x800) /* VIRT */ #define TS_NAND_DATA (TS78XX_FPGA_REGS_PHYS_BASE | 0x804) /* PHYS */ /* * hardware specific access to control-lines * * ctrl: * NAND_NCE: bit 0 -> bit 2 * NAND_CLE: bit 1 -> bit 1 * NAND_ALE: bit 2 -> bit 0 */ static void ts78xx_ts_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *this = mtd->priv; if (ctrl & NAND_CTRL_CHANGE) { unsigned char bits; bits = (ctrl & NAND_NCE) << 2; bits |= ctrl & NAND_CLE; bits |= (ctrl & NAND_ALE) >> 2; writeb((readb(TS_NAND_CTRL) & ~0x7) | bits, TS_NAND_CTRL); } if (cmd != NAND_CMD_NONE) writeb(cmd, this->IO_ADDR_W); } static int ts78xx_ts_nand_dev_ready(struct mtd_info *mtd) { return readb(TS_NAND_CTRL) 
& 0x20; } static void ts78xx_ts_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct nand_chip *chip = mtd->priv; void __iomem *io_base = chip->IO_ADDR_W; unsigned long off = ((unsigned long)buf & 3); int sz; if (off) { sz = min_t(int, 4 - off, len); writesb(io_base, buf, sz); buf += sz; len -= sz; } sz = len >> 2; if (sz) { u32 *buf32 = (u32 *)buf; writesl(io_base, buf32, sz); buf += sz << 2; len -= sz << 2; } if (len) writesb(io_base, buf, len); } static void ts78xx_ts_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct nand_chip *chip = mtd->priv; void __iomem *io_base = chip->IO_ADDR_R; unsigned long off = ((unsigned long)buf & 3); int sz; if (off) { sz = min_t(int, 4 - off, len); readsb(io_base, buf, sz); buf += sz; len -= sz; } sz = len >> 2; if (sz) { u32 *buf32 = (u32 *)buf; readsl(io_base, buf32, sz); buf += sz << 2; len -= sz << 2; } if (len) readsb(io_base, buf, len); } const char *ts_nand_part_probes[] = { "cmdlinepart", NULL }; static struct mtd_partition ts78xx_ts_nand_parts[] = { { .name = "mbr", .offset = 0, .size = SZ_128K, .mask_flags = MTD_WRITEABLE, }, { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_4M, }, { .name = "initrd", .offset = MTDPART_OFS_APPEND, .size = SZ_4M, }, { .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, } }; static struct platform_nand_data ts78xx_ts_nand_data = { .chip = { .nr_chips = 1, .part_probe_types = ts_nand_part_probes, .partitions = ts78xx_ts_nand_parts, .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts), .chip_delay = 15, .options = NAND_USE_FLASH_BBT, }, .ctrl = { /* * The HW ECC offloading functions, used to give about a 9% * performance increase for 'dd if=/dev/mtdblockX' and 5% for * nanddump. 
This all however was changed by git commit * e6cf5df1838c28bb060ac45b5585e48e71bbc740 so now there is * no performance advantage to be had so we no longer bother */ .cmd_ctrl = ts78xx_ts_nand_cmd_ctrl, .dev_ready = ts78xx_ts_nand_dev_ready, .write_buf = ts78xx_ts_nand_write_buf, .read_buf = ts78xx_ts_nand_read_buf, }, }; static struct resource ts78xx_ts_nand_resources = { .start = TS_NAND_DATA, .end = TS_NAND_DATA + 4, .flags = IORESOURCE_MEM, }; static struct platform_device ts78xx_ts_nand_device = { .name = "gen_nand", .id = -1, .dev = { .platform_data = &ts78xx_ts_nand_data, }, .resource = &ts78xx_ts_nand_resources, .num_resources = 1, }; static int ts78xx_ts_nand_load(void) { int rc; if (ts78xx_fpga.supports.ts_nand.init == 0) { rc = platform_device_register(&ts78xx_ts_nand_device); if (!rc) ts78xx_fpga.supports.ts_nand.init = 1; } else rc = platform_device_add(&ts78xx_ts_nand_device); return rc; }; static void ts78xx_ts_nand_unload(void) { platform_device_del(&ts78xx_ts_nand_device); } /***************************************************************************** * HW RNG ****************************************************************************/ #define TS_RNG_DATA (TS78XX_FPGA_REGS_PHYS_BASE | 0x044) static struct resource ts78xx_ts_rng_resource = { .flags = IORESOURCE_MEM, .start = TS_RNG_DATA, .end = TS_RNG_DATA + 4 - 1, }; static struct timeriomem_rng_data ts78xx_ts_rng_data = { .period = 1000000, /* one second */ }; static struct platform_device ts78xx_ts_rng_device = { .name = "timeriomem_rng", .id = -1, .dev = { .platform_data = &ts78xx_ts_rng_data, }, .resource = &ts78xx_ts_rng_resource, .num_resources = 1, }; static int ts78xx_ts_rng_load(void) { int rc; if (ts78xx_fpga.supports.ts_rng.init == 0) { rc = platform_device_register(&ts78xx_ts_rng_device); if (!rc) ts78xx_fpga.supports.ts_rng.init = 1; } else rc = platform_device_add(&ts78xx_ts_rng_device); return rc; }; static void ts78xx_ts_rng_unload(void) { 
platform_device_del(&ts78xx_ts_rng_device); } /***************************************************************************** * FPGA 'hotplug' support code ****************************************************************************/ static void ts78xx_fpga_devices_zero_init(void) { ts78xx_fpga.supports.ts_rtc.init = 0; ts78xx_fpga.supports.ts_nand.init = 0; ts78xx_fpga.supports.ts_rng.init = 0; } static void ts78xx_fpga_supports(void) { /* TODO: put this 'table' into ts78xx-fpga.h */ switch (ts78xx_fpga.id) { case TS7800_REV_1: case TS7800_REV_2: case TS7800_REV_3: case TS7800_REV_4: case TS7800_REV_5: case TS7800_REV_6: case TS7800_REV_7: case TS7800_REV_8: case TS7800_REV_9: ts78xx_fpga.supports.ts_rtc.present = 1; ts78xx_fpga.supports.ts_nand.present = 1; ts78xx_fpga.supports.ts_rng.present = 1; break; default: /* enable devices if magic matches */ switch ((ts78xx_fpga.id >> 8) & 0xffffff) { case TS7800_FPGA_MAGIC: pr_warning("TS-7800 FPGA: unrecognized revision 0x%.2x\n", ts78xx_fpga.id & 0xff); ts78xx_fpga.supports.ts_rtc.present = 1; ts78xx_fpga.supports.ts_nand.present = 1; ts78xx_fpga.supports.ts_rng.present = 1; break; default: ts78xx_fpga.supports.ts_rtc.present = 0; ts78xx_fpga.supports.ts_nand.present = 0; ts78xx_fpga.supports.ts_rng.present = 0; } } } static int ts78xx_fpga_load_devices(void) { int tmp, ret = 0; if (ts78xx_fpga.supports.ts_rtc.present == 1) { tmp = ts78xx_ts_rtc_load(); if (tmp) { pr_info("TS-78xx: RTC not registered\n"); ts78xx_fpga.supports.ts_rtc.present = 0; } ret |= tmp; } if (ts78xx_fpga.supports.ts_nand.present == 1) { tmp = ts78xx_ts_nand_load(); if (tmp) { pr_info("TS-78xx: NAND not registered\n"); ts78xx_fpga.supports.ts_nand.present = 0; } ret |= tmp; } if (ts78xx_fpga.supports.ts_rng.present == 1) { tmp = ts78xx_ts_rng_load(); if (tmp) { pr_info("TS-78xx: RNG not registered\n"); ts78xx_fpga.supports.ts_rng.present = 0; } ret |= tmp; } return ret; } static int ts78xx_fpga_unload_devices(void) { int ret = 0; if 
(ts78xx_fpga.supports.ts_rtc.present == 1) ts78xx_ts_rtc_unload(); if (ts78xx_fpga.supports.ts_nand.present == 1) ts78xx_ts_nand_unload(); if (ts78xx_fpga.supports.ts_rng.present == 1) ts78xx_ts_rng_unload(); return ret; } static int ts78xx_fpga_load(void) { ts78xx_fpga.id = readl(TS78XX_FPGA_REGS_VIRT_BASE); pr_info("TS-78xx FPGA: magic=0x%.6x, rev=0x%.2x\n", (ts78xx_fpga.id >> 8) & 0xffffff, ts78xx_fpga.id & 0xff); ts78xx_fpga_supports(); if (ts78xx_fpga_load_devices()) { ts78xx_fpga.state = -1; return -EBUSY; } return 0; }; static int ts78xx_fpga_unload(void) { unsigned int fpga_id; fpga_id = readl(TS78XX_FPGA_REGS_VIRT_BASE); /* * There does not seem to be a feasible way to block access to the GPIO * pins from userspace (/dev/mem). This if clause should hopefully warn * those foolish enough not to follow 'policy' :) * * UrJTAG SVN since r1381 can be used to reprogram the FPGA */ if (ts78xx_fpga.id != fpga_id) { pr_err("TS-78xx FPGA: magic/rev mismatch\n" "TS-78xx FPGA: was 0x%.6x/%.2x but now 0x%.6x/%.2x\n", (ts78xx_fpga.id >> 8) & 0xffffff, ts78xx_fpga.id & 0xff, (fpga_id >> 8) & 0xffffff, fpga_id & 0xff); ts78xx_fpga.state = -1; return -EBUSY; } if (ts78xx_fpga_unload_devices()) { ts78xx_fpga.state = -1; return -EBUSY; } return 0; }; static ssize_t ts78xx_fpga_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { if (ts78xx_fpga.state < 0) return sprintf(buf, "borked\n"); return sprintf(buf, "%s\n", (ts78xx_fpga.state) ? 
"online" : "offline"); } static ssize_t ts78xx_fpga_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { int value, ret; if (ts78xx_fpga.state < 0) { pr_err("TS-78xx FPGA: borked, you must powercycle asap\n"); return -EBUSY; } if (strncmp(buf, "online", sizeof("online") - 1) == 0) value = 1; else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) value = 0; else { pr_err("ts78xx_fpga_store: Invalid value\n"); return -EINVAL; } if (ts78xx_fpga.state == value) return n; ret = (ts78xx_fpga.state == 0) ? ts78xx_fpga_load() : ts78xx_fpga_unload(); if (!(ret < 0)) ts78xx_fpga.state = value; return n; } static struct kobj_attribute ts78xx_fpga_attr = __ATTR(ts78xx_fpga, 0644, ts78xx_fpga_show, ts78xx_fpga_store); /***************************************************************************** * General Setup ****************************************************************************/ static unsigned int ts78xx_mpp_modes[] __initdata = { MPP0_UNUSED, MPP1_GPIO, /* JTAG Clock */ MPP2_GPIO, /* JTAG Data In */ MPP3_GPIO, /* Lat ECP2 256 FPGA - PB2B */ MPP4_GPIO, /* JTAG Data Out */ MPP5_GPIO, /* JTAG TMS */ MPP6_GPIO, /* Lat ECP2 256 FPGA - PB31A_CLK4+ */ MPP7_GPIO, /* Lat ECP2 256 FPGA - PB22B */ MPP8_UNUSED, MPP9_UNUSED, MPP10_UNUSED, MPP11_UNUSED, MPP12_UNUSED, MPP13_UNUSED, MPP14_UNUSED, MPP15_UNUSED, MPP16_UART, MPP17_UART, MPP18_UART, MPP19_UART, /* * MPP[20] PCI Clock Out 1 * MPP[21] PCI Clock Out 0 * MPP[22] Unused * MPP[23] Unused * MPP[24] Unused * MPP[25] Unused */ 0, }; static void __init ts78xx_init(void) { int ret; /* * Setup basic Orion functions. Need to be called early. */ orion5x_init(); orion5x_mpp_conf(ts78xx_mpp_modes); /* * Configure peripherals. 
*/ orion5x_ehci0_init(); orion5x_ehci1_init(); orion5x_eth_init(&ts78xx_eth_data); orion5x_sata_init(&ts78xx_sata_data); orion5x_uart0_init(); orion5x_uart1_init(); orion5x_xor_init(); /* FPGA init */ ts78xx_fpga_devices_zero_init(); ret = ts78xx_fpga_load(); ret = sysfs_create_file(power_kobj, &ts78xx_fpga_attr.attr); if (ret) pr_err("sysfs_create_file failed: %d\n", ret); } MACHINE_START(TS78XX, "Technologic Systems TS-78xx SBC") /* Maintainer: Alexander Clouter <alex@digriz.org.uk> */ .boot_params = 0x00000100, .init_machine = ts78xx_init, .map_io = ts78xx_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, MACHINE_END
gpl-2.0
aatjitra/PR25
drivers/scsi/advansys.c
3181
383611
#define DRV_NAME "advansys" #define ASC_VERSION "3.4" /* AdvanSys Driver Version */ /* * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-2000 Advanced System Products, Inc. * Copyright (c) 2000-2001 ConnectCom Solutions, Inc. * Copyright (c) 2007 Matthew Wilcox <matthew@wil.cx> * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys) * changed its name to ConnectCom Solutions, Inc. * On June 18, 2001 Initio Corp. acquired ConnectCom's SCSI assets */ #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/isa.h> #include <linux/eisa.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> #include <asm/io.h> #include <asm/system.h> #include <asm/dma.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> /* FIXME: * * 1. Although all of the necessary command mapping places have the * appropriate dma_map.. APIs, the driver still processes its internal * queue using bus_to_virt() and virt_to_bus() which are illegal under * the API. The entire queue processing structure will need to be * altered to fix this. * 2. Need to add memory mapping workaround. Test the memory mapping. * If it doesn't work revert to I/O port access. Can a test be done * safely? * 3. Handle an interrupt not working. Keep an interrupt counter in * the interrupt handler. 
In the timeout function if the interrupt * has not occurred then print a message and run in polled mode. * 4. Need to add support for target mode commands, cf. CAM XPT. * 5. check DMA mapping functions for failure * 6. Use scsi_transport_spi * 7. advansys_info is not safe against multiple simultaneous callers * 8. Add module_param to override ISA/VLB ioport array */ #warning this driver is still not properly converted to the DMA API /* Enable driver /proc statistics. */ #define ADVANSYS_STATS /* Enable driver tracing. */ #undef ADVANSYS_DEBUG /* * Portable Data Types * * Any instance where a 32-bit long or pointer type is assumed * for precision or HW defined structures, the following define * types must be used. In Linux the char, short, and int types * are all consistent at 8, 16, and 32 bits respectively. Pointers * and long types are 64 bits on Alpha and UltraSPARC. */ #define ASC_PADDR __u32 /* Physical/Bus address data type. */ #define ASC_VADDR __u32 /* Virtual address data type. */ #define ASC_DCNT __u32 /* Unsigned Data count type. */ #define ASC_SDCNT __s32 /* Signed Data count type. */ typedef unsigned char uchar; #ifndef TRUE #define TRUE (1) #endif #ifndef FALSE #define FALSE (0) #endif #define ERR (-1) #define UW_ERR (uint)(0xFFFF) #define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0) #define PCI_VENDOR_ID_ASP 0x10cd #define PCI_DEVICE_ID_ASP_1200A 0x1100 #define PCI_DEVICE_ID_ASP_ABP940 0x1200 #define PCI_DEVICE_ID_ASP_ABP940U 0x1300 #define PCI_DEVICE_ID_ASP_ABP940UW 0x2300 #define PCI_DEVICE_ID_38C0800_REV1 0x2500 #define PCI_DEVICE_ID_38C1600_REV1 0x2700 /* * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists. * The SRB structure will have to be changed and the ASC_SRB2SCSIQ() * macro re-defined to be able to obtain a ASC_SCSI_Q pointer from the * SRB structure. 
*/ #define CC_VERY_LONG_SG_LIST 0 #define ASC_SRB2SCSIQ(srb_ptr) (srb_ptr) #define PortAddr unsigned int /* port address size */ #define inp(port) inb(port) #define outp(port, byte) outb((byte), (port)) #define inpw(port) inw(port) #define outpw(port, word) outw((word), (port)) #define ASC_MAX_SG_QUEUE 7 #define ASC_MAX_SG_LIST 255 #define ASC_CS_TYPE unsigned short #define ASC_IS_ISA (0x0001) #define ASC_IS_ISAPNP (0x0081) #define ASC_IS_EISA (0x0002) #define ASC_IS_PCI (0x0004) #define ASC_IS_PCI_ULTRA (0x0104) #define ASC_IS_PCMCIA (0x0008) #define ASC_IS_MCA (0x0020) #define ASC_IS_VL (0x0040) #define ASC_IS_WIDESCSI_16 (0x0100) #define ASC_IS_WIDESCSI_32 (0x0200) #define ASC_IS_BIG_ENDIAN (0x8000) #define ASC_CHIP_MIN_VER_VL (0x01) #define ASC_CHIP_MAX_VER_VL (0x07) #define ASC_CHIP_MIN_VER_PCI (0x09) #define ASC_CHIP_MAX_VER_PCI (0x0F) #define ASC_CHIP_VER_PCI_BIT (0x08) #define ASC_CHIP_MIN_VER_ISA (0x11) #define ASC_CHIP_MIN_VER_ISA_PNP (0x21) #define ASC_CHIP_MAX_VER_ISA (0x27) #define ASC_CHIP_VER_ISA_BIT (0x30) #define ASC_CHIP_VER_ISAPNP_BIT (0x20) #define ASC_CHIP_VER_ASYN_BUG (0x21) #define ASC_CHIP_VER_PCI 0x08 #define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02) #define ASC_CHIP_VER_PCI_ULTRA_3050 (ASC_CHIP_VER_PCI | 0x03) #define ASC_CHIP_MIN_VER_EISA (0x41) #define ASC_CHIP_MAX_VER_EISA (0x47) #define ASC_CHIP_VER_EISA_BIT (0x40) #define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3) #define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL) #define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL) #define ASC_MAX_ISA_DMA_COUNT (0x00FFFFFFL) #define ASC_SCSI_ID_BITS 3 #define ASC_SCSI_TIX_TYPE uchar #define ASC_ALL_DEVICE_BIT_SET 0xFF #define ASC_SCSI_BIT_ID_TYPE uchar #define ASC_MAX_TID 7 #define ASC_MAX_LUN 7 #define ASC_SCSI_WIDTH_BIT_SET 0xFF #define ASC_MAX_SENSE_LEN 32 #define ASC_MIN_SENSE_LEN 14 #define ASC_SCSI_RESET_HOLD_TIME_US 60 /* * Narrow boards only support 12-byte commands, while wide boards * extend to 16-byte commands. 
*/ #define ASC_MAX_CDB_LEN 12 #define ADV_MAX_CDB_LEN 16 #define MS_SDTR_LEN 0x03 #define MS_WDTR_LEN 0x02 #define ASC_SG_LIST_PER_Q 7 #define QS_FREE 0x00 #define QS_READY 0x01 #define QS_DISC1 0x02 #define QS_DISC2 0x04 #define QS_BUSY 0x08 #define QS_ABORTED 0x40 #define QS_DONE 0x80 #define QC_NO_CALLBACK 0x01 #define QC_SG_SWAP_QUEUE 0x02 #define QC_SG_HEAD 0x04 #define QC_DATA_IN 0x08 #define QC_DATA_OUT 0x10 #define QC_URGENT 0x20 #define QC_MSG_OUT 0x40 #define QC_REQ_SENSE 0x80 #define QCSG_SG_XFER_LIST 0x02 #define QCSG_SG_XFER_MORE 0x04 #define QCSG_SG_XFER_END 0x08 #define QD_IN_PROGRESS 0x00 #define QD_NO_ERROR 0x01 #define QD_ABORTED_BY_HOST 0x02 #define QD_WITH_ERROR 0x04 #define QD_INVALID_REQUEST 0x80 #define QD_INVALID_HOST_NUM 0x81 #define QD_INVALID_DEVICE 0x82 #define QD_ERR_INTERNAL 0xFF #define QHSTA_NO_ERROR 0x00 #define QHSTA_M_SEL_TIMEOUT 0x11 #define QHSTA_M_DATA_OVER_RUN 0x12 #define QHSTA_M_DATA_UNDER_RUN 0x12 #define QHSTA_M_UNEXPECTED_BUS_FREE 0x13 #define QHSTA_M_BAD_BUS_PHASE_SEQ 0x14 #define QHSTA_D_QDONE_SG_LIST_CORRUPTED 0x21 #define QHSTA_D_ASC_DVC_ERROR_CODE_SET 0x22 #define QHSTA_D_HOST_ABORT_FAILED 0x23 #define QHSTA_D_EXE_SCSI_Q_FAILED 0x24 #define QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT 0x25 #define QHSTA_D_ASPI_NO_BUF_POOL 0x26 #define QHSTA_M_WTM_TIMEOUT 0x41 #define QHSTA_M_BAD_CMPL_STATUS_IN 0x42 #define QHSTA_M_NO_AUTO_REQ_SENSE 0x43 #define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44 #define QHSTA_M_TARGET_STATUS_BUSY 0x45 #define QHSTA_M_BAD_TAG_CODE 0x46 #define QHSTA_M_BAD_QUEUE_FULL_OR_BUSY 0x47 #define QHSTA_M_HUNG_REQ_SCSI_BUS_RESET 0x48 #define QHSTA_D_LRAM_CMP_ERROR 0x81 #define QHSTA_M_MICRO_CODE_ERROR_HALT 0xA1 #define ASC_FLAG_SCSIQ_REQ 0x01 #define ASC_FLAG_BIOS_SCSIQ_REQ 0x02 #define ASC_FLAG_BIOS_ASYNC_IO 0x04 #define ASC_FLAG_SRB_LINEAR_ADDR 0x08 #define ASC_FLAG_WIN16 0x10 #define ASC_FLAG_WIN32 0x20 #define ASC_FLAG_ISA_OVER_16MB 0x40 #define ASC_FLAG_DOS_VM_CALLBACK 0x80 #define ASC_TAG_FLAG_EXTRA_BYTES 0x10 #define 
ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04 #define ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX 0x08 #define ASC_TAG_FLAG_DISABLE_CHK_COND_INT_HOST 0x40 #define ASC_SCSIQ_CPY_BEG 4 #define ASC_SCSIQ_SGHD_CPY_BEG 2 #define ASC_SCSIQ_B_FWD 0 #define ASC_SCSIQ_B_BWD 1 #define ASC_SCSIQ_B_STATUS 2 #define ASC_SCSIQ_B_QNO 3 #define ASC_SCSIQ_B_CNTL 4 #define ASC_SCSIQ_B_SG_QUEUE_CNT 5 #define ASC_SCSIQ_D_DATA_ADDR 8 #define ASC_SCSIQ_D_DATA_CNT 12 #define ASC_SCSIQ_B_SENSE_LEN 20 #define ASC_SCSIQ_DONE_INFO_BEG 22 #define ASC_SCSIQ_D_SRBPTR 22 #define ASC_SCSIQ_B_TARGET_IX 26 #define ASC_SCSIQ_B_CDB_LEN 28 #define ASC_SCSIQ_B_TAG_CODE 29 #define ASC_SCSIQ_W_VM_ID 30 #define ASC_SCSIQ_DONE_STATUS 32 #define ASC_SCSIQ_HOST_STATUS 33 #define ASC_SCSIQ_SCSI_STATUS 34 #define ASC_SCSIQ_CDB_BEG 36 #define ASC_SCSIQ_DW_REMAIN_XFER_ADDR 56 #define ASC_SCSIQ_DW_REMAIN_XFER_CNT 60 #define ASC_SCSIQ_B_FIRST_SG_WK_QP 48 #define ASC_SCSIQ_B_SG_WK_QP 49 #define ASC_SCSIQ_B_SG_WK_IX 50 #define ASC_SCSIQ_W_ALT_DC1 52 #define ASC_SCSIQ_B_LIST_CNT 6 #define ASC_SCSIQ_B_CUR_LIST_CNT 7 #define ASC_SGQ_B_SG_CNTL 4 #define ASC_SGQ_B_SG_HEAD_QP 5 #define ASC_SGQ_B_SG_LIST_CNT 6 #define ASC_SGQ_B_SG_CUR_LIST_CNT 7 #define ASC_SGQ_LIST_BEG 8 #define ASC_DEF_SCSI1_QNG 4 #define ASC_MAX_SCSI1_QNG 4 #define ASC_DEF_SCSI2_QNG 16 #define ASC_MAX_SCSI2_QNG 32 #define ASC_TAG_CODE_MASK 0x23 #define ASC_STOP_REQ_RISC_STOP 0x01 #define ASC_STOP_ACK_RISC_STOP 0x03 #define ASC_STOP_CLEAN_UP_BUSY_Q 0x10 #define ASC_STOP_CLEAN_UP_DISC_Q 0x20 #define ASC_STOP_HOST_REQ_RISC_HALT 0x40 #define ASC_TIDLUN_TO_IX(tid, lun) (ASC_SCSI_TIX_TYPE)((tid) + ((lun)<<ASC_SCSI_ID_BITS)) #define ASC_TID_TO_TARGET_ID(tid) (ASC_SCSI_BIT_ID_TYPE)(0x01 << (tid)) #define ASC_TIX_TO_TARGET_ID(tix) (0x01 << ((tix) & ASC_MAX_TID)) #define ASC_TIX_TO_TID(tix) ((tix) & ASC_MAX_TID) #define ASC_TID_TO_TIX(tid) ((tid) & ASC_MAX_TID) #define ASC_TIX_TO_LUN(tix) (((tix) >> ASC_SCSI_ID_BITS) & ASC_MAX_LUN) #define ASC_QNO_TO_QADDR(q_no) 
((ASC_QADR_BEG)+((int)(q_no) << 6)) typedef struct asc_scsiq_1 { uchar status; uchar q_no; uchar cntl; uchar sg_queue_cnt; uchar target_id; uchar target_lun; ASC_PADDR data_addr; ASC_DCNT data_cnt; ASC_PADDR sense_addr; uchar sense_len; uchar extra_bytes; } ASC_SCSIQ_1; typedef struct asc_scsiq_2 { ASC_VADDR srb_ptr; uchar target_ix; uchar flag; uchar cdb_len; uchar tag_code; ushort vm_id; } ASC_SCSIQ_2; typedef struct asc_scsiq_3 { uchar done_stat; uchar host_stat; uchar scsi_stat; uchar scsi_msg; } ASC_SCSIQ_3; typedef struct asc_scsiq_4 { uchar cdb[ASC_MAX_CDB_LEN]; uchar y_first_sg_list_qp; uchar y_working_sg_qp; uchar y_working_sg_ix; uchar y_res; ushort x_req_count; ushort x_reconnect_rtn; ASC_PADDR x_saved_data_addr; ASC_DCNT x_saved_data_cnt; } ASC_SCSIQ_4; typedef struct asc_q_done_info { ASC_SCSIQ_2 d2; ASC_SCSIQ_3 d3; uchar q_status; uchar q_no; uchar cntl; uchar sense_len; uchar extra_bytes; uchar res; ASC_DCNT remain_bytes; } ASC_QDONE_INFO; typedef struct asc_sg_list { ASC_PADDR addr; ASC_DCNT bytes; } ASC_SG_LIST; typedef struct asc_sg_head { ushort entry_cnt; ushort queue_cnt; ushort entry_to_copy; ushort res; ASC_SG_LIST sg_list[0]; } ASC_SG_HEAD; typedef struct asc_scsi_q { ASC_SCSIQ_1 q1; ASC_SCSIQ_2 q2; uchar *cdbptr; ASC_SG_HEAD *sg_head; ushort remain_sg_entry_cnt; ushort next_sg_index; } ASC_SCSI_Q; typedef struct asc_scsi_req_q { ASC_SCSIQ_1 r1; ASC_SCSIQ_2 r2; uchar *cdbptr; ASC_SG_HEAD *sg_head; uchar *sense_ptr; ASC_SCSIQ_3 r3; uchar cdb[ASC_MAX_CDB_LEN]; uchar sense[ASC_MIN_SENSE_LEN]; } ASC_SCSI_REQ_Q; typedef struct asc_scsi_bios_req_q { ASC_SCSIQ_1 r1; ASC_SCSIQ_2 r2; uchar *cdbptr; ASC_SG_HEAD *sg_head; uchar *sense_ptr; ASC_SCSIQ_3 r3; uchar cdb[ASC_MAX_CDB_LEN]; uchar sense[ASC_MIN_SENSE_LEN]; } ASC_SCSI_BIOS_REQ_Q; typedef struct asc_risc_q { uchar fwd; uchar bwd; ASC_SCSIQ_1 i1; ASC_SCSIQ_2 i2; ASC_SCSIQ_3 i3; ASC_SCSIQ_4 i4; } ASC_RISC_Q; typedef struct asc_sg_list_q { uchar seq_no; uchar q_no; uchar cntl; uchar sg_head_qp; 
uchar sg_list_cnt; uchar sg_cur_list_cnt; } ASC_SG_LIST_Q; typedef struct asc_risc_sg_list_q { uchar fwd; uchar bwd; ASC_SG_LIST_Q sg; ASC_SG_LIST sg_list[7]; } ASC_RISC_SG_LIST_Q; #define ASCQ_ERR_Q_STATUS 0x0D #define ASCQ_ERR_CUR_QNG 0x17 #define ASCQ_ERR_SG_Q_LINKS 0x18 #define ASCQ_ERR_ISR_RE_ENTRY 0x1A #define ASCQ_ERR_CRITICAL_RE_ENTRY 0x1B #define ASCQ_ERR_ISR_ON_CRITICAL 0x1C /* * Warning code values are set in ASC_DVC_VAR 'warn_code'. */ #define ASC_WARN_NO_ERROR 0x0000 #define ASC_WARN_IO_PORT_ROTATE 0x0001 #define ASC_WARN_EEPROM_CHKSUM 0x0002 #define ASC_WARN_IRQ_MODIFIED 0x0004 #define ASC_WARN_AUTO_CONFIG 0x0008 #define ASC_WARN_CMD_QNG_CONFLICT 0x0010 #define ASC_WARN_EEPROM_RECOVER 0x0020 #define ASC_WARN_CFG_MSW_RECOVER 0x0040 /* * Error code values are set in {ASC/ADV}_DVC_VAR 'err_code'. */ #define ASC_IERR_NO_CARRIER 0x0001 /* No more carrier memory */ #define ASC_IERR_MCODE_CHKSUM 0x0002 /* micro code check sum error */ #define ASC_IERR_SET_PC_ADDR 0x0004 #define ASC_IERR_START_STOP_CHIP 0x0008 /* start/stop chip failed */ #define ASC_IERR_ILLEGAL_CONNECTION 0x0010 /* Illegal cable connection */ #define ASC_IERR_SINGLE_END_DEVICE 0x0020 /* SE device on DIFF bus */ #define ASC_IERR_REVERSED_CABLE 0x0040 /* Narrow flat cable reversed */ #define ASC_IERR_SET_SCSI_ID 0x0080 /* set SCSI ID failed */ #define ASC_IERR_HVD_DEVICE 0x0100 /* HVD device on LVD port */ #define ASC_IERR_BAD_SIGNATURE 0x0200 /* signature not found */ #define ASC_IERR_NO_BUS_TYPE 0x0400 #define ASC_IERR_BIST_PRE_TEST 0x0800 /* BIST pre-test error */ #define ASC_IERR_BIST_RAM_TEST 0x1000 /* BIST RAM test error */ #define ASC_IERR_BAD_CHIPTYPE 0x2000 /* Invalid chip_type setting */ #define ASC_DEF_MAX_TOTAL_QNG (0xF0) #define ASC_MIN_TAG_Q_PER_DVC (0x04) #define ASC_MIN_FREE_Q (0x02) #define ASC_MIN_TOTAL_QNG ((ASC_MAX_SG_QUEUE)+(ASC_MIN_FREE_Q)) #define ASC_MAX_TOTAL_QNG 240 #define ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG 16 #define ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG 8 #define 
ASC_MAX_PCI_INRAM_TOTAL_QNG 20 #define ASC_MAX_INRAM_TAG_QNG 16 #define ASC_IOADR_GAP 0x10 #define ASC_SYN_MAX_OFFSET 0x0F #define ASC_DEF_SDTR_OFFSET 0x0F #define ASC_SDTR_ULTRA_PCI_10MB_INDEX 0x02 #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 /* The narrow chip only supports a limited selection of transfer rates. * These are encoded in the range 0..7 or 0..15 depending whether the chip * is Ultra-capable or not. These tables let us convert from one to the other. */ static const unsigned char asc_syn_xfer_period[8] = { 25, 30, 35, 40, 50, 60, 70, 85 }; static const unsigned char asc_syn_ultra_xfer_period[16] = { 12, 19, 25, 32, 38, 44, 50, 57, 63, 69, 75, 82, 88, 94, 100, 107 }; typedef struct ext_msg { uchar msg_type; uchar msg_len; uchar msg_req; union { struct { uchar sdtr_xfer_period; uchar sdtr_req_ack_offset; } sdtr; struct { uchar wdtr_width; } wdtr; struct { uchar mdp_b3; uchar mdp_b2; uchar mdp_b1; uchar mdp_b0; } mdp; } u_ext_msg; uchar res; } EXT_MSG; #define xfer_period u_ext_msg.sdtr.sdtr_xfer_period #define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset #define wdtr_width u_ext_msg.wdtr.wdtr_width #define mdp_b3 u_ext_msg.mdp_b3 #define mdp_b2 u_ext_msg.mdp_b2 #define mdp_b1 u_ext_msg.mdp_b1 #define mdp_b0 u_ext_msg.mdp_b0 typedef struct asc_dvc_cfg { ASC_SCSI_BIT_ID_TYPE can_tagged_qng; ASC_SCSI_BIT_ID_TYPE cmd_qng_enabled; ASC_SCSI_BIT_ID_TYPE disc_enable; ASC_SCSI_BIT_ID_TYPE sdtr_enable; uchar chip_scsi_id; uchar isa_dma_speed; uchar isa_dma_channel; uchar chip_version; ushort mcode_date; ushort mcode_version; uchar max_tag_qng[ASC_MAX_TID + 1]; uchar sdtr_period_offset[ASC_MAX_TID + 1]; uchar adapter_info[6]; } ASC_DVC_CFG; #define ASC_DEF_DVC_CNTL 0xFFFF #define ASC_DEF_CHIP_SCSI_ID 7 #define ASC_DEF_ISA_DMA_SPEED 4 #define ASC_INIT_STATE_BEG_GET_CFG 0x0001 #define ASC_INIT_STATE_END_GET_CFG 0x0002 #define ASC_INIT_STATE_BEG_SET_CFG 0x0004 #define ASC_INIT_STATE_END_SET_CFG 0x0008 #define ASC_INIT_STATE_BEG_LOAD_MC 0x0010 #define 
ASC_INIT_STATE_END_LOAD_MC 0x0020 #define ASC_INIT_STATE_BEG_INQUIRY 0x0040 #define ASC_INIT_STATE_END_INQUIRY 0x0080 #define ASC_INIT_RESET_SCSI_DONE 0x0100 #define ASC_INIT_STATE_WITHOUT_EEP 0x8000 #define ASC_BUG_FIX_IF_NOT_DWB 0x0001 #define ASC_BUG_FIX_ASYN_USE_SYN 0x0002 #define ASC_MIN_TAGGED_CMD 7 #define ASC_MAX_SCSI_RESET_WAIT 30 #define ASC_OVERRUN_BSIZE 64 struct asc_dvc_var; /* Forward Declaration. */ typedef struct asc_dvc_var { PortAddr iop_base; ushort err_code; ushort dvc_cntl; ushort bug_fix_cntl; ushort bus_type; ASC_SCSI_BIT_ID_TYPE init_sdtr; ASC_SCSI_BIT_ID_TYPE sdtr_done; ASC_SCSI_BIT_ID_TYPE use_tagged_qng; ASC_SCSI_BIT_ID_TYPE unit_not_ready; ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; ASC_SCSI_BIT_ID_TYPE start_motor; uchar *overrun_buf; dma_addr_t overrun_dma; uchar scsi_reset_wait; uchar chip_no; char is_in_int; uchar max_total_qng; uchar cur_total_qng; uchar in_critical_cnt; uchar last_q_shortage; ushort init_state; uchar cur_dvc_qng[ASC_MAX_TID + 1]; uchar max_dvc_qng[ASC_MAX_TID + 1]; ASC_SCSI_Q *scsiq_busy_head[ASC_MAX_TID + 1]; ASC_SCSI_Q *scsiq_busy_tail[ASC_MAX_TID + 1]; const uchar *sdtr_period_tbl; ASC_DVC_CFG *cfg; ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer_always; char redo_scam; ushort res2; uchar dos_int13_table[ASC_MAX_TID + 1]; ASC_DCNT max_dma_count; ASC_SCSI_BIT_ID_TYPE no_scam; ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer; uchar min_sdtr_index; uchar max_sdtr_index; struct asc_board *drv_ptr; int ptr_map_count; void **ptr_map; ASC_DCNT uc_break; } ASC_DVC_VAR; typedef struct asc_dvc_inq_info { uchar type[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; } ASC_DVC_INQ_INFO; typedef struct asc_cap_info { ASC_DCNT lba; ASC_DCNT blk_size; } ASC_CAP_INFO; typedef struct asc_cap_info_array { ASC_CAP_INFO cap_info[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; } ASC_CAP_INFO_ARRAY; #define ASC_MCNTL_NO_SEL_TIMEOUT (ushort)0x0001 #define ASC_MCNTL_NULL_TARGET (ushort)0x0002 #define ASC_CNTL_INITIATOR (ushort)0x0001 #define ASC_CNTL_BIOS_GT_1GB (ushort)0x0002 #define 
ASC_CNTL_BIOS_GT_2_DISK (ushort)0x0004 #define ASC_CNTL_BIOS_REMOVABLE (ushort)0x0008 #define ASC_CNTL_NO_SCAM (ushort)0x0010 #define ASC_CNTL_INT_MULTI_Q (ushort)0x0080 #define ASC_CNTL_NO_LUN_SUPPORT (ushort)0x0040 #define ASC_CNTL_NO_VERIFY_COPY (ushort)0x0100 #define ASC_CNTL_RESET_SCSI (ushort)0x0200 #define ASC_CNTL_INIT_INQUIRY (ushort)0x0400 #define ASC_CNTL_INIT_VERBOSE (ushort)0x0800 #define ASC_CNTL_SCSI_PARITY (ushort)0x1000 #define ASC_CNTL_BURST_MODE (ushort)0x2000 #define ASC_CNTL_SDTR_ENABLE_ULTRA (ushort)0x4000 #define ASC_EEP_DVC_CFG_BEG_VL 2 #define ASC_EEP_MAX_DVC_ADDR_VL 15 #define ASC_EEP_DVC_CFG_BEG 32 #define ASC_EEP_MAX_DVC_ADDR 45 #define ASC_EEP_MAX_RETRY 20 /* * These macros keep the chip SCSI id and ISA DMA speed * bitfields in board order. C bitfields aren't portable * between big and little-endian platforms so they are * not used. */ #define ASC_EEP_GET_CHIP_ID(cfg) ((cfg)->id_speed & 0x0f) #define ASC_EEP_GET_DMA_SPD(cfg) (((cfg)->id_speed & 0xf0) >> 4) #define ASC_EEP_SET_CHIP_ID(cfg, sid) \ ((cfg)->id_speed = ((cfg)->id_speed & 0xf0) | ((sid) & ASC_MAX_TID)) #define ASC_EEP_SET_DMA_SPD(cfg, spd) \ ((cfg)->id_speed = ((cfg)->id_speed & 0x0f) | ((spd) & 0x0f) << 4) typedef struct asceep_config { ushort cfg_lsw; ushort cfg_msw; uchar init_sdtr; uchar disc_enable; uchar use_cmd_qng; uchar start_motor; uchar max_total_qng; uchar max_tag_qng; uchar bios_scan; uchar power_up_wait; uchar no_scam; uchar id_speed; /* low order 4 bits is chip scsi id */ /* high order 4 bits is isa dma speed */ uchar dos_int13_table[ASC_MAX_TID + 1]; uchar adapter_info[6]; ushort cntl; ushort chksum; } ASCEEP_CONFIG; #define ASC_EEP_CMD_READ 0x80 #define ASC_EEP_CMD_WRITE 0x40 #define ASC_EEP_CMD_WRITE_ABLE 0x30 #define ASC_EEP_CMD_WRITE_DISABLE 0x00 #define ASCV_MSGOUT_BEG 0x0000 #define ASCV_MSGOUT_SDTR_PERIOD (ASCV_MSGOUT_BEG+3) #define ASCV_MSGOUT_SDTR_OFFSET (ASCV_MSGOUT_BEG+4) #define ASCV_BREAK_SAVED_CODE (ushort)0x0006 #define ASCV_MSGIN_BEG 
(ASCV_MSGOUT_BEG+8) #define ASCV_MSGIN_SDTR_PERIOD (ASCV_MSGIN_BEG+3) #define ASCV_MSGIN_SDTR_OFFSET (ASCV_MSGIN_BEG+4) #define ASCV_SDTR_DATA_BEG (ASCV_MSGIN_BEG+8) #define ASCV_SDTR_DONE_BEG (ASCV_SDTR_DATA_BEG+8) #define ASCV_MAX_DVC_QNG_BEG (ushort)0x0020 #define ASCV_BREAK_ADDR (ushort)0x0028 #define ASCV_BREAK_NOTIFY_COUNT (ushort)0x002A #define ASCV_BREAK_CONTROL (ushort)0x002C #define ASCV_BREAK_HIT_COUNT (ushort)0x002E #define ASCV_ASCDVC_ERR_CODE_W (ushort)0x0030 #define ASCV_MCODE_CHKSUM_W (ushort)0x0032 #define ASCV_MCODE_SIZE_W (ushort)0x0034 #define ASCV_STOP_CODE_B (ushort)0x0036 #define ASCV_DVC_ERR_CODE_B (ushort)0x0037 #define ASCV_OVERRUN_PADDR_D (ushort)0x0038 #define ASCV_OVERRUN_BSIZE_D (ushort)0x003C #define ASCV_HALTCODE_W (ushort)0x0040 #define ASCV_CHKSUM_W (ushort)0x0042 #define ASCV_MC_DATE_W (ushort)0x0044 #define ASCV_MC_VER_W (ushort)0x0046 #define ASCV_NEXTRDY_B (ushort)0x0048 #define ASCV_DONENEXT_B (ushort)0x0049 #define ASCV_USE_TAGGED_QNG_B (ushort)0x004A #define ASCV_SCSIBUSY_B (ushort)0x004B #define ASCV_Q_DONE_IN_PROGRESS_B (ushort)0x004C #define ASCV_CURCDB_B (ushort)0x004D #define ASCV_RCLUN_B (ushort)0x004E #define ASCV_BUSY_QHEAD_B (ushort)0x004F #define ASCV_DISC1_QHEAD_B (ushort)0x0050 #define ASCV_DISC_ENABLE_B (ushort)0x0052 #define ASCV_CAN_TAGGED_QNG_B (ushort)0x0053 #define ASCV_HOSTSCSI_ID_B (ushort)0x0055 #define ASCV_MCODE_CNTL_B (ushort)0x0056 #define ASCV_NULL_TARGET_B (ushort)0x0057 #define ASCV_FREE_Q_HEAD_W (ushort)0x0058 #define ASCV_DONE_Q_TAIL_W (ushort)0x005A #define ASCV_FREE_Q_HEAD_B (ushort)(ASCV_FREE_Q_HEAD_W+1) #define ASCV_DONE_Q_TAIL_B (ushort)(ASCV_DONE_Q_TAIL_W+1) #define ASCV_HOST_FLAG_B (ushort)0x005D #define ASCV_TOTAL_READY_Q_B (ushort)0x0064 #define ASCV_VER_SERIAL_B (ushort)0x0065 #define ASCV_HALTCODE_SAVED_W (ushort)0x0066 #define ASCV_WTM_FLAG_B (ushort)0x0068 #define ASCV_RISC_FLAG_B (ushort)0x006A #define ASCV_REQ_SG_LIST_QP (ushort)0x006B #define ASC_HOST_FLAG_IN_ISR 0x01 #define 
ASC_HOST_FLAG_ACK_INT 0x02 #define ASC_RISC_FLAG_GEN_INT 0x01 #define ASC_RISC_FLAG_REQ_SG_LIST 0x02 #define IOP_CTRL (0x0F) #define IOP_STATUS (0x0E) #define IOP_INT_ACK IOP_STATUS #define IOP_REG_IFC (0x0D) #define IOP_SYN_OFFSET (0x0B) #define IOP_EXTRA_CONTROL (0x0D) #define IOP_REG_PC (0x0C) #define IOP_RAM_ADDR (0x0A) #define IOP_RAM_DATA (0x08) #define IOP_EEP_DATA (0x06) #define IOP_EEP_CMD (0x07) #define IOP_VERSION (0x03) #define IOP_CONFIG_HIGH (0x04) #define IOP_CONFIG_LOW (0x02) #define IOP_SIG_BYTE (0x01) #define IOP_SIG_WORD (0x00) #define IOP_REG_DC1 (0x0E) #define IOP_REG_DC0 (0x0C) #define IOP_REG_SB (0x0B) #define IOP_REG_DA1 (0x0A) #define IOP_REG_DA0 (0x08) #define IOP_REG_SC (0x09) #define IOP_DMA_SPEED (0x07) #define IOP_REG_FLAG (0x07) #define IOP_FIFO_H (0x06) #define IOP_FIFO_L (0x04) #define IOP_REG_ID (0x05) #define IOP_REG_QP (0x03) #define IOP_REG_IH (0x02) #define IOP_REG_IX (0x01) #define IOP_REG_AX (0x00) #define IFC_REG_LOCK (0x00) #define IFC_REG_UNLOCK (0x09) #define IFC_WR_EN_FILTER (0x10) #define IFC_RD_NO_EEPROM (0x10) #define IFC_SLEW_RATE (0x20) #define IFC_ACT_NEG (0x40) #define IFC_INP_FILTER (0x80) #define IFC_INIT_DEFAULT (IFC_ACT_NEG | IFC_REG_UNLOCK) #define SC_SEL (uchar)(0x80) #define SC_BSY (uchar)(0x40) #define SC_ACK (uchar)(0x20) #define SC_REQ (uchar)(0x10) #define SC_ATN (uchar)(0x08) #define SC_IO (uchar)(0x04) #define SC_CD (uchar)(0x02) #define SC_MSG (uchar)(0x01) #define SEC_SCSI_CTL (uchar)(0x80) #define SEC_ACTIVE_NEGATE (uchar)(0x40) #define SEC_SLEW_RATE (uchar)(0x20) #define SEC_ENABLE_FILTER (uchar)(0x10) #define ASC_HALT_EXTMSG_IN (ushort)0x8000 #define ASC_HALT_CHK_CONDITION (ushort)0x8100 #define ASC_HALT_SS_QUEUE_FULL (ushort)0x8200 #define ASC_HALT_DISABLE_ASYN_USE_SYN_FIX (ushort)0x8300 #define ASC_HALT_ENABLE_ASYN_USE_SYN_FIX (ushort)0x8400 #define ASC_HALT_SDTR_REJECTED (ushort)0x4000 #define ASC_HALT_HOST_COPY_SG_LIST_TO_RISC ( ushort )0x2000 #define ASC_MAX_QNO 0xF8 #define ASC_DATA_SEC_BEG 
(ushort)0x0080 #define ASC_DATA_SEC_END (ushort)0x0080 #define ASC_CODE_SEC_BEG (ushort)0x0080 #define ASC_CODE_SEC_END (ushort)0x0080 #define ASC_QADR_BEG (0x4000) #define ASC_QADR_USED (ushort)(ASC_MAX_QNO * 64) #define ASC_QADR_END (ushort)0x7FFF #define ASC_QLAST_ADR (ushort)0x7FC0 #define ASC_QBLK_SIZE 0x40 #define ASC_BIOS_DATA_QBEG 0xF8 #define ASC_MIN_ACTIVE_QNO 0x01 #define ASC_QLINK_END 0xFF #define ASC_EEPROM_WORDS 0x10 #define ASC_MAX_MGS_LEN 0x10 #define ASC_BIOS_ADDR_DEF 0xDC00 #define ASC_BIOS_SIZE 0x3800 #define ASC_BIOS_RAM_OFF 0x3800 #define ASC_BIOS_RAM_SIZE 0x800 #define ASC_BIOS_MIN_ADDR 0xC000 #define ASC_BIOS_MAX_ADDR 0xEC00 #define ASC_BIOS_BANK_SIZE 0x0400 #define ASC_MCODE_START_ADDR 0x0080 #define ASC_CFG0_HOST_INT_ON 0x0020 #define ASC_CFG0_BIOS_ON 0x0040 #define ASC_CFG0_VERA_BURST_ON 0x0080 #define ASC_CFG0_SCSI_PARITY_ON 0x0800 #define ASC_CFG1_SCSI_TARGET_ON 0x0080 #define ASC_CFG1_LRAM_8BITS_ON 0x0800 #define ASC_CFG_MSW_CLR_MASK 0x3080 #define CSW_TEST1 (ASC_CS_TYPE)0x8000 #define CSW_AUTO_CONFIG (ASC_CS_TYPE)0x4000 #define CSW_RESERVED1 (ASC_CS_TYPE)0x2000 #define CSW_IRQ_WRITTEN (ASC_CS_TYPE)0x1000 #define CSW_33MHZ_SELECTED (ASC_CS_TYPE)0x0800 #define CSW_TEST2 (ASC_CS_TYPE)0x0400 #define CSW_TEST3 (ASC_CS_TYPE)0x0200 #define CSW_RESERVED2 (ASC_CS_TYPE)0x0100 #define CSW_DMA_DONE (ASC_CS_TYPE)0x0080 #define CSW_FIFO_RDY (ASC_CS_TYPE)0x0040 #define CSW_EEP_READ_DONE (ASC_CS_TYPE)0x0020 #define CSW_HALTED (ASC_CS_TYPE)0x0010 #define CSW_SCSI_RESET_ACTIVE (ASC_CS_TYPE)0x0008 #define CSW_PARITY_ERR (ASC_CS_TYPE)0x0004 #define CSW_SCSI_RESET_LATCH (ASC_CS_TYPE)0x0002 #define CSW_INT_PENDING (ASC_CS_TYPE)0x0001 #define CIW_CLR_SCSI_RESET_INT (ASC_CS_TYPE)0x1000 #define CIW_INT_ACK (ASC_CS_TYPE)0x0100 #define CIW_TEST1 (ASC_CS_TYPE)0x0200 #define CIW_TEST2 (ASC_CS_TYPE)0x0400 #define CIW_SEL_33MHZ (ASC_CS_TYPE)0x0800 #define CIW_IRQ_ACT (ASC_CS_TYPE)0x1000 #define CC_CHIP_RESET (uchar)0x80 #define CC_SCSI_RESET (uchar)0x40 #define 
CC_HALT (uchar)0x20 #define CC_SINGLE_STEP (uchar)0x10 #define CC_DMA_ABLE (uchar)0x08 #define CC_TEST (uchar)0x04 #define CC_BANK_ONE (uchar)0x02 #define CC_DIAG (uchar)0x01 #define ASC_1000_ID0W 0x04C1 #define ASC_1000_ID0W_FIX 0x00C1 #define ASC_1000_ID1B 0x25 #define ASC_EISA_REV_IOP_MASK (0x0C83) #define ASC_EISA_CFG_IOP_MASK (0x0C86) #define ASC_GET_EISA_SLOT(iop) (PortAddr)((iop) & 0xF000) #define INS_HALTINT (ushort)0x6281 #define INS_HALT (ushort)0x6280 #define INS_SINT (ushort)0x6200 #define INS_RFLAG_WTM (ushort)0x7380 #define ASC_MC_SAVE_CODE_WSIZE 0x500 #define ASC_MC_SAVE_DATA_WSIZE 0x40 typedef struct asc_mc_saved { ushort data[ASC_MC_SAVE_DATA_WSIZE]; ushort code[ASC_MC_SAVE_CODE_WSIZE]; } ASC_MC_SAVED; #define AscGetQDoneInProgress(port) AscReadLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B) #define AscPutQDoneInProgress(port, val) AscWriteLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B, val) #define AscGetVarFreeQHead(port) AscReadLramWord((port), ASCV_FREE_Q_HEAD_W) #define AscGetVarDoneQTail(port) AscReadLramWord((port), ASCV_DONE_Q_TAIL_W) #define AscPutVarFreeQHead(port, val) AscWriteLramWord((port), ASCV_FREE_Q_HEAD_W, val) #define AscPutVarDoneQTail(port, val) AscWriteLramWord((port), ASCV_DONE_Q_TAIL_W, val) #define AscGetRiscVarFreeQHead(port) AscReadLramByte((port), ASCV_NEXTRDY_B) #define AscGetRiscVarDoneQTail(port) AscReadLramByte((port), ASCV_DONENEXT_B) #define AscPutRiscVarFreeQHead(port, val) AscWriteLramByte((port), ASCV_NEXTRDY_B, val) #define AscPutRiscVarDoneQTail(port, val) AscWriteLramByte((port), ASCV_DONENEXT_B, val) #define AscPutMCodeSDTRDoneAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id), (data)) #define AscGetMCodeSDTRDoneAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id)) #define AscPutMCodeInitSDTRAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id), data) #define AscGetMCodeInitSDTRAtID(port, id) 
AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id)) #define AscGetChipSignatureByte(port) (uchar)inp((port)+IOP_SIG_BYTE) #define AscGetChipSignatureWord(port) (ushort)inpw((port)+IOP_SIG_WORD) #define AscGetChipVerNo(port) (uchar)inp((port)+IOP_VERSION) #define AscGetChipCfgLsw(port) (ushort)inpw((port)+IOP_CONFIG_LOW) #define AscGetChipCfgMsw(port) (ushort)inpw((port)+IOP_CONFIG_HIGH) #define AscSetChipCfgLsw(port, data) outpw((port)+IOP_CONFIG_LOW, data) #define AscSetChipCfgMsw(port, data) outpw((port)+IOP_CONFIG_HIGH, data) #define AscGetChipEEPCmd(port) (uchar)inp((port)+IOP_EEP_CMD) #define AscSetChipEEPCmd(port, data) outp((port)+IOP_EEP_CMD, data) #define AscGetChipEEPData(port) (ushort)inpw((port)+IOP_EEP_DATA) #define AscSetChipEEPData(port, data) outpw((port)+IOP_EEP_DATA, data) #define AscGetChipLramAddr(port) (ushort)inpw((PortAddr)((port)+IOP_RAM_ADDR)) #define AscSetChipLramAddr(port, addr) outpw((PortAddr)((port)+IOP_RAM_ADDR), addr) #define AscGetChipLramData(port) (ushort)inpw((port)+IOP_RAM_DATA) #define AscSetChipLramData(port, data) outpw((port)+IOP_RAM_DATA, data) #define AscGetChipIFC(port) (uchar)inp((port)+IOP_REG_IFC) #define AscSetChipIFC(port, data) outp((port)+IOP_REG_IFC, data) #define AscGetChipStatus(port) (ASC_CS_TYPE)inpw((port)+IOP_STATUS) #define AscSetChipStatus(port, cs_val) outpw((port)+IOP_STATUS, cs_val) #define AscGetChipControl(port) (uchar)inp((port)+IOP_CTRL) #define AscSetChipControl(port, cc_val) outp((port)+IOP_CTRL, cc_val) #define AscGetChipSyn(port) (uchar)inp((port)+IOP_SYN_OFFSET) #define AscSetChipSyn(port, data) outp((port)+IOP_SYN_OFFSET, data) #define AscSetPCAddr(port, data) outpw((port)+IOP_REG_PC, data) #define AscGetPCAddr(port) (ushort)inpw((port)+IOP_REG_PC) #define AscIsIntPending(port) (AscGetChipStatus(port) & (CSW_INT_PENDING | CSW_SCSI_RESET_LATCH)) #define AscGetChipScsiID(port) ((AscGetChipCfgLsw(port) >> 8) & ASC_MAX_TID) #define AscGetExtraControl(port) 
(uchar)inp((port)+IOP_EXTRA_CONTROL) #define AscSetExtraControl(port, data) outp((port)+IOP_EXTRA_CONTROL, data) #define AscReadChipAX(port) (ushort)inpw((port)+IOP_REG_AX) #define AscWriteChipAX(port, data) outpw((port)+IOP_REG_AX, data) #define AscReadChipIX(port) (uchar)inp((port)+IOP_REG_IX) #define AscWriteChipIX(port, data) outp((port)+IOP_REG_IX, data) #define AscReadChipIH(port) (ushort)inpw((port)+IOP_REG_IH) #define AscWriteChipIH(port, data) outpw((port)+IOP_REG_IH, data) #define AscReadChipQP(port) (uchar)inp((port)+IOP_REG_QP) #define AscWriteChipQP(port, data) outp((port)+IOP_REG_QP, data) #define AscReadChipFIFO_L(port) (ushort)inpw((port)+IOP_REG_FIFO_L) #define AscWriteChipFIFO_L(port, data) outpw((port)+IOP_REG_FIFO_L, data) #define AscReadChipFIFO_H(port) (ushort)inpw((port)+IOP_REG_FIFO_H) #define AscWriteChipFIFO_H(port, data) outpw((port)+IOP_REG_FIFO_H, data) #define AscReadChipDmaSpeed(port) (uchar)inp((port)+IOP_DMA_SPEED) #define AscWriteChipDmaSpeed(port, data) outp((port)+IOP_DMA_SPEED, data) #define AscReadChipDA0(port) (ushort)inpw((port)+IOP_REG_DA0) #define AscWriteChipDA0(port) outpw((port)+IOP_REG_DA0, data) #define AscReadChipDA1(port) (ushort)inpw((port)+IOP_REG_DA1) #define AscWriteChipDA1(port) outpw((port)+IOP_REG_DA1, data) #define AscReadChipDC0(port) (ushort)inpw((port)+IOP_REG_DC0) #define AscWriteChipDC0(port) outpw((port)+IOP_REG_DC0, data) #define AscReadChipDC1(port) (ushort)inpw((port)+IOP_REG_DC1) #define AscWriteChipDC1(port) outpw((port)+IOP_REG_DC1, data) #define AscReadChipDvcID(port) (uchar)inp((port)+IOP_REG_ID) #define AscWriteChipDvcID(port, data) outp((port)+IOP_REG_ID, data) /* * Portable Data Types * * Any instance where a 32-bit long or pointer type is assumed * for precision or HW defined structures, the following define * types must be used. In Linux the char, short, and int types * are all consistent at 8, 16, and 32 bits respectively. Pointers * and long types are 64 bits on Alpha and UltraSPARC. 
*/ #define ADV_PADDR __u32 /* Physical address data type. */ #define ADV_VADDR __u32 /* Virtual address data type. */ #define ADV_DCNT __u32 /* Unsigned Data count type. */ #define ADV_SDCNT __s32 /* Signed Data count type. */ /* * These macros are used to convert a virtual address to a * 32-bit value. This currently can be used on Linux Alpha * which uses 64-bit virtual address but a 32-bit bus address. * This is likely to break in the future, but doing this now * will give us time to change the HW and FW to handle 64-bit * addresses. */ #define ADV_VADDR_TO_U32 virt_to_bus #define ADV_U32_TO_VADDR bus_to_virt #define AdvPortAddr void __iomem * /* Virtual memory address size */ /* * Define Adv Library required memory access macros. */ #define ADV_MEM_READB(addr) readb(addr) #define ADV_MEM_READW(addr) readw(addr) #define ADV_MEM_WRITEB(addr, byte) writeb(byte, addr) #define ADV_MEM_WRITEW(addr, word) writew(word, addr) #define ADV_MEM_WRITEDW(addr, dword) writel(dword, addr) #define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 15) /* * Define total number of simultaneous maximum element scatter-gather * request blocks per wide adapter. ASC_DEF_MAX_HOST_QNG (253) is the * maximum number of outstanding commands per wide host adapter. Each * command uses one or more ADV_SG_BLOCK each with 15 scatter-gather * elements. Allow each command to have at least one ADV_SG_BLOCK structure. * This allows about 15 commands to have the maximum 17 ADV_SG_BLOCK * structures or 255 scatter-gather elements. */ #define ADV_TOT_SG_BLOCK ASC_DEF_MAX_HOST_QNG /* * Define maximum number of scatter-gather elements per request. 
*/ #define ADV_MAX_SG_LIST 255 #define NO_OF_SG_PER_BLOCK 15 #define ADV_EEP_DVC_CFG_BEGIN (0x00) #define ADV_EEP_DVC_CFG_END (0x15) #define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */ #define ADV_EEP_MAX_WORD_ADDR (0x1E) #define ADV_EEP_DELAY_MS 100 #define ADV_EEPROM_BIG_ENDIAN 0x8000 /* EEPROM Bit 15 */ #define ADV_EEPROM_BIOS_ENABLE 0x4000 /* EEPROM Bit 14 */ /* * For the ASC3550 Bit 13 is Termination Polarity control bit. * For later ICs Bit 13 controls whether the CIS (Card Information * Service Section) is loaded from EEPROM. */ #define ADV_EEPROM_TERM_POL 0x2000 /* EEPROM Bit 13 */ #define ADV_EEPROM_CIS_LD 0x2000 /* EEPROM Bit 13 */ /* * ASC38C1600 Bit 11 * * If EEPROM Bit 11 is 0 for Function 0, then Function 0 will specify * INT A in the PCI Configuration Space Int Pin field. If it is 1, then * Function 0 will specify INT B. * * If EEPROM Bit 11 is 0 for Function 1, then Function 1 will specify * INT B in the PCI Configuration Space Int Pin field. If it is 1, then * Function 1 will specify INT A. 
*/ #define ADV_EEPROM_INTAB 0x0800 /* EEPROM Bit 11 */ typedef struct adveep_3550_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 13 set - Term Polarity Control */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_able; /* 04 Synchronous DTR able */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ uchar reserved1; /* reserved byte (not used) */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 */ /* bit 11 No verbose initialization. 
*/ /* bit 12 SCSI parity enabled */ /* bit 13 */ /* bit 14 */ /* bit 15 */ ushort ultra_able; /* 13 ULTRA speed able */ ushort reserved2; /* 14 reserved */ uchar max_host_qng; /* 15 maximum host queuing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort bug_fix; /* 17 control bit for bug fix */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort num_of_err; /* 36 number of error */ } ADVEEP_3550_CONFIG; typedef struct adveep_38C0800_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 13 set - Load CIS */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination_se; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / 
high on */ /* There is no low on / high off */ uchar termination_lvd; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 */ /* bit 11 No verbose initialization. */ /* bit 12 SCSI parity enabled */ /* bit 13 */ /* bit 14 */ /* bit 15 */ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ uchar max_host_qng; /* 15 maximum host queueing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. 
driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort reserved36; /* 36 reserved */ ushort reserved37; /* 37 reserved */ ushort reserved38; /* 38 reserved */ ushort reserved39; /* 39 reserved */ ushort reserved40; /* 40 reserved */ ushort reserved41; /* 41 reserved */ ushort reserved42; /* 42 reserved */ ushort reserved43; /* 43 reserved */ ushort reserved44; /* 44 reserved */ ushort reserved45; /* 45 reserved */ ushort reserved46; /* 46 reserved */ ushort reserved47; /* 47 reserved */ ushort reserved48; /* 48 reserved */ ushort reserved49; /* 49 reserved */ ushort reserved50; /* 50 reserved */ ushort reserved51; /* 51 reserved */ ushort reserved52; /* 52 reserved */ ushort reserved53; /* 53 reserved */ ushort reserved54; /* 54 reserved */ ushort reserved55; /* 55 reserved */ ushort cisptr_lsw; /* 56 CIS PTR LSW */ ushort cisprt_msw; /* 57 CIS PTR MSW */ ushort subsysvid; /* 58 SubSystem Vendor ID */ ushort subsysid; /* 59 SubSystem ID */ ushort reserved60; /* 60 reserved */ ushort reserved61; /* 61 reserved */ ushort reserved62; /* 62 reserved */ ushort reserved63; /* 63 reserved */ } ADVEEP_38C0800_CONFIG; typedef struct adveep_38C1600_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 11 set - Func. 0 INTB, Func. 1 INTA */ /* clear - Func. 0 INTA, Func. 
1 INTB */ /* bit 13 set - Load CIS */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination_se; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ uchar termination_lvd; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 Basic Integrity Checking disabled */ /* bit 11 No verbose initialization. */ /* bit 12 SCSI parity enabled */ /* bit 13 AIPP (Asyn. Info. Ph. Prot.) dis. 
*/ /* bit 14 */ /* bit 15 */ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ uchar max_host_qng; /* 15 maximum host queueing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort reserved36; /* 36 reserved */ ushort reserved37; /* 37 reserved */ ushort reserved38; /* 38 reserved */ ushort reserved39; /* 39 reserved */ ushort reserved40; /* 40 reserved */ ushort reserved41; /* 41 reserved */ ushort reserved42; /* 42 reserved */ ushort reserved43; /* 43 reserved */ ushort reserved44; /* 44 reserved */ ushort reserved45; /* 45 reserved */ ushort reserved46; /* 46 reserved */ ushort reserved47; /* 47 reserved */ ushort reserved48; /* 48 reserved */ ushort reserved49; /* 49 reserved */ ushort reserved50; /* 50 reserved */ ushort reserved51; /* 51 reserved */ ushort reserved52; /* 52 reserved */ ushort reserved53; /* 53 reserved */ ushort reserved54; /* 54 reserved */ ushort reserved55; /* 55 reserved */ ushort cisptr_lsw; /* 56 CIS PTR LSW */ ushort cisprt_msw; /* 57 CIS PTR MSW */ ushort subsysvid; /* 58 SubSystem Vendor ID */ ushort subsysid; /* 59 SubSystem ID */ ushort reserved60; /* 60 reserved */ ushort reserved61; /* 61 reserved */ ushort reserved62; /* 62 reserved */ 
ushort reserved63; /* 63 reserved */ } ADVEEP_38C1600_CONFIG; /* * EEPROM Commands */ #define ASC_EEP_CMD_DONE 0x0200 /* bios_ctrl */ #define BIOS_CTRL_BIOS 0x0001 #define BIOS_CTRL_EXTENDED_XLAT 0x0002 #define BIOS_CTRL_GT_2_DISK 0x0004 #define BIOS_CTRL_BIOS_REMOVABLE 0x0008 #define BIOS_CTRL_BOOTABLE_CD 0x0010 #define BIOS_CTRL_MULTIPLE_LUN 0x0040 #define BIOS_CTRL_DISPLAY_MSG 0x0080 #define BIOS_CTRL_NO_SCAM 0x0100 #define BIOS_CTRL_RESET_SCSI_BUS 0x0200 #define BIOS_CTRL_INIT_VERBOSE 0x0800 #define BIOS_CTRL_SCSI_PARITY 0x1000 #define BIOS_CTRL_AIPP_DIS 0x2000 #define ADV_3550_MEMSIZE 0x2000 /* 8 KB Internal Memory */ #define ADV_38C0800_MEMSIZE 0x4000 /* 16 KB Internal Memory */ /* * XXX - Since ASC38C1600 Rev.3 has a local RAM failure issue, there is * a special 16K Adv Library and Microcode version. After the issue is * resolved, should restore 32K support. * * #define ADV_38C1600_MEMSIZE 0x8000L * 32 KB Internal Memory * */ #define ADV_38C1600_MEMSIZE 0x4000 /* 16 KB Internal Memory */ /* * Byte I/O register address from base of 'iop_base'. 
*/ #define IOPB_INTR_STATUS_REG 0x00 #define IOPB_CHIP_ID_1 0x01 #define IOPB_INTR_ENABLES 0x02 #define IOPB_CHIP_TYPE_REV 0x03 #define IOPB_RES_ADDR_4 0x04 #define IOPB_RES_ADDR_5 0x05 #define IOPB_RAM_DATA 0x06 #define IOPB_RES_ADDR_7 0x07 #define IOPB_FLAG_REG 0x08 #define IOPB_RES_ADDR_9 0x09 #define IOPB_RISC_CSR 0x0A #define IOPB_RES_ADDR_B 0x0B #define IOPB_RES_ADDR_C 0x0C #define IOPB_RES_ADDR_D 0x0D #define IOPB_SOFT_OVER_WR 0x0E #define IOPB_RES_ADDR_F 0x0F #define IOPB_MEM_CFG 0x10 #define IOPB_RES_ADDR_11 0x11 #define IOPB_GPIO_DATA 0x12 #define IOPB_RES_ADDR_13 0x13 #define IOPB_FLASH_PAGE 0x14 #define IOPB_RES_ADDR_15 0x15 #define IOPB_GPIO_CNTL 0x16 #define IOPB_RES_ADDR_17 0x17 #define IOPB_FLASH_DATA 0x18 #define IOPB_RES_ADDR_19 0x19 #define IOPB_RES_ADDR_1A 0x1A #define IOPB_RES_ADDR_1B 0x1B #define IOPB_RES_ADDR_1C 0x1C #define IOPB_RES_ADDR_1D 0x1D #define IOPB_RES_ADDR_1E 0x1E #define IOPB_RES_ADDR_1F 0x1F #define IOPB_DMA_CFG0 0x20 #define IOPB_DMA_CFG1 0x21 #define IOPB_TICKLE 0x22 #define IOPB_DMA_REG_WR 0x23 #define IOPB_SDMA_STATUS 0x24 #define IOPB_SCSI_BYTE_CNT 0x25 #define IOPB_HOST_BYTE_CNT 0x26 #define IOPB_BYTE_LEFT_TO_XFER 0x27 #define IOPB_BYTE_TO_XFER_0 0x28 #define IOPB_BYTE_TO_XFER_1 0x29 #define IOPB_BYTE_TO_XFER_2 0x2A #define IOPB_BYTE_TO_XFER_3 0x2B #define IOPB_ACC_GRP 0x2C #define IOPB_RES_ADDR_2D 0x2D #define IOPB_DEV_ID 0x2E #define IOPB_RES_ADDR_2F 0x2F #define IOPB_SCSI_DATA 0x30 #define IOPB_RES_ADDR_31 0x31 #define IOPB_RES_ADDR_32 0x32 #define IOPB_SCSI_DATA_HSHK 0x33 #define IOPB_SCSI_CTRL 0x34 #define IOPB_RES_ADDR_35 0x35 #define IOPB_RES_ADDR_36 0x36 #define IOPB_RES_ADDR_37 0x37 #define IOPB_RAM_BIST 0x38 #define IOPB_PLL_TEST 0x39 #define IOPB_PCI_INT_CFG 0x3A #define IOPB_RES_ADDR_3B 0x3B #define IOPB_RFIFO_CNT 0x3C #define IOPB_RES_ADDR_3D 0x3D #define IOPB_RES_ADDR_3E 0x3E #define IOPB_RES_ADDR_3F 0x3F /* * Word I/O register address from base of 'iop_base'. 
*/ #define IOPW_CHIP_ID_0 0x00 /* CID0 */ #define IOPW_CTRL_REG 0x02 /* CC */ #define IOPW_RAM_ADDR 0x04 /* LA */ #define IOPW_RAM_DATA 0x06 /* LD */ #define IOPW_RES_ADDR_08 0x08 #define IOPW_RISC_CSR 0x0A /* CSR */ #define IOPW_SCSI_CFG0 0x0C /* CFG0 */ #define IOPW_SCSI_CFG1 0x0E /* CFG1 */ #define IOPW_RES_ADDR_10 0x10 #define IOPW_SEL_MASK 0x12 /* SM */ #define IOPW_RES_ADDR_14 0x14 #define IOPW_FLASH_ADDR 0x16 /* FA */ #define IOPW_RES_ADDR_18 0x18 #define IOPW_EE_CMD 0x1A /* EC */ #define IOPW_EE_DATA 0x1C /* ED */ #define IOPW_SFIFO_CNT 0x1E /* SFC */ #define IOPW_RES_ADDR_20 0x20 #define IOPW_Q_BASE 0x22 /* QB */ #define IOPW_QP 0x24 /* QP */ #define IOPW_IX 0x26 /* IX */ #define IOPW_SP 0x28 /* SP */ #define IOPW_PC 0x2A /* PC */ #define IOPW_RES_ADDR_2C 0x2C #define IOPW_RES_ADDR_2E 0x2E #define IOPW_SCSI_DATA 0x30 /* SD */ #define IOPW_SCSI_DATA_HSHK 0x32 /* SDH */ #define IOPW_SCSI_CTRL 0x34 /* SC */ #define IOPW_HSHK_CFG 0x36 /* HCFG */ #define IOPW_SXFR_STATUS 0x36 /* SXS */ #define IOPW_SXFR_CNTL 0x38 /* SXL */ #define IOPW_SXFR_CNTH 0x3A /* SXH */ #define IOPW_RES_ADDR_3C 0x3C #define IOPW_RFIFO_DATA 0x3E /* RFD */ /* * Doubleword I/O register address from base of 'iop_base'. 
*/ #define IOPDW_RES_ADDR_0 0x00 #define IOPDW_RAM_DATA 0x04 #define IOPDW_RES_ADDR_8 0x08 #define IOPDW_RES_ADDR_C 0x0C #define IOPDW_RES_ADDR_10 0x10 #define IOPDW_COMMA 0x14 #define IOPDW_COMMB 0x18 #define IOPDW_RES_ADDR_1C 0x1C #define IOPDW_SDMA_ADDR0 0x20 #define IOPDW_SDMA_ADDR1 0x24 #define IOPDW_SDMA_COUNT 0x28 #define IOPDW_SDMA_ERROR 0x2C #define IOPDW_RDMA_ADDR0 0x30 #define IOPDW_RDMA_ADDR1 0x34 #define IOPDW_RDMA_COUNT 0x38 #define IOPDW_RDMA_ERROR 0x3C #define ADV_CHIP_ID_BYTE 0x25 #define ADV_CHIP_ID_WORD 0x04C1 #define ADV_INTR_ENABLE_HOST_INTR 0x01 #define ADV_INTR_ENABLE_SEL_INTR 0x02 #define ADV_INTR_ENABLE_DPR_INTR 0x04 #define ADV_INTR_ENABLE_RTA_INTR 0x08 #define ADV_INTR_ENABLE_RMA_INTR 0x10 #define ADV_INTR_ENABLE_RST_INTR 0x20 #define ADV_INTR_ENABLE_DPE_INTR 0x40 #define ADV_INTR_ENABLE_GLOBAL_INTR 0x80 #define ADV_INTR_STATUS_INTRA 0x01 #define ADV_INTR_STATUS_INTRB 0x02 #define ADV_INTR_STATUS_INTRC 0x04 #define ADV_RISC_CSR_STOP (0x0000) #define ADV_RISC_TEST_COND (0x2000) #define ADV_RISC_CSR_RUN (0x4000) #define ADV_RISC_CSR_SINGLE_STEP (0x8000) #define ADV_CTRL_REG_HOST_INTR 0x0100 #define ADV_CTRL_REG_SEL_INTR 0x0200 #define ADV_CTRL_REG_DPR_INTR 0x0400 #define ADV_CTRL_REG_RTA_INTR 0x0800 #define ADV_CTRL_REG_RMA_INTR 0x1000 #define ADV_CTRL_REG_RES_BIT14 0x2000 #define ADV_CTRL_REG_DPE_INTR 0x4000 #define ADV_CTRL_REG_POWER_DONE 0x8000 #define ADV_CTRL_REG_ANY_INTR 0xFF00 #define ADV_CTRL_REG_CMD_RESET 0x00C6 #define ADV_CTRL_REG_CMD_WR_IO_REG 0x00C5 #define ADV_CTRL_REG_CMD_RD_IO_REG 0x00C4 #define ADV_CTRL_REG_CMD_WR_PCI_CFG_SPACE 0x00C3 #define ADV_CTRL_REG_CMD_RD_PCI_CFG_SPACE 0x00C2 #define ADV_TICKLE_NOP 0x00 #define ADV_TICKLE_A 0x01 #define ADV_TICKLE_B 0x02 #define ADV_TICKLE_C 0x03 #define AdvIsIntPending(port) \ (AdvReadWordRegister(port, IOPW_CTRL_REG) & ADV_CTRL_REG_HOST_INTR) /* * SCSI_CFG0 Register bit definitions */ #define TIMER_MODEAB 0xC000 /* Watchdog, Second, and Select. Timer Ctrl. 
*/ #define PARITY_EN 0x2000 /* Enable SCSI Parity Error detection */ #define EVEN_PARITY 0x1000 /* Select Even Parity */ #define WD_LONG 0x0800 /* Watchdog Interval, 1: 57 min, 0: 13 sec */ #define QUEUE_128 0x0400 /* Queue Size, 1: 128 byte, 0: 64 byte */ #define PRIM_MODE 0x0100 /* Primitive SCSI mode */ #define SCAM_EN 0x0080 /* Enable SCAM selection */ #define SEL_TMO_LONG 0x0040 /* Sel/Resel Timeout, 1: 400 ms, 0: 1.6 ms */ #define CFRM_ID 0x0020 /* SCAM id sel. confirm., 1: fast, 0: 6.4 ms */ #define OUR_ID_EN 0x0010 /* Enable OUR_ID bits */ #define OUR_ID 0x000F /* SCSI ID */ /* * SCSI_CFG1 Register bit definitions */ #define BIG_ENDIAN 0x8000 /* Enable Big Endian Mode MIO:15, EEP:15 */ #define TERM_POL 0x2000 /* Terminator Polarity Ctrl. MIO:13, EEP:13 */ #define SLEW_RATE 0x1000 /* SCSI output buffer slew rate */ #define FILTER_SEL 0x0C00 /* Filter Period Selection */ #define FLTR_DISABLE 0x0000 /* Input Filtering Disabled */ #define FLTR_11_TO_20NS 0x0800 /* Input Filtering 11ns to 20ns */ #define FLTR_21_TO_39NS 0x0C00 /* Input Filtering 21ns to 39ns */ #define ACTIVE_DBL 0x0200 /* Disable Active Negation */ #define DIFF_MODE 0x0100 /* SCSI differential Mode (Read-Only) */ #define DIFF_SENSE 0x0080 /* 1: No SE cables, 0: SE cable (Read-Only) */ #define TERM_CTL_SEL 0x0040 /* Enable TERM_CTL_H and TERM_CTL_L */ #define TERM_CTL 0x0030 /* External SCSI Termination Bits */ #define TERM_CTL_H 0x0020 /* Enable External SCSI Upper Termination */ #define TERM_CTL_L 0x0010 /* Enable External SCSI Lower Termination */ #define CABLE_DETECT 0x000F /* External SCSI Cable Connection Status */ /* * Addendum for ASC-38C0800 Chip * * The ASC-38C1600 Chip uses the same definitions except that the * bus mode override bits [12:10] have been moved to byte register * offset 0xE (IOPB_SOFT_OVER_WR) bits [12:10]. The [12:10] bits in * SCSI_CFG1 are read-only and always available. Bit 14 (DIS_TERM_DRV) * is not needed. The [12:10] bits in IOPB_SOFT_OVER_WR are write-only. 
* Also each ASC-38C1600 function or channel uses only cable bits [5:4] * and [1:0]. Bits [14], [7:6], [3:2] are unused. */ #define DIS_TERM_DRV 0x4000 /* 1: Read c_det[3:0], 0: cannot read */ #define HVD_LVD_SE 0x1C00 /* Device Detect Bits */ #define HVD 0x1000 /* HVD Device Detect */ #define LVD 0x0800 /* LVD Device Detect */ #define SE 0x0400 /* SE Device Detect */ #define TERM_LVD 0x00C0 /* LVD Termination Bits */ #define TERM_LVD_HI 0x0080 /* Enable LVD Upper Termination */ #define TERM_LVD_LO 0x0040 /* Enable LVD Lower Termination */ #define TERM_SE 0x0030 /* SE Termination Bits */ #define TERM_SE_HI 0x0020 /* Enable SE Upper Termination */ #define TERM_SE_LO 0x0010 /* Enable SE Lower Termination */ #define C_DET_LVD 0x000C /* LVD Cable Detect Bits */ #define C_DET3 0x0008 /* Cable Detect for LVD External Wide */ #define C_DET2 0x0004 /* Cable Detect for LVD Internal Wide */ #define C_DET_SE 0x0003 /* SE Cable Detect Bits */ #define C_DET1 0x0002 /* Cable Detect for SE Internal Wide */ #define C_DET0 0x0001 /* Cable Detect for SE Internal Narrow */ #define CABLE_ILLEGAL_A 0x7 /* x 0 0 0 | on on | Illegal (all 3 connectors are used) */ #define CABLE_ILLEGAL_B 0xB /* 0 x 0 0 | on on | Illegal (all 3 connectors are used) */ /* * MEM_CFG Register bit definitions */ #define BIOS_EN 0x40 /* BIOS Enable MIO:14,EEP:14 */ #define FAST_EE_CLK 0x20 /* Diagnostic Bit */ #define RAM_SZ 0x1C /* Specify size of RAM to RISC */ #define RAM_SZ_2KB 0x00 /* 2 KB */ #define RAM_SZ_4KB 0x04 /* 4 KB */ #define RAM_SZ_8KB 0x08 /* 8 KB */ #define RAM_SZ_16KB 0x0C /* 16 KB */ #define RAM_SZ_32KB 0x10 /* 32 KB */ #define RAM_SZ_64KB 0x14 /* 64 KB */ /* * DMA_CFG0 Register bit definitions * * This register is only accessible to the host. 
*/ #define BC_THRESH_ENB 0x80 /* PCI DMA Start Conditions */ #define FIFO_THRESH 0x70 /* PCI DMA FIFO Threshold */ #define FIFO_THRESH_16B 0x00 /* 16 bytes */ #define FIFO_THRESH_32B 0x20 /* 32 bytes */ #define FIFO_THRESH_48B 0x30 /* 48 bytes */ #define FIFO_THRESH_64B 0x40 /* 64 bytes */ #define FIFO_THRESH_80B 0x50 /* 80 bytes (default) */ #define FIFO_THRESH_96B 0x60 /* 96 bytes */ #define FIFO_THRESH_112B 0x70 /* 112 bytes */ #define START_CTL 0x0C /* DMA start conditions */ #define START_CTL_TH 0x00 /* Wait threshold level (default) */ #define START_CTL_ID 0x04 /* Wait SDMA/SBUS idle */ #define START_CTL_THID 0x08 /* Wait threshold and SDMA/SBUS idle */ #define START_CTL_EMFU 0x0C /* Wait SDMA FIFO empty/full */ #define READ_CMD 0x03 /* Memory Read Method */ #define READ_CMD_MR 0x00 /* Memory Read */ #define READ_CMD_MRL 0x02 /* Memory Read Long */ #define READ_CMD_MRM 0x03 /* Memory Read Multiple (default) */ /* * ASC-38C0800 RAM BIST Register bit definitions */ #define RAM_TEST_MODE 0x80 #define PRE_TEST_MODE 0x40 #define NORMAL_MODE 0x00 #define RAM_TEST_DONE 0x10 #define RAM_TEST_STATUS 0x0F #define RAM_TEST_HOST_ERROR 0x08 #define RAM_TEST_INTRAM_ERROR 0x04 #define RAM_TEST_RISC_ERROR 0x02 #define RAM_TEST_SCSI_ERROR 0x01 #define RAM_TEST_SUCCESS 0x00 #define PRE_TEST_VALUE 0x05 #define NORMAL_VALUE 0x00 /* * ASC38C1600 Definitions * * IOPB_PCI_INT_CFG Bit Field Definitions */ #define INTAB_LD 0x80 /* Value loaded from EEPROM Bit 11. */ /* * Bit 1 can be set to change the interrupt for the Function to operate in * Totem Pole mode. By default Bit 1 is 0 and the interrupt operates in * Open Drain mode. Both functions of the ASC38C1600 must be set to the same * mode, otherwise the operating mode is undefined. */ #define TOTEMPOLE 0x02 /* * Bit 0 can be used to change the Int Pin for the Function. The value is * 0 by default for both Functions with Function 0 using INT A and Function * B using INT B. For Function 0 if set, INT B is used. 
For Function 1 if set, * INT A is used. * * EEPROM Word 0 Bit 11 for each Function may change the initial Int Pin * value specified in the PCI Configuration Space. */ #define INTAB 0x01 /* * Adv Library Status Definitions */ #define ADV_TRUE 1 #define ADV_FALSE 0 #define ADV_SUCCESS 1 #define ADV_BUSY 0 #define ADV_ERROR (-1) /* * ADV_DVC_VAR 'warn_code' values */ #define ASC_WARN_BUSRESET_ERROR 0x0001 /* SCSI Bus Reset error */ #define ASC_WARN_EEPROM_CHKSUM 0x0002 /* EEP check sum error */ #define ASC_WARN_EEPROM_TERMINATION 0x0004 /* EEP termination bad field */ #define ASC_WARN_ERROR 0xFFFF /* ADV_ERROR return */ #define ADV_MAX_TID 15 /* max. target identifier */ #define ADV_MAX_LUN 7 /* max. logical unit number */ /* * Fixed locations of microcode operating variables. */ #define ASC_MC_CODE_BEGIN_ADDR 0x0028 /* microcode start address */ #define ASC_MC_CODE_END_ADDR 0x002A /* microcode end address */ #define ASC_MC_CODE_CHK_SUM 0x002C /* microcode code checksum */ #define ASC_MC_VERSION_DATE 0x0038 /* microcode version */ #define ASC_MC_VERSION_NUM 0x003A /* microcode number */ #define ASC_MC_BIOSMEM 0x0040 /* BIOS RISC Memory Start */ #define ASC_MC_BIOSLEN 0x0050 /* BIOS RISC Memory Length */ #define ASC_MC_BIOS_SIGNATURE 0x0058 /* BIOS Signature 0x55AA */ #define ASC_MC_BIOS_VERSION 0x005A /* BIOS Version (2 bytes) */ #define ASC_MC_SDTR_SPEED1 0x0090 /* SDTR Speed for TID 0-3 */ #define ASC_MC_SDTR_SPEED2 0x0092 /* SDTR Speed for TID 4-7 */ #define ASC_MC_SDTR_SPEED3 0x0094 /* SDTR Speed for TID 8-11 */ #define ASC_MC_SDTR_SPEED4 0x0096 /* SDTR Speed for TID 12-15 */ #define ASC_MC_CHIP_TYPE 0x009A #define ASC_MC_INTRB_CODE 0x009B #define ASC_MC_WDTR_ABLE 0x009C #define ASC_MC_SDTR_ABLE 0x009E #define ASC_MC_TAGQNG_ABLE 0x00A0 #define ASC_MC_DISC_ENABLE 0x00A2 #define ASC_MC_IDLE_CMD_STATUS 0x00A4 #define ASC_MC_IDLE_CMD 0x00A6 #define ASC_MC_IDLE_CMD_PARAMETER 0x00A8 #define ASC_MC_DEFAULT_SCSI_CFG0 0x00AC #define ASC_MC_DEFAULT_SCSI_CFG1 0x00AE #define 
ASC_MC_DEFAULT_MEM_CFG 0x00B0 #define ASC_MC_DEFAULT_SEL_MASK 0x00B2 #define ASC_MC_SDTR_DONE 0x00B6 #define ASC_MC_NUMBER_OF_QUEUED_CMD 0x00C0 #define ASC_MC_NUMBER_OF_MAX_CMD 0x00D0 #define ASC_MC_DEVICE_HSHK_CFG_TABLE 0x0100 #define ASC_MC_CONTROL_FLAG 0x0122 /* Microcode control flag. */ #define ASC_MC_WDTR_DONE 0x0124 #define ASC_MC_CAM_MODE_MASK 0x015E /* CAM mode TID bitmask. */ #define ASC_MC_ICQ 0x0160 #define ASC_MC_IRQ 0x0164 #define ASC_MC_PPR_ABLE 0x017A /* * BIOS LRAM variable absolute offsets. */ #define BIOS_CODESEG 0x54 #define BIOS_CODELEN 0x56 #define BIOS_SIGNATURE 0x58 #define BIOS_VERSION 0x5A /* * Microcode Control Flags * * Flags set by the Adv Library in RISC variable 'control_flag' (0x122) * and handled by the microcode. */ #define CONTROL_FLAG_IGNORE_PERR 0x0001 /* Ignore DMA Parity Errors */ #define CONTROL_FLAG_ENABLE_AIPP 0x0002 /* Enabled AIPP checking. */ /* * ASC_MC_DEVICE_HSHK_CFG_TABLE microcode table or HSHK_CFG register format */ #define HSHK_CFG_WIDE_XFR 0x8000 #define HSHK_CFG_RATE 0x0F00 #define HSHK_CFG_OFFSET 0x001F #define ASC_DEF_MAX_HOST_QNG 0xFD /* Max. number of host commands (253) */ #define ASC_DEF_MIN_HOST_QNG 0x10 /* Min. number of host commands (16) */ #define ASC_DEF_MAX_DVC_QNG 0x3F /* Max. number commands per device (63) */ #define ASC_DEF_MIN_DVC_QNG 0x04 /* Min. number commands per device (4) */ #define ASC_QC_DATA_CHECK 0x01 /* Require ASC_QC_DATA_OUT set or clear. */ #define ASC_QC_DATA_OUT 0x02 /* Data out DMA transfer. */ #define ASC_QC_START_MOTOR 0x04 /* Send auto-start motor before request. */ #define ASC_QC_NO_OVERRUN 0x08 /* Don't report overrun. */ #define ASC_QC_FREEZE_TIDQ 0x10 /* Freeze TID queue after request. XXX TBD */ #define ASC_QSC_NO_DISC 0x01 /* Don't allow disconnect for request. */ #define ASC_QSC_NO_TAGMSG 0x02 /* Don't allow tag queuing for request. */ #define ASC_QSC_NO_SYNC 0x04 /* Don't use Synch. transfer on request. 
*/ #define ASC_QSC_NO_WIDE 0x08 /* Don't use Wide transfer on request. */ #define ASC_QSC_REDO_DTR 0x10 /* Renegotiate WDTR/SDTR before request. */ /* * Note: If a Tag Message is to be sent and neither ASC_QSC_HEAD_TAG or * ASC_QSC_ORDERED_TAG is set, then a Simple Tag Message (0x20) is used. */ #define ASC_QSC_HEAD_TAG 0x40 /* Use Head Tag Message (0x21). */ #define ASC_QSC_ORDERED_TAG 0x80 /* Use Ordered Tag Message (0x22). */ /* * All fields here are accessed by the board microcode and need to be * little-endian. */ typedef struct adv_carr_t { ADV_VADDR carr_va; /* Carrier Virtual Address */ ADV_PADDR carr_pa; /* Carrier Physical Address */ ADV_VADDR areq_vpa; /* ASC_SCSI_REQ_Q Virtual or Physical Address */ /* * next_vpa [31:4] Carrier Virtual or Physical Next Pointer * * next_vpa [3:1] Reserved Bits * next_vpa [0] Done Flag set in Response Queue. */ ADV_VADDR next_vpa; } ADV_CARR_T; /* * Mask used to eliminate low 4 bits of carrier 'next_vpa' field. */ #define ASC_NEXT_VPA_MASK 0xFFFFFFF0 #define ASC_RQ_DONE 0x00000001 #define ASC_RQ_GOOD 0x00000002 #define ASC_CQ_STOPPER 0x00000000 #define ASC_GET_CARRP(carrp) ((carrp) & ASC_NEXT_VPA_MASK) #define ADV_CARRIER_NUM_PAGE_CROSSING \ (((ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) + (PAGE_SIZE - 1))/PAGE_SIZE) #define ADV_CARRIER_BUFSIZE \ ((ADV_CARRIER_COUNT + ADV_CARRIER_NUM_PAGE_CROSSING) * sizeof(ADV_CARR_T)) /* * ASC_SCSI_REQ_Q 'a_flag' definitions * * The Adv Library should limit use to the lower nibble (4 bits) of * a_flag. Drivers are free to use the upper nibble (4 bits) of a_flag. */ #define ADV_POLL_REQUEST 0x01 /* poll for request completion */ #define ADV_SCSIQ_DONE 0x02 /* request done */ #define ADV_DONT_RETRY 0x08 /* don't do retry */ #define ADV_CHIP_ASC3550 0x01 /* Ultra-Wide IC */ #define ADV_CHIP_ASC38C0800 0x02 /* Ultra2-Wide/LVD IC */ #define ADV_CHIP_ASC38C1600 0x03 /* Ultra3-Wide/LVD2 IC */ /* * Adapter temporary configuration structure * * This structure can be discarded after initialization. 
Don't add * fields here needed after initialization. * * Field naming convention: * * *_enable indicates the field enables or disables a feature. The * value of the field is never reset. */ typedef struct adv_dvc_cfg { ushort disc_enable; /* enable disconnection */ uchar chip_version; /* chip version */ uchar termination; /* Term. Ctrl. bits 6-5 of SCSI_CFG1 register */ ushort control_flag; /* Microcode Control Flag */ ushort mcode_date; /* Microcode date */ ushort mcode_version; /* Microcode version */ ushort serial1; /* EEPROM serial number word 1 */ ushort serial2; /* EEPROM serial number word 2 */ ushort serial3; /* EEPROM serial number word 3 */ } ADV_DVC_CFG; struct adv_dvc_var; struct adv_scsi_req_q; typedef struct asc_sg_block { uchar reserved1; uchar reserved2; uchar reserved3; uchar sg_cnt; /* Valid entries in block. */ ADV_PADDR sg_ptr; /* Pointer to next sg block. */ struct { ADV_PADDR sg_addr; /* SG element address. */ ADV_DCNT sg_count; /* SG element count. */ } sg_list[NO_OF_SG_PER_BLOCK]; } ADV_SG_BLOCK; /* * ADV_SCSI_REQ_Q - microcode request structure * * All fields in this structure up to byte 60 are used by the microcode. * The microcode makes assumptions about the size and ordering of fields * in this structure. Do not change the structure definition here without * coordinating the change with the microcode. * * All fields accessed by microcode must be maintained in little_endian * order. */ typedef struct adv_scsi_req_q { uchar cntl; /* Ucode flags and state (ASC_MC_QC_*). */ uchar target_cmd; uchar target_id; /* Device target identifier. */ uchar target_lun; /* Device target logical unit number. */ ADV_PADDR data_addr; /* Data buffer physical address. */ ADV_DCNT data_cnt; /* Data count. Ucode sets to residual. */ ADV_PADDR sense_addr; ADV_PADDR carr_pa; uchar mflag; uchar sense_len; uchar cdb_len; /* SCSI CDB length. Must <= 16 bytes. */ uchar scsi_cntl; uchar done_status; /* Completion status. */ uchar scsi_status; /* SCSI status byte. 
*/ uchar host_status; /* Ucode host status. */ uchar sg_working_ix; uchar cdb[12]; /* SCSI CDB bytes 0-11. */ ADV_PADDR sg_real_addr; /* SG list physical address. */ ADV_PADDR scsiq_rptr; uchar cdb16[4]; /* SCSI CDB bytes 12-15. */ ADV_VADDR scsiq_ptr; ADV_VADDR carr_va; /* * End of microcode structure - 60 bytes. The rest of the structure * is used by the Adv Library and ignored by the microcode. */ ADV_VADDR srb_ptr; ADV_SG_BLOCK *sg_list_ptr; /* SG list virtual address. */ char *vdata_addr; /* Data buffer virtual address. */ uchar a_flag; uchar pad[2]; /* Pad out to a word boundary. */ } ADV_SCSI_REQ_Q; /* * The following two structures are used to process Wide Board requests. * * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library * and microcode with the ADV_SCSI_REQ_Q field 'srb_ptr' pointing to the * adv_req_t. The adv_req_t structure 'cmndp' field in turn points to the * Mid-Level SCSI request structure. * * Zero or more ADV_SG_BLOCK are used with each ADV_SCSI_REQ_Q. Each * ADV_SG_BLOCK structure holds 15 scatter-gather elements. Under Linux * up to 255 scatter-gather elements may be used per request or * ADV_SCSI_REQ_Q. * * Both structures must be 32 byte aligned. */ typedef struct adv_sgblk { ADV_SG_BLOCK sg_block; /* Sgblock structure. */ uchar align[32]; /* Sgblock structure padding. */ struct adv_sgblk *next_sgblkp; /* Next scatter-gather structure. */ } adv_sgblk_t; typedef struct adv_req { ADV_SCSI_REQ_Q scsi_req_q; /* Adv Library request structure. */ uchar align[32]; /* Request structure padding. */ struct scsi_cmnd *cmndp; /* Mid-Level SCSI command pointer. */ adv_sgblk_t *sgblkp; /* Adv Library scatter-gather pointer. */ struct adv_req *next_reqp; /* Next Request Structure. */ } adv_req_t; /* * Adapter operation variable structure. * * One structure is required per host adapter. 
* * Field naming convention: * * *_able indicates both whether a feature should be enabled or disabled * and whether a device isi capable of the feature. At initialization * this field may be set, but later if a device is found to be incapable * of the feature, the field is cleared. */ typedef struct adv_dvc_var { AdvPortAddr iop_base; /* I/O port address */ ushort err_code; /* fatal error code */ ushort bios_ctrl; /* BIOS control word, EEPROM word 12 */ ushort wdtr_able; /* try WDTR for a device */ ushort sdtr_able; /* try SDTR for a device */ ushort ultra_able; /* try SDTR Ultra speed for a device */ ushort sdtr_speed1; /* EEPROM SDTR Speed for TID 0-3 */ ushort sdtr_speed2; /* EEPROM SDTR Speed for TID 4-7 */ ushort sdtr_speed3; /* EEPROM SDTR Speed for TID 8-11 */ ushort sdtr_speed4; /* EEPROM SDTR Speed for TID 12-15 */ ushort tagqng_able; /* try tagged queuing with a device */ ushort ppr_able; /* PPR message capable per TID bitmask. */ uchar max_dvc_qng; /* maximum number of tagged commands per device */ ushort start_motor; /* start motor command allowed */ uchar scsi_reset_wait; /* delay in seconds after scsi bus reset */ uchar chip_no; /* should be assigned by caller */ uchar max_host_qng; /* maximum number of Q'ed command allowed */ ushort no_scam; /* scam_tolerant of EEPROM */ struct asc_board *drv_ptr; /* driver pointer to private structure */ uchar chip_scsi_id; /* chip SCSI target ID */ uchar chip_type; uchar bist_err_code; ADV_CARR_T *carrier_buf; ADV_CARR_T *carr_freelist; /* Carrier free list. */ ADV_CARR_T *icq_sp; /* Initiator command queue stopper pointer. */ ADV_CARR_T *irq_sp; /* Initiator response queue stopper pointer. */ ushort carr_pending_cnt; /* Count of pending carriers. */ struct adv_req *orig_reqp; /* adv_req_t memory block. */ /* * Note: The following fields will not be used after initialization. The * driver may discard the buffer after initialization is done. 
*/ ADV_DVC_CFG *cfg; /* temporary configuration structure */ } ADV_DVC_VAR; /* * Microcode idle loop commands */ #define IDLE_CMD_COMPLETED 0 #define IDLE_CMD_STOP_CHIP 0x0001 #define IDLE_CMD_STOP_CHIP_SEND_INT 0x0002 #define IDLE_CMD_SEND_INT 0x0004 #define IDLE_CMD_ABORT 0x0008 #define IDLE_CMD_DEVICE_RESET 0x0010 #define IDLE_CMD_SCSI_RESET_START 0x0020 /* Assert SCSI Bus Reset */ #define IDLE_CMD_SCSI_RESET_END 0x0040 /* Deassert SCSI Bus Reset */ #define IDLE_CMD_SCSIREQ 0x0080 #define IDLE_CMD_STATUS_SUCCESS 0x0001 #define IDLE_CMD_STATUS_FAILURE 0x0002 /* * AdvSendIdleCmd() flag definitions. */ #define ADV_NOWAIT 0x01 /* * Wait loop time out values. */ #define SCSI_WAIT_100_MSEC 100UL /* 100 milliseconds */ #define SCSI_US_PER_MSEC 1000 /* microseconds per millisecond */ #define SCSI_MAX_RETRY 10 /* retry count */ #define ADV_ASYNC_RDMA_FAILURE 0x01 /* Fatal RDMA failure. */ #define ADV_ASYNC_SCSI_BUS_RESET_DET 0x02 /* Detected SCSI Bus Reset. */ #define ADV_ASYNC_CARRIER_READY_FAILURE 0x03 /* Carrier Ready failure. */ #define ADV_RDMA_IN_CARR_AND_Q_INVALID 0x04 /* RDMAed-in data invalid. */ #define ADV_HOST_SCSI_BUS_RESET 0x80 /* Host Initiated SCSI Bus Reset. */ /* Read byte from a register. */ #define AdvReadByteRegister(iop_base, reg_off) \ (ADV_MEM_READB((iop_base) + (reg_off))) /* Write byte to a register. */ #define AdvWriteByteRegister(iop_base, reg_off, byte) \ (ADV_MEM_WRITEB((iop_base) + (reg_off), (byte))) /* Read word (2 bytes) from a register. */ #define AdvReadWordRegister(iop_base, reg_off) \ (ADV_MEM_READW((iop_base) + (reg_off))) /* Write word (2 bytes) to a register. */ #define AdvWriteWordRegister(iop_base, reg_off, word) \ (ADV_MEM_WRITEW((iop_base) + (reg_off), (word))) /* Write dword (4 bytes) to a register. */ #define AdvWriteDWordRegister(iop_base, reg_off, dword) \ (ADV_MEM_WRITEDW((iop_base) + (reg_off), (dword))) /* Read byte from LRAM. 
*/ #define AdvReadByteLram(iop_base, addr, byte) \ do { \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \ (byte) = ADV_MEM_READB((iop_base) + IOPB_RAM_DATA); \ } while (0) /* Write byte to LRAM. */ #define AdvWriteByteLram(iop_base, addr, byte) \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ ADV_MEM_WRITEB((iop_base) + IOPB_RAM_DATA, (byte))) /* Read word (2 bytes) from LRAM. */ #define AdvReadWordLram(iop_base, addr, word) \ do { \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \ (word) = (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)); \ } while (0) /* Write word (2 bytes) to LRAM. */ #define AdvWriteWordLram(iop_base, addr, word) \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word))) /* Write little-endian double word (4 bytes) to LRAM */ /* Because of unspecified C language ordering don't use auto-increment. */ #define AdvWriteDWordLramNoSwap(iop_base, addr, dword) \ ((ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \ cpu_to_le16((ushort) ((dword) & 0xFFFF)))), \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \ cpu_to_le16((ushort) ((dword >> 16) & 0xFFFF))))) /* Read word (2 bytes) from LRAM assuming that the address is already set. */ #define AdvReadWordAutoIncLram(iop_base) \ (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)) /* Write word (2 bytes) to LRAM assuming that the address is already set. */ #define AdvWriteWordAutoIncLram(iop_base, word) \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word))) /* * Define macro to check for Condor signature. * * Evaluate to ADV_TRUE if a Condor chip is found the specified port * address 'iop_base'. Otherwise evalue to ADV_FALSE. */ #define AdvFindSignature(iop_base) \ (((AdvReadByteRegister((iop_base), IOPB_CHIP_ID_1) == \ ADV_CHIP_ID_BYTE) && \ (AdvReadWordRegister((iop_base), IOPW_CHIP_ID_0) == \ ADV_CHIP_ID_WORD)) ? 
ADV_TRUE : ADV_FALSE) /* * Define macro to Return the version number of the chip at 'iop_base'. * * The second parameter 'bus_type' is currently unused. */ #define AdvGetChipVersion(iop_base, bus_type) \ AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV) /* * Abort an SRB in the chip's RISC Memory. The 'srb_ptr' argument must * match the ASC_SCSI_REQ_Q 'srb_ptr' field. * * If the request has not yet been sent to the device it will simply be * aborted from RISC memory. If the request is disconnected it will be * aborted on reselection by sending an Abort Message to the target ID. * * Return value: * ADV_TRUE(1) - Queue was successfully aborted. * ADV_FALSE(0) - Queue was not found on the active queue list. */ #define AdvAbortQueue(asc_dvc, scsiq) \ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \ (ADV_DCNT) (scsiq)) /* * Send a Bus Device Reset Message to the specified target ID. * * All outstanding commands will be purged if sending the * Bus Device Reset Message is successful. * * Return Value: * ADV_TRUE(1) - All requests on the target are purged. * ADV_FALSE(0) - Couldn't issue Bus Device Reset Message; Requests * are not purged. */ #define AdvResetDevice(asc_dvc, target_id) \ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \ (ADV_DCNT) (target_id)) /* * SCSI Wide Type definition. */ #define ADV_SCSI_BIT_ID_TYPE ushort /* * AdvInitScsiTarget() 'cntl_flag' options. */ #define ADV_SCAN_LUN 0x01 #define ADV_CAPINFO_NOLUN 0x02 /* * Convert target id to target id bit mask. */ #define ADV_TID_TO_TIDMASK(tid) (0x01 << ((tid) & ADV_MAX_TID)) /* * ASC_SCSI_REQ_Q 'done_status' and 'host_status' return values. */ #define QD_NO_STATUS 0x00 /* Request not completed yet. 
*/ #define QD_NO_ERROR 0x01 #define QD_ABORTED_BY_HOST 0x02 #define QD_WITH_ERROR 0x04 #define QHSTA_NO_ERROR 0x00 #define QHSTA_M_SEL_TIMEOUT 0x11 #define QHSTA_M_DATA_OVER_RUN 0x12 #define QHSTA_M_UNEXPECTED_BUS_FREE 0x13 #define QHSTA_M_QUEUE_ABORTED 0x15 #define QHSTA_M_SXFR_SDMA_ERR 0x16 /* SXFR_STATUS SCSI DMA Error */ #define QHSTA_M_SXFR_SXFR_PERR 0x17 /* SXFR_STATUS SCSI Bus Parity Error */ #define QHSTA_M_RDMA_PERR 0x18 /* RISC PCI DMA parity error */ #define QHSTA_M_SXFR_OFF_UFLW 0x19 /* SXFR_STATUS Offset Underflow */ #define QHSTA_M_SXFR_OFF_OFLW 0x20 /* SXFR_STATUS Offset Overflow */ #define QHSTA_M_SXFR_WD_TMO 0x21 /* SXFR_STATUS Watchdog Timeout */ #define QHSTA_M_SXFR_DESELECTED 0x22 /* SXFR_STATUS Deselected */ /* Note: QHSTA_M_SXFR_XFR_OFLW is identical to QHSTA_M_DATA_OVER_RUN. */ #define QHSTA_M_SXFR_XFR_OFLW 0x12 /* SXFR_STATUS Transfer Overflow */ #define QHSTA_M_SXFR_XFR_PH_ERR 0x24 /* SXFR_STATUS Transfer Phase Error */ #define QHSTA_M_SXFR_UNKNOWN_ERROR 0x25 /* SXFR_STATUS Unknown Error */ #define QHSTA_M_SCSI_BUS_RESET 0x30 /* Request aborted from SBR */ #define QHSTA_M_SCSI_BUS_RESET_UNSOL 0x31 /* Request aborted from unsol. SBR */ #define QHSTA_M_BUS_DEVICE_RESET 0x32 /* Request aborted from BDR */ #define QHSTA_M_DIRECTION_ERR 0x35 /* Data Phase mismatch */ #define QHSTA_M_DIRECTION_ERR_HUNG 0x36 /* Data Phase mismatch and bus hang */ #define QHSTA_M_WTM_TIMEOUT 0x41 #define QHSTA_M_BAD_CMPL_STATUS_IN 0x42 #define QHSTA_M_NO_AUTO_REQ_SENSE 0x43 #define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44 #define QHSTA_M_INVALID_DEVICE 0x45 /* Bad target ID */ #define QHSTA_M_FROZEN_TIDQ 0x46 /* TID Queue frozen. */ #define QHSTA_M_SGBACKUP_ERROR 0x47 /* Scatter-Gather backup error */ /* Return the address that is aligned at the next doubleword >= to 'addr'. 
*/ #define ADV_8BALIGN(addr) (((ulong) (addr) + 0x7) & ~0x7) #define ADV_16BALIGN(addr) (((ulong) (addr) + 0xF) & ~0xF) #define ADV_32BALIGN(addr) (((ulong) (addr) + 0x1F) & ~0x1F) /* * Total contiguous memory needed for driver SG blocks. * * ADV_MAX_SG_LIST must be defined by a driver. It is the maximum * number of scatter-gather elements the driver supports in a * single request. */ #define ADV_SG_LIST_MAX_BYTE_SIZE \ (sizeof(ADV_SG_BLOCK) * \ ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK)) /* struct asc_board flags */ #define ASC_IS_WIDE_BOARD 0x04 /* AdvanSys Wide Board */ #define ASC_NARROW_BOARD(boardp) (((boardp)->flags & ASC_IS_WIDE_BOARD) == 0) #define NO_ISA_DMA 0xff /* No ISA DMA Channel Used */ #define ASC_INFO_SIZE 128 /* advansys_info() line size */ #ifdef CONFIG_PROC_FS /* /proc/scsi/advansys/[0...] related definitions */ #define ASC_PRTBUF_SIZE 2048 #define ASC_PRTLINE_SIZE 160 #define ASC_PRT_NEXT() \ if (cp) { \ totlen += len; \ leftlen -= len; \ if (leftlen == 0) { \ return totlen; \ } \ cp += len; \ } #endif /* CONFIG_PROC_FS */ /* Asc Library return codes */ #define ASC_TRUE 1 #define ASC_FALSE 0 #define ASC_NOERROR 1 #define ASC_BUSY 0 #define ASC_ERROR (-1) /* struct scsi_cmnd function return codes */ #define STATUS_BYTE(byte) (byte) #define MSG_BYTE(byte) ((byte) << 8) #define HOST_BYTE(byte) ((byte) << 16) #define DRIVER_BYTE(byte) ((byte) << 24) #define ASC_STATS(shost, counter) ASC_STATS_ADD(shost, counter, 1) #ifndef ADVANSYS_STATS #define ASC_STATS_ADD(shost, counter, count) #else /* ADVANSYS_STATS */ #define ASC_STATS_ADD(shost, counter, count) \ (((struct asc_board *) shost_priv(shost))->asc_stats.counter += (count)) #endif /* ADVANSYS_STATS */ /* If the result wraps when calculating tenths, return 0. */ #define ASC_TENTHS(num, den) \ (((10 * ((num)/(den))) > (((num) * 10)/(den))) ? \ 0 : ((((num) * 10)/(den)) - (10 * ((num)/(den))))) /* * Display a message to the console. 
*/ #define ASC_PRINT(s) \ { \ printk("advansys: "); \ printk(s); \ } #define ASC_PRINT1(s, a1) \ { \ printk("advansys: "); \ printk((s), (a1)); \ } #define ASC_PRINT2(s, a1, a2) \ { \ printk("advansys: "); \ printk((s), (a1), (a2)); \ } #define ASC_PRINT3(s, a1, a2, a3) \ { \ printk("advansys: "); \ printk((s), (a1), (a2), (a3)); \ } #define ASC_PRINT4(s, a1, a2, a3, a4) \ { \ printk("advansys: "); \ printk((s), (a1), (a2), (a3), (a4)); \ } #ifndef ADVANSYS_DEBUG #define ASC_DBG(lvl, s...) #define ASC_DBG_PRT_SCSI_HOST(lvl, s) #define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) #define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) #define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) #define ADV_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) #define ASC_DBG_PRT_HEX(lvl, name, start, length) #define ASC_DBG_PRT_CDB(lvl, cdb, len) #define ASC_DBG_PRT_SENSE(lvl, sense, len) #define ASC_DBG_PRT_INQUIRY(lvl, inq, len) #else /* ADVANSYS_DEBUG */ /* * Debugging Message Levels: * 0: Errors Only * 1: High-Level Tracing * 2-N: Verbose Tracing */ #define ASC_DBG(lvl, format, arg...) 
{ \ if (asc_dbglvl >= (lvl)) \ printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \ __func__ , ## arg); \ } #define ASC_DBG_PRT_SCSI_HOST(lvl, s) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_scsi_host(s); \ } \ } #define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_asc_scsi_q(scsiqp); \ } \ } #define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_asc_qdone_info(qdone); \ } \ } #define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_adv_scsi_req_q(scsiqp); \ } \ } #define ASC_DBG_PRT_HEX(lvl, name, start, length) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_hex((name), (start), (length)); \ } \ } #define ASC_DBG_PRT_CDB(lvl, cdb, len) \ ASC_DBG_PRT_HEX((lvl), "CDB", (uchar *) (cdb), (len)); #define ASC_DBG_PRT_SENSE(lvl, sense, len) \ ASC_DBG_PRT_HEX((lvl), "SENSE", (uchar *) (sense), (len)); #define ASC_DBG_PRT_INQUIRY(lvl, inq, len) \ ASC_DBG_PRT_HEX((lvl), "INQUIRY", (uchar *) (inq), (len)); #endif /* ADVANSYS_DEBUG */ #ifdef ADVANSYS_STATS /* Per board statistics structure */ struct asc_stats { /* Driver Entrypoint Statistics */ ADV_DCNT queuecommand; /* # calls to advansys_queuecommand() */ ADV_DCNT reset; /* # calls to advansys_eh_bus_reset() */ ADV_DCNT biosparam; /* # calls to advansys_biosparam() */ ADV_DCNT interrupt; /* # advansys_interrupt() calls */ ADV_DCNT callback; /* # calls to asc/adv_isr_callback() */ ADV_DCNT done; /* # calls to request's scsi_done function */ ADV_DCNT build_error; /* # asc/adv_build_req() ASC_ERROR returns. */ ADV_DCNT adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */ ADV_DCNT adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */ /* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */ ADV_DCNT exe_noerror; /* # ASC_NOERROR returns. */ ADV_DCNT exe_busy; /* # ASC_BUSY returns. */ ADV_DCNT exe_error; /* # ASC_ERROR returns. */ ADV_DCNT exe_unknown; /* # unknown returns. 
*/ /* Data Transfer Statistics */ ADV_DCNT xfer_cnt; /* # I/O requests received */ ADV_DCNT xfer_elem; /* # scatter-gather elements */ ADV_DCNT xfer_sect; /* # 512-byte blocks */ }; #endif /* ADVANSYS_STATS */ /* * Structure allocated for each board. * * This structure is allocated by scsi_host_alloc() at the end * of the 'Scsi_Host' structure starting at the 'hostdata' * field. It is guaranteed to be allocated from DMA-able memory. */ struct asc_board { struct device *dev; uint flags; /* Board flags */ unsigned int irq; union { ASC_DVC_VAR asc_dvc_var; /* Narrow board */ ADV_DVC_VAR adv_dvc_var; /* Wide board */ } dvc_var; union { ASC_DVC_CFG asc_dvc_cfg; /* Narrow board */ ADV_DVC_CFG adv_dvc_cfg; /* Wide board */ } dvc_cfg; ushort asc_n_io_port; /* Number I/O ports. */ ADV_SCSI_BIT_ID_TYPE init_tidmask; /* Target init./valid mask */ ushort reqcnt[ADV_MAX_TID + 1]; /* Starvation request count */ ADV_SCSI_BIT_ID_TYPE queue_full; /* Queue full mask */ ushort queue_full_cnt[ADV_MAX_TID + 1]; /* Queue full count */ union { ASCEEP_CONFIG asc_eep; /* Narrow EEPROM config. */ ADVEEP_3550_CONFIG adv_3550_eep; /* 3550 EEPROM config. */ ADVEEP_38C0800_CONFIG adv_38C0800_eep; /* 38C0800 EEPROM config. */ ADVEEP_38C1600_CONFIG adv_38C1600_eep; /* 38C1600 EEPROM config. */ } eep_config; ulong last_reset; /* Saved last reset time */ /* /proc/scsi/advansys/[0...] */ char *prtbuf; /* /proc print buffer */ #ifdef ADVANSYS_STATS struct asc_stats asc_stats; /* Board statistics */ #endif /* ADVANSYS_STATS */ /* * The following fields are used only for Narrow Boards. */ uchar sdtr_data[ASC_MAX_TID + 1]; /* SDTR information */ /* * The following fields are used only for Wide Boards. */ void __iomem *ioremap_addr; /* I/O Memory remap address. */ ushort ioport; /* I/O Port address. */ adv_req_t *adv_reqp; /* Request structures. */ adv_sgblk_t *adv_sgblkp; /* Scatter-gather structures. */ ushort bios_signature; /* BIOS Signature. */ ushort bios_version; /* BIOS Version. 
*/ ushort bios_codeseg; /* BIOS Code Segment. */ ushort bios_codelen; /* BIOS Code Segment Length. */ }; #define asc_dvc_to_board(asc_dvc) container_of(asc_dvc, struct asc_board, \ dvc_var.asc_dvc_var) #define adv_dvc_to_board(adv_dvc) container_of(adv_dvc, struct asc_board, \ dvc_var.adv_dvc_var) #define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev) #ifdef ADVANSYS_DEBUG static int asc_dbglvl = 3; /* * asc_prt_asc_dvc_var() */ static void asc_prt_asc_dvc_var(ASC_DVC_VAR *h) { printk("ASC_DVC_VAR at addr 0x%lx\n", (ulong)h); printk(" iop_base 0x%x, err_code 0x%x, dvc_cntl 0x%x, bug_fix_cntl " "%d,\n", h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl); printk(" bus_type %d, init_sdtr 0x%x,\n", h->bus_type, (unsigned)h->init_sdtr); printk(" sdtr_done 0x%x, use_tagged_qng 0x%x, unit_not_ready 0x%x, " "chip_no 0x%x,\n", (unsigned)h->sdtr_done, (unsigned)h->use_tagged_qng, (unsigned)h->unit_not_ready, (unsigned)h->chip_no); printk(" queue_full_or_busy 0x%x, start_motor 0x%x, scsi_reset_wait " "%u,\n", (unsigned)h->queue_full_or_busy, (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); printk(" is_in_int %u, max_total_qng %u, cur_total_qng %u, " "in_critical_cnt %u,\n", (unsigned)h->is_in_int, (unsigned)h->max_total_qng, (unsigned)h->cur_total_qng, (unsigned)h->in_critical_cnt); printk(" last_q_shortage %u, init_state 0x%x, no_scam 0x%x, " "pci_fix_asyn_xfer 0x%x,\n", (unsigned)h->last_q_shortage, (unsigned)h->init_state, (unsigned)h->no_scam, (unsigned)h->pci_fix_asyn_xfer); printk(" cfg 0x%lx\n", (ulong)h->cfg); } /* * asc_prt_asc_dvc_cfg() */ static void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h) { printk("ASC_DVC_CFG at addr 0x%lx\n", (ulong)h); printk(" can_tagged_qng 0x%x, cmd_qng_enabled 0x%x,\n", h->can_tagged_qng, h->cmd_qng_enabled); printk(" disc_enable 0x%x, sdtr_enable 0x%x,\n", h->disc_enable, h->sdtr_enable); printk(" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, " "chip_version %d,\n", h->chip_scsi_id, h->isa_dma_speed, 
h->isa_dma_channel, h->chip_version);
	printk(" mcode_date 0x%x, mcode_version %d\n",
	       h->mcode_date, h->mcode_version);
}

/*
 * asc_prt_adv_dvc_var()
 *
 * Display an ADV_DVC_VAR structure.
 */
static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h)
{
	printk(" ADV_DVC_VAR at addr 0x%lx\n", (ulong)h);
	printk(" iop_base 0x%lx, err_code 0x%x, ultra_able 0x%x\n",
	       (ulong)h->iop_base, h->err_code, (unsigned)h->ultra_able);
	printk(" sdtr_able 0x%x, wdtr_able 0x%x\n", (unsigned)h->sdtr_able,
	       (unsigned)h->wdtr_able);
	printk(" start_motor 0x%x, scsi_reset_wait 0x%x\n",
	       (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait);
	/* Fix: format was "0x%lxn\n" - a stray 'n' after the %lx conversion. */
	printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%lx\n",
	       (unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng,
	       (ulong)h->carr_freelist);
	printk(" icq_sp 0x%lx, irq_sp 0x%lx\n", (ulong)h->icq_sp,
	       (ulong)h->irq_sp);
	printk(" no_scam 0x%x, tagqng_able 0x%x\n", (unsigned)h->no_scam,
	       (unsigned)h->tagqng_able);
	printk(" chip_scsi_id 0x%x, cfg 0x%lx\n", (unsigned)h->chip_scsi_id,
	       (ulong)h->cfg);
}

/*
 * asc_prt_adv_dvc_cfg()
 *
 * Display an ADV_DVC_CFG structure.
*/ static void asc_prt_adv_dvc_cfg(ADV_DVC_CFG *h) { printk(" ADV_DVC_CFG at addr 0x%lx\n", (ulong)h); printk(" disc_enable 0x%x, termination 0x%x\n", h->disc_enable, h->termination); printk(" chip_version 0x%x, mcode_date 0x%x\n", h->chip_version, h->mcode_date); printk(" mcode_version 0x%x, control_flag 0x%x\n", h->mcode_version, h->control_flag); } /* * asc_prt_scsi_host() */ static void asc_prt_scsi_host(struct Scsi_Host *s) { struct asc_board *boardp = shost_priv(s); printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); printk(" host_busy %u, host_no %d, last_reset %d,\n", s->host_busy, s->host_no, (unsigned)s->last_reset); printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", (ulong)s->base, (ulong)s->io_port, boardp->irq); printk(" dma_channel %d, this_id %d, can_queue %d,\n", s->dma_channel, s->this_id, s->can_queue); printk(" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d\n", s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma); if (ASC_NARROW_BOARD(boardp)) { asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var); asc_prt_asc_dvc_cfg(&boardp->dvc_cfg.asc_dvc_cfg); } else { asc_prt_adv_dvc_var(&boardp->dvc_var.adv_dvc_var); asc_prt_adv_dvc_cfg(&boardp->dvc_cfg.adv_dvc_cfg); } } /* * asc_prt_hex() * * Print hexadecimal output in 4 byte groupings 32 bytes * or 8 double-words per line. */ static void asc_prt_hex(char *f, uchar *s, int l) { int i; int j; int k; int m; printk("%s: (%d bytes)\n", f, l); for (i = 0; i < l; i += 32) { /* Display a maximum of 8 double-words per line. 
*/ if ((k = (l - i) / 4) >= 8) { k = 8; m = 0; } else { m = (l - i) % 4; } for (j = 0; j < k; j++) { printk(" %2.2X%2.2X%2.2X%2.2X", (unsigned)s[i + (j * 4)], (unsigned)s[i + (j * 4) + 1], (unsigned)s[i + (j * 4) + 2], (unsigned)s[i + (j * 4) + 3]); } switch (m) { case 0: default: break; case 1: printk(" %2.2X", (unsigned)s[i + (j * 4)]); break; case 2: printk(" %2.2X%2.2X", (unsigned)s[i + (j * 4)], (unsigned)s[i + (j * 4) + 1]); break; case 3: printk(" %2.2X%2.2X%2.2X", (unsigned)s[i + (j * 4) + 1], (unsigned)s[i + (j * 4) + 2], (unsigned)s[i + (j * 4) + 3]); break; } printk("\n"); } } /* * asc_prt_asc_scsi_q() */ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q) { ASC_SG_HEAD *sgp; int i; printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q); printk (" target_ix 0x%x, target_lun %u, srb_ptr 0x%lx, tag_code 0x%x,\n", q->q2.target_ix, q->q1.target_lun, (ulong)q->q2.srb_ptr, q->q2.tag_code); printk (" data_addr 0x%lx, data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", (ulong)le32_to_cpu(q->q1.data_addr), (ulong)le32_to_cpu(q->q1.data_cnt), (ulong)le32_to_cpu(q->q1.sense_addr), q->q1.sense_len); printk(" cdbptr 0x%lx, cdb_len %u, sg_head 0x%lx, sg_queue_cnt %u\n", (ulong)q->cdbptr, q->q2.cdb_len, (ulong)q->sg_head, q->q1.sg_queue_cnt); if (q->sg_head) { sgp = q->sg_head; printk("ASC_SG_HEAD at addr 0x%lx\n", (ulong)sgp); printk(" entry_cnt %u, queue_cnt %u\n", sgp->entry_cnt, sgp->queue_cnt); for (i = 0; i < sgp->entry_cnt; i++) { printk(" [%u]: addr 0x%lx, bytes %lu\n", i, (ulong)le32_to_cpu(sgp->sg_list[i].addr), (ulong)le32_to_cpu(sgp->sg_list[i].bytes)); } } } /* * asc_prt_asc_qdone_info() */ static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q) { printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q); printk(" srb_ptr 0x%lx, target_ix %u, cdb_len %u, tag_code %u,\n", (ulong)q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len, q->d2.tag_code); printk (" done_stat 0x%x, host_stat 0x%x, scsi_stat 0x%x, scsi_msg 0x%x\n", q->d3.done_stat, q->d3.host_stat, q->d3.scsi_stat, 
q->d3.scsi_msg); } /* * asc_prt_adv_sgblock() * * Display an ADV_SG_BLOCK structure. */ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b) { int i; printk(" ASC_SG_BLOCK at addr 0x%lx (sgblockno %d)\n", (ulong)b, sgblockno); printk(" sg_cnt %u, sg_ptr 0x%lx\n", b->sg_cnt, (ulong)le32_to_cpu(b->sg_ptr)); BUG_ON(b->sg_cnt > NO_OF_SG_PER_BLOCK); if (b->sg_ptr != 0) BUG_ON(b->sg_cnt != NO_OF_SG_PER_BLOCK); for (i = 0; i < b->sg_cnt; i++) { printk(" [%u]: sg_addr 0x%lx, sg_count 0x%lx\n", i, (ulong)b->sg_list[i].sg_addr, (ulong)b->sg_list[i].sg_count); } } /* * asc_prt_adv_scsi_req_q() * * Display an ADV_SCSI_REQ_Q structure. */ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q) { int sg_blk_cnt; struct asc_sg_block *sg_ptr; printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q); printk(" target_id %u, target_lun %u, srb_ptr 0x%lx, a_flag 0x%x\n", q->target_id, q->target_lun, (ulong)q->srb_ptr, q->a_flag); printk(" cntl 0x%x, data_addr 0x%lx, vdata_addr 0x%lx\n", q->cntl, (ulong)le32_to_cpu(q->data_addr), (ulong)q->vdata_addr); printk(" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", (ulong)le32_to_cpu(q->data_cnt), (ulong)le32_to_cpu(q->sense_addr), q->sense_len); printk (" cdb_len %u, done_status 0x%x, host_status 0x%x, scsi_status 0x%x\n", q->cdb_len, q->done_status, q->host_status, q->scsi_status); printk(" sg_working_ix 0x%x, target_cmd %u\n", q->sg_working_ix, q->target_cmd); printk(" scsiq_rptr 0x%lx, sg_real_addr 0x%lx, sg_list_ptr 0x%lx\n", (ulong)le32_to_cpu(q->scsiq_rptr), (ulong)le32_to_cpu(q->sg_real_addr), (ulong)q->sg_list_ptr); /* Display the request's ADV_SG_BLOCK structures. */ if (q->sg_list_ptr != NULL) { sg_blk_cnt = 0; while (1) { /* * 'sg_ptr' is a physical address. Convert it to a virtual * address by indexing 'sg_blk_cnt' into the virtual address * array 'sg_list_ptr'. * * XXX - Assumes all SG physical blocks are virtually contiguous. 
			 */
			sg_ptr =
			    &(((ADV_SG_BLOCK *)(q->sg_list_ptr))[sg_blk_cnt]);
			asc_prt_adv_sgblock(sg_blk_cnt, sg_ptr);
			/* sg_ptr == 0 marks the last block of the chain. */
			if (sg_ptr->sg_ptr == 0) {
				break;
			}
			sg_blk_cnt++;
		}
	}
}
#endif /* ADVANSYS_DEBUG */

/*
 * The advansys chip/microcode contains a 32-bit identifier for each command
 * known as the 'srb'. I don't know what it stands for. The driver used
 * to encode the scsi_cmnd pointer by calling virt_to_bus and retrieve it
 * with bus_to_virt. Now the driver keeps a per-host map of integers to
 * pointers. It auto-expands when full, unless it can't allocate memory.
 * Note that an srb of 0 is treated specially by the chip/firmware, hence
 * the return of i+1 in this routine, and the corresponding subtraction in
 * the inverse routine.
 */
#define BAD_SRB 0
static u32 advansys_ptr_to_srb(struct asc_dvc_var *asc_dvc, void *ptr)
{
	int i;
	void **new_ptr;

	/* Reuse the first free slot if one exists. */
	for (i = 0; i < asc_dvc->ptr_map_count; i++) {
		if (!asc_dvc->ptr_map[i])
			goto out;
	}

	/* No free slot: double the map (1 on first use). */
	if (asc_dvc->ptr_map_count == 0)
		asc_dvc->ptr_map_count = 1;
	else
		asc_dvc->ptr_map_count *= 2;

	new_ptr = krealloc(asc_dvc->ptr_map,
			   asc_dvc->ptr_map_count * sizeof(void *), GFP_ATOMIC);
	if (!new_ptr)
		return BAD_SRB;
	asc_dvc->ptr_map = new_ptr;
 out:
	ASC_DBG(3, "Putting ptr %p into array offset %d\n", ptr, i);
	asc_dvc->ptr_map[i] = ptr;
	return i + 1;	/* srb 0 is reserved by the firmware (BAD_SRB). */
}

/*
 * Inverse of advansys_ptr_to_srb(): look up and release the pointer
 * registered under 'srb'.  Returns NULL for an out-of-range srb
 * (including BAD_SRB, which wraps to a huge unsigned index).
 */
static void *advansys_srb_to_ptr(struct asc_dvc_var *asc_dvc, u32 srb)
{
	void *ptr;

	srb--;
	if (srb >= asc_dvc->ptr_map_count) {
		printk("advansys: bad SRB %u, max %u\n", srb,
		       asc_dvc->ptr_map_count);
		return NULL;
	}
	ptr = asc_dvc->ptr_map[srb];
	asc_dvc->ptr_map[srb] = NULL;	/* free the slot for reuse */
	ASC_DBG(3, "Returning ptr %p from array offset %d\n", ptr, srb);
	return ptr;
}

/*
 * advansys_info()
 *
 * Return suitable for printing on the console with the argument
 * adapter's configuration information.
 *
 * Note: The information line should not exceed ASC_INFO_SIZE bytes,
 * otherwise the static 'info' array will be overrun.
 */
static const char *advansys_info(struct Scsi_Host *shost)
{
	static char info[ASC_INFO_SIZE];
	struct asc_board *boardp = shost_priv(shost);
	ASC_DVC_VAR *asc_dvc_varp;
	ADV_DVC_VAR *adv_dvc_varp;
	char *busname;
	char *widename = NULL;

	if (ASC_NARROW_BOARD(boardp)) {
		asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
		ASC_DBG(1, "begin\n");
		if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
			if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) ==
			    ASC_IS_ISAPNP) {
				busname = "ISA PnP";
			} else {
				busname = "ISA";
			}
			/* ISA boards also report their DMA channel. */
			sprintf(info,
				"AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X, DMA 0x%X",
				ASC_VERSION, busname, (ulong)shost->io_port,
				(ulong)shost->io_port + ASC_IOADR_GAP - 1,
				boardp->irq, shost->dma_channel);
		} else {
			if (asc_dvc_varp->bus_type & ASC_IS_VL) {
				busname = "VL";
			} else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
				busname = "EISA";
			} else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
				if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
				    == ASC_IS_PCI_ULTRA) {
					busname = "PCI Ultra";
				} else {
					busname = "PCI";
				}
			} else {
				busname = "?";
				shost_printk(KERN_ERR, shost, "unknown bus "
					     "type %d\n",
					     asc_dvc_varp->bus_type);
			}
			sprintf(info,
				"AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
				ASC_VERSION, busname, (ulong)shost->io_port,
				(ulong)shost->io_port + ASC_IOADR_GAP - 1,
				boardp->irq);
		}
	} else {
		/*
		 * Wide Adapter Information
		 *
		 * Memory-mapped I/O is used instead of I/O space to access
		 * the adapter, but display the I/O Port range. The Memory
		 * I/O address is displayed through the driver /proc file.
		 */
		adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
		if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
			widename = "Ultra-Wide";
		} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
			widename = "Ultra2-Wide";
		} else {
			widename = "Ultra3-Wide";
		}
		sprintf(info,
			"AdvanSys SCSI %s: PCI %s: PCIMEM 0x%lX-0x%lX, IRQ 0x%X",
			ASC_VERSION, widename, (ulong)adv_dvc_varp->iop_base,
			(ulong)adv_dvc_varp->iop_base + boardp->asc_n_io_port - 1,
			boardp->irq);
	}
	/* Overflow is only detected after the fact. */
	BUG_ON(strlen(info) >= ASC_INFO_SIZE);
	ASC_DBG(1, "end\n");
	return info;
}

#ifdef CONFIG_PROC_FS

/*
 * asc_prt_line()
 *
 * If 'cp' is NULL print to the console, otherwise print to a buffer.
 *
 * Return 0 if printing to the console, otherwise return the number of
 * bytes written to the buffer.
 *
 * Note: If any single line is greater than ASC_PRTLINE_SIZE bytes the stack
 * will be corrupted. 's[]' is defined to be ASC_PRTLINE_SIZE bytes.
 */
static int asc_prt_line(char *buf, int buflen, char *fmt, ...)
{
	va_list args;
	int ret;
	char s[ASC_PRTLINE_SIZE];

	va_start(args, fmt);
	/*
	 * NOTE(review): vsprintf into a fixed stack buffer; an oversized
	 * line corrupts the stack before the BUG_ON fires — vsnprintf
	 * would be safer.
	 */
	ret = vsprintf(s, fmt, args);
	BUG_ON(ret >= ASC_PRTLINE_SIZE);
	if (buf == NULL) {
		(void)printk(s);
		ret = 0;
	} else {
		/* Copy at most 'buflen' bytes; caller tracks remaining room. */
		ret = min(buflen, ret);
		memcpy(buf, s, ret);
	}
	va_end(args);
	return ret;
}

/*
 * asc_prt_board_devices()
 *
 * Print driver information for devices attached to the board.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_board_devices(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	int chip_scsi_id;
	int i;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nDevice Information for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	/* The host adapter's own SCSI ID lives in different structs per family. */
	if (ASC_NARROW_BOARD(boardp)) {
		chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
	} else {
		chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
	}

	len = asc_prt_line(cp, leftlen, "Target IDs Detected:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) {
			len = asc_prt_line(cp, leftlen, " %X,", i);
			ASC_PRT_NEXT();
		}
	}
	len = asc_prt_line(cp, leftlen, " (%X=Host Adapter)\n", chip_scsi_id);
	ASC_PRT_NEXT();

	return totlen;
}

/*
 * Display Wide Board BIOS Information.
 */
static int asc_prt_adv_bios(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	ushort major, minor, letter;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen, "\nROM BIOS Version: ");
	ASC_PRT_NEXT();

	/*
	 * If the BIOS saved a valid signature, then fill in
	 * the BIOS code segment base address.
	 */
	if (boardp->bios_signature != 0x55AA) {
		len = asc_prt_line(cp, leftlen, "Disabled or Pre-3.1\n");
		ASC_PRT_NEXT();
		len = asc_prt_line(cp, leftlen,
				   "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n");
		ASC_PRT_NEXT();
		len = asc_prt_line(cp, leftlen,
				   "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
		ASC_PRT_NEXT();
	} else {
		/* BIOS version packs major/minor/letter into one ushort. */
		major = (boardp->bios_version >> 12) & 0xF;
		minor = (boardp->bios_version >> 8) & 0xF;
		letter = (boardp->bios_version & 0xFF);

		len = asc_prt_line(cp, leftlen, "%d.%d%c\n",
				   major, minor,
				   letter >= 26 ? '?' : letter + 'A');
		ASC_PRT_NEXT();

		/*
		 * Current available ROM BIOS release is 3.1I for UW
		 * and 3.2I for U2W. This code doesn't differentiate
		 * UW and U2W boards.
		 */
		if (major < 3 || (major <= 3 && minor < 1) ||
		    (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) {
			len = asc_prt_line(cp, leftlen,
					   "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n");
			ASC_PRT_NEXT();
			len = asc_prt_line(cp, leftlen,
					   "ftp://ftp.connectcom.net/pub\n");
			ASC_PRT_NEXT();
		}
	}

	return totlen;
}

/*
 * Add serial number to information bar if signature AAh
 * is found in at bit 15-9 (7 bits) of word 1.
 *
 * Serial Number consists fo 12 alpha-numeric digits.
 *
 * 1 - Product type (A,B,C,D..) Word0: 15-13 (3 bits)
 * 2 - MFG Location (A,B,C,D..) Word0: 12-10 (3 bits)
 * 3-4 - Product ID (0-99) Word0: 9-0 (10 bits)
 * 5 - Product revision (A-J) Word0: " "
 *
 * Signature Word1: 15-9 (7 bits)
 * 6 - Year (0-9) Word1: 8-6 (3 bits) & Word2: 15 (1 bit)
 * 7-8 - Week of the year (1-52) Word1: 5-0 (6 bits)
 *
 * 9-12 - Serial Number (A001-Z999) Word2: 14-0 (15 bits)
 *
 * Note 1: Only production cards will have a serial number.
 *
 * Note 2: Signature is most significant 7 bits (0xFE).
 *
 * Returns ASC_TRUE if serial number found, otherwise returns ASC_FALSE.
 */
static int asc_get_eeprom_string(ushort *serialnum, uchar *cp)
{
	ushort w, num;

	if ((serialnum[1] & 0xFE00) != ((ushort)0xAA << 8)) {
		return ASC_FALSE;
	} else {
		/*
		 * First word - 6 digits.
		 */
		w = serialnum[0];

		/* Product type - 1st digit. */
		if ((*cp = 'A' + ((w & 0xE000) >> 13)) == 'H') {
			/* Product type is P=Prototype */
			*cp += 0x8;
		}
		cp++;

		/* Manufacturing location - 2nd digit. */
		*cp++ = 'A' + ((w & 0x1C00) >> 10);

		/* Product ID - 3rd, 4th digits. */
		num = w & 0x3FF;
		*cp++ = '0' + (num / 100);
		num %= 100;
		*cp++ = '0' + (num / 10);

		/* Product revision - 5th digit. */
		*cp++ = 'A' + (num % 10);

		/*
		 * Second word
		 */
		w = serialnum[1];

		/*
		 * Year - 6th digit.
		 *
		 * If bit 15 of third word is set, then the
		 * last digit of the year is greater than 7.
		 */
		if (serialnum[2] & 0x8000) {
			*cp++ = '8' + ((w & 0x1C0) >> 6);
		} else {
			*cp++ = '0' + ((w & 0x1C0) >> 6);
		}

		/* Week of year - 7th, 8th digits. */
		num = w & 0x003F;
		*cp++ = '0' + num / 10;
		num %= 10;
		*cp++ = '0' + num;

		/*
		 * Third word
		 */
		w = serialnum[2] & 0x7FFF;

		/* Serial number - 9th digit. */
		*cp++ = 'A' + (w / 1000);

		/* 10th, 11th, 12th digits. */
		num = w % 1000;
		*cp++ = '0' + num / 100;
		num %= 100;
		*cp++ = '0' + num / 10;
		num %= 10;
		*cp++ = '0' + num;

		*cp = '\0';	/* Null Terminate the string. */

		return ASC_TRUE;
	}
}

/*
 * asc_prt_asc_board_eeprom()
 *
 * Print board EEPROM configuration.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_asc_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	ASC_DVC_VAR *asc_dvc_varp;
	int leftlen;
	int totlen;
	int len;
	ASCEEP_CONFIG *ep;
	int i;
#ifdef CONFIG_ISA
	int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
#endif /* CONFIG_ISA */
	uchar serialstr[13];

	asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
	ep = &boardp->eep_config.asc_eep;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	if (asc_get_eeprom_string((ushort *)&ep->adapter_info[0], serialstr)
	    == ASC_TRUE) {
		len = asc_prt_line(cp, leftlen, " Serial Number: %s\n",
				   serialstr);
		ASC_PRT_NEXT();
	} else {
		/* 0xBB in adapter_info[5] marks an EEPROM-less adapter. */
		if (ep->adapter_info[5] == 0xBB) {
			len = asc_prt_line(cp, leftlen,
					   " Default Settings Used for EEPROM-less Adapter.\n");
			ASC_PRT_NEXT();
		} else {
			len = asc_prt_line(cp, leftlen,
					   " Serial Number Signature Not Present.\n");
			ASC_PRT_NEXT();
		}
	}

	len = asc_prt_line(cp, leftlen,
			   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
			   ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng,
			   ep->max_tag_qng);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " cntl 0x%x, no_scam 0x%x\n",
			   ep->cntl, ep->no_scam);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Target ID: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %d", i);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* One Y/N column per target ID for each per-target bitmask below. */
	len = asc_prt_line(cp, leftlen, " Disconnects: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->
				    disc_enable & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Command Queuing: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->
				    use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Start Motor: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->
				    start_motor & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->
				    init_sdtr & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

#ifdef CONFIG_ISA
	if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
		len = asc_prt_line(cp, leftlen,
				   " Host ISA DMA speed: %d MB/S\n",
				   isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
		ASC_PRT_NEXT();
	}
#endif /* CONFIG_ISA */

	return totlen;
}

/*
 * asc_prt_adv_board_eeprom()
 *
 * Print board EEPROM configuration.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	ADV_DVC_VAR *adv_dvc_varp;
	int leftlen;
	int totlen;
	int len;
	int i;
	char *termstr;
	uchar serialstr[13];
	/* Exactly one of the three EEPROM layout pointers is set per chip. */
	ADVEEP_3550_CONFIG *ep_3550 = NULL;
	ADVEEP_38C0800_CONFIG *ep_38C0800 = NULL;
	ADVEEP_38C1600_CONFIG *ep_38C1600 = NULL;
	ushort word;
	ushort *wordp;
	ushort sdtr_speed = 0;

	adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		ep_3550 = &boardp->eep_config.adv_3550_eep;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		ep_38C0800 = &boardp->eep_config.adv_38C0800_eep;
	} else {
		ep_38C1600 = &boardp->eep_config.adv_38C1600_eep;
	}

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		wordp = &ep_3550->serial_number_word1;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		wordp = &ep_38C0800->serial_number_word1;
	} else {
		wordp = &ep_38C1600->serial_number_word1;
	}

	if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) {
		len = asc_prt_line(cp, leftlen, " Serial Number: %s\n",
				   serialstr);
		ASC_PRT_NEXT();
	} else {
		len = asc_prt_line(cp, leftlen,
				   " Serial Number Signature Not Present.\n");
		ASC_PRT_NEXT();
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen,
				   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
				   ep_3550->adapter_scsi_id,
				   ep_3550->max_host_qng, ep_3550->max_dvc_qng);
		ASC_PRT_NEXT();
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		len = asc_prt_line(cp, leftlen,
				   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
				   ep_38C0800->adapter_scsi_id,
				   ep_38C0800->max_host_qng,
				   ep_38C0800->max_dvc_qng);
		ASC_PRT_NEXT();
	} else {
		len = asc_prt_line(cp, leftlen,
				   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
				   ep_38C1600->adapter_scsi_id,
				   ep_38C1600->max_host_qng,
				   ep_38C1600->max_dvc_qng);
		ASC_PRT_NEXT();
	}
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->termination;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->termination_lvd;
	} else {
		word = ep_38C1600->termination_lvd;
	}
	switch (word) {
	case 1:
		termstr = "Low Off/High Off";
		break;
	case 2:
		termstr = "Low Off/High On";
		break;
	case 3:
		termstr = "Low On/High On";
		break;
	default:
	case 0:
		termstr = "Automatic";
		break;
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen,
				   " termination: %u (%s), bios_ctrl: 0x%x\n",
				   ep_3550->termination, termstr,
				   ep_3550->bios_ctrl);
		ASC_PRT_NEXT();
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		len = asc_prt_line(cp, leftlen,
				   " termination: %u (%s), bios_ctrl: 0x%x\n",
				   ep_38C0800->termination_lvd, termstr,
				   ep_38C0800->bios_ctrl);
		ASC_PRT_NEXT();
	} else {
		len = asc_prt_line(cp, leftlen,
				   " termination: %u (%s), bios_ctrl: 0x%x\n",
				   ep_38C1600->termination_lvd, termstr,
				   ep_38C1600->bios_ctrl);
		ASC_PRT_NEXT();
	}

	len = asc_prt_line(cp, leftlen, " Target ID: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %X", i);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* One Y/N column per target for each per-target capability mask. */
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->disc_enable;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->disc_enable;
	} else {
		word = ep_38C1600->disc_enable;
	}
	len = asc_prt_line(cp, leftlen, " Disconnects: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->tagqng_able;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->tagqng_able;
	} else {
		word = ep_38C1600->tagqng_able;
	}
	len = asc_prt_line(cp, leftlen, " Command Queuing: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->start_motor;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->start_motor;
	} else {
		word = ep_38C1600->start_motor;
	}
	len = asc_prt_line(cp, leftlen, " Start Motor: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* SDTR and Ultra masks only exist in the ASC3550 EEPROM layout. */
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
		ASC_PRT_NEXT();
		for (i = 0; i <= ADV_MAX_TID; i++) {
			len = asc_prt_line(cp, leftlen, " %c",
					   (ep_3550->
					    sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
					   'Y' : 'N');
			ASC_PRT_NEXT();
		}
		len = asc_prt_line(cp, leftlen, "\n");
		ASC_PRT_NEXT();
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen, " Ultra Transfer: ");
		ASC_PRT_NEXT();
		for (i = 0; i <= ADV_MAX_TID; i++) {
			len = asc_prt_line(cp, leftlen, " %c",
					   (ep_3550->
					    ultra_able & ADV_TID_TO_TIDMASK(i))
					   ? 'Y' : 'N');
			ASC_PRT_NEXT();
		}
		len = asc_prt_line(cp, leftlen, "\n");
		ASC_PRT_NEXT();
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->wdtr_able;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->wdtr_able;
	} else {
		word = ep_38C1600->wdtr_able;
	}
	len = asc_prt_line(cp, leftlen, " Wide Transfer: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 ||
	    adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) {
		len = asc_prt_line(cp, leftlen,
				   " Synchronous Transfer Speed (Mhz):\n ");
		ASC_PRT_NEXT();
		for (i = 0; i <= ADV_MAX_TID; i++) {
			char *speed_str;

			/*
			 * Four 16-bit registers each hold 4 targets'
			 * speeds, one nibble per target; shift out a
			 * nibble per iteration.
			 */
			if (i == 0) {
				sdtr_speed = adv_dvc_varp->sdtr_speed1;
			} else if (i == 4) {
				sdtr_speed = adv_dvc_varp->sdtr_speed2;
			} else if (i == 8) {
				sdtr_speed = adv_dvc_varp->sdtr_speed3;
			} else if (i == 12) {
				sdtr_speed = adv_dvc_varp->sdtr_speed4;
			}
			switch (sdtr_speed & ADV_MAX_TID) {
			case 0:
				speed_str = "Off";
				break;
			case 1:
				speed_str = " 5";
				break;
			case 2:
				speed_str = " 10";
				break;
			case 3:
				speed_str = " 20";
				break;
			case 4:
				speed_str = " 40";
				break;
			case 5:
				speed_str = " 80";
				break;
			default:
				speed_str = "Unk";
				break;
			}
			len = asc_prt_line(cp, leftlen, "%X:%s ", i, speed_str);
			ASC_PRT_NEXT();
			if (i == 7) {
				len = asc_prt_line(cp, leftlen, "\n ");
				ASC_PRT_NEXT();
			}
			sdtr_speed >>= 4;
		}
		len = asc_prt_line(cp, leftlen, "\n");
		ASC_PRT_NEXT();
	}

	return totlen;
}

/*
 * asc_prt_driver_conf()
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_driver_conf(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	int chip_scsi_id;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " host_busy %u, last_reset %u, max_id %u, max_lun %u, max_channel %u\n",
			   shost->host_busy, shost->last_reset, shost->max_id,
			   shost->max_lun, shost->max_channel);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n",
			   shost->unique_id, shost->can_queue, shost->this_id,
			   shost->sg_tablesize, shost->cmd_per_lun);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " unchecked_isa_dma %d, use_clustering %d\n",
			   shost->unchecked_isa_dma, shost->use_clustering);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " flags 0x%x, last_reset 0x%x, jiffies 0x%x, asc_n_io_port 0x%x\n",
			   boardp->flags, boardp->last_reset, jiffies,
			   boardp->asc_n_io_port);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " io_port 0x%x\n", shost->io_port);
	ASC_PRT_NEXT();

	/* NOTE(review): chip_scsi_id is computed but never printed or used. */
	if (ASC_NARROW_BOARD(boardp)) {
		chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
	} else {
		chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
	}

	return totlen;
}

/*
 * asc_prt_asc_board_info()
 *
 * Print dynamic board configuration information.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_asc_board_info(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int chip_scsi_id;
	int leftlen;
	int totlen;
	int len;
	ASC_DVC_VAR *v;
	ASC_DVC_CFG *c;
	int i;
	int renegotiate = 0;

	v = &boardp->dvc_var.asc_dvc_var;
	c = &boardp->dvc_cfg.asc_dvc_cfg;
	chip_scsi_id = c->chip_scsi_id;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " chip_version %u, mcode_date 0x%x, "
			   "mcode_version 0x%x, err_code %u\n",
			   c->chip_version, c->mcode_date,
			   c->mcode_version, v->err_code);
	ASC_PRT_NEXT();

	/* Current number of commands waiting for the host. */
	len = asc_prt_line(cp, leftlen,
			   " Total Command Pending: %d\n", v->cur_total_qng);
	ASC_PRT_NEXT();

	/*
	 * Each per-target loop below skips the host adapter's own ID and
	 * targets that were not detected at init time (init_tidmask).
	 */
	len = asc_prt_line(cp, leftlen, " Command Queuing:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (v->
				    use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Current number of commands waiting for a device. */
	len = asc_prt_line(cp, leftlen, " Command Queue Pending:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%u", i,
				   v->cur_dvc_qng[i]);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Current limit on number of commands that can be sent to a device.
	 */
	len = asc_prt_line(cp, leftlen, " Command Queue Limit:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%u", i,
				   v->max_dvc_qng[i]);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Indicate whether the device has returned queue full status. */
	len = asc_prt_line(cp, leftlen, " Command Queue Full:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) {
			len = asc_prt_line(cp, leftlen, " %X:Y-%d",
					   i, boardp->queue_full_cnt[i]);
		} else {
			len = asc_prt_line(cp, leftlen, " %X:N", i);
		}
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (v->
				    sdtr_done & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Per-target negotiated synchronous transfer parameters. */
	for (i = 0; i <= ASC_MAX_TID; i++) {
		uchar syn_period_ix;

		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) ||
		    ((v->init_sdtr & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		len = asc_prt_line(cp, leftlen, " %X:", i);
		ASC_PRT_NEXT();

		if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) {
			len = asc_prt_line(cp, leftlen, " Asynchronous");
			ASC_PRT_NEXT();
		} else {
			/* High nibble of sdtr_data indexes the period table. */
			syn_period_ix =
			    (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index -
							   1);

			len = asc_prt_line(cp, leftlen,
					   " Transfer Period Factor: %d (%d.%d Mhz),",
					   v->sdtr_period_tbl[syn_period_ix],
					   250 /
					   v->sdtr_period_tbl[syn_period_ix],
					   ASC_TENTHS(250,
						      v->
						      sdtr_period_tbl
						      [syn_period_ix]));
			ASC_PRT_NEXT();

			len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d",
					   boardp->
					   sdtr_data[i] & ASC_SYN_MAX_OFFSET);
			ASC_PRT_NEXT();
		}

		/* '*' marks a negotiation still pending for this target. */
		if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
			len = asc_prt_line(cp, leftlen, "*\n");
			renegotiate = 1;
		} else {
			len = asc_prt_line(cp, leftlen, "\n");
		}
		ASC_PRT_NEXT();
	}

	if (renegotiate) {
		len = asc_prt_line(cp, leftlen,
				   " * = Re-negotiation pending before next command.\n");
		ASC_PRT_NEXT();
	}

	return totlen;
}

/*
 * asc_prt_adv_board_info()
 *
 * Print dynamic board configuration information.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	int i;
	ADV_DVC_VAR *v;
	ADV_DVC_CFG *c;
	AdvPortAddr iop_base;
	ushort chip_scsi_id;
	ushort lramword;
	uchar lrambyte;
	ushort tagqng_able;
	ushort sdtr_able, wdtr_able;
	ushort wdtr_done, sdtr_done;
	ushort period = 0;
	int renegotiate = 0;

	v = &boardp->dvc_var.adv_dvc_var;
	c = &boardp->dvc_cfg.adv_dvc_cfg;
	iop_base = v->iop_base;
	chip_scsi_id = v->chip_scsi_id;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " iop_base 0x%lx, cable_detect: %X, err_code %u\n",
			   v->iop_base,
			   AdvReadWordRegister(iop_base,
					       IOPW_SCSI_CFG1) & CABLE_DETECT,
			   v->err_code);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " chip_version %u, mcode_date 0x%x, "
			   "mcode_version 0x%x\n",
			   c->chip_version, c->mcode_date, c->mcode_version);
	ASC_PRT_NEXT();

	/*
	 * The per-target state below lives in the microcode's local RAM;
	 * each table is read through the AdvRead*Lram() accessor macros.
	 * All loops skip the adapter's own ID and undetected targets.
	 */
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	len = asc_prt_line(cp, leftlen, " Queuing Enabled:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (tagqng_able & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Queue Limit:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i,
				lrambyte);
		len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Command Pending:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i,
				lrambyte);
		len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	len = asc_prt_line(cp, leftlen, " Wide Enabled:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (wdtr_able & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done);
	len = asc_prt_line(cp, leftlen, " Transfer Bit Width:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		AdvReadWordLram(iop_base,
				ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
				lramword);
		/* Bit 15 of the handshake word selects 16-bit transfers. */
		len = asc_prt_line(cp, leftlen, " %X:%d",
				   i, (lramword & 0x8000) ? 16 : 8);
		ASC_PRT_NEXT();

		/* '*' marks a wide negotiation still pending. */
		if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) &&
		    (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
			len = asc_prt_line(cp, leftlen, "*");
			ASC_PRT_NEXT();
			renegotiate = 1;
		}
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	len = asc_prt_line(cp, leftlen, " Synchronous Enabled:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done);
	for (i = 0; i <= ADV_MAX_TID; i++) {

		AdvReadWordLram(iop_base,
				ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
				lramword);
		lramword &= ~0x8000;	/* strip the wide (16-bit) flag */

		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) ||
		    ((sdtr_able & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		len = asc_prt_line(cp, leftlen, " %X:", i);
		ASC_PRT_NEXT();

		if ((lramword & 0x1F) == 0) {	/* Check for REQ/ACK Offset 0. */
			len = asc_prt_line(cp, leftlen, " Asynchronous");
			ASC_PRT_NEXT();
		} else {
			len = asc_prt_line(cp, leftlen,
					   " Transfer Period Factor: ");
			ASC_PRT_NEXT();

			if ((lramword & 0x1F00) == 0x1100) {	/* 80 Mhz */
				len = asc_prt_line(cp, leftlen,
						   "9 (80.0 Mhz),");
				ASC_PRT_NEXT();
			} else if ((lramword & 0x1F00) == 0x1000) {	/* 40 Mhz */
				len = asc_prt_line(cp, leftlen,
						   "10 (40.0 Mhz),");
				ASC_PRT_NEXT();
			} else {	/* 20 Mhz or below. */

				period = (((lramword >> 8) * 25) + 50) / 4;

				if (period == 0) {	/* Should never happen. */
					/*
					 * NOTE(review): "%d (? Mhz), " has a
					 * %d conversion but no matching
					 * argument is passed — undefined
					 * output if this branch ever runs.
					 */
					len = asc_prt_line(cp, leftlen,
							   "%d (? Mhz), ");
					ASC_PRT_NEXT();
				} else {
					len = asc_prt_line(cp, leftlen,
							   "%d (%d.%d Mhz),",
							   period,
							   250 / period,
							   ASC_TENTHS(250,
								      period));
					ASC_PRT_NEXT();
				}
			}

			len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d",
					   lramword & 0x1F);
			ASC_PRT_NEXT();
		}

		/* '*' marks a synchronous negotiation still pending. */
		if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
			len = asc_prt_line(cp, leftlen, "*\n");
			renegotiate = 1;
		} else {
			len = asc_prt_line(cp, leftlen, "\n");
		}
		ASC_PRT_NEXT();
	}

	if (renegotiate) {
		len = asc_prt_line(cp, leftlen,
				   " * = Re-negotiation pending before next command.\n");
		ASC_PRT_NEXT();
	}

	return totlen;
}

/*
 * asc_proc_copy()
 *
 * Copy proc information to a read buffer taking into account the current
 * read offset in the file and the remaining space in the read buffer.
 */
static int
asc_proc_copy(off_t advoffset, off_t offset, char *curbuf,
	      int leftlen, char *cp, int cplen)
{
	int cnt = 0;

	ASC_DBG(2, "offset %d, advoffset %d, cplen %d\n",
		(unsigned)offset, (unsigned)advoffset, cplen);
	if (offset <= advoffset) {
		/* Read offset below current offset, copy everything. */
		cnt = min(cplen, leftlen);
		ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n",
			(ulong)curbuf, (ulong)cp, cnt);
		memcpy(curbuf, cp, cnt);
	} else if (offset < advoffset + cplen) {
		/* Read offset within current range, partial copy. */
		cnt = (advoffset + cplen) - offset;
		cp = (cp + cplen) - cnt;
		cnt = min(cnt, leftlen);
		ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n",
			(ulong)curbuf, (ulong)cp, cnt);
		memcpy(curbuf, cp, cnt);
	}
	return cnt;
}

#ifdef ADVANSYS_STATS

/*
 * asc_prt_board_stats()
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
*/ static int asc_prt_board_stats(struct Scsi_Host *shost, char *cp, int cplen) { struct asc_board *boardp = shost_priv(shost); struct asc_stats *s = &boardp->asc_stats; int leftlen = cplen; int len, totlen = 0; len = asc_prt_line(cp, leftlen, "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n", shost->host_no); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " queuecommand %lu, reset %lu, biosparam %lu, interrupt %lu\n", s->queuecommand, s->reset, s->biosparam, s->interrupt); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " callback %lu, done %lu, build_error %lu, build_noreq %lu, build_nosg %lu\n", s->callback, s->done, s->build_error, s->adv_build_noreq, s->adv_build_nosg); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " exe_noerror %lu, exe_busy %lu, exe_error %lu, exe_unknown %lu\n", s->exe_noerror, s->exe_busy, s->exe_error, s->exe_unknown); ASC_PRT_NEXT(); /* * Display data transfer statistics. */ if (s->xfer_cnt > 0) { len = asc_prt_line(cp, leftlen, " xfer_cnt %lu, xfer_elem %lu, ", s->xfer_cnt, s->xfer_elem); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, "xfer_bytes %lu.%01lu kb\n", s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2)); ASC_PRT_NEXT(); /* Scatter gather transfer statistics */ len = asc_prt_line(cp, leftlen, " avg_num_elem %lu.%01lu, ", s->xfer_elem / s->xfer_cnt, ASC_TENTHS(s->xfer_elem, s->xfer_cnt)); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, "avg_elem_size %lu.%01lu kb, ", (s->xfer_sect / 2) / s->xfer_elem, ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem)); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, "avg_xfer_size %lu.%01lu kb\n", (s->xfer_sect / 2) / s->xfer_cnt, ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt)); ASC_PRT_NEXT(); } return totlen; } #endif /* ADVANSYS_STATS */ /* * advansys_proc_info() - /proc/scsi/advansys/{0,1,2,3,...} * * *buffer: I/O buffer * **start: if inout == FALSE pointer into buffer where user read should start * offset: current offset into a /proc/scsi/advansys/[0...] 
file
 * length: length of buffer
 * hostno: Scsi_Host host_no
 * inout: TRUE - user is writing; FALSE - user is reading
 *
 * Return the number of bytes read from or written to a
 * /proc/scsi/advansys/[0...] file.
 *
 * Note: This function uses the per board buffer 'prtbuf' which is
 * allocated when the board is initialized in advansys_detect(). The
 * buffer is ASC_PRTBUF_SIZE bytes. The function asc_proc_copy() is
 * used to write to the buffer. The way asc_proc_copy() is written
 * if 'prtbuf' is too small it will not be overwritten. Instead the
 * user just won't get all the available statistics.
 */
static int
advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
		   off_t offset, int length, int inout)
{
	struct asc_board *boardp = shost_priv(shost);
	char *cp;
	int cplen;
	int cnt;
	int totcnt;
	int leftlen;
	char *curbuf;
	off_t advoffset;

	ASC_DBG(1, "begin\n");

	/*
	 * User write not supported.
	 */
	if (inout == TRUE)
		return -ENOSYS;

	/*
	 * User read of /proc/scsi/advansys/[0...] file.
	 */

	/* Copy read data starting at the beginning of the buffer. */
	*start = buffer;
	curbuf = buffer;
	advoffset = 0;
	totcnt = 0;
	leftlen = length;

	/*
	 * Get board configuration information.
	 *
	 * advansys_info() returns the board string from its own static buffer.
	 */
	cp = (char *)advansys_info(shost);
	/* NOTE(review): appends to advansys_info()'s static buffer; assumes
	 * that buffer always has room for the extra '\n' -- TODO confirm. */
	strcat(cp, "\n");
	cplen = strlen(cp);
	/* Copy board information. */
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

	/*
	 * Display Wide Board BIOS Information.
	 */
	if (!ASC_NARROW_BOARD(boardp)) {
		cp = boardp->prtbuf;
		cplen = asc_prt_adv_bios(shost, cp, ASC_PRTBUF_SIZE);
		BUG_ON(cplen >= ASC_PRTBUF_SIZE);
		cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp,
				    cplen);
		totcnt += cnt;
		leftlen -= cnt;
		if (leftlen == 0) {
			ASC_DBG(1, "totcnt %d\n", totcnt);
			return totcnt;
		}
		advoffset += cplen;
		curbuf += cnt;
	}

	/*
	 * Display driver information for each device attached to the board.
	 */
	cp = boardp->prtbuf;
	cplen = asc_prt_board_devices(shost, cp, ASC_PRTBUF_SIZE);
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

	/*
	 * Display EEPROM configuration for the board.
	 */
	cp = boardp->prtbuf;
	if (ASC_NARROW_BOARD(boardp)) {
		cplen = asc_prt_asc_board_eeprom(shost, cp, ASC_PRTBUF_SIZE);
	} else {
		cplen = asc_prt_adv_board_eeprom(shost, cp, ASC_PRTBUF_SIZE);
	}
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

	/*
	 * Display driver configuration and information for the board.
	 */
	cp = boardp->prtbuf;
	cplen = asc_prt_driver_conf(shost, cp, ASC_PRTBUF_SIZE);
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

#ifdef ADVANSYS_STATS
	/*
	 * Display driver statistics for the board.
	 */
	cp = boardp->prtbuf;
	cplen = asc_prt_board_stats(shost, cp, ASC_PRTBUF_SIZE);
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;
#endif /* ADVANSYS_STATS */

	/*
	 * Display Asc Library dynamic configuration information
	 * for the board.
	 */
	cp = boardp->prtbuf;
	if (ASC_NARROW_BOARD(boardp)) {
		cplen = asc_prt_asc_board_info(shost, cp, ASC_PRTBUF_SIZE);
	} else {
		cplen = asc_prt_adv_board_info(shost, cp, ASC_PRTBUF_SIZE);
	}
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

	ASC_DBG(1, "totcnt %d\n", totcnt);

	return totcnt;
}
#endif /* CONFIG_PROC_FS */

/* Unmap DMA, count the completion and hand the command back to the
 * SCSI midlayer. */
static void asc_scsi_done(struct scsi_cmnd *scp)
{
	scsi_dma_unmap(scp);
	ASC_STATS(scp->device->host, done);
	scp->scsi_done(scp);
}

/* Select a chip register bank (0, 1 or 2) via the control register. */
static void AscSetBank(PortAddr iop_base, uchar bank)
{
	uchar val;

	val = AscGetChipControl(iop_base) &
	    (~
	     (CC_SINGLE_STEP | CC_TEST | CC_DIAG | CC_SCSI_RESET |
	      CC_CHIP_RESET));
	if (bank == 1) {
		val |= CC_BANK_ONE;
	} else if (bank == 2) {
		val |= CC_DIAG | CC_BANK_ONE;
	} else {
		val &= ~CC_BANK_ONE;
	}
	AscSetChipControl(iop_base, val);
}

/* Write an instruction code to the chip's IH register (bank 1 access). */
static void AscSetChipIH(PortAddr iop_base, ushort ins_code)
{
	AscSetBank(iop_base, 1);
	AscWriteChipIH(iop_base, ins_code);
	AscSetBank(iop_base, 0);
}

/* Release the chip from halt; returns 1 on success, 0 if still halted. */
static int AscStartChip(PortAddr iop_base)
{
	AscSetChipControl(iop_base, 0);
	if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
		return (0);
	}
	return (1);
}

/* Halt the chip; returns 1 when the halted status is observed. */
static int AscStopChip(PortAddr iop_base)
{
	uchar cc_val;

	cc_val = AscGetChipControl(iop_base) &
	    (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG));
	AscSetChipControl(iop_base, (uchar)(cc_val | CC_HALT));
	AscSetChipIH(iop_base, INS_HALT);
	AscSetChipIH(iop_base, INS_RFLAG_WTM);
	if
((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

/* Report whether the chip is both halted (status) and held (control). */
static int AscIsChipHalted(PortAddr iop_base)
{
	if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
		if ((AscGetChipControl(iop_base) & CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/* Reset both the chip and the SCSI bus; returns non-zero if the chip
 * ends up halted (see AscIsChipHalted()). */
static int AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc)
{
	PortAddr iop_base;
	int i = 10;

	iop_base = asc_dvc->iop_base;
	/* Wait (up to ~1s) for any in-progress SCSI reset to finish. */
	while ((AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE)
	       && (i-- > 0)) {
		mdelay(100);
	}
	AscStopChip(iop_base);
	AscSetChipControl(iop_base,
			  CC_CHIP_RESET | CC_SCSI_RESET | CC_HALT);
	udelay(60);
	AscSetChipIH(iop_base, INS_RFLAG_WTM);
	AscSetChipIH(iop_base, INS_HALT);
	AscSetChipControl(iop_base, CC_CHIP_RESET | CC_HALT);
	AscSetChipControl(iop_base, CC_HALT);
	mdelay(200);
	AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
	AscSetChipStatus(iop_base, 0);
	return (AscIsChipHalted(iop_base));
}

/* Probe for the ASC-1000 signature bytes; returns 1 if found. */
static int AscFindSignature(PortAddr iop_base)
{
	ushort sig_word;

	ASC_DBG(1, "AscGetChipSignatureByte(0x%x) 0x%x\n",
		iop_base, AscGetChipSignatureByte(iop_base));
	if (AscGetChipSignatureByte(iop_base) == (uchar)ASC_1000_ID1B) {
		ASC_DBG(1, "AscGetChipSignatureWord(0x%x) 0x%x\n",
			iop_base, AscGetChipSignatureWord(iop_base));
		sig_word = AscGetChipSignatureWord(iop_base);
		if ((sig_word == (ushort)ASC_1000_ID0W) ||
		    (sig_word == (ushort)ASC_1000_ID0W_FIX)) {
			return (1);
		}
	}
	return (0);
}

/* Turn the host interrupt on in the chip configuration register. */
static void AscEnableInterrupt(PortAddr iop_base)
{
	ushort cfg;

	cfg = AscGetChipCfgLsw(iop_base);
	AscSetChipCfgLsw(iop_base, cfg | ASC_CFG0_HOST_INT_ON);
}

/* Turn the host interrupt off in the chip configuration register. */
static void AscDisableInterrupt(PortAddr iop_base)
{
	ushort cfg;

	cfg = AscGetChipCfgLsw(iop_base);
	AscSetChipCfgLsw(iop_base, cfg & (~ASC_CFG0_HOST_INT_ON));
}

/* Read a byte from LRAM: LRAM is word-addressed, so read the containing
 * word and extract the high or low byte depending on address parity. */
static uchar AscReadLramByte(PortAddr iop_base, ushort addr)
{
	unsigned char byte_data;
	unsigned short word_data;

	if (isodd_word(addr)) {
		AscSetChipLramAddr(iop_base, addr - 1);
		word_data = AscGetChipLramData(iop_base);
		byte_data = (word_data >> 8) & 0xFF;
	} else {
		AscSetChipLramAddr(iop_base, addr);
		word_data =
		    AscGetChipLramData(iop_base);
		byte_data = word_data & 0xFF;
	}
	return byte_data;
}

/* Read one 16-bit word from LRAM at 'addr'. */
static ushort AscReadLramWord(PortAddr iop_base, ushort addr)
{
	ushort word_data;

	AscSetChipLramAddr(iop_base, addr);
	word_data = AscGetChipLramData(iop_base);
	return (word_data);
}

#if CC_VERY_LONG_SG_LIST
/* Read a 32-bit value as two consecutive LRAM words (low word first). */
static ASC_DCNT AscReadLramDWord(PortAddr iop_base, ushort addr)
{
	ushort val_low, val_high;
	ASC_DCNT dword_data;

	AscSetChipLramAddr(iop_base, addr);
	val_low = AscGetChipLramData(iop_base);
	val_high = AscGetChipLramData(iop_base);
	dword_data = ((ASC_DCNT) val_high << 16) | (ASC_DCNT) val_low;
	return (dword_data);
}
#endif /* CC_VERY_LONG_SG_LIST */

/* Fill 'words' LRAM words starting at 's_addr' with 'set_wval'. */
static void
AscMemWordSetLram(PortAddr iop_base, ushort s_addr, ushort set_wval, int words)
{
	int i;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < words; i++) {
		AscSetChipLramData(iop_base, set_wval);
	}
}

/* Write one 16-bit word to LRAM at 'addr'. */
static void AscWriteLramWord(PortAddr iop_base, ushort addr, ushort word_val)
{
	AscSetChipLramAddr(iop_base, addr);
	AscSetChipLramData(iop_base, word_val);
}

/* Write a byte to word-addressed LRAM via read-modify-write of the
 * containing word (high or low byte chosen by address parity). */
static void AscWriteLramByte(PortAddr iop_base, ushort addr, uchar byte_val)
{
	ushort word_data;

	if (isodd_word(addr)) {
		addr--;
		word_data = AscReadLramWord(iop_base, addr);
		word_data &= 0x00FF;
		word_data |= (((ushort)byte_val << 8) & 0xFF00);
	} else {
		word_data = AscReadLramWord(iop_base, addr);
		word_data &= 0xFF00;
		word_data |= ((ushort)byte_val & 0x00FF);
	}
	AscWriteLramWord(iop_base, addr, word_data);
}

/*
 * Copy 2 bytes to LRAM.
 *
 * The source data is assumed to be in little-endian order in memory
 * and is maintained in little-endian order when written to LRAM.
 */
static void
AscMemWordCopyPtrToLram(PortAddr iop_base, ushort s_addr,
			const uchar *s_buffer, int words)
{
	int i;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 2 * words; i += 2) {
		/*
		 * On a little-endian system the second argument below
		 * produces a little-endian ushort which is written to
		 * LRAM in little-endian order. On a big-endian system
		 * the second argument produces a big-endian ushort which
		 * is "transparently" byte-swapped by outpw() and written
		 * in little-endian order to LRAM.
		 */
		outpw(iop_base + IOP_RAM_DATA,
		      ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]);
	}
}

/*
 * Copy 4 bytes to LRAM.
 *
 * The source data is assumed to be in little-endian order in memory
 * and is maintained in little-endian order when written to LRAM.
 */
static void
AscMemDWordCopyPtrToLram(PortAddr iop_base, ushort s_addr,
			 uchar *s_buffer, int dwords)
{
	int i;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 4 * dwords; i += 4) {
		outpw(iop_base + IOP_RAM_DATA,
		      ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]);	/* LSW */
		outpw(iop_base + IOP_RAM_DATA,
		      ((ushort)s_buffer[i + 3] << 8) | s_buffer[i + 2]);	/* MSW */
	}
}

/*
 * Copy 2 bytes from LRAM.
 *
 * The source data is assumed to be in little-endian order in LRAM
 * and is maintained in little-endian order when written to memory.
 */
static void
AscMemWordCopyPtrFromLram(PortAddr iop_base, ushort s_addr,
			  uchar *d_buffer, int words)
{
	int i;
	ushort word;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 2 * words; i += 2) {
		word = inpw(iop_base + IOP_RAM_DATA);
		d_buffer[i] = word & 0xff;
		d_buffer[i + 1] = (word >> 8) & 0xff;
	}
}

/* Sum 'words' consecutive LRAM words starting at 's_addr' (used for
 * microcode checksumming). */
static ASC_DCNT AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words)
{
	ASC_DCNT sum;
	int i;

	sum = 0L;
	for (i = 0; i < words; i++, s_addr += 2) {
		sum += AscReadLramWord(iop_base, s_addr);
	}
	return (sum);
}

/* Initialize the LRAM queue blocks: clear the queue area and link the
 * queue blocks into forward/backward chains. */
static ushort AscInitLram(ASC_DVC_VAR *asc_dvc)
{
	uchar i;
	ushort s_addr;
	PortAddr iop_base;
	ushort warn_code;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0,
			  (ushort)(((int)(asc_dvc->max_total_qng + 2 + 1) *
				    64) >> 1));
	i = ASC_MIN_ACTIVE_QNO;
	s_addr = ASC_QADR_BEG + ASC_QBLK_SIZE;
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
			 (uchar)(i + 1));
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
			 (uchar)(asc_dvc->max_total_qng));
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
			 (uchar)i);
	i++;
	s_addr += ASC_QBLK_SIZE;
	/* Link the middle queue blocks into a doubly-linked chain. */
	for (; i < asc_dvc->max_total_qng; i++, s_addr += ASC_QBLK_SIZE) {
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + ASC_SCSIQ_B_FWD),
				 (uchar)(i + 1));
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + ASC_SCSIQ_B_BWD),
				 (uchar)(i - 1));
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + ASC_SCSIQ_B_QNO),
				 (uchar)i);
	}
	/* Terminate the chain at the last usable queue block. */
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
			 (uchar)ASC_QLINK_END);
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
			 (uchar)(asc_dvc->max_total_qng - 1));
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
			 (uchar)asc_dvc->max_total_qng);
	i++;
	s_addr += ASC_QBLK_SIZE;
	/* The trailing blocks are self-referencing. */
	for (; i <= (uchar)(asc_dvc->max_total_qng + 3);
	     i++, s_addr += ASC_QBLK_SIZE) {
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_FWD),
				 i);
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_BWD),
				 i);
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_QNO),
				 i);
	}
	return warn_code;
}

/* Copy the microcode image into LRAM and record its size and checksum
 * in the microcode variables; returns the sum of the words written. */
static ASC_DCNT
AscLoadMicroCode(PortAddr iop_base, ushort s_addr,
		 const uchar *mcode_buf, ushort mcode_size)
{
	ASC_DCNT chksum;
	ushort mcode_word_size;
	ushort mcode_chksum;

	/* Write the microcode buffer starting at LRAM address 0. */
	mcode_word_size = (ushort)(mcode_size >> 1);
	AscMemWordSetLram(iop_base, s_addr, 0, mcode_word_size);
	AscMemWordCopyPtrToLram(iop_base, s_addr, mcode_buf, mcode_word_size);

	chksum = AscMemSumLramWord(iop_base, s_addr, mcode_word_size);
	ASC_DBG(1, "chksum 0x%lx\n", (ulong)chksum);
	mcode_chksum = (ushort)AscMemSumLramWord(iop_base,
						 (ushort)ASC_CODE_SEC_BEG,
						 (ushort)((mcode_size -
							   s_addr -
							   (ushort)
							   ASC_CODE_SEC_BEG) /
							  2));
	ASC_DBG(1, "mcode_chksum 0x%lx\n", (ulong)mcode_chksum);
	AscWriteLramWord(iop_base, ASCV_MCODE_CHKSUM_W, mcode_chksum);
	AscWriteLramWord(iop_base, ASCV_MCODE_SIZE_W, mcode_size);
	return chksum;
}

/* Initialize the microcode queue-link variables and clear the per-queue
 * LRAM state. */
static void AscInitQLinkVar(ASC_DVC_VAR *asc_dvc)
{
	PortAddr iop_base;
	int i;
	ushort lram_addr;

	iop_base = asc_dvc->iop_base;
	AscPutRiscVarFreeQHead(iop_base, 1);
	AscPutRiscVarDoneQTail(iop_base, asc_dvc->max_total_qng);
	AscPutVarFreeQHead(iop_base, 1);
	AscPutVarDoneQTail(iop_base, asc_dvc->max_total_qng);
	AscWriteLramByte(iop_base,
			 ASCV_BUSY_QHEAD_B,
			 (uchar)((int)asc_dvc->max_total_qng + 1));
	AscWriteLramByte(iop_base,
			 ASCV_DISC1_QHEAD_B,
			 (uchar)((int)asc_dvc->max_total_qng + 2));
	AscWriteLramByte(iop_base, (ushort)ASCV_TOTAL_READY_Q_B,
			 asc_dvc->max_total_qng);
	AscWriteLramWord(iop_base, ASCV_ASCDVC_ERR_CODE_W, 0);
	AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
	AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
	AscWriteLramByte(iop_base, ASCV_SCSIBUSY_B, 0);
	AscWriteLramByte(iop_base, ASCV_WTM_FLAG_B, 0);
	AscPutQDoneInProgress(iop_base, 0);
	lram_addr = ASC_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2) {
		AscWriteLramWord(iop_base, lram_addr, 0);
	}
}

/* Set up microcode variables (SDTR, disconnect, overrun buffer DMA
 * mapping) and start the RISC; returns 0 or a warning/error code. */
static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
{
	int i;
	ushort warn_code;
	PortAddr iop_base;
	ASC_PADDR phy_addr;
	ASC_DCNT phy_size;
	struct asc_board *board = asc_dvc_to_board(asc_dvc);

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	for (i = 0; i <= ASC_MAX_TID; i++) {
		AscPutMCodeInitSDTRAtID(iop_base, i,
					asc_dvc->cfg->sdtr_period_offset[i]);
	}

	AscInitQLinkVar(asc_dvc);
	AscWriteLramByte(iop_base,
			 ASCV_DISC_ENABLE_B, asc_dvc->cfg->disc_enable);
	AscWriteLramByte(iop_base,
			 ASCV_HOSTSCSI_ID_B,
			 ASC_TID_TO_TARGET_ID(asc_dvc->cfg->chip_scsi_id));

	/* Ensure overrun buffer is aligned on an 8 byte boundary. */
	BUG_ON((unsigned long)asc_dvc->overrun_buf & 7);
	asc_dvc->overrun_dma = dma_map_single(board->dev,
					      asc_dvc->overrun_buf,
					      ASC_OVERRUN_BSIZE,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(board->dev, asc_dvc->overrun_dma)) {
		/* NOTE(review): warn_code is ushort, so -ENOMEM is
		 * truncated to a positive value here; callers appear to
		 * test overrun_dma instead -- verify. */
		warn_code = -ENOMEM;
		goto err_dma_map;
	}
	phy_addr = cpu_to_le32(asc_dvc->overrun_dma);
	AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D,
				 (uchar *)&phy_addr, 1);
	phy_size = cpu_to_le32(ASC_OVERRUN_BSIZE);
	AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_BSIZE_D,
				 (uchar *)&phy_size, 1);

	asc_dvc->cfg->mcode_date =
	    AscReadLramWord(iop_base, (ushort)ASCV_MC_DATE_W);
	asc_dvc->cfg->mcode_version =
	    AscReadLramWord(iop_base, (ushort)ASCV_MC_VER_W);

	AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
	if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
		asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
		warn_code = UW_ERR;
		goto err_mcode_start;
	}
	if (AscStartChip(iop_base) != 1) {
		asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
		warn_code = UW_ERR;
		goto err_mcode_start;
	}

	return warn_code;

err_mcode_start:
	dma_unmap_single(board->dev, asc_dvc->overrun_dma,
			 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
err_dma_map:
	/* overrun_dma == 0 signals failure to AscInitAsc1000Driver(). */
	asc_dvc->overrun_dma = 0;
	return warn_code;
}

/* Load the narrow-board firmware image, install it in LRAM and start
 * the microcode; returns a warning code (or a negative errno from the
 * firmware loader -- see NOTE below). */
static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
{
	const struct firmware *fw;
	const char fwname[] = "advansys/mcode.bin";
	int err;
	unsigned long chksum;
	ushort warn_code;
	PortAddr iop_base;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	if ((asc_dvc->dvc_cntl & ASC_CNTL_RESET_SCSI) &&
	    !(asc_dvc->init_state & ASC_INIT_RESET_SCSI_DONE)) {
		AscResetChipAndScsiBus(asc_dvc);
		mdelay(asc_dvc->scsi_reset_wait * 1000);	/* XXX: msleep?
*/
	}
	asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC;
	if (asc_dvc->err_code != 0)
		return UW_ERR;
	if (!AscFindSignature(asc_dvc->iop_base)) {
		asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
		return warn_code;
	}
	AscDisableInterrupt(iop_base);
	warn_code |= AscInitLram(asc_dvc);
	if (asc_dvc->err_code != 0)
		return UW_ERR;

	err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
		/* NOTE(review): negative errno returned from a ushort
		 * function is truncated to a positive value -- verify
		 * callers only test for non-zero. */
		return err;
	}
	if (fw->size < 4) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
		/* NOTE(review): same ushort truncation applies to -EINVAL. */
		return -EINVAL;
	}
	/* The expected checksum is stored little-endian in the first
	 * 4 bytes of the image; the code proper starts at offset 4. */
	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
		 (fw->data[1] << 8) | fw->data[0];
	ASC_DBG(1, "_asc_mcode_chksum 0x%lx\n", (ulong)chksum);
	if (AscLoadMicroCode(iop_base, 0, &fw->data[4],
			     fw->size - 4) != chksum) {
		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
		release_firmware(fw);
		return warn_code;
	}
	release_firmware(fw);
	warn_code |= AscInitMicroCodeVar(asc_dvc);
	if (!asc_dvc->overrun_dma)
		return warn_code;
	asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC;
	AscEnableInterrupt(iop_base);
	return warn_code;
}

/*
 * Load the Microcode
 *
 * Write the microcode image to RISC memory starting at address 0.
 *
 * The microcode is stored compressed in the following format:
 *
 *  254 word (508 byte) table indexed by byte code followed
 *  by the following byte codes:
 *
 *    1-Byte Code:
 *      00: Emit word 0 in table.
 *      01: Emit word 1 in table.
 *      .
 *      FD: Emit word 253 in table.
 *
 *    Multi-Byte Code:
 *      FE WW WW: (3 byte code) Word to emit is the next word WW WW.
 *      FF BB WW WW: (4 byte code) Emit BB count times next word WW WW.
 *
 * Returns 0 or an error if the checksum doesn't match
 */
static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf,
			    int size, int memsize, int chksum)
{
	int i, j, end, len = 0;
	ADV_DCNT sum;

	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);

	/* Decompress starting just past the 254-word lookup table. */
	for (i = 253 * 2; i < size; i++) {
		if (buf[i] == 0xff) {
			/* FF BB WW WW: emit WW WW, BB times. */
			unsigned short word = (buf[i + 3] << 8) | buf[i + 2];
			for (j = 0; j < buf[i + 1]; j++) {
				AdvWriteWordAutoIncLram(iop_base, word);
				len += 2;
			}
			i += 3;
		} else if (buf[i] == 0xfe) {
			/* FE WW WW: emit the literal word WW WW once. */
			unsigned short word = (buf[i + 2] << 8) | buf[i + 1];
			AdvWriteWordAutoIncLram(iop_base, word);
			i += 2;
			len += 2;
		} else {
			/* 00..FD: emit the indexed word from the table. */
			unsigned int off = buf[i] * 2;
			unsigned short word = (buf[off + 1] << 8) | buf[off];
			AdvWriteWordAutoIncLram(iop_base, word);
			len += 2;
		}
	}

	end = len;

	/* Zero-fill the remainder of RISC memory. */
	while (len < memsize) {
		AdvWriteWordAutoIncLram(iop_base, 0);
		len += 2;
	}

	/* Verify the microcode checksum. */
	sum = 0;
	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);

	for (len = 0; len < end; len += 2) {
		sum += AdvReadWordAutoIncLram(iop_base);
	}

	if (sum != chksum)
		return ASC_IERR_MCODE_CHKSUM;

	return 0;
}

/* Carve the 16-byte-aligned carrier buffer into ADV_CARR_T entries and
 * link them onto the driver's carrier freelist. */
static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc)
{
	ADV_CARR_T *carrp;
	ADV_SDCNT buf_size;
	ADV_PADDR carr_paddr;

	carrp = (ADV_CARR_T *) ADV_16BALIGN(asc_dvc->carrier_buf);
	asc_dvc->carr_freelist = NULL;
	if (carrp == asc_dvc->carrier_buf) {
		buf_size = ADV_CARRIER_BUFSIZE;
	} else {
		/* Alignment consumed part of the buffer; one fewer carrier. */
		buf_size = ADV_CARRIER_BUFSIZE - sizeof(ADV_CARR_T);
	}

	do {
		/* Get physical address of the carrier 'carrp'.
		 * NOTE(review): virt_to_bus() is a legacy API; the DMA
		 * mapping API is the modern replacement. */
		carr_paddr = cpu_to_le32(virt_to_bus(carrp));

		buf_size -= sizeof(ADV_CARR_T);

		carrp->carr_pa = carr_paddr;
		carrp->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(carrp));

		/*
		 * Insert the carrier at the beginning of the freelist.
		 */
		carrp->next_vpa =
			cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist));
		asc_dvc->carr_freelist = carrp;

		carrp++;
	} while (buf_size > 0);
}

/*
 * Send an idle command to the chip and wait for completion.
 *
 * Command completion is polled for once per microsecond.
 *
 * The function can be called from anywhere including an interrupt handler.
 * But the function is not re-entrant, so it uses the DvcEnter/LeaveCritical()
 * functions to prevent reentrancy.
 *
 * Return Values:
 *      ADV_TRUE - command completed successfully
 *      ADV_FALSE - command failed
 *      ADV_ERROR - command timed out
 */
static int
AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc,
	       ushort idle_cmd, ADV_DCNT idle_cmd_parameter)
{
	int result;
	ADV_DCNT i, j;
	AdvPortAddr iop_base;

	iop_base = asc_dvc->iop_base;

	/*
	 * Clear the idle command status which is set by the microcode
	 * to a non-zero value to indicate when the command is completed.
	 * The non-zero result is one of the IDLE_CMD_STATUS_* values
	 */
	AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS, (ushort)0);

	/*
	 * Write the idle command value after the idle command parameter
	 * has been written to avoid a race condition. If the order is not
	 * followed, the microcode may process the idle command before the
	 * parameters have been written to LRAM.
	 */
	AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IDLE_CMD_PARAMETER,
				cpu_to_le32(idle_cmd_parameter));
	AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD, idle_cmd);

	/*
	 * Tickle the RISC to tell it to process the idle command.
	 */
	AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_B);
	if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
		/*
		 * Clear the tickle value. In the ASC-3550 the RISC flag
		 * command 'clr_tickle_b' does not work unless the host
		 * value is cleared.
		 */
		AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP);
	}

	/* Wait for up to 100 millisecond for the idle command to timeout. */
	for (i = 0; i < SCSI_WAIT_100_MSEC; i++) {
		/* Poll once each microsecond for command completion. */
		for (j = 0; j < SCSI_US_PER_MSEC; j++) {
			AdvReadWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS,
					result);
			if (result != 0)
				return result;
			udelay(1);
		}
	}

	BUG();		/* The idle command should never timeout. */
	return ADV_ERROR;
}

/*
 * Reset SCSI Bus and purge all outstanding requests.
 *
 * Return Value:
 *      ADV_TRUE(1) -   All requests are purged and SCSI Bus is reset.
 *      ADV_FALSE(0) -  Microcode command failed.
 *      ADV_ERROR(-1) - Microcode command timed-out. Microcode or IC
 *                      may be hung which requires driver recovery.
 */
static int AdvResetSB(ADV_DVC_VAR *asc_dvc)
{
	int status;

	/*
	 * Send the SCSI Bus Reset idle start idle command which asserts
	 * the SCSI Bus Reset signal.
	 */
	status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_START,
				0L);
	if (status != ADV_TRUE) {
		return status;
	}

	/*
	 * Delay for the specified SCSI Bus Reset hold time.
	 *
	 * The hold time delay is done on the host because the RISC has no
	 * microsecond accurate timer.
	 */
	udelay(ASC_SCSI_RESET_HOLD_TIME_US);

	/*
	 * Send the SCSI Bus Reset end idle command which de-asserts
	 * the SCSI Bus Reset signal and purges any pending requests.
	 */
	status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_END, 0L);
	if (status != ADV_TRUE) {
		return status;
	}

	mdelay(asc_dvc->scsi_reset_wait * 1000);	/* XXX: msleep? */

	return status;
}

/*
 * Initialize the ASC-3550.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code. If there are no warnings
 * then 0 is returned.
 *
 * Needed after initialization for error recovery.
 */
static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
{
	const struct firmware *fw;
	const char fwname[] = "advansys/3550.bin";
	AdvPortAddr iop_base;
	ushort warn_code;
	int begin_addr;
	int end_addr;
	ushort code_sum;
	int word;
	int i;
	int err;
	unsigned long chksum;
	ushort scsi_cfg1;
	uchar tid;
	ushort bios_mem[ASC_MC_BIOSLEN / 2];	/* BIOS RISC Memory 0x40-0x8F. */
	ushort wdtr_able = 0, sdtr_able, tagqng_able;
	uchar max_cmd[ADV_MAX_TID + 1];

	/* If there is already an error, don't continue. */
	if (asc_dvc->err_code != 0)
		return ADV_ERROR;

	/*
	 * The caller must set 'chip_type' to ADV_CHIP_ASC3550.
*/ if (asc_dvc->chip_type != ADV_CHIP_ASC3550) { asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } warn_code = 0; iop_base = asc_dvc->iop_base; /* * Save the RISC memory BIOS region before writing the microcode. * The BIOS may already be loaded and using its RISC LRAM region * so its region must be saved and restored. * * Note: This code makes the assumption, which is currently true, * that a chip reset does not clear RISC LRAM. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Save current per TID negotiated values. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { ushort bios_version, major, minor; bios_version = bios_mem[(ASC_MC_BIOS_VERSION - ASC_MC_BIOSMEM) / 2]; major = (bios_version >> 12) & 0xF; minor = (bios_version >> 8) & 0xF; if (major < 3 || (major == 3 && minor == 1)) { /* BIOS 3.1 and earlier location of 'wdtr_able' variable. */ AdvReadWordLram(iop_base, 0x120, wdtr_able); } else { AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); } } AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return err; } if (fw->size < 4) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); release_firmware(fw); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return -EINVAL; } chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | (fw->data[1] << 8) | fw->data[0]; asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], fw->size - 4, ADV_3550_MEMSIZE, chksum); release_firmware(fw); if (asc_dvc->err_code) return ADV_ERROR; /* * Restore the RISC memory BIOS region. 
*/ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Calculate and write the microcode code checksum to the microcode * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). */ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); code_sum = 0; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); for (word = begin_addr; word < end_addr; word += 2) { code_sum += AdvReadWordAutoIncLram(iop_base); } AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); /* * Read and save microcode version and date. */ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date); AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version); /* * Set the chip type to indicate the ASC3550. */ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC3550); /* * If the PCI Configuration Command Register "Parity Error Response * Control" Bit was clear (0), then set the microcode variable * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode * to ignore DMA parity errors. */ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_IGNORE_PERR; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * For ASC-3550, setting the START_CTL_EMFU [3:2] bits sets a FIFO * threshold of 128 bytes. This register is only accessible to the host. */ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, START_CTL_EMFU | READ_CMD_MRM); /* * Microcode operating variables for WDTR, SDTR, and command tag * queuing will be set in slave_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set * SDTR and WDTR from the EEPROM configuration. 
This will allow * the BIOS and warm boot to work without a SCSI bus hang on * the Inquiry caused by host and target mismatched DTR values. * Without the SCSI Bus Reset, before an Inquiry a device can't * be assumed to be in Asynchronous, Narrow mode. */ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, asc_dvc->wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, asc_dvc->sdtr_able); } /* * Set microcode operating variables for SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 based on the ULTRA EEPROM per TID * bitmask. These values determine the maximum SDTR speed negotiated * with a device. * * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them * without determining here whether the device supports SDTR. * * 4-bit speed SDTR speed name * =========== =============== * 0000b (0x0) SDTR disabled * 0001b (0x1) 5 Mhz * 0010b (0x2) 10 Mhz * 0011b (0x3) 20 Mhz (Ultra) * 0100b (0x4) 40 Mhz (LVD/Ultra2) * 0101b (0x5) 80 Mhz (LVD2/Ultra3) * 0110b (0x6) Undefined * . * 1111b (0xF) Undefined */ word = 0; for (tid = 0; tid <= ADV_MAX_TID; tid++) { if (ADV_TID_TO_TIDMASK(tid) & asc_dvc->ultra_able) { /* Set Ultra speed for TID 'tid'. */ word |= (0x3 << (4 * (tid % 4))); } else { /* Set Fast speed for TID 'tid'. */ word |= (0x2 << (4 * (tid % 4))); } if (tid == 3) { /* Check if done with sdtr_speed1. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, word); word = 0; } else if (tid == 7) { /* Check if done with sdtr_speed2. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, word); word = 0; } else if (tid == 11) { /* Check if done with sdtr_speed3. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, word); word = 0; } else if (tid == 15) { /* Check if done with sdtr_speed4. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, word); /* End of loop. */ } } /* * Set microcode operating variable for the disconnect per TID bitmask. 
*/ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable); /* * Set SCSI_CFG0 Microcode Default Value. * * The microcode will set the SCSI_CFG0 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id); /* * Determine SCSI_CFG1 Microcode Default Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ /* Read current SCSI_CFG1 Register value. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); /* * If all three connectors are in use, return an error. */ if ((scsi_cfg1 & CABLE_ILLEGAL_A) == 0 || (scsi_cfg1 & CABLE_ILLEGAL_B) == 0) { asc_dvc->err_code |= ASC_IERR_ILLEGAL_CONNECTION; return ADV_ERROR; } /* * If the internal narrow cable is reversed all of the SCSI_CTRL * register signals will be set. Check for and return an error if * this condition is found. */ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; return ADV_ERROR; } /* * If this is a differential board and a single-ended device * is attached to one of the connectors, return an error. */ if ((scsi_cfg1 & DIFF_MODE) && (scsi_cfg1 & DIFF_SENSE) == 0) { asc_dvc->err_code |= ASC_IERR_SINGLE_END_DEVICE; return ADV_ERROR; } /* * If automatic termination control is enabled, then set the * termination value based on a table listed in a_condor.h. * * If manual termination was specified with an EEPROM setting * then 'termination' was set-up in AdvInitFrom3550EEPROM() and * is ready to be 'ored' into SCSI_CFG1. */ if (asc_dvc->cfg->termination == 0) { /* * The software always controls termination by setting TERM_CTL_SEL. * If TERM_CTL_SEL were set to 0, the hardware would set termination. 
*/ asc_dvc->cfg->termination |= TERM_CTL_SEL; switch (scsi_cfg1 & CABLE_DETECT) { /* TERM_CTL_H: on, TERM_CTL_L: on */ case 0x3: case 0x7: case 0xB: case 0xD: case 0xE: case 0xF: asc_dvc->cfg->termination |= (TERM_CTL_H | TERM_CTL_L); break; /* TERM_CTL_H: on, TERM_CTL_L: off */ case 0x1: case 0x5: case 0x9: case 0xA: case 0xC: asc_dvc->cfg->termination |= TERM_CTL_H; break; /* TERM_CTL_H: off, TERM_CTL_L: off */ case 0x2: case 0x6: break; } } /* * Clear any set TERM_CTL_H and TERM_CTL_L bits. */ scsi_cfg1 &= ~TERM_CTL; /* * Invert the TERM_CTL_H and TERM_CTL_L bits and then * set 'scsi_cfg1'. The TERM_POL bit does not need to be * referenced, because the hardware internally inverts * the Termination High and Low bits if TERM_POL is set. */ scsi_cfg1 |= (TERM_CTL_SEL | (~asc_dvc->cfg->termination & TERM_CTL)); /* * Set SCSI_CFG1 Microcode Default Value * * Set filter value and possibly modified termination control * bits in the Microcode SCSI_CFG1 Register Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, FLTR_DISABLE | scsi_cfg1); /* * Set MEM_CFG Microcode Default Value * * The microcode will set the MEM_CFG register using this value * after it is started below. * * MEM_CFG may be accessed as a word or byte, but only bits 0-7 * are defined. * * ASC-3550 has 8KB internal memory. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, BIOS_EN | RAM_SZ_8KB); /* * Set SEL_MASK Microcode Default Value * * The microcode will set the SEL_MASK register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); AdvBuildCarrierFreelist(asc_dvc); /* * Set-up the Host->RISC Initiator Command Queue (ICQ). 
*/ if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa)); /* * The first command issued will be placed in the stopper carrier. */ asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC ICQ physical address start value. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); /* * Set-up the RISC->Host Initiator Response Queue (IRQ). */ if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa)); /* * The first command completed by the RISC will be placed in * the stopper. * * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is * completed the RISC will set the ASC_RQ_STOPPER bit. */ asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC IRQ physical address start value. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); asc_dvc->carr_pending_cnt = 0; AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR)); AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); AdvWriteWordRegister(iop_base, IOPW_PC, word); /* finally, finally, gentlemen, start your engine */ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); /* * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus * Resets should be performed. The RISC has to be running * to issue a SCSI Bus Reset. */ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { /* * If the BIOS Signature is present in memory, restore the * BIOS Handshake Configuration Table and do not perform * a SCSI Bus Reset. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { /* * Restore per TID negotiated values. 
 */
			AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
			AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
			AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
					 tagqng_able);
			for (tid = 0; tid <= ADV_MAX_TID; tid++) {
				AdvWriteByteLram(iop_base,
						 ASC_MC_NUMBER_OF_MAX_CMD + tid,
						 max_cmd[tid]);
			}
		} else {
			if (AdvResetSB(asc_dvc) != ADV_TRUE) {
				warn_code = ASC_WARN_BUSRESET_ERROR;
			}
		}
	}

	return warn_code;
}

/*
 * Initialize the ASC-38C0800.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code. If there are no warnings
 * then 0 is returned.
 *
 * Needed after initialization for error recovery.
 */
static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
{
	const struct firmware *fw;
	const char fwname[] = "advansys/38C0800.bin";
	AdvPortAddr iop_base;
	ushort warn_code;
	int begin_addr;
	int end_addr;
	ushort code_sum;
	int word;		/* scratch for Adv{Read,Write}WordLram macro I/O */
	int i;
	int err;
	unsigned long chksum;
	ushort scsi_cfg1;
	uchar byte;
	uchar tid;
	ushort bios_mem[ASC_MC_BIOSLEN / 2];	/* BIOS RISC Memory 0x40-0x8F. */
	ushort wdtr_able, sdtr_able, tagqng_able;
	uchar max_cmd[ADV_MAX_TID + 1];

	/* If there is already an error, don't continue. */
	if (asc_dvc->err_code != 0)
		return ADV_ERROR;

	/*
	 * The caller must set 'chip_type' to ADV_CHIP_ASC38C0800.
	 */
	if (asc_dvc->chip_type != ADV_CHIP_ASC38C0800) {
		asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE;
		return ADV_ERROR;
	}

	warn_code = 0;
	iop_base = asc_dvc->iop_base;

	/*
	 * Save the RISC memory BIOS region before writing the microcode.
	 * The BIOS may already be loaded and using its RISC LRAM region
	 * so its region must be saved and restored.
	 *
	 * Note: This code makes the assumption, which is currently true,
	 * that a chip reset does not clear RISC LRAM.
	 */
	for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
		AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
				bios_mem[i]);
	}

	/*
	 * Save current per TID negotiated values.
	 */
	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				max_cmd[tid]);
	}

	/*
	 * RAM BIST (RAM Built-In Self Test)
	 *
	 * Address : I/O base + offset 0x38h register (byte).
	 * Function: Bit 7-6(RW) : RAM mode
	 *                          Normal Mode   : 0x00
	 *                          Pre-test Mode : 0x40
	 *                          RAM Test Mode : 0x80
	 *           Bit 5       : unused
	 *           Bit 4(RO)   : Done bit
	 *           Bit 3-0(RO) : Status
	 *                          Host Error    : 0x08
	 *                          Int_RAM Error : 0x04
	 *                          RISC Error    : 0x02
	 *                          SCSI Error    : 0x01
	 *                          No Error      : 0x00
	 *
	 * Note: RAM BIST code should be put right here, before loading the
	 * microcode and after saving the RISC memory BIOS region.
	 */

	/*
	 * LRAM Pre-test
	 *
	 * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds.
	 * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return
	 * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset
	 * to NORMAL_MODE, return an error too.
	 */
	for (i = 0; i < 2; i++) {
		AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE);
		mdelay(10);	/* Wait for 10ms before reading back. */
		byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
		if ((byte & RAM_TEST_DONE) == 0
		    || (byte & 0x0F) != PRE_TEST_VALUE) {
			asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
			return ADV_ERROR;
		}

		AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);
		mdelay(10);	/* Wait for 10ms before reading back. */
		if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST)
		    != NORMAL_VALUE) {
			asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
			return ADV_ERROR;
		}
	}

	/*
	 * LRAM Test - It takes about 1.5 ms to run through the test.
	 *
	 * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds.
	 * If Done bit not set or Status not 0, save register byte, set the
	 * err_code, and return an error.
	 */
	AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE);
	mdelay(10);	/* Wait for 10ms before checking status. */

	byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
	if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) {
		/* Get here if Done bit not set or Status not 0. */
		asc_dvc->bist_err_code = byte;	/* for BIOS display message */
		asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST;
		return ADV_ERROR;
	}

	/* We need to reset back to normal mode after LRAM test passes. */
	AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);

	err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
		return err;
	}
	if (fw->size < 4) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
		return -EINVAL;
	}
	/*
	 * The first four bytes of the firmware image hold its expected
	 * checksum, assembled here little-endian; the remainder is the
	 * microcode proper handed to AdvLoadMicrocode().
	 */
	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
		 (fw->data[1] << 8) | fw->data[0];
	asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4],
					     fw->size - 4, ADV_38C0800_MEMSIZE,
					     chksum);
	release_firmware(fw);
	if (asc_dvc->err_code)
		return ADV_ERROR;

	/*
	 * Restore the RISC memory BIOS region.
	 */
	for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
		AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
				 bios_mem[i]);
	}

	/*
	 * Calculate and write the microcode code checksum to the microcode
	 * code checksum location ASC_MC_CODE_CHK_SUM (0x2C).
	 */
	AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr);
	AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr);
	code_sum = 0;
	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr);
	for (word = begin_addr; word < end_addr; word += 2) {
		code_sum += AdvReadWordAutoIncLram(iop_base);
	}
	AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum);

	/*
	 * Read microcode version and date.
	 */
	AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE,
			asc_dvc->cfg->mcode_date);
	AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM,
			asc_dvc->cfg->mcode_version);

	/*
	 * Set the chip type to indicate the ASC38C0800.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C0800);

	/*
	 * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register.
	 * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current
	 * cable detection and then we are able to read C_DET[3:0].
	 *
	 * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1
	 * Microcode Default Value' section below.
	 */
	scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
	AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1,
			     scsi_cfg1 | DIS_TERM_DRV);

	/*
	 * If the PCI Configuration Command Register "Parity Error Response
	 * Control" Bit was clear (0), then set the microcode variable
	 * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
	 * to ignore DMA parity errors.
	 */
	if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) {
		AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
		word |= CONTROL_FLAG_IGNORE_PERR;
		AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
	}

	/*
	 * For ASC-38C0800, set FIFO_THRESH_80B [6:4] bits and START_CTL_TH [3:2]
	 * bits for the default FIFO threshold.
	 *
	 * Note: ASC-38C0800 FIFO threshold has been changed to 256 bytes.
	 *
	 * For DMA Errata #4 set the BC_THRESH_ENB bit.
	 */
	AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0,
			     BC_THRESH_ENB | FIFO_THRESH_80B | START_CTL_TH |
			     READ_CMD_MRM);

	/*
	 * Microcode operating variables for WDTR, SDTR, and command tag
	 * queuing will be set in slave_configure() based on what a
	 * device reports it is capable of in Inquiry byte 7.
	 *
	 * If SCSI Bus Resets have been disabled, then directly set
	 * SDTR and WDTR from the EEPROM configuration. This will allow
	 * the BIOS and warm boot to work without a SCSI bus hang on
	 * the Inquiry caused by host and target mismatched DTR values.
	 * Without the SCSI Bus Reset, before an Inquiry a device can't
	 * be assumed to be in Asynchronous, Narrow mode.
	 */
	if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) {
		AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE,
				 asc_dvc->wdtr_able);
		AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE,
				 asc_dvc->sdtr_able);
	}

	/*
	 * Set microcode operating variables for DISC and SDTR_SPEED1,
	 * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM
	 * configuration values.
	 *
	 * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2,
	 * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them
	 * without determining here whether the device supports SDTR.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE,
			 asc_dvc->cfg->disc_enable);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4);

	/*
	 * Set SCSI_CFG0 Microcode Default Value.
	 *
	 * The microcode will set the SCSI_CFG0 register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0,
			 PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN |
			 asc_dvc->chip_scsi_id);

	/*
	 * Determine SCSI_CFG1 Microcode Default Value.
	 *
	 * The microcode will set the SCSI_CFG1 register using this value
	 * after it is started below.
	 */

	/* Read current SCSI_CFG1 Register value. */
	scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);

	/*
	 * If the internal narrow cable is reversed all of the SCSI_CTRL
	 * register signals will be set. Check for and return an error if
	 * this condition is found.
	 */
	if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) {
		asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
		return ADV_ERROR;
	}

	/*
	 * All kind of combinations of devices attached to one of four
	 * connectors are acceptable except HVD device attached. For example,
	 * LVD device can be attached to SE connector while SE device attached
	 * to LVD connector.  If LVD device attached to SE connector, it only
	 * runs up to Ultra speed.
	 *
	 * If an HVD device is attached to one of LVD connectors, return an
	 * error.  However, there is no way to detect HVD device attached to
	 * SE connectors.
	 */
	if (scsi_cfg1 & HVD) {
		/*
		 * NOTE(review): err_code is assigned here ('=') while the
		 * other error paths in this function use '|='; confirm the
		 * overwrite is intentional.
		 */
		asc_dvc->err_code = ASC_IERR_HVD_DEVICE;
		return ADV_ERROR;
	}

	/*
	 * If either SE or LVD automatic termination control is enabled, then
	 * set the termination value based on a table listed in a_condor.h.
	 *
	 * If manual termination was specified with an EEPROM setting then
	 * 'termination' was set-up in AdvInitFrom38C0800EEPROM() and is ready
	 * to be 'ored' into SCSI_CFG1.
	 */
	if ((asc_dvc->cfg->termination & TERM_SE) == 0) {
		/* SE automatic termination control is enabled. */
		switch (scsi_cfg1 & C_DET_SE) {
			/* TERM_SE_HI: on, TERM_SE_LO: on */
		case 0x1:
		case 0x2:
		case 0x3:
			asc_dvc->cfg->termination |= TERM_SE;
			break;

			/* TERM_SE_HI: on, TERM_SE_LO: off */
		case 0x0:
			asc_dvc->cfg->termination |= TERM_SE_HI;
			break;
		}
	}

	if ((asc_dvc->cfg->termination & TERM_LVD) == 0) {
		/* LVD automatic termination control is enabled. */
		switch (scsi_cfg1 & C_DET_LVD) {
			/* TERM_LVD_HI: on, TERM_LVD_LO: on */
		case 0x4:
		case 0x8:
		case 0xC:
			asc_dvc->cfg->termination |= TERM_LVD;
			break;

			/* TERM_LVD_HI: off, TERM_LVD_LO: off */
		case 0x0:
			break;
		}
	}

	/*
	 * Clear any set TERM_SE and TERM_LVD bits.
	 */
	scsi_cfg1 &= (~TERM_SE & ~TERM_LVD);

	/*
	 * Invert the TERM_SE and TERM_LVD bits and then set 'scsi_cfg1'.
	 */
	scsi_cfg1 |= (~asc_dvc->cfg->termination & 0xF0);

	/*
	 * Clear BIG_ENDIAN, DIS_TERM_DRV, Terminator Polarity and HVD/LVD/SE
	 * bits and set possibly modified termination control bits in the
	 * Microcode SCSI_CFG1 Register Value.
	 */
	scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL & ~HVD_LVD_SE);

	/*
	 * Set SCSI_CFG1 Microcode Default Value
	 *
	 * Set possibly modified termination control and reset DIS_TERM_DRV
	 * bits in the Microcode SCSI_CFG1 Register Value.
	 *
	 * The microcode will set the SCSI_CFG1 register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1);

	/*
	 * Set MEM_CFG Microcode Default Value
	 *
	 * The microcode will set the MEM_CFG register using this value
	 * after it is started below.
	 *
	 * MEM_CFG may be accessed as a word or byte, but only bits 0-7
	 * are defined.
	 *
	 * ASC-38C0800 has 16KB internal memory.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
			 BIOS_EN | RAM_SZ_16KB);

	/*
	 * Set SEL_MASK Microcode Default Value
	 *
	 * The microcode will set the SEL_MASK register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK,
			 ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id));

	AdvBuildCarrierFreelist(asc_dvc);

	/*
	 * Set-up the Host->RISC Initiator Command Queue (ICQ).
	 */
	if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) {
		asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
		return ADV_ERROR;
	}
	asc_dvc->carr_freelist = (ADV_CARR_T *)
	    ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));

	/*
	 * The first command issued will be placed in the stopper carrier.
	 */
	asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);

	/*
	 * Set RISC ICQ physical address start value.
	 * carr_pa is LE, must be native before write
	 */
	AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa);

	/*
	 * Set-up the RISC->Host Initiator Response Queue (IRQ).
	 */
	if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) {
		asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
		return ADV_ERROR;
	}
	asc_dvc->carr_freelist = (ADV_CARR_T *)
	    ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));

	/*
	 * The first command completed by the RISC will be placed in
	 * the stopper.
	 *
	 * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
	 * completed the RISC will set the ASC_RQ_STOPPER bit.
	 */
	asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);

	/*
	 * Set RISC IRQ physical address start value.
	 *
	 * carr_pa is LE, must be native before write
	 *
	 */
	AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa);

	asc_dvc->carr_pending_cnt = 0;

	AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES,
			     (ADV_INTR_ENABLE_HOST_INTR |
			      ADV_INTR_ENABLE_GLOBAL_INTR));

	AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word);
	AdvWriteWordRegister(iop_base, IOPW_PC, word);

	/* finally, finally, gentlemen, start your engine */
	AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN);

	/*
	 * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
	 * Resets should be performed. The RISC has to be running
	 * to issue a SCSI Bus Reset.
	 */
	if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) {
		/*
		 * If the BIOS Signature is present in memory, restore the
		 * BIOS Handshake Configuration Table and do not perform
		 * a SCSI Bus Reset.
		 */
		if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] ==
		    0x55AA) {
			/*
			 * Restore per TID negotiated values.
			 */
			AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
			AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
			AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
					 tagqng_able);
			for (tid = 0; tid <= ADV_MAX_TID; tid++) {
				AdvWriteByteLram(iop_base,
						 ASC_MC_NUMBER_OF_MAX_CMD + tid,
						 max_cmd[tid]);
			}
		} else {
			if (AdvResetSB(asc_dvc) != ADV_TRUE) {
				warn_code = ASC_WARN_BUSRESET_ERROR;
			}
		}
	}

	return warn_code;
}

/*
 * Initialize the ASC-38C1600.
 *
 * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code. If there are no warnings
 * then 0 is returned.
 *
 * Needed after initialization for error recovery.
 */
static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
{
	const struct firmware *fw;
	const char fwname[] = "advansys/38C1600.bin";
	AdvPortAddr iop_base;
	ushort warn_code;
	int begin_addr;
	int end_addr;
	ushort code_sum;
	/*
	 * NOTE(review): declared 'long' here while the otherwise-parallel
	 * AdvInitAsc38C0800Driver() uses 'int'; it is used identically as
	 * the Lram scratch/loop word -- confirm the difference is intentional.
	 */
	long word;
	int i;
	int err;
	unsigned long chksum;
	ushort scsi_cfg1;
	uchar byte;
	uchar tid;
	ushort bios_mem[ASC_MC_BIOSLEN / 2];	/* BIOS RISC Memory 0x40-0x8F.
						 */
	ushort wdtr_able, sdtr_able, ppr_able, tagqng_able;
	uchar max_cmd[ASC_MAX_TID + 1];

	/* If there is already an error, don't continue. */
	if (asc_dvc->err_code != 0) {
		return ADV_ERROR;
	}

	/*
	 * The caller must set 'chip_type' to ADV_CHIP_ASC38C1600.
	 */
	if (asc_dvc->chip_type != ADV_CHIP_ASC38C1600) {
		asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE;
		return ADV_ERROR;
	}

	warn_code = 0;
	iop_base = asc_dvc->iop_base;

	/*
	 * Save the RISC memory BIOS region before writing the microcode.
	 * The BIOS may already be loaded and using its RISC LRAM region
	 * so its region must be saved and restored.
	 *
	 * Note: This code makes the assumption, which is currently true,
	 * that a chip reset does not clear RISC LRAM.
	 */
	for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
		AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
				bios_mem[i]);
	}

	/*
	 * Save current per TID negotiated values.
	 */
	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ASC_MAX_TID; tid++) {
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				max_cmd[tid]);
	}

	/*
	 * RAM BIST (Built-In Self Test)
	 *
	 * Address : I/O base + offset 0x38h register (byte).
	 * Function: Bit 7-6(RW) : RAM mode
	 *                          Normal Mode   : 0x00
	 *                          Pre-test Mode : 0x40
	 *                          RAM Test Mode : 0x80
	 *           Bit 5       : unused
	 *           Bit 4(RO)   : Done bit
	 *           Bit 3-0(RO) : Status
	 *                          Host Error    : 0x08
	 *                          Int_RAM Error : 0x04
	 *                          RISC Error    : 0x02
	 *                          SCSI Error    : 0x01
	 *                          No Error      : 0x00
	 *
	 * Note: RAM BIST code should be put right here, before loading the
	 * microcode and after saving the RISC memory BIOS region.
	 */

	/*
	 * LRAM Pre-test
	 *
	 * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds.
	 * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return
	 * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset
	 * to NORMAL_MODE, return an error too.
	 */
	for (i = 0; i < 2; i++) {
		AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE);
		mdelay(10);	/* Wait for 10ms before reading back. */
		byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
		if ((byte & RAM_TEST_DONE) == 0
		    || (byte & 0x0F) != PRE_TEST_VALUE) {
			asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
			return ADV_ERROR;
		}

		AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);
		mdelay(10);	/* Wait for 10ms before reading back. */
		if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST)
		    != NORMAL_VALUE) {
			asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
			return ADV_ERROR;
		}
	}

	/*
	 * LRAM Test - It takes about 1.5 ms to run through the test.
	 *
	 * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds.
	 * If Done bit not set or Status not 0, save register byte, set the
	 * err_code, and return an error.
	 */
	AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE);
	mdelay(10);	/* Wait for 10ms before checking status. */

	byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
	if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) {
		/* Get here if Done bit not set or Status not 0. */
		asc_dvc->bist_err_code = byte;	/* for BIOS display message */
		asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST;
		return ADV_ERROR;
	}

	/* We need to reset back to normal mode after LRAM test passes. */
	AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);

	err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
		return err;
	}
	if (fw->size < 4) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
		return -EINVAL;
	}
	/*
	 * The first four bytes of the firmware image hold its expected
	 * checksum, assembled here little-endian.
	 */
	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
		 (fw->data[1] << 8) | fw->data[0];
	asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4],
					     fw->size - 4, ADV_38C1600_MEMSIZE,
					     chksum);
	release_firmware(fw);
	if (asc_dvc->err_code)
		return ADV_ERROR;

	/*
	 * Restore the RISC memory BIOS region.
	 */
	for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
		AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
				 bios_mem[i]);
	}

	/*
	 * Calculate and write the microcode code checksum to the microcode
	 * code checksum location ASC_MC_CODE_CHK_SUM (0x2C).
	 */
	AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr);
	AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr);
	code_sum = 0;
	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr);
	for (word = begin_addr; word < end_addr; word += 2) {
		code_sum += AdvReadWordAutoIncLram(iop_base);
	}
	AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum);

	/*
	 * Read microcode version and date.
	 */
	AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE,
			asc_dvc->cfg->mcode_date);
	AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM,
			asc_dvc->cfg->mcode_version);

	/*
	 * Set the chip type to indicate the ASC38C1600.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C1600);

	/*
	 * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register.
	 * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current
	 * cable detection and then we are able to read C_DET[3:0].
	 *
	 * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1
	 * Microcode Default Value' section below.
	 */
	scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
	AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1,
			     scsi_cfg1 | DIS_TERM_DRV);

	/*
	 * If the PCI Configuration Command Register "Parity Error Response
	 * Control" Bit was clear (0), then set the microcode variable
	 * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
	 * to ignore DMA parity errors.
	 */
	if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) {
		AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
		word |= CONTROL_FLAG_IGNORE_PERR;
		AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
	}

	/*
	 * If the BIOS control flag AIPP (Asynchronous Information
	 * Phase Protection) disable bit is not set, then set the firmware
	 * 'control_flag' CONTROL_FLAG_ENABLE_AIPP bit to enable
	 * AIPP checking and encoding.
	 */
	if ((asc_dvc->bios_ctrl & BIOS_CTRL_AIPP_DIS) == 0) {
		AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
		word |= CONTROL_FLAG_ENABLE_AIPP;
		AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
	}

	/*
	 * For ASC-38C1600 use DMA_CFG0 default values: FIFO_THRESH_80B [6:4],
	 * and START_CTL_TH [3:2].
	 */
	AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0,
			     FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM);

	/*
	 * Microcode operating variables for WDTR, SDTR, and command tag
	 * queuing will be set in slave_configure() based on what a
	 * device reports it is capable of in Inquiry byte 7.
	 *
	 * If SCSI Bus Resets have been disabled, then directly set
	 * SDTR and WDTR from the EEPROM configuration. This will allow
	 * the BIOS and warm boot to work without a SCSI bus hang on
	 * the Inquiry caused by host and target mismatched DTR values.
	 * Without the SCSI Bus Reset, before an Inquiry a device can't
	 * be assumed to be in Asynchronous, Narrow mode.
	 */
	if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) {
		AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE,
				 asc_dvc->wdtr_able);
		AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE,
				 asc_dvc->sdtr_able);
	}

	/*
	 * Set microcode operating variables for DISC and SDTR_SPEED1,
	 * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM
	 * configuration values.
	 *
	 * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2,
	 * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them
	 * without determining here whether the device supports SDTR.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE,
			 asc_dvc->cfg->disc_enable);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4);

	/*
	 * Set SCSI_CFG0 Microcode Default Value.
	 *
	 * The microcode will set the SCSI_CFG0 register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0,
			 PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN |
			 asc_dvc->chip_scsi_id);

	/*
	 * Calculate SCSI_CFG1 Microcode Default Value.
	 *
	 * The microcode will set the SCSI_CFG1 register using this value
	 * after it is started below.
	 *
	 * Each ASC-38C1600 function has only two cable detect bits.
	 * The bus mode override bits are in IOPB_SOFT_OVER_WR.
	 */
	scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);

	/*
	 * If the cable is reversed all of the SCSI_CTRL register signals
	 * will be set. Check for and return an error if this condition is
	 * found.
	 */
	if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) {
		asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
		return ADV_ERROR;
	}

	/*
	 * Each ASC-38C1600 function has two connectors. Only an HVD device
	 * can not be connected to either connector. An LVD device or SE device
	 * may be connected to either connector. If an SE device is connected,
	 * then at most Ultra speed (20 Mhz) can be used on both connectors.
	 *
	 * If an HVD device is attached, return an error.
	 */
	if (scsi_cfg1 & HVD) {
		asc_dvc->err_code |= ASC_IERR_HVD_DEVICE;
		return ADV_ERROR;
	}

	/*
	 * Each function in the ASC-38C1600 uses only the SE cable detect and
	 * termination because there are two connectors for each function. Each
	 * function may use either LVD or SE mode. Corresponding the SE automatic
	 * termination control EEPROM bits are used for each function. Each
	 * function has its own EEPROM. If SE automatic control is enabled for
	 * the function, then set the termination value based on a table listed
	 * in a_condor.h.
	 *
	 * If manual termination is specified in the EEPROM for the function,
	 * then 'termination' was set-up in AscInitFrom38C1600EEPROM() and is
	 * ready to be 'ored' into SCSI_CFG1.
	 */
	if ((asc_dvc->cfg->termination & TERM_SE) == 0) {
		struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc);

		/* SE automatic termination control is enabled. */
		switch (scsi_cfg1 & C_DET_SE) {
			/* TERM_SE_HI: on, TERM_SE_LO: on */
		case 0x1:
		case 0x2:
		case 0x3:
			asc_dvc->cfg->termination |= TERM_SE;
			break;

		case 0x0:
			if (PCI_FUNC(pdev->devfn) == 0) {
				/* Function 0 - TERM_SE_HI: off, TERM_SE_LO: off */
			} else {
				/* Function 1 - TERM_SE_HI: on, TERM_SE_LO: off */
				asc_dvc->cfg->termination |= TERM_SE_HI;
			}
			break;
		}
	}

	/*
	 * Clear any set TERM_SE bits.
	 */
	scsi_cfg1 &= ~TERM_SE;

	/*
	 * Invert the TERM_SE bits and then set 'scsi_cfg1'.
	 */
	scsi_cfg1 |= (~asc_dvc->cfg->termination & TERM_SE);

	/*
	 * Clear Big Endian and Terminator Polarity bits and set possibly
	 * modified termination control bits in the Microcode SCSI_CFG1
	 * Register Value.
	 *
	 * Big Endian bit is not used even on big endian machines.
	 */
	scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL);

	/*
	 * Set SCSI_CFG1 Microcode Default Value
	 *
	 * Set possibly modified termination control bits in the Microcode
	 * SCSI_CFG1 Register Value.
	 *
	 * The microcode will set the SCSI_CFG1 register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1);

	/*
	 * Set MEM_CFG Microcode Default Value
	 *
	 * The microcode will set the MEM_CFG register using this value
	 * after it is started below.
	 *
	 * MEM_CFG may be accessed as a word or byte, but only bits 0-7
	 * are defined.
	 *
	 * ASC-38C1600 has 32KB internal memory.
	 *
	 * XXX - Since ASC38C1600 Rev.3 has a Local RAM failure issue, we come
	 * out a special 16K Adv Library and Microcode version. After the issue
	 * resolved, we should turn back to the 32K support. Both a_condor.h and
	 * mcode.sas files also need to be updated.
	 *
	 * AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
	 *  BIOS_EN | RAM_SZ_32KB);
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
			 BIOS_EN | RAM_SZ_16KB);

	/*
	 * Set SEL_MASK Microcode Default Value
	 *
	 * The microcode will set the SEL_MASK register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK,
			 ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id));

	AdvBuildCarrierFreelist(asc_dvc);

	/*
	 * Set-up the Host->RISC Initiator Command Queue (ICQ).
	 */
	if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) {
		asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
		return ADV_ERROR;
	}
	asc_dvc->carr_freelist = (ADV_CARR_T *)
	    ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));

	/*
	 * The first command issued will be placed in the stopper carrier.
	 */
	asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);

	/*
	 * Set RISC ICQ physical address start value. Initialize the
	 * COMMA register to the same value otherwise the RISC will
	 * prematurely detect a command is available.
	 */
	AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa);
	AdvWriteDWordRegister(iop_base, IOPDW_COMMA,
			      le32_to_cpu(asc_dvc->icq_sp->carr_pa));

	/*
	 * Set-up the RISC->Host Initiator Response Queue (IRQ).
	 */
	if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) {
		asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
		return ADV_ERROR;
	}
	asc_dvc->carr_freelist = (ADV_CARR_T *)
	    ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));

	/*
	 * The first command completed by the RISC will be placed in
	 * the stopper.
	 *
	 * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
	 * completed the RISC will set the ASC_RQ_STOPPER bit.
	 */
	asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);

	/*
	 * Set RISC IRQ physical address start value.
	 */
	AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa);
	asc_dvc->carr_pending_cnt = 0;

	AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES,
			     (ADV_INTR_ENABLE_HOST_INTR |
			      ADV_INTR_ENABLE_GLOBAL_INTR));
	AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word);
	AdvWriteWordRegister(iop_base, IOPW_PC, word);

	/* finally, finally, gentlemen, start your engine */
	AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN);

	/*
	 * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
	 * Resets should be performed. The RISC has to be running
	 * to issue a SCSI Bus Reset.
	 */
	if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) {
		/*
		 * If the BIOS Signature is present in memory, restore the
		 * per TID microcode operating variables.
		 */
		if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] ==
		    0x55AA) {
			/*
			 * Restore per TID negotiated values.
			 */
			AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
			AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
			AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
			AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
					 tagqng_able);
			for (tid = 0; tid <= ASC_MAX_TID; tid++) {
				AdvWriteByteLram(iop_base,
						 ASC_MC_NUMBER_OF_MAX_CMD + tid,
						 max_cmd[tid]);
			}
		} else {
			if (AdvResetSB(asc_dvc) != ADV_TRUE) {
				warn_code = ASC_WARN_BUSRESET_ERROR;
			}
		}
	}

	return warn_code;
}

/*
 * Reset chip and SCSI Bus.
 *
 * Return Value:
 *      ADV_TRUE(1) -   Chip re-initialization and SCSI Bus Reset successful.
 * ADV_FALSE(0) - Chip re-initialization and SCSI Bus Reset failure.
 */
static int AdvResetChipAndSB(ADV_DVC_VAR *asc_dvc)
{
	int status;
	ushort wdtr_able, sdtr_able, tagqng_able;
	ushort ppr_able = 0;
	uchar tid, max_cmd[ADV_MAX_TID + 1];
	AdvPortAddr iop_base;
	ushort bios_sig;

	iop_base = asc_dvc->iop_base;

	/*
	 * Save current per TID negotiated values.
	 */
	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		/* PPR negotiation state is only kept for the ASC-38C1600. */
		AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
	}
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				max_cmd[tid]);
	}

	/*
	 * Force the AdvInitAsc3550/38C0800Driver() function to
	 * perform a SCSI Bus Reset by clearing the BIOS signature word.
	 * The initialization functions assume a SCSI Bus Reset is not
	 * needed if the BIOS signature word is present.
	 */
	AdvReadWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig);
	AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, 0);

	/*
	 * Stop chip and reset it.
	 */
	AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_STOP);
	AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET);
	mdelay(100);	/* let the reset settle before re-enabling register I/O */
	AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
			     ADV_CTRL_REG_CMD_WR_IO_REG);

	/*
	 * Reset Adv Library error code, if any, and try
	 * re-initializing the chip.
	 */
	asc_dvc->err_code = 0;
	if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		status = AdvInitAsc38C1600Driver(asc_dvc);
	} else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
		status = AdvInitAsc38C0800Driver(asc_dvc);
	} else {
		status = AdvInitAsc3550Driver(asc_dvc);
	}

	/* Translate initialization return value to status value. */
	if (status == 0) {
		status = ADV_TRUE;
	} else {
		status = ADV_FALSE;
	}

	/*
	 * Restore the BIOS signature word.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig);

	/*
	 * Restore per TID negotiated values.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
	}
	AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				 max_cmd[tid]);
	}

	return status;
}

/*
 * adv_async_callback() - Adv Library asynchronous event callback function.
 */
static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
{
	switch (code) {
	case ADV_ASYNC_SCSI_BUS_RESET_DET:
		/*
		 * The firmware detected a SCSI Bus reset.
		 */
		ASC_DBG(0, "ADV_ASYNC_SCSI_BUS_RESET_DET\n");
		break;

	case ADV_ASYNC_RDMA_FAILURE:
		/*
		 * Handle RDMA failure by resetting the SCSI Bus and
		 * possibly the chip if it is unresponsive. Log the error
		 * with a unique code.
		 */
		ASC_DBG(0, "ADV_ASYNC_RDMA_FAILURE\n");
		AdvResetChipAndSB(adv_dvc_varp);
		break;

	case ADV_HOST_SCSI_BUS_RESET:
		/*
		 * Host generated SCSI bus reset occurred.
		 */
		ASC_DBG(0, "ADV_HOST_SCSI_BUS_RESET\n");
		break;

	default:
		ASC_DBG(0, "unknown code 0x%x\n", code);
		break;
	}
}

/*
 * adv_isr_callback() - Second Level Interrupt Handler called by AdvISR().
 *
 * Callback function for the Wide SCSI Adv Library.
 */
static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
{
	struct asc_board *boardp;
	adv_req_t *reqp;
	adv_sgblk_t *sgblkp;
	struct scsi_cmnd *scp;
	struct Scsi_Host *shost;
	ADV_DCNT resid_cnt;

	ASC_DBG(1, "adv_dvc_varp 0x%lx, scsiqp 0x%lx\n", (ulong)adv_dvc_varp,
		(ulong)scsiqp);
	ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);

	/*
	 * Get the adv_req_t structure for the command that has been
	 * completed. The adv_req_t structure actually contains the
	 * completed ADV_SCSI_REQ_Q structure.
	 */
	reqp = (adv_req_t *)ADV_U32_TO_VADDR(scsiqp->srb_ptr);
	ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp);
	if (reqp == NULL) {
		ASC_PRINT("adv_isr_callback: reqp is NULL\n");
		return;
	}

	/*
	 * Get the struct scsi_cmnd structure and Scsi_Host structure for the
	 * command that has been completed.
	 *
	 * Note: The adv_req_t request structure and adv_sgblk_t structure,
	 * if any, are dropped, because a board structure pointer can not be
	 * determined.
	 */
	scp = reqp->cmndp;
	ASC_DBG(1, "scp 0x%p\n", scp);
	if (scp == NULL) {
		ASC_PRINT
		    ("adv_isr_callback: scp is NULL; adv_req_t dropped.\n");
		return;
	}
	ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);

	shost = scp->device->host;
	ASC_STATS(shost, callback);
	ASC_DBG(1, "shost 0x%p\n", shost);

	boardp = shost_priv(shost);
	BUG_ON(adv_dvc_varp != &boardp->dvc_var.adv_dvc_var);

	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiqp->done_status) {
	case QD_NO_ERROR:
		ASC_DBG(2, "QD_NO_ERROR\n");
		scp->result = 0;

		/*
		 * Check for an underrun condition.
		 *
		 * If there was no error and an underrun condition, then
		 * return the number of underrun bytes.
		 */
		resid_cnt = le32_to_cpu(scsiqp->data_cnt);
		if (scsi_bufflen(scp) != 0 && resid_cnt != 0 &&
		    resid_cnt <= scsi_bufflen(scp)) {
			ASC_DBG(1, "underrun condition %lu bytes\n",
				(ulong)resid_cnt);
			scsi_set_resid(scp, resid_cnt);
		}
		break;

	case QD_WITH_ERROR:
		ASC_DBG(2, "QD_WITH_ERROR\n");
		switch (scsiqp->host_status) {
		case QHSTA_NO_ERROR:
			if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
				ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
				ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
						  SCSI_SENSE_BUFFERSIZE);
				/*
				 * Note: The 'status_byte()' macro used by
				 * target drivers defined in scsi.h shifts the
				 * status byte returned by host drivers right
				 * by 1 bit. This is why target drivers also
				 * use right shifted status byte definitions.
				 * For instance target drivers use
				 * CHECK_CONDITION, defined to 0x1, instead of
				 * the SCSI defined check condition value of
				 * 0x2. Host drivers are supposed to return
				 * the status byte as it is defined by SCSI.
				 */
				scp->result = DRIVER_BYTE(DRIVER_SENSE) |
				    STATUS_BYTE(scsiqp->scsi_status);
			} else {
				scp->result = STATUS_BYTE(scsiqp->scsi_status);
			}
			break;

		default:
			/* Some other QHSTA error occurred. */
			ASC_DBG(1, "host_status 0x%x\n", scsiqp->host_status);
			scp->result = HOST_BYTE(DID_BAD_TARGET);
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
		scp->result =
		    HOST_BYTE(DID_ABORT) | STATUS_BYTE(scsiqp->scsi_status);
		break;

	default:
		ASC_DBG(1, "done_status 0x%x\n", scsiqp->done_status);
		scp->result =
		    HOST_BYTE(DID_ERROR) | STATUS_BYTE(scsiqp->scsi_status);
		break;
	}

	/*
	 * If the 'init_tidmask' bit isn't already set for the target and the
	 * current request finished normally, then set the bit for the target
	 * to indicate that a device is present.
	 */
	if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 &&
	    scsiqp->done_status == QD_NO_ERROR &&
	    scsiqp->host_status == QHSTA_NO_ERROR) {
		boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id);
	}

	asc_scsi_done(scp);

	/*
	 * Free all 'adv_sgblk_t' structures allocated for the request.
	 */
	while ((sgblkp = reqp->sgblkp) != NULL) {
		/* Remove 'sgblkp' from the request list. */
		reqp->sgblkp = sgblkp->next_sgblkp;

		/* Add 'sgblkp' to the board free list. */
		sgblkp->next_sgblkp = boardp->adv_sgblkp;
		boardp->adv_sgblkp = sgblkp;
	}

	/*
	 * Free the adv_req_t structure used with the command by adding
	 * it back to the board free list.
	 */
	reqp->next_reqp = boardp->adv_reqp;
	boardp->adv_reqp = reqp;

	ASC_DBG(1, "done\n");
}

/*
 * Adv Library Interrupt Service Routine
 *
 * This function is called by a driver's interrupt service routine.
 * The function disables and re-enables interrupts.
 *
 * When a microcode idle command is completed, the ADV_DVC_VAR
 * 'idle_cmd_done' field is set to ADV_TRUE.
 *
 * Note: AdvISR() can be called when interrupts are disabled or even
 * when there is no hardware interrupt condition present.
It will
 * always check for completed idle commands and microcode requests.
 * This is an important feature that shouldn't be changed because it
 * allows commands to be completed from polling mode loops.
 *
 * Return:
 *   ADV_TRUE(1) - interrupt was pending
 *   ADV_FALSE(0) - no interrupt was pending
 */
static int AdvISR(ADV_DVC_VAR *asc_dvc)
{
	AdvPortAddr iop_base;
	uchar int_stat;
	ushort target_bit;
	ADV_CARR_T *free_carrp;
	ADV_VADDR irq_next_vpa;
	ADV_SCSI_REQ_Q *scsiq;

	iop_base = asc_dvc->iop_base;

	/* Reading the register clears the interrupt. */
	int_stat = AdvReadByteRegister(iop_base, IOPB_INTR_STATUS_REG);

	if ((int_stat & (ADV_INTR_STATUS_INTRA | ADV_INTR_STATUS_INTRB |
			 ADV_INTR_STATUS_INTRC)) == 0) {
		return ADV_FALSE;
	}

	/*
	 * Notify the driver of an asynchronous microcode condition by
	 * calling the adv_async_callback function. The function
	 * is passed the microcode ASC_MC_INTRB_CODE byte value.
	 */
	if (int_stat & ADV_INTR_STATUS_INTRB) {
		uchar intrb_code;

		AdvReadByteLram(iop_base, ASC_MC_INTRB_CODE, intrb_code);

		if (asc_dvc->chip_type == ADV_CHIP_ASC3550 ||
		    asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
			/*
			 * Nudge the microcode with a tickle register write
			 * if it reported the carrier-ready failure while
			 * requests are still outstanding.
			 */
			if (intrb_code == ADV_ASYNC_CARRIER_READY_FAILURE &&
			    asc_dvc->carr_pending_cnt != 0) {
				AdvWriteByteRegister(iop_base,
						     IOPB_TICKLE, ADV_TICKLE_A);
				if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
					AdvWriteByteRegister(iop_base,
							     IOPB_TICKLE,
							     ADV_TICKLE_NOP);
				}
			}
		}

		adv_async_callback(asc_dvc, intrb_code);
	}

	/*
	 * Check if the IRQ stopper carrier contains a completed request.
	 */
	while (((irq_next_vpa =
		 le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ASC_RQ_DONE) != 0) {
		/*
		 * Get a pointer to the newly completed ADV_SCSI_REQ_Q structure.
		 * The RISC will have set 'areq_vpa' to a virtual address.
		 *
		 * The firmware will have copied the ASC_SCSI_REQ_Q.scsiq_ptr
		 * field to the carrier ADV_CARR_T.areq_vpa field. The conversion
		 * below complements the conversion of ASC_SCSI_REQ_Q.scsiq_ptr'
		 * in AdvExeScsiQueue().
		 */
		scsiq = (ADV_SCSI_REQ_Q *)
		    ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->areq_vpa));

		/*
		 * Request finished with good status and the queue was not
		 * DMAed to host memory by the firmware. Set all status fields
		 * to indicate good status.
		 */
		if ((irq_next_vpa & ASC_RQ_GOOD) != 0) {
			scsiq->done_status = QD_NO_ERROR;
			scsiq->host_status = scsiq->scsi_status = 0;
			scsiq->data_cnt = 0L;
		}

		/*
		 * Advance the stopper pointer to the next carrier
		 * ignoring the lower four bits. Free the previous
		 * stopper carrier.
		 */
		free_carrp = asc_dvc->irq_sp;
		asc_dvc->irq_sp = (ADV_CARR_T *)
		    ADV_U32_TO_VADDR(ASC_GET_CARRP(irq_next_vpa));

		free_carrp->next_vpa =
		    cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist));
		asc_dvc->carr_freelist = free_carrp;
		asc_dvc->carr_pending_cnt--;

		target_bit = ADV_TID_TO_TIDMASK(scsiq->target_id);

		/*
		 * Clear request microcode control flag.
		 */
		scsiq->cntl = 0;

		/*
		 * Notify the driver of the completed request by passing
		 * the ADV_SCSI_REQ_Q pointer to its callback function.
		 */
		scsiq->a_flag |= ADV_SCSIQ_DONE;
		adv_isr_callback(asc_dvc, scsiq);
		/*
		 * Note: After the driver callback function is called, 'scsiq'
		 * can no longer be referenced.
		 *
		 * Fall through and continue processing other completed
		 * requests...
		 */
	}
	return ADV_TRUE;
}

/*
 * Record the first Asc Library error code in the device structure and
 * mirror it into LRAM at ASCV_ASCDVC_ERR_CODE_W. Later error codes do
 * not overwrite an already recorded one. Returns 'err_code' unchanged.
 */
static int AscSetLibErrorCode(ASC_DVC_VAR *asc_dvc, ushort err_code)
{
	if (asc_dvc->err_code == 0) {
		asc_dvc->err_code = err_code;
		AscWriteLramWord(asc_dvc->iop_base, ASCV_ASCDVC_ERR_CODE_W,
				 err_code);
	}
	return err_code;
}

/*
 * Acknowledge the chip interrupt. Spins (bounded) until the RISC clears
 * its interrupt-generation flag, then pulses CIW_INT_ACK while the
 * ASC_HOST_FLAG_ACK_INT bit is temporarily set in the LRAM host flag;
 * the original host flag value is restored on exit.
 */
static void AscAckInterrupt(PortAddr iop_base)
{
	uchar host_flag;
	uchar risc_flag;
	ushort loop;

	loop = 0;
	do {
		risc_flag = AscReadLramByte(iop_base, ASCV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			/* Give up waiting; proceed with the acknowledge. */
			break;
		}
	} while ((risc_flag & ASC_RISC_FLAG_GEN_INT) != 0);
	host_flag =
	    AscReadLramByte(iop_base,
			    ASCV_HOST_FLAG_B) & (~ASC_HOST_FLAG_ACK_INT);
	AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
			 (uchar)(host_flag | ASC_HOST_FLAG_ACK_INT));
	AscSetChipStatus(iop_base, CIW_INT_ACK);
	loop = 0;
	while (AscGetChipStatus(iop_base) & CSW_INT_PENDING) {
		/* Re-issue the acknowledge a few times if it did not stick. */
		AscSetChipStatus(iop_base, CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}
	AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
}

/*
 * Map a synchronous transfer period value to an index into the chip's
 * SDTR period table ('sdtr_period_tbl'). Returns max_sdtr_index + 1
 * when 'syn_time' is slower than the largest table entry.
 */
static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time)
{
	const uchar *period_table;
	int max_index;
	int min_index;
	int i;

	period_table = asc_dvc->sdtr_period_tbl;
	max_index = (int)asc_dvc->max_sdtr_index;
	min_index = (int)asc_dvc->min_sdtr_index;
	if ((syn_time <= period_table[max_index])) {
		for (i = min_index; i < (max_index - 1); i++) {
			if (syn_time <= period_table[i]) {
				return (uchar)i;
			}
		}
		return (uchar)max_index;
	} else {
		return (uchar)(max_index + 1);
	}
}

/*
 * Build an extended SDTR message in LRAM at ASCV_MSGOUT_BEG for the
 * microcode to send. Returns the combined SDTR data byte
 * ((period index << 4) | offset), or 0 when the requested period falls
 * outside the period table (in which case the offset is forced to 0).
 */
static uchar
AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset)
{
	EXT_MSG sdtr_buf;
	uchar sdtr_period_index;
	PortAddr iop_base;

	iop_base = asc_dvc->iop_base;
	sdtr_buf.msg_type = EXTENDED_MESSAGE;
	sdtr_buf.msg_len = MS_SDTR_LEN;
	sdtr_buf.msg_req = EXTENDED_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ASC_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
	if (sdtr_period_index <= asc_dvc->max_sdtr_index) {
		AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
					(uchar *)&sdtr_buf,
					sizeof(EXT_MSG) >> 1);
		return ((sdtr_period_index << 4) | sdtr_offset);
	} else {
		/* Period out of range: request asynchronous (offset 0). */
		sdtr_buf.req_ack_offset = 0;
		AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
					(uchar *)&sdtr_buf,
					sizeof(EXT_MSG) >> 1);
		return 0;
	}
}

/*
 * Combine the SDTR period-table index and offset into the chip's SDTR
 * data byte. Returns 0xFF when the period is not representable in the
 * period table.
 */
static uchar
AscCalSDTRData(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar syn_offset)
{
	uchar byte;
	uchar sdtr_period_ix;

	sdtr_period_ix = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
	if (sdtr_period_ix > asc_dvc->max_sdtr_index)
		return 0xFF;
	byte = (sdtr_period_ix << 4) | (syn_offset & ASC_SYN_MAX_OFFSET);
	return byte;
}

/*
 * Program the synchronous transfer register for one target ID. The
 * register is selected by writing the ID to the bank-1 device ID
 * register; the write is verified by reading the register back, and
 * the original device ID is restored before returning. Returns TRUE
 * on success, FALSE if the ID selection or the register write failed.
 */
static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data)
{
	ASC_SCSI_BIT_ID_TYPE org_id;
	int i;
	int sta = TRUE;

	AscSetBank(iop_base, 1);
	org_id = AscReadChipDvcID(iop_base);
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if (org_id == (0x01 << i))
			break;
	}
	org_id = (ASC_SCSI_BIT_ID_TYPE) i;
	AscWriteChipDvcID(iop_base, id);
	if (AscReadChipDvcID(iop_base) == (0x01 << id)) {
		AscSetBank(iop_base, 0);
		AscSetChipSyn(iop_base, sdtr_data);
		if (AscGetChipSyn(iop_base) != sdtr_data) {
			sta = FALSE;
		}
	} else {
		sta = FALSE;
	}
	AscSetBank(iop_base, 1);
	AscWriteChipDvcID(iop_base, org_id);
	AscSetBank(iop_base, 0);
	return (sta);
}

/*
 * Set the chip synchronous transfer register for 'tid_no' and record
 * the same SDTR data in the microcode's per-TID done area.
 */
static void AscSetChipSDTR(PortAddr iop_base, uchar sdtr_data, uchar tid_no)
{
	AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
	AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data);
}

/*
 * Handle a microcode halt condition. The halt code is read from LRAM
 * (ASCV_HALTCODE_W) and dispatched; each case performs its fixup
 * (SDTR/WDTR message handling, check-condition retry setup, queue-full
 * throttling, optional long SG list continuation) and then clears the
 * halt code so the RISC can be restarted. Always returns 0.
 */
static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
{
	EXT_MSG ext_msg;
	EXT_MSG out_msg;
	ushort halt_q_addr;
	int sdtr_accept;
	ushort int_halt_code;
	ASC_SCSI_BIT_ID_TYPE scsi_busy;
	ASC_SCSI_BIT_ID_TYPE target_id;
	PortAddr iop_base;
	uchar tag_code;
	uchar q_status;
	uchar halt_qp;
	uchar sdtr_data;
	uchar target_ix;
	uchar q_cntl, tid_no;
	uchar cur_dvc_qng;
	uchar asyn_sdtr;
	uchar scsi_status;
	struct asc_board *boardp;

	BUG_ON(!asc_dvc->drv_ptr);
	boardp = asc_dvc->drv_ptr;

	iop_base = asc_dvc->iop_base;
	int_halt_code = AscReadLramWord(iop_base, ASCV_HALTCODE_W);

	halt_qp = AscReadLramByte(iop_base, ASCV_CURCDB_B);
	halt_q_addr = ASC_QNO_TO_QADDR(halt_qp);
	target_ix =
	    AscReadLramByte(iop_base,
			    (ushort)(halt_q_addr +
				     (ushort)ASC_SCSIQ_B_TARGET_IX));
	q_cntl = AscReadLramByte(iop_base,
				 (ushort)(halt_q_addr +
					  (ushort)ASC_SCSIQ_B_CNTL));
	tid_no = ASC_TIX_TO_TID(target_ix);
	target_id = (uchar)ASC_TID_TO_TARGET_ID(tid_no);
	if (asc_dvc->pci_fix_asyn_xfer & target_id) {
		/* Target needs the PCI rev A/B asynchronous transfer fix. */
		asyn_sdtr = ASYN_SDTR_DATA_FIX_PCI_REV_AB;
	} else {
		asyn_sdtr = 0;
	}
	if (int_halt_code == ASC_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		if (asc_dvc->pci_fix_asyn_xfer & target_id) {
			AscSetChipSDTR(iop_base, 0, tid_no);
			boardp->sdtr_data[tid_no] = 0;
		}
		AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
		return (0);
	} else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		if (asc_dvc->pci_fix_asyn_xfer & target_id) {
			AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
			boardp->sdtr_data[tid_no] = asyn_sdtr;
		}
		AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
		return (0);
	} else if (int_halt_code == ASC_HALT_EXTMSG_IN) {
		/* An extended message was received from the target. */
		AscMemWordCopyPtrFromLram(iop_base,
					  ASCV_MSGIN_BEG,
					  (uchar *)&ext_msg,
					  sizeof(EXT_MSG) >> 1);

		if (ext_msg.msg_type == EXTENDED_MESSAGE &&
		    ext_msg.msg_req == EXTENDED_SDTR &&
		    ext_msg.msg_len == MS_SDTR_LEN) {
			sdtr_accept = TRUE;
			if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) {
				/* Clamp offset; negotiation must be redone. */
				sdtr_accept = FALSE;
				ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET;
			}
			if ((ext_msg.xfer_period <
			     asc_dvc->sdtr_period_tbl[asc_dvc->min_sdtr_index])
			    || (ext_msg.xfer_period >
				asc_dvc->sdtr_period_tbl[asc_dvc->
							 max_sdtr_index])) {
				sdtr_accept = FALSE;
				ext_msg.xfer_period =
				    asc_dvc->sdtr_period_tbl[asc_dvc->
							     min_sdtr_index];
			}
			if (sdtr_accept) {
				sdtr_data =
				    AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
						   ext_msg.req_ack_offset);
				if ((sdtr_data == 0xFF)) {
					/* Period not representable: fall back
					 * to asynchronous and renegotiate. */
					q_cntl |= QC_MSG_OUT;
					asc_dvc->init_sdtr &= ~target_id;
					asc_dvc->sdtr_done &= ~target_id;
					AscSetChipSDTR(iop_base, asyn_sdtr,
						       tid_no);
					boardp->sdtr_data[tid_no] = asyn_sdtr;
				}
			}
			if (ext_msg.req_ack_offset == 0) {
				/* Zero offset means asynchronous transfers. */
				q_cntl &= ~QC_MSG_OUT;
				asc_dvc->init_sdtr &= ~target_id;
				asc_dvc->sdtr_done &= ~target_id;
				AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
			} else {
				if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
					/* Target accepted our pending SDTR. */
					q_cntl &= ~QC_MSG_OUT;
					asc_dvc->sdtr_done |= target_id;
					asc_dvc->init_sdtr |= target_id;
					asc_dvc->pci_fix_asyn_xfer &=
					    ~target_id;
					sdtr_data =
					    AscCalSDTRData(asc_dvc,
							   ext_msg.xfer_period,
							   ext_msg.
							   req_ack_offset);
					AscSetChipSDTR(iop_base, sdtr_data,
						       tid_no);
					boardp->sdtr_data[tid_no] = sdtr_data;
				} else {
					/* Target initiated SDTR: respond. */
					q_cntl |= QC_MSG_OUT;
					AscMsgOutSDTR(asc_dvc,
						      ext_msg.xfer_period,
						      ext_msg.req_ack_offset);
					asc_dvc->pci_fix_asyn_xfer &=
					    ~target_id;
					sdtr_data =
					    AscCalSDTRData(asc_dvc,
							   ext_msg.xfer_period,
							   ext_msg.
							   req_ack_offset);
					AscSetChipSDTR(iop_base, sdtr_data,
						       tid_no);
					boardp->sdtr_data[tid_no] = sdtr_data;
					asc_dvc->sdtr_done |= target_id;
					asc_dvc->init_sdtr |= target_id;
				}
			}

			AscWriteLramByte(iop_base,
					 (ushort)(halt_q_addr +
						  (ushort)ASC_SCSIQ_B_CNTL),
					 q_cntl);
			AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
			return (0);
		} else if (ext_msg.msg_type == EXTENDED_MESSAGE &&
			   ext_msg.msg_req == EXTENDED_WDTR &&
			   ext_msg.msg_len == MS_WDTR_LEN) {
			/* Answer a WDTR request with narrow (width 0). */
			ext_msg.wdtr_width = 0;
			AscMemWordCopyPtrToLram(iop_base,
						ASCV_MSGOUT_BEG,
						(uchar *)&ext_msg,
						sizeof(EXT_MSG) >> 1);
			q_cntl |= QC_MSG_OUT;
			AscWriteLramByte(iop_base,
					 (ushort)(halt_q_addr +
						  (ushort)ASC_SCSIQ_B_CNTL),
					 q_cntl);
			AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
			return (0);
		} else {
			/* Any other extended message is rejected. */
			ext_msg.msg_type = MESSAGE_REJECT;
			AscMemWordCopyPtrToLram(iop_base,
						ASCV_MSGOUT_BEG,
						(uchar *)&ext_msg,
						sizeof(EXT_MSG) >> 1);
			q_cntl |= QC_MSG_OUT;
			AscWriteLramByte(iop_base,
					 (ushort)(halt_q_addr +
						  (ushort)ASC_SCSIQ_B_CNTL),
					 q_cntl);
			AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
			return (0);
		}
	} else if (int_halt_code == ASC_HALT_CHK_CONDITION) {
		/* Prepare the halted queue to fetch request sense data. */
		q_cntl |= QC_REQ_SENSE;

		if ((asc_dvc->init_sdtr & target_id) != 0) {
			/* Renegotiate SDTR before the request sense. */
			asc_dvc->sdtr_done &= ~target_id;

			sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
			q_cntl |= QC_MSG_OUT;
			AscMsgOutSDTR(asc_dvc,
				      asc_dvc->
				      sdtr_period_tbl[(sdtr_data >> 4) &
						      (uchar)(asc_dvc->
							      max_sdtr_index -
							      1)],
				      (uchar)(sdtr_data & (uchar)
					      ASC_SYN_MAX_OFFSET));
		}

		AscWriteLramByte(iop_base,
				 (ushort)(halt_q_addr +
					  (ushort)ASC_SCSIQ_B_CNTL), q_cntl);

		tag_code = AscReadLramByte(iop_base,
					   (ushort)(halt_q_addr + (ushort)
						    ASC_SCSIQ_B_TAG_CODE));
		tag_code &= 0xDC;
		if ((asc_dvc->pci_fix_asyn_xfer & target_id)
		    && !(asc_dvc->pci_fix_asyn_xfer_always & target_id)
		    ) {
			tag_code |= (ASC_TAG_FLAG_DISABLE_DISCONNECT
				     | ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		AscWriteLramByte(iop_base,
				 (ushort)(halt_q_addr +
					  (ushort)ASC_SCSIQ_B_TAG_CODE),
				 tag_code);

		/* Requeue the request so the microcode re-executes it. */
		q_status = AscReadLramByte(iop_base,
					   (ushort)(halt_q_addr + (ushort)
						    ASC_SCSIQ_B_STATUS));
		q_status |= (QS_READY | QS_BUSY);
		AscWriteLramByte(iop_base,
				 (ushort)(halt_q_addr +
					  (ushort)ASC_SCSIQ_B_STATUS),
				 q_status);

		/*
		 * Clear the target's busy bit so the request sense can be
		 * issued to it.
		 */
		scsi_busy = AscReadLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B);
		scsi_busy &= ~target_id;
		AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy);

		AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
		return (0);
	} else if (int_halt_code == ASC_HALT_SDTR_REJECTED) {
		/* The target rejected our outgoing SDTR message. */
		AscMemWordCopyPtrFromLram(iop_base,
					  ASCV_MSGOUT_BEG,
					  (uchar *)&out_msg,
					  sizeof(EXT_MSG) >> 1);

		if ((out_msg.msg_type == EXTENDED_MESSAGE) &&
		    (out_msg.msg_len == MS_SDTR_LEN) &&
		    (out_msg.msg_req == EXTENDED_SDTR)) {
			/* Fall back to asynchronous transfers. */
			asc_dvc->init_sdtr &= ~target_id;
			asc_dvc->sdtr_done &= ~target_id;
			AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
			boardp->sdtr_data[tid_no] = asyn_sdtr;
		}
		q_cntl &= ~QC_MSG_OUT;
		AscWriteLramByte(iop_base,
				 (ushort)(halt_q_addr +
					  (ushort)ASC_SCSIQ_B_CNTL), q_cntl);
		AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
		return (0);
	} else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) {
		/* The target reported QUEUE FULL (or BUSY). */
		scsi_status = AscReadLramByte(iop_base,
					      (ushort)((ushort)halt_q_addr +
						       (ushort)
						       ASC_SCSIQ_SCSI_STATUS));
		cur_dvc_qng =
		    AscReadLramByte(iop_base,
				    (ushort)((ushort)ASC_QADR_BEG +
					     (ushort)target_ix));
		if ((cur_dvc_qng > 0) && (asc_dvc->cur_dvc_qng[tid_no] > 0)) {
			/* Mark the target busy until commands complete. */
			scsi_busy = AscReadLramByte(iop_base,
						    (ushort)ASCV_SCSIBUSY_B);
			scsi_busy |= target_id;
			AscWriteLramByte(iop_base,
					 (ushort)ASCV_SCSIBUSY_B, scsi_busy);
			asc_dvc->queue_full_or_busy |= target_id;

			if (scsi_status == SAM_STAT_TASK_SET_FULL) {
				if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) {
					cur_dvc_qng -= 1;
					asc_dvc->max_dvc_qng[tid_no] =
					    cur_dvc_qng;
					AscWriteLramByte(iop_base,
							 (ushort)((ushort)
								  ASCV_MAX_DVC_QNG_BEG
								  + (ushort)
								  tid_no),
							 cur_dvc_qng);

					/*
					 * Set the device queue depth to the
					 * number of active requests when the
					 * QUEUE FULL condition was
					 * encountered.
					 */
					boardp->queue_full |= target_id;
					boardp->queue_full_cnt[tid_no] =
					    cur_dvc_qng;
				}
			}
		}
		AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
		return (0);
	}
#if CC_VERY_LONG_SG_LIST
	else if (int_halt_code == ASC_HALT_HOST_COPY_SG_LIST_TO_RISC) {
		uchar q_no;
		ushort q_addr;
		uchar sg_wk_q_no;
		uchar first_sg_wk_q_no;
		ASC_SCSI_Q *scsiq;	/* Ptr to driver request. */
		ASC_SG_HEAD *sg_head;	/* Ptr to driver SG request. */
		ASC_SG_LIST_Q scsi_sg_q;	/* Structure written to queue. */
		ushort sg_list_dwords;
		ushort sg_entry_cnt;
		uchar next_qp;
		int i;

		q_no = AscReadLramByte(iop_base, (ushort)ASCV_REQ_SG_LIST_QP);
		if (q_no == ASC_QLINK_END)
			return 0;

		q_addr = ASC_QNO_TO_QADDR(q_no);

		/*
		 * Convert the request's SRB pointer to a host ASC_SCSI_REQ
		 * structure pointer using a macro provided by the driver.
		 * The ASC_SCSI_REQ pointer provides a pointer to the
		 * host ASC_SG_HEAD structure.
		 */
		/* Read request's SRB pointer. */
		scsiq = (ASC_SCSI_Q *)
		    ASC_SRB2SCSIQ(ASC_U32_TO_VADDR(AscReadLramDWord(iop_base,
								    (ushort)
								    (q_addr +
								     ASC_SCSIQ_D_SRBPTR))));

		/*
		 * Get request's first and working SG queue.
		 */
		sg_wk_q_no = AscReadLramByte(iop_base,
					     (ushort)(q_addr +
						      ASC_SCSIQ_B_SG_WK_QP));

		first_sg_wk_q_no = AscReadLramByte(iop_base,
						   (ushort)(q_addr +
							    ASC_SCSIQ_B_FIRST_SG_WK_QP));

		/*
		 * Reset request's working SG queue back to the
		 * first SG queue.
		 */
		AscWriteLramByte(iop_base,
				 (ushort)(q_addr +
					  (ushort)ASC_SCSIQ_B_SG_WK_QP),
				 first_sg_wk_q_no);

		sg_head = scsiq->sg_head;

		/*
		 * Set sg_entry_cnt to the number of SG elements
		 * that will be completed on this interrupt.
		 *
		 * Note: The allocated SG queues contain ASC_MAX_SG_LIST - 1
		 * SG elements. The data_cnt and data_addr fields which
		 * add 1 to the SG element capacity are not used when
		 * restarting SG handling after a halt.
		 */
		if (scsiq->remain_sg_entry_cnt > (ASC_MAX_SG_LIST - 1)) {
			sg_entry_cnt = ASC_MAX_SG_LIST - 1;

			/*
			 * Keep track of remaining number of SG elements that
			 * will need to be handled on the next interrupt.
			 */
			scsiq->remain_sg_entry_cnt -= (ASC_MAX_SG_LIST - 1);
		} else {
			sg_entry_cnt = scsiq->remain_sg_entry_cnt;
			scsiq->remain_sg_entry_cnt = 0;
		}

		/*
		 * Copy SG elements into the list of allocated SG queues.
		 *
		 * Last index completed is saved in scsiq->next_sg_index.
		 */
		next_qp = first_sg_wk_q_no;
		q_addr = ASC_QNO_TO_QADDR(next_qp);
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			scsi_sg_q.seq_no = i + 1;
			if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
				sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2);
				sg_entry_cnt -= ASC_SG_LIST_PER_Q;
				/*
				 * After very first SG queue RISC FW uses next
				 * SG queue first element then checks sg_list_cnt
				 * against zero and then decrements, so set
				 * sg_list_cnt 1 less than number of SG elements
				 * in each SG queue.
				 */
				scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1;
				scsi_sg_q.sg_cur_list_cnt =
				    ASC_SG_LIST_PER_Q - 1;
			} else {
				/*
				 * This is the last SG queue in the list of
				 * allocated SG queues. If there are more
				 * SG elements than will fit in the allocated
				 * queues, then set the QCSG_SG_XFER_MORE flag.
				 */
				if (scsiq->remain_sg_entry_cnt != 0) {
					scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
				} else {
					scsi_sg_q.cntl |= QCSG_SG_XFER_END;
				}
				/* equals sg_entry_cnt * 2 */
				sg_list_dwords = sg_entry_cnt << 1;
				scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1;
				scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1;
				sg_entry_cnt = 0;
			}

			scsi_sg_q.q_no = next_qp;
			AscMemWordCopyPtrToLram(iop_base,
						q_addr + ASC_SCSIQ_SGHD_CPY_BEG,
						(uchar *)&scsi_sg_q,
						sizeof(ASC_SG_LIST_Q) >> 1);

			AscMemDWordCopyPtrToLram(iop_base,
						 q_addr + ASC_SGQ_LIST_BEG,
						 (uchar *)&sg_head->
						 sg_list[scsiq->next_sg_index],
						 sg_list_dwords);

			scsiq->next_sg_index += ASC_SG_LIST_PER_Q;

			/*
			 * If the just completed SG queue contained the
			 * last SG element, then no more SG queues need
			 * to be written.
			 */
			if (scsi_sg_q.cntl & QCSG_SG_XFER_END) {
				break;
			}

			next_qp = AscReadLramByte(iop_base,
						  (ushort)(q_addr +
							   ASC_SCSIQ_B_FWD));
			q_addr = ASC_QNO_TO_QADDR(next_qp);
		}

		/*
		 * Clear the halt condition so the RISC will be restarted
		 * after the return.
		 */
		AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
		return (0);
	}
#endif /* CC_VERY_LONG_SG_LIST */
	return (0);
}

/*
 * void
 * DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words)
 *
 * Calling/Exit State:
 *    none
 *
 * Description:
 *     Input an ASC_QDONE_INFO structure from the chip
 */
static void
DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words)
{
	int i;
	ushort word;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 2 * words; i += 2) {
		if (i == 10) {
			/*
			 * NOTE(review): the word at byte offset 10 is
			 * deliberately skipped here — presumably a field of
			 * ASC_QDONE_INFO that the caller fills in itself;
			 * confirm against the structure layout.
			 */
			continue;
		}
		word = inpw(iop_base + IOP_RAM_DATA);
		inbuf[i] = word & 0xff;
		inbuf[i + 1] = (word >> 8) & 0xff;
	}
	ASC_DBG_PRT_HEX(2, "DvcGetQinfo", inbuf, 2 * words);
}

/*
 * Copy a completed request's done information from chip LRAM at
 * 'q_addr' into *scsiq, including status/control/sense-length bytes
 * and the remaining transfer count (masked by 'max_dma_count').
 * Returns the number of SG queues used by the request.
 */
static uchar
_AscCopyLramScsiDoneQ(PortAddr iop_base,
		      ushort q_addr,
		      ASC_QDONE_INFO *scsiq, ASC_DCNT max_dma_count)
{
	ushort _val;
	uchar sg_queue_cnt;

	DvcGetQinfo(iop_base, q_addr + ASC_SCSIQ_DONE_INFO_BEG,
		    (uchar *)scsiq,
		    (sizeof(ASC_SCSIQ_2) + sizeof(ASC_SCSIQ_3)) / 2);

	_val = AscReadLramWord(iop_base,
			       (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS));
	scsiq->q_status = (uchar)_val;
	scsiq->q_no = (uchar)(_val >> 8);
	_val = AscReadLramWord(iop_base,
			       (ushort)(q_addr + (ushort)ASC_SCSIQ_B_CNTL));
	scsiq->cntl = (uchar)_val;
	sg_queue_cnt = (uchar)(_val >> 8);
	_val = AscReadLramWord(iop_base,
			       (ushort)(q_addr +
					(ushort)ASC_SCSIQ_B_SENSE_LEN));
	scsiq->sense_len = (uchar)_val;
	scsiq->extra_bytes = (uchar)(_val >> 8);

	/*
	 * Read high word of remain bytes from alternate location.
	 */
	scsiq->remain_bytes = (((ADV_DCNT)AscReadLramWord(iop_base,
							  (ushort)(q_addr +
								   (ushort)
								   ASC_SCSIQ_W_ALT_DC1)))
			       << 16);
	/*
	 * Read low word of remain bytes from original location.
	 */
	scsiq->remain_bytes += AscReadLramWord(iop_base,
					       (ushort)(q_addr + (ushort)
							ASC_SCSIQ_DW_REMAIN_XFER_CNT));

	scsiq->remain_bytes &= max_dma_count;

	return sg_queue_cnt;
}

/*
 * asc_isr_callback() - Second Level Interrupt Handler called by AscISR().
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
{
	struct asc_board *boardp;
	struct scsi_cmnd *scp;
	struct Scsi_Host *shost;

	ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep);
	ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep);

	scp = advansys_srb_to_ptr(asc_dvc_varp, qdonep->d2.srb_ptr);
	if (!scp)
		return;

	ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);

	shost = scp->device->host;
	ASC_STATS(shost, callback);
	ASC_DBG(1, "shost 0x%p\n", shost);

	boardp = shost_priv(shost);
	BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var);

	dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
			 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	/*
	 * 'qdonep' contains the command's ending status.
	 */
	switch (qdonep->d3.done_stat) {
	case QD_NO_ERROR:
		ASC_DBG(2, "QD_NO_ERROR\n");
		scp->result = 0;

		/*
		 * Check for an underrun condition.
		 *
		 * If there was no error and an underrun condition, then
		 * return the number of underrun bytes.
		 */
		if (scsi_bufflen(scp) != 0 && qdonep->remain_bytes != 0 &&
		    qdonep->remain_bytes <= scsi_bufflen(scp)) {
			ASC_DBG(1, "underrun condition %u bytes\n",
				(unsigned)qdonep->remain_bytes);
			scsi_set_resid(scp, qdonep->remain_bytes);
		}
		break;

	case QD_WITH_ERROR:
		ASC_DBG(2, "QD_WITH_ERROR\n");
		switch (qdonep->d3.host_stat) {
		case QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) {
				ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
				ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
						  SCSI_SENSE_BUFFERSIZE);
				/*
				 * Note: The 'status_byte()' macro used by
				 * target drivers defined in scsi.h shifts the
				 * status byte returned by host drivers right
				 * by 1 bit. This is why target drivers also
				 * use right shifted status byte definitions.
				 * For instance target drivers use
				 * CHECK_CONDITION, defined to 0x1, instead of
				 * the SCSI defined check condition value of
				 * 0x2. Host drivers are supposed to return
				 * the status byte as it is defined by SCSI.
				 */
				scp->result = DRIVER_BYTE(DRIVER_SENSE) |
				    STATUS_BYTE(qdonep->d3.scsi_stat);
			} else {
				scp->result = STATUS_BYTE(qdonep->d3.scsi_stat);
			}
			break;

		default:
			/* QHSTA error occurred */
			ASC_DBG(1, "host_stat 0x%x\n", qdonep->d3.host_stat);
			scp->result = HOST_BYTE(DID_BAD_TARGET);
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
		scp->result =
		    HOST_BYTE(DID_ABORT) | MSG_BYTE(qdonep->d3.
						    scsi_msg) |
		    STATUS_BYTE(qdonep->d3.scsi_stat);
		break;

	default:
		ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat);
		scp->result =
		    HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3.
						    scsi_msg) |
		    STATUS_BYTE(qdonep->d3.scsi_stat);
		break;
	}

	/*
	 * If the 'init_tidmask' bit isn't already set for the target and the
	 * current request finished normally, then set the bit for the target
	 * to indicate that a device is present.
	 */
	if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 &&
	    qdonep->d3.done_stat == QD_NO_ERROR &&
	    qdonep->d3.host_stat == QHSTA_NO_ERROR) {
		boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id);
	}

	asc_scsi_done(scp);
}

/*
 * Service one entry from the microcode done queue.
 *
 * Return values (as used by AscISR()):
 *   1    - a request was completed and its callback run
 *   0x11 - the entry had no SRB pointer or was aborted
 *   0x80 - fatal queue error (corrupted SG links or queue count)
 *   0    - the done queue was empty
 */
static int AscIsrQDone(ASC_DVC_VAR *asc_dvc)
{
	uchar next_qp;
	uchar n_q_used;
	uchar sg_list_qp;
	uchar sg_queue_cnt;
	uchar q_cnt;
	uchar done_q_tail;
	uchar tid_no;
	ASC_SCSI_BIT_ID_TYPE scsi_busy;
	ASC_SCSI_BIT_ID_TYPE target_id;
	PortAddr iop_base;
	ushort q_addr;
	ushort sg_q_addr;
	uchar cur_target_qng;
	ASC_QDONE_INFO scsiq_buf;
	ASC_QDONE_INFO *scsiq;
	int false_overrun;

	iop_base = asc_dvc->iop_base;
	n_q_used = 1;
	scsiq = (ASC_QDONE_INFO *)&scsiq_buf;
	done_q_tail = (uchar)AscGetVarDoneQTail(iop_base);
	q_addr = ASC_QNO_TO_QADDR(done_q_tail);
	next_qp = AscReadLramByte(iop_base,
				  (ushort)(q_addr + (ushort)ASC_SCSIQ_B_FWD));
	if (next_qp != ASC_QLINK_END) {
		AscPutVarDoneQTail(iop_base, next_qp);
		q_addr = ASC_QNO_TO_QADDR(next_qp);
		sg_queue_cnt = _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq,
						     asc_dvc->max_dma_count);
		/* Mark the queue entry no longer ready/aborted in LRAM. */
		AscWriteLramByte(iop_base,
				 (ushort)(q_addr +
					  (ushort)ASC_SCSIQ_B_STATUS),
				 (uchar)(scsiq->
					 q_status & (uchar)~(QS_READY |
							     QS_ABORTED)));
		tid_no = ASC_TIX_TO_TID(scsiq->d2.target_ix);
		target_id = ASC_TIX_TO_TARGET_ID(scsiq->d2.target_ix);
		if ((scsiq->cntl & QC_SG_HEAD) != 0) {
			/* Walk and free the chained SG queues. */
			sg_q_addr = q_addr;
			sg_list_qp = next_qp;
			for (q_cnt = 0; q_cnt < sg_queue_cnt; q_cnt++) {
				sg_list_qp = AscReadLramByte(iop_base,
							     (ushort)(sg_q_addr
								      + (ushort)
								      ASC_SCSIQ_B_FWD));
				sg_q_addr = ASC_QNO_TO_QADDR(sg_list_qp);
				if (sg_list_qp == ASC_QLINK_END) {
					AscSetLibErrorCode(asc_dvc,
							   ASCQ_ERR_SG_Q_LINKS);
					scsiq->d3.done_stat = QD_WITH_ERROR;
					scsiq->d3.host_stat =
					    QHSTA_D_QDONE_SG_LIST_CORRUPTED;
					goto FATAL_ERR_QDONE;
				}
				AscWriteLramByte(iop_base,
						 (ushort)(sg_q_addr + (ushort)
							  ASC_SCSIQ_B_STATUS),
						 QS_FREE);
			}

			n_q_used = sg_queue_cnt + 1;
			AscPutVarDoneQTail(iop_base, sg_list_qp);
		}

		if (asc_dvc->queue_full_or_busy & target_id) {
			cur_target_qng =
			    AscReadLramByte(iop_base,
					    (ushort)((ushort)ASC_QADR_BEG +
						     (ushort)scsiq->d2.
						     target_ix));
			if (cur_target_qng < asc_dvc->max_dvc_qng[tid_no]) {
				/* Queue drained below the limit: unbusy. */
				scsi_busy = AscReadLramByte(iop_base, (ushort)
							    ASCV_SCSIBUSY_B);
				scsi_busy &= ~target_id;
				AscWriteLramByte(iop_base,
						 (ushort)ASCV_SCSIBUSY_B,
						 scsi_busy);
				asc_dvc->queue_full_or_busy &= ~target_id;
			}
		}

		if (asc_dvc->cur_total_qng >= n_q_used) {
			asc_dvc->cur_total_qng -= n_q_used;
			if (asc_dvc->cur_dvc_qng[tid_no] != 0) {
				asc_dvc->cur_dvc_qng[tid_no]--;
			}
		} else {
			AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CUR_QNG);
			scsiq->d3.done_stat = QD_WITH_ERROR;
			goto FATAL_ERR_QDONE;
		}
		if ((scsiq->d2.srb_ptr == 0UL) ||
		    ((scsiq->q_status & QS_ABORTED) != 0)) {
			return (0x11);
		} else if (scsiq->q_status == QS_DONE) {
			false_overrun = FALSE;
			if (scsiq->extra_bytes != 0) {
				scsiq->remain_bytes +=
				    (ADV_DCNT)scsiq->extra_bytes;
			}
			if (scsiq->d3.done_stat == QD_WITH_ERROR) {
				if (scsiq->d3.host_stat ==
				    QHSTA_M_DATA_OVER_RUN) {
					if ((scsiq->
					     cntl & (QC_DATA_IN | QC_DATA_OUT))
					    == 0) {
						/*
						 * No data phase was expected,
						 * so ignore the overrun.
						 */
						scsiq->d3.done_stat =
						    QD_NO_ERROR;
						scsiq->d3.host_stat =
						    QHSTA_NO_ERROR;
					} else if (false_overrun) {
						scsiq->d3.done_stat =
						    QD_NO_ERROR;
						scsiq->d3.host_stat =
						    QHSTA_NO_ERROR;
					}
				} else if (scsiq->d3.host_stat ==
					   QHSTA_M_HUNG_REQ_SCSI_BUS_RESET) {
					/* Hard-reset the chip after a hung
					 * request forced a bus reset. */
					AscStopChip(iop_base);
					AscSetChipControl(iop_base,
							  (uchar)(CC_SCSI_RESET
								  | CC_HALT));
					udelay(60);
					AscSetChipControl(iop_base, CC_HALT);
					AscSetChipStatus(iop_base,
							 CIW_CLR_SCSI_RESET_INT);
					AscSetChipStatus(iop_base, 0);
					AscSetChipControl(iop_base, 0);
				}
			}
			if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
				asc_isr_callback(asc_dvc, scsiq);
			} else {
				if ((AscReadLramByte(iop_base,
						     (ushort)(q_addr + (ushort)
							      ASC_SCSIQ_CDB_BEG))
				     == START_STOP)) {
					asc_dvc->unit_not_ready &= ~target_id;
					if (scsiq->d3.done_stat !=
					    QD_NO_ERROR) {
						asc_dvc->start_motor &=
						    ~target_id;
					}
				}
			}
			return (1);
		} else {
			AscSetLibErrorCode(asc_dvc, ASCQ_ERR_Q_STATUS);
 FATAL_ERR_QDONE:
			if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
				asc_isr_callback(asc_dvc, scsiq);
			}
			return (0x80);
		}
	}
	return (0);
}

/*
 * Narrow-board interrupt service routine.
 *
 * Saves the chip control register and LRAM address, drains the done
 * queue via AscIsrQDone(), then restores the saved state. Returns
 * TRUE if an interrupt was pending, FALSE if not, or ERR on a fatal
 * condition (re-entry, critical-section re-entry, microcode not
 * loaded, or a 0x80 fatal status from AscIsrQDone()).
 */
static int AscISR(ASC_DVC_VAR *asc_dvc)
{
	ASC_CS_TYPE chipstat;
	PortAddr iop_base;
	ushort saved_ram_addr;
	uchar ctrl_reg;
	uchar saved_ctrl_reg;
	int int_pending;
	int status;
	uchar host_flag;

	iop_base = asc_dvc->iop_base;
	int_pending = FALSE;

	if (AscIsIntPending(iop_base) == 0)
		return int_pending;

	if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) {
		return ERR;
	}
	if (asc_dvc->in_critical_cnt != 0) {
		AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL);
		return ERR;
	}
	if (asc_dvc->is_in_int) {
		AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY);
		return ERR;
	}
	asc_dvc->is_in_int = TRUE;
	ctrl_reg = AscGetChipControl(iop_base);
	/* Mask off reset/diagnostic bits before the register is restored on exit. */
	saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET |
				       CC_SINGLE_STEP | CC_DIAG | CC_TEST));
	chipstat = AscGetChipStatus(iop_base);
	if (chipstat & CSW_SCSI_RESET_LATCH) {
		if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) {
			int i = 10;
			int_pending = TRUE;
			asc_dvc->sdtr_done = 0;
			saved_ctrl_reg &= (uchar)(~CC_HALT);
			/* Wait (up to ~1s) for the SCSI reset to deassert. */
			while ((AscGetChipStatus(iop_base) &
				CSW_SCSI_RESET_ACTIVE) && (i-- > 0)) {
				mdelay(100);
			}
			AscSetChipControl(iop_base, (CC_CHIP_RESET | CC_HALT));
			AscSetChipControl(iop_base, CC_HALT);
			AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
			AscSetChipStatus(iop_base, 0);
			chipstat = AscGetChipStatus(iop_base);
		}
	}
	/* Save the LRAM address pointer; flag to the microcode that the host ISR is active. */
	saved_ram_addr = AscGetChipLramAddr(iop_base);
	host_flag = AscReadLramByte(iop_base,
				    ASCV_HOST_FLAG_B) &
	    (uchar)(~ASC_HOST_FLAG_IN_ISR);
	AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
			 (uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR));
	if ((chipstat & CSW_INT_PENDING) || (int_pending)) {
		AscAckInterrupt(iop_base);
		int_pending = TRUE;
		if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) {
			if (AscIsrChipHalted(asc_dvc) == ERR) {
				goto ISR_REPORT_QDONE_FATAL_ERROR;
			} else {
				saved_ctrl_reg &= (uchar)(~CC_HALT);
			}
		} else {
ISR_REPORT_QDONE_FATAL_ERROR:
			/* Drain the done queue; bit 0x01 of the status means "keep going". */
			if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) {
				while (((status =
					 AscIsrQDone(asc_dvc)) & 0x01) != 0) {
				}
			} else {
				do {
					if ((status =
					     AscIsrQDone(asc_dvc)) == 1) {
						break;
					}
				} while (status == 0x11);
			}
			if ((status & 0x80) != 0)
				int_pending = ERR;
		}
	}
	/* Restore microcode host flag, LRAM address, and chip control register. */
	AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
	AscSetChipLramAddr(iop_base, saved_ram_addr);
	AscSetChipControl(iop_base, saved_ctrl_reg);
	asc_dvc->is_in_int = FALSE;
	return int_pending;
}

/*
 * advansys_reset()
 *
 * Reset the bus associated with the command 'scp'.
 *
 * This function runs its own thread. Interrupts must be blocked but
 * sleeping is allowed and no locking other than for host structures is
 * required. Returns SUCCESS or FAILED.
 */
static int advansys_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *shost = scp->device->host;
	struct asc_board *boardp = shost_priv(shost);
	unsigned long flags;
	int status;
	int ret = SUCCESS;

	ASC_DBG(1, "0x%p\n", scp);

	ASC_STATS(shost, reset);

	scmd_printk(KERN_INFO, scp, "SCSI bus reset started...\n");

	if (ASC_NARROW_BOARD(boardp)) {
		ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;

		/* Reset the chip and SCSI bus. */
		ASC_DBG(1, "before AscInitAsc1000Driver()\n");
		status = AscInitAsc1000Driver(asc_dvc);

		/* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */
		if (asc_dvc->err_code || !asc_dvc->overrun_dma) {
			scmd_printk(KERN_INFO, scp, "SCSI bus reset error: "
				    "0x%x, status: 0x%x\n", asc_dvc->err_code,
				    status);
			ret = FAILED;
		} else if (status) {
			scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: "
				    "0x%x\n", status);
		} else {
			scmd_printk(KERN_INFO, scp, "SCSI bus reset "
				    "successful\n");
		}

		ASC_DBG(1, "after AscInitAsc1000Driver()\n");
		spin_lock_irqsave(shost->host_lock, flags);
	} else {
		/*
		 * If the suggest reset bus flags are set, then reset the bus.
		 * Otherwise only reset the device.
		 */
		ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;

		/*
		 * Reset the target's SCSI bus.
 */
		ASC_DBG(1, "before AdvResetChipAndSB()\n");
		switch (AdvResetChipAndSB(adv_dvc)) {
		case ASC_TRUE:
			scmd_printk(KERN_INFO, scp, "SCSI bus reset "
				    "successful\n");
			break;
		case ASC_FALSE:
		default:
			scmd_printk(KERN_INFO, scp, "SCSI bus reset error\n");
			ret = FAILED;
			break;
		}
		spin_lock_irqsave(shost->host_lock, flags);
		AdvISR(adv_dvc);
	}

	/* Save the time of the most recently completed reset. */
	boardp->last_reset = jiffies;
	spin_unlock_irqrestore(shost->host_lock, flags);

	ASC_DBG(1, "ret %d\n", ret);

	return ret;
}

/*
 * advansys_biosparam()
 *
 * Translate disk drive geometry if the "BIOS greater than 1 GB"
 * support is enabled for a drive.
 *
 * ip (information pointer) is an int array with the following definition:
 * ip[0]: heads
 * ip[1]: sectors
 * ip[2]: cylinders
 */
static int
advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		   sector_t capacity, int ip[])
{
	struct asc_board *boardp = shost_priv(sdev->host);

	ASC_DBG(1, "begin\n");
	ASC_STATS(sdev->host, biosparam);
	/* 255/63 geometry for >1GB (0x200000 sectors) disks when extended translation is enabled. */
	if (ASC_NARROW_BOARD(boardp)) {
		if ((boardp->dvc_var.asc_dvc_var.dvc_cntl &
		     ASC_CNTL_BIOS_GT_1GB) && capacity > 0x200000) {
			ip[0] = 255;
			ip[1] = 63;
		} else {
			ip[0] = 64;
			ip[1] = 32;
		}
	} else {
		if ((boardp->dvc_var.adv_dvc_var.bios_ctrl &
		     BIOS_CTRL_EXTENDED_XLAT) && capacity > 0x200000) {
			ip[0] = 255;
			ip[1] = 63;
		} else {
			ip[0] = 64;
			ip[1] = 32;
		}
	}
	ip[2] = (unsigned long)capacity / (ip[0] * ip[1]);
	ASC_DBG(1, "end\n");
	return 0;
}

/*
 * First-level interrupt handler.
 *
 * 'dev_id' is a pointer to the interrupting adapter's Scsi_Host.
 */
static irqreturn_t advansys_interrupt(int irq, void *dev_id)
{
	struct Scsi_Host *shost = dev_id;
	struct asc_board *boardp = shost_priv(shost);
	irqreturn_t result = IRQ_NONE;

	ASC_DBG(2, "boardp 0x%p\n", boardp);
	spin_lock(shost->host_lock);
	if (ASC_NARROW_BOARD(boardp)) {
		if (AscIsIntPending(shost->io_port)) {
			result = IRQ_HANDLED;
			ASC_STATS(shost, interrupt);
			ASC_DBG(1, "before AscISR()\n");
			AscISR(&boardp->dvc_var.asc_dvc_var);
		}
	} else {
		ASC_DBG(1, "before AdvISR()\n");
		if (AdvISR(&boardp->dvc_var.adv_dvc_var)) {
			result = IRQ_HANDLED;
			ASC_STATS(shost, interrupt);
		}
	}
	spin_unlock(shost->host_lock);

	ASC_DBG(1, "end\n");
	return result;
}

/*
 * Ask the narrow board's RISC processor to halt by writing a stop
 * request to the LRAM stop-code byte, then poll for up to 2 seconds
 * (20 * 100 ms). Returns 1 if the chip halted, 0 on timeout. The
 * original stop code is restored before returning.
 */
static int AscHostReqRiscHalt(PortAddr iop_base)
{
	int count = 0;
	int sta = 0;
	uchar saved_stop_code;

	if (AscIsChipHalted(iop_base))
		return (1);
	saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
	AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
			 ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP);
	do {
		if (AscIsChipHalted(iop_base)) {
			sta = 1;
			break;
		}
		mdelay(100);
	} while (count++ < 20);
	AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code);
	return (sta);
}

/*
 * Halt the RISC, write the per-target synchronous transfer register,
 * and restart the chip. Returns the AscSetChipSynRegAtID() status, or
 * FALSE if the RISC could not be halted.
 */
static int AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no,
				   uchar sdtr_data)
{
	int sta = FALSE;

	if (AscHostReqRiscHalt(iop_base)) {
		sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
		AscStartChip(iop_base);
	}
	return sta;
}

/*
 * Apply the "asynchronous transfer uses SDTR" workaround for this
 * device, if the board's bug-fix control enables it and the target is
 * not negotiating SDTR. Certain device types (processor, scanner,
 * ROM, tape) are excluded; HP CD-ROMs get the fix applied always.
 */
static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev)
{
	char type = sdev->type;
	ASC_SCSI_BIT_ID_TYPE tid_bits = 1 << sdev->id;

	if (!(asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ASYN_USE_SYN))
		return;
	if (asc_dvc->init_sdtr & tid_bits)
		return;

	if ((type == TYPE_ROM) && (strncmp(sdev->vendor, "HP ", 3) == 0))
		asc_dvc->pci_fix_asyn_xfer_always |= tid_bits;

	asc_dvc->pci_fix_asyn_xfer |= tid_bits;
	if ((type == TYPE_PROCESSOR) || (type == TYPE_SCANNER) ||
	    (type == TYPE_ROM) || (type == TYPE_TAPE))
		asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;

	if (asc_dvc->pci_fix_asyn_xfer & tid_bits)
		AscSetRunChipSynRegAtID(asc_dvc->iop_base, sdev->id,
					ASYN_SDTR_DATA_FIX_PCI_REV_AB);
}

/*
 * Per-device configuration for narrow boards: enable/disable SDTR and
 * tagged queuing for the target and push the new settings into the
 * microcode's LRAM variables when they changed.
 */
static void advansys_narrow_slave_configure(struct scsi_device *sdev,
					    ASC_DVC_VAR *asc_dvc)
{
	ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id;
	ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng;

	if (sdev->lun == 0) {
		ASC_SCSI_BIT_ID_TYPE orig_init_sdtr = asc_dvc->init_sdtr;
		if ((asc_dvc->cfg->sdtr_enable & tid_bit) && sdev->sdtr) {
			asc_dvc->init_sdtr |= tid_bit;
		} else {
			asc_dvc->init_sdtr &= ~tid_bit;
		}

		if (orig_init_sdtr != asc_dvc->init_sdtr)
			AscAsyncFix(asc_dvc, sdev);
	}

	if (sdev->tagged_supported) {
		if (asc_dvc->cfg->cmd_qng_enabled & tid_bit) {
			if (sdev->lun == 0) {
				asc_dvc->cfg->can_tagged_qng |= tid_bit;
				asc_dvc->use_tagged_qng |= tid_bit;
			}
			scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
						asc_dvc->max_dvc_qng[sdev->id]);
		}
	} else {
		if (sdev->lun == 0) {
			asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
			asc_dvc->use_tagged_qng &= ~tid_bit;
		}
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	}

	if ((sdev->lun == 0) &&
	    (orig_use_tagged_qng != asc_dvc->use_tagged_qng)) {
		/* Mirror the updated tagged-queuing state into microcode LRAM. */
		AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B,
				 asc_dvc->cfg->disc_enable);
		AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B,
				 asc_dvc->use_tagged_qng);
		AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B,
				 asc_dvc->cfg->can_tagged_qng);

		asc_dvc->max_dvc_qng[sdev->id] =
		    asc_dvc->cfg->max_tag_qng[sdev->id];
		AscWriteLramByte(asc_dvc->iop_base,
				 (ushort)(ASCV_MAX_DVC_QNG_BEG + sdev->id),
				 asc_dvc->max_dvc_qng[sdev->id]);
	}
}

/*
 * Wide Transfers
 *
 * If the EEPROM enabled WDTR for the device and the device supports wide
 * bus (16 bit) transfers, then turn on the device's 'wdtr_able' bit and
 * write the new value to the microcode.
 */
static void advansys_wide_enable_wdtr(AdvPortAddr iop_base,
				      unsigned short tidmask)
{
	unsigned short cfg_word;
	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
	if ((cfg_word & tidmask) != 0)
		return;

	cfg_word |= tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);

	/*
	 * Clear the microcode SDTR and WDTR negotiation done indicators for
	 * the target to cause it to negotiate with the new setting set above.
	 * WDTR when accepted causes the target to enter asynchronous mode, so
	 * SDTR must be negotiated.
	 */
	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
	cfg_word &= ~tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
	AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
	cfg_word &= ~tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
}

/*
 * Synchronous Transfers
 *
 * If the EEPROM enabled SDTR for the device and the device
 * supports synchronous transfers, then turn on the device's
 * 'sdtr_able' bit. Write the new value to the microcode.
 */
static void advansys_wide_enable_sdtr(AdvPortAddr iop_base,
				      unsigned short tidmask)
{
	unsigned short cfg_word;
	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
	if ((cfg_word & tidmask) != 0)
		return;

	cfg_word |= tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);

	/*
	 * Clear the microcode "SDTR negotiation" done indicator for the
	 * target to cause it to negotiate with the new setting set above.
	 */
	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
	cfg_word &= ~tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
}

/*
 * PPR (Parallel Protocol Request) Capable
 *
 * If the device supports DT mode, then it must be PPR capable.
 * The PPR message will be used in place of the SDTR and WDTR
 * messages to negotiate synchronous speed and offset, transfer
 * width, and protocol options.
 */
static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc,
				     AdvPortAddr iop_base,
				     unsigned short tidmask)
{
	AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
	adv_dvc->ppr_able |= tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
}

/*
 * Per-device configuration for wide boards: enable WDTR/SDTR/PPR and
 * tagged queuing in the microcode when both the EEPROM and the device
 * support the feature.
 */
static void advansys_wide_slave_configure(struct scsi_device *sdev,
					  ADV_DVC_VAR *adv_dvc)
{
	AdvPortAddr iop_base = adv_dvc->iop_base;
	unsigned short tidmask = 1 << sdev->id;

	if (sdev->lun == 0) {
		/*
		 * Handle WDTR, SDTR, and Tag Queuing. If the feature
		 * is enabled in the EEPROM and the device supports the
		 * feature, then enable it in the microcode.
		 */
		if ((adv_dvc->wdtr_able & tidmask) && sdev->wdtr)
			advansys_wide_enable_wdtr(iop_base, tidmask);
		if ((adv_dvc->sdtr_able & tidmask) && sdev->sdtr)
			advansys_wide_enable_sdtr(iop_base, tidmask);
		if (adv_dvc->chip_type == ADV_CHIP_ASC38C1600 && sdev->ppr)
			advansys_wide_enable_ppr(adv_dvc, iop_base, tidmask);

		/*
		 * Tag Queuing is disabled for the BIOS which runs in polled
		 * mode and would see no benefit from Tag Queuing. Also by
		 * disabling Tag Queuing in the BIOS devices with Tag Queuing
		 * bugs will at least work with the BIOS.
		 */
		if ((adv_dvc->tagqng_able & tidmask) &&
		    sdev->tagged_supported) {
			unsigned short cfg_word;
			AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word);
			cfg_word |= tidmask;
			AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
					 cfg_word);

			AdvWriteByteLram(iop_base,
					 ASC_MC_NUMBER_OF_MAX_CMD + sdev->id,
					 adv_dvc->max_dvc_qng);
		}
	}

	if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) {
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
					adv_dvc->max_dvc_qng);
	} else {
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	}
}

/*
 * Set the number of commands to queue per device for the
 * specified host adapter.
 */
static int advansys_slave_configure(struct scsi_device *sdev)
{
	struct asc_board *boardp = shost_priv(sdev->host);

	if (ASC_NARROW_BOARD(boardp))
		advansys_narrow_slave_configure(sdev,
						&boardp->dvc_var.asc_dvc_var);
	else
		advansys_wide_slave_configure(sdev,
					      &boardp->dvc_var.adv_dvc_var);

	return 0;
}

/*
 * Map the command's sense buffer for DMA (device-to-host) and return
 * the bus address in little-endian form for the microcode.
 *
 * NOTE(review): the dma_map_single() result is not checked with
 * dma_mapping_error() — verify whether a mapping failure can occur on
 * the platforms this driver supports.
 */
static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp)
{
	struct asc_board *board = shost_priv(scp->device->host);
	scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
					     SCSI_SENSE_BUFFERSIZE,
					     DMA_FROM_DEVICE);
	dma_cache_sync(board->dev, scp->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	return cpu_to_le32(scp->SCp.dma_handle);
}

/*
 * Build an ASC_SCSI_Q request (narrow board) from a struct scsi_cmnd.
 * Returns ASC_NOERROR on success or ASC_ERROR with scp->result set on
 * failure.
 */
static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
			 struct asc_scsi_q *asc_scsi_q)
{
	struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var;
	int use_sg;

	memset(asc_scsi_q, 0, sizeof(*asc_scsi_q));

	/*
	 * Point the ASC_SCSI_Q to the 'struct scsi_cmnd'.
	 */
	asc_scsi_q->q2.srb_ptr = advansys_ptr_to_srb(asc_dvc, scp);
	if (asc_scsi_q->q2.srb_ptr == BAD_SRB) {
		scp->result = HOST_BYTE(DID_SOFT_ERROR);
		return ASC_ERROR;
	}

	/*
	 * Build the ASC_SCSI_Q request.
	 */
	asc_scsi_q->cdbptr = &scp->cmnd[0];
	asc_scsi_q->q2.cdb_len = scp->cmd_len;
	asc_scsi_q->q1.target_id = ASC_TID_TO_TARGET_ID(scp->device->id);
	asc_scsi_q->q1.target_lun = scp->device->lun;
	asc_scsi_q->q2.target_ix =
	    ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun);
	asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp);
	asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE;

	/*
	 * If there are any outstanding requests for the current target,
	 * then every 255th request send an ORDERED request. This heuristic
	 * tries to retain the benefit of request sorting while preventing
	 * request starvation. 255 is the max number of tags or pending commands
	 * a device may have outstanding.
	 *
	 * The request count is incremented below for every successfully
	 * started request.
	 *
	 */
	if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) &&
	    (boardp->reqcnt[scp->device->id] % 255) == 0) {
		asc_scsi_q->q2.tag_code = MSG_ORDERED_TAG;
	} else {
		asc_scsi_q->q2.tag_code = MSG_SIMPLE_TAG;
	}

	/* Build ASC_SCSI_Q */
	use_sg = scsi_dma_map(scp);
	if (use_sg != 0) {
		int sgcnt;
		struct scatterlist *slp;
		struct asc_sg_head *asc_sg_head;

		if (use_sg > scp->device->host->sg_tablesize) {
			scmd_printk(KERN_ERR, scp, "use_sg %d > "
				    "sg_tablesize %d\n", use_sg,
				    scp->device->host->sg_tablesize);
			scsi_dma_unmap(scp);
			scp->result = HOST_BYTE(DID_ERROR);
			return ASC_ERROR;
		}

		/*
		 * NOTE(review): if 'sg_head' is a pointer member,
		 * sizeof(asc_scsi_q->sg_head) is only pointer-sized —
		 * verify the intended header size is allocated here.
		 */
		asc_sg_head = kzalloc(sizeof(asc_scsi_q->sg_head) +
			use_sg * sizeof(struct asc_sg_list), GFP_ATOMIC);
		if (!asc_sg_head) {
			scsi_dma_unmap(scp);
			scp->result = HOST_BYTE(DID_SOFT_ERROR);
			return ASC_ERROR;
		}

		asc_scsi_q->q1.cntl |= QC_SG_HEAD;
		asc_scsi_q->sg_head = asc_sg_head;
		asc_scsi_q->q1.data_cnt = 0;
		asc_scsi_q->q1.data_addr = 0;
		/* This is a byte value, otherwise it would need to be swapped. */
		asc_sg_head->entry_cnt = asc_scsi_q->q1.sg_queue_cnt = use_sg;
		ASC_STATS_ADD(scp->device->host, xfer_elem,
			      asc_sg_head->entry_cnt);

		/*
		 * Convert scatter-gather list into ASC_SG_HEAD list.
		 */
		scsi_for_each_sg(scp, slp, use_sg, sgcnt) {
			asc_sg_head->sg_list[sgcnt].addr =
			    cpu_to_le32(sg_dma_address(slp));
			asc_sg_head->sg_list[sgcnt].bytes =
			    cpu_to_le32(sg_dma_len(slp));
			ASC_STATS_ADD(scp->device->host, xfer_sect,
				      DIV_ROUND_UP(sg_dma_len(slp), 512));
		}
	}

	ASC_STATS(scp->device->host, xfer_cnt);

	ASC_DBG_PRT_ASC_SCSI_Q(2, asc_scsi_q);
	ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);

	return ASC_NOERROR;
}

/*
 * Build scatter-gather list for Adv Library (Wide Board).
 *
 * Additional ADV_SG_BLOCK structures will need to be allocated
 * if the total number of scatter-gather elements exceeds
 * NO_OF_SG_PER_BLOCK (15). The ADV_SG_BLOCK structures are
 * assumed to be physically contiguous.
*
 * Return:
 *	ADV_SUCCESS(1) - SG List successfully created
 *	ADV_ERROR(-1) - SG List creation failed
 */
static int
adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp,
	       struct scsi_cmnd *scp, int use_sg)
{
	adv_sgblk_t *sgblkp;
	ADV_SCSI_REQ_Q *scsiqp;
	struct scatterlist *slp;
	int sg_elem_cnt;
	ADV_SG_BLOCK *sg_block, *prev_sg_block;
	ADV_PADDR sg_block_paddr;
	int i;

	scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);
	slp = scsi_sglist(scp);
	sg_elem_cnt = use_sg;
	prev_sg_block = NULL;
	reqp->sgblkp = NULL;

	for (;;) {
		/*
		 * Allocate a 'adv_sgblk_t' structure from the board free
		 * list. One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK
		 * (15) scatter-gather elements.
		 */
		if ((sgblkp = boardp->adv_sgblkp) == NULL) {
			ASC_DBG(1, "no free adv_sgblk_t\n");
			ASC_STATS(scp->device->host, adv_build_nosg);

			/*
			 * Allocation failed. Free 'adv_sgblk_t' structures
			 * already allocated for the request.
			 */
			while ((sgblkp = reqp->sgblkp) != NULL) {
				/* Remove 'sgblkp' from the request list. */
				reqp->sgblkp = sgblkp->next_sgblkp;

				/* Add 'sgblkp' to the board free list. */
				sgblkp->next_sgblkp = boardp->adv_sgblkp;
				boardp->adv_sgblkp = sgblkp;
			}
			return ASC_BUSY;
		}

		/* Complete 'adv_sgblk_t' board allocation. */
		boardp->adv_sgblkp = sgblkp->next_sgblkp;
		sgblkp->next_sgblkp = NULL;

		/*
		 * Get 8 byte aligned virtual and physical addresses
		 * for the allocated ADV_SG_BLOCK structure.
		 */
		sg_block = (ADV_SG_BLOCK *)ADV_8BALIGN(&sgblkp->sg_block);
		sg_block_paddr = virt_to_bus(sg_block);

		/*
		 * Check if this is the first 'adv_sgblk_t' for the
		 * request.
		 */
		if (reqp->sgblkp == NULL) {
			/* Request's first scatter-gather block. */
			reqp->sgblkp = sgblkp;

			/*
			 * Set ADV_SCSI_REQ_T ADV_SG_BLOCK virtual and physical
			 * address pointers.
			 */
			scsiqp->sg_list_ptr = sg_block;
			scsiqp->sg_real_addr = cpu_to_le32(sg_block_paddr);
		} else {
			/* Request's second or later scatter-gather block. */
			sgblkp->next_sgblkp = reqp->sgblkp;
			reqp->sgblkp = sgblkp;

			/*
			 * Point the previous ADV_SG_BLOCK structure to
			 * the newly allocated ADV_SG_BLOCK structure.
			 */
			prev_sg_block->sg_ptr = cpu_to_le32(sg_block_paddr);
		}

		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr =
			    cpu_to_le32(sg_dma_address(slp));
			sg_block->sg_list[i].sg_count =
			    cpu_to_le32(sg_dma_len(slp));
			ASC_STATS_ADD(scp->device->host, xfer_sect,
				      DIV_ROUND_UP(sg_dma_len(slp), 512));

			if (--sg_elem_cnt == 0) {
				/* Last ADV_SG_BLOCK and scatter-gather entry. */
				sg_block->sg_cnt = i + 1;
				sg_block->sg_ptr = 0L;	/* Last ADV_SG_BLOCK in list. */
				return ADV_SUCCESS;
			}
			slp++;
		}
		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
		prev_sg_block = sg_block;
	}
}

/*
 * Build a request structure for the Adv Library (Wide Board).
 *
 * If an adv_req_t can not be allocated to issue the request,
 * then return ASC_BUSY. If an error occurs, then return ASC_ERROR.
 *
 * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the
 * microcode for DMA addresses or math operations are byte swapped
 * to little-endian order.
 */
static int adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
			 ADV_SCSI_REQ_Q **adv_scsiqpp)
{
	adv_req_t *reqp;
	ADV_SCSI_REQ_Q *scsiqp;
	int i;
	int ret;
	int use_sg;

	/*
	 * Allocate an adv_req_t structure from the board to execute
	 * the command.
	 */
	if (boardp->adv_reqp == NULL) {
		ASC_DBG(1, "no free adv_req_t\n");
		ASC_STATS(scp->device->host, adv_build_noreq);
		return ASC_BUSY;
	} else {
		reqp = boardp->adv_reqp;
		boardp->adv_reqp = reqp->next_reqp;
		reqp->next_reqp = NULL;
	}

	/*
	 * Get 32-byte aligned ADV_SCSI_REQ_Q and ADV_SG_BLOCK pointers.
	 */
	scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);

	/*
	 * Initialize the structure.
	 */
	scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0;

	/*
	 * Set the ADV_SCSI_REQ_Q 'srb_ptr' to point to the adv_req_t structure.
	 */
	scsiqp->srb_ptr = ADV_VADDR_TO_U32(reqp);

	/*
	 * Set the adv_req_t 'cmndp' to point to the struct scsi_cmnd structure.
	 */
	reqp->cmndp = scp;

	/*
	 * Build the ADV_SCSI_REQ_Q request.
	 */

	/* Set CDB length and copy it to the request structure.  */
	scsiqp->cdb_len = scp->cmd_len;
	/* Copy first 12 CDB bytes to cdb[]. */
	for (i = 0; i < scp->cmd_len && i < 12; i++) {
		scsiqp->cdb[i] = scp->cmnd[i];
	}
	/* Copy last 4 CDB bytes, if present, to cdb16[]. */
	for (; i < scp->cmd_len; i++) {
		scsiqp->cdb16[i - 12] = scp->cmnd[i];
	}

	scsiqp->target_id = scp->device->id;
	scsiqp->target_lun = scp->device->lun;

	scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0]));
	scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;

	/* Build ADV_SCSI_REQ_Q */

	use_sg = scsi_dma_map(scp);
	if (use_sg == 0) {
		/* Zero-length transfer */
		reqp->sgblkp = NULL;
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = NULL;

		scsiqp->data_addr = 0;
		scsiqp->sg_list_ptr = NULL;
		scsiqp->sg_real_addr = 0;
	} else {
		if (use_sg > ADV_MAX_SG_LIST) {
			scmd_printk(KERN_ERR, scp, "use_sg %d > "
				    "ADV_MAX_SG_LIST %d\n", use_sg,
				    scp->device->host->sg_tablesize);
			scsi_dma_unmap(scp);
			scp->result = HOST_BYTE(DID_ERROR);

			/*
			 * Free the 'adv_req_t' structure by adding it back
			 * to the board free list.
			 */
			reqp->next_reqp = boardp->adv_reqp;
			boardp->adv_reqp = reqp;

			return ASC_ERROR;
		}

		scsiqp->data_cnt = cpu_to_le32(scsi_bufflen(scp));

		ret = adv_get_sglist(boardp, reqp, scp, use_sg);
		if (ret != ADV_SUCCESS) {
			/*
			 * Free the adv_req_t structure by adding it back to
			 * the board free list.
 */
			reqp->next_reqp = boardp->adv_reqp;
			boardp->adv_reqp = reqp;

			return ret;
		}

		ASC_STATS_ADD(scp->device->host, xfer_elem, use_sg);
	}

	ASC_STATS(scp->device->host, xfer_cnt);

	ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
	ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);

	*adv_scsiqpp = scsiqp;

	return ASC_NOERROR;
}

/*
 * Number of microcode queues needed for 'sg_list' scatter-gather
 * elements: one head queue plus one SG queue per ASC_SG_LIST_PER_Q
 * elements (rounded up).
 */
static int AscSgListToQueue(int sg_list)
{
	int n_sg_list_qs;

	n_sg_list_qs = ((sg_list - 1) / ASC_SG_LIST_PER_Q);
	if (((sg_list - 1) % ASC_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return n_sg_list_qs + 1;
}

/*
 * Return the number of free microcode queues available for a new
 * request of 'n_qs' queues to target 'target_ix', or 0 if the target
 * is not ready/busy, its per-device limit is reached, or not enough
 * queues remain above the ASC_MIN_FREE_Q reserve. As a side effect,
 * records the largest recent multi-queue shortage in last_q_shortage.
 */
static uint
AscGetNumOfFreeQueue(ASC_DVC_VAR *asc_dvc, uchar target_ix, uchar n_qs)
{
	uint cur_used_qs;
	uint cur_free_qs;
	ASC_SCSI_BIT_ID_TYPE target_id;
	uchar tid_no;

	target_id = ASC_TIX_TO_TARGET_ID(target_ix);
	tid_no = ASC_TIX_TO_TID(target_ix);
	if ((asc_dvc->unit_not_ready & target_id) ||
	    (asc_dvc->queue_full_or_busy & target_id)) {
		return 0;
	}
	if (n_qs == 1) {
		cur_used_qs = (uint) asc_dvc->cur_total_qng +
		    (uint) asc_dvc->last_q_shortage + (uint) ASC_MIN_FREE_Q;
	} else {
		cur_used_qs = (uint) asc_dvc->cur_total_qng +
		    (uint) ASC_MIN_FREE_Q;
	}
	if ((uint) (cur_used_qs + n_qs) <= (uint) asc_dvc->max_total_qng) {
		cur_free_qs = (uint) asc_dvc->max_total_qng - cur_used_qs;
		if (asc_dvc->cur_dvc_qng[tid_no] >=
		    asc_dvc->max_dvc_qng[tid_no]) {
			return 0;
		}
		return cur_free_qs;
	}
	if (n_qs > 1) {
		if ((n_qs > asc_dvc->last_q_shortage) &&
		    (n_qs <= (asc_dvc->max_total_qng - ASC_MIN_FREE_Q))) {
			asc_dvc->last_q_shortage = n_qs;
		}
	}
	return 0;
}

/*
 * Follow the LRAM free-queue list one step from 'free_q_head'.
 * Returns the next free queue number, or ASC_QLINK_END if the head
 * queue is busy (QS_READY) or the list is exhausted.
 */
static uchar AscAllocFreeQueue(PortAddr iop_base, uchar free_q_head)
{
	ushort q_addr;
	uchar next_qp;
	uchar q_status;

	q_addr = ASC_QNO_TO_QADDR(free_q_head);
	q_status = (uchar)AscReadLramByte(iop_base,
					  (ushort)(q_addr +
						   ASC_SCSIQ_B_STATUS));
	next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FWD));
	if (((q_status & QS_READY) == 0) && (next_qp != ASC_QLINK_END))
		return next_qp;
	return ASC_QLINK_END;
}

/*
 * Walk 'n_free_q' steps down the free-queue list. Returns the queue
 * number reached, or ASC_QLINK_END if the list ran out first.
 */
static uchar
AscAllocMultipleFreeQueue(PortAddr iop_base, uchar free_q_head, uchar n_free_q)
{
	uchar i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = AscAllocFreeQueue(iop_base, free_q_head);
		if (free_q_head == ASC_QLINK_END)
			break;
	}
	return free_q_head;
}

/*
 * void
 * DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words)
 *
 * Calling/Exit State:
 *      none
 *
 * Description:
 *      Output an ASC_SCSI_Q structure to the chip
 *
 * The words at byte offsets 4 and 20 of the copied region are
 * deliberately skipped — presumably fields owned by the microcode;
 * confirm against the ASC_SCSIQ_1/ASC_SCSIQ_2 layout.
 */
static void
DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words)
{
	int i;

	ASC_DBG_PRT_HEX(2, "DvcPutScsiQ", outbuf, 2 * words);

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 2 * words; i += 2) {
		if (i == 4 || i == 20) {
			continue;
		}
		outpw(iop_base + IOP_RAM_DATA,
		      ((ushort)outbuf[i + 1] << 8) | outbuf[i]);
	}
}

/*
 * Copy a built ASC_SCSI_Q into microcode queue 'q_no' in LRAM and mark
 * it QS_READY. Emits an SDTR message first when synchronous
 * negotiation is enabled but not yet done for the target. Returns 1.
 */
static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
{
	ushort q_addr;
	uchar tid_no;
	uchar sdtr_data;
	uchar syn_period_ix;
	uchar syn_offset;
	PortAddr iop_base;

	iop_base = asc_dvc->iop_base;
	if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) &&
	    ((asc_dvc->sdtr_done & scsiq->q1.target_id) == 0)) {
		tid_no = ASC_TIX_TO_TID(scsiq->q2.target_ix);
		sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
		syn_period_ix =
		    (sdtr_data >> 4) & (asc_dvc->max_sdtr_index - 1);
		syn_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
		AscMsgOutSDTR(asc_dvc,
			      asc_dvc->sdtr_period_tbl[syn_period_ix],
			      syn_offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ASC_QNO_TO_QADDR(q_no);
	if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
		scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
	}
	scsiq->q1.status = QS_FREE;
	AscMemWordCopyPtrToLram(iop_base,
				q_addr + ASC_SCSIQ_CDB_BEG,
				(uchar *)scsiq->cdbptr, scsiq->q2.cdb_len >> 1);

	DvcPutScsiQ(iop_base,
		    q_addr + ASC_SCSIQ_CPY_BEG,
		    (uchar *)&scsiq->q1.cntl,
		    ((sizeof(ASC_SCSIQ_1) + sizeof(ASC_SCSIQ_2)) / 2) - 1);

	/* Write status last: setting QS_READY hands the queue to the microcode. */
	AscWriteLramWord(iop_base,
			 (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS),
			 (ushort)(((ushort)scsiq->q1.
q_no << 8) | (ushort)QS_READY));
	return 1;
}

/*
 * Copy a scatter-gather request into the chained microcode SG queues
 * starting at 'q_no', then hand the head queue to the microcode via
 * AscPutReadyQueue(). The first SG element travels in the head queue
 * (q1.data_addr/data_cnt, restored before returning); the rest are
 * distributed across the linked SG queues.
 */
static int
AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
{
	int sta;
	int i;
	ASC_SG_HEAD *sg_head;
	ASC_SG_LIST_Q scsi_sg_q;
	ASC_DCNT saved_data_addr;
	ASC_DCNT saved_data_cnt;
	PortAddr iop_base;
	ushort sg_list_dwords;
	ushort sg_index;
	ushort sg_entry_cnt;
	ushort q_addr;
	uchar next_qp;

	iop_base = asc_dvc->iop_base;
	sg_head = scsiq->sg_head;
	saved_data_addr = scsiq->q1.data_addr;
	saved_data_cnt = scsiq->q1.data_cnt;
	scsiq->q1.data_addr = (ASC_PADDR) sg_head->sg_list[0].addr;
	scsiq->q1.data_cnt = (ASC_DCNT) sg_head->sg_list[0].bytes;
#if CC_VERY_LONG_SG_LIST
	/*
	 * If sg_head->entry_cnt is greater than ASC_MAX_SG_LIST
	 * then not all SG elements will fit in the allocated queues.
	 * The rest of the SG elements will be copied when the RISC
	 * completes the SG elements that fit and halts.
	 */
	if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
		/*
		 * Set sg_entry_cnt to be the number of SG elements that
		 * will fit in the allocated SG queues. It is minus 1, because
		 * the first SG element is handled above. ASC_MAX_SG_LIST is
		 * already inflated by 1 to account for this. For example it
		 * may be 50 which is 1 + 7 queues * 7 SG elements.
		 */
		sg_entry_cnt = ASC_MAX_SG_LIST - 1;

		/*
		 * Keep track of remaining number of SG elements that will
		 * need to be handled from a_isr.c.
		 */
		scsiq->remain_sg_entry_cnt =
		    sg_head->entry_cnt - ASC_MAX_SG_LIST;
	} else {
#endif /* CC_VERY_LONG_SG_LIST */
		/*
		 * Set sg_entry_cnt to be the number of SG elements that
		 * will fit in the allocated SG queues. It is minus 1, because
		 * the first SG element is handled above.
		 */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#if CC_VERY_LONG_SG_LIST
	}
#endif /* CC_VERY_LONG_SG_LIST */
	if (sg_entry_cnt != 0) {
		scsiq->q1.cntl |= QC_SG_HEAD;
		q_addr = ASC_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			scsi_sg_q.seq_no = i + 1;
			if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
				sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2);
				sg_entry_cnt -= ASC_SG_LIST_PER_Q;
				if (i == 0) {
					scsi_sg_q.sg_list_cnt =
					    ASC_SG_LIST_PER_Q;
					scsi_sg_q.sg_cur_list_cnt =
					    ASC_SG_LIST_PER_Q;
				} else {
					scsi_sg_q.sg_list_cnt =
					    ASC_SG_LIST_PER_Q - 1;
					scsi_sg_q.sg_cur_list_cnt =
					    ASC_SG_LIST_PER_Q - 1;
				}
			} else {
#if CC_VERY_LONG_SG_LIST
				/*
				 * This is the last SG queue in the list of
				 * allocated SG queues. If there are more
				 * SG elements than will fit in the allocated
				 * queues, then set the QCSG_SG_XFER_MORE flag.
				 */
				if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
					scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
				} else {
#endif /* CC_VERY_LONG_SG_LIST */
					scsi_sg_q.cntl |= QCSG_SG_XFER_END;
#if CC_VERY_LONG_SG_LIST
				}
#endif /* CC_VERY_LONG_SG_LIST */
				sg_list_dwords = sg_entry_cnt << 1;
				if (i == 0) {
					scsi_sg_q.sg_list_cnt = sg_entry_cnt;
					scsi_sg_q.sg_cur_list_cnt =
					    sg_entry_cnt;
				} else {
					scsi_sg_q.sg_list_cnt =
					    sg_entry_cnt - 1;
					scsi_sg_q.sg_cur_list_cnt =
					    sg_entry_cnt - 1;
				}
				sg_entry_cnt = 0;
			}
			next_qp = AscReadLramByte(iop_base,
						  (ushort)(q_addr +
							   ASC_SCSIQ_B_FWD));
			scsi_sg_q.q_no = next_qp;
			q_addr = ASC_QNO_TO_QADDR(next_qp);
			AscMemWordCopyPtrToLram(iop_base,
						q_addr + ASC_SCSIQ_SGHD_CPY_BEG,
						(uchar *)&scsi_sg_q,
						sizeof(ASC_SG_LIST_Q) >> 1);
			AscMemDWordCopyPtrToLram(iop_base,
						 q_addr + ASC_SGQ_LIST_BEG,
						 (uchar *)&sg_head->
						 sg_list[sg_index],
						 sg_list_dwords);
			sg_index += ASC_SG_LIST_PER_Q;
			scsiq->next_sg_index = sg_index;
		}
	} else {
		scsiq->q1.cntl &= ~QC_SG_HEAD;
	}
	sta = AscPutReadyQueue(asc_dvc, scsiq, q_no);
	/* Restore the caller's data_addr/data_cnt, clobbered by the first SG element above. */
	scsiq->q1.data_addr = saved_data_addr;
	scsiq->q1.data_cnt =
saved_data_cnt;
	return (sta);
}

/*
 * Allocate 'n_q_required' microcode queues from the free list and
 * post the request (single queue or SG queue chain). On success
 * (returns 1) advances the free-queue head and updates the total and
 * per-target outstanding counts; returns 0 when no queues were free.
 */
static int
AscSendScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar n_q_required)
{
	PortAddr iop_base;
	uchar free_q_head;
	uchar next_qp;
	uchar tid_no;
	uchar target_ix;
	int sta;

	iop_base = asc_dvc->iop_base;
	target_ix = scsiq->q2.target_ix;
	tid_no = ASC_TIX_TO_TID(target_ix);
	sta = 0;
	free_q_head = (uchar)AscGetVarFreeQHead(iop_base);
	if (n_q_required > 1) {
		next_qp = AscAllocMultipleFreeQueue(iop_base, free_q_head,
						    (uchar)n_q_required);
		if (next_qp != ASC_QLINK_END) {
			asc_dvc->last_q_shortage = 0;
			scsiq->sg_head->queue_cnt = n_q_required - 1;
			scsiq->q1.q_no = free_q_head;
			sta = AscPutReadySgListQueue(asc_dvc, scsiq,
						     free_q_head);
		}
	} else if (n_q_required == 1) {
		next_qp = AscAllocFreeQueue(iop_base, free_q_head);
		if (next_qp != ASC_QLINK_END) {
			scsiq->q1.q_no = free_q_head;
			sta = AscPutReadyQueue(asc_dvc, scsiq, free_q_head);
		}
	}
	if (sta == 1) {
		AscPutVarFreeQHead(iop_base, next_qp);
		asc_dvc->cur_total_qng += n_q_required;
		asc_dvc->cur_dvc_qng[tid_no]++;
	}
	return sta;
}

/*
 * Commands for which the "synchronous offset one" fix is disabled.
 * 0xFF entries terminate the list early.
 */
#define ASC_SYN_OFFSET_ONE_DISABLE_LIST 16
static uchar _syn_offset_one_disable_cmd[ASC_SYN_OFFSET_ONE_DISABLE_LIST] = {
	INQUIRY,
	REQUEST_SENSE,
	READ_CAPACITY,
	READ_TOC,
	MODE_SELECT,
	MODE_SENSE,
	MODE_SELECT_10,
	MODE_SENSE_10,
	0xFF,
	0xFF,
	0xFF,
	0xFF,
	0xFF,
	0xFF,
	0xFF,
	0xFF
};

/*
 * Execute a built ASC_SCSI_Q on a narrow board: apply per-command
 * workarounds (offset-one fix, DWB extra-bytes fix for READ_6/READ_10
 * whose transfer ends on a non-dword boundary), compute the number of
 * microcode queues needed, and send the request when enough queues
 * are free (or the request is QC_URGENT).
 *
 * Returns 1 when queued, 0 when the board is temporarily out of
 * queues, or ERR on a fatal condition (existing err_code, empty SG
 * list, or critical-section re-entry).
 */
static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
{
	PortAddr iop_base;
	int sta;
	int n_q_required;
	int disable_syn_offset_one_fix;
	int i;
	ASC_PADDR addr;
	ushort sg_entry_cnt = 0;
	ushort sg_entry_cnt_minus_one = 0;
	uchar target_ix;
	uchar tid_no;
	uchar sdtr_data;
	uchar extra_bytes;
	uchar scsi_cmd;
	uchar disable_cmd;
	ASC_SG_HEAD *sg_head;
	ASC_DCNT data_cnt;

	iop_base = asc_dvc->iop_base;
	sg_head = scsiq->sg_head;
	if (asc_dvc->err_code != 0)
		return (ERR);
	scsiq->q1.q_no = 0;
	if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) {
		scsiq->q1.extra_bytes = 0;
	}
	sta = 0;
	target_ix = scsiq->q2.target_ix;
	tid_no = ASC_TIX_TO_TID(target_ix);
	n_q_required = 1;
	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Force a fresh SDTR negotiation before fetching sense data. */
		if ((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) {
			asc_dvc->sdtr_done &= ~scsiq->q1.target_id;
			sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
			AscMsgOutSDTR(asc_dvc,
				      asc_dvc->
				      sdtr_period_tbl[(sdtr_data >> 4) &
						      (uchar)(asc_dvc->
							      max_sdtr_index -
							      1)],
				      (uchar)(sdtr_data & (uchar)
					      ASC_SYN_MAX_OFFSET));
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}
	if (asc_dvc->in_critical_cnt != 0) {
		AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY);
		return (ERR);
	}
	asc_dvc->in_critical_cnt++;
	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		if ((sg_entry_cnt = sg_head->entry_cnt) == 0) {
			asc_dvc->in_critical_cnt--;
			return (ERR);
		}
#if !CC_VERY_LONG_SG_LIST
		if (sg_entry_cnt > ASC_MAX_SG_LIST) {
			asc_dvc->in_critical_cnt--;
			return (ERR);
		}
#endif /* !CC_VERY_LONG_SG_LIST */
		if (sg_entry_cnt == 1) {
			/* Single-element SG list: demote to a plain transfer. */
			scsiq->q1.data_addr =
			    (ADV_PADDR)sg_head->sg_list[0].addr;
			scsiq->q1.data_cnt =
			    (ADV_DCNT)sg_head->sg_list[0].bytes;
			scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE);
		}
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;
	}
	scsi_cmd = scsiq->cdbptr[0];
	disable_syn_offset_one_fix = FALSE;
	if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) &&
	    !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) {
		if (scsiq->q1.cntl & QC_SG_HEAD) {
			data_cnt = 0;
			for (i = 0; i < sg_entry_cnt; i++) {
				data_cnt +=
				    (ADV_DCNT)le32_to_cpu(sg_head->sg_list[i].
							  bytes);
			}
		} else {
			data_cnt = le32_to_cpu(scsiq->q1.data_cnt);
		}
		if (data_cnt != 0UL) {
			if (data_cnt < 512UL) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST;
				     i++) {
					disable_cmd =
					    _syn_offset_one_disable_cmd[i];
					if (disable_cmd == 0xFF) {
						break;
					}
					if (scsi_cmd == disable_cmd) {
						disable_syn_offset_one_fix =
						    TRUE;
						break;
					}
				}
			}
		}
	}
	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
		scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
				       ASC_TAG_FLAG_DISABLE_DISCONNECT);
	} else {
		scsiq->q2.tag_code &= 0x27;
	}
	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		if (asc_dvc->bug_fix_cntl) {
			if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
				if ((scsi_cmd == READ_6) ||
				    (scsi_cmd == READ_10)) {
					/*
					 * If the last SG element ends on a
					 * non-dword boundary, trim the extra
					 * bytes and remember them in
					 * q1.extra_bytes (restored by the
					 * ISR done path).
					 */
					addr =
					    (ADV_PADDR)le32_to_cpu(sg_head->
								   sg_list
								   [sg_entry_cnt_minus_one].
								   addr) +
					    (ADV_DCNT)le32_to_cpu(sg_head->
								  sg_list
								  [sg_entry_cnt_minus_one].
								  bytes);
					extra_bytes =
					    (uchar)((ushort)addr & 0x0003);
					if ((extra_bytes != 0) &&
					    ((scsiq->q2.
					      tag_code &
					      ASC_TAG_FLAG_EXTRA_BYTES) == 0)) {
						scsiq->q2.tag_code |=
						    ASC_TAG_FLAG_EXTRA_BYTES;
						scsiq->q1.extra_bytes =
						    extra_bytes;
						data_cnt =
						    le32_to_cpu(sg_head->
								sg_list
								[sg_entry_cnt_minus_one].
								bytes);
						data_cnt -=
						    (ASC_DCNT)extra_bytes;
						sg_head->
						    sg_list
						    [sg_entry_cnt_minus_one].
						    bytes =
						    cpu_to_le32(data_cnt);
					}
				}
			}
		}
		sg_head->entry_to_copy = sg_head->entry_cnt;
#if CC_VERY_LONG_SG_LIST
		/*
		 * Set the sg_entry_cnt to the maximum possible. The rest of
		 * the SG elements will be copied when the RISC completes the
		 * SG elements that fit and halts.
		 */
		if (sg_entry_cnt > ASC_MAX_SG_LIST) {
			sg_entry_cnt = ASC_MAX_SG_LIST;
		}
#endif /* CC_VERY_LONG_SG_LIST */
		n_q_required = AscSgListToQueue(sg_entry_cnt);
		if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >=
		     (uint) n_q_required) ||
		    ((scsiq->q1.cntl & QC_URGENT) != 0)) {
			if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
						    n_q_required)) == 1) {
				asc_dvc->in_critical_cnt--;
				return (sta);
			}
		}
	} else {
		if (asc_dvc->bug_fix_cntl) {
			if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
				if ((scsi_cmd == READ_6) ||
				    (scsi_cmd == READ_10)) {
					/* Same non-dword-boundary trim for non-SG transfers. */
					addr =
					    le32_to_cpu(scsiq->q1.data_addr) +
					    le32_to_cpu(scsiq->q1.data_cnt);
					extra_bytes =
					    (uchar)((ushort)addr & 0x0003);
					if ((extra_bytes != 0) &&
					    ((scsiq->q2.
					      tag_code &
					      ASC_TAG_FLAG_EXTRA_BYTES) == 0)) {
						data_cnt =
						    le32_to_cpu(scsiq->q1.
								data_cnt);
						if (((ushort)data_cnt & 0x01FF)
						    == 0) {
							scsiq->q2.tag_code |=
							    ASC_TAG_FLAG_EXTRA_BYTES;
							data_cnt -=
							    (ASC_DCNT)
							    extra_bytes;
							scsiq->q1.data_cnt =
							    cpu_to_le32
							    (data_cnt);
							scsiq->q1.extra_bytes =
							    extra_bytes;
						}
					}
				}
			}
		}
		n_q_required = 1;
		if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, 1) >= 1) ||
		    ((scsiq->q1.cntl & QC_URGENT) != 0)) {
			if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
						    n_q_required)) == 1) {
				asc_dvc->in_critical_cnt--;
				return (sta);
			}
		}
	}
	asc_dvc->in_critical_cnt--;
	return (sta);
}

/*
 * AdvExeScsiQueue() - Send a request to the RISC microcode program.
 *
 * Allocate a carrier structure, point the carrier to the ADV_SCSI_REQ_Q,
 * add the carrier to the ICQ (Initiator Command Queue), and tickle the
 * RISC to notify it a new command is ready to be executed.
 *
 * If 'done_status' is not set to QD_DO_RETRY, then 'error_retry' will be
 * set to SCSI_MAX_RETRY.
 *
 * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the microcode
 * for DMA addresses or math operations are byte swapped to little-endian
 * order.
 *
 * Return:
 *      ADV_SUCCESS(1) - The request was successfully queued.
 *      ADV_BUSY(0) - Resource unavailable; Retry again after pending
 *                    request completes.
 *   ADV_ERROR(-1) - Invalid ADV_SCSI_REQ_Q request structure
 *                   host IC error.
 */
static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
{
	AdvPortAddr iop_base;
	ADV_PADDR req_paddr;
	ADV_CARR_T *new_carrp;

	/*
	 * The ADV_SCSI_REQ_Q 'target_id' field should never exceed ADV_MAX_TID.
	 */
	if (scsiq->target_id > ADV_MAX_TID) {
		scsiq->host_status = QHSTA_M_INVALID_DEVICE;
		scsiq->done_status = QD_WITH_ERROR;
		return ADV_ERROR;
	}

	iop_base = asc_dvc->iop_base;

	/*
	 * Allocate a carrier ensuring at least one carrier always
	 * remains on the freelist and initialize fields.
	 */
	if ((new_carrp = asc_dvc->carr_freelist) == NULL) {
		return ADV_BUSY;
	}
	/*
	 * Unlink the carrier from the freelist; 'next_vpa' holds the
	 * next free carrier's virtual address in little-endian form.
	 */
	asc_dvc->carr_freelist = (ADV_CARR_T *)
	    ADV_U32_TO_VADDR(le32_to_cpu(new_carrp->next_vpa));
	asc_dvc->carr_pending_cnt++;

	/*
	 * Set the carrier to be a stopper by setting 'next_vpa'
	 * to the stopper value. The current stopper will be changed
	 * below to point to the new stopper.
	 */
	new_carrp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);

	/*
	 * Clear the ADV_SCSI_REQ_Q done flag.
	 */
	scsiq->a_flag &= ~ADV_SCSIQ_DONE;

	req_paddr = virt_to_bus(scsiq);
	/* The request must be 32-byte aligned for the microcode. */
	BUG_ON(req_paddr & 31);
	/* Byte-swap only after the alignment assertion above. */
	req_paddr = cpu_to_le32(req_paddr);

	/* Save virtual and physical address of ADV_SCSI_REQ_Q and carrier. */
	scsiq->scsiq_ptr = cpu_to_le32(ADV_VADDR_TO_U32(scsiq));
	scsiq->scsiq_rptr = req_paddr;

	scsiq->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->icq_sp));
	/*
	 * Every ADV_CARR_T.carr_pa is byte swapped to little-endian
	 * order during initialization.
	 */
	scsiq->carr_pa = asc_dvc->icq_sp->carr_pa;

	/*
	 * Use the current stopper to send the ADV_SCSI_REQ_Q command to
	 * the microcode. The newly allocated stopper will become the new
	 * stopper.
	 */
	asc_dvc->icq_sp->areq_vpa = req_paddr;

	/*
	 * Set the 'next_vpa' pointer for the old stopper to be the
	 * physical address of the new stopper. The RISC can only
	 * follow physical addresses.
	 */
	asc_dvc->icq_sp->next_vpa = new_carrp->carr_pa;

	/*
	 * Set the host adapter stopper pointer to point to the new carrier.
	 */
	asc_dvc->icq_sp = new_carrp;

	if (asc_dvc->chip_type == ADV_CHIP_ASC3550 ||
	    asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
		/*
		 * Tickle the RISC to tell it to read its Command Queue Head pointer.
		 */
		AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_A);
		if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
			/*
			 * Clear the tickle value. In the ASC-3550 the RISC flag
			 * command 'clr_tickle_a' does not work unless the host
			 * value is cleared.
			 */
			AdvWriteByteRegister(iop_base, IOPB_TICKLE,
					     ADV_TICKLE_NOP);
		}
	} else if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		/*
		 * Notify the RISC a carrier is ready by writing the physical
		 * address of the new carrier stopper to the COMMA register.
		 */
		AdvWriteDWordRegister(iop_base, IOPDW_COMMA,
				      le32_to_cpu(new_carrp->carr_pa));
	}

	return ADV_SUCCESS;
}
*/ return ASC_BUSY; case ASC_ERROR: default: ASC_DBG(1, "adv_build_req ASC_ERROR\n"); ASC_STATS(scp->device->host, build_error); return ASC_ERROR; } ret = AdvExeScsiQueue(adv_dvc, adv_scsiqp); err_code = adv_dvc->err_code; } switch (ret) { case ASC_NOERROR: ASC_STATS(scp->device->host, exe_noerror); /* * Increment monotonically increasing per device * successful request counter. Wrapping doesn't matter. */ boardp->reqcnt[scp->device->id]++; ASC_DBG(1, "ExeScsiQueue() ASC_NOERROR\n"); break; case ASC_BUSY: ASC_STATS(scp->device->host, exe_busy); break; case ASC_ERROR: scmd_printk(KERN_ERR, scp, "ExeScsiQueue() ASC_ERROR, " "err_code 0x%x\n", err_code); ASC_STATS(scp->device->host, exe_error); scp->result = HOST_BYTE(DID_ERROR); break; default: scmd_printk(KERN_ERR, scp, "ExeScsiQueue() unknown, " "err_code 0x%x\n", err_code); ASC_STATS(scp->device->host, exe_unknown); scp->result = HOST_BYTE(DID_ERROR); break; } ASC_DBG(1, "end\n"); return ret; } /* * advansys_queuecommand() - interrupt-driven I/O entrypoint. * * This function always returns 0. Command return status is saved * in the 'scp' result field. */ static int advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) { struct Scsi_Host *shost = scp->device->host; int asc_res, result = 0; ASC_STATS(shost, queuecommand); scp->scsi_done = done; asc_res = asc_execute_scsi_cmnd(scp); switch (asc_res) { case ASC_NOERROR: break; case ASC_BUSY: result = SCSI_MLQUEUE_HOST_BUSY; break; case ASC_ERROR: default: asc_scsi_done(scp); break; } return result; } static DEF_SCSI_QCMD(advansys_queuecommand) static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base) { PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | (PortAddr) (ASC_EISA_CFG_IOP_MASK); return inpw(eisa_cfg_iop); } /* * Return the BIOS address of the adapter at the specified * I/O port and with the specified bus type. 
/*
 * Program the chip's SCSI host ID, unless it already matches.
 * Returns the ID read back from the chip after the update.
 */
static uchar __devinit AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
{
	ushort lsw;

	if (AscGetChipScsiID(iop_base) == new_host_id)
		return new_host_id;

	/* The host ID occupies bits 10:8 of the config LSW. */
	lsw = (AscGetChipCfgLsw(iop_base) & 0xF8FF) |
	      (ushort)((new_host_id & ASC_MAX_TID) << 8);
	AscSetChipCfgLsw(iop_base, lsw);
	return AscGetChipScsiID(iop_base);
}
AscStopQueueExe(PortAddr iop_base) { int count = 0; if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) == 0) { AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, ASC_STOP_REQ_RISC_STOP); do { if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) & ASC_STOP_ACK_RISC_STOP) { return (1); } mdelay(100); } while (count++ < 20); } return (0); } static ASC_DCNT __devinit AscGetMaxDmaCount(ushort bus_type) { if (bus_type & ASC_IS_ISA) return ASC_MAX_ISA_DMA_COUNT; else if (bus_type & (ASC_IS_EISA | ASC_IS_VL)) return ASC_MAX_VL_DMA_COUNT; return ASC_MAX_PCI_DMA_COUNT; } #ifdef CONFIG_ISA static ushort __devinit AscGetIsaDmaChannel(PortAddr iop_base) { ushort channel; channel = AscGetChipCfgLsw(iop_base) & 0x0003; if (channel == 0x03) return (0); else if (channel == 0x00) return (7); return (channel + 4); } static ushort __devinit AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel) { ushort cfg_lsw; uchar value; if ((dma_channel >= 5) && (dma_channel <= 7)) { if (dma_channel == 7) value = 0x00; else value = dma_channel - 4; cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC; cfg_lsw |= value; AscSetChipCfgLsw(iop_base, cfg_lsw); return (AscGetIsaDmaChannel(iop_base)); } return 0; } static uchar __devinit AscGetIsaDmaSpeed(PortAddr iop_base) { uchar speed_value; AscSetBank(iop_base, 1); speed_value = AscReadChipDmaSpeed(iop_base); speed_value &= 0x07; AscSetBank(iop_base, 0); return speed_value; } static uchar __devinit AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value) { speed_value &= 0x07; AscSetBank(iop_base, 1); AscWriteChipDmaSpeed(iop_base, speed_value); AscSetBank(iop_base, 0); return AscGetIsaDmaSpeed(iop_base); } #endif /* CONFIG_ISA */ static ushort __devinit AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc) { int i; PortAddr iop_base; ushort warn_code; uchar chip_version; iop_base = asc_dvc->iop_base; warn_code = 0; asc_dvc->err_code = 0; if ((asc_dvc->bus_type & (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) { asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE; } 
AscSetChipControl(iop_base, CC_HALT); AscSetChipStatus(iop_base, 0); asc_dvc->bug_fix_cntl = 0; asc_dvc->pci_fix_asyn_xfer = 0; asc_dvc->pci_fix_asyn_xfer_always = 0; /* asc_dvc->init_state initialized in AscInitGetConfig(). */ asc_dvc->sdtr_done = 0; asc_dvc->cur_total_qng = 0; asc_dvc->is_in_int = 0; asc_dvc->in_critical_cnt = 0; asc_dvc->last_q_shortage = 0; asc_dvc->use_tagged_qng = 0; asc_dvc->no_scam = 0; asc_dvc->unit_not_ready = 0; asc_dvc->queue_full_or_busy = 0; asc_dvc->redo_scam = 0; asc_dvc->res2 = 0; asc_dvc->min_sdtr_index = 0; asc_dvc->cfg->can_tagged_qng = 0; asc_dvc->cfg->cmd_qng_enabled = 0; asc_dvc->dvc_cntl = ASC_DEF_DVC_CNTL; asc_dvc->init_sdtr = 0; asc_dvc->max_total_qng = ASC_DEF_MAX_TOTAL_QNG; asc_dvc->scsi_reset_wait = 3; asc_dvc->start_motor = ASC_SCSI_WIDTH_BIT_SET; asc_dvc->max_dma_count = AscGetMaxDmaCount(asc_dvc->bus_type); asc_dvc->cfg->sdtr_enable = ASC_SCSI_WIDTH_BIT_SET; asc_dvc->cfg->disc_enable = ASC_SCSI_WIDTH_BIT_SET; asc_dvc->cfg->chip_scsi_id = ASC_DEF_CHIP_SCSI_ID; chip_version = AscGetChipVersion(iop_base, asc_dvc->bus_type); asc_dvc->cfg->chip_version = chip_version; asc_dvc->sdtr_period_tbl = asc_syn_xfer_period; asc_dvc->max_sdtr_index = 7; if ((asc_dvc->bus_type & ASC_IS_PCI) && (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3150)) { asc_dvc->bus_type = ASC_IS_PCI_ULTRA; asc_dvc->sdtr_period_tbl = asc_syn_ultra_xfer_period; asc_dvc->max_sdtr_index = 15; if (chip_version == ASC_CHIP_VER_PCI_ULTRA_3150) { AscSetExtraControl(iop_base, (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE)); } else if (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3050) { AscSetExtraControl(iop_base, (SEC_ACTIVE_NEGATE | SEC_ENABLE_FILTER)); } } if (asc_dvc->bus_type == ASC_IS_PCI) { AscSetExtraControl(iop_base, (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE)); } asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED; #ifdef CONFIG_ISA if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) { if (chip_version >= ASC_CHIP_MIN_VER_ISA_PNP) { AscSetChipIFC(iop_base, IFC_INIT_DEFAULT); 
asc_dvc->bus_type = ASC_IS_ISAPNP; } asc_dvc->cfg->isa_dma_channel = (uchar)AscGetIsaDmaChannel(iop_base); } #endif /* CONFIG_ISA */ for (i = 0; i <= ASC_MAX_TID; i++) { asc_dvc->cur_dvc_qng[i] = 0; asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG; asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q *)0L; asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *)0L; asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG; } return warn_code; } static int __devinit AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg) { int retry; for (retry = 0; retry < ASC_EEP_MAX_RETRY; retry++) { unsigned char read_back; AscSetChipEEPCmd(iop_base, cmd_reg); mdelay(1); read_back = AscGetChipEEPCmd(iop_base); if (read_back == cmd_reg) return 1; } return 0; } static void __devinit AscWaitEEPRead(void) { mdelay(1); } static ushort __devinit AscReadEEPWord(PortAddr iop_base, uchar addr) { ushort read_wval; uchar cmd_reg; AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE); AscWaitEEPRead(); cmd_reg = addr | ASC_EEP_CMD_READ; AscWriteEEPCmdReg(iop_base, cmd_reg); AscWaitEEPRead(); read_wval = AscGetChipEEPData(iop_base); AscWaitEEPRead(); return read_wval; } static ushort __devinit AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type) { ushort wval; ushort sum; ushort *wbuf; int cfg_beg; int cfg_end; int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2; int s_addr; wbuf = (ushort *)cfg_buf; sum = 0; /* Read two config words; Byte-swapping done by AscReadEEPWord(). */ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { *wbuf = AscReadEEPWord(iop_base, (uchar)s_addr); sum += *wbuf; } if (bus_type & ASC_IS_VL) { cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; } else { cfg_beg = ASC_EEP_DVC_CFG_BEG; cfg_end = ASC_EEP_MAX_DVC_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { wval = AscReadEEPWord(iop_base, (uchar)s_addr); if (s_addr <= uchar_end_in_config) { /* * Swap all char fields - must unswap bytes already swapped * by AscReadEEPWord(). 
*/ *wbuf = le16_to_cpu(wval); } else { /* Don't swap word field at the end - cntl field. */ *wbuf = wval; } sum += wval; /* Checksum treats all EEPROM data as words. */ } /* * Read the checksum word which will be compared against 'sum' * by the caller. Word field already swapped. */ *wbuf = AscReadEEPWord(iop_base, (uchar)s_addr); return sum; } static int __devinit AscTestExternalLram(ASC_DVC_VAR *asc_dvc) { PortAddr iop_base; ushort q_addr; ushort saved_word; int sta; iop_base = asc_dvc->iop_base; sta = 0; q_addr = ASC_QNO_TO_QADDR(241); saved_word = AscReadLramWord(iop_base, q_addr); AscSetChipLramAddr(iop_base, q_addr); AscSetChipLramData(iop_base, 0x55AA); mdelay(10); AscSetChipLramAddr(iop_base, q_addr); if (AscGetChipLramData(iop_base) == 0x55AA) { sta = 1; AscWriteLramWord(iop_base, q_addr, saved_word); } return (sta); } static void __devinit AscWaitEEPWrite(void) { mdelay(20); } static int __devinit AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg) { ushort read_back; int retry; retry = 0; while (TRUE) { AscSetChipEEPData(iop_base, data_reg); mdelay(1); read_back = AscGetChipEEPData(iop_base); if (read_back == data_reg) { return (1); } if (retry++ > ASC_EEP_MAX_RETRY) { return (0); } } } static ushort __devinit AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val) { ushort read_wval; read_wval = AscReadEEPWord(iop_base, addr); if (read_wval != word_val) { AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_ABLE); AscWaitEEPRead(); AscWriteEEPDataReg(iop_base, word_val); AscWaitEEPRead(); AscWriteEEPCmdReg(iop_base, (uchar)((uchar)ASC_EEP_CMD_WRITE | addr)); AscWaitEEPWrite(); AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE); AscWaitEEPRead(); return (AscReadEEPWord(iop_base, addr)); } return (read_wval); } static int __devinit AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type) { int n_error; ushort *wbuf; ushort word; ushort sum; int s_addr; int cfg_beg; int cfg_end; int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR 
- 2; wbuf = (ushort *)cfg_buf; n_error = 0; sum = 0; /* Write two config words; AscWriteEEPWord() will swap bytes. */ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { sum += *wbuf; if (*wbuf != AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) { n_error++; } } if (bus_type & ASC_IS_VL) { cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; } else { cfg_beg = ASC_EEP_DVC_CFG_BEG; cfg_end = ASC_EEP_MAX_DVC_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { if (s_addr <= uchar_end_in_config) { /* * This is a char field. Swap char fields before they are * swapped again by AscWriteEEPWord(). */ word = cpu_to_le16(*wbuf); if (word != AscWriteEEPWord(iop_base, (uchar)s_addr, word)) { n_error++; } } else { /* Don't swap word field at the end - cntl field. */ if (*wbuf != AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) { n_error++; } } sum += *wbuf; /* Checksum calculated from word values. */ } /* Write checksum word. It will be swapped by AscWriteEEPWord(). */ *wbuf = sum; if (sum != AscWriteEEPWord(iop_base, (uchar)s_addr, sum)) { n_error++; } /* Read EEPROM back again. */ wbuf = (ushort *)cfg_buf; /* * Read two config words; Byte-swapping done by AscReadEEPWord(). */ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { if (*wbuf != AscReadEEPWord(iop_base, (uchar)s_addr)) { n_error++; } } if (bus_type & ASC_IS_VL) { cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; } else { cfg_beg = ASC_EEP_DVC_CFG_BEG; cfg_end = ASC_EEP_MAX_DVC_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { if (s_addr <= uchar_end_in_config) { /* * Swap all char fields. Must unswap bytes already swapped * by AscReadEEPWord(). */ word = le16_to_cpu(AscReadEEPWord (iop_base, (uchar)s_addr)); } else { /* Don't swap word field at the end - cntl field. */ word = AscReadEEPWord(iop_base, (uchar)s_addr); } if (*wbuf != word) { n_error++; } } /* Read checksum; Byte swapping not needed. 
/*
 * Write the EEPROM configuration, retrying the whole operation while
 * AscSetEEPConfigOnce() reports mismatches.  Returns the error count
 * from the final attempt (0 on success).
 */
static int __devinit
AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
{
	int tries = 0;
	int n_error;

	do {
		n_error = AscSetEEPConfigOnce(iop_base, cfg_buf, bus_type);
	} while (n_error != 0 && ++tries <= ASC_EEP_MAX_RETRY);

	return n_error;
}
eep_config->cfg_msw = AscGetChipCfgMsw(iop_base); } } } eep_config->cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; eep_config->cfg_lsw |= ASC_CFG0_HOST_INT_ON; ASC_DBG(1, "eep_config->chksum 0x%x\n", eep_config->chksum); if (chksum != eep_config->chksum) { if (AscGetChipVersion(iop_base, asc_dvc->bus_type) == ASC_CHIP_VER_PCI_ULTRA_3050) { ASC_DBG(1, "chksum error ignored; EEPROM-less board\n"); eep_config->init_sdtr = 0xFF; eep_config->disc_enable = 0xFF; eep_config->start_motor = 0xFF; eep_config->use_cmd_qng = 0; eep_config->max_total_qng = 0xF0; eep_config->max_tag_qng = 0x20; eep_config->cntl = 0xBFFF; ASC_EEP_SET_CHIP_ID(eep_config, 7); eep_config->no_scam = 0; eep_config->adapter_info[0] = 0; eep_config->adapter_info[1] = 0; eep_config->adapter_info[2] = 0; eep_config->adapter_info[3] = 0; eep_config->adapter_info[4] = 0; /* Indicate EEPROM-less board. */ eep_config->adapter_info[5] = 0xBB; } else { ASC_PRINT ("AscInitFromEEP: EEPROM checksum error; Will try to re-write EEPROM.\n"); write_eep = 1; warn_code |= ASC_WARN_EEPROM_CHKSUM; } } asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr; asc_dvc->cfg->disc_enable = eep_config->disc_enable; asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng; asc_dvc->cfg->isa_dma_speed = ASC_EEP_GET_DMA_SPD(eep_config); asc_dvc->start_motor = eep_config->start_motor; asc_dvc->dvc_cntl = eep_config->cntl; asc_dvc->no_scam = eep_config->no_scam; asc_dvc->cfg->adapter_info[0] = eep_config->adapter_info[0]; asc_dvc->cfg->adapter_info[1] = eep_config->adapter_info[1]; asc_dvc->cfg->adapter_info[2] = eep_config->adapter_info[2]; asc_dvc->cfg->adapter_info[3] = eep_config->adapter_info[3]; asc_dvc->cfg->adapter_info[4] = eep_config->adapter_info[4]; asc_dvc->cfg->adapter_info[5] = eep_config->adapter_info[5]; if (!AscTestExternalLram(asc_dvc)) { if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA)) { eep_config->max_total_qng = ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG; eep_config->max_tag_qng = ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG; } else { 
eep_config->cfg_msw |= 0x0800; cfg_msw |= 0x0800; AscSetChipCfgMsw(iop_base, cfg_msw); eep_config->max_total_qng = ASC_MAX_PCI_INRAM_TOTAL_QNG; eep_config->max_tag_qng = ASC_MAX_INRAM_TAG_QNG; } } else { } if (eep_config->max_total_qng < ASC_MIN_TOTAL_QNG) { eep_config->max_total_qng = ASC_MIN_TOTAL_QNG; } if (eep_config->max_total_qng > ASC_MAX_TOTAL_QNG) { eep_config->max_total_qng = ASC_MAX_TOTAL_QNG; } if (eep_config->max_tag_qng > eep_config->max_total_qng) { eep_config->max_tag_qng = eep_config->max_total_qng; } if (eep_config->max_tag_qng < ASC_MIN_TAG_Q_PER_DVC) { eep_config->max_tag_qng = ASC_MIN_TAG_Q_PER_DVC; } asc_dvc->max_total_qng = eep_config->max_total_qng; if ((eep_config->use_cmd_qng & eep_config->disc_enable) != eep_config->use_cmd_qng) { eep_config->disc_enable = eep_config->use_cmd_qng; warn_code |= ASC_WARN_CMD_QNG_CONFLICT; } ASC_EEP_SET_CHIP_ID(eep_config, ASC_EEP_GET_CHIP_ID(eep_config) & ASC_MAX_TID); asc_dvc->cfg->chip_scsi_id = ASC_EEP_GET_CHIP_ID(eep_config); if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) && !(asc_dvc->dvc_cntl & ASC_CNTL_SDTR_ENABLE_ULTRA)) { asc_dvc->min_sdtr_index = ASC_SDTR_ULTRA_PCI_10MB_INDEX; } for (i = 0; i <= ASC_MAX_TID; i++) { asc_dvc->dos_int13_table[i] = eep_config->dos_int13_table[i]; asc_dvc->cfg->max_tag_qng[i] = eep_config->max_tag_qng; asc_dvc->cfg->sdtr_period_offset[i] = (uchar)(ASC_DEF_SDTR_OFFSET | (asc_dvc->min_sdtr_index << 4)); } eep_config->cfg_msw = AscGetChipCfgMsw(iop_base); if (write_eep) { if ((i = AscSetEEPConfig(iop_base, eep_config, asc_dvc->bus_type)) != 0) { ASC_PRINT1 ("AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i); } else { ASC_PRINT ("AscInitFromEEP: Successfully re-wrote EEPROM.\n"); } } return (warn_code); } static int __devinit AscInitGetConfig(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var; unsigned short warn_code = 0; asc_dvc->init_state = 
ASC_INIT_STATE_BEG_GET_CFG; if (asc_dvc->err_code != 0) return asc_dvc->err_code; if (AscFindSignature(asc_dvc->iop_base)) { warn_code |= AscInitAscDvcVar(asc_dvc); warn_code |= AscInitFromEEP(asc_dvc); asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG; if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT) asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT; } else { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; } switch (warn_code) { case 0: /* No error */ break; case ASC_WARN_IO_PORT_ROTATE: shost_printk(KERN_WARNING, shost, "I/O port address " "modified\n"); break; case ASC_WARN_AUTO_CONFIG: shost_printk(KERN_WARNING, shost, "I/O port increment switch " "enabled\n"); break; case ASC_WARN_EEPROM_CHKSUM: shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n"); break; case ASC_WARN_IRQ_MODIFIED: shost_printk(KERN_WARNING, shost, "IRQ modified\n"); break; case ASC_WARN_CMD_QNG_CONFLICT: shost_printk(KERN_WARNING, shost, "tag queuing enabled w/o " "disconnects\n"); break; default: shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n", warn_code); break; } if (asc_dvc->err_code != 0) shost_printk(KERN_ERR, shost, "error 0x%x at init_state " "0x%x\n", asc_dvc->err_code, asc_dvc->init_state); return asc_dvc->err_code; } static int __devinit AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var; PortAddr iop_base = asc_dvc->iop_base; unsigned short cfg_msw; unsigned short warn_code = 0; asc_dvc->init_state |= ASC_INIT_STATE_BEG_SET_CFG; if (asc_dvc->err_code != 0) return asc_dvc->err_code; if (!AscFindSignature(asc_dvc->iop_base)) { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; return asc_dvc->err_code; } cfg_msw = AscGetChipCfgMsw(iop_base); if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) { cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; warn_code |= ASC_WARN_CFG_MSW_RECOVER; AscSetChipCfgMsw(iop_base, cfg_msw); } if ((asc_dvc->cfg->cmd_qng_enabled & 
asc_dvc->cfg->disc_enable) != asc_dvc->cfg->cmd_qng_enabled) { asc_dvc->cfg->disc_enable = asc_dvc->cfg->cmd_qng_enabled; warn_code |= ASC_WARN_CMD_QNG_CONFLICT; } if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) { warn_code |= ASC_WARN_AUTO_CONFIG; } #ifdef CONFIG_PCI if (asc_dvc->bus_type & ASC_IS_PCI) { cfg_msw &= 0xFFC0; AscSetChipCfgMsw(iop_base, cfg_msw); if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { } else { if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) || (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) { asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB; asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN; } } } else #endif /* CONFIG_PCI */ if (asc_dvc->bus_type == ASC_IS_ISAPNP) { if (AscGetChipVersion(iop_base, asc_dvc->bus_type) == ASC_CHIP_VER_ASYN_BUG) { asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN; } } if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) != asc_dvc->cfg->chip_scsi_id) { asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID; } #ifdef CONFIG_ISA if (asc_dvc->bus_type & ASC_IS_ISA) { AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel); AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed); } #endif /* CONFIG_ISA */ asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG; switch (warn_code) { case 0: /* No error. 
*/ break; case ASC_WARN_IO_PORT_ROTATE: shost_printk(KERN_WARNING, shost, "I/O port address " "modified\n"); break; case ASC_WARN_AUTO_CONFIG: shost_printk(KERN_WARNING, shost, "I/O port increment switch " "enabled\n"); break; case ASC_WARN_EEPROM_CHKSUM: shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n"); break; case ASC_WARN_IRQ_MODIFIED: shost_printk(KERN_WARNING, shost, "IRQ modified\n"); break; case ASC_WARN_CMD_QNG_CONFLICT: shost_printk(KERN_WARNING, shost, "tag queuing w/o " "disconnects\n"); break; default: shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n", warn_code); break; } if (asc_dvc->err_code != 0) shost_printk(KERN_ERR, shost, "error 0x%x at init_state " "0x%x\n", asc_dvc->err_code, asc_dvc->init_state); return asc_dvc->err_code; } /* * EEPROM Configuration. * * All drivers should use this structure to set the default EEPROM * configuration. The BIOS now uses this structure when it is built. * Additional structure information can be found in a_condor.h where * the structure is defined. * * The *_Field_IsChar structs are needed to correct for endianness. * These values are read from the board 16 bits at a time directly * into the structs. Because some fields are char, the values will be * in the wrong order. The *_Field_IsChar tells when to flip the * bytes. Data read and written to PCI memory is automatically swapped * on big-endian platforms so char fields read as words are actually being * unswapped on big-endian platforms. 
*/ static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config __devinitdata = { ADV_EEPROM_BIOS_ENABLE, /* cfg_lsw */ 0x0000, /* cfg_msw */ 0xFFFF, /* disc_enable */ 0xFFFF, /* wdtr_able */ 0xFFFF, /* sdtr_able */ 0xFFFF, /* start_motor */ 0xFFFF, /* tagqng_able */ 0xFFFF, /* bios_scan */ 0, /* scam_tolerant */ 7, /* adapter_scsi_id */ 0, /* bios_boot_delay */ 3, /* scsi_reset_delay */ 0, /* bios_id_lun */ 0, /* termination */ 0, /* reserved1 */ 0xFFE7, /* bios_ctrl */ 0xFFFF, /* ultra_able */ 0, /* reserved2 */ ASC_DEF_MAX_HOST_QNG, /* max_host_qng */ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ 0, /* dvc_cntl */ 0, /* bug_fix */ 0, /* serial_number_word1 */ 0, /* serial_number_word2 */ 0, /* serial_number_word3 */ 0, /* check_sum */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , /* oem_name[16] */ 0, /* dvc_err_code */ 0, /* adv_err_code */ 0, /* adv_err_addr */ 0, /* saved_dvc_err_code */ 0, /* saved_adv_err_code */ 0, /* saved_adv_err_addr */ 0 /* num_of_err */ }; static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar __devinitdata = { 0, /* cfg_lsw */ 0, /* cfg_msw */ 0, /* -disc_enable */ 0, /* wdtr_able */ 0, /* sdtr_able */ 0, /* start_motor */ 0, /* tagqng_able */ 0, /* bios_scan */ 0, /* scam_tolerant */ 1, /* adapter_scsi_id */ 1, /* bios_boot_delay */ 1, /* scsi_reset_delay */ 1, /* bios_id_lun */ 1, /* termination */ 1, /* reserved1 */ 0, /* bios_ctrl */ 0, /* ultra_able */ 0, /* reserved2 */ 1, /* max_host_qng */ 1, /* max_dvc_qng */ 0, /* dvc_cntl */ 0, /* bug_fix */ 0, /* serial_number_word1 */ 0, /* serial_number_word2 */ 0, /* serial_number_word3 */ 0, /* check_sum */ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , /* oem_name[16] */ 0, /* dvc_err_code */ 0, /* adv_err_code */ 0, /* adv_err_addr */ 0, /* saved_dvc_err_code */ 0, /* saved_adv_err_code */ 0, /* saved_adv_err_addr */ 0 /* num_of_err */ }; static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config __devinitdata = { ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */ 0x0000, /* 01 cfg_msw */ 
0xFFFF, /* 02 disc_enable */ 0xFFFF, /* 03 wdtr_able */ 0x4444, /* 04 sdtr_speed1 */ 0xFFFF, /* 05 start_motor */ 0xFFFF, /* 06 tagqng_able */ 0xFFFF, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 7, /* 09 adapter_scsi_id */ 0, /* bios_boot_delay */ 3, /* 10 scsi_reset_delay */ 0, /* bios_id_lun */ 0, /* 11 termination_se */ 0, /* termination_lvd */ 0xFFE7, /* 12 bios_ctrl */ 0x4444, /* 13 sdtr_speed2 */ 0x4444, /* 14 sdtr_speed3 */ ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0x4444, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, /* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ PCI_VENDOR_ID_ASP, /* 58 subsysvid */ PCI_DEVICE_ID_38C0800_REV1, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar __devinitdata = { 0, /* 00 cfg_lsw */ 0, /* 01 cfg_msw */ 0, /* 02 disc_enable */ 0, /* 03 wdtr_able */ 0, /* 04 sdtr_speed1 */ 0, /* 05 start_motor */ 0, /* 06 tagqng_able */ 0, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 1, /* 09 adapter_scsi_id */ 1, /* bios_boot_delay */ 1, /* 10 scsi_reset_delay */ 1, /* bios_id_lun */ 1, /* 11 termination_se */ 1, /* 
termination_lvd */ 0, /* 12 bios_ctrl */ 0, /* 13 sdtr_speed2 */ 0, /* 14 sdtr_speed3 */ 1, /* 15 max_host_qng */ 1, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, /* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ 0, /* 58 subsysvid */ 0, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config __devinitdata = { ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */ 0x0000, /* 01 cfg_msw */ 0xFFFF, /* 02 disc_enable */ 0xFFFF, /* 03 wdtr_able */ 0x5555, /* 04 sdtr_speed1 */ 0xFFFF, /* 05 start_motor */ 0xFFFF, /* 06 tagqng_able */ 0xFFFF, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 7, /* 09 adapter_scsi_id */ 0, /* bios_boot_delay */ 3, /* 10 scsi_reset_delay */ 0, /* bios_id_lun */ 0, /* 11 termination_se */ 0, /* termination_lvd */ 0xFFE7, /* 12 bios_ctrl */ 0x5555, /* 13 sdtr_speed2 */ 0x5555, /* 14 sdtr_speed3 */ ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0x5555, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, /* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ PCI_VENDOR_ID_ASP, /* 58 subsysvid */ PCI_DEVICE_ID_38C1600_REV1, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar __devinitdata = { 0, /* 00 cfg_lsw */ 0, /* 01 cfg_msw */ 0, /* 02 disc_enable */ 0, /* 03 wdtr_able */ 0, /* 04 sdtr_speed1 */ 0, /* 05 start_motor */ 0, /* 06 tagqng_able */ 0, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 1, /* 09 adapter_scsi_id */ 1, /* bios_boot_delay */ 1, /* 10 scsi_reset_delay */ 1, /* bios_id_lun */ 1, /* 11 termination_se */ 1, /* termination_lvd */ 0, /* 12 bios_ctrl */ 0, /* 13 sdtr_speed2 */ 0, /* 14 sdtr_speed3 */ 1, /* 15 max_host_qng */ 1, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 
	   reserved */
	0,			/* 45 reserved */
	0,			/* 46 reserved */
	0,			/* 47 reserved */
	0,			/* 48 reserved */
	0,			/* 49 reserved */
	0,			/* 50 reserved */
	0,			/* 51 reserved */
	0,			/* 52 reserved */
	0,			/* 53 reserved */
	0,			/* 54 reserved */
	0,			/* 55 reserved */
	0,			/* 56 cisptr_lsw */
	0,			/* 57 cisprt_msw */
	0,			/* 58 subsysvid */
	0,			/* 59 subsysid */
	0,			/* 60 reserved */
	0,			/* 61 reserved */
	0,			/* 62 reserved */
	0			/* 63 reserved */
};

#ifdef CONFIG_PCI
/*
 * Wait for EEPROM command to complete
 *
 * Polls IOPW_EE_CMD for ASC_EEP_CMD_DONE for up to ADV_EEP_DELAY_MS
 * milliseconds (1 ms per poll).  An EEPROM that never reports
 * completion is treated as an unrecoverable hardware fault: BUG().
 */
static void __devinit AdvWaitEEPCmd(AdvPortAddr iop_base)
{
	int eep_delay_ms;

	for (eep_delay_ms = 0; eep_delay_ms < ADV_EEP_DELAY_MS;
	     eep_delay_ms++) {
		if (AdvReadWordRegister(iop_base, IOPW_EE_CMD) &
		    ASC_EEP_CMD_DONE) {
			break;
		}
		mdelay(1);	/* busy-wait is acceptable; probe-time only */
	}
	if ((AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE) ==
	    0)
		BUG();
}

/*
 * Read the EEPROM from specified location
 *
 * Issues an ASC_EEP_CMD_READ for word address 'eep_word_addr', waits
 * for completion and returns the 16-bit word latched in IOPW_EE_DATA.
 */
static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
{
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
			     ASC_EEP_CMD_READ | eep_word_addr);
	AdvWaitEEPCmd(iop_base);
	return AdvReadWordRegister(iop_base, IOPW_EE_DATA);
}

/*
 * Write the EEPROM from 'cfg_buf'.
 *
 * Words flagged in ADVEEP_3550_Config_Field_IsChar hold pairs of char
 * fields and are converted with cpu_to_le16() before being written, so
 * the EEPROM image is byte-order independent; the checksum is always
 * accumulated from the unconverted in-memory word values.
 */
static void __devinit
AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
{
	ushort *wbuf;
	ushort addr, chksum;
	ushort *charfields;	/* non-zero entry => word holds char data */

	wbuf = (ushort *)cfg_buf;
	charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar;
	chksum = 0;

	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
	AdvWaitEEPCmd(iop_base);

	/*
	 * Write EEPROM from word 0 to word 20.
	 */
	for (addr = ADV_EEP_DVC_CFG_BEGIN;
	     addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		chksum += *wbuf;	/* Checksum is calculated from word values. */
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
		/* extra delay - presumably the EEPROM write-cycle time */
		mdelay(ADV_EEP_DELAY_MS);
	}

	/*
	 * Write EEPROM checksum at word 21.
	 */
	AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
	AdvWaitEEPCmd(iop_base);
	wbuf++;			/* skip past the checksum word just written */
	charfields++;

	/*
	 * Write EEPROM OEM name at words 22 to 29.
	 */
	for (addr = ADV_EEP_DVC_CTL_BEGIN;
	     addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
	}
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
	AdvWaitEEPCmd(iop_base);
}

/*
 * Write the EEPROM from 'cfg_buf'.
 *
 * ASC-38C0800 variant; identical flow to AdvSet3550EEPConfig() but
 * using the ADVEEP_38C0800_Config_Field_IsChar byte-swap map.
 */
static void __devinit
AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
{
	ushort *wbuf;
	ushort *charfields;	/* non-zero entry => word holds char data */
	ushort addr, chksum;

	wbuf = (ushort *)cfg_buf;
	charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar;
	chksum = 0;

	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
	AdvWaitEEPCmd(iop_base);

	/*
	 * Write EEPROM from word 0 to word 20.
	 */
	for (addr = ADV_EEP_DVC_CFG_BEGIN;
	     addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		chksum += *wbuf;	/* Checksum is calculated from word values. */
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
		/* extra delay - presumably the EEPROM write-cycle time */
		mdelay(ADV_EEP_DELAY_MS);
	}

	/*
	 * Write EEPROM checksum at word 21.
	 */
	AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
	AdvWaitEEPCmd(iop_base);
	wbuf++;			/* skip past the checksum word just written */
	charfields++;

	/*
	 * Write EEPROM OEM name at words 22 to 29.
	 */
	for (addr = ADV_EEP_DVC_CTL_BEGIN;
	     addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
	}
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
	AdvWaitEEPCmd(iop_base);
}

/*
 * Write the EEPROM from 'cfg_buf'.
 *
 * ASC-38C1600 variant; identical flow to AdvSet3550EEPConfig() but
 * using the ADVEEP_38C1600_Config_Field_IsChar byte-swap map.  Char
 * words are stored little-endian so the image is byte-order
 * independent; the checksum covers words 0-20 and is written at word
 * 21.
 */
static void __devinit
AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
{
	ushort *wbuf;
	ushort *charfields;	/* non-zero entry => word holds char data */
	ushort addr, chksum;

	wbuf = (ushort *)cfg_buf;
	charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar;
	chksum = 0;

	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
	AdvWaitEEPCmd(iop_base);

	/*
	 * Write EEPROM from word 0 to word 20.
	 */
	for (addr = ADV_EEP_DVC_CFG_BEGIN;
	     addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		chksum += *wbuf;	/* Checksum is calculated from word values. */
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
		/* extra delay - presumably the EEPROM write-cycle time */
		mdelay(ADV_EEP_DELAY_MS);
	}

	/*
	 * Write EEPROM checksum at word 21.
	 */
	AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
	AdvWaitEEPCmd(iop_base);
	wbuf++;			/* skip past the checksum word just written */
	charfields++;

	/*
	 * Write EEPROM OEM name at words 22 to 29.
	 */
	for (addr = ADV_EEP_DVC_CTL_BEGIN;
	     addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
	}
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
	AdvWaitEEPCmd(iop_base);
}

/*
 * Read EEPROM configuration into the specified buffer.
 *
 * Return a checksum based on the EEPROM configuration read.  The
 * caller compares it against the stored check_sum word to detect a
 * corrupt EEPROM.  Char-bearing words are converted from little endian
 * with le16_to_cpu() per ADVEEP_3550_Config_Field_IsChar; the checksum
 * is accumulated from the raw word values (words 0-20 only).
 */
static ushort __devinit
AdvGet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
{
	ushort wval, chksum;
	ushort *wbuf;
	int eep_addr;
	ushort *charfields;	/* non-zero entry => word holds char data */

	charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar;
	wbuf = (ushort *)cfg_buf;
	chksum = 0;

	for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
	     eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
		wval = AdvReadEEPWord(iop_base, eep_addr);
		chksum += wval;	/* Checksum is calculated from word values. */
		if (*charfields++) {
			*wbuf = le16_to_cpu(wval);
		} else {
			*wbuf = wval;
		}
	}
	/* Read checksum word. */
	*wbuf = AdvReadEEPWord(iop_base, eep_addr);
	wbuf++;
	charfields++;

	/* Read rest of EEPROM not covered by the checksum. */
	for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
	     eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
		*wbuf = AdvReadEEPWord(iop_base, eep_addr);
		if (*charfields++) {
			*wbuf = le16_to_cpu(*wbuf);
		}
	}
	return chksum;
}

/*
 * Read EEPROM configuration into the specified buffer.
 *
 * Return a checksum based on the EEPROM configuration read.
*/ static ushort __devinit AdvGet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf) { ushort wval, chksum; ushort *wbuf; int eep_addr; ushort *charfields; charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar; wbuf = (ushort *)cfg_buf; chksum = 0; for (eep_addr = ADV_EEP_DVC_CFG_BEGIN; eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) { wval = AdvReadEEPWord(iop_base, eep_addr); chksum += wval; /* Checksum is calculated from word values. */ if (*charfields++) { *wbuf = le16_to_cpu(wval); } else { *wbuf = wval; } } /* Read checksum word. */ *wbuf = AdvReadEEPWord(iop_base, eep_addr); wbuf++; charfields++; /* Read rest of EEPROM not covered by the checksum. */ for (eep_addr = ADV_EEP_DVC_CTL_BEGIN; eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) { *wbuf = AdvReadEEPWord(iop_base, eep_addr); if (*charfields++) { *wbuf = le16_to_cpu(*wbuf); } } return chksum; } /* * Read EEPROM configuration into the specified buffer. * * Return a checksum based on the EEPROM configuration read. */ static ushort __devinit AdvGet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf) { ushort wval, chksum; ushort *wbuf; int eep_addr; ushort *charfields; charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar; wbuf = (ushort *)cfg_buf; chksum = 0; for (eep_addr = ADV_EEP_DVC_CFG_BEGIN; eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) { wval = AdvReadEEPWord(iop_base, eep_addr); chksum += wval; /* Checksum is calculated from word values. */ if (*charfields++) { *wbuf = le16_to_cpu(wval); } else { *wbuf = wval; } } /* Read checksum word. */ *wbuf = AdvReadEEPWord(iop_base, eep_addr); wbuf++; charfields++; /* Read rest of EEPROM not covered by the checksum. */ for (eep_addr = ADV_EEP_DVC_CTL_BEGIN; eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) { *wbuf = AdvReadEEPWord(iop_base, eep_addr); if (*charfields++) { *wbuf = le16_to_cpu(*wbuf); } } return chksum; } /* * Read the board's EEPROM configuration. 
Set fields in ADV_DVC_VAR and * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while * all of this is done. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Note: Chip is stopped on entry. */ static int __devinit AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc) { AdvPortAddr iop_base; ushort warn_code; ADVEEP_3550_CONFIG eep_config; iop_base = asc_dvc->iop_base; warn_code = 0; /* * Read the board's EEPROM configuration. * * Set default values if a bad checksum is found. */ if (AdvGet3550EEPConfig(iop_base, &eep_config) != eep_config.check_sum) { warn_code |= ASC_WARN_EEPROM_CHKSUM; /* * Set EEPROM default values. */ memcpy(&eep_config, &Default_3550_EEPROM_Config, sizeof(ADVEEP_3550_CONFIG)); /* * Assume the 6 byte board serial number that was read from * EEPROM is correct even if the EEPROM checksum failed. */ eep_config.serial_number_word3 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1); eep_config.serial_number_word2 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2); eep_config.serial_number_word1 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3); AdvSet3550EEPConfig(iop_base, &eep_config); } /* * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the * EEPROM configuration that was read. * * This is the mapping of EEPROM fields to Adv Library fields. 
*/ asc_dvc->wdtr_able = eep_config.wdtr_able; asc_dvc->sdtr_able = eep_config.sdtr_able; asc_dvc->ultra_able = eep_config.ultra_able; asc_dvc->tagqng_able = eep_config.tagqng_able; asc_dvc->cfg->disc_enable = eep_config.disc_enable; asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID); asc_dvc->start_motor = eep_config.start_motor; asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay; asc_dvc->bios_ctrl = eep_config.bios_ctrl; asc_dvc->no_scam = eep_config.scam_tolerant; asc_dvc->cfg->serial1 = eep_config.serial_number_word1; asc_dvc->cfg->serial2 = eep_config.serial_number_word2; asc_dvc->cfg->serial3 = eep_config.serial_number_word3; /* * Set the host maximum queuing (max. 253, min. 16) and the per device * maximum queuing (max. 63, min. 4). */ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_host_qng == 0) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else { eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG; } } if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_dvc_qng == 0) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else { eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG; } } /* * If 'max_dvc_qng' is greater than 'max_host_qng', then * set 'max_dvc_qng' to 'max_host_qng'. */ if (eep_config.max_dvc_qng > eep_config.max_host_qng) { eep_config.max_dvc_qng = eep_config.max_host_qng; } /* * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng' * values based on possibly adjusted EEPROM values. 
*/ asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; /* * If the EEPROM 'termination' field is set to automatic (0), then set * the ADV_DVC_CFG 'termination' field to automatic also. * * If the termination is specified with a non-zero 'termination' * value check that a legal value is set and set the ADV_DVC_CFG * 'termination' field appropriately. */ if (eep_config.termination == 0) { asc_dvc->cfg->termination = 0; /* auto termination */ } else { /* Enable manual control with low off / high off. */ if (eep_config.termination == 1) { asc_dvc->cfg->termination = TERM_CTL_SEL; /* Enable manual control with low off / high on. */ } else if (eep_config.termination == 2) { asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H; /* Enable manual control with low on / high on. */ } else if (eep_config.termination == 3) { asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H | TERM_CTL_L; } else { /* * The EEPROM 'termination' field contains a bad value. Use * automatic termination instead. */ asc_dvc->cfg->termination = 0; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } return warn_code; } /* * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while * all of this is done. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Note: Chip is stopped on entry. */ static int __devinit AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc) { AdvPortAddr iop_base; ushort warn_code; ADVEEP_38C0800_CONFIG eep_config; uchar tid, termination; ushort sdtr_speed = 0; iop_base = asc_dvc->iop_base; warn_code = 0; /* * Read the board's EEPROM configuration. * * Set default values if a bad checksum is found. */ if (AdvGet38C0800EEPConfig(iop_base, &eep_config) != eep_config.check_sum) { warn_code |= ASC_WARN_EEPROM_CHKSUM; /* * Set EEPROM default values. 
*/ memcpy(&eep_config, &Default_38C0800_EEPROM_Config, sizeof(ADVEEP_38C0800_CONFIG)); /* * Assume the 6 byte board serial number that was read from * EEPROM is correct even if the EEPROM checksum failed. */ eep_config.serial_number_word3 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1); eep_config.serial_number_word2 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2); eep_config.serial_number_word1 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3); AdvSet38C0800EEPConfig(iop_base, &eep_config); } /* * Set ADV_DVC_VAR and ADV_DVC_CFG variables from the * EEPROM configuration that was read. * * This is the mapping of EEPROM fields to Adv Library fields. */ asc_dvc->wdtr_able = eep_config.wdtr_able; asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1; asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2; asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3; asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4; asc_dvc->tagqng_able = eep_config.tagqng_able; asc_dvc->cfg->disc_enable = eep_config.disc_enable; asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID); asc_dvc->start_motor = eep_config.start_motor; asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay; asc_dvc->bios_ctrl = eep_config.bios_ctrl; asc_dvc->no_scam = eep_config.scam_tolerant; asc_dvc->cfg->serial1 = eep_config.serial_number_word1; asc_dvc->cfg->serial2 = eep_config.serial_number_word2; asc_dvc->cfg->serial3 = eep_config.serial_number_word3; /* * For every Target ID if any of its 'sdtr_speed[1234]' bits * are set, then set an 'sdtr_able' bit for it. 
*/ asc_dvc->sdtr_able = 0; for (tid = 0; tid <= ADV_MAX_TID; tid++) { if (tid == 0) { sdtr_speed = asc_dvc->sdtr_speed1; } else if (tid == 4) { sdtr_speed = asc_dvc->sdtr_speed2; } else if (tid == 8) { sdtr_speed = asc_dvc->sdtr_speed3; } else if (tid == 12) { sdtr_speed = asc_dvc->sdtr_speed4; } if (sdtr_speed & ADV_MAX_TID) { asc_dvc->sdtr_able |= (1 << tid); } sdtr_speed >>= 4; } /* * Set the host maximum queuing (max. 253, min. 16) and the per device * maximum queuing (max. 63, min. 4). */ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_host_qng == 0) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else { eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG; } } if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_dvc_qng == 0) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else { eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG; } } /* * If 'max_dvc_qng' is greater than 'max_host_qng', then * set 'max_dvc_qng' to 'max_host_qng'. */ if (eep_config.max_dvc_qng > eep_config.max_host_qng) { eep_config.max_dvc_qng = eep_config.max_host_qng; } /* * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng' * values based on possibly adjusted EEPROM values. */ asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; /* * If the EEPROM 'termination' field is set to automatic (0), then set * the ADV_DVC_CFG 'termination' field to automatic also. * * If the termination is specified with a non-zero 'termination' * value check that a legal value is set and set the ADV_DVC_CFG * 'termination' field appropriately. 
*/ if (eep_config.termination_se == 0) { termination = 0; /* auto termination for SE */ } else { /* Enable manual control with low off / high off. */ if (eep_config.termination_se == 1) { termination = 0; /* Enable manual control with low off / high on. */ } else if (eep_config.termination_se == 2) { termination = TERM_SE_HI; /* Enable manual control with low on / high on. */ } else if (eep_config.termination_se == 3) { termination = TERM_SE; } else { /* * The EEPROM 'termination_se' field contains a bad value. * Use automatic termination instead. */ termination = 0; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } if (eep_config.termination_lvd == 0) { asc_dvc->cfg->termination = termination; /* auto termination for LVD */ } else { /* Enable manual control with low off / high off. */ if (eep_config.termination_lvd == 1) { asc_dvc->cfg->termination = termination; /* Enable manual control with low off / high on. */ } else if (eep_config.termination_lvd == 2) { asc_dvc->cfg->termination = termination | TERM_LVD_HI; /* Enable manual control with low on / high on. */ } else if (eep_config.termination_lvd == 3) { asc_dvc->cfg->termination = termination | TERM_LVD; } else { /* * The EEPROM 'termination_lvd' field contains a bad value. * Use automatic termination instead. */ asc_dvc->cfg->termination = termination; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } return warn_code; } /* * Read the board's EEPROM configuration. Set fields in ASC_DVC_VAR and * ASC_DVC_CFG based on the EEPROM settings. The chip is stopped while * all of this is done. * * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Note: Chip is stopped on entry. 
*/ static int __devinit AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc) { AdvPortAddr iop_base; ushort warn_code; ADVEEP_38C1600_CONFIG eep_config; uchar tid, termination; ushort sdtr_speed = 0; iop_base = asc_dvc->iop_base; warn_code = 0; /* * Read the board's EEPROM configuration. * * Set default values if a bad checksum is found. */ if (AdvGet38C1600EEPConfig(iop_base, &eep_config) != eep_config.check_sum) { struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc); warn_code |= ASC_WARN_EEPROM_CHKSUM; /* * Set EEPROM default values. */ memcpy(&eep_config, &Default_38C1600_EEPROM_Config, sizeof(ADVEEP_38C1600_CONFIG)); if (PCI_FUNC(pdev->devfn) != 0) { u8 ints; /* * Disable Bit 14 (BIOS_ENABLE) to fix SPARC Ultra 60 * and old Mac system booting problem. The Expansion * ROM must be disabled in Function 1 for these systems */ eep_config.cfg_lsw &= ~ADV_EEPROM_BIOS_ENABLE; /* * Clear the INTAB (bit 11) if the GPIO 0 input * indicates the Function 1 interrupt line is wired * to INTB. * * Set/Clear Bit 11 (INTAB) from the GPIO bit 0 input: * 1 - Function 1 interrupt line wired to INT A. * 0 - Function 1 interrupt line wired to INT B. * * Note: Function 0 is always wired to INTA. * Put all 5 GPIO bits in input mode and then read * their input values. */ AdvWriteByteRegister(iop_base, IOPB_GPIO_CNTL, 0); ints = AdvReadByteRegister(iop_base, IOPB_GPIO_DATA); if ((ints & 0x01) == 0) eep_config.cfg_lsw &= ~ADV_EEPROM_INTAB; } /* * Assume the 6 byte board serial number that was read from * EEPROM is correct even if the EEPROM checksum failed. */ eep_config.serial_number_word3 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1); eep_config.serial_number_word2 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2); eep_config.serial_number_word1 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3); AdvSet38C1600EEPConfig(iop_base, &eep_config); } /* * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the * EEPROM configuration that was read. 
* * This is the mapping of EEPROM fields to Adv Library fields. */ asc_dvc->wdtr_able = eep_config.wdtr_able; asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1; asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2; asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3; asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4; asc_dvc->ppr_able = 0; asc_dvc->tagqng_able = eep_config.tagqng_able; asc_dvc->cfg->disc_enable = eep_config.disc_enable; asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ASC_MAX_TID); asc_dvc->start_motor = eep_config.start_motor; asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay; asc_dvc->bios_ctrl = eep_config.bios_ctrl; asc_dvc->no_scam = eep_config.scam_tolerant; /* * For every Target ID if any of its 'sdtr_speed[1234]' bits * are set, then set an 'sdtr_able' bit for it. */ asc_dvc->sdtr_able = 0; for (tid = 0; tid <= ASC_MAX_TID; tid++) { if (tid == 0) { sdtr_speed = asc_dvc->sdtr_speed1; } else if (tid == 4) { sdtr_speed = asc_dvc->sdtr_speed2; } else if (tid == 8) { sdtr_speed = asc_dvc->sdtr_speed3; } else if (tid == 12) { sdtr_speed = asc_dvc->sdtr_speed4; } if (sdtr_speed & ASC_MAX_TID) { asc_dvc->sdtr_able |= (1 << tid); } sdtr_speed >>= 4; } /* * Set the host maximum queuing (max. 253, min. 16) and the per device * maximum queuing (max. 63, min. 4). */ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_host_qng == 0) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else { eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG; } } if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) { /* If the value is zero, assume it is uninitialized. 
*/ if (eep_config.max_dvc_qng == 0) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else { eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG; } } /* * If 'max_dvc_qng' is greater than 'max_host_qng', then * set 'max_dvc_qng' to 'max_host_qng'. */ if (eep_config.max_dvc_qng > eep_config.max_host_qng) { eep_config.max_dvc_qng = eep_config.max_host_qng; } /* * Set ASC_DVC_VAR 'max_host_qng' and ASC_DVC_VAR 'max_dvc_qng' * values based on possibly adjusted EEPROM values. */ asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; /* * If the EEPROM 'termination' field is set to automatic (0), then set * the ASC_DVC_CFG 'termination' field to automatic also. * * If the termination is specified with a non-zero 'termination' * value check that a legal value is set and set the ASC_DVC_CFG * 'termination' field appropriately. */ if (eep_config.termination_se == 0) { termination = 0; /* auto termination for SE */ } else { /* Enable manual control with low off / high off. */ if (eep_config.termination_se == 1) { termination = 0; /* Enable manual control with low off / high on. */ } else if (eep_config.termination_se == 2) { termination = TERM_SE_HI; /* Enable manual control with low on / high on. */ } else if (eep_config.termination_se == 3) { termination = TERM_SE; } else { /* * The EEPROM 'termination_se' field contains a bad value. * Use automatic termination instead. */ termination = 0; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } if (eep_config.termination_lvd == 0) { asc_dvc->cfg->termination = termination; /* auto termination for LVD */ } else { /* Enable manual control with low off / high off. */ if (eep_config.termination_lvd == 1) { asc_dvc->cfg->termination = termination; /* Enable manual control with low off / high on. */ } else if (eep_config.termination_lvd == 2) { asc_dvc->cfg->termination = termination | TERM_LVD_HI; /* Enable manual control with low on / high on. 
*/ } else if (eep_config.termination_lvd == 3) { asc_dvc->cfg->termination = termination | TERM_LVD; } else { /* * The EEPROM 'termination_lvd' field contains a bad value. * Use automatic termination instead. */ asc_dvc->cfg->termination = termination; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } return warn_code; } /* * Initialize the ADV_DVC_VAR structure. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. */ static int __devinit AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ADV_DVC_VAR *asc_dvc = &board->dvc_var.adv_dvc_var; unsigned short warn_code = 0; AdvPortAddr iop_base = asc_dvc->iop_base; u16 cmd; int status; asc_dvc->err_code = 0; /* * Save the state of the PCI Configuration Command Register * "Parity Error Response Control" Bit. If the bit is clear (0), * in AdvInitAsc3550/38C0800Driver() tell the microcode to ignore * DMA parity errors. */ asc_dvc->cfg->control_flag = 0; pci_read_config_word(pdev, PCI_COMMAND, &cmd); if ((cmd & PCI_COMMAND_PARITY) == 0) asc_dvc->cfg->control_flag |= CONTROL_FLAG_IGNORE_PERR; asc_dvc->cfg->chip_version = AdvGetChipVersion(iop_base, asc_dvc->bus_type); ASC_DBG(1, "iopb_chip_id_1: 0x%x 0x%x\n", (ushort)AdvReadByteRegister(iop_base, IOPB_CHIP_ID_1), (ushort)ADV_CHIP_ID_BYTE); ASC_DBG(1, "iopw_chip_id_0: 0x%x 0x%x\n", (ushort)AdvReadWordRegister(iop_base, IOPW_CHIP_ID_0), (ushort)ADV_CHIP_ID_WORD); /* * Reset the chip to start and allow register writes. */ if (AdvFindSignature(iop_base) == 0) { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; return ADV_ERROR; } else { /* * The caller must set 'chip_type' to a valid setting. */ if (asc_dvc->chip_type != ADV_CHIP_ASC3550 && asc_dvc->chip_type != ADV_CHIP_ASC38C0800 && asc_dvc->chip_type != ADV_CHIP_ASC38C1600) { asc_dvc->err_code |= ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } /* * Reset Chip. 
*/ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET); mdelay(100); AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_WR_IO_REG); if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { status = AdvInitFrom38C1600EEP(asc_dvc); } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { status = AdvInitFrom38C0800EEP(asc_dvc); } else { status = AdvInitFrom3550EEP(asc_dvc); } warn_code |= status; } if (warn_code != 0) shost_printk(KERN_WARNING, shost, "warning: 0x%x\n", warn_code); if (asc_dvc->err_code) shost_printk(KERN_ERR, shost, "error code 0x%x\n", asc_dvc->err_code); return asc_dvc->err_code; } #endif static struct scsi_host_template advansys_template = { .proc_name = DRV_NAME, #ifdef CONFIG_PROC_FS .proc_info = advansys_proc_info, #endif .name = DRV_NAME, .info = advansys_info, .queuecommand = advansys_queuecommand, .eh_bus_reset_handler = advansys_reset, .bios_param = advansys_biosparam, .slave_configure = advansys_slave_configure, /* * Because the driver may control an ISA adapter 'unchecked_isa_dma' * must be set. The flag will be cleared in advansys_board_found * for non-ISA adapters. */ .unchecked_isa_dma = 1, /* * All adapters controlled by this driver are capable of large * scatter-gather lists. According to the mid-level SCSI documentation * this obviates any performance gain provided by setting * 'use_clustering'. But empirically while CPU utilization is increased * by enabling clustering, I/O throughput increases as well. */ .use_clustering = ENABLE_CLUSTERING, }; static int __devinit advansys_wide_init_chip(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var; int req_cnt = 0; adv_req_t *reqp = NULL; int sg_cnt = 0; adv_sgblk_t *sgp; int warn_code, err_code; /* * Allocate buffer carrier structures. The total size * is about 4 KB, so allocate all at once. 
*/ adv_dvc->carrier_buf = kmalloc(ADV_CARRIER_BUFSIZE, GFP_KERNEL); ASC_DBG(1, "carrier_buf 0x%p\n", adv_dvc->carrier_buf); if (!adv_dvc->carrier_buf) goto kmalloc_failed; /* * Allocate up to 'max_host_qng' request structures for the Wide * board. The total size is about 16 KB, so allocate all at once. * If the allocation fails decrement and try again. */ for (req_cnt = adv_dvc->max_host_qng; req_cnt > 0; req_cnt--) { reqp = kmalloc(sizeof(adv_req_t) * req_cnt, GFP_KERNEL); ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", reqp, req_cnt, (ulong)sizeof(adv_req_t) * req_cnt); if (reqp) break; } if (!reqp) goto kmalloc_failed; adv_dvc->orig_reqp = reqp; /* * Allocate up to ADV_TOT_SG_BLOCK request structures for * the Wide board. Each structure is about 136 bytes. */ board->adv_sgblkp = NULL; for (sg_cnt = 0; sg_cnt < ADV_TOT_SG_BLOCK; sg_cnt++) { sgp = kmalloc(sizeof(adv_sgblk_t), GFP_KERNEL); if (!sgp) break; sgp->next_sgblkp = board->adv_sgblkp; board->adv_sgblkp = sgp; } ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", sg_cnt, sizeof(adv_sgblk_t), sizeof(adv_sgblk_t) * sg_cnt); if (!board->adv_sgblkp) goto kmalloc_failed; /* * Point 'adv_reqp' to the request structures and * link them together. 
*/ req_cnt--; reqp[req_cnt].next_reqp = NULL; for (; req_cnt > 0; req_cnt--) { reqp[req_cnt - 1].next_reqp = &reqp[req_cnt]; } board->adv_reqp = &reqp[0]; if (adv_dvc->chip_type == ADV_CHIP_ASC3550) { ASC_DBG(2, "AdvInitAsc3550Driver()\n"); warn_code = AdvInitAsc3550Driver(adv_dvc); } else if (adv_dvc->chip_type == ADV_CHIP_ASC38C0800) { ASC_DBG(2, "AdvInitAsc38C0800Driver()\n"); warn_code = AdvInitAsc38C0800Driver(adv_dvc); } else { ASC_DBG(2, "AdvInitAsc38C1600Driver()\n"); warn_code = AdvInitAsc38C1600Driver(adv_dvc); } err_code = adv_dvc->err_code; if (warn_code || err_code) { shost_printk(KERN_WARNING, shost, "error: warn 0x%x, error " "0x%x\n", warn_code, err_code); } goto exit; kmalloc_failed: shost_printk(KERN_ERR, shost, "error: kmalloc() failed\n"); err_code = ADV_ERROR; exit: return err_code; } static void advansys_wide_free_mem(struct asc_board *board) { struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var; kfree(adv_dvc->carrier_buf); adv_dvc->carrier_buf = NULL; kfree(adv_dvc->orig_reqp); adv_dvc->orig_reqp = board->adv_reqp = NULL; while (board->adv_sgblkp) { adv_sgblk_t *sgp = board->adv_sgblkp; board->adv_sgblkp = sgp->next_sgblkp; kfree(sgp); } } static int __devinit advansys_board_found(struct Scsi_Host *shost, unsigned int iop, int bus_type) { struct pci_dev *pdev; struct asc_board *boardp = shost_priv(shost); ASC_DVC_VAR *asc_dvc_varp = NULL; ADV_DVC_VAR *adv_dvc_varp = NULL; int share_irq, warn_code, ret; pdev = (bus_type == ASC_IS_PCI) ? 
to_pci_dev(boardp->dev) : NULL; if (ASC_NARROW_BOARD(boardp)) { ASC_DBG(1, "narrow board\n"); asc_dvc_varp = &boardp->dvc_var.asc_dvc_var; asc_dvc_varp->bus_type = bus_type; asc_dvc_varp->drv_ptr = boardp; asc_dvc_varp->cfg = &boardp->dvc_cfg.asc_dvc_cfg; asc_dvc_varp->iop_base = iop; } else { #ifdef CONFIG_PCI adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; adv_dvc_varp->drv_ptr = boardp; adv_dvc_varp->cfg = &boardp->dvc_cfg.adv_dvc_cfg; if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW) { ASC_DBG(1, "wide board ASC-3550\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC3550; } else if (pdev->device == PCI_DEVICE_ID_38C0800_REV1) { ASC_DBG(1, "wide board ASC-38C0800\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800; } else { ASC_DBG(1, "wide board ASC-38C1600\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC38C1600; } boardp->asc_n_io_port = pci_resource_len(pdev, 1); boardp->ioremap_addr = pci_ioremap_bar(pdev, 1); if (!boardp->ioremap_addr) { shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) " "returned NULL\n", (long)pci_resource_start(pdev, 1), boardp->asc_n_io_port); ret = -ENODEV; goto err_shost; } adv_dvc_varp->iop_base = (AdvPortAddr)boardp->ioremap_addr; ASC_DBG(1, "iop_base: 0x%p\n", adv_dvc_varp->iop_base); /* * Even though it isn't used to access wide boards, other * than for the debug line below, save I/O Port address so * that it can be reported. */ boardp->ioport = iop; ASC_DBG(1, "iopb_chip_id_1 0x%x, iopw_chip_id_0 0x%x\n", (ushort)inp(iop + 1), (ushort)inpw(iop)); #endif /* CONFIG_PCI */ } #ifdef CONFIG_PROC_FS /* * Allocate buffer for printing information from * /proc/scsi/advansys/[0...]. */ boardp->prtbuf = kmalloc(ASC_PRTBUF_SIZE, GFP_KERNEL); if (!boardp->prtbuf) { shost_printk(KERN_ERR, shost, "kmalloc(%d) returned NULL\n", ASC_PRTBUF_SIZE); ret = -ENOMEM; goto err_unmap; } #endif /* CONFIG_PROC_FS */ if (ASC_NARROW_BOARD(boardp)) { /* * Set the board bus type and PCI IRQ before * calling AscInitGetConfig(). 
*/ switch (asc_dvc_varp->bus_type) { #ifdef CONFIG_ISA case ASC_IS_ISA: shost->unchecked_isa_dma = TRUE; share_irq = 0; break; case ASC_IS_VL: shost->unchecked_isa_dma = FALSE; share_irq = 0; break; case ASC_IS_EISA: shost->unchecked_isa_dma = FALSE; share_irq = IRQF_SHARED; break; #endif /* CONFIG_ISA */ #ifdef CONFIG_PCI case ASC_IS_PCI: shost->unchecked_isa_dma = FALSE; share_irq = IRQF_SHARED; break; #endif /* CONFIG_PCI */ default: shost_printk(KERN_ERR, shost, "unknown adapter type: " "%d\n", asc_dvc_varp->bus_type); shost->unchecked_isa_dma = TRUE; share_irq = 0; break; } /* * NOTE: AscInitGetConfig() may change the board's * bus_type value. The bus_type value should no * longer be used. If the bus_type field must be * referenced only use the bit-wise AND operator "&". */ ASC_DBG(2, "AscInitGetConfig()\n"); ret = AscInitGetConfig(shost) ? -ENODEV : 0; } else { #ifdef CONFIG_PCI /* * For Wide boards set PCI information before calling * AdvInitGetConfig(). */ shost->unchecked_isa_dma = FALSE; share_irq = IRQF_SHARED; ASC_DBG(2, "AdvInitGetConfig()\n"); ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0; #endif /* CONFIG_PCI */ } if (ret) goto err_free_proc; /* * Save the EEPROM configuration so that it can be displayed * from /proc/scsi/advansys/[0...]. */ if (ASC_NARROW_BOARD(boardp)) { ASCEEP_CONFIG *ep; /* * Set the adapter's target id bit in the 'init_tidmask' field. */ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(asc_dvc_varp->cfg->chip_scsi_id); /* * Save EEPROM settings for the board. 
*/ ep = &boardp->eep_config.asc_eep; ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable; ep->disc_enable = asc_dvc_varp->cfg->disc_enable; ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled; ASC_EEP_SET_DMA_SPD(ep, asc_dvc_varp->cfg->isa_dma_speed); ep->start_motor = asc_dvc_varp->start_motor; ep->cntl = asc_dvc_varp->dvc_cntl; ep->no_scam = asc_dvc_varp->no_scam; ep->max_total_qng = asc_dvc_varp->max_total_qng; ASC_EEP_SET_CHIP_ID(ep, asc_dvc_varp->cfg->chip_scsi_id); /* 'max_tag_qng' is set to the same value for every device. */ ep->max_tag_qng = asc_dvc_varp->cfg->max_tag_qng[0]; ep->adapter_info[0] = asc_dvc_varp->cfg->adapter_info[0]; ep->adapter_info[1] = asc_dvc_varp->cfg->adapter_info[1]; ep->adapter_info[2] = asc_dvc_varp->cfg->adapter_info[2]; ep->adapter_info[3] = asc_dvc_varp->cfg->adapter_info[3]; ep->adapter_info[4] = asc_dvc_varp->cfg->adapter_info[4]; ep->adapter_info[5] = asc_dvc_varp->cfg->adapter_info[5]; /* * Modify board configuration. */ ASC_DBG(2, "AscInitSetConfig()\n"); ret = AscInitSetConfig(pdev, shost) ? -ENODEV : 0; if (ret) goto err_free_proc; } else { ADVEEP_3550_CONFIG *ep_3550; ADVEEP_38C0800_CONFIG *ep_38C0800; ADVEEP_38C1600_CONFIG *ep_38C1600; /* * Save Wide EEP Configuration Information. 
*/ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { ep_3550 = &boardp->eep_config.adv_3550_eep; ep_3550->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; ep_3550->max_host_qng = adv_dvc_varp->max_host_qng; ep_3550->max_dvc_qng = adv_dvc_varp->max_dvc_qng; ep_3550->termination = adv_dvc_varp->cfg->termination; ep_3550->disc_enable = adv_dvc_varp->cfg->disc_enable; ep_3550->bios_ctrl = adv_dvc_varp->bios_ctrl; ep_3550->wdtr_able = adv_dvc_varp->wdtr_able; ep_3550->sdtr_able = adv_dvc_varp->sdtr_able; ep_3550->ultra_able = adv_dvc_varp->ultra_able; ep_3550->tagqng_able = adv_dvc_varp->tagqng_able; ep_3550->start_motor = adv_dvc_varp->start_motor; ep_3550->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait; ep_3550->serial_number_word1 = adv_dvc_varp->cfg->serial1; ep_3550->serial_number_word2 = adv_dvc_varp->cfg->serial2; ep_3550->serial_number_word3 = adv_dvc_varp->cfg->serial3; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { ep_38C0800 = &boardp->eep_config.adv_38C0800_eep; ep_38C0800->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; ep_38C0800->max_host_qng = adv_dvc_varp->max_host_qng; ep_38C0800->max_dvc_qng = adv_dvc_varp->max_dvc_qng; ep_38C0800->termination_lvd = adv_dvc_varp->cfg->termination; ep_38C0800->disc_enable = adv_dvc_varp->cfg->disc_enable; ep_38C0800->bios_ctrl = adv_dvc_varp->bios_ctrl; ep_38C0800->wdtr_able = adv_dvc_varp->wdtr_able; ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C0800->sdtr_speed1 = adv_dvc_varp->sdtr_speed1; ep_38C0800->sdtr_speed2 = adv_dvc_varp->sdtr_speed2; ep_38C0800->sdtr_speed3 = adv_dvc_varp->sdtr_speed3; ep_38C0800->sdtr_speed4 = adv_dvc_varp->sdtr_speed4; ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C0800->start_motor = adv_dvc_varp->start_motor; ep_38C0800->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait; ep_38C0800->serial_number_word1 = adv_dvc_varp->cfg->serial1; ep_38C0800->serial_number_word2 = adv_dvc_varp->cfg->serial2; ep_38C0800->serial_number_word3 = 
adv_dvc_varp->cfg->serial3; } else { ep_38C1600 = &boardp->eep_config.adv_38C1600_eep; ep_38C1600->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; ep_38C1600->max_host_qng = adv_dvc_varp->max_host_qng; ep_38C1600->max_dvc_qng = adv_dvc_varp->max_dvc_qng; ep_38C1600->termination_lvd = adv_dvc_varp->cfg->termination; ep_38C1600->disc_enable = adv_dvc_varp->cfg->disc_enable; ep_38C1600->bios_ctrl = adv_dvc_varp->bios_ctrl; ep_38C1600->wdtr_able = adv_dvc_varp->wdtr_able; ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C1600->sdtr_speed1 = adv_dvc_varp->sdtr_speed1; ep_38C1600->sdtr_speed2 = adv_dvc_varp->sdtr_speed2; ep_38C1600->sdtr_speed3 = adv_dvc_varp->sdtr_speed3; ep_38C1600->sdtr_speed4 = adv_dvc_varp->sdtr_speed4; ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C1600->start_motor = adv_dvc_varp->start_motor; ep_38C1600->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait; ep_38C1600->serial_number_word1 = adv_dvc_varp->cfg->serial1; ep_38C1600->serial_number_word2 = adv_dvc_varp->cfg->serial2; ep_38C1600->serial_number_word3 = adv_dvc_varp->cfg->serial3; } /* * Set the adapter's target id bit in the 'init_tidmask' field. */ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(adv_dvc_varp->chip_scsi_id); } /* * Channels are numbered beginning with 0. For AdvanSys one host * structure supports one channel. Multi-channel boards have a * separate host structure for each channel. */ shost->max_channel = 0; if (ASC_NARROW_BOARD(boardp)) { shost->max_id = ASC_MAX_TID + 1; shost->max_lun = ASC_MAX_LUN + 1; shost->max_cmd_len = ASC_MAX_CDB_LEN; shost->io_port = asc_dvc_varp->iop_base; boardp->asc_n_io_port = ASC_IOADR_GAP; shost->this_id = asc_dvc_varp->cfg->chip_scsi_id; /* Set maximum number of queues the adapter can handle. 
*/ shost->can_queue = asc_dvc_varp->max_total_qng; } else { shost->max_id = ADV_MAX_TID + 1; shost->max_lun = ADV_MAX_LUN + 1; shost->max_cmd_len = ADV_MAX_CDB_LEN; /* * Save the I/O Port address and length even though * I/O ports are not used to access Wide boards. * Instead the Wide boards are accessed with * PCI Memory Mapped I/O. */ shost->io_port = iop; shost->this_id = adv_dvc_varp->chip_scsi_id; /* Set maximum number of queues the adapter can handle. */ shost->can_queue = adv_dvc_varp->max_host_qng; } /* * Following v1.3.89, 'cmd_per_lun' is no longer needed * and should be set to zero. * * But because of a bug introduced in v1.3.89 if the driver is * compiled as a module and 'cmd_per_lun' is zero, the Mid-Level * SCSI function 'allocate_device' will panic. To allow the driver * to work as a module in these kernels set 'cmd_per_lun' to 1. * * Note: This is wrong. cmd_per_lun should be set to the depth * you want on untagged devices always. #ifdef MODULE */ shost->cmd_per_lun = 1; /* #else shost->cmd_per_lun = 0; #endif */ /* * Set the maximum number of scatter-gather elements the * adapter can handle. */ if (ASC_NARROW_BOARD(boardp)) { /* * Allow two commands with 'sg_tablesize' scatter-gather * elements to be executed simultaneously. This value is * the theoretical hardware limit. It may be decreased * below. */ shost->sg_tablesize = (((asc_dvc_varp->max_total_qng - 2) / 2) * ASC_SG_LIST_PER_Q) + 1; } else { shost->sg_tablesize = ADV_MAX_SG_LIST; } /* * The value of 'sg_tablesize' can not exceed the SCSI * mid-level driver definition of SG_ALL. SG_ALL also * must not be exceeded, because it is used to define the * size of the scatter-gather table in 'struct asc_sg_head'. */ if (shost->sg_tablesize > SG_ALL) { shost->sg_tablesize = SG_ALL; } ASC_DBG(1, "sg_tablesize: %d\n", shost->sg_tablesize); /* BIOS start address. 
*/ if (ASC_NARROW_BOARD(boardp)) { shost->base = AscGetChipBiosAddress(asc_dvc_varp->iop_base, asc_dvc_varp->bus_type); } else { /* * Fill-in BIOS board variables. The Wide BIOS saves * information in LRAM that is used by the driver. */ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_SIGNATURE, boardp->bios_signature); AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_VERSION, boardp->bios_version); AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODESEG, boardp->bios_codeseg); AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODELEN, boardp->bios_codelen); ASC_DBG(1, "bios_signature 0x%x, bios_version 0x%x\n", boardp->bios_signature, boardp->bios_version); ASC_DBG(1, "bios_codeseg 0x%x, bios_codelen 0x%x\n", boardp->bios_codeseg, boardp->bios_codelen); /* * If the BIOS saved a valid signature, then fill in * the BIOS code segment base address. */ if (boardp->bios_signature == 0x55AA) { /* * Convert x86 realmode code segment to a linear * address by shifting left 4. */ shost->base = ((ulong)boardp->bios_codeseg << 4); } else { shost->base = 0; } } /* * Register Board Resources - I/O Port, DMA, IRQ */ /* Register DMA Channel for Narrow boards. */ shost->dma_channel = NO_ISA_DMA; /* Default to no ISA DMA. */ #ifdef CONFIG_ISA if (ASC_NARROW_BOARD(boardp)) { /* Register DMA channel for ISA bus. */ if (asc_dvc_varp->bus_type & ASC_IS_ISA) { shost->dma_channel = asc_dvc_varp->cfg->isa_dma_channel; ret = request_dma(shost->dma_channel, DRV_NAME); if (ret) { shost_printk(KERN_ERR, shost, "request_dma() " "%d failed %d\n", shost->dma_channel, ret); goto err_free_proc; } AscEnableIsaDma(shost->dma_channel); } } #endif /* CONFIG_ISA */ /* Register IRQ Number. 
*/ ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost); ret = request_irq(boardp->irq, advansys_interrupt, share_irq, DRV_NAME, shost); if (ret) { if (ret == -EBUSY) { shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " "already in use\n", boardp->irq); } else if (ret == -EINVAL) { shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " "not valid\n", boardp->irq); } else { shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " "failed with %d\n", boardp->irq, ret); } goto err_free_dma; } /* * Initialize board RISC chip and enable interrupts. */ if (ASC_NARROW_BOARD(boardp)) { ASC_DBG(2, "AscInitAsc1000Driver()\n"); asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); if (!asc_dvc_varp->overrun_buf) { ret = -ENOMEM; goto err_free_irq; } warn_code = AscInitAsc1000Driver(asc_dvc_varp); if (warn_code || asc_dvc_varp->err_code) { shost_printk(KERN_ERR, shost, "error: init_state 0x%x, " "warn 0x%x, error 0x%x\n", asc_dvc_varp->init_state, warn_code, asc_dvc_varp->err_code); if (!asc_dvc_varp->overrun_dma) { ret = -ENODEV; goto err_free_mem; } } } else { if (advansys_wide_init_chip(shost)) { ret = -ENODEV; goto err_free_mem; } } ASC_DBG_PRT_SCSI_HOST(2, shost); ret = scsi_add_host(shost, boardp->dev); if (ret) goto err_free_mem; scsi_scan_host(shost); return 0; err_free_mem: if (ASC_NARROW_BOARD(boardp)) { if (asc_dvc_varp->overrun_dma) dma_unmap_single(boardp->dev, asc_dvc_varp->overrun_dma, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); kfree(asc_dvc_varp->overrun_buf); } else advansys_wide_free_mem(boardp); err_free_irq: free_irq(boardp->irq, shost); err_free_dma: #ifdef CONFIG_ISA if (shost->dma_channel != NO_ISA_DMA) free_dma(shost->dma_channel); #endif err_free_proc: kfree(boardp->prtbuf); err_unmap: if (boardp->ioremap_addr) iounmap(boardp->ioremap_addr); err_shost: return ret; } /* * advansys_release() * * Release resources allocated for a single AdvanSys adapter. 
*/ static int advansys_release(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DBG(1, "begin\n"); scsi_remove_host(shost); free_irq(board->irq, shost); #ifdef CONFIG_ISA if (shost->dma_channel != NO_ISA_DMA) { ASC_DBG(1, "free_dma()\n"); free_dma(shost->dma_channel); } #endif if (ASC_NARROW_BOARD(board)) { dma_unmap_single(board->dev, board->dvc_var.asc_dvc_var.overrun_dma, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); kfree(board->dvc_var.asc_dvc_var.overrun_buf); } else { iounmap(board->ioremap_addr); advansys_wide_free_mem(board); } kfree(board->prtbuf); scsi_host_put(shost); ASC_DBG(1, "end\n"); return 0; } #define ASC_IOADR_TABLE_MAX_IX 11 static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = { 0x100, 0x0110, 0x120, 0x0130, 0x140, 0x0150, 0x0190, 0x0210, 0x0230, 0x0250, 0x0330 }; /* * The ISA IRQ number is found in bits 2 and 3 of the CfgLsw. It decodes as: * 00: 10 * 01: 11 * 10: 12 * 11: 15 */ static unsigned int __devinit advansys_isa_irq_no(PortAddr iop_base) { unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base); unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10; if (chip_irq == 13) chip_irq = 15; return chip_irq; } static int __devinit advansys_isa_probe(struct device *dev, unsigned int id) { int err = -ENODEV; PortAddr iop_base = _asc_def_iop_base[id]; struct Scsi_Host *shost; struct asc_board *board; if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) { ASC_DBG(1, "I/O port 0x%x busy\n", iop_base); return -ENODEV; } ASC_DBG(1, "probing I/O port 0x%x\n", iop_base); if (!AscFindSignature(iop_base)) goto release_region; if (!(AscGetChipVersion(iop_base, ASC_IS_ISA) & ASC_CHIP_VER_ISA_BIT)) goto release_region; err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = advansys_isa_irq_no(iop_base); board->dev = dev; err = advansys_board_found(shost, iop_base, ASC_IS_ISA); if (err) goto free_host; dev_set_drvdata(dev, shost); return 0; 
free_host: scsi_host_put(shost); release_region: release_region(iop_base, ASC_IOADR_GAP); return err; } static int __devexit advansys_isa_remove(struct device *dev, unsigned int id) { int ioport = _asc_def_iop_base[id]; advansys_release(dev_get_drvdata(dev)); release_region(ioport, ASC_IOADR_GAP); return 0; } static struct isa_driver advansys_isa_driver = { .probe = advansys_isa_probe, .remove = __devexit_p(advansys_isa_remove), .driver = { .owner = THIS_MODULE, .name = DRV_NAME, }, }; /* * The VLB IRQ number is found in bits 2 to 4 of the CfgLsw. It decodes as: * 000: invalid * 001: 10 * 010: 11 * 011: 12 * 100: invalid * 101: 14 * 110: 15 * 111: invalid */ static unsigned int __devinit advansys_vlb_irq_no(PortAddr iop_base) { unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base); unsigned int chip_irq = ((cfg_lsw >> 2) & 0x07) + 9; if ((chip_irq < 10) || (chip_irq == 13) || (chip_irq > 15)) return 0; return chip_irq; } static int __devinit advansys_vlb_probe(struct device *dev, unsigned int id) { int err = -ENODEV; PortAddr iop_base = _asc_def_iop_base[id]; struct Scsi_Host *shost; struct asc_board *board; if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) { ASC_DBG(1, "I/O port 0x%x busy\n", iop_base); return -ENODEV; } ASC_DBG(1, "probing I/O port 0x%x\n", iop_base); if (!AscFindSignature(iop_base)) goto release_region; /* * I don't think this condition can actually happen, but the old * driver did it, and the chances of finding a VLB setup in 2007 * to do testing with is slight to none. 
*/ if (AscGetChipVersion(iop_base, ASC_IS_VL) > ASC_CHIP_MAX_VER_VL) goto release_region; err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = advansys_vlb_irq_no(iop_base); board->dev = dev; err = advansys_board_found(shost, iop_base, ASC_IS_VL); if (err) goto free_host; dev_set_drvdata(dev, shost); return 0; free_host: scsi_host_put(shost); release_region: release_region(iop_base, ASC_IOADR_GAP); return -ENODEV; } static struct isa_driver advansys_vlb_driver = { .probe = advansys_vlb_probe, .remove = __devexit_p(advansys_isa_remove), .driver = { .owner = THIS_MODULE, .name = "advansys_vlb", }, }; static struct eisa_device_id advansys_eisa_table[] __devinitdata = { { "ABP7401" }, { "ABP7501" }, { "" } }; MODULE_DEVICE_TABLE(eisa, advansys_eisa_table); /* * EISA is a little more tricky than PCI; each EISA device may have two * channels, and this driver is written to make each channel its own Scsi_Host */ struct eisa_scsi_data { struct Scsi_Host *host[2]; }; /* * The EISA IRQ number is found in bits 8 to 10 of the CfgLsw. 
It decodes as: * 000: 10 * 001: 11 * 010: 12 * 011: invalid * 100: 14 * 101: 15 * 110: invalid * 111: invalid */ static unsigned int __devinit advansys_eisa_irq_no(struct eisa_device *edev) { unsigned short cfg_lsw = inw(edev->base_addr + 0xc86); unsigned int chip_irq = ((cfg_lsw >> 8) & 0x07) + 10; if ((chip_irq == 13) || (chip_irq > 15)) return 0; return chip_irq; } static int __devinit advansys_eisa_probe(struct device *dev) { int i, ioport, irq = 0; int err; struct eisa_device *edev = to_eisa_device(dev); struct eisa_scsi_data *data; err = -ENOMEM; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto fail; ioport = edev->base_addr + 0xc30; err = -ENODEV; for (i = 0; i < 2; i++, ioport += 0x20) { struct asc_board *board; struct Scsi_Host *shost; if (!request_region(ioport, ASC_IOADR_GAP, DRV_NAME)) { printk(KERN_WARNING "Region %x-%x busy\n", ioport, ioport + ASC_IOADR_GAP - 1); continue; } if (!AscFindSignature(ioport)) { release_region(ioport, ASC_IOADR_GAP); continue; } /* * I don't know why we need to do this for EISA chips, but * not for any others. It looks to be equivalent to * AscGetChipCfgMsw, but I may have overlooked something, * so I'm not converting it until I get an EISA board to * test with. 
*/ inw(ioport + 4); if (!irq) irq = advansys_eisa_irq_no(edev); err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = irq; board->dev = dev; err = advansys_board_found(shost, ioport, ASC_IS_EISA); if (!err) { data->host[i] = shost; continue; } scsi_host_put(shost); release_region: release_region(ioport, ASC_IOADR_GAP); break; } if (err) goto free_data; dev_set_drvdata(dev, data); return 0; free_data: kfree(data->host[0]); kfree(data->host[1]); kfree(data); fail: return err; } static __devexit int advansys_eisa_remove(struct device *dev) { int i; struct eisa_scsi_data *data = dev_get_drvdata(dev); for (i = 0; i < 2; i++) { int ioport; struct Scsi_Host *shost = data->host[i]; if (!shost) continue; ioport = shost->io_port; advansys_release(shost); release_region(ioport, ASC_IOADR_GAP); } kfree(data); return 0; } static struct eisa_driver advansys_eisa_driver = { .id_table = advansys_eisa_table, .driver = { .name = DRV_NAME, .probe = advansys_eisa_probe, .remove = __devexit_p(advansys_eisa_remove), } }; /* PCI Devices supported by this driver */ static struct pci_device_id advansys_pci_tbl[] __devinitdata = { {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {} }; MODULE_DEVICE_TABLE(pci, advansys_pci_tbl); static void __devinit advansys_set_latency(struct pci_dev *pdev) { if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) || (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) { pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0); } else { u8 latency; 
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency); if (latency < 0x20) pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20); } } static int __devinit advansys_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err, ioport; struct Scsi_Host *shost; struct asc_board *board; err = pci_enable_device(pdev); if (err) goto fail; err = pci_request_regions(pdev, DRV_NAME); if (err) goto disable_device; pci_set_master(pdev); advansys_set_latency(pdev); err = -ENODEV; if (pci_resource_len(pdev, 0) == 0) goto release_region; ioport = pci_resource_start(pdev, 0); err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = pdev->irq; board->dev = &pdev->dev; if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW || pdev->device == PCI_DEVICE_ID_38C0800_REV1 || pdev->device == PCI_DEVICE_ID_38C1600_REV1) { board->flags |= ASC_IS_WIDE_BOARD; } err = advansys_board_found(shost, ioport, ASC_IS_PCI); if (err) goto free_host; pci_set_drvdata(pdev, shost); return 0; free_host: scsi_host_put(shost); release_region: pci_release_regions(pdev); disable_device: pci_disable_device(pdev); fail: return err; } static void __devexit advansys_pci_remove(struct pci_dev *pdev) { advansys_release(pci_get_drvdata(pdev)); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver advansys_pci_driver = { .name = DRV_NAME, .id_table = advansys_pci_tbl, .probe = advansys_pci_probe, .remove = __devexit_p(advansys_pci_remove), }; static int __init advansys_init(void) { int error; error = isa_register_driver(&advansys_isa_driver, ASC_IOADR_TABLE_MAX_IX); if (error) goto fail; error = isa_register_driver(&advansys_vlb_driver, ASC_IOADR_TABLE_MAX_IX); if (error) goto unregister_isa; error = eisa_driver_register(&advansys_eisa_driver); if (error) goto unregister_vlb; error = pci_register_driver(&advansys_pci_driver); if (error) goto unregister_eisa; return 0; unregister_eisa: 
eisa_driver_unregister(&advansys_eisa_driver); unregister_vlb: isa_unregister_driver(&advansys_vlb_driver); unregister_isa: isa_unregister_driver(&advansys_isa_driver); fail: return error; } static void __exit advansys_exit(void) { pci_unregister_driver(&advansys_pci_driver); eisa_driver_unregister(&advansys_eisa_driver); isa_unregister_driver(&advansys_vlb_driver); isa_unregister_driver(&advansys_isa_driver); } module_init(advansys_init); module_exit(advansys_exit); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("advansys/mcode.bin"); MODULE_FIRMWARE("advansys/3550.bin"); MODULE_FIRMWARE("advansys/38C0800.bin"); MODULE_FIRMWARE("advansys/38C1600.bin");
gpl-2.0
EPDCenter/android_kernel_rikomagic_mk808
drivers/scsi/advansys.c
3181
383611
#define DRV_NAME "advansys" #define ASC_VERSION "3.4" /* AdvanSys Driver Version */ /* * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-2000 Advanced System Products, Inc. * Copyright (c) 2000-2001 ConnectCom Solutions, Inc. * Copyright (c) 2007 Matthew Wilcox <matthew@wil.cx> * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys) * changed its name to ConnectCom Solutions, Inc. * On June 18, 2001 Initio Corp. acquired ConnectCom's SCSI assets */ #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/isa.h> #include <linux/eisa.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> #include <asm/io.h> #include <asm/system.h> #include <asm/dma.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> /* FIXME: * * 1. Although all of the necessary command mapping places have the * appropriate dma_map.. APIs, the driver still processes its internal * queue using bus_to_virt() and virt_to_bus() which are illegal under * the API. The entire queue processing structure will need to be * altered to fix this. * 2. Need to add memory mapping workaround. Test the memory mapping. * If it doesn't work revert to I/O port access. Can a test be done * safely? * 3. Handle an interrupt not working. Keep an interrupt counter in * the interrupt handler. 
In the timeout function if the interrupt * has not occurred then print a message and run in polled mode. * 4. Need to add support for target mode commands, cf. CAM XPT. * 5. check DMA mapping functions for failure * 6. Use scsi_transport_spi * 7. advansys_info is not safe against multiple simultaneous callers * 8. Add module_param to override ISA/VLB ioport array */ #warning this driver is still not properly converted to the DMA API /* Enable driver /proc statistics. */ #define ADVANSYS_STATS /* Enable driver tracing. */ #undef ADVANSYS_DEBUG /* * Portable Data Types * * Any instance where a 32-bit long or pointer type is assumed * for precision or HW defined structures, the following define * types must be used. In Linux the char, short, and int types * are all consistent at 8, 16, and 32 bits respectively. Pointers * and long types are 64 bits on Alpha and UltraSPARC. */ #define ASC_PADDR __u32 /* Physical/Bus address data type. */ #define ASC_VADDR __u32 /* Virtual address data type. */ #define ASC_DCNT __u32 /* Unsigned Data count type. */ #define ASC_SDCNT __s32 /* Signed Data count type. */ typedef unsigned char uchar; #ifndef TRUE #define TRUE (1) #endif #ifndef FALSE #define FALSE (0) #endif #define ERR (-1) #define UW_ERR (uint)(0xFFFF) #define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0) #define PCI_VENDOR_ID_ASP 0x10cd #define PCI_DEVICE_ID_ASP_1200A 0x1100 #define PCI_DEVICE_ID_ASP_ABP940 0x1200 #define PCI_DEVICE_ID_ASP_ABP940U 0x1300 #define PCI_DEVICE_ID_ASP_ABP940UW 0x2300 #define PCI_DEVICE_ID_38C0800_REV1 0x2500 #define PCI_DEVICE_ID_38C1600_REV1 0x2700 /* * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists. * The SRB structure will have to be changed and the ASC_SRB2SCSIQ() * macro re-defined to be able to obtain a ASC_SCSI_Q pointer from the * SRB structure. 
*/ #define CC_VERY_LONG_SG_LIST 0 #define ASC_SRB2SCSIQ(srb_ptr) (srb_ptr) #define PortAddr unsigned int /* port address size */ #define inp(port) inb(port) #define outp(port, byte) outb((byte), (port)) #define inpw(port) inw(port) #define outpw(port, word) outw((word), (port)) #define ASC_MAX_SG_QUEUE 7 #define ASC_MAX_SG_LIST 255 #define ASC_CS_TYPE unsigned short #define ASC_IS_ISA (0x0001) #define ASC_IS_ISAPNP (0x0081) #define ASC_IS_EISA (0x0002) #define ASC_IS_PCI (0x0004) #define ASC_IS_PCI_ULTRA (0x0104) #define ASC_IS_PCMCIA (0x0008) #define ASC_IS_MCA (0x0020) #define ASC_IS_VL (0x0040) #define ASC_IS_WIDESCSI_16 (0x0100) #define ASC_IS_WIDESCSI_32 (0x0200) #define ASC_IS_BIG_ENDIAN (0x8000) #define ASC_CHIP_MIN_VER_VL (0x01) #define ASC_CHIP_MAX_VER_VL (0x07) #define ASC_CHIP_MIN_VER_PCI (0x09) #define ASC_CHIP_MAX_VER_PCI (0x0F) #define ASC_CHIP_VER_PCI_BIT (0x08) #define ASC_CHIP_MIN_VER_ISA (0x11) #define ASC_CHIP_MIN_VER_ISA_PNP (0x21) #define ASC_CHIP_MAX_VER_ISA (0x27) #define ASC_CHIP_VER_ISA_BIT (0x30) #define ASC_CHIP_VER_ISAPNP_BIT (0x20) #define ASC_CHIP_VER_ASYN_BUG (0x21) #define ASC_CHIP_VER_PCI 0x08 #define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02) #define ASC_CHIP_VER_PCI_ULTRA_3050 (ASC_CHIP_VER_PCI | 0x03) #define ASC_CHIP_MIN_VER_EISA (0x41) #define ASC_CHIP_MAX_VER_EISA (0x47) #define ASC_CHIP_VER_EISA_BIT (0x40) #define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3) #define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL) #define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL) #define ASC_MAX_ISA_DMA_COUNT (0x00FFFFFFL) #define ASC_SCSI_ID_BITS 3 #define ASC_SCSI_TIX_TYPE uchar #define ASC_ALL_DEVICE_BIT_SET 0xFF #define ASC_SCSI_BIT_ID_TYPE uchar #define ASC_MAX_TID 7 #define ASC_MAX_LUN 7 #define ASC_SCSI_WIDTH_BIT_SET 0xFF #define ASC_MAX_SENSE_LEN 32 #define ASC_MIN_SENSE_LEN 14 #define ASC_SCSI_RESET_HOLD_TIME_US 60 /* * Narrow boards only support 12-byte commands, while wide boards * extend to 16-byte commands. 
*/ #define ASC_MAX_CDB_LEN 12 #define ADV_MAX_CDB_LEN 16 #define MS_SDTR_LEN 0x03 #define MS_WDTR_LEN 0x02 #define ASC_SG_LIST_PER_Q 7 #define QS_FREE 0x00 #define QS_READY 0x01 #define QS_DISC1 0x02 #define QS_DISC2 0x04 #define QS_BUSY 0x08 #define QS_ABORTED 0x40 #define QS_DONE 0x80 #define QC_NO_CALLBACK 0x01 #define QC_SG_SWAP_QUEUE 0x02 #define QC_SG_HEAD 0x04 #define QC_DATA_IN 0x08 #define QC_DATA_OUT 0x10 #define QC_URGENT 0x20 #define QC_MSG_OUT 0x40 #define QC_REQ_SENSE 0x80 #define QCSG_SG_XFER_LIST 0x02 #define QCSG_SG_XFER_MORE 0x04 #define QCSG_SG_XFER_END 0x08 #define QD_IN_PROGRESS 0x00 #define QD_NO_ERROR 0x01 #define QD_ABORTED_BY_HOST 0x02 #define QD_WITH_ERROR 0x04 #define QD_INVALID_REQUEST 0x80 #define QD_INVALID_HOST_NUM 0x81 #define QD_INVALID_DEVICE 0x82 #define QD_ERR_INTERNAL 0xFF #define QHSTA_NO_ERROR 0x00 #define QHSTA_M_SEL_TIMEOUT 0x11 #define QHSTA_M_DATA_OVER_RUN 0x12 #define QHSTA_M_DATA_UNDER_RUN 0x12 #define QHSTA_M_UNEXPECTED_BUS_FREE 0x13 #define QHSTA_M_BAD_BUS_PHASE_SEQ 0x14 #define QHSTA_D_QDONE_SG_LIST_CORRUPTED 0x21 #define QHSTA_D_ASC_DVC_ERROR_CODE_SET 0x22 #define QHSTA_D_HOST_ABORT_FAILED 0x23 #define QHSTA_D_EXE_SCSI_Q_FAILED 0x24 #define QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT 0x25 #define QHSTA_D_ASPI_NO_BUF_POOL 0x26 #define QHSTA_M_WTM_TIMEOUT 0x41 #define QHSTA_M_BAD_CMPL_STATUS_IN 0x42 #define QHSTA_M_NO_AUTO_REQ_SENSE 0x43 #define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44 #define QHSTA_M_TARGET_STATUS_BUSY 0x45 #define QHSTA_M_BAD_TAG_CODE 0x46 #define QHSTA_M_BAD_QUEUE_FULL_OR_BUSY 0x47 #define QHSTA_M_HUNG_REQ_SCSI_BUS_RESET 0x48 #define QHSTA_D_LRAM_CMP_ERROR 0x81 #define QHSTA_M_MICRO_CODE_ERROR_HALT 0xA1 #define ASC_FLAG_SCSIQ_REQ 0x01 #define ASC_FLAG_BIOS_SCSIQ_REQ 0x02 #define ASC_FLAG_BIOS_ASYNC_IO 0x04 #define ASC_FLAG_SRB_LINEAR_ADDR 0x08 #define ASC_FLAG_WIN16 0x10 #define ASC_FLAG_WIN32 0x20 #define ASC_FLAG_ISA_OVER_16MB 0x40 #define ASC_FLAG_DOS_VM_CALLBACK 0x80 #define ASC_TAG_FLAG_EXTRA_BYTES 0x10 #define 
ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04 #define ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX 0x08 #define ASC_TAG_FLAG_DISABLE_CHK_COND_INT_HOST 0x40 #define ASC_SCSIQ_CPY_BEG 4 #define ASC_SCSIQ_SGHD_CPY_BEG 2 #define ASC_SCSIQ_B_FWD 0 #define ASC_SCSIQ_B_BWD 1 #define ASC_SCSIQ_B_STATUS 2 #define ASC_SCSIQ_B_QNO 3 #define ASC_SCSIQ_B_CNTL 4 #define ASC_SCSIQ_B_SG_QUEUE_CNT 5 #define ASC_SCSIQ_D_DATA_ADDR 8 #define ASC_SCSIQ_D_DATA_CNT 12 #define ASC_SCSIQ_B_SENSE_LEN 20 #define ASC_SCSIQ_DONE_INFO_BEG 22 #define ASC_SCSIQ_D_SRBPTR 22 #define ASC_SCSIQ_B_TARGET_IX 26 #define ASC_SCSIQ_B_CDB_LEN 28 #define ASC_SCSIQ_B_TAG_CODE 29 #define ASC_SCSIQ_W_VM_ID 30 #define ASC_SCSIQ_DONE_STATUS 32 #define ASC_SCSIQ_HOST_STATUS 33 #define ASC_SCSIQ_SCSI_STATUS 34 #define ASC_SCSIQ_CDB_BEG 36 #define ASC_SCSIQ_DW_REMAIN_XFER_ADDR 56 #define ASC_SCSIQ_DW_REMAIN_XFER_CNT 60 #define ASC_SCSIQ_B_FIRST_SG_WK_QP 48 #define ASC_SCSIQ_B_SG_WK_QP 49 #define ASC_SCSIQ_B_SG_WK_IX 50 #define ASC_SCSIQ_W_ALT_DC1 52 #define ASC_SCSIQ_B_LIST_CNT 6 #define ASC_SCSIQ_B_CUR_LIST_CNT 7 #define ASC_SGQ_B_SG_CNTL 4 #define ASC_SGQ_B_SG_HEAD_QP 5 #define ASC_SGQ_B_SG_LIST_CNT 6 #define ASC_SGQ_B_SG_CUR_LIST_CNT 7 #define ASC_SGQ_LIST_BEG 8 #define ASC_DEF_SCSI1_QNG 4 #define ASC_MAX_SCSI1_QNG 4 #define ASC_DEF_SCSI2_QNG 16 #define ASC_MAX_SCSI2_QNG 32 #define ASC_TAG_CODE_MASK 0x23 #define ASC_STOP_REQ_RISC_STOP 0x01 #define ASC_STOP_ACK_RISC_STOP 0x03 #define ASC_STOP_CLEAN_UP_BUSY_Q 0x10 #define ASC_STOP_CLEAN_UP_DISC_Q 0x20 #define ASC_STOP_HOST_REQ_RISC_HALT 0x40 #define ASC_TIDLUN_TO_IX(tid, lun) (ASC_SCSI_TIX_TYPE)((tid) + ((lun)<<ASC_SCSI_ID_BITS)) #define ASC_TID_TO_TARGET_ID(tid) (ASC_SCSI_BIT_ID_TYPE)(0x01 << (tid)) #define ASC_TIX_TO_TARGET_ID(tix) (0x01 << ((tix) & ASC_MAX_TID)) #define ASC_TIX_TO_TID(tix) ((tix) & ASC_MAX_TID) #define ASC_TID_TO_TIX(tid) ((tid) & ASC_MAX_TID) #define ASC_TIX_TO_LUN(tix) (((tix) >> ASC_SCSI_ID_BITS) & ASC_MAX_LUN) #define ASC_QNO_TO_QADDR(q_no) 
((ASC_QADR_BEG)+((int)(q_no) << 6)) typedef struct asc_scsiq_1 { uchar status; uchar q_no; uchar cntl; uchar sg_queue_cnt; uchar target_id; uchar target_lun; ASC_PADDR data_addr; ASC_DCNT data_cnt; ASC_PADDR sense_addr; uchar sense_len; uchar extra_bytes; } ASC_SCSIQ_1; typedef struct asc_scsiq_2 { ASC_VADDR srb_ptr; uchar target_ix; uchar flag; uchar cdb_len; uchar tag_code; ushort vm_id; } ASC_SCSIQ_2; typedef struct asc_scsiq_3 { uchar done_stat; uchar host_stat; uchar scsi_stat; uchar scsi_msg; } ASC_SCSIQ_3; typedef struct asc_scsiq_4 { uchar cdb[ASC_MAX_CDB_LEN]; uchar y_first_sg_list_qp; uchar y_working_sg_qp; uchar y_working_sg_ix; uchar y_res; ushort x_req_count; ushort x_reconnect_rtn; ASC_PADDR x_saved_data_addr; ASC_DCNT x_saved_data_cnt; } ASC_SCSIQ_4; typedef struct asc_q_done_info { ASC_SCSIQ_2 d2; ASC_SCSIQ_3 d3; uchar q_status; uchar q_no; uchar cntl; uchar sense_len; uchar extra_bytes; uchar res; ASC_DCNT remain_bytes; } ASC_QDONE_INFO; typedef struct asc_sg_list { ASC_PADDR addr; ASC_DCNT bytes; } ASC_SG_LIST; typedef struct asc_sg_head { ushort entry_cnt; ushort queue_cnt; ushort entry_to_copy; ushort res; ASC_SG_LIST sg_list[0]; } ASC_SG_HEAD; typedef struct asc_scsi_q { ASC_SCSIQ_1 q1; ASC_SCSIQ_2 q2; uchar *cdbptr; ASC_SG_HEAD *sg_head; ushort remain_sg_entry_cnt; ushort next_sg_index; } ASC_SCSI_Q; typedef struct asc_scsi_req_q { ASC_SCSIQ_1 r1; ASC_SCSIQ_2 r2; uchar *cdbptr; ASC_SG_HEAD *sg_head; uchar *sense_ptr; ASC_SCSIQ_3 r3; uchar cdb[ASC_MAX_CDB_LEN]; uchar sense[ASC_MIN_SENSE_LEN]; } ASC_SCSI_REQ_Q; typedef struct asc_scsi_bios_req_q { ASC_SCSIQ_1 r1; ASC_SCSIQ_2 r2; uchar *cdbptr; ASC_SG_HEAD *sg_head; uchar *sense_ptr; ASC_SCSIQ_3 r3; uchar cdb[ASC_MAX_CDB_LEN]; uchar sense[ASC_MIN_SENSE_LEN]; } ASC_SCSI_BIOS_REQ_Q; typedef struct asc_risc_q { uchar fwd; uchar bwd; ASC_SCSIQ_1 i1; ASC_SCSIQ_2 i2; ASC_SCSIQ_3 i3; ASC_SCSIQ_4 i4; } ASC_RISC_Q; typedef struct asc_sg_list_q { uchar seq_no; uchar q_no; uchar cntl; uchar sg_head_qp; 
uchar sg_list_cnt; uchar sg_cur_list_cnt; } ASC_SG_LIST_Q; typedef struct asc_risc_sg_list_q { uchar fwd; uchar bwd; ASC_SG_LIST_Q sg; ASC_SG_LIST sg_list[7]; } ASC_RISC_SG_LIST_Q; #define ASCQ_ERR_Q_STATUS 0x0D #define ASCQ_ERR_CUR_QNG 0x17 #define ASCQ_ERR_SG_Q_LINKS 0x18 #define ASCQ_ERR_ISR_RE_ENTRY 0x1A #define ASCQ_ERR_CRITICAL_RE_ENTRY 0x1B #define ASCQ_ERR_ISR_ON_CRITICAL 0x1C /* * Warning code values are set in ASC_DVC_VAR 'warn_code'. */ #define ASC_WARN_NO_ERROR 0x0000 #define ASC_WARN_IO_PORT_ROTATE 0x0001 #define ASC_WARN_EEPROM_CHKSUM 0x0002 #define ASC_WARN_IRQ_MODIFIED 0x0004 #define ASC_WARN_AUTO_CONFIG 0x0008 #define ASC_WARN_CMD_QNG_CONFLICT 0x0010 #define ASC_WARN_EEPROM_RECOVER 0x0020 #define ASC_WARN_CFG_MSW_RECOVER 0x0040 /* * Error code values are set in {ASC/ADV}_DVC_VAR 'err_code'. */ #define ASC_IERR_NO_CARRIER 0x0001 /* No more carrier memory */ #define ASC_IERR_MCODE_CHKSUM 0x0002 /* micro code check sum error */ #define ASC_IERR_SET_PC_ADDR 0x0004 #define ASC_IERR_START_STOP_CHIP 0x0008 /* start/stop chip failed */ #define ASC_IERR_ILLEGAL_CONNECTION 0x0010 /* Illegal cable connection */ #define ASC_IERR_SINGLE_END_DEVICE 0x0020 /* SE device on DIFF bus */ #define ASC_IERR_REVERSED_CABLE 0x0040 /* Narrow flat cable reversed */ #define ASC_IERR_SET_SCSI_ID 0x0080 /* set SCSI ID failed */ #define ASC_IERR_HVD_DEVICE 0x0100 /* HVD device on LVD port */ #define ASC_IERR_BAD_SIGNATURE 0x0200 /* signature not found */ #define ASC_IERR_NO_BUS_TYPE 0x0400 #define ASC_IERR_BIST_PRE_TEST 0x0800 /* BIST pre-test error */ #define ASC_IERR_BIST_RAM_TEST 0x1000 /* BIST RAM test error */ #define ASC_IERR_BAD_CHIPTYPE 0x2000 /* Invalid chip_type setting */ #define ASC_DEF_MAX_TOTAL_QNG (0xF0) #define ASC_MIN_TAG_Q_PER_DVC (0x04) #define ASC_MIN_FREE_Q (0x02) #define ASC_MIN_TOTAL_QNG ((ASC_MAX_SG_QUEUE)+(ASC_MIN_FREE_Q)) #define ASC_MAX_TOTAL_QNG 240 #define ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG 16 #define ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG 8 #define 
ASC_MAX_PCI_INRAM_TOTAL_QNG 20 #define ASC_MAX_INRAM_TAG_QNG 16 #define ASC_IOADR_GAP 0x10 #define ASC_SYN_MAX_OFFSET 0x0F #define ASC_DEF_SDTR_OFFSET 0x0F #define ASC_SDTR_ULTRA_PCI_10MB_INDEX 0x02 #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 /* The narrow chip only supports a limited selection of transfer rates. * These are encoded in the range 0..7 or 0..15 depending whether the chip * is Ultra-capable or not. These tables let us convert from one to the other. */ static const unsigned char asc_syn_xfer_period[8] = { 25, 30, 35, 40, 50, 60, 70, 85 }; static const unsigned char asc_syn_ultra_xfer_period[16] = { 12, 19, 25, 32, 38, 44, 50, 57, 63, 69, 75, 82, 88, 94, 100, 107 }; typedef struct ext_msg { uchar msg_type; uchar msg_len; uchar msg_req; union { struct { uchar sdtr_xfer_period; uchar sdtr_req_ack_offset; } sdtr; struct { uchar wdtr_width; } wdtr; struct { uchar mdp_b3; uchar mdp_b2; uchar mdp_b1; uchar mdp_b0; } mdp; } u_ext_msg; uchar res; } EXT_MSG; #define xfer_period u_ext_msg.sdtr.sdtr_xfer_period #define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset #define wdtr_width u_ext_msg.wdtr.wdtr_width #define mdp_b3 u_ext_msg.mdp_b3 #define mdp_b2 u_ext_msg.mdp_b2 #define mdp_b1 u_ext_msg.mdp_b1 #define mdp_b0 u_ext_msg.mdp_b0 typedef struct asc_dvc_cfg { ASC_SCSI_BIT_ID_TYPE can_tagged_qng; ASC_SCSI_BIT_ID_TYPE cmd_qng_enabled; ASC_SCSI_BIT_ID_TYPE disc_enable; ASC_SCSI_BIT_ID_TYPE sdtr_enable; uchar chip_scsi_id; uchar isa_dma_speed; uchar isa_dma_channel; uchar chip_version; ushort mcode_date; ushort mcode_version; uchar max_tag_qng[ASC_MAX_TID + 1]; uchar sdtr_period_offset[ASC_MAX_TID + 1]; uchar adapter_info[6]; } ASC_DVC_CFG; #define ASC_DEF_DVC_CNTL 0xFFFF #define ASC_DEF_CHIP_SCSI_ID 7 #define ASC_DEF_ISA_DMA_SPEED 4 #define ASC_INIT_STATE_BEG_GET_CFG 0x0001 #define ASC_INIT_STATE_END_GET_CFG 0x0002 #define ASC_INIT_STATE_BEG_SET_CFG 0x0004 #define ASC_INIT_STATE_END_SET_CFG 0x0008 #define ASC_INIT_STATE_BEG_LOAD_MC 0x0010 #define 
ASC_INIT_STATE_END_LOAD_MC 0x0020 #define ASC_INIT_STATE_BEG_INQUIRY 0x0040 #define ASC_INIT_STATE_END_INQUIRY 0x0080 #define ASC_INIT_RESET_SCSI_DONE 0x0100 #define ASC_INIT_STATE_WITHOUT_EEP 0x8000 #define ASC_BUG_FIX_IF_NOT_DWB 0x0001 #define ASC_BUG_FIX_ASYN_USE_SYN 0x0002 #define ASC_MIN_TAGGED_CMD 7 #define ASC_MAX_SCSI_RESET_WAIT 30 #define ASC_OVERRUN_BSIZE 64 struct asc_dvc_var; /* Forward Declaration. */ typedef struct asc_dvc_var { PortAddr iop_base; ushort err_code; ushort dvc_cntl; ushort bug_fix_cntl; ushort bus_type; ASC_SCSI_BIT_ID_TYPE init_sdtr; ASC_SCSI_BIT_ID_TYPE sdtr_done; ASC_SCSI_BIT_ID_TYPE use_tagged_qng; ASC_SCSI_BIT_ID_TYPE unit_not_ready; ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; ASC_SCSI_BIT_ID_TYPE start_motor; uchar *overrun_buf; dma_addr_t overrun_dma; uchar scsi_reset_wait; uchar chip_no; char is_in_int; uchar max_total_qng; uchar cur_total_qng; uchar in_critical_cnt; uchar last_q_shortage; ushort init_state; uchar cur_dvc_qng[ASC_MAX_TID + 1]; uchar max_dvc_qng[ASC_MAX_TID + 1]; ASC_SCSI_Q *scsiq_busy_head[ASC_MAX_TID + 1]; ASC_SCSI_Q *scsiq_busy_tail[ASC_MAX_TID + 1]; const uchar *sdtr_period_tbl; ASC_DVC_CFG *cfg; ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer_always; char redo_scam; ushort res2; uchar dos_int13_table[ASC_MAX_TID + 1]; ASC_DCNT max_dma_count; ASC_SCSI_BIT_ID_TYPE no_scam; ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer; uchar min_sdtr_index; uchar max_sdtr_index; struct asc_board *drv_ptr; int ptr_map_count; void **ptr_map; ASC_DCNT uc_break; } ASC_DVC_VAR; typedef struct asc_dvc_inq_info { uchar type[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; } ASC_DVC_INQ_INFO; typedef struct asc_cap_info { ASC_DCNT lba; ASC_DCNT blk_size; } ASC_CAP_INFO; typedef struct asc_cap_info_array { ASC_CAP_INFO cap_info[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; } ASC_CAP_INFO_ARRAY; #define ASC_MCNTL_NO_SEL_TIMEOUT (ushort)0x0001 #define ASC_MCNTL_NULL_TARGET (ushort)0x0002 #define ASC_CNTL_INITIATOR (ushort)0x0001 #define ASC_CNTL_BIOS_GT_1GB (ushort)0x0002 #define 
ASC_CNTL_BIOS_GT_2_DISK (ushort)0x0004 #define ASC_CNTL_BIOS_REMOVABLE (ushort)0x0008 #define ASC_CNTL_NO_SCAM (ushort)0x0010 #define ASC_CNTL_INT_MULTI_Q (ushort)0x0080 #define ASC_CNTL_NO_LUN_SUPPORT (ushort)0x0040 #define ASC_CNTL_NO_VERIFY_COPY (ushort)0x0100 #define ASC_CNTL_RESET_SCSI (ushort)0x0200 #define ASC_CNTL_INIT_INQUIRY (ushort)0x0400 #define ASC_CNTL_INIT_VERBOSE (ushort)0x0800 #define ASC_CNTL_SCSI_PARITY (ushort)0x1000 #define ASC_CNTL_BURST_MODE (ushort)0x2000 #define ASC_CNTL_SDTR_ENABLE_ULTRA (ushort)0x4000 #define ASC_EEP_DVC_CFG_BEG_VL 2 #define ASC_EEP_MAX_DVC_ADDR_VL 15 #define ASC_EEP_DVC_CFG_BEG 32 #define ASC_EEP_MAX_DVC_ADDR 45 #define ASC_EEP_MAX_RETRY 20 /* * These macros keep the chip SCSI id and ISA DMA speed * bitfields in board order. C bitfields aren't portable * between big and little-endian platforms so they are * not used. */ #define ASC_EEP_GET_CHIP_ID(cfg) ((cfg)->id_speed & 0x0f) #define ASC_EEP_GET_DMA_SPD(cfg) (((cfg)->id_speed & 0xf0) >> 4) #define ASC_EEP_SET_CHIP_ID(cfg, sid) \ ((cfg)->id_speed = ((cfg)->id_speed & 0xf0) | ((sid) & ASC_MAX_TID)) #define ASC_EEP_SET_DMA_SPD(cfg, spd) \ ((cfg)->id_speed = ((cfg)->id_speed & 0x0f) | ((spd) & 0x0f) << 4) typedef struct asceep_config { ushort cfg_lsw; ushort cfg_msw; uchar init_sdtr; uchar disc_enable; uchar use_cmd_qng; uchar start_motor; uchar max_total_qng; uchar max_tag_qng; uchar bios_scan; uchar power_up_wait; uchar no_scam; uchar id_speed; /* low order 4 bits is chip scsi id */ /* high order 4 bits is isa dma speed */ uchar dos_int13_table[ASC_MAX_TID + 1]; uchar adapter_info[6]; ushort cntl; ushort chksum; } ASCEEP_CONFIG; #define ASC_EEP_CMD_READ 0x80 #define ASC_EEP_CMD_WRITE 0x40 #define ASC_EEP_CMD_WRITE_ABLE 0x30 #define ASC_EEP_CMD_WRITE_DISABLE 0x00 #define ASCV_MSGOUT_BEG 0x0000 #define ASCV_MSGOUT_SDTR_PERIOD (ASCV_MSGOUT_BEG+3) #define ASCV_MSGOUT_SDTR_OFFSET (ASCV_MSGOUT_BEG+4) #define ASCV_BREAK_SAVED_CODE (ushort)0x0006 #define ASCV_MSGIN_BEG 
(ASCV_MSGOUT_BEG+8) #define ASCV_MSGIN_SDTR_PERIOD (ASCV_MSGIN_BEG+3) #define ASCV_MSGIN_SDTR_OFFSET (ASCV_MSGIN_BEG+4) #define ASCV_SDTR_DATA_BEG (ASCV_MSGIN_BEG+8) #define ASCV_SDTR_DONE_BEG (ASCV_SDTR_DATA_BEG+8) #define ASCV_MAX_DVC_QNG_BEG (ushort)0x0020 #define ASCV_BREAK_ADDR (ushort)0x0028 #define ASCV_BREAK_NOTIFY_COUNT (ushort)0x002A #define ASCV_BREAK_CONTROL (ushort)0x002C #define ASCV_BREAK_HIT_COUNT (ushort)0x002E #define ASCV_ASCDVC_ERR_CODE_W (ushort)0x0030 #define ASCV_MCODE_CHKSUM_W (ushort)0x0032 #define ASCV_MCODE_SIZE_W (ushort)0x0034 #define ASCV_STOP_CODE_B (ushort)0x0036 #define ASCV_DVC_ERR_CODE_B (ushort)0x0037 #define ASCV_OVERRUN_PADDR_D (ushort)0x0038 #define ASCV_OVERRUN_BSIZE_D (ushort)0x003C #define ASCV_HALTCODE_W (ushort)0x0040 #define ASCV_CHKSUM_W (ushort)0x0042 #define ASCV_MC_DATE_W (ushort)0x0044 #define ASCV_MC_VER_W (ushort)0x0046 #define ASCV_NEXTRDY_B (ushort)0x0048 #define ASCV_DONENEXT_B (ushort)0x0049 #define ASCV_USE_TAGGED_QNG_B (ushort)0x004A #define ASCV_SCSIBUSY_B (ushort)0x004B #define ASCV_Q_DONE_IN_PROGRESS_B (ushort)0x004C #define ASCV_CURCDB_B (ushort)0x004D #define ASCV_RCLUN_B (ushort)0x004E #define ASCV_BUSY_QHEAD_B (ushort)0x004F #define ASCV_DISC1_QHEAD_B (ushort)0x0050 #define ASCV_DISC_ENABLE_B (ushort)0x0052 #define ASCV_CAN_TAGGED_QNG_B (ushort)0x0053 #define ASCV_HOSTSCSI_ID_B (ushort)0x0055 #define ASCV_MCODE_CNTL_B (ushort)0x0056 #define ASCV_NULL_TARGET_B (ushort)0x0057 #define ASCV_FREE_Q_HEAD_W (ushort)0x0058 #define ASCV_DONE_Q_TAIL_W (ushort)0x005A #define ASCV_FREE_Q_HEAD_B (ushort)(ASCV_FREE_Q_HEAD_W+1) #define ASCV_DONE_Q_TAIL_B (ushort)(ASCV_DONE_Q_TAIL_W+1) #define ASCV_HOST_FLAG_B (ushort)0x005D #define ASCV_TOTAL_READY_Q_B (ushort)0x0064 #define ASCV_VER_SERIAL_B (ushort)0x0065 #define ASCV_HALTCODE_SAVED_W (ushort)0x0066 #define ASCV_WTM_FLAG_B (ushort)0x0068 #define ASCV_RISC_FLAG_B (ushort)0x006A #define ASCV_REQ_SG_LIST_QP (ushort)0x006B #define ASC_HOST_FLAG_IN_ISR 0x01 #define 
ASC_HOST_FLAG_ACK_INT 0x02 #define ASC_RISC_FLAG_GEN_INT 0x01 #define ASC_RISC_FLAG_REQ_SG_LIST 0x02 #define IOP_CTRL (0x0F) #define IOP_STATUS (0x0E) #define IOP_INT_ACK IOP_STATUS #define IOP_REG_IFC (0x0D) #define IOP_SYN_OFFSET (0x0B) #define IOP_EXTRA_CONTROL (0x0D) #define IOP_REG_PC (0x0C) #define IOP_RAM_ADDR (0x0A) #define IOP_RAM_DATA (0x08) #define IOP_EEP_DATA (0x06) #define IOP_EEP_CMD (0x07) #define IOP_VERSION (0x03) #define IOP_CONFIG_HIGH (0x04) #define IOP_CONFIG_LOW (0x02) #define IOP_SIG_BYTE (0x01) #define IOP_SIG_WORD (0x00) #define IOP_REG_DC1 (0x0E) #define IOP_REG_DC0 (0x0C) #define IOP_REG_SB (0x0B) #define IOP_REG_DA1 (0x0A) #define IOP_REG_DA0 (0x08) #define IOP_REG_SC (0x09) #define IOP_DMA_SPEED (0x07) #define IOP_REG_FLAG (0x07) #define IOP_FIFO_H (0x06) #define IOP_FIFO_L (0x04) #define IOP_REG_ID (0x05) #define IOP_REG_QP (0x03) #define IOP_REG_IH (0x02) #define IOP_REG_IX (0x01) #define IOP_REG_AX (0x00) #define IFC_REG_LOCK (0x00) #define IFC_REG_UNLOCK (0x09) #define IFC_WR_EN_FILTER (0x10) #define IFC_RD_NO_EEPROM (0x10) #define IFC_SLEW_RATE (0x20) #define IFC_ACT_NEG (0x40) #define IFC_INP_FILTER (0x80) #define IFC_INIT_DEFAULT (IFC_ACT_NEG | IFC_REG_UNLOCK) #define SC_SEL (uchar)(0x80) #define SC_BSY (uchar)(0x40) #define SC_ACK (uchar)(0x20) #define SC_REQ (uchar)(0x10) #define SC_ATN (uchar)(0x08) #define SC_IO (uchar)(0x04) #define SC_CD (uchar)(0x02) #define SC_MSG (uchar)(0x01) #define SEC_SCSI_CTL (uchar)(0x80) #define SEC_ACTIVE_NEGATE (uchar)(0x40) #define SEC_SLEW_RATE (uchar)(0x20) #define SEC_ENABLE_FILTER (uchar)(0x10) #define ASC_HALT_EXTMSG_IN (ushort)0x8000 #define ASC_HALT_CHK_CONDITION (ushort)0x8100 #define ASC_HALT_SS_QUEUE_FULL (ushort)0x8200 #define ASC_HALT_DISABLE_ASYN_USE_SYN_FIX (ushort)0x8300 #define ASC_HALT_ENABLE_ASYN_USE_SYN_FIX (ushort)0x8400 #define ASC_HALT_SDTR_REJECTED (ushort)0x4000 #define ASC_HALT_HOST_COPY_SG_LIST_TO_RISC ( ushort )0x2000 #define ASC_MAX_QNO 0xF8 #define ASC_DATA_SEC_BEG 
(ushort)0x0080 #define ASC_DATA_SEC_END (ushort)0x0080 #define ASC_CODE_SEC_BEG (ushort)0x0080 #define ASC_CODE_SEC_END (ushort)0x0080 #define ASC_QADR_BEG (0x4000) #define ASC_QADR_USED (ushort)(ASC_MAX_QNO * 64) #define ASC_QADR_END (ushort)0x7FFF #define ASC_QLAST_ADR (ushort)0x7FC0 #define ASC_QBLK_SIZE 0x40 #define ASC_BIOS_DATA_QBEG 0xF8 #define ASC_MIN_ACTIVE_QNO 0x01 #define ASC_QLINK_END 0xFF #define ASC_EEPROM_WORDS 0x10 #define ASC_MAX_MGS_LEN 0x10 #define ASC_BIOS_ADDR_DEF 0xDC00 #define ASC_BIOS_SIZE 0x3800 #define ASC_BIOS_RAM_OFF 0x3800 #define ASC_BIOS_RAM_SIZE 0x800 #define ASC_BIOS_MIN_ADDR 0xC000 #define ASC_BIOS_MAX_ADDR 0xEC00 #define ASC_BIOS_BANK_SIZE 0x0400 #define ASC_MCODE_START_ADDR 0x0080 #define ASC_CFG0_HOST_INT_ON 0x0020 #define ASC_CFG0_BIOS_ON 0x0040 #define ASC_CFG0_VERA_BURST_ON 0x0080 #define ASC_CFG0_SCSI_PARITY_ON 0x0800 #define ASC_CFG1_SCSI_TARGET_ON 0x0080 #define ASC_CFG1_LRAM_8BITS_ON 0x0800 #define ASC_CFG_MSW_CLR_MASK 0x3080 #define CSW_TEST1 (ASC_CS_TYPE)0x8000 #define CSW_AUTO_CONFIG (ASC_CS_TYPE)0x4000 #define CSW_RESERVED1 (ASC_CS_TYPE)0x2000 #define CSW_IRQ_WRITTEN (ASC_CS_TYPE)0x1000 #define CSW_33MHZ_SELECTED (ASC_CS_TYPE)0x0800 #define CSW_TEST2 (ASC_CS_TYPE)0x0400 #define CSW_TEST3 (ASC_CS_TYPE)0x0200 #define CSW_RESERVED2 (ASC_CS_TYPE)0x0100 #define CSW_DMA_DONE (ASC_CS_TYPE)0x0080 #define CSW_FIFO_RDY (ASC_CS_TYPE)0x0040 #define CSW_EEP_READ_DONE (ASC_CS_TYPE)0x0020 #define CSW_HALTED (ASC_CS_TYPE)0x0010 #define CSW_SCSI_RESET_ACTIVE (ASC_CS_TYPE)0x0008 #define CSW_PARITY_ERR (ASC_CS_TYPE)0x0004 #define CSW_SCSI_RESET_LATCH (ASC_CS_TYPE)0x0002 #define CSW_INT_PENDING (ASC_CS_TYPE)0x0001 #define CIW_CLR_SCSI_RESET_INT (ASC_CS_TYPE)0x1000 #define CIW_INT_ACK (ASC_CS_TYPE)0x0100 #define CIW_TEST1 (ASC_CS_TYPE)0x0200 #define CIW_TEST2 (ASC_CS_TYPE)0x0400 #define CIW_SEL_33MHZ (ASC_CS_TYPE)0x0800 #define CIW_IRQ_ACT (ASC_CS_TYPE)0x1000 #define CC_CHIP_RESET (uchar)0x80 #define CC_SCSI_RESET (uchar)0x40 #define 
CC_HALT (uchar)0x20 #define CC_SINGLE_STEP (uchar)0x10 #define CC_DMA_ABLE (uchar)0x08 #define CC_TEST (uchar)0x04 #define CC_BANK_ONE (uchar)0x02 #define CC_DIAG (uchar)0x01 #define ASC_1000_ID0W 0x04C1 #define ASC_1000_ID0W_FIX 0x00C1 #define ASC_1000_ID1B 0x25 #define ASC_EISA_REV_IOP_MASK (0x0C83) #define ASC_EISA_CFG_IOP_MASK (0x0C86) #define ASC_GET_EISA_SLOT(iop) (PortAddr)((iop) & 0xF000) #define INS_HALTINT (ushort)0x6281 #define INS_HALT (ushort)0x6280 #define INS_SINT (ushort)0x6200 #define INS_RFLAG_WTM (ushort)0x7380 #define ASC_MC_SAVE_CODE_WSIZE 0x500 #define ASC_MC_SAVE_DATA_WSIZE 0x40 typedef struct asc_mc_saved { ushort data[ASC_MC_SAVE_DATA_WSIZE]; ushort code[ASC_MC_SAVE_CODE_WSIZE]; } ASC_MC_SAVED; #define AscGetQDoneInProgress(port) AscReadLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B) #define AscPutQDoneInProgress(port, val) AscWriteLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B, val) #define AscGetVarFreeQHead(port) AscReadLramWord((port), ASCV_FREE_Q_HEAD_W) #define AscGetVarDoneQTail(port) AscReadLramWord((port), ASCV_DONE_Q_TAIL_W) #define AscPutVarFreeQHead(port, val) AscWriteLramWord((port), ASCV_FREE_Q_HEAD_W, val) #define AscPutVarDoneQTail(port, val) AscWriteLramWord((port), ASCV_DONE_Q_TAIL_W, val) #define AscGetRiscVarFreeQHead(port) AscReadLramByte((port), ASCV_NEXTRDY_B) #define AscGetRiscVarDoneQTail(port) AscReadLramByte((port), ASCV_DONENEXT_B) #define AscPutRiscVarFreeQHead(port, val) AscWriteLramByte((port), ASCV_NEXTRDY_B, val) #define AscPutRiscVarDoneQTail(port, val) AscWriteLramByte((port), ASCV_DONENEXT_B, val) #define AscPutMCodeSDTRDoneAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id), (data)) #define AscGetMCodeSDTRDoneAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id)) #define AscPutMCodeInitSDTRAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id), data) #define AscGetMCodeInitSDTRAtID(port, id) 
AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id)) #define AscGetChipSignatureByte(port) (uchar)inp((port)+IOP_SIG_BYTE) #define AscGetChipSignatureWord(port) (ushort)inpw((port)+IOP_SIG_WORD) #define AscGetChipVerNo(port) (uchar)inp((port)+IOP_VERSION) #define AscGetChipCfgLsw(port) (ushort)inpw((port)+IOP_CONFIG_LOW) #define AscGetChipCfgMsw(port) (ushort)inpw((port)+IOP_CONFIG_HIGH) #define AscSetChipCfgLsw(port, data) outpw((port)+IOP_CONFIG_LOW, data) #define AscSetChipCfgMsw(port, data) outpw((port)+IOP_CONFIG_HIGH, data) #define AscGetChipEEPCmd(port) (uchar)inp((port)+IOP_EEP_CMD) #define AscSetChipEEPCmd(port, data) outp((port)+IOP_EEP_CMD, data) #define AscGetChipEEPData(port) (ushort)inpw((port)+IOP_EEP_DATA) #define AscSetChipEEPData(port, data) outpw((port)+IOP_EEP_DATA, data) #define AscGetChipLramAddr(port) (ushort)inpw((PortAddr)((port)+IOP_RAM_ADDR)) #define AscSetChipLramAddr(port, addr) outpw((PortAddr)((port)+IOP_RAM_ADDR), addr) #define AscGetChipLramData(port) (ushort)inpw((port)+IOP_RAM_DATA) #define AscSetChipLramData(port, data) outpw((port)+IOP_RAM_DATA, data) #define AscGetChipIFC(port) (uchar)inp((port)+IOP_REG_IFC) #define AscSetChipIFC(port, data) outp((port)+IOP_REG_IFC, data) #define AscGetChipStatus(port) (ASC_CS_TYPE)inpw((port)+IOP_STATUS) #define AscSetChipStatus(port, cs_val) outpw((port)+IOP_STATUS, cs_val) #define AscGetChipControl(port) (uchar)inp((port)+IOP_CTRL) #define AscSetChipControl(port, cc_val) outp((port)+IOP_CTRL, cc_val) #define AscGetChipSyn(port) (uchar)inp((port)+IOP_SYN_OFFSET) #define AscSetChipSyn(port, data) outp((port)+IOP_SYN_OFFSET, data) #define AscSetPCAddr(port, data) outpw((port)+IOP_REG_PC, data) #define AscGetPCAddr(port) (ushort)inpw((port)+IOP_REG_PC) #define AscIsIntPending(port) (AscGetChipStatus(port) & (CSW_INT_PENDING | CSW_SCSI_RESET_LATCH)) #define AscGetChipScsiID(port) ((AscGetChipCfgLsw(port) >> 8) & ASC_MAX_TID) #define AscGetExtraControl(port) 
(uchar)inp((port)+IOP_EXTRA_CONTROL) #define AscSetExtraControl(port, data) outp((port)+IOP_EXTRA_CONTROL, data) #define AscReadChipAX(port) (ushort)inpw((port)+IOP_REG_AX) #define AscWriteChipAX(port, data) outpw((port)+IOP_REG_AX, data) #define AscReadChipIX(port) (uchar)inp((port)+IOP_REG_IX) #define AscWriteChipIX(port, data) outp((port)+IOP_REG_IX, data) #define AscReadChipIH(port) (ushort)inpw((port)+IOP_REG_IH) #define AscWriteChipIH(port, data) outpw((port)+IOP_REG_IH, data) #define AscReadChipQP(port) (uchar)inp((port)+IOP_REG_QP) #define AscWriteChipQP(port, data) outp((port)+IOP_REG_QP, data) #define AscReadChipFIFO_L(port) (ushort)inpw((port)+IOP_REG_FIFO_L) #define AscWriteChipFIFO_L(port, data) outpw((port)+IOP_REG_FIFO_L, data) #define AscReadChipFIFO_H(port) (ushort)inpw((port)+IOP_REG_FIFO_H) #define AscWriteChipFIFO_H(port, data) outpw((port)+IOP_REG_FIFO_H, data) #define AscReadChipDmaSpeed(port) (uchar)inp((port)+IOP_DMA_SPEED) #define AscWriteChipDmaSpeed(port, data) outp((port)+IOP_DMA_SPEED, data) #define AscReadChipDA0(port) (ushort)inpw((port)+IOP_REG_DA0) #define AscWriteChipDA0(port) outpw((port)+IOP_REG_DA0, data) #define AscReadChipDA1(port) (ushort)inpw((port)+IOP_REG_DA1) #define AscWriteChipDA1(port) outpw((port)+IOP_REG_DA1, data) #define AscReadChipDC0(port) (ushort)inpw((port)+IOP_REG_DC0) #define AscWriteChipDC0(port) outpw((port)+IOP_REG_DC0, data) #define AscReadChipDC1(port) (ushort)inpw((port)+IOP_REG_DC1) #define AscWriteChipDC1(port) outpw((port)+IOP_REG_DC1, data) #define AscReadChipDvcID(port) (uchar)inp((port)+IOP_REG_ID) #define AscWriteChipDvcID(port, data) outp((port)+IOP_REG_ID, data) /* * Portable Data Types * * Any instance where a 32-bit long or pointer type is assumed * for precision or HW defined structures, the following define * types must be used. In Linux the char, short, and int types * are all consistent at 8, 16, and 32 bits respectively. Pointers * and long types are 64 bits on Alpha and UltraSPARC. 
*/ #define ADV_PADDR __u32 /* Physical address data type. */ #define ADV_VADDR __u32 /* Virtual address data type. */ #define ADV_DCNT __u32 /* Unsigned Data count type. */ #define ADV_SDCNT __s32 /* Signed Data count type. */ /* * These macros are used to convert a virtual address to a * 32-bit value. This currently can be used on Linux Alpha * which uses 64-bit virtual address but a 32-bit bus address. * This is likely to break in the future, but doing this now * will give us time to change the HW and FW to handle 64-bit * addresses. */ #define ADV_VADDR_TO_U32 virt_to_bus #define ADV_U32_TO_VADDR bus_to_virt #define AdvPortAddr void __iomem * /* Virtual memory address size */ /* * Define Adv Library required memory access macros. */ #define ADV_MEM_READB(addr) readb(addr) #define ADV_MEM_READW(addr) readw(addr) #define ADV_MEM_WRITEB(addr, byte) writeb(byte, addr) #define ADV_MEM_WRITEW(addr, word) writew(word, addr) #define ADV_MEM_WRITEDW(addr, dword) writel(dword, addr) #define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 15) /* * Define total number of simultaneous maximum element scatter-gather * request blocks per wide adapter. ASC_DEF_MAX_HOST_QNG (253) is the * maximum number of outstanding commands per wide host adapter. Each * command uses one or more ADV_SG_BLOCK each with 15 scatter-gather * elements. Allow each command to have at least one ADV_SG_BLOCK structure. * This allows about 15 commands to have the maximum 17 ADV_SG_BLOCK * structures or 255 scatter-gather elements. */ #define ADV_TOT_SG_BLOCK ASC_DEF_MAX_HOST_QNG /* * Define maximum number of scatter-gather elements per request. 
*/ #define ADV_MAX_SG_LIST 255 #define NO_OF_SG_PER_BLOCK 15 #define ADV_EEP_DVC_CFG_BEGIN (0x00) #define ADV_EEP_DVC_CFG_END (0x15) #define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */ #define ADV_EEP_MAX_WORD_ADDR (0x1E) #define ADV_EEP_DELAY_MS 100 #define ADV_EEPROM_BIG_ENDIAN 0x8000 /* EEPROM Bit 15 */ #define ADV_EEPROM_BIOS_ENABLE 0x4000 /* EEPROM Bit 14 */ /* * For the ASC3550 Bit 13 is Termination Polarity control bit. * For later ICs Bit 13 controls whether the CIS (Card Information * Service Section) is loaded from EEPROM. */ #define ADV_EEPROM_TERM_POL 0x2000 /* EEPROM Bit 13 */ #define ADV_EEPROM_CIS_LD 0x2000 /* EEPROM Bit 13 */ /* * ASC38C1600 Bit 11 * * If EEPROM Bit 11 is 0 for Function 0, then Function 0 will specify * INT A in the PCI Configuration Space Int Pin field. If it is 1, then * Function 0 will specify INT B. * * If EEPROM Bit 11 is 0 for Function 1, then Function 1 will specify * INT B in the PCI Configuration Space Int Pin field. If it is 1, then * Function 1 will specify INT A. 
*/ #define ADV_EEPROM_INTAB 0x0800 /* EEPROM Bit 11 */ typedef struct adveep_3550_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 13 set - Term Polarity Control */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_able; /* 04 Synchronous DTR able */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ uchar reserved1; /* reserved byte (not used) */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 */ /* bit 11 No verbose initialization. 
*/ /* bit 12 SCSI parity enabled */ /* bit 13 */ /* bit 14 */ /* bit 15 */ ushort ultra_able; /* 13 ULTRA speed able */ ushort reserved2; /* 14 reserved */ uchar max_host_qng; /* 15 maximum host queuing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort bug_fix; /* 17 control bit for bug fix */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort num_of_err; /* 36 number of error */ } ADVEEP_3550_CONFIG; typedef struct adveep_38C0800_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 13 set - Load CIS */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination_se; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / 
high on */ /* There is no low on / high off */ uchar termination_lvd; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 */ /* bit 11 No verbose initialization. */ /* bit 12 SCSI parity enabled */ /* bit 13 */ /* bit 14 */ /* bit 15 */ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ uchar max_host_qng; /* 15 maximum host queueing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. 
driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort reserved36; /* 36 reserved */ ushort reserved37; /* 37 reserved */ ushort reserved38; /* 38 reserved */ ushort reserved39; /* 39 reserved */ ushort reserved40; /* 40 reserved */ ushort reserved41; /* 41 reserved */ ushort reserved42; /* 42 reserved */ ushort reserved43; /* 43 reserved */ ushort reserved44; /* 44 reserved */ ushort reserved45; /* 45 reserved */ ushort reserved46; /* 46 reserved */ ushort reserved47; /* 47 reserved */ ushort reserved48; /* 48 reserved */ ushort reserved49; /* 49 reserved */ ushort reserved50; /* 50 reserved */ ushort reserved51; /* 51 reserved */ ushort reserved52; /* 52 reserved */ ushort reserved53; /* 53 reserved */ ushort reserved54; /* 54 reserved */ ushort reserved55; /* 55 reserved */ ushort cisptr_lsw; /* 56 CIS PTR LSW */ ushort cisprt_msw; /* 57 CIS PTR MSW */ ushort subsysvid; /* 58 SubSystem Vendor ID */ ushort subsysid; /* 59 SubSystem ID */ ushort reserved60; /* 60 reserved */ ushort reserved61; /* 61 reserved */ ushort reserved62; /* 62 reserved */ ushort reserved63; /* 63 reserved */ } ADVEEP_38C0800_CONFIG; typedef struct adveep_38C1600_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 11 set - Func. 0 INTB, Func. 1 INTA */ /* clear - Func. 0 INTA, Func. 
1 INTB */ /* bit 13 set - Load CIS */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination_se; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ uchar termination_lvd; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 Basic Integrity Checking disabled */ /* bit 11 No verbose initialization. */ /* bit 12 SCSI parity enabled */ /* bit 13 AIPP (Asyn. Info. Ph. Prot.) dis. 
*/ /* bit 14 */ /* bit 15 */ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ uchar max_host_qng; /* 15 maximum host queueing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort reserved36; /* 36 reserved */ ushort reserved37; /* 37 reserved */ ushort reserved38; /* 38 reserved */ ushort reserved39; /* 39 reserved */ ushort reserved40; /* 40 reserved */ ushort reserved41; /* 41 reserved */ ushort reserved42; /* 42 reserved */ ushort reserved43; /* 43 reserved */ ushort reserved44; /* 44 reserved */ ushort reserved45; /* 45 reserved */ ushort reserved46; /* 46 reserved */ ushort reserved47; /* 47 reserved */ ushort reserved48; /* 48 reserved */ ushort reserved49; /* 49 reserved */ ushort reserved50; /* 50 reserved */ ushort reserved51; /* 51 reserved */ ushort reserved52; /* 52 reserved */ ushort reserved53; /* 53 reserved */ ushort reserved54; /* 54 reserved */ ushort reserved55; /* 55 reserved */ ushort cisptr_lsw; /* 56 CIS PTR LSW */ ushort cisprt_msw; /* 57 CIS PTR MSW */ ushort subsysvid; /* 58 SubSystem Vendor ID */ ushort subsysid; /* 59 SubSystem ID */ ushort reserved60; /* 60 reserved */ ushort reserved61; /* 61 reserved */ ushort reserved62; /* 62 reserved */ 
ushort reserved63;	/* 63 reserved */
} ADVEEP_38C1600_CONFIG;

/*
 * EEPROM Commands
 */
#define ASC_EEP_CMD_DONE		0x0200

/*
 * bios_ctrl
 *
 * Bit flags for the 'bios_ctrl' EEPROM word (word offset 12) documented
 * field-by-field in the adveep_*_config structures above.
 *
 * NOTE(review): bits 5 (0x0020, "BIOS scan enabled" per the struct
 * comments), 10, 14 and 15 have no named definition here - presumably
 * unused by this driver; confirm before reusing those bit positions.
 */
#define BIOS_CTRL_BIOS			0x0001	/* bit 0 */
#define BIOS_CTRL_EXTENDED_XLAT		0x0002	/* bit 1 */
#define BIOS_CTRL_GT_2_DISK		0x0004	/* bit 2 */
#define BIOS_CTRL_BIOS_REMOVABLE	0x0008	/* bit 3 */
#define BIOS_CTRL_BOOTABLE_CD		0x0010	/* bit 4 */
#define BIOS_CTRL_MULTIPLE_LUN		0x0040	/* bit 6 */
#define BIOS_CTRL_DISPLAY_MSG		0x0080	/* bit 7 */
#define BIOS_CTRL_NO_SCAM		0x0100	/* bit 8 */
#define BIOS_CTRL_RESET_SCSI_BUS	0x0200	/* bit 9 */
#define BIOS_CTRL_INIT_VERBOSE		0x0800	/* bit 11 */
#define BIOS_CTRL_SCSI_PARITY		0x1000	/* bit 12 */
#define BIOS_CTRL_AIPP_DIS		0x2000	/* bit 13 */

/* Internal (RISC-local) RAM sizes of the wide-chip variants. */
#define ADV_3550_MEMSIZE	0x2000	/* 8 KB Internal Memory */
#define ADV_38C0800_MEMSIZE	0x4000	/* 16 KB Internal Memory */

/*
 * XXX - Since ASC38C1600 Rev.3 has a local RAM failure issue, there is
 * a special 16K Adv Library and Microcode version. After the issue is
 * resolved, should restore 32K support.
 *
 * #define ADV_38C1600_MEMSIZE  0x8000L   * 32 KB Internal Memory *
 */
#define ADV_38C1600_MEMSIZE	0x4000	/* 16 KB Internal Memory */

/*
 * Byte I/O register address from base of 'iop_base'.
*/ #define IOPB_INTR_STATUS_REG 0x00 #define IOPB_CHIP_ID_1 0x01 #define IOPB_INTR_ENABLES 0x02 #define IOPB_CHIP_TYPE_REV 0x03 #define IOPB_RES_ADDR_4 0x04 #define IOPB_RES_ADDR_5 0x05 #define IOPB_RAM_DATA 0x06 #define IOPB_RES_ADDR_7 0x07 #define IOPB_FLAG_REG 0x08 #define IOPB_RES_ADDR_9 0x09 #define IOPB_RISC_CSR 0x0A #define IOPB_RES_ADDR_B 0x0B #define IOPB_RES_ADDR_C 0x0C #define IOPB_RES_ADDR_D 0x0D #define IOPB_SOFT_OVER_WR 0x0E #define IOPB_RES_ADDR_F 0x0F #define IOPB_MEM_CFG 0x10 #define IOPB_RES_ADDR_11 0x11 #define IOPB_GPIO_DATA 0x12 #define IOPB_RES_ADDR_13 0x13 #define IOPB_FLASH_PAGE 0x14 #define IOPB_RES_ADDR_15 0x15 #define IOPB_GPIO_CNTL 0x16 #define IOPB_RES_ADDR_17 0x17 #define IOPB_FLASH_DATA 0x18 #define IOPB_RES_ADDR_19 0x19 #define IOPB_RES_ADDR_1A 0x1A #define IOPB_RES_ADDR_1B 0x1B #define IOPB_RES_ADDR_1C 0x1C #define IOPB_RES_ADDR_1D 0x1D #define IOPB_RES_ADDR_1E 0x1E #define IOPB_RES_ADDR_1F 0x1F #define IOPB_DMA_CFG0 0x20 #define IOPB_DMA_CFG1 0x21 #define IOPB_TICKLE 0x22 #define IOPB_DMA_REG_WR 0x23 #define IOPB_SDMA_STATUS 0x24 #define IOPB_SCSI_BYTE_CNT 0x25 #define IOPB_HOST_BYTE_CNT 0x26 #define IOPB_BYTE_LEFT_TO_XFER 0x27 #define IOPB_BYTE_TO_XFER_0 0x28 #define IOPB_BYTE_TO_XFER_1 0x29 #define IOPB_BYTE_TO_XFER_2 0x2A #define IOPB_BYTE_TO_XFER_3 0x2B #define IOPB_ACC_GRP 0x2C #define IOPB_RES_ADDR_2D 0x2D #define IOPB_DEV_ID 0x2E #define IOPB_RES_ADDR_2F 0x2F #define IOPB_SCSI_DATA 0x30 #define IOPB_RES_ADDR_31 0x31 #define IOPB_RES_ADDR_32 0x32 #define IOPB_SCSI_DATA_HSHK 0x33 #define IOPB_SCSI_CTRL 0x34 #define IOPB_RES_ADDR_35 0x35 #define IOPB_RES_ADDR_36 0x36 #define IOPB_RES_ADDR_37 0x37 #define IOPB_RAM_BIST 0x38 #define IOPB_PLL_TEST 0x39 #define IOPB_PCI_INT_CFG 0x3A #define IOPB_RES_ADDR_3B 0x3B #define IOPB_RFIFO_CNT 0x3C #define IOPB_RES_ADDR_3D 0x3D #define IOPB_RES_ADDR_3E 0x3E #define IOPB_RES_ADDR_3F 0x3F /* * Word I/O register address from base of 'iop_base'. 
*/ #define IOPW_CHIP_ID_0 0x00 /* CID0 */ #define IOPW_CTRL_REG 0x02 /* CC */ #define IOPW_RAM_ADDR 0x04 /* LA */ #define IOPW_RAM_DATA 0x06 /* LD */ #define IOPW_RES_ADDR_08 0x08 #define IOPW_RISC_CSR 0x0A /* CSR */ #define IOPW_SCSI_CFG0 0x0C /* CFG0 */ #define IOPW_SCSI_CFG1 0x0E /* CFG1 */ #define IOPW_RES_ADDR_10 0x10 #define IOPW_SEL_MASK 0x12 /* SM */ #define IOPW_RES_ADDR_14 0x14 #define IOPW_FLASH_ADDR 0x16 /* FA */ #define IOPW_RES_ADDR_18 0x18 #define IOPW_EE_CMD 0x1A /* EC */ #define IOPW_EE_DATA 0x1C /* ED */ #define IOPW_SFIFO_CNT 0x1E /* SFC */ #define IOPW_RES_ADDR_20 0x20 #define IOPW_Q_BASE 0x22 /* QB */ #define IOPW_QP 0x24 /* QP */ #define IOPW_IX 0x26 /* IX */ #define IOPW_SP 0x28 /* SP */ #define IOPW_PC 0x2A /* PC */ #define IOPW_RES_ADDR_2C 0x2C #define IOPW_RES_ADDR_2E 0x2E #define IOPW_SCSI_DATA 0x30 /* SD */ #define IOPW_SCSI_DATA_HSHK 0x32 /* SDH */ #define IOPW_SCSI_CTRL 0x34 /* SC */ #define IOPW_HSHK_CFG 0x36 /* HCFG */ #define IOPW_SXFR_STATUS 0x36 /* SXS */ #define IOPW_SXFR_CNTL 0x38 /* SXL */ #define IOPW_SXFR_CNTH 0x3A /* SXH */ #define IOPW_RES_ADDR_3C 0x3C #define IOPW_RFIFO_DATA 0x3E /* RFD */ /* * Doubleword I/O register address from base of 'iop_base'. 
*/ #define IOPDW_RES_ADDR_0 0x00 #define IOPDW_RAM_DATA 0x04 #define IOPDW_RES_ADDR_8 0x08 #define IOPDW_RES_ADDR_C 0x0C #define IOPDW_RES_ADDR_10 0x10 #define IOPDW_COMMA 0x14 #define IOPDW_COMMB 0x18 #define IOPDW_RES_ADDR_1C 0x1C #define IOPDW_SDMA_ADDR0 0x20 #define IOPDW_SDMA_ADDR1 0x24 #define IOPDW_SDMA_COUNT 0x28 #define IOPDW_SDMA_ERROR 0x2C #define IOPDW_RDMA_ADDR0 0x30 #define IOPDW_RDMA_ADDR1 0x34 #define IOPDW_RDMA_COUNT 0x38 #define IOPDW_RDMA_ERROR 0x3C #define ADV_CHIP_ID_BYTE 0x25 #define ADV_CHIP_ID_WORD 0x04C1 #define ADV_INTR_ENABLE_HOST_INTR 0x01 #define ADV_INTR_ENABLE_SEL_INTR 0x02 #define ADV_INTR_ENABLE_DPR_INTR 0x04 #define ADV_INTR_ENABLE_RTA_INTR 0x08 #define ADV_INTR_ENABLE_RMA_INTR 0x10 #define ADV_INTR_ENABLE_RST_INTR 0x20 #define ADV_INTR_ENABLE_DPE_INTR 0x40 #define ADV_INTR_ENABLE_GLOBAL_INTR 0x80 #define ADV_INTR_STATUS_INTRA 0x01 #define ADV_INTR_STATUS_INTRB 0x02 #define ADV_INTR_STATUS_INTRC 0x04 #define ADV_RISC_CSR_STOP (0x0000) #define ADV_RISC_TEST_COND (0x2000) #define ADV_RISC_CSR_RUN (0x4000) #define ADV_RISC_CSR_SINGLE_STEP (0x8000) #define ADV_CTRL_REG_HOST_INTR 0x0100 #define ADV_CTRL_REG_SEL_INTR 0x0200 #define ADV_CTRL_REG_DPR_INTR 0x0400 #define ADV_CTRL_REG_RTA_INTR 0x0800 #define ADV_CTRL_REG_RMA_INTR 0x1000 #define ADV_CTRL_REG_RES_BIT14 0x2000 #define ADV_CTRL_REG_DPE_INTR 0x4000 #define ADV_CTRL_REG_POWER_DONE 0x8000 #define ADV_CTRL_REG_ANY_INTR 0xFF00 #define ADV_CTRL_REG_CMD_RESET 0x00C6 #define ADV_CTRL_REG_CMD_WR_IO_REG 0x00C5 #define ADV_CTRL_REG_CMD_RD_IO_REG 0x00C4 #define ADV_CTRL_REG_CMD_WR_PCI_CFG_SPACE 0x00C3 #define ADV_CTRL_REG_CMD_RD_PCI_CFG_SPACE 0x00C2 #define ADV_TICKLE_NOP 0x00 #define ADV_TICKLE_A 0x01 #define ADV_TICKLE_B 0x02 #define ADV_TICKLE_C 0x03 #define AdvIsIntPending(port) \ (AdvReadWordRegister(port, IOPW_CTRL_REG) & ADV_CTRL_REG_HOST_INTR) /* * SCSI_CFG0 Register bit definitions */ #define TIMER_MODEAB 0xC000 /* Watchdog, Second, and Select. Timer Ctrl. 
*/ #define PARITY_EN 0x2000 /* Enable SCSI Parity Error detection */ #define EVEN_PARITY 0x1000 /* Select Even Parity */ #define WD_LONG 0x0800 /* Watchdog Interval, 1: 57 min, 0: 13 sec */ #define QUEUE_128 0x0400 /* Queue Size, 1: 128 byte, 0: 64 byte */ #define PRIM_MODE 0x0100 /* Primitive SCSI mode */ #define SCAM_EN 0x0080 /* Enable SCAM selection */ #define SEL_TMO_LONG 0x0040 /* Sel/Resel Timeout, 1: 400 ms, 0: 1.6 ms */ #define CFRM_ID 0x0020 /* SCAM id sel. confirm., 1: fast, 0: 6.4 ms */ #define OUR_ID_EN 0x0010 /* Enable OUR_ID bits */ #define OUR_ID 0x000F /* SCSI ID */ /* * SCSI_CFG1 Register bit definitions */ #define BIG_ENDIAN 0x8000 /* Enable Big Endian Mode MIO:15, EEP:15 */ #define TERM_POL 0x2000 /* Terminator Polarity Ctrl. MIO:13, EEP:13 */ #define SLEW_RATE 0x1000 /* SCSI output buffer slew rate */ #define FILTER_SEL 0x0C00 /* Filter Period Selection */ #define FLTR_DISABLE 0x0000 /* Input Filtering Disabled */ #define FLTR_11_TO_20NS 0x0800 /* Input Filtering 11ns to 20ns */ #define FLTR_21_TO_39NS 0x0C00 /* Input Filtering 21ns to 39ns */ #define ACTIVE_DBL 0x0200 /* Disable Active Negation */ #define DIFF_MODE 0x0100 /* SCSI differential Mode (Read-Only) */ #define DIFF_SENSE 0x0080 /* 1: No SE cables, 0: SE cable (Read-Only) */ #define TERM_CTL_SEL 0x0040 /* Enable TERM_CTL_H and TERM_CTL_L */ #define TERM_CTL 0x0030 /* External SCSI Termination Bits */ #define TERM_CTL_H 0x0020 /* Enable External SCSI Upper Termination */ #define TERM_CTL_L 0x0010 /* Enable External SCSI Lower Termination */ #define CABLE_DETECT 0x000F /* External SCSI Cable Connection Status */ /* * Addendum for ASC-38C0800 Chip * * The ASC-38C1600 Chip uses the same definitions except that the * bus mode override bits [12:10] have been moved to byte register * offset 0xE (IOPB_SOFT_OVER_WR) bits [12:10]. The [12:10] bits in * SCSI_CFG1 are read-only and always available. Bit 14 (DIS_TERM_DRV) * is not needed. The [12:10] bits in IOPB_SOFT_OVER_WR are write-only. 
* Also each ASC-38C1600 function or channel uses only cable bits [5:4] * and [1:0]. Bits [14], [7:6], [3:2] are unused. */ #define DIS_TERM_DRV 0x4000 /* 1: Read c_det[3:0], 0: cannot read */ #define HVD_LVD_SE 0x1C00 /* Device Detect Bits */ #define HVD 0x1000 /* HVD Device Detect */ #define LVD 0x0800 /* LVD Device Detect */ #define SE 0x0400 /* SE Device Detect */ #define TERM_LVD 0x00C0 /* LVD Termination Bits */ #define TERM_LVD_HI 0x0080 /* Enable LVD Upper Termination */ #define TERM_LVD_LO 0x0040 /* Enable LVD Lower Termination */ #define TERM_SE 0x0030 /* SE Termination Bits */ #define TERM_SE_HI 0x0020 /* Enable SE Upper Termination */ #define TERM_SE_LO 0x0010 /* Enable SE Lower Termination */ #define C_DET_LVD 0x000C /* LVD Cable Detect Bits */ #define C_DET3 0x0008 /* Cable Detect for LVD External Wide */ #define C_DET2 0x0004 /* Cable Detect for LVD Internal Wide */ #define C_DET_SE 0x0003 /* SE Cable Detect Bits */ #define C_DET1 0x0002 /* Cable Detect for SE Internal Wide */ #define C_DET0 0x0001 /* Cable Detect for SE Internal Narrow */ #define CABLE_ILLEGAL_A 0x7 /* x 0 0 0 | on on | Illegal (all 3 connectors are used) */ #define CABLE_ILLEGAL_B 0xB /* 0 x 0 0 | on on | Illegal (all 3 connectors are used) */ /* * MEM_CFG Register bit definitions */ #define BIOS_EN 0x40 /* BIOS Enable MIO:14,EEP:14 */ #define FAST_EE_CLK 0x20 /* Diagnostic Bit */ #define RAM_SZ 0x1C /* Specify size of RAM to RISC */ #define RAM_SZ_2KB 0x00 /* 2 KB */ #define RAM_SZ_4KB 0x04 /* 4 KB */ #define RAM_SZ_8KB 0x08 /* 8 KB */ #define RAM_SZ_16KB 0x0C /* 16 KB */ #define RAM_SZ_32KB 0x10 /* 32 KB */ #define RAM_SZ_64KB 0x14 /* 64 KB */ /* * DMA_CFG0 Register bit definitions * * This register is only accessible to the host. 
*/ #define BC_THRESH_ENB 0x80 /* PCI DMA Start Conditions */ #define FIFO_THRESH 0x70 /* PCI DMA FIFO Threshold */ #define FIFO_THRESH_16B 0x00 /* 16 bytes */ #define FIFO_THRESH_32B 0x20 /* 32 bytes */ #define FIFO_THRESH_48B 0x30 /* 48 bytes */ #define FIFO_THRESH_64B 0x40 /* 64 bytes */ #define FIFO_THRESH_80B 0x50 /* 80 bytes (default) */ #define FIFO_THRESH_96B 0x60 /* 96 bytes */ #define FIFO_THRESH_112B 0x70 /* 112 bytes */ #define START_CTL 0x0C /* DMA start conditions */ #define START_CTL_TH 0x00 /* Wait threshold level (default) */ #define START_CTL_ID 0x04 /* Wait SDMA/SBUS idle */ #define START_CTL_THID 0x08 /* Wait threshold and SDMA/SBUS idle */ #define START_CTL_EMFU 0x0C /* Wait SDMA FIFO empty/full */ #define READ_CMD 0x03 /* Memory Read Method */ #define READ_CMD_MR 0x00 /* Memory Read */ #define READ_CMD_MRL 0x02 /* Memory Read Long */ #define READ_CMD_MRM 0x03 /* Memory Read Multiple (default) */ /* * ASC-38C0800 RAM BIST Register bit definitions */ #define RAM_TEST_MODE 0x80 #define PRE_TEST_MODE 0x40 #define NORMAL_MODE 0x00 #define RAM_TEST_DONE 0x10 #define RAM_TEST_STATUS 0x0F #define RAM_TEST_HOST_ERROR 0x08 #define RAM_TEST_INTRAM_ERROR 0x04 #define RAM_TEST_RISC_ERROR 0x02 #define RAM_TEST_SCSI_ERROR 0x01 #define RAM_TEST_SUCCESS 0x00 #define PRE_TEST_VALUE 0x05 #define NORMAL_VALUE 0x00 /* * ASC38C1600 Definitions * * IOPB_PCI_INT_CFG Bit Field Definitions */ #define INTAB_LD 0x80 /* Value loaded from EEPROM Bit 11. */ /* * Bit 1 can be set to change the interrupt for the Function to operate in * Totem Pole mode. By default Bit 1 is 0 and the interrupt operates in * Open Drain mode. Both functions of the ASC38C1600 must be set to the same * mode, otherwise the operating mode is undefined. */ #define TOTEMPOLE 0x02 /* * Bit 0 can be used to change the Int Pin for the Function. The value is * 0 by default for both Functions with Function 0 using INT A and Function * B using INT B. For Function 0 if set, INT B is used. 
For Function 1 if set, * INT A is used. * * EEPROM Word 0 Bit 11 for each Function may change the initial Int Pin * value specified in the PCI Configuration Space. */ #define INTAB 0x01 /* * Adv Library Status Definitions */ #define ADV_TRUE 1 #define ADV_FALSE 0 #define ADV_SUCCESS 1 #define ADV_BUSY 0 #define ADV_ERROR (-1) /* * ADV_DVC_VAR 'warn_code' values */ #define ASC_WARN_BUSRESET_ERROR 0x0001 /* SCSI Bus Reset error */ #define ASC_WARN_EEPROM_CHKSUM 0x0002 /* EEP check sum error */ #define ASC_WARN_EEPROM_TERMINATION 0x0004 /* EEP termination bad field */ #define ASC_WARN_ERROR 0xFFFF /* ADV_ERROR return */ #define ADV_MAX_TID 15 /* max. target identifier */ #define ADV_MAX_LUN 7 /* max. logical unit number */ /* * Fixed locations of microcode operating variables. */ #define ASC_MC_CODE_BEGIN_ADDR 0x0028 /* microcode start address */ #define ASC_MC_CODE_END_ADDR 0x002A /* microcode end address */ #define ASC_MC_CODE_CHK_SUM 0x002C /* microcode code checksum */ #define ASC_MC_VERSION_DATE 0x0038 /* microcode version */ #define ASC_MC_VERSION_NUM 0x003A /* microcode number */ #define ASC_MC_BIOSMEM 0x0040 /* BIOS RISC Memory Start */ #define ASC_MC_BIOSLEN 0x0050 /* BIOS RISC Memory Length */ #define ASC_MC_BIOS_SIGNATURE 0x0058 /* BIOS Signature 0x55AA */ #define ASC_MC_BIOS_VERSION 0x005A /* BIOS Version (2 bytes) */ #define ASC_MC_SDTR_SPEED1 0x0090 /* SDTR Speed for TID 0-3 */ #define ASC_MC_SDTR_SPEED2 0x0092 /* SDTR Speed for TID 4-7 */ #define ASC_MC_SDTR_SPEED3 0x0094 /* SDTR Speed for TID 8-11 */ #define ASC_MC_SDTR_SPEED4 0x0096 /* SDTR Speed for TID 12-15 */ #define ASC_MC_CHIP_TYPE 0x009A #define ASC_MC_INTRB_CODE 0x009B #define ASC_MC_WDTR_ABLE 0x009C #define ASC_MC_SDTR_ABLE 0x009E #define ASC_MC_TAGQNG_ABLE 0x00A0 #define ASC_MC_DISC_ENABLE 0x00A2 #define ASC_MC_IDLE_CMD_STATUS 0x00A4 #define ASC_MC_IDLE_CMD 0x00A6 #define ASC_MC_IDLE_CMD_PARAMETER 0x00A8 #define ASC_MC_DEFAULT_SCSI_CFG0 0x00AC #define ASC_MC_DEFAULT_SCSI_CFG1 0x00AE #define 
ASC_MC_DEFAULT_MEM_CFG 0x00B0 #define ASC_MC_DEFAULT_SEL_MASK 0x00B2 #define ASC_MC_SDTR_DONE 0x00B6 #define ASC_MC_NUMBER_OF_QUEUED_CMD 0x00C0 #define ASC_MC_NUMBER_OF_MAX_CMD 0x00D0 #define ASC_MC_DEVICE_HSHK_CFG_TABLE 0x0100 #define ASC_MC_CONTROL_FLAG 0x0122 /* Microcode control flag. */ #define ASC_MC_WDTR_DONE 0x0124 #define ASC_MC_CAM_MODE_MASK 0x015E /* CAM mode TID bitmask. */ #define ASC_MC_ICQ 0x0160 #define ASC_MC_IRQ 0x0164 #define ASC_MC_PPR_ABLE 0x017A /* * BIOS LRAM variable absolute offsets. */ #define BIOS_CODESEG 0x54 #define BIOS_CODELEN 0x56 #define BIOS_SIGNATURE 0x58 #define BIOS_VERSION 0x5A /* * Microcode Control Flags * * Flags set by the Adv Library in RISC variable 'control_flag' (0x122) * and handled by the microcode. */ #define CONTROL_FLAG_IGNORE_PERR 0x0001 /* Ignore DMA Parity Errors */ #define CONTROL_FLAG_ENABLE_AIPP 0x0002 /* Enabled AIPP checking. */ /* * ASC_MC_DEVICE_HSHK_CFG_TABLE microcode table or HSHK_CFG register format */ #define HSHK_CFG_WIDE_XFR 0x8000 #define HSHK_CFG_RATE 0x0F00 #define HSHK_CFG_OFFSET 0x001F #define ASC_DEF_MAX_HOST_QNG 0xFD /* Max. number of host commands (253) */ #define ASC_DEF_MIN_HOST_QNG 0x10 /* Min. number of host commands (16) */ #define ASC_DEF_MAX_DVC_QNG 0x3F /* Max. number commands per device (63) */ #define ASC_DEF_MIN_DVC_QNG 0x04 /* Min. number commands per device (4) */ #define ASC_QC_DATA_CHECK 0x01 /* Require ASC_QC_DATA_OUT set or clear. */ #define ASC_QC_DATA_OUT 0x02 /* Data out DMA transfer. */ #define ASC_QC_START_MOTOR 0x04 /* Send auto-start motor before request. */ #define ASC_QC_NO_OVERRUN 0x08 /* Don't report overrun. */ #define ASC_QC_FREEZE_TIDQ 0x10 /* Freeze TID queue after request. XXX TBD */ #define ASC_QSC_NO_DISC 0x01 /* Don't allow disconnect for request. */ #define ASC_QSC_NO_TAGMSG 0x02 /* Don't allow tag queuing for request. */ #define ASC_QSC_NO_SYNC 0x04 /* Don't use Synch. transfer on request. 
*/
#define ASC_QSC_NO_WIDE		0x08	/* Don't use Wide transfer on request. */
#define ASC_QSC_REDO_DTR	0x10	/* Renegotiate WDTR/SDTR before request. */
/*
 * Note: If a Tag Message is to be sent and neither ASC_QSC_HEAD_TAG nor
 * ASC_QSC_ORDERED_TAG is set, then a Simple Tag Message (0x20) is used.
 */
#define ASC_QSC_HEAD_TAG	0x40	/* Use Head Tag Message (0x21). */
#define ASC_QSC_ORDERED_TAG	0x80	/* Use Ordered Tag Message (0x22). */

/*
 * Carrier queue element, shared between host and board microcode.
 *
 * All fields here are accessed by the board microcode and need to be
 * little-endian.
 */
typedef struct adv_carr_t {
	ADV_VADDR carr_va;	/* Carrier Virtual Address */
	ADV_PADDR carr_pa;	/* Carrier Physical Address */
	ADV_VADDR areq_vpa;	/* ASC_SCSI_REQ_Q Virtual or Physical Address */
	/*
	 * next_vpa [31:4]   Carrier Virtual or Physical Next Pointer
	 *
	 * next_vpa [3:1]    Reserved Bits
	 * next_vpa [0]      Done Flag set in Response Queue.
	 */
	ADV_VADDR next_vpa;
} ADV_CARR_T;

/*
 * Mask used to eliminate low 4 bits of carrier 'next_vpa' field.
 */
#define ASC_NEXT_VPA_MASK	0xFFFFFFF0

/* Flag/sentinel values carried in the low bits of 'next_vpa'. */
#define ASC_RQ_DONE		0x00000001
#define ASC_RQ_GOOD		0x00000002
#define ASC_CQ_STOPPER		0x00000000

/* Strip the low flag bits to recover the carrier pointer. */
#define ASC_GET_CARRP(carrp) ((carrp) & ASC_NEXT_VPA_MASK)

/* Worst-case extra carriers consumed by page-boundary crossings. */
#define ADV_CARRIER_NUM_PAGE_CROSSING \
	(((ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) + (PAGE_SIZE - 1))/PAGE_SIZE)

/* Size of the single buffer that holds all carriers. */
#define ADV_CARRIER_BUFSIZE \
	((ADV_CARRIER_COUNT + ADV_CARRIER_NUM_PAGE_CROSSING) * sizeof(ADV_CARR_T))

/*
 * ASC_SCSI_REQ_Q 'a_flag' definitions
 *
 * The Adv Library should limit use to the lower nibble (4 bits) of
 * a_flag. Drivers are free to use the upper nibble (4 bits) of a_flag.
 */
#define ADV_POLL_REQUEST	0x01	/* poll for request completion */
#define ADV_SCSIQ_DONE		0x02	/* request done */
#define ADV_DONT_RETRY		0x08	/* don't do retry */

/* Wide-board chip-type identifiers. */
#define ADV_CHIP_ASC3550	0x01	/* Ultra-Wide IC */
#define ADV_CHIP_ASC38C0800	0x02	/* Ultra2-Wide/LVD IC */
#define ADV_CHIP_ASC38C1600	0x03	/* Ultra3-Wide/LVD2 IC */

/*
 * Adapter temporary configuration structure
 *
 * This structure can be discarded after initialization.
Don't add * fields here needed after initialization. * * Field naming convention: * * *_enable indicates the field enables or disables a feature. The * value of the field is never reset. */ typedef struct adv_dvc_cfg { ushort disc_enable; /* enable disconnection */ uchar chip_version; /* chip version */ uchar termination; /* Term. Ctrl. bits 6-5 of SCSI_CFG1 register */ ushort control_flag; /* Microcode Control Flag */ ushort mcode_date; /* Microcode date */ ushort mcode_version; /* Microcode version */ ushort serial1; /* EEPROM serial number word 1 */ ushort serial2; /* EEPROM serial number word 2 */ ushort serial3; /* EEPROM serial number word 3 */ } ADV_DVC_CFG; struct adv_dvc_var; struct adv_scsi_req_q; typedef struct asc_sg_block { uchar reserved1; uchar reserved2; uchar reserved3; uchar sg_cnt; /* Valid entries in block. */ ADV_PADDR sg_ptr; /* Pointer to next sg block. */ struct { ADV_PADDR sg_addr; /* SG element address. */ ADV_DCNT sg_count; /* SG element count. */ } sg_list[NO_OF_SG_PER_BLOCK]; } ADV_SG_BLOCK; /* * ADV_SCSI_REQ_Q - microcode request structure * * All fields in this structure up to byte 60 are used by the microcode. * The microcode makes assumptions about the size and ordering of fields * in this structure. Do not change the structure definition here without * coordinating the change with the microcode. * * All fields accessed by microcode must be maintained in little_endian * order. */ typedef struct adv_scsi_req_q { uchar cntl; /* Ucode flags and state (ASC_MC_QC_*). */ uchar target_cmd; uchar target_id; /* Device target identifier. */ uchar target_lun; /* Device target logical unit number. */ ADV_PADDR data_addr; /* Data buffer physical address. */ ADV_DCNT data_cnt; /* Data count. Ucode sets to residual. */ ADV_PADDR sense_addr; ADV_PADDR carr_pa; uchar mflag; uchar sense_len; uchar cdb_len; /* SCSI CDB length. Must <= 16 bytes. */ uchar scsi_cntl; uchar done_status; /* Completion status. */ uchar scsi_status; /* SCSI status byte. 
*/ uchar host_status; /* Ucode host status. */ uchar sg_working_ix; uchar cdb[12]; /* SCSI CDB bytes 0-11. */ ADV_PADDR sg_real_addr; /* SG list physical address. */ ADV_PADDR scsiq_rptr; uchar cdb16[4]; /* SCSI CDB bytes 12-15. */ ADV_VADDR scsiq_ptr; ADV_VADDR carr_va; /* * End of microcode structure - 60 bytes. The rest of the structure * is used by the Adv Library and ignored by the microcode. */ ADV_VADDR srb_ptr; ADV_SG_BLOCK *sg_list_ptr; /* SG list virtual address. */ char *vdata_addr; /* Data buffer virtual address. */ uchar a_flag; uchar pad[2]; /* Pad out to a word boundary. */ } ADV_SCSI_REQ_Q; /* * The following two structures are used to process Wide Board requests. * * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library * and microcode with the ADV_SCSI_REQ_Q field 'srb_ptr' pointing to the * adv_req_t. The adv_req_t structure 'cmndp' field in turn points to the * Mid-Level SCSI request structure. * * Zero or more ADV_SG_BLOCK are used with each ADV_SCSI_REQ_Q. Each * ADV_SG_BLOCK structure holds 15 scatter-gather elements. Under Linux * up to 255 scatter-gather elements may be used per request or * ADV_SCSI_REQ_Q. * * Both structures must be 32 byte aligned. */ typedef struct adv_sgblk { ADV_SG_BLOCK sg_block; /* Sgblock structure. */ uchar align[32]; /* Sgblock structure padding. */ struct adv_sgblk *next_sgblkp; /* Next scatter-gather structure. */ } adv_sgblk_t; typedef struct adv_req { ADV_SCSI_REQ_Q scsi_req_q; /* Adv Library request structure. */ uchar align[32]; /* Request structure padding. */ struct scsi_cmnd *cmndp; /* Mid-Level SCSI command pointer. */ adv_sgblk_t *sgblkp; /* Adv Library scatter-gather pointer. */ struct adv_req *next_reqp; /* Next Request Structure. */ } adv_req_t; /* * Adapter operation variable structure. * * One structure is required per host adapter. 
* * Field naming convention: * * *_able indicates both whether a feature should be enabled or disabled * and whether a device isi capable of the feature. At initialization * this field may be set, but later if a device is found to be incapable * of the feature, the field is cleared. */ typedef struct adv_dvc_var { AdvPortAddr iop_base; /* I/O port address */ ushort err_code; /* fatal error code */ ushort bios_ctrl; /* BIOS control word, EEPROM word 12 */ ushort wdtr_able; /* try WDTR for a device */ ushort sdtr_able; /* try SDTR for a device */ ushort ultra_able; /* try SDTR Ultra speed for a device */ ushort sdtr_speed1; /* EEPROM SDTR Speed for TID 0-3 */ ushort sdtr_speed2; /* EEPROM SDTR Speed for TID 4-7 */ ushort sdtr_speed3; /* EEPROM SDTR Speed for TID 8-11 */ ushort sdtr_speed4; /* EEPROM SDTR Speed for TID 12-15 */ ushort tagqng_able; /* try tagged queuing with a device */ ushort ppr_able; /* PPR message capable per TID bitmask. */ uchar max_dvc_qng; /* maximum number of tagged commands per device */ ushort start_motor; /* start motor command allowed */ uchar scsi_reset_wait; /* delay in seconds after scsi bus reset */ uchar chip_no; /* should be assigned by caller */ uchar max_host_qng; /* maximum number of Q'ed command allowed */ ushort no_scam; /* scam_tolerant of EEPROM */ struct asc_board *drv_ptr; /* driver pointer to private structure */ uchar chip_scsi_id; /* chip SCSI target ID */ uchar chip_type; uchar bist_err_code; ADV_CARR_T *carrier_buf; ADV_CARR_T *carr_freelist; /* Carrier free list. */ ADV_CARR_T *icq_sp; /* Initiator command queue stopper pointer. */ ADV_CARR_T *irq_sp; /* Initiator response queue stopper pointer. */ ushort carr_pending_cnt; /* Count of pending carriers. */ struct adv_req *orig_reqp; /* adv_req_t memory block. */ /* * Note: The following fields will not be used after initialization. The * driver may discard the buffer after initialization is done. 
*/ ADV_DVC_CFG *cfg; /* temporary configuration structure */ } ADV_DVC_VAR; /* * Microcode idle loop commands */ #define IDLE_CMD_COMPLETED 0 #define IDLE_CMD_STOP_CHIP 0x0001 #define IDLE_CMD_STOP_CHIP_SEND_INT 0x0002 #define IDLE_CMD_SEND_INT 0x0004 #define IDLE_CMD_ABORT 0x0008 #define IDLE_CMD_DEVICE_RESET 0x0010 #define IDLE_CMD_SCSI_RESET_START 0x0020 /* Assert SCSI Bus Reset */ #define IDLE_CMD_SCSI_RESET_END 0x0040 /* Deassert SCSI Bus Reset */ #define IDLE_CMD_SCSIREQ 0x0080 #define IDLE_CMD_STATUS_SUCCESS 0x0001 #define IDLE_CMD_STATUS_FAILURE 0x0002 /* * AdvSendIdleCmd() flag definitions. */ #define ADV_NOWAIT 0x01 /* * Wait loop time out values. */ #define SCSI_WAIT_100_MSEC 100UL /* 100 milliseconds */ #define SCSI_US_PER_MSEC 1000 /* microseconds per millisecond */ #define SCSI_MAX_RETRY 10 /* retry count */ #define ADV_ASYNC_RDMA_FAILURE 0x01 /* Fatal RDMA failure. */ #define ADV_ASYNC_SCSI_BUS_RESET_DET 0x02 /* Detected SCSI Bus Reset. */ #define ADV_ASYNC_CARRIER_READY_FAILURE 0x03 /* Carrier Ready failure. */ #define ADV_RDMA_IN_CARR_AND_Q_INVALID 0x04 /* RDMAed-in data invalid. */ #define ADV_HOST_SCSI_BUS_RESET 0x80 /* Host Initiated SCSI Bus Reset. */ /* Read byte from a register. */ #define AdvReadByteRegister(iop_base, reg_off) \ (ADV_MEM_READB((iop_base) + (reg_off))) /* Write byte to a register. */ #define AdvWriteByteRegister(iop_base, reg_off, byte) \ (ADV_MEM_WRITEB((iop_base) + (reg_off), (byte))) /* Read word (2 bytes) from a register. */ #define AdvReadWordRegister(iop_base, reg_off) \ (ADV_MEM_READW((iop_base) + (reg_off))) /* Write word (2 bytes) to a register. */ #define AdvWriteWordRegister(iop_base, reg_off, word) \ (ADV_MEM_WRITEW((iop_base) + (reg_off), (word))) /* Write dword (4 bytes) to a register. */ #define AdvWriteDWordRegister(iop_base, reg_off, dword) \ (ADV_MEM_WRITEDW((iop_base) + (reg_off), (dword))) /* Read byte from LRAM. 
*/ #define AdvReadByteLram(iop_base, addr, byte) \ do { \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \ (byte) = ADV_MEM_READB((iop_base) + IOPB_RAM_DATA); \ } while (0) /* Write byte to LRAM. */ #define AdvWriteByteLram(iop_base, addr, byte) \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ ADV_MEM_WRITEB((iop_base) + IOPB_RAM_DATA, (byte))) /* Read word (2 bytes) from LRAM. */ #define AdvReadWordLram(iop_base, addr, word) \ do { \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \ (word) = (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)); \ } while (0) /* Write word (2 bytes) to LRAM. */ #define AdvWriteWordLram(iop_base, addr, word) \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word))) /* Write little-endian double word (4 bytes) to LRAM */ /* Because of unspecified C language ordering don't use auto-increment. */ #define AdvWriteDWordLramNoSwap(iop_base, addr, dword) \ ((ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \ cpu_to_le16((ushort) ((dword) & 0xFFFF)))), \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \ cpu_to_le16((ushort) ((dword >> 16) & 0xFFFF))))) /* Read word (2 bytes) from LRAM assuming that the address is already set. */ #define AdvReadWordAutoIncLram(iop_base) \ (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)) /* Write word (2 bytes) to LRAM assuming that the address is already set. */ #define AdvWriteWordAutoIncLram(iop_base, word) \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word))) /* * Define macro to check for Condor signature. * * Evaluate to ADV_TRUE if a Condor chip is found the specified port * address 'iop_base'. Otherwise evalue to ADV_FALSE. */ #define AdvFindSignature(iop_base) \ (((AdvReadByteRegister((iop_base), IOPB_CHIP_ID_1) == \ ADV_CHIP_ID_BYTE) && \ (AdvReadWordRegister((iop_base), IOPW_CHIP_ID_0) == \ ADV_CHIP_ID_WORD)) ? 
ADV_TRUE : ADV_FALSE) /* * Define macro to Return the version number of the chip at 'iop_base'. * * The second parameter 'bus_type' is currently unused. */ #define AdvGetChipVersion(iop_base, bus_type) \ AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV) /* * Abort an SRB in the chip's RISC Memory. The 'srb_ptr' argument must * match the ASC_SCSI_REQ_Q 'srb_ptr' field. * * If the request has not yet been sent to the device it will simply be * aborted from RISC memory. If the request is disconnected it will be * aborted on reselection by sending an Abort Message to the target ID. * * Return value: * ADV_TRUE(1) - Queue was successfully aborted. * ADV_FALSE(0) - Queue was not found on the active queue list. */ #define AdvAbortQueue(asc_dvc, scsiq) \ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \ (ADV_DCNT) (scsiq)) /* * Send a Bus Device Reset Message to the specified target ID. * * All outstanding commands will be purged if sending the * Bus Device Reset Message is successful. * * Return Value: * ADV_TRUE(1) - All requests on the target are purged. * ADV_FALSE(0) - Couldn't issue Bus Device Reset Message; Requests * are not purged. */ #define AdvResetDevice(asc_dvc, target_id) \ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \ (ADV_DCNT) (target_id)) /* * SCSI Wide Type definition. */ #define ADV_SCSI_BIT_ID_TYPE ushort /* * AdvInitScsiTarget() 'cntl_flag' options. */ #define ADV_SCAN_LUN 0x01 #define ADV_CAPINFO_NOLUN 0x02 /* * Convert target id to target id bit mask. */ #define ADV_TID_TO_TIDMASK(tid) (0x01 << ((tid) & ADV_MAX_TID)) /* * ASC_SCSI_REQ_Q 'done_status' and 'host_status' return values. */ #define QD_NO_STATUS 0x00 /* Request not completed yet. 
*/ #define QD_NO_ERROR 0x01 #define QD_ABORTED_BY_HOST 0x02 #define QD_WITH_ERROR 0x04 #define QHSTA_NO_ERROR 0x00 #define QHSTA_M_SEL_TIMEOUT 0x11 #define QHSTA_M_DATA_OVER_RUN 0x12 #define QHSTA_M_UNEXPECTED_BUS_FREE 0x13 #define QHSTA_M_QUEUE_ABORTED 0x15 #define QHSTA_M_SXFR_SDMA_ERR 0x16 /* SXFR_STATUS SCSI DMA Error */ #define QHSTA_M_SXFR_SXFR_PERR 0x17 /* SXFR_STATUS SCSI Bus Parity Error */ #define QHSTA_M_RDMA_PERR 0x18 /* RISC PCI DMA parity error */ #define QHSTA_M_SXFR_OFF_UFLW 0x19 /* SXFR_STATUS Offset Underflow */ #define QHSTA_M_SXFR_OFF_OFLW 0x20 /* SXFR_STATUS Offset Overflow */ #define QHSTA_M_SXFR_WD_TMO 0x21 /* SXFR_STATUS Watchdog Timeout */ #define QHSTA_M_SXFR_DESELECTED 0x22 /* SXFR_STATUS Deselected */ /* Note: QHSTA_M_SXFR_XFR_OFLW is identical to QHSTA_M_DATA_OVER_RUN. */ #define QHSTA_M_SXFR_XFR_OFLW 0x12 /* SXFR_STATUS Transfer Overflow */ #define QHSTA_M_SXFR_XFR_PH_ERR 0x24 /* SXFR_STATUS Transfer Phase Error */ #define QHSTA_M_SXFR_UNKNOWN_ERROR 0x25 /* SXFR_STATUS Unknown Error */ #define QHSTA_M_SCSI_BUS_RESET 0x30 /* Request aborted from SBR */ #define QHSTA_M_SCSI_BUS_RESET_UNSOL 0x31 /* Request aborted from unsol. SBR */ #define QHSTA_M_BUS_DEVICE_RESET 0x32 /* Request aborted from BDR */ #define QHSTA_M_DIRECTION_ERR 0x35 /* Data Phase mismatch */ #define QHSTA_M_DIRECTION_ERR_HUNG 0x36 /* Data Phase mismatch and bus hang */ #define QHSTA_M_WTM_TIMEOUT 0x41 #define QHSTA_M_BAD_CMPL_STATUS_IN 0x42 #define QHSTA_M_NO_AUTO_REQ_SENSE 0x43 #define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44 #define QHSTA_M_INVALID_DEVICE 0x45 /* Bad target ID */ #define QHSTA_M_FROZEN_TIDQ 0x46 /* TID Queue frozen. */ #define QHSTA_M_SGBACKUP_ERROR 0x47 /* Scatter-Gather backup error */ /* Return the address that is aligned at the next doubleword >= to 'addr'. 
*/ #define ADV_8BALIGN(addr) (((ulong) (addr) + 0x7) & ~0x7) #define ADV_16BALIGN(addr) (((ulong) (addr) + 0xF) & ~0xF) #define ADV_32BALIGN(addr) (((ulong) (addr) + 0x1F) & ~0x1F) /* * Total contiguous memory needed for driver SG blocks. * * ADV_MAX_SG_LIST must be defined by a driver. It is the maximum * number of scatter-gather elements the driver supports in a * single request. */ #define ADV_SG_LIST_MAX_BYTE_SIZE \ (sizeof(ADV_SG_BLOCK) * \ ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK)) /* struct asc_board flags */ #define ASC_IS_WIDE_BOARD 0x04 /* AdvanSys Wide Board */ #define ASC_NARROW_BOARD(boardp) (((boardp)->flags & ASC_IS_WIDE_BOARD) == 0) #define NO_ISA_DMA 0xff /* No ISA DMA Channel Used */ #define ASC_INFO_SIZE 128 /* advansys_info() line size */ #ifdef CONFIG_PROC_FS /* /proc/scsi/advansys/[0...] related definitions */ #define ASC_PRTBUF_SIZE 2048 #define ASC_PRTLINE_SIZE 160 #define ASC_PRT_NEXT() \ if (cp) { \ totlen += len; \ leftlen -= len; \ if (leftlen == 0) { \ return totlen; \ } \ cp += len; \ } #endif /* CONFIG_PROC_FS */ /* Asc Library return codes */ #define ASC_TRUE 1 #define ASC_FALSE 0 #define ASC_NOERROR 1 #define ASC_BUSY 0 #define ASC_ERROR (-1) /* struct scsi_cmnd function return codes */ #define STATUS_BYTE(byte) (byte) #define MSG_BYTE(byte) ((byte) << 8) #define HOST_BYTE(byte) ((byte) << 16) #define DRIVER_BYTE(byte) ((byte) << 24) #define ASC_STATS(shost, counter) ASC_STATS_ADD(shost, counter, 1) #ifndef ADVANSYS_STATS #define ASC_STATS_ADD(shost, counter, count) #else /* ADVANSYS_STATS */ #define ASC_STATS_ADD(shost, counter, count) \ (((struct asc_board *) shost_priv(shost))->asc_stats.counter += (count)) #endif /* ADVANSYS_STATS */ /* If the result wraps when calculating tenths, return 0. */ #define ASC_TENTHS(num, den) \ (((10 * ((num)/(den))) > (((num) * 10)/(den))) ? \ 0 : ((((num) * 10)/(den)) - (10 * ((num)/(den))))) /* * Display a message to the console. 
*/ #define ASC_PRINT(s) \ { \ printk("advansys: "); \ printk(s); \ } #define ASC_PRINT1(s, a1) \ { \ printk("advansys: "); \ printk((s), (a1)); \ } #define ASC_PRINT2(s, a1, a2) \ { \ printk("advansys: "); \ printk((s), (a1), (a2)); \ } #define ASC_PRINT3(s, a1, a2, a3) \ { \ printk("advansys: "); \ printk((s), (a1), (a2), (a3)); \ } #define ASC_PRINT4(s, a1, a2, a3, a4) \ { \ printk("advansys: "); \ printk((s), (a1), (a2), (a3), (a4)); \ } #ifndef ADVANSYS_DEBUG #define ASC_DBG(lvl, s...) #define ASC_DBG_PRT_SCSI_HOST(lvl, s) #define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) #define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) #define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) #define ADV_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) #define ASC_DBG_PRT_HEX(lvl, name, start, length) #define ASC_DBG_PRT_CDB(lvl, cdb, len) #define ASC_DBG_PRT_SENSE(lvl, sense, len) #define ASC_DBG_PRT_INQUIRY(lvl, inq, len) #else /* ADVANSYS_DEBUG */ /* * Debugging Message Levels: * 0: Errors Only * 1: High-Level Tracing * 2-N: Verbose Tracing */ #define ASC_DBG(lvl, format, arg...) 
{ \ if (asc_dbglvl >= (lvl)) \ printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \ __func__ , ## arg); \ } #define ASC_DBG_PRT_SCSI_HOST(lvl, s) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_scsi_host(s); \ } \ } #define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_asc_scsi_q(scsiqp); \ } \ } #define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_asc_qdone_info(qdone); \ } \ } #define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_adv_scsi_req_q(scsiqp); \ } \ } #define ASC_DBG_PRT_HEX(lvl, name, start, length) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_hex((name), (start), (length)); \ } \ } #define ASC_DBG_PRT_CDB(lvl, cdb, len) \ ASC_DBG_PRT_HEX((lvl), "CDB", (uchar *) (cdb), (len)); #define ASC_DBG_PRT_SENSE(lvl, sense, len) \ ASC_DBG_PRT_HEX((lvl), "SENSE", (uchar *) (sense), (len)); #define ASC_DBG_PRT_INQUIRY(lvl, inq, len) \ ASC_DBG_PRT_HEX((lvl), "INQUIRY", (uchar *) (inq), (len)); #endif /* ADVANSYS_DEBUG */ #ifdef ADVANSYS_STATS /* Per board statistics structure */ struct asc_stats { /* Driver Entrypoint Statistics */ ADV_DCNT queuecommand; /* # calls to advansys_queuecommand() */ ADV_DCNT reset; /* # calls to advansys_eh_bus_reset() */ ADV_DCNT biosparam; /* # calls to advansys_biosparam() */ ADV_DCNT interrupt; /* # advansys_interrupt() calls */ ADV_DCNT callback; /* # calls to asc/adv_isr_callback() */ ADV_DCNT done; /* # calls to request's scsi_done function */ ADV_DCNT build_error; /* # asc/adv_build_req() ASC_ERROR returns. */ ADV_DCNT adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */ ADV_DCNT adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */ /* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */ ADV_DCNT exe_noerror; /* # ASC_NOERROR returns. */ ADV_DCNT exe_busy; /* # ASC_BUSY returns. */ ADV_DCNT exe_error; /* # ASC_ERROR returns. */ ADV_DCNT exe_unknown; /* # unknown returns. 
*/ /* Data Transfer Statistics */ ADV_DCNT xfer_cnt; /* # I/O requests received */ ADV_DCNT xfer_elem; /* # scatter-gather elements */ ADV_DCNT xfer_sect; /* # 512-byte blocks */ }; #endif /* ADVANSYS_STATS */ /* * Structure allocated for each board. * * This structure is allocated by scsi_host_alloc() at the end * of the 'Scsi_Host' structure starting at the 'hostdata' * field. It is guaranteed to be allocated from DMA-able memory. */ struct asc_board { struct device *dev; uint flags; /* Board flags */ unsigned int irq; union { ASC_DVC_VAR asc_dvc_var; /* Narrow board */ ADV_DVC_VAR adv_dvc_var; /* Wide board */ } dvc_var; union { ASC_DVC_CFG asc_dvc_cfg; /* Narrow board */ ADV_DVC_CFG adv_dvc_cfg; /* Wide board */ } dvc_cfg; ushort asc_n_io_port; /* Number I/O ports. */ ADV_SCSI_BIT_ID_TYPE init_tidmask; /* Target init./valid mask */ ushort reqcnt[ADV_MAX_TID + 1]; /* Starvation request count */ ADV_SCSI_BIT_ID_TYPE queue_full; /* Queue full mask */ ushort queue_full_cnt[ADV_MAX_TID + 1]; /* Queue full count */ union { ASCEEP_CONFIG asc_eep; /* Narrow EEPROM config. */ ADVEEP_3550_CONFIG adv_3550_eep; /* 3550 EEPROM config. */ ADVEEP_38C0800_CONFIG adv_38C0800_eep; /* 38C0800 EEPROM config. */ ADVEEP_38C1600_CONFIG adv_38C1600_eep; /* 38C1600 EEPROM config. */ } eep_config; ulong last_reset; /* Saved last reset time */ /* /proc/scsi/advansys/[0...] */ char *prtbuf; /* /proc print buffer */ #ifdef ADVANSYS_STATS struct asc_stats asc_stats; /* Board statistics */ #endif /* ADVANSYS_STATS */ /* * The following fields are used only for Narrow Boards. */ uchar sdtr_data[ASC_MAX_TID + 1]; /* SDTR information */ /* * The following fields are used only for Wide Boards. */ void __iomem *ioremap_addr; /* I/O Memory remap address. */ ushort ioport; /* I/O Port address. */ adv_req_t *adv_reqp; /* Request structures. */ adv_sgblk_t *adv_sgblkp; /* Scatter-gather structures. */ ushort bios_signature; /* BIOS Signature. */ ushort bios_version; /* BIOS Version. 
*/ ushort bios_codeseg; /* BIOS Code Segment. */ ushort bios_codelen; /* BIOS Code Segment Length. */ }; #define asc_dvc_to_board(asc_dvc) container_of(asc_dvc, struct asc_board, \ dvc_var.asc_dvc_var) #define adv_dvc_to_board(adv_dvc) container_of(adv_dvc, struct asc_board, \ dvc_var.adv_dvc_var) #define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev) #ifdef ADVANSYS_DEBUG static int asc_dbglvl = 3; /* * asc_prt_asc_dvc_var() */ static void asc_prt_asc_dvc_var(ASC_DVC_VAR *h) { printk("ASC_DVC_VAR at addr 0x%lx\n", (ulong)h); printk(" iop_base 0x%x, err_code 0x%x, dvc_cntl 0x%x, bug_fix_cntl " "%d,\n", h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl); printk(" bus_type %d, init_sdtr 0x%x,\n", h->bus_type, (unsigned)h->init_sdtr); printk(" sdtr_done 0x%x, use_tagged_qng 0x%x, unit_not_ready 0x%x, " "chip_no 0x%x,\n", (unsigned)h->sdtr_done, (unsigned)h->use_tagged_qng, (unsigned)h->unit_not_ready, (unsigned)h->chip_no); printk(" queue_full_or_busy 0x%x, start_motor 0x%x, scsi_reset_wait " "%u,\n", (unsigned)h->queue_full_or_busy, (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); printk(" is_in_int %u, max_total_qng %u, cur_total_qng %u, " "in_critical_cnt %u,\n", (unsigned)h->is_in_int, (unsigned)h->max_total_qng, (unsigned)h->cur_total_qng, (unsigned)h->in_critical_cnt); printk(" last_q_shortage %u, init_state 0x%x, no_scam 0x%x, " "pci_fix_asyn_xfer 0x%x,\n", (unsigned)h->last_q_shortage, (unsigned)h->init_state, (unsigned)h->no_scam, (unsigned)h->pci_fix_asyn_xfer); printk(" cfg 0x%lx\n", (ulong)h->cfg); } /* * asc_prt_asc_dvc_cfg() */ static void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h) { printk("ASC_DVC_CFG at addr 0x%lx\n", (ulong)h); printk(" can_tagged_qng 0x%x, cmd_qng_enabled 0x%x,\n", h->can_tagged_qng, h->cmd_qng_enabled); printk(" disc_enable 0x%x, sdtr_enable 0x%x,\n", h->disc_enable, h->sdtr_enable); printk(" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, " "chip_version %d,\n", h->chip_scsi_id, h->isa_dma_speed, 
h->isa_dma_channel, h->chip_version); printk(" mcode_date 0x%x, mcode_version %d\n", h->mcode_date, h->mcode_version); } /* * asc_prt_adv_dvc_var() * * Display an ADV_DVC_VAR structure. */ static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h) { printk(" ADV_DVC_VAR at addr 0x%lx\n", (ulong)h); printk(" iop_base 0x%lx, err_code 0x%x, ultra_able 0x%x\n", (ulong)h->iop_base, h->err_code, (unsigned)h->ultra_able); printk(" sdtr_able 0x%x, wdtr_able 0x%x\n", (unsigned)h->sdtr_able, (unsigned)h->wdtr_able); printk(" start_motor 0x%x, scsi_reset_wait 0x%x\n", (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%lxn\n", (unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng, (ulong)h->carr_freelist); printk(" icq_sp 0x%lx, irq_sp 0x%lx\n", (ulong)h->icq_sp, (ulong)h->irq_sp); printk(" no_scam 0x%x, tagqng_able 0x%x\n", (unsigned)h->no_scam, (unsigned)h->tagqng_able); printk(" chip_scsi_id 0x%x, cfg 0x%lx\n", (unsigned)h->chip_scsi_id, (ulong)h->cfg); } /* * asc_prt_adv_dvc_cfg() * * Display an ADV_DVC_CFG structure. 
*/ static void asc_prt_adv_dvc_cfg(ADV_DVC_CFG *h) { printk(" ADV_DVC_CFG at addr 0x%lx\n", (ulong)h); printk(" disc_enable 0x%x, termination 0x%x\n", h->disc_enable, h->termination); printk(" chip_version 0x%x, mcode_date 0x%x\n", h->chip_version, h->mcode_date); printk(" mcode_version 0x%x, control_flag 0x%x\n", h->mcode_version, h->control_flag); } /* * asc_prt_scsi_host() */ static void asc_prt_scsi_host(struct Scsi_Host *s) { struct asc_board *boardp = shost_priv(s); printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); printk(" host_busy %u, host_no %d, last_reset %d,\n", s->host_busy, s->host_no, (unsigned)s->last_reset); printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", (ulong)s->base, (ulong)s->io_port, boardp->irq); printk(" dma_channel %d, this_id %d, can_queue %d,\n", s->dma_channel, s->this_id, s->can_queue); printk(" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d\n", s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma); if (ASC_NARROW_BOARD(boardp)) { asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var); asc_prt_asc_dvc_cfg(&boardp->dvc_cfg.asc_dvc_cfg); } else { asc_prt_adv_dvc_var(&boardp->dvc_var.adv_dvc_var); asc_prt_adv_dvc_cfg(&boardp->dvc_cfg.adv_dvc_cfg); } } /* * asc_prt_hex() * * Print hexadecimal output in 4 byte groupings 32 bytes * or 8 double-words per line. */ static void asc_prt_hex(char *f, uchar *s, int l) { int i; int j; int k; int m; printk("%s: (%d bytes)\n", f, l); for (i = 0; i < l; i += 32) { /* Display a maximum of 8 double-words per line. 
*/ if ((k = (l - i) / 4) >= 8) { k = 8; m = 0; } else { m = (l - i) % 4; } for (j = 0; j < k; j++) { printk(" %2.2X%2.2X%2.2X%2.2X", (unsigned)s[i + (j * 4)], (unsigned)s[i + (j * 4) + 1], (unsigned)s[i + (j * 4) + 2], (unsigned)s[i + (j * 4) + 3]); } switch (m) { case 0: default: break; case 1: printk(" %2.2X", (unsigned)s[i + (j * 4)]); break; case 2: printk(" %2.2X%2.2X", (unsigned)s[i + (j * 4)], (unsigned)s[i + (j * 4) + 1]); break; case 3: printk(" %2.2X%2.2X%2.2X", (unsigned)s[i + (j * 4) + 1], (unsigned)s[i + (j * 4) + 2], (unsigned)s[i + (j * 4) + 3]); break; } printk("\n"); } } /* * asc_prt_asc_scsi_q() */ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q) { ASC_SG_HEAD *sgp; int i; printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q); printk (" target_ix 0x%x, target_lun %u, srb_ptr 0x%lx, tag_code 0x%x,\n", q->q2.target_ix, q->q1.target_lun, (ulong)q->q2.srb_ptr, q->q2.tag_code); printk (" data_addr 0x%lx, data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", (ulong)le32_to_cpu(q->q1.data_addr), (ulong)le32_to_cpu(q->q1.data_cnt), (ulong)le32_to_cpu(q->q1.sense_addr), q->q1.sense_len); printk(" cdbptr 0x%lx, cdb_len %u, sg_head 0x%lx, sg_queue_cnt %u\n", (ulong)q->cdbptr, q->q2.cdb_len, (ulong)q->sg_head, q->q1.sg_queue_cnt); if (q->sg_head) { sgp = q->sg_head; printk("ASC_SG_HEAD at addr 0x%lx\n", (ulong)sgp); printk(" entry_cnt %u, queue_cnt %u\n", sgp->entry_cnt, sgp->queue_cnt); for (i = 0; i < sgp->entry_cnt; i++) { printk(" [%u]: addr 0x%lx, bytes %lu\n", i, (ulong)le32_to_cpu(sgp->sg_list[i].addr), (ulong)le32_to_cpu(sgp->sg_list[i].bytes)); } } } /* * asc_prt_asc_qdone_info() */ static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q) { printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q); printk(" srb_ptr 0x%lx, target_ix %u, cdb_len %u, tag_code %u,\n", (ulong)q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len, q->d2.tag_code); printk (" done_stat 0x%x, host_stat 0x%x, scsi_stat 0x%x, scsi_msg 0x%x\n", q->d3.done_stat, q->d3.host_stat, q->d3.scsi_stat, 
q->d3.scsi_msg); } /* * asc_prt_adv_sgblock() * * Display an ADV_SG_BLOCK structure. */ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b) { int i; printk(" ASC_SG_BLOCK at addr 0x%lx (sgblockno %d)\n", (ulong)b, sgblockno); printk(" sg_cnt %u, sg_ptr 0x%lx\n", b->sg_cnt, (ulong)le32_to_cpu(b->sg_ptr)); BUG_ON(b->sg_cnt > NO_OF_SG_PER_BLOCK); if (b->sg_ptr != 0) BUG_ON(b->sg_cnt != NO_OF_SG_PER_BLOCK); for (i = 0; i < b->sg_cnt; i++) { printk(" [%u]: sg_addr 0x%lx, sg_count 0x%lx\n", i, (ulong)b->sg_list[i].sg_addr, (ulong)b->sg_list[i].sg_count); } } /* * asc_prt_adv_scsi_req_q() * * Display an ADV_SCSI_REQ_Q structure. */ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q) { int sg_blk_cnt; struct asc_sg_block *sg_ptr; printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q); printk(" target_id %u, target_lun %u, srb_ptr 0x%lx, a_flag 0x%x\n", q->target_id, q->target_lun, (ulong)q->srb_ptr, q->a_flag); printk(" cntl 0x%x, data_addr 0x%lx, vdata_addr 0x%lx\n", q->cntl, (ulong)le32_to_cpu(q->data_addr), (ulong)q->vdata_addr); printk(" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", (ulong)le32_to_cpu(q->data_cnt), (ulong)le32_to_cpu(q->sense_addr), q->sense_len); printk (" cdb_len %u, done_status 0x%x, host_status 0x%x, scsi_status 0x%x\n", q->cdb_len, q->done_status, q->host_status, q->scsi_status); printk(" sg_working_ix 0x%x, target_cmd %u\n", q->sg_working_ix, q->target_cmd); printk(" scsiq_rptr 0x%lx, sg_real_addr 0x%lx, sg_list_ptr 0x%lx\n", (ulong)le32_to_cpu(q->scsiq_rptr), (ulong)le32_to_cpu(q->sg_real_addr), (ulong)q->sg_list_ptr); /* Display the request's ADV_SG_BLOCK structures. */ if (q->sg_list_ptr != NULL) { sg_blk_cnt = 0; while (1) { /* * 'sg_ptr' is a physical address. Convert it to a virtual * address by indexing 'sg_blk_cnt' into the virtual address * array 'sg_list_ptr'. * * XXX - Assumes all SG physical blocks are virtually contiguous. 
*/ sg_ptr = &(((ADV_SG_BLOCK *)(q->sg_list_ptr))[sg_blk_cnt]); asc_prt_adv_sgblock(sg_blk_cnt, sg_ptr); if (sg_ptr->sg_ptr == 0) { break; } sg_blk_cnt++; } } } #endif /* ADVANSYS_DEBUG */ /* * The advansys chip/microcode contains a 32-bit identifier for each command * known as the 'srb'. I don't know what it stands for. The driver used * to encode the scsi_cmnd pointer by calling virt_to_bus and retrieve it * with bus_to_virt. Now the driver keeps a per-host map of integers to * pointers. It auto-expands when full, unless it can't allocate memory. * Note that an srb of 0 is treated specially by the chip/firmware, hence * the return of i+1 in this routine, and the corresponding subtraction in * the inverse routine. */ #define BAD_SRB 0 static u32 advansys_ptr_to_srb(struct asc_dvc_var *asc_dvc, void *ptr) { int i; void **new_ptr; for (i = 0; i < asc_dvc->ptr_map_count; i++) { if (!asc_dvc->ptr_map[i]) goto out; } if (asc_dvc->ptr_map_count == 0) asc_dvc->ptr_map_count = 1; else asc_dvc->ptr_map_count *= 2; new_ptr = krealloc(asc_dvc->ptr_map, asc_dvc->ptr_map_count * sizeof(void *), GFP_ATOMIC); if (!new_ptr) return BAD_SRB; asc_dvc->ptr_map = new_ptr; out: ASC_DBG(3, "Putting ptr %p into array offset %d\n", ptr, i); asc_dvc->ptr_map[i] = ptr; return i + 1; } static void * advansys_srb_to_ptr(struct asc_dvc_var *asc_dvc, u32 srb) { void *ptr; srb--; if (srb >= asc_dvc->ptr_map_count) { printk("advansys: bad SRB %u, max %u\n", srb, asc_dvc->ptr_map_count); return NULL; } ptr = asc_dvc->ptr_map[srb]; asc_dvc->ptr_map[srb] = NULL; ASC_DBG(3, "Returning ptr %p from array offset %d\n", ptr, srb); return ptr; } /* * advansys_info() * * Return suitable for printing on the console with the argument * adapter's configuration information. * * Note: The information line should not exceed ASC_INFO_SIZE bytes, * otherwise the static 'info' array will be overrun. 
*/ static const char *advansys_info(struct Scsi_Host *shost) { static char info[ASC_INFO_SIZE]; struct asc_board *boardp = shost_priv(shost); ASC_DVC_VAR *asc_dvc_varp; ADV_DVC_VAR *adv_dvc_varp; char *busname; char *widename = NULL; if (ASC_NARROW_BOARD(boardp)) { asc_dvc_varp = &boardp->dvc_var.asc_dvc_var; ASC_DBG(1, "begin\n"); if (asc_dvc_varp->bus_type & ASC_IS_ISA) { if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) == ASC_IS_ISAPNP) { busname = "ISA PnP"; } else { busname = "ISA"; } sprintf(info, "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X, DMA 0x%X", ASC_VERSION, busname, (ulong)shost->io_port, (ulong)shost->io_port + ASC_IOADR_GAP - 1, boardp->irq, shost->dma_channel); } else { if (asc_dvc_varp->bus_type & ASC_IS_VL) { busname = "VL"; } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) { busname = "EISA"; } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) { if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { busname = "PCI Ultra"; } else { busname = "PCI"; } } else { busname = "?"; shost_printk(KERN_ERR, shost, "unknown bus " "type %d\n", asc_dvc_varp->bus_type); } sprintf(info, "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X", ASC_VERSION, busname, (ulong)shost->io_port, (ulong)shost->io_port + ASC_IOADR_GAP - 1, boardp->irq); } } else { /* * Wide Adapter Information * * Memory-mapped I/O is used instead of I/O space to access * the adapter, but display the I/O Port range. The Memory * I/O address is displayed through the driver /proc file. 
*/ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { widename = "Ultra-Wide"; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { widename = "Ultra2-Wide"; } else { widename = "Ultra3-Wide"; } sprintf(info, "AdvanSys SCSI %s: PCI %s: PCIMEM 0x%lX-0x%lX, IRQ 0x%X", ASC_VERSION, widename, (ulong)adv_dvc_varp->iop_base, (ulong)adv_dvc_varp->iop_base + boardp->asc_n_io_port - 1, boardp->irq); } BUG_ON(strlen(info) >= ASC_INFO_SIZE); ASC_DBG(1, "end\n"); return info; } #ifdef CONFIG_PROC_FS /* * asc_prt_line() * * If 'cp' is NULL print to the console, otherwise print to a buffer. * * Return 0 if printing to the console, otherwise return the number of * bytes written to the buffer. * * Note: If any single line is greater than ASC_PRTLINE_SIZE bytes the stack * will be corrupted. 's[]' is defined to be ASC_PRTLINE_SIZE bytes. */ static int asc_prt_line(char *buf, int buflen, char *fmt, ...) { va_list args; int ret; char s[ASC_PRTLINE_SIZE]; va_start(args, fmt); ret = vsprintf(s, fmt, args); BUG_ON(ret >= ASC_PRTLINE_SIZE); if (buf == NULL) { (void)printk(s); ret = 0; } else { ret = min(buflen, ret); memcpy(buf, s, ret); } va_end(args); return ret; } /* * asc_prt_board_devices() * * Print driver information for devices attached to the board. * * Note: no single line should be greater than ASC_PRTLINE_SIZE, * cf. asc_prt_line(). * * Return the number of characters copied into 'cp'. No more than * 'cplen' characters will be copied to 'cp'. 
*/
/*
 * asc_prt_board_devices()
 *
 * Print the target IDs detected on the SCSI bus plus the adapter's own ID.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_board_devices(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	int chip_scsi_id;
	int i;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nDevice Information for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	/* Narrow and wide boards keep the adapter SCSI ID in different structs. */
	if (ASC_NARROW_BOARD(boardp)) {
		chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
	} else {
		chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
	}

	len = asc_prt_line(cp, leftlen, "Target IDs Detected:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/* init_tidmask has a bit set for each target found at init. */
		if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) {
			len = asc_prt_line(cp, leftlen, " %X,", i);
			ASC_PRT_NEXT();
		}
	}
	len = asc_prt_line(cp, leftlen, " (%X=Host Adapter)\n", chip_scsi_id);
	ASC_PRT_NEXT();

	return totlen;
}

/*
 * Display Wide Board BIOS Information.
 */
static int asc_prt_adv_bios(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	ushort major, minor, letter;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen, "\nROM BIOS Version: ");
	ASC_PRT_NEXT();

	/*
	 * If the BIOS saved a valid signature, then fill in
	 * the BIOS code segment base address.
	 */
	if (boardp->bios_signature != 0x55AA) {
		len = asc_prt_line(cp, leftlen, "Disabled or Pre-3.1\n");
		ASC_PRT_NEXT();
		len = asc_prt_line(cp, leftlen,
				   "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n");
		ASC_PRT_NEXT();
		len = asc_prt_line(cp, leftlen,
				   "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
		ASC_PRT_NEXT();
	} else {
		/* Version is packed as major.minor plus a revision letter. */
		major = (boardp->bios_version >> 12) & 0xF;
		minor = (boardp->bios_version >> 8) & 0xF;
		letter = (boardp->bios_version & 0xFF);

		len = asc_prt_line(cp, leftlen, "%d.%d%c\n",
				   major, minor,
				   letter >= 26 ? '?' : letter + 'A');
		ASC_PRT_NEXT();

		/*
		 * Current available ROM BIOS release is 3.1I for UW
		 * and 3.2I for U2W. This code doesn't differentiate
		 * UW and U2W boards.
		 */
		if (major < 3 || (major <= 3 && minor < 1) ||
		    (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) {
			len = asc_prt_line(cp, leftlen,
					   "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n");
			ASC_PRT_NEXT();
			len = asc_prt_line(cp, leftlen,
					   "ftp://ftp.connectcom.net/pub\n");
			ASC_PRT_NEXT();
		}
	}

	return totlen;
}

/*
 * Add serial number to information bar if signature AAh
 * is found at bits 15-9 (7 bits) of word 1.
 *
 * Serial Number consists of 12 alpha-numeric digits.
 *
 *       1 - Product type (A,B,C,D..)  Word0: 15-13 (3 bits)
 *       2 - MFG Location (A,B,C,D..)  Word0: 12-10 (3 bits)
 *     3-4 - Product ID (0-99)         Word0: 9-0 (10 bits)
 *       5 - Product revision (A-J)    Word0: "  "  "
 *
 *           Signature                 Word1: 15-9 (7 bits)
 *       6 - Year (0-9)                Word1: 8-6 (3 bits) & Word2: 15 (1 bit)
 *     7-8 - Week of the year (1-52)   Word1: 5-0 (6 bits)
 *
 *    9-12 - Serial Number (A001-Z999) Word2: 14-0 (15 bits)
 *
 * Note 1: Only production cards will have a serial number.
 *
 * Note 2: Signature is most significant 7 bits (0xFE).
 *
 * Returns ASC_TRUE if serial number found, otherwise returns ASC_FALSE.
 */
static int asc_get_eeprom_string(ushort *serialnum, uchar *cp)
{
	ushort w, num;

	if ((serialnum[1] & 0xFE00) != ((ushort)0xAA << 8)) {
		return ASC_FALSE;
	} else {
		/*
		 * First word - 6 digits.
		 */
		w = serialnum[0];

		/* Product type - 1st digit. */
		if ((*cp = 'A' + ((w & 0xE000) >> 13)) == 'H') {
			/* Product type is P=Prototype */
			*cp += 0x8;
		}
		cp++;

		/* Manufacturing location - 2nd digit. */
		*cp++ = 'A' + ((w & 0x1C00) >> 10);

		/* Product ID - 3rd, 4th digits. */
		num = w & 0x3FF;
		*cp++ = '0' + (num / 100);
		num %= 100;
		*cp++ = '0' + (num / 10);

		/* Product revision - 5th digit. */
		*cp++ = 'A' + (num % 10);

		/*
		 * Second word
		 */
		w = serialnum[1];

		/*
		 * Year - 6th digit.
		 *
		 * If bit 15 of third word is set, then the
		 * last digit of the year is greater than 7.
		 */
		if (serialnum[2] & 0x8000) {
			*cp++ = '8' + ((w & 0x1C0) >> 6);
		} else {
			*cp++ = '0' + ((w & 0x1C0) >> 6);
		}

		/* Week of year - 7th, 8th digits. */
		num = w & 0x003F;
		*cp++ = '0' + num / 10;
		num %= 10;
		*cp++ = '0' + num;

		/*
		 * Third word
		 */
		w = serialnum[2] & 0x7FFF;

		/* Serial number - 9th digit. */
		*cp++ = 'A' + (w / 1000);

		/* 10th, 11th, 12th digits. */
		num = w % 1000;
		*cp++ = '0' + num / 100;
		num %= 100;
		*cp++ = '0' + num / 10;
		num %= 10;
		*cp++ = '0' + num;

		*cp = '\0';	/* Null Terminate the string. */
		return ASC_TRUE;
	}
}

/*
 * asc_prt_asc_board_eeprom()
 *
 * Print board EEPROM configuration.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_asc_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	ASC_DVC_VAR *asc_dvc_varp;
	int leftlen;
	int totlen;
	int len;
	ASCEEP_CONFIG *ep;
	int i;
#ifdef CONFIG_ISA
	/* Table maps the 3-bit EEPROM DMA speed code to MB/s. */
	int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
#endif /* CONFIG_ISA */
	uchar serialstr[13];	/* 12 serial digits + NUL terminator */

	asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
	ep = &boardp->eep_config.asc_eep;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	if (asc_get_eeprom_string((ushort *)&ep->adapter_info[0], serialstr)
	    == ASC_TRUE) {
		len = asc_prt_line(cp, leftlen, " Serial Number: %s\n",
				   serialstr);
		ASC_PRT_NEXT();
	} else {
		/* 0xBB marks the default config used for EEPROM-less boards. */
		if (ep->adapter_info[5] == 0xBB) {
			len = asc_prt_line(cp, leftlen,
					   " Default Settings Used for EEPROM-less Adapter.\n");
			ASC_PRT_NEXT();
		} else {
			len = asc_prt_line(cp, leftlen,
					   " Serial Number Signature Not Present.\n");
			ASC_PRT_NEXT();
		}
	}

	len = asc_prt_line(cp, leftlen,
			   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
			   ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng,
			   ep->max_tag_qng);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Target ID: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %d", i);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* The per-target lines below print Y/N for each TID's bit mask. */
	len = asc_prt_line(cp, leftlen, " Disconnects: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Command Queuing: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Start Motor: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

#ifdef CONFIG_ISA
	if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
		len = asc_prt_line(cp, leftlen,
				   " Host ISA DMA speed: %d MB/S\n",
				   isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
		ASC_PRT_NEXT();
	}
#endif /* CONFIG_ISA */

	return totlen;
}

/*
 * asc_prt_adv_board_eeprom()
 *
 * Print board EEPROM configuration.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
*/
static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	ADV_DVC_VAR *adv_dvc_varp;
	int leftlen;
	int totlen;
	int len;
	int i;
	char *termstr;
	uchar serialstr[13];	/* 12 serial digits + NUL terminator */
	/* Only one of the three EEPROM layout pointers is set, by chip type. */
	ADVEEP_3550_CONFIG *ep_3550 = NULL;
	ADVEEP_38C0800_CONFIG *ep_38C0800 = NULL;
	ADVEEP_38C1600_CONFIG *ep_38C1600 = NULL;
	ushort word;
	ushort *wordp;
	ushort sdtr_speed = 0;

	adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		ep_3550 = &boardp->eep_config.adv_3550_eep;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		ep_38C0800 = &boardp->eep_config.adv_38C0800_eep;
	} else {
		ep_38C1600 = &boardp->eep_config.adv_38C1600_eep;
	}

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		wordp = &ep_3550->serial_number_word1;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		wordp = &ep_38C0800->serial_number_word1;
	} else {
		wordp = &ep_38C1600->serial_number_word1;
	}

	if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) {
		len = asc_prt_line(cp, leftlen, " Serial Number: %s\n",
				   serialstr);
		ASC_PRT_NEXT();
	} else {
		len = asc_prt_line(cp, leftlen,
				   " Serial Number Signature Not Present.\n");
		ASC_PRT_NEXT();
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen,
				   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
				   ep_3550->adapter_scsi_id,
				   ep_3550->max_host_qng, ep_3550->max_dvc_qng);
		ASC_PRT_NEXT();
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		len = asc_prt_line(cp, leftlen,
				   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
				   ep_38C0800->adapter_scsi_id,
				   ep_38C0800->max_host_qng,
				   ep_38C0800->max_dvc_qng);
		ASC_PRT_NEXT();
	} else {
		len = asc_prt_line(cp, leftlen,
				   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
				   ep_38C1600->adapter_scsi_id,
				   ep_38C1600->max_host_qng,
				   ep_38C1600->max_dvc_qng);
		ASC_PRT_NEXT();
	}
	/* Decode the termination field; 3550 uses 'termination', others LVD. */
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->termination;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->termination_lvd;
	} else {
		word = ep_38C1600->termination_lvd;
	}
	switch (word) {
	case 1:
		termstr = "Low Off/High Off";
		break;
	case 2:
		termstr = "Low Off/High On";
		break;
	case 3:
		termstr = "Low On/High On";
		break;
	default:
	case 0:
		termstr = "Automatic";
		break;
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen,
				   " termination: %u (%s), bios_ctrl: 0x%x\n",
				   ep_3550->termination, termstr,
				   ep_3550->bios_ctrl);
		ASC_PRT_NEXT();
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		len = asc_prt_line(cp, leftlen,
				   " termination: %u (%s), bios_ctrl: 0x%x\n",
				   ep_38C0800->termination_lvd, termstr,
				   ep_38C0800->bios_ctrl);
		ASC_PRT_NEXT();
	} else {
		len = asc_prt_line(cp, leftlen,
				   " termination: %u (%s), bios_ctrl: 0x%x\n",
				   ep_38C1600->termination_lvd, termstr,
				   ep_38C1600->bios_ctrl);
		ASC_PRT_NEXT();
	}

	len = asc_prt_line(cp, leftlen, " Target ID: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %X", i);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* The per-target lines below print Y/N for each TID's bit mask. */
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->disc_enable;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->disc_enable;
	} else {
		word = ep_38C1600->disc_enable;
	}
	len = asc_prt_line(cp, leftlen, " Disconnects: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->tagqng_able;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->tagqng_able;
	} else {
		word = ep_38C1600->tagqng_able;
	}
	len = asc_prt_line(cp, leftlen, " Command Queuing: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->start_motor;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->start_motor;
	} else {
		word = ep_38C1600->start_motor;
	}
	len = asc_prt_line(cp, leftlen, " Start Motor: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* SDTR/Ultra flags are only stored in the 3550 EEPROM layout. */
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
		ASC_PRT_NEXT();
		for (i = 0; i <= ADV_MAX_TID; i++) {
			len = asc_prt_line(cp, leftlen, " %c",
					   (ep_3550->sdtr_able &
					    ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
			ASC_PRT_NEXT();
		}
		len = asc_prt_line(cp, leftlen, "\n");
		ASC_PRT_NEXT();
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen, " Ultra Transfer: ");
		ASC_PRT_NEXT();
		for (i = 0; i <= ADV_MAX_TID; i++) {
			len = asc_prt_line(cp, leftlen, " %c",
					   (ep_3550->ultra_able &
					    ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
			ASC_PRT_NEXT();
		}
		len = asc_prt_line(cp, leftlen, "\n");
		ASC_PRT_NEXT();
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->wdtr_able;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->wdtr_able;
	} else {
		word = ep_38C1600->wdtr_able;
	}
	len = asc_prt_line(cp, leftlen, " Wide Transfer: ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 ||
	    adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) {
		len = asc_prt_line(cp, leftlen,
				   " Synchronous Transfer Speed (Mhz):\n  ");
		ASC_PRT_NEXT();
		for (i = 0; i <= ADV_MAX_TID; i++) {
			char *speed_str;

			/*
			 * Four 16-bit registers hold 4-bit speed codes for
			 * four targets each; reload every fourth iteration.
			 */
			if (i == 0) {
				sdtr_speed = adv_dvc_varp->sdtr_speed1;
			} else if (i == 4) {
				sdtr_speed = adv_dvc_varp->sdtr_speed2;
			} else if (i == 8) {
				sdtr_speed = adv_dvc_varp->sdtr_speed3;
			} else if (i == 12) {
				sdtr_speed = adv_dvc_varp->sdtr_speed4;
			}
			switch (sdtr_speed & ADV_MAX_TID) {
			case 0:
				speed_str = "Off";
				break;
			case 1:
				speed_str = "  5";
				break;
			case 2:
				speed_str = " 10";
				break;
			case 3:
				speed_str = " 20";
				break;
			case 4:
				speed_str = " 40";
				break;
			case 5:
				speed_str = " 80";
				break;
			default:
				speed_str = "Unk";
				break;
			}
			len = asc_prt_line(cp, leftlen, "%X:%s ", i, speed_str);
			ASC_PRT_NEXT();
			/* Wrap after the first 8 targets. */
			if (i == 7) {
				len = asc_prt_line(cp, leftlen, "\n  ");
				ASC_PRT_NEXT();
			}
			sdtr_speed >>= 4;
		}
		len = asc_prt_line(cp, leftlen, "\n");
		ASC_PRT_NEXT();
	}

	return totlen;
}

/*
 * asc_prt_driver_conf()
 *
 * Print Linux driver and Scsi_Host configuration for this host.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_driver_conf(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	int chip_scsi_id;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " host_busy %u, last_reset %u, max_id %u, max_lun %u, max_channel %u\n",
			   shost->host_busy, shost->last_reset, shost->max_id,
			   shost->max_lun, shost->max_channel);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n",
			   shost->unique_id, shost->can_queue, shost->this_id,
			   shost->sg_tablesize, shost->cmd_per_lun);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " unchecked_isa_dma %d, use_clustering %d\n",
			   shost->unchecked_isa_dma, shost->use_clustering);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " flags 0x%x, last_reset 0x%x, jiffies 0x%x, asc_n_io_port 0x%x\n",
			   boardp->flags, boardp->last_reset, jiffies,
			   boardp->asc_n_io_port);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " io_port 0x%x\n", shost->io_port);
	ASC_PRT_NEXT();

	/*
	 * NOTE(review): chip_scsi_id is computed here but never printed or
	 * otherwise used - this is dead code and a candidate for removal.
	 */
	if (ASC_NARROW_BOARD(boardp)) {
		chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
	} else {
		chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
	}

	return totlen;
}

/*
 * asc_prt_asc_board_info()
 *
 * Print dynamic board configuration information.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
*/
static int asc_prt_asc_board_info(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int chip_scsi_id;
	int leftlen;
	int totlen;
	int len;
	ASC_DVC_VAR *v;
	ASC_DVC_CFG *c;
	int i;
	int renegotiate = 0;	/* set when a pending SDTR renegotiation is seen */

	v = &boardp->dvc_var.asc_dvc_var;
	c = &boardp->dvc_cfg.asc_dvc_cfg;
	chip_scsi_id = c->chip_scsi_id;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " chip_version %u, mcode_date 0x%x, "
			   "mcode_version 0x%x, err_code %u\n",
			   c->chip_version, c->mcode_date,
			   c->mcode_version, v->err_code);
	ASC_PRT_NEXT();

	/* Current number of commands waiting for the host. */
	len = asc_prt_line(cp, leftlen,
			   " Total Command Pending: %d\n", v->cur_total_qng);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Command Queuing:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		/* Skip the adapter's own ID and targets not found at init. */
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Current number of commands waiting for a device. */
	len = asc_prt_line(cp, leftlen, " Command Queue Pending:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%u",
				   i, v->cur_dvc_qng[i]);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Current limit on number of commands that can be sent to a device. */
	len = asc_prt_line(cp, leftlen, " Command Queue Limit:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%u",
				   i, v->max_dvc_qng[i]);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Indicate whether the device has returned queue full status. */
	len = asc_prt_line(cp, leftlen, " Command Queue Full:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) {
			len = asc_prt_line(cp, leftlen, " %X:Y-%d",
					   i, boardp->queue_full_cnt[i]);
		} else {
			len = asc_prt_line(cp, leftlen, " %X:N", i);
		}
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Detail the negotiated transfer period/offset per SDTR-enabled TID. */
	for (i = 0; i <= ASC_MAX_TID; i++) {
		uchar syn_period_ix;

		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) ||
		    ((v->init_sdtr & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		len = asc_prt_line(cp, leftlen, " %X:", i);
		ASC_PRT_NEXT();

		/* Offset 0 means the target runs asynchronously. */
		if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) {
			len = asc_prt_line(cp, leftlen, " Asynchronous");
			ASC_PRT_NEXT();
		} else {
			/* High nibble indexes the period table. */
			syn_period_ix =
			    (boardp->sdtr_data[i] >> 4) &
			    (v->max_sdtr_index - 1);

			len = asc_prt_line(cp, leftlen,
					   " Transfer Period Factor: %d (%d.%d Mhz),",
					   v->sdtr_period_tbl[syn_period_ix],
					   250 / v->sdtr_period_tbl[syn_period_ix],
					   ASC_TENTHS(250,
						      v->sdtr_period_tbl
						      [syn_period_ix]));
			ASC_PRT_NEXT();

			len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d",
					   boardp->sdtr_data[i] &
					   ASC_SYN_MAX_OFFSET);
			ASC_PRT_NEXT();
		}

		if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
			/* '*' marks a renegotiation pending for this target. */
			len = asc_prt_line(cp, leftlen, "*\n");
			renegotiate = 1;
		} else {
			len = asc_prt_line(cp, leftlen, "\n");
		}
		ASC_PRT_NEXT();
	}

	if (renegotiate) {
		len = asc_prt_line(cp, leftlen,
				   " * = Re-negotiation pending before next command.\n");
		ASC_PRT_NEXT();
	}

	return totlen;
}

/*
 * asc_prt_adv_board_info()
 *
 * Print dynamic board configuration information.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
*/
static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	int i;
	ADV_DVC_VAR *v;
	ADV_DVC_CFG *c;
	AdvPortAddr iop_base;
	ushort chip_scsi_id;
	ushort lramword;
	uchar lrambyte;
	ushort tagqng_able;
	ushort sdtr_able, wdtr_able;
	ushort wdtr_done, sdtr_done;
	ushort period = 0;
	int renegotiate = 0;	/* set when a pending renegotiation is seen */

	v = &boardp->dvc_var.adv_dvc_var;
	c = &boardp->dvc_cfg.adv_dvc_cfg;
	iop_base = v->iop_base;
	chip_scsi_id = v->chip_scsi_id;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " iop_base 0x%lx, cable_detect: %X, err_code %u\n",
			   v->iop_base,
			   AdvReadWordRegister(iop_base,
					       IOPW_SCSI_CFG1) & CABLE_DETECT,
			   v->err_code);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " chip_version %u, mcode_date 0x%x, "
			   "mcode_version 0x%x\n",
			   c->chip_version, c->mcode_date, c->mcode_version);
	ASC_PRT_NEXT();

	/* The following tables live in the chip's local RAM (microcode). */
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	len = asc_prt_line(cp, leftlen, " Queuing Enabled:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/* Skip the adapter's own ID and targets not found at init. */
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (tagqng_able & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Queue Limit:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i,
				lrambyte);

		len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Command Pending:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i,
				lrambyte);

		len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	len = asc_prt_line(cp, leftlen, " Wide Enabled:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (wdtr_able & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done);
	len = asc_prt_line(cp, leftlen, " Transfer Bit Width:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		AdvReadWordLram(iop_base,
				ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
				lramword);

		/* Bit 15 of the handshake config word selects 16-bit wide. */
		len = asc_prt_line(cp, leftlen, " %X:%d",
				   i, (lramword & 0x8000) ? 16 : 8);
		ASC_PRT_NEXT();

		if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) &&
		    (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
			/* '*' marks a WDTR renegotiation pending. */
			len = asc_prt_line(cp, leftlen, "*");
			ASC_PRT_NEXT();
			renegotiate = 1;
		}
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	len = asc_prt_line(cp, leftlen, " Synchronous Enabled:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done);
	for (i = 0; i <= ADV_MAX_TID; i++) {

		AdvReadWordLram(iop_base,
				ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
				lramword);
		lramword &= ~0x8000;	/* strip the wide bit */

		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) ||
		    ((sdtr_able & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		len = asc_prt_line(cp, leftlen, " %X:", i);
		ASC_PRT_NEXT();

		if ((lramword & 0x1F) == 0) {	/* Check for REQ/ACK Offset 0. */
			len = asc_prt_line(cp, leftlen, " Asynchronous");
			ASC_PRT_NEXT();
		} else {
			len = asc_prt_line(cp, leftlen,
					   " Transfer Period Factor: ");
			ASC_PRT_NEXT();

			if ((lramword & 0x1F00) == 0x1100) {	/* 80 Mhz */
				len = asc_prt_line(cp, leftlen,
						   "9 (80.0 Mhz),");
				ASC_PRT_NEXT();
			} else if ((lramword & 0x1F00) == 0x1000) {	/* 40 Mhz */
				len = asc_prt_line(cp, leftlen,
						   "10 (40.0 Mhz),");
				ASC_PRT_NEXT();
			} else {	/* 20 Mhz or below. */

				period = (((lramword >> 8) * 25) + 50) / 4;

				if (period == 0) {	/* Should never happen. */
					/*
					 * NOTE(review): this format string has
					 * a %d but no matching argument, which
					 * is undefined behavior if ever hit;
					 * it should pass 'period'.
					 */
					len = asc_prt_line(cp, leftlen,
							   "%d (? Mhz), ");
					ASC_PRT_NEXT();
				} else {
					len = asc_prt_line(cp, leftlen,
							   "%d (%d.%d Mhz),",
							   period, 250 / period,
							   ASC_TENTHS(250,
								      period));
					ASC_PRT_NEXT();
				}
			}

			len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d",
					   lramword & 0x1F);
			ASC_PRT_NEXT();
		}

		if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
			/* '*' marks an SDTR renegotiation pending. */
			len = asc_prt_line(cp, leftlen, "*\n");
			renegotiate = 1;
		} else {
			len = asc_prt_line(cp, leftlen, "\n");
		}
		ASC_PRT_NEXT();
	}

	if (renegotiate) {
		len = asc_prt_line(cp, leftlen,
				   " * = Re-negotiation pending before next command.\n");
		ASC_PRT_NEXT();
	}

	return totlen;
}

/*
 * asc_proc_copy()
 *
 * Copy proc information to a read buffer taking into account the current
 * read offset in the file and the remaining space in the read buffer.
 */
static int
asc_proc_copy(off_t advoffset, off_t offset, char *curbuf, int leftlen,
	      char *cp, int cplen)
{
	int cnt = 0;

	ASC_DBG(2, "offset %d, advoffset %d, cplen %d\n",
		(unsigned)offset, (unsigned)advoffset, cplen);
	if (offset <= advoffset) {
		/* Read offset below current offset, copy everything. */
		cnt = min(cplen, leftlen);
		ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n",
			(ulong)curbuf, (ulong)cp, cnt);
		memcpy(curbuf, cp, cnt);
	} else if (offset < advoffset + cplen) {
		/* Read offset within current range, partial copy. */
		cnt = (advoffset + cplen) - offset;
		cp = (cp + cplen) - cnt;
		cnt = min(cnt, leftlen);
		ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n",
			(ulong)curbuf, (ulong)cp, cnt);
		memcpy(curbuf, cp, cnt);
	}
	return cnt;
}

#ifdef ADVANSYS_STATS
/*
 * asc_prt_board_stats()
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
*/ static int asc_prt_board_stats(struct Scsi_Host *shost, char *cp, int cplen) { struct asc_board *boardp = shost_priv(shost); struct asc_stats *s = &boardp->asc_stats; int leftlen = cplen; int len, totlen = 0; len = asc_prt_line(cp, leftlen, "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n", shost->host_no); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " queuecommand %lu, reset %lu, biosparam %lu, interrupt %lu\n", s->queuecommand, s->reset, s->biosparam, s->interrupt); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " callback %lu, done %lu, build_error %lu, build_noreq %lu, build_nosg %lu\n", s->callback, s->done, s->build_error, s->adv_build_noreq, s->adv_build_nosg); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " exe_noerror %lu, exe_busy %lu, exe_error %lu, exe_unknown %lu\n", s->exe_noerror, s->exe_busy, s->exe_error, s->exe_unknown); ASC_PRT_NEXT(); /* * Display data transfer statistics. */ if (s->xfer_cnt > 0) { len = asc_prt_line(cp, leftlen, " xfer_cnt %lu, xfer_elem %lu, ", s->xfer_cnt, s->xfer_elem); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, "xfer_bytes %lu.%01lu kb\n", s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2)); ASC_PRT_NEXT(); /* Scatter gather transfer statistics */ len = asc_prt_line(cp, leftlen, " avg_num_elem %lu.%01lu, ", s->xfer_elem / s->xfer_cnt, ASC_TENTHS(s->xfer_elem, s->xfer_cnt)); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, "avg_elem_size %lu.%01lu kb, ", (s->xfer_sect / 2) / s->xfer_elem, ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem)); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, "avg_xfer_size %lu.%01lu kb\n", (s->xfer_sect / 2) / s->xfer_cnt, ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt)); ASC_PRT_NEXT(); } return totlen; } #endif /* ADVANSYS_STATS */ /* * advansys_proc_info() - /proc/scsi/advansys/{0,1,2,3,...} * * *buffer: I/O buffer * **start: if inout == FALSE pointer into buffer where user read should start * offset: current offset into a /proc/scsi/advansys/[0...] 
file * length: length of buffer * hostno: Scsi_Host host_no * inout: TRUE - user is writing; FALSE - user is reading * * Return the number of bytes read from or written to a * /proc/scsi/advansys/[0...] file. * * Note: This function uses the per board buffer 'prtbuf' which is * allocated when the board is initialized in advansys_detect(). The * buffer is ASC_PRTBUF_SIZE bytes. The function asc_proc_copy() is * used to write to the buffer. The way asc_proc_copy() is written * if 'prtbuf' is too small it will not be overwritten. Instead the * user just won't get all the available statistics. */ static int advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length, int inout) { struct asc_board *boardp = shost_priv(shost); char *cp; int cplen; int cnt; int totcnt; int leftlen; char *curbuf; off_t advoffset; ASC_DBG(1, "begin\n"); /* * User write not supported. */ if (inout == TRUE) return -ENOSYS; /* * User read of /proc/scsi/advansys/[0...] file. */ /* Copy read data starting at the beginning of the buffer. */ *start = buffer; curbuf = buffer; advoffset = 0; totcnt = 0; leftlen = length; /* * Get board configuration information. * * advansys_info() returns the board string from its own static buffer. */ cp = (char *)advansys_info(shost); strcat(cp, "\n"); cplen = strlen(cp); /* Copy board information. */ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); totcnt += cnt; leftlen -= cnt; if (leftlen == 0) { ASC_DBG(1, "totcnt %d\n", totcnt); return totcnt; } advoffset += cplen; curbuf += cnt; /* * Display Wide Board BIOS Information. 
*/ if (!ASC_NARROW_BOARD(boardp)) { cp = boardp->prtbuf; cplen = asc_prt_adv_bios(shost, cp, ASC_PRTBUF_SIZE); BUG_ON(cplen >= ASC_PRTBUF_SIZE); cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); totcnt += cnt; leftlen -= cnt; if (leftlen == 0) { ASC_DBG(1, "totcnt %d\n", totcnt); return totcnt; } advoffset += cplen; curbuf += cnt; } /* * Display driver information for each device attached to the board. */ cp = boardp->prtbuf; cplen = asc_prt_board_devices(shost, cp, ASC_PRTBUF_SIZE); BUG_ON(cplen >= ASC_PRTBUF_SIZE); cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); totcnt += cnt; leftlen -= cnt; if (leftlen == 0) { ASC_DBG(1, "totcnt %d\n", totcnt); return totcnt; } advoffset += cplen; curbuf += cnt; /* * Display EEPROM configuration for the board. */ cp = boardp->prtbuf; if (ASC_NARROW_BOARD(boardp)) { cplen = asc_prt_asc_board_eeprom(shost, cp, ASC_PRTBUF_SIZE); } else { cplen = asc_prt_adv_board_eeprom(shost, cp, ASC_PRTBUF_SIZE); } BUG_ON(cplen >= ASC_PRTBUF_SIZE); cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); totcnt += cnt; leftlen -= cnt; if (leftlen == 0) { ASC_DBG(1, "totcnt %d\n", totcnt); return totcnt; } advoffset += cplen; curbuf += cnt; /* * Display driver configuration and information for the board. */ cp = boardp->prtbuf; cplen = asc_prt_driver_conf(shost, cp, ASC_PRTBUF_SIZE); BUG_ON(cplen >= ASC_PRTBUF_SIZE); cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); totcnt += cnt; leftlen -= cnt; if (leftlen == 0) { ASC_DBG(1, "totcnt %d\n", totcnt); return totcnt; } advoffset += cplen; curbuf += cnt; #ifdef ADVANSYS_STATS /* * Display driver statistics for the board. 
*/ cp = boardp->prtbuf; cplen = asc_prt_board_stats(shost, cp, ASC_PRTBUF_SIZE); BUG_ON(cplen >= ASC_PRTBUF_SIZE); cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); totcnt += cnt; leftlen -= cnt; if (leftlen == 0) { ASC_DBG(1, "totcnt %d\n", totcnt); return totcnt; } advoffset += cplen; curbuf += cnt; #endif /* ADVANSYS_STATS */ /* * Display Asc Library dynamic configuration information * for the board. */ cp = boardp->prtbuf; if (ASC_NARROW_BOARD(boardp)) { cplen = asc_prt_asc_board_info(shost, cp, ASC_PRTBUF_SIZE); } else { cplen = asc_prt_adv_board_info(shost, cp, ASC_PRTBUF_SIZE); } BUG_ON(cplen >= ASC_PRTBUF_SIZE); cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); totcnt += cnt; leftlen -= cnt; if (leftlen == 0) { ASC_DBG(1, "totcnt %d\n", totcnt); return totcnt; } advoffset += cplen; curbuf += cnt; ASC_DBG(1, "totcnt %d\n", totcnt); return totcnt; } #endif /* CONFIG_PROC_FS */ static void asc_scsi_done(struct scsi_cmnd *scp) { scsi_dma_unmap(scp); ASC_STATS(scp->device->host, done); scp->scsi_done(scp); } static void AscSetBank(PortAddr iop_base, uchar bank) { uchar val; val = AscGetChipControl(iop_base) & (~ (CC_SINGLE_STEP | CC_TEST | CC_DIAG | CC_SCSI_RESET | CC_CHIP_RESET)); if (bank == 1) { val |= CC_BANK_ONE; } else if (bank == 2) { val |= CC_DIAG | CC_BANK_ONE; } else { val &= ~CC_BANK_ONE; } AscSetChipControl(iop_base, val); } static void AscSetChipIH(PortAddr iop_base, ushort ins_code) { AscSetBank(iop_base, 1); AscWriteChipIH(iop_base, ins_code); AscSetBank(iop_base, 0); } static int AscStartChip(PortAddr iop_base) { AscSetChipControl(iop_base, 0); if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) { return (0); } return (1); } static int AscStopChip(PortAddr iop_base) { uchar cc_val; cc_val = AscGetChipControl(iop_base) & (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG)); AscSetChipControl(iop_base, (uchar)(cc_val | CC_HALT)); AscSetChipIH(iop_base, INS_HALT); AscSetChipIH(iop_base, INS_RFLAG_WTM); if 
((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) { return (0); } return (1); } static int AscIsChipHalted(PortAddr iop_base) { if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) { if ((AscGetChipControl(iop_base) & CC_HALT) != 0) { return (1); } } return (0); } static int AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc) { PortAddr iop_base; int i = 10; iop_base = asc_dvc->iop_base; while ((AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE) && (i-- > 0)) { mdelay(100); } AscStopChip(iop_base); AscSetChipControl(iop_base, CC_CHIP_RESET | CC_SCSI_RESET | CC_HALT); udelay(60); AscSetChipIH(iop_base, INS_RFLAG_WTM); AscSetChipIH(iop_base, INS_HALT); AscSetChipControl(iop_base, CC_CHIP_RESET | CC_HALT); AscSetChipControl(iop_base, CC_HALT); mdelay(200); AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT); AscSetChipStatus(iop_base, 0); return (AscIsChipHalted(iop_base)); } static int AscFindSignature(PortAddr iop_base) { ushort sig_word; ASC_DBG(1, "AscGetChipSignatureByte(0x%x) 0x%x\n", iop_base, AscGetChipSignatureByte(iop_base)); if (AscGetChipSignatureByte(iop_base) == (uchar)ASC_1000_ID1B) { ASC_DBG(1, "AscGetChipSignatureWord(0x%x) 0x%x\n", iop_base, AscGetChipSignatureWord(iop_base)); sig_word = AscGetChipSignatureWord(iop_base); if ((sig_word == (ushort)ASC_1000_ID0W) || (sig_word == (ushort)ASC_1000_ID0W_FIX)) { return (1); } } return (0); } static void AscEnableInterrupt(PortAddr iop_base) { ushort cfg; cfg = AscGetChipCfgLsw(iop_base); AscSetChipCfgLsw(iop_base, cfg | ASC_CFG0_HOST_INT_ON); } static void AscDisableInterrupt(PortAddr iop_base) { ushort cfg; cfg = AscGetChipCfgLsw(iop_base); AscSetChipCfgLsw(iop_base, cfg & (~ASC_CFG0_HOST_INT_ON)); } static uchar AscReadLramByte(PortAddr iop_base, ushort addr) { unsigned char byte_data; unsigned short word_data; if (isodd_word(addr)) { AscSetChipLramAddr(iop_base, addr - 1); word_data = AscGetChipLramData(iop_base); byte_data = (word_data >> 8) & 0xFF; } else { AscSetChipLramAddr(iop_base, addr); word_data = 
AscGetChipLramData(iop_base);
		byte_data = word_data & 0xFF;
	}
	return byte_data;
}

/* Read one 16-bit word from LRAM at 'addr'. */
static ushort AscReadLramWord(PortAddr iop_base, ushort addr)
{
	ushort word_data;

	AscSetChipLramAddr(iop_base, addr);
	word_data = AscGetChipLramData(iop_base);
	return (word_data);
}

#if CC_VERY_LONG_SG_LIST
/*
 * Read a 32-bit value from LRAM at 'addr': two consecutive word reads,
 * low word first (the LRAM data port auto-increments the address
 * between accesses).
 */
static ASC_DCNT AscReadLramDWord(PortAddr iop_base, ushort addr)
{
	ushort val_low, val_high;
	ASC_DCNT dword_data;

	AscSetChipLramAddr(iop_base, addr);
	val_low = AscGetChipLramData(iop_base);
	val_high = AscGetChipLramData(iop_base);
	dword_data = ((ASC_DCNT) val_high << 16) | (ASC_DCNT) val_low;
	return (dword_data);
}
#endif /* CC_VERY_LONG_SG_LIST */

/* Fill 'words' LRAM words starting at 's_addr' with 'set_wval'. */
static void
AscMemWordSetLram(PortAddr iop_base, ushort s_addr, ushort set_wval, int words)
{
	int i;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < words; i++) {
		AscSetChipLramData(iop_base, set_wval);
	}
}

/* Write one 16-bit word to LRAM at 'addr'. */
static void AscWriteLramWord(PortAddr iop_base, ushort addr, ushort word_val)
{
	AscSetChipLramAddr(iop_base, addr);
	AscSetChipLramData(iop_base, word_val);
}

/*
 * Write one byte to word-addressable LRAM with a read-modify-write of
 * the containing word: an odd address replaces the high byte, an even
 * address the low byte.
 */
static void AscWriteLramByte(PortAddr iop_base, ushort addr, uchar byte_val)
{
	ushort word_data;

	if (isodd_word(addr)) {
		addr--;
		word_data = AscReadLramWord(iop_base, addr);
		word_data &= 0x00FF;
		word_data |= (((ushort)byte_val << 8) & 0xFF00);
	} else {
		word_data = AscReadLramWord(iop_base, addr);
		word_data &= 0xFF00;
		word_data |= ((ushort)byte_val & 0x00FF);
	}
	AscWriteLramWord(iop_base, addr, word_data);
}

/*
 * Copy 2 bytes to LRAM.
 *
 * The source data is assumed to be in little-endian order in memory
 * and is maintained in little-endian order when written to LRAM.
 */
static void
AscMemWordCopyPtrToLram(PortAddr iop_base, ushort s_addr,
			const uchar *s_buffer, int words)
{
	int i;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 2 * words; i += 2) {
		/*
		 * On a little-endian system the second argument below
		 * produces a little-endian ushort which is written to
		 * LRAM in little-endian order.
On a big-endian system
		 * the second argument produces a big-endian ushort which
		 * is "transparently" byte-swapped by outpw() and written
		 * in little-endian order to LRAM.
		 */
		outpw(iop_base + IOP_RAM_DATA,
		      ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]);
	}
}

/*
 * Copy 4 bytes to LRAM.
 *
 * The source data is assumed to be in little-endian order in memory
 * and is maintained in little-endian order when written to LRAM.
 */
static void
AscMemDWordCopyPtrToLram(PortAddr iop_base, ushort s_addr,
			 uchar *s_buffer, int dwords)
{
	int i;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 4 * dwords; i += 4) {
		/* Each 32-bit value is written low word first. */
		outpw(iop_base + IOP_RAM_DATA, ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]);	/* LSW */
		outpw(iop_base + IOP_RAM_DATA, ((ushort)s_buffer[i + 3] << 8) | s_buffer[i + 2]);	/* MSW */
	}
}

/*
 * Copy 2 bytes from LRAM.
 *
 * The source data is assumed to be in little-endian order in LRAM
 * and is maintained in little-endian order when written to memory.
 */
static void
AscMemWordCopyPtrFromLram(PortAddr iop_base, ushort s_addr,
			  uchar *d_buffer, int words)
{
	int i;
	ushort word;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 2 * words; i += 2) {
		word = inpw(iop_base + IOP_RAM_DATA);
		d_buffer[i] = word & 0xff;
		d_buffer[i + 1] = (word >> 8) & 0xff;
	}
}

/*
 * Sum 'words' LRAM words starting at 's_addr'. Used for microcode
 * checksum computation and verification.
 */
static ASC_DCNT AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words)
{
	ASC_DCNT sum;
	int i;

	sum = 0L;
	for (i = 0; i < words; i++, s_addr += 2) {
		sum += AscReadLramWord(iop_base, s_addr);
	}
	return (sum);
}

/*
 * Initialize the LRAM request queue blocks: zero the queue area, then
 * link the per-request queue blocks into a doubly linked chain via
 * their FWD/BWD/QNO bytes. Returns a warning code (always 0 here).
 */
static ushort AscInitLram(ASC_DVC_VAR *asc_dvc)
{
	uchar i;
	ushort s_addr;
	PortAddr iop_base;
	ushort warn_code;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	/* Zero the whole queue region (each queue block is 64 bytes). */
	AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0,
			  (ushort)(((int)(asc_dvc->max_total_qng + 2 + 1) *
				    64) >> 1));
	i = ASC_MIN_ACTIVE_QNO;
	s_addr = ASC_QADR_BEG + ASC_QBLK_SIZE;
	/* First block: forward to next, backward wraps to the last block. */
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
			 (uchar)(i + 1));
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
			 (uchar)(asc_dvc->max_total_qng));
AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
			 (uchar)i);
	i++;
	s_addr += ASC_QBLK_SIZE;
	/* Link the intermediate queue blocks into the chain. */
	for (; i < asc_dvc->max_total_qng; i++, s_addr += ASC_QBLK_SIZE) {
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + ASC_SCSIQ_B_FWD),
				 (uchar)(i + 1));
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + ASC_SCSIQ_B_BWD),
				 (uchar)(i - 1));
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + ASC_SCSIQ_B_QNO),
				 (uchar)i);
	}
	/* Terminate the chain at the last usable queue block. */
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
			 (uchar)ASC_QLINK_END);
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
			 (uchar)(asc_dvc->max_total_qng - 1));
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
			 (uchar)asc_dvc->max_total_qng);
	i++;
	s_addr += ASC_QBLK_SIZE;
	/* Trailing spare blocks are self-linked (point at themselves). */
	for (; i <= (uchar)(asc_dvc->max_total_qng + 3);
	     i++, s_addr += ASC_QBLK_SIZE) {
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_FWD),
				 i);
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_BWD),
				 i);
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_QNO),
				 i);
	}
	return warn_code;
}

/*
 * Copy the microcode image 'mcode_buf' ('mcode_size' bytes) into LRAM
 * at 's_addr', record the code-section checksum and total size in the
 * microcode variables, and return the sum of all words written. The
 * caller compares that sum against the expected checksum from the
 * firmware image header.
 */
static ASC_DCNT
AscLoadMicroCode(PortAddr iop_base, ushort s_addr,
		 const uchar *mcode_buf, ushort mcode_size)
{
	ASC_DCNT chksum;
	ushort mcode_word_size;
	ushort mcode_chksum;

	/* Write the microcode buffer starting at LRAM address 0.
*/
	mcode_word_size = (ushort)(mcode_size >> 1);
	AscMemWordSetLram(iop_base, s_addr, 0, mcode_word_size);
	AscMemWordCopyPtrToLram(iop_base, s_addr, mcode_buf, mcode_word_size);

	chksum = AscMemSumLramWord(iop_base, s_addr, mcode_word_size);
	ASC_DBG(1, "chksum 0x%lx\n", (ulong)chksum);
	/* Checksum only the code section, which starts at ASC_CODE_SEC_BEG. */
	mcode_chksum = (ushort)AscMemSumLramWord(iop_base,
						 (ushort)ASC_CODE_SEC_BEG,
						 (ushort)((mcode_size -
							   s_addr -
							   (ushort)
							   ASC_CODE_SEC_BEG) /
							  2));
	ASC_DBG(1, "mcode_chksum 0x%lx\n", (ulong)mcode_chksum);
	AscWriteLramWord(iop_base, ASCV_MCODE_CHKSUM_W, mcode_chksum);
	AscWriteLramWord(iop_base, ASCV_MCODE_SIZE_W, mcode_size);
	return chksum;
}

/*
 * Initialize the free/done queue head and tail variables shared with
 * the RISC, clear the error/halt/busy state bytes, and zero the first
 * 32 queue words in LRAM.
 */
static void AscInitQLinkVar(ASC_DVC_VAR *asc_dvc)
{
	PortAddr iop_base;
	int i;
	ushort lram_addr;

	iop_base = asc_dvc->iop_base;
	AscPutRiscVarFreeQHead(iop_base, 1);
	AscPutRiscVarDoneQTail(iop_base, asc_dvc->max_total_qng);
	AscPutVarFreeQHead(iop_base, 1);
	AscPutVarDoneQTail(iop_base, asc_dvc->max_total_qng);
	AscWriteLramByte(iop_base, ASCV_BUSY_QHEAD_B,
			 (uchar)((int)asc_dvc->max_total_qng + 1));
	AscWriteLramByte(iop_base, ASCV_DISC1_QHEAD_B,
			 (uchar)((int)asc_dvc->max_total_qng + 2));
	AscWriteLramByte(iop_base, (ushort)ASCV_TOTAL_READY_Q_B,
			 asc_dvc->max_total_qng);
	AscWriteLramWord(iop_base, ASCV_ASCDVC_ERR_CODE_W, 0);
	AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
	AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
	AscWriteLramByte(iop_base, ASCV_SCSIBUSY_B, 0);
	AscWriteLramByte(iop_base, ASCV_WTM_FLAG_B, 0);
	AscPutQDoneInProgress(iop_base, 0);
	lram_addr = ASC_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2) {
		AscWriteLramWord(iop_base, lram_addr, 0);
	}
}

/*
 * Program the microcode operating variables (per-target SDTR data,
 * queue links, disconnect enable, overrun buffer DMA address/size),
 * set the RISC program counter, and start the chip. Returns 0 on
 * success or a warning/error code on failure.
 *
 * NOTE(review): 'warn_code' is a ushort but is assigned -ENOMEM on DMA
 * mapping failure below; the negative errno is truncated to a large
 * positive value. Verify how callers interpret this return.
 */
static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
{
	int i;
	ushort warn_code;
	PortAddr iop_base;
	ASC_PADDR phy_addr;
	ASC_DCNT phy_size;
	struct asc_board *board = asc_dvc_to_board(asc_dvc);

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	for (i = 0; i <= ASC_MAX_TID; i++) {
		AscPutMCodeInitSDTRAtID(iop_base, i,
					asc_dvc->cfg->sdtr_period_offset[i]);
	}

	AscInitQLinkVar(asc_dvc);
	AscWriteLramByte(iop_base,
ASCV_DISC_ENABLE_B,
			 asc_dvc->cfg->disc_enable);
	AscWriteLramByte(iop_base, ASCV_HOSTSCSI_ID_B,
			 ASC_TID_TO_TARGET_ID(asc_dvc->cfg->chip_scsi_id));

	/* Ensure overrun buffer is aligned on an 8 byte boundary. */
	BUG_ON((unsigned long)asc_dvc->overrun_buf & 7);
	asc_dvc->overrun_dma = dma_map_single(board->dev, asc_dvc->overrun_buf,
					      ASC_OVERRUN_BSIZE,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(board->dev, asc_dvc->overrun_dma)) {
		warn_code = -ENOMEM;
		goto err_dma_map;
	}
	/* Tell the microcode where the overrun buffer lives and its size. */
	phy_addr = cpu_to_le32(asc_dvc->overrun_dma);
	AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D,
				 (uchar *)&phy_addr, 1);
	phy_size = cpu_to_le32(ASC_OVERRUN_BSIZE);
	AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_BSIZE_D,
				 (uchar *)&phy_size, 1);

	asc_dvc->cfg->mcode_date =
	    AscReadLramWord(iop_base, (ushort)ASCV_MC_DATE_W);
	asc_dvc->cfg->mcode_version =
	    AscReadLramWord(iop_base, (ushort)ASCV_MC_VER_W);

	/* Point the RISC at the microcode entry point and start it. */
	AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
	if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
		asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
		warn_code = UW_ERR;
		goto err_mcode_start;
	}
	if (AscStartChip(iop_base) != 1) {
		asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
		warn_code = UW_ERR;
		goto err_mcode_start;
	}

	return warn_code;

err_mcode_start:
	dma_unmap_single(board->dev, asc_dvc->overrun_dma,
			 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
err_dma_map:
	asc_dvc->overrun_dma = 0;
	return warn_code;
}

/*
 * Full initialization of a narrow (ASC-1000) board: optional SCSI bus
 * reset, chip signature check, LRAM queue setup, microcode download
 * from the "advansys/mcode.bin" firmware image (with checksum
 * verification), then microcode variable setup and interrupt enable.
 * Returns a warning code, UW_ERR, or a negative errno from the
 * firmware loader.
 */
static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
{
	const struct firmware *fw;
	const char fwname[] = "advansys/mcode.bin";
	int err;
	unsigned long chksum;
	ushort warn_code;
	PortAddr iop_base;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	if ((asc_dvc->dvc_cntl & ASC_CNTL_RESET_SCSI) &&
	    !(asc_dvc->init_state & ASC_INIT_RESET_SCSI_DONE)) {
		AscResetChipAndScsiBus(asc_dvc);
		mdelay(asc_dvc->scsi_reset_wait * 1000);	/* XXX: msleep?
*/
	}

	asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC;
	if (asc_dvc->err_code != 0)
		return UW_ERR;

	if (!AscFindSignature(asc_dvc->iop_base)) {
		asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
		return warn_code;
	}
	AscDisableInterrupt(iop_base);

	warn_code |= AscInitLram(asc_dvc);
	if (asc_dvc->err_code != 0)
		return UW_ERR;

	err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
		return err;
	}
	if (fw->size < 4) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
		return -EINVAL;
	}
	/* The first 4 bytes of the image hold the expected checksum (LE). */
	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
		 (fw->data[1] << 8) | fw->data[0];
	ASC_DBG(1, "_asc_mcode_chksum 0x%lx\n", (ulong)chksum);
	if (AscLoadMicroCode(iop_base, 0, &fw->data[4],
			     fw->size - 4) != chksum) {
		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
		release_firmware(fw);
		return warn_code;
	}
	release_firmware(fw);
	warn_code |= AscInitMicroCodeVar(asc_dvc);
	if (!asc_dvc->overrun_dma)
		return warn_code;
	asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC;
	AscEnableInterrupt(iop_base);
	return warn_code;
}

/*
 * Load the Microcode
 *
 * Write the microcode image to RISC memory starting at address 0.
 *
 * The microcode is stored compressed in the following format:
 *
 *  254 word (508 byte) table indexed by byte code followed
 *  by the following byte codes:
 *
 *    1-Byte Code:
 *      00: Emit word 0 in table.
 *      01: Emit word 1 in table.
 *      .
 *      FD: Emit word 253 in table.
 *
 *    Multi-Byte Code:
 *      FE WW WW: (3 byte code) Word to emit is the next word WW WW.
 *      FF BB WW WW: (4 byte code) Emit BB count times next word WW WW.
*
 * Returns 0 or an error if the checksum doesn't match
 */
static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf,
			    int size, int memsize, int chksum)
{
	int i, j, end, len = 0;
	ADV_DCNT sum;

	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);

	/* The first 253*2 bytes are the decode table; byte codes follow. */
	for (i = 253 * 2; i < size; i++) {
		if (buf[i] == 0xff) {
			/* FF BB WW WW: emit word WW WW, BB times. */
			unsigned short word = (buf[i + 3] << 8) | buf[i + 2];
			for (j = 0; j < buf[i + 1]; j++) {
				AdvWriteWordAutoIncLram(iop_base, word);
				len += 2;
			}
			i += 3;
		} else if (buf[i] == 0xfe) {
			/* FE WW WW: emit the literal word WW WW once. */
			unsigned short word = (buf[i + 2] << 8) | buf[i + 1];
			AdvWriteWordAutoIncLram(iop_base, word);
			i += 2;
			len += 2;
		} else {
			/* 00..FD: emit the table word at this index. */
			unsigned int off = buf[i] * 2;
			unsigned short word = (buf[off + 1] << 8) | buf[off];
			AdvWriteWordAutoIncLram(iop_base, word);
			len += 2;
		}
	}
	end = len;

	/* Zero-fill the remainder of RISC memory. */
	while (len < memsize) {
		AdvWriteWordAutoIncLram(iop_base, 0);
		len += 2;
	}

	/* Verify the microcode checksum. */
	sum = 0;
	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);

	for (len = 0; len < end; len += 2) {
		sum += AdvReadWordAutoIncLram(iop_base);
	}

	if (sum != chksum)
		return ASC_IERR_MCODE_CHKSUM;

	return 0;
}

/*
 * Build the singly linked freelist of 16-byte-aligned ADV_CARR_T
 * carrier structures inside asc_dvc->carrier_buf, recording each
 * carrier's virtual and bus addresses as little-endian 32-bit values.
 */
static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc)
{
	ADV_CARR_T *carrp;
	ADV_SDCNT buf_size;
	ADV_PADDR carr_paddr;

	carrp = (ADV_CARR_T *) ADV_16BALIGN(asc_dvc->carrier_buf);
	asc_dvc->carr_freelist = NULL;

	/* Aligning forward may cost one carrier's worth of the buffer. */
	if (carrp == asc_dvc->carrier_buf) {
		buf_size = ADV_CARRIER_BUFSIZE;
	} else {
		buf_size = ADV_CARRIER_BUFSIZE - sizeof(ADV_CARR_T);
	}

	do {
		/* Get physical address of the carrier 'carrp'. */
		carr_paddr = cpu_to_le32(virt_to_bus(carrp));

		buf_size -= sizeof(ADV_CARR_T);

		carrp->carr_pa = carr_paddr;
		carrp->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(carrp));

		/*
		 * Insert the carrier at the beginning of the freelist.
		 */
		carrp->next_vpa =
			cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist));
		asc_dvc->carr_freelist = carrp;

		carrp++;
	} while (buf_size > 0);
}

/*
 * Send an idle command to the chip and wait for completion.
 *
 * Command completion is polled for once per microsecond.
*
 * The function can be called from anywhere including an interrupt handler.
 * But the function is not re-entrant, so it uses the DvcEnter/LeaveCritical()
 * functions to prevent reentrancy.
 *
 * Return Values:
 *   ADV_TRUE - command completed successfully
 *   ADV_FALSE - command failed
 *   ADV_ERROR - command timed out
 */
static int
AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc,
	       ushort idle_cmd, ADV_DCNT idle_cmd_parameter)
{
	int result;
	ADV_DCNT i, j;
	AdvPortAddr iop_base;

	iop_base = asc_dvc->iop_base;

	/*
	 * Clear the idle command status which is set by the microcode
	 * to a non-zero value to indicate when the command is completed.
	 * The non-zero result is one of the IDLE_CMD_STATUS_* values
	 */
	AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS, (ushort)0);

	/*
	 * Write the idle command value after the idle command parameter
	 * has been written to avoid a race condition. If the order is not
	 * followed, the microcode may process the idle command before the
	 * parameters have been written to LRAM.
	 */
	AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IDLE_CMD_PARAMETER,
				cpu_to_le32(idle_cmd_parameter));
	AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD, idle_cmd);

	/*
	 * Tickle the RISC to tell it to process the idle command.
	 */
	AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_B);
	if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
		/*
		 * Clear the tickle value. In the ASC-3550 the RISC flag
		 * command 'clr_tickle_b' does not work unless the host
		 * value is cleared.
		 */
		AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP);
	}

	/* Wait for up to 100 millisecond for the idle command to timeout. */
	for (i = 0; i < SCSI_WAIT_100_MSEC; i++) {
		/* Poll once each microsecond for command completion. */
		for (j = 0; j < SCSI_US_PER_MSEC; j++) {
			AdvReadWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS,
					result);
			if (result != 0)
				return result;
			udelay(1);
		}
	}

	BUG();		/* The idle command should never timeout. */
	return ADV_ERROR;
}

/*
 * Reset SCSI Bus and purge all outstanding requests.
*
 * Return Value:
 *      ADV_TRUE(1) -   All requests are purged and SCSI Bus is reset.
 *      ADV_FALSE(0) -  Microcode command failed.
 *      ADV_ERROR(-1) - Microcode command timed-out. Microcode or IC
 *                      may be hung which requires driver recovery.
 */
static int AdvResetSB(ADV_DVC_VAR *asc_dvc)
{
	int status;

	/*
	 * Send the SCSI Bus Reset idle start idle command which asserts
	 * the SCSI Bus Reset signal.
	 */
	status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_START, 0L);
	if (status != ADV_TRUE) {
		return status;
	}

	/*
	 * Delay for the specified SCSI Bus Reset hold time.
	 *
	 * The hold time delay is done on the host because the RISC has no
	 * microsecond accurate timer.
	 */
	udelay(ASC_SCSI_RESET_HOLD_TIME_US);

	/*
	 * Send the SCSI Bus Reset end idle command which de-asserts
	 * the SCSI Bus Reset signal and purges any pending requests.
	 */
	status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_END, 0L);
	if (status != ADV_TRUE) {
		return status;
	}

	mdelay(asc_dvc->scsi_reset_wait * 1000);	/* XXX: msleep? */

	return status;
}

/*
 * Initialize the ASC-3550.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code. If there are no warnings
 * then 0 is returned.
 *
 * Needed after initialization for error recovery.
 */
static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
{
	const struct firmware *fw;
	const char fwname[] = "advansys/3550.bin";
	AdvPortAddr iop_base;
	ushort warn_code;
	int begin_addr;
	int end_addr;
	ushort code_sum;
	int word;
	int i;
	int err;
	unsigned long chksum;
	ushort scsi_cfg1;
	uchar tid;
	ushort bios_mem[ASC_MC_BIOSLEN / 2];	/* BIOS RISC Memory 0x40-0x8F. */
	ushort wdtr_able = 0, sdtr_able, tagqng_able;
	uchar max_cmd[ADV_MAX_TID + 1];

	/* If there is already an error, don't continue. */
	if (asc_dvc->err_code != 0)
		return ADV_ERROR;

	/*
	 * The caller must set 'chip_type' to ADV_CHIP_ASC3550.
*/
	if (asc_dvc->chip_type != ADV_CHIP_ASC3550) {
		asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE;
		return ADV_ERROR;
	}

	warn_code = 0;
	iop_base = asc_dvc->iop_base;

	/*
	 * Save the RISC memory BIOS region before writing the microcode.
	 * The BIOS may already be loaded and using its RISC LRAM region
	 * so its region must be saved and restored.
	 *
	 * Note: This code makes the assumption, which is currently true,
	 * that a chip reset does not clear RISC LRAM.
	 */
	for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
		AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
				bios_mem[i]);
	}

	/*
	 * Save current per TID negotiated values.
	 */
	if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) {
		ushort bios_version, major, minor;

		bios_version =
		    bios_mem[(ASC_MC_BIOS_VERSION - ASC_MC_BIOSMEM) / 2];
		major = (bios_version >> 12) & 0xF;
		minor = (bios_version >> 8) & 0xF;
		if (major < 3 || (major == 3 && minor == 1)) {
			/* BIOS 3.1 and earlier location of 'wdtr_able' variable. */
			AdvReadWordLram(iop_base, 0x120, wdtr_able);
		} else {
			AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
		}
	}
	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				max_cmd[tid]);
	}

	err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
		return err;
	}
	if (fw->size < 4) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
		return -EINVAL;
	}
	/* The image's first 4 bytes hold the expected checksum (LE). */
	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
		 (fw->data[1] << 8) | fw->data[0];
	asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4],
					     fw->size - 4, ADV_3550_MEMSIZE,
					     chksum);
	release_firmware(fw);
	if (asc_dvc->err_code)
		return ADV_ERROR;

	/*
	 * Restore the RISC memory BIOS region.
	 */
	for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
		AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
				 bios_mem[i]);
	}

	/*
	 * Calculate and write the microcode code checksum to the microcode
	 * code checksum location ASC_MC_CODE_CHK_SUM (0x2C).
	 */
	AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr);
	AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr);
	code_sum = 0;
	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr);
	for (word = begin_addr; word < end_addr; word += 2) {
		code_sum += AdvReadWordAutoIncLram(iop_base);
	}
	AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum);

	/*
	 * Read and save microcode version and date.
	 */
	AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE,
			asc_dvc->cfg->mcode_date);
	AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM,
			asc_dvc->cfg->mcode_version);

	/*
	 * Set the chip type to indicate the ASC3550.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC3550);

	/*
	 * If the PCI Configuration Command Register "Parity Error Response
	 * Control" Bit was clear (0), then set the microcode variable
	 * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
	 * to ignore DMA parity errors.
	 */
	if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) {
		AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
		word |= CONTROL_FLAG_IGNORE_PERR;
		AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
	}

	/*
	 * For ASC-3550, setting the START_CTL_EMFU [3:2] bits sets a FIFO
	 * threshold of 128 bytes. This register is only accessible to the host.
	 */
	AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0,
			     START_CTL_EMFU | READ_CMD_MRM);

	/*
	 * Microcode operating variables for WDTR, SDTR, and command tag
	 * queuing will be set in slave_configure() based on what a
	 * device reports it is capable of in Inquiry byte 7.
	 *
	 * If SCSI Bus Resets have been disabled, then directly set
	 * SDTR and WDTR from the EEPROM configuration. This will allow
	 * the BIOS and warm boot to work without a SCSI bus hang on
	 * the Inquiry caused by host and target mismatched DTR values.
	 * Without the SCSI Bus Reset, before an Inquiry a device can't
	 * be assumed to be in Asynchronous, Narrow mode.
	 */
	if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) {
		AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE,
				 asc_dvc->wdtr_able);
		AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE,
				 asc_dvc->sdtr_able);
	}

	/*
	 * Set microcode operating variables for SDTR_SPEED1, SDTR_SPEED2,
	 * SDTR_SPEED3, and SDTR_SPEED4 based on the ULTRA EEPROM per TID
	 * bitmask. These values determine the maximum SDTR speed negotiated
	 * with a device.
	 *
	 * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2,
	 * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them
	 * without determining here whether the device supports SDTR.
	 *
	 * 4-bit speed  SDTR speed name
	 * ===========  ===============
	 * 0000b (0x0)  SDTR disabled
	 * 0001b (0x1)  5 Mhz
	 * 0010b (0x2)  10 Mhz
	 * 0011b (0x3)  20 Mhz (Ultra)
	 * 0100b (0x4)  40 Mhz (LVD/Ultra2)
	 * 0101b (0x5)  80 Mhz (LVD2/Ultra3)
	 * 0110b (0x6)  Undefined
	 * .
	 * 1111b (0xF)  Undefined
	 */
	word = 0;
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		if (ADV_TID_TO_TIDMASK(tid) & asc_dvc->ultra_able) {
			/* Set Ultra speed for TID 'tid'. */
			word |= (0x3 << (4 * (tid % 4)));
		} else {
			/* Set Fast speed for TID 'tid'. */
			word |= (0x2 << (4 * (tid % 4)));
		}
		if (tid == 3) {	/* Check if done with sdtr_speed1. */
			AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, word);
			word = 0;
		} else if (tid == 7) {	/* Check if done with sdtr_speed2. */
			AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, word);
			word = 0;
		} else if (tid == 11) {	/* Check if done with sdtr_speed3. */
			AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, word);
			word = 0;
		} else if (tid == 15) {	/* Check if done with sdtr_speed4. */
			AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, word);
			/* End of loop. */
		}
	}

	/*
	 * Set microcode operating variable for the disconnect per TID bitmask.
*/
	AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE,
			 asc_dvc->cfg->disc_enable);

	/*
	 * Set SCSI_CFG0 Microcode Default Value.
	 *
	 * The microcode will set the SCSI_CFG0 register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0,
			 PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN |
			 asc_dvc->chip_scsi_id);

	/*
	 * Determine SCSI_CFG1 Microcode Default Value.
	 *
	 * The microcode will set the SCSI_CFG1 register using this value
	 * after it is started below.
	 */

	/* Read current SCSI_CFG1 Register value. */
	scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);

	/*
	 * If all three connectors are in use, return an error.
	 */
	if ((scsi_cfg1 & CABLE_ILLEGAL_A) == 0 ||
	    (scsi_cfg1 & CABLE_ILLEGAL_B) == 0) {
		asc_dvc->err_code |= ASC_IERR_ILLEGAL_CONNECTION;
		return ADV_ERROR;
	}

	/*
	 * If the internal narrow cable is reversed all of the SCSI_CTRL
	 * register signals will be set. Check for and return an error if
	 * this condition is found.
	 */
	if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) {
		asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
		return ADV_ERROR;
	}

	/*
	 * If this is a differential board and a single-ended device
	 * is attached to one of the connectors, return an error.
	 */
	if ((scsi_cfg1 & DIFF_MODE) && (scsi_cfg1 & DIFF_SENSE) == 0) {
		asc_dvc->err_code |= ASC_IERR_SINGLE_END_DEVICE;
		return ADV_ERROR;
	}

	/*
	 * If automatic termination control is enabled, then set the
	 * termination value based on a table listed in a_condor.h.
	 *
	 * If manual termination was specified with an EEPROM setting
	 * then 'termination' was set-up in AdvInitFrom3550EEPROM() and
	 * is ready to be 'ored' into SCSI_CFG1.
	 */
	if (asc_dvc->cfg->termination == 0) {
		/*
		 * The software always controls termination by setting TERM_CTL_SEL.
		 * If TERM_CTL_SEL were set to 0, the hardware would set termination.
		 */
		asc_dvc->cfg->termination |= TERM_CTL_SEL;

		switch (scsi_cfg1 & CABLE_DETECT) {
			/* TERM_CTL_H: on, TERM_CTL_L: on */
		case 0x3:
		case 0x7:
		case 0xB:
		case 0xD:
		case 0xE:
		case 0xF:
			asc_dvc->cfg->termination |= (TERM_CTL_H | TERM_CTL_L);
			break;

			/* TERM_CTL_H: on, TERM_CTL_L: off */
		case 0x1:
		case 0x5:
		case 0x9:
		case 0xA:
		case 0xC:
			asc_dvc->cfg->termination |= TERM_CTL_H;
			break;

			/* TERM_CTL_H: off, TERM_CTL_L: off */
		case 0x2:
		case 0x6:
			break;
		}
	}

	/*
	 * Clear any set TERM_CTL_H and TERM_CTL_L bits.
	 */
	scsi_cfg1 &= ~TERM_CTL;

	/*
	 * Invert the TERM_CTL_H and TERM_CTL_L bits and then
	 * set 'scsi_cfg1'. The TERM_POL bit does not need to be
	 * referenced, because the hardware internally inverts
	 * the Termination High and Low bits if TERM_POL is set.
	 */
	scsi_cfg1 |= (TERM_CTL_SEL | (~asc_dvc->cfg->termination & TERM_CTL));

	/*
	 * Set SCSI_CFG1 Microcode Default Value
	 *
	 * Set filter value and possibly modified termination control
	 * bits in the Microcode SCSI_CFG1 Register Value.
	 *
	 * The microcode will set the SCSI_CFG1 register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1,
			 FLTR_DISABLE | scsi_cfg1);

	/*
	 * Set MEM_CFG Microcode Default Value
	 *
	 * The microcode will set the MEM_CFG register using this value
	 * after it is started below.
	 *
	 * MEM_CFG may be accessed as a word or byte, but only bits 0-7
	 * are defined.
	 *
	 * ASC-3550 has 8KB internal memory.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
			 BIOS_EN | RAM_SZ_8KB);

	/*
	 * Set SEL_MASK Microcode Default Value
	 *
	 * The microcode will set the SEL_MASK register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK,
			 ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id));

	AdvBuildCarrierFreelist(asc_dvc);

	/*
	 * Set-up the Host->RISC Initiator Command Queue (ICQ).
	 */

	if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) {
		asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
		return ADV_ERROR;
	}
	asc_dvc->carr_freelist = (ADV_CARR_T *)
	    ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));

	/*
	 * The first command issued will be placed in the stopper carrier.
	 */
	asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);

	/*
	 * Set RISC ICQ physical address start value.
	 */
	AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa);

	/*
	 * Set-up the RISC->Host Initiator Response Queue (IRQ).
	 */
	if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) {
		asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
		return ADV_ERROR;
	}
	asc_dvc->carr_freelist = (ADV_CARR_T *)
	    ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));

	/*
	 * The first command completed by the RISC will be placed in
	 * the stopper.
	 *
	 * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
	 * completed the RISC will set the ASC_RQ_STOPPER bit.
	 */
	asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);

	/*
	 * Set RISC IRQ physical address start value.
	 */
	AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa);
	asc_dvc->carr_pending_cnt = 0;

	AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES,
			     (ADV_INTR_ENABLE_HOST_INTR |
			      ADV_INTR_ENABLE_GLOBAL_INTR));

	AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word);
	AdvWriteWordRegister(iop_base, IOPW_PC, word);

	/* finally, finally, gentlemen, start your engine */
	AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN);

	/*
	 * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
	 * Resets should be performed. The RISC has to be running
	 * to issue a SCSI Bus Reset.
	 */
	if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) {
		/*
		 * If the BIOS Signature is present in memory, restore the
		 * BIOS Handshake Configuration Table and do not perform
		 * a SCSI Bus Reset.
		 */
		if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] ==
		    0x55AA) {
			/*
			 * Restore per TID negotiated values.
*/
			AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE,
					 wdtr_able);
			AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE,
					 sdtr_able);
			AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
					 tagqng_able);
			for (tid = 0; tid <= ADV_MAX_TID; tid++) {
				AdvWriteByteLram(iop_base,
						 ASC_MC_NUMBER_OF_MAX_CMD +
						 tid, max_cmd[tid]);
			}
		} else {
			if (AdvResetSB(asc_dvc) != ADV_TRUE) {
				warn_code = ASC_WARN_BUSRESET_ERROR;
			}
		}
	}

	return warn_code;
}

/*
 * Initialize the ASC-38C0800.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code. If there are no warnings
 * then 0 is returned.
 *
 * Needed after initialization for error recovery.
 */
static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
{
	const struct firmware *fw;
	const char fwname[] = "advansys/38C0800.bin";
	AdvPortAddr iop_base;
	ushort warn_code;
	int begin_addr;
	int end_addr;
	ushort code_sum;
	int word;
	int i;
	int err;
	unsigned long chksum;
	ushort scsi_cfg1;
	uchar byte;
	uchar tid;
	ushort bios_mem[ASC_MC_BIOSLEN / 2];	/* BIOS RISC Memory 0x40-0x8F. */
	ushort wdtr_able, sdtr_able, tagqng_able;
	uchar max_cmd[ADV_MAX_TID + 1];

	/* If there is already an error, don't continue. */
	if (asc_dvc->err_code != 0)
		return ADV_ERROR;

	/*
	 * The caller must set 'chip_type' to ADV_CHIP_ASC38C0800.
	 */
	if (asc_dvc->chip_type != ADV_CHIP_ASC38C0800) {
		asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE;
		return ADV_ERROR;
	}

	warn_code = 0;
	iop_base = asc_dvc->iop_base;

	/*
	 * Save the RISC memory BIOS region before writing the microcode.
	 * The BIOS may already be loaded and using its RISC LRAM region
	 * so its region must be saved and restored.
	 *
	 * Note: This code makes the assumption, which is currently true,
	 * that a chip reset does not clear RISC LRAM.
	 */
	for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
		AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
				bios_mem[i]);
	}

	/*
	 * Save current per TID negotiated values.
	 */
	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				max_cmd[tid]);
	}

	/*
	 * RAM BIST (RAM Built-In Self Test)
	 *
	 * Address : I/O base + offset 0x38h register (byte).
	 * Function: Bit 7-6(RW) : RAM mode
	 *                          Normal Mode   : 0x00
	 *                          Pre-test Mode : 0x40
	 *                          RAM Test Mode : 0x80
	 *           Bit 5       : unused
	 *           Bit 4(RO)   : Done bit
	 *           Bit 3-0(RO) : Status
	 *                          Host Error    : 0x08
	 *                          Int_RAM Error : 0x04
	 *                          RISC Error    : 0x02
	 *                          SCSI Error    : 0x01
	 *                          No Error      : 0x00
	 *
	 * Note: RAM BIST code should be put right here, before loading the
	 * microcode and after saving the RISC memory BIOS region.
	 */

	/*
	 * LRAM Pre-test
	 *
	 * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds.
	 * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return
	 * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset
	 * to NORMAL_MODE, return an error too.
	 */
	for (i = 0; i < 2; i++) {
		AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE);
		mdelay(10);	/* Wait for 10ms before reading back. */
		byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
		if ((byte & RAM_TEST_DONE) == 0
		    || (byte & 0x0F) != PRE_TEST_VALUE) {
			asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
			return ADV_ERROR;
		}

		AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);
		mdelay(10);	/* Wait for 10ms before reading back. */
		if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST)
		    != NORMAL_VALUE) {
			asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
			return ADV_ERROR;
		}
	}

	/*
	 * LRAM Test - It takes about 1.5 ms to run through the test.
	 *
	 * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds.
	 * If Done bit not set or Status not 0, save register byte, set the
	 * err_code, and return an error.
	 */
	AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE);
	mdelay(10);	/* Wait for 10ms before checking status. */

	byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
	if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) {
		/* Get here if Done bit not set or Status not 0. */
		asc_dvc->bist_err_code = byte;	/* for BIOS display message */
		asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST;
		return ADV_ERROR;
	}

	/* We need to reset back to normal mode after LRAM test passes. */
	AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);

	err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
		return err;
	}
	if (fw->size < 4) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
		return -EINVAL;
	}
	/* The image's first 4 bytes hold the expected checksum (LE). */
	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
		 (fw->data[1] << 8) | fw->data[0];
	asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4],
					     fw->size - 4, ADV_38C0800_MEMSIZE,
					     chksum);
	release_firmware(fw);
	if (asc_dvc->err_code)
		return ADV_ERROR;

	/*
	 * Restore the RISC memory BIOS region.
	 */
	for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
		AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
				 bios_mem[i]);
	}

	/*
	 * Calculate and write the microcode code checksum to the microcode
	 * code checksum location ASC_MC_CODE_CHK_SUM (0x2C).
	 */
	AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr);
	AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr);
	code_sum = 0;
	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr);
	for (word = begin_addr; word < end_addr; word += 2) {
		code_sum += AdvReadWordAutoIncLram(iop_base);
	}
	AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum);

	/*
	 * Read microcode version and date.
	 */
	AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE,
			asc_dvc->cfg->mcode_date);
	AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM,
			asc_dvc->cfg->mcode_version);

	/*
	 * Set the chip type to indicate the ASC38C0800.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C0800);

	/*
	 * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register.
	 * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current
	 * cable detection and then we are able to read C_DET[3:0].
	 *
	 * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1
	 * Microcode Default Value' section below.
	 */
	scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
	AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1,
			     scsi_cfg1 | DIS_TERM_DRV);

	/*
	 * If the PCI Configuration Command Register "Parity Error Response
	 * Control" Bit was clear (0), then set the microcode variable
	 * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
	 * to ignore DMA parity errors.
	 */
	if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) {
		AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
		word |= CONTROL_FLAG_IGNORE_PERR;
		AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
	}

	/*
	 * For ASC-38C0800, set FIFO_THRESH_80B [6:4] bits and START_CTL_TH [3:2]
	 * bits for the default FIFO threshold.
	 *
	 * Note: ASC-38C0800 FIFO threshold has been changed to 256 bytes.
	 *
	 * For DMA Errata #4 set the BC_THRESH_ENB bit.
	 */
	AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0,
			     BC_THRESH_ENB | FIFO_THRESH_80B | START_CTL_TH |
			     READ_CMD_MRM);

	/*
	 * Microcode operating variables for WDTR, SDTR, and command tag
	 * queuing will be set in slave_configure() based on what a
	 * device reports it is capable of in Inquiry byte 7.
	 *
	 * If SCSI Bus Resets have been disabled, then directly set
	 * SDTR and WDTR from the EEPROM configuration. This will allow
	 * the BIOS and warm boot to work without a SCSI bus hang on
	 * the Inquiry caused by host and target mismatched DTR values.
	 * Without the SCSI Bus Reset, before an Inquiry a device can't
	 * be assumed to be in Asynchronous, Narrow mode.
	 */
	if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) {
		AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE,
				 asc_dvc->wdtr_able);
		AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE,
				 asc_dvc->sdtr_able);
	}

	/*
	 * Set microcode operating variables for DISC and SDTR_SPEED1,
	 * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM
	 * configuration values.
	 *
	 * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2,
	 * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them
	 * without determining here whether the device supports SDTR.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE,
			 asc_dvc->cfg->disc_enable);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4);

	/*
	 * Set SCSI_CFG0 Microcode Default Value.
	 *
	 * The microcode will set the SCSI_CFG0 register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0,
			 PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN |
			 asc_dvc->chip_scsi_id);

	/*
	 * Determine SCSI_CFG1 Microcode Default Value.
	 *
	 * The microcode will set the SCSI_CFG1 register using this value
	 * after it is started below.
	 */

	/* Read current SCSI_CFG1 Register value. */
	scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);

	/*
	 * If the internal narrow cable is reversed all of the SCSI_CTRL
	 * register signals will be set. Check for and return an error if
	 * this condition is found.
	 */
	if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) {
		asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
		return ADV_ERROR;
	}

	/*
	 * All kind of combinations of devices attached to one of four
	 * connectors are acceptable except HVD device attached. For example,
	 * LVD device can be attached to SE connector while SE device attached
	 * to LVD connector.
If LVD device attached to SE connector, it only * runs up to Ultra speed. * * If an HVD device is attached to one of LVD connectors, return an * error. However, there is no way to detect HVD device attached to * SE connectors. */ if (scsi_cfg1 & HVD) { asc_dvc->err_code = ASC_IERR_HVD_DEVICE; return ADV_ERROR; } /* * If either SE or LVD automatic termination control is enabled, then * set the termination value based on a table listed in a_condor.h. * * If manual termination was specified with an EEPROM setting then * 'termination' was set-up in AdvInitFrom38C0800EEPROM() and is ready * to be 'ored' into SCSI_CFG1. */ if ((asc_dvc->cfg->termination & TERM_SE) == 0) { /* SE automatic termination control is enabled. */ switch (scsi_cfg1 & C_DET_SE) { /* TERM_SE_HI: on, TERM_SE_LO: on */ case 0x1: case 0x2: case 0x3: asc_dvc->cfg->termination |= TERM_SE; break; /* TERM_SE_HI: on, TERM_SE_LO: off */ case 0x0: asc_dvc->cfg->termination |= TERM_SE_HI; break; } } if ((asc_dvc->cfg->termination & TERM_LVD) == 0) { /* LVD automatic termination control is enabled. */ switch (scsi_cfg1 & C_DET_LVD) { /* TERM_LVD_HI: on, TERM_LVD_LO: on */ case 0x4: case 0x8: case 0xC: asc_dvc->cfg->termination |= TERM_LVD; break; /* TERM_LVD_HI: off, TERM_LVD_LO: off */ case 0x0: break; } } /* * Clear any set TERM_SE and TERM_LVD bits. */ scsi_cfg1 &= (~TERM_SE & ~TERM_LVD); /* * Invert the TERM_SE and TERM_LVD bits and then set 'scsi_cfg1'. */ scsi_cfg1 |= (~asc_dvc->cfg->termination & 0xF0); /* * Clear BIG_ENDIAN, DIS_TERM_DRV, Terminator Polarity and HVD/LVD/SE * bits and set possibly modified termination control bits in the * Microcode SCSI_CFG1 Register Value. */ scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL & ~HVD_LVD_SE); /* * Set SCSI_CFG1 Microcode Default Value * * Set possibly modified termination control and reset DIS_TERM_DRV * bits in the Microcode SCSI_CFG1 Register Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. 
*/ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1); /* * Set MEM_CFG Microcode Default Value * * The microcode will set the MEM_CFG register using this value * after it is started below. * * MEM_CFG may be accessed as a word or byte, but only bits 0-7 * are defined. * * ASC-38C0800 has 16KB internal memory. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, BIOS_EN | RAM_SZ_16KB); /* * Set SEL_MASK Microcode Default Value * * The microcode will set the SEL_MASK register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); AdvBuildCarrierFreelist(asc_dvc); /* * Set-up the Host->RISC Initiator Command Queue (ICQ). */ if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa)); /* * The first command issued will be placed in the stopper carrier. */ asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC ICQ physical address start value. * carr_pa is LE, must be native before write */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); /* * Set-up the RISC->Host Initiator Response Queue (IRQ). */ if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa)); /* * The first command completed by the RISC will be placed in * the stopper. * * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is * completed the RISC will set the ASC_RQ_STOPPER bit. */ asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC IRQ physical address start value. 
* * carr_pa is LE, must be native before write * */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); asc_dvc->carr_pending_cnt = 0; AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR)); AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); AdvWriteWordRegister(iop_base, IOPW_PC, word); /* finally, finally, gentlemen, start your engine */ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); /* * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus * Resets should be performed. The RISC has to be running * to issue a SCSI Bus Reset. */ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { /* * If the BIOS Signature is present in memory, restore the * BIOS Handshake Configuration Table and do not perform * a SCSI Bus Reset. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { /* * Restore per TID negotiated values. */ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } } else { if (AdvResetSB(asc_dvc) != ADV_TRUE) { warn_code = ASC_WARN_BUSRESET_ERROR; } } } return warn_code; } /* * Initialize the ASC-38C1600. * * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Needed after initialization for error recovery. */ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc) { const struct firmware *fw; const char fwname[] = "advansys/38C1600.bin"; AdvPortAddr iop_base; ushort warn_code; int begin_addr; int end_addr; ushort code_sum; long word; int i; int err; unsigned long chksum; ushort scsi_cfg1; uchar byte; uchar tid; ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. 
*/ ushort wdtr_able, sdtr_able, ppr_able, tagqng_able; uchar max_cmd[ASC_MAX_TID + 1]; /* If there is already an error, don't continue. */ if (asc_dvc->err_code != 0) { return ADV_ERROR; } /* * The caller must set 'chip_type' to ADV_CHIP_ASC38C1600. */ if (asc_dvc->chip_type != ADV_CHIP_ASC38C1600) { asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } warn_code = 0; iop_base = asc_dvc->iop_base; /* * Save the RISC memory BIOS region before writing the microcode. * The BIOS may already be loaded and using its RISC LRAM region * so its region must be saved and restored. * * Note: This code makes the assumption, which is currently true, * that a chip reset does not clear RISC LRAM. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Save current per TID negotiated values. */ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ASC_MAX_TID; tid++) { AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } /* * RAM BIST (Built-In Self Test) * * Address : I/O base + offset 0x38h register (byte). * Function: Bit 7-6(RW) : RAM mode * Normal Mode : 0x00 * Pre-test Mode : 0x40 * RAM Test Mode : 0x80 * Bit 5 : unused * Bit 4(RO) : Done bit * Bit 3-0(RO) : Status * Host Error : 0x08 * Int_RAM Error : 0x04 * RISC Error : 0x02 * SCSI Error : 0x01 * No Error : 0x00 * * Note: RAM BIST code should be put right here, before loading the * microcode and after saving the RISC memory BIOS region. */ /* * LRAM Pre-test * * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds. * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset * to NORMAL_MODE, return an error too. 
*/ for (i = 0; i < 2; i++) { AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE); mdelay(10); /* Wait for 10ms before reading back. */ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & 0x0F) != PRE_TEST_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); mdelay(10); /* Wait for 10ms before reading back. */ if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST) != NORMAL_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } } /* * LRAM Test - It takes about 1.5 ms to run through the test. * * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds. * If Done bit not set or Status not 0, save register byte, set the * err_code, and return an error. */ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE); mdelay(10); /* Wait for 10ms before checking status. */ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) { /* Get here if Done bit not set or Status not 0. */ asc_dvc->bist_err_code = byte; /* for BIOS display message */ asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST; return ADV_ERROR; } /* We need to reset back to normal mode after LRAM test passes. 
*/ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return err; } if (fw->size < 4) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); release_firmware(fw); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return -EINVAL; } chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | (fw->data[1] << 8) | fw->data[0]; asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], fw->size - 4, ADV_38C1600_MEMSIZE, chksum); release_firmware(fw); if (asc_dvc->err_code) return ADV_ERROR; /* * Restore the RISC memory BIOS region. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Calculate and write the microcode code checksum to the microcode * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). */ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); code_sum = 0; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); for (word = begin_addr; word < end_addr; word += 2) { code_sum += AdvReadWordAutoIncLram(iop_base); } AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); /* * Read microcode version and date. */ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date); AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version); /* * Set the chip type to indicate the ASC38C1600. */ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C1600); /* * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register. * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current * cable detection and then we are able to read C_DET[3:0]. * * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1 * Microcode Default Value' section below. 
*/ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1, scsi_cfg1 | DIS_TERM_DRV); /* * If the PCI Configuration Command Register "Parity Error Response * Control" Bit was clear (0), then set the microcode variable * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode * to ignore DMA parity errors. */ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_IGNORE_PERR; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * If the BIOS control flag AIPP (Asynchronous Information * Phase Protection) disable bit is not set, then set the firmware * 'control_flag' CONTROL_FLAG_ENABLE_AIPP bit to enable * AIPP checking and encoding. */ if ((asc_dvc->bios_ctrl & BIOS_CTRL_AIPP_DIS) == 0) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_ENABLE_AIPP; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * For ASC-38C1600 use DMA_CFG0 default values: FIFO_THRESH_80B [6:4], * and START_CTL_TH [3:2]. */ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM); /* * Microcode operating variables for WDTR, SDTR, and command tag * queuing will be set in slave_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set * SDTR and WDTR from the EEPROM configuration. This will allow * the BIOS and warm boot to work without a SCSI bus hang on * the Inquiry caused by host and target mismatched DTR values. * Without the SCSI Bus Reset, before an Inquiry a device can't * be assumed to be in Asynchronous, Narrow mode. 
*/ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, asc_dvc->wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, asc_dvc->sdtr_able); } /* * Set microcode operating variables for DISC and SDTR_SPEED1, * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM * configuration values. * * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them * without determining here whether the device supports SDTR. */ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4); /* * Set SCSI_CFG0 Microcode Default Value. * * The microcode will set the SCSI_CFG0 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id); /* * Calculate SCSI_CFG1 Microcode Default Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. * * Each ASC-38C1600 function has only two cable detect bits. * The bus mode override bits are in IOPB_SOFT_OVER_WR. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); /* * If the cable is reversed all of the SCSI_CTRL register signals * will be set. Check for and return an error if this condition is * found. */ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; return ADV_ERROR; } /* * Each ASC-38C1600 function has two connectors. Only an HVD device * can not be connected to either connector. An LVD device or SE device * may be connected to either connecor. 
If an SE device is connected, * then at most Ultra speed (20 Mhz) can be used on both connectors. * * If an HVD device is attached, return an error. */ if (scsi_cfg1 & HVD) { asc_dvc->err_code |= ASC_IERR_HVD_DEVICE; return ADV_ERROR; } /* * Each function in the ASC-38C1600 uses only the SE cable detect and * termination because there are two connectors for each function. Each * function may use either LVD or SE mode. Corresponding the SE automatic * termination control EEPROM bits are used for each function. Each * function has its own EEPROM. If SE automatic control is enabled for * the function, then set the termination value based on a table listed * in a_condor.h. * * If manual termination is specified in the EEPROM for the function, * then 'termination' was set-up in AscInitFrom38C1600EEPROM() and is * ready to be 'ored' into SCSI_CFG1. */ if ((asc_dvc->cfg->termination & TERM_SE) == 0) { struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc); /* SE automatic termination control is enabled. */ switch (scsi_cfg1 & C_DET_SE) { /* TERM_SE_HI: on, TERM_SE_LO: on */ case 0x1: case 0x2: case 0x3: asc_dvc->cfg->termination |= TERM_SE; break; case 0x0: if (PCI_FUNC(pdev->devfn) == 0) { /* Function 0 - TERM_SE_HI: off, TERM_SE_LO: off */ } else { /* Function 1 - TERM_SE_HI: on, TERM_SE_LO: off */ asc_dvc->cfg->termination |= TERM_SE_HI; } break; } } /* * Clear any set TERM_SE bits. */ scsi_cfg1 &= ~TERM_SE; /* * Invert the TERM_SE bits and then set 'scsi_cfg1'. */ scsi_cfg1 |= (~asc_dvc->cfg->termination & TERM_SE); /* * Clear Big Endian and Terminator Polarity bits and set possibly * modified termination control bits in the Microcode SCSI_CFG1 * Register Value. * * Big Endian bit is not used even on big endian machines. */ scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL); /* * Set SCSI_CFG1 Microcode Default Value * * Set possibly modified termination control bits in the Microcode * SCSI_CFG1 Register Value. 
* * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1); /* * Set MEM_CFG Microcode Default Value * * The microcode will set the MEM_CFG register using this value * after it is started below. * * MEM_CFG may be accessed as a word or byte, but only bits 0-7 * are defined. * * ASC-38C1600 has 32KB internal memory. * * XXX - Since ASC38C1600 Rev.3 has a Local RAM failure issue, we come * out a special 16K Adv Library and Microcode version. After the issue * resolved, we should turn back to the 32K support. Both a_condor.h and * mcode.sas files also need to be updated. * * AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, * BIOS_EN | RAM_SZ_32KB); */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, BIOS_EN | RAM_SZ_16KB); /* * Set SEL_MASK Microcode Default Value * * The microcode will set the SEL_MASK register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); AdvBuildCarrierFreelist(asc_dvc); /* * Set-up the Host->RISC Initiator Command Queue (ICQ). */ if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa)); /* * The first command issued will be placed in the stopper carrier. */ asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC ICQ physical address start value. Initialize the * COMMA register to the same value otherwise the RISC will * prematurely detect a command is available. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); AdvWriteDWordRegister(iop_base, IOPDW_COMMA, le32_to_cpu(asc_dvc->icq_sp->carr_pa)); /* * Set-up the RISC->Host Initiator Response Queue (IRQ). 
*/ if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa)); /* * The first command completed by the RISC will be placed in * the stopper. * * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is * completed the RISC will set the ASC_RQ_STOPPER bit. */ asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC IRQ physical address start value. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); asc_dvc->carr_pending_cnt = 0; AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR)); AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); AdvWriteWordRegister(iop_base, IOPW_PC, word); /* finally, finally, gentlemen, start your engine */ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); /* * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus * Resets should be performed. The RISC has to be running * to issue a SCSI Bus Reset. */ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { /* * If the BIOS Signature is present in memory, restore the * per TID microcode operating variables. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { /* * Restore per TID negotiated values. */ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ASC_MAX_TID; tid++) { AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } } else { if (AdvResetSB(asc_dvc) != ADV_TRUE) { warn_code = ASC_WARN_BUSRESET_ERROR; } } } return warn_code; } /* * Reset chip and SCSI Bus. * * Return Value: * ADV_TRUE(1) - Chip re-initialization and SCSI Bus Reset successful. 
 * ADV_FALSE(0) - Chip re-initialization and SCSI Bus Reset failure.
 */
static int AdvResetChipAndSB(ADV_DVC_VAR *asc_dvc)
{
	int status;
	ushort wdtr_able, sdtr_able, tagqng_able;
	ushort ppr_able = 0;
	uchar tid, max_cmd[ADV_MAX_TID + 1];
	AdvPortAddr iop_base;
	ushort bios_sig;

	iop_base = asc_dvc->iop_base;

	/*
	 * Save current per TID negotiated values (WDTR/SDTR/tag queuing
	 * enables and per-target max command counts) so they can be
	 * restored after the chip is re-initialized.  PPR only exists on
	 * the ASC-38C1600.
	 */
	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
	}
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				max_cmd[tid]);
	}

	/*
	 * Force the AdvInitAsc3550/38C0800Driver() function to
	 * perform a SCSI Bus Reset by clearing the BIOS signature word.
	 * The initialization functions assumes a SCSI Bus Reset is not
	 * needed if the BIOS signature word is present.  The original
	 * signature is saved in 'bios_sig' and restored below.
	 */
	AdvReadWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig);
	AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, 0);

	/*
	 * Stop chip and reset it.  The 100 ms delay gives the reset time
	 * to take effect before the I/O register write mode is restored.
	 */
	AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_STOP);
	AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET);
	mdelay(100);
	AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
			     ADV_CTRL_REG_CMD_WR_IO_REG);

	/*
	 * Reset Adv Library error code, if any, and try
	 * re-initializing the chip with the init routine matching
	 * 'chip_type'.
	 */
	asc_dvc->err_code = 0;
	if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		status = AdvInitAsc38C1600Driver(asc_dvc);
	} else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
		status = AdvInitAsc38C0800Driver(asc_dvc);
	} else {
		status = AdvInitAsc3550Driver(asc_dvc);
	}

	/* Translate initialization return value (0 == OK) to ADV_TRUE/FALSE. */
	if (status == 0) {
		status = ADV_TRUE;
	} else {
		status = ADV_FALSE;
	}

	/*
	 * Restore the BIOS signature word.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig);

	/*
	 * Restore per TID negotiated values.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
	}
	AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				 max_cmd[tid]);
	}
	return status;
}

/*
 * adv_async_callback() - Adv Library asynchronous event callback function.
 *
 * Called from AdvISR() with the microcode ASC_MC_INTRB_CODE byte value.
 * Only ADV_ASYNC_RDMA_FAILURE triggers recovery action (chip + bus
 * reset); the other known codes are merely logged.
 */
static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
{
	switch (code) {
	case ADV_ASYNC_SCSI_BUS_RESET_DET:
		/*
		 * The firmware detected a SCSI Bus reset.
		 */
		ASC_DBG(0, "ADV_ASYNC_SCSI_BUS_RESET_DET\n");
		break;

	case ADV_ASYNC_RDMA_FAILURE:
		/*
		 * Handle RDMA failure by resetting the SCSI Bus and
		 * possibly the chip if it is unresponsive. Log the error
		 * with a unique code.
		 */
		ASC_DBG(0, "ADV_ASYNC_RDMA_FAILURE\n");
		AdvResetChipAndSB(adv_dvc_varp);
		break;

	case ADV_HOST_SCSI_BUS_RESET:
		/*
		 * Host generated SCSI bus reset occurred.
		 */
		ASC_DBG(0, "ADV_HOST_SCSI_BUS_RESET\n");
		break;

	default:
		ASC_DBG(0, "unknown code 0x%x\n", code);
		break;
	}
}

/*
 * adv_isr_callback() - Second Level Interrupt Handler called by AdvISR().
 *
 * Callback function for the Wide SCSI Adv Library.  Maps the completed
 * ADV_SCSI_REQ_Q's done/host/scsi status into the struct scsi_cmnd
 * 'result', completes the command, and returns the request's adv_req_t
 * and adv_sgblk_t structures to the board free lists.
 */
static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
{
	struct asc_board *boardp;
	adv_req_t *reqp;
	adv_sgblk_t *sgblkp;
	struct scsi_cmnd *scp;
	struct Scsi_Host *shost;
	ADV_DCNT resid_cnt;

	ASC_DBG(1, "adv_dvc_varp 0x%lx, scsiqp 0x%lx\n",
		(ulong)adv_dvc_varp, (ulong)scsiqp);
	ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);

	/*
	 * Get the adv_req_t structure for the command that has been
	 * completed. The adv_req_t structure actually contains the
	 * completed ADV_SCSI_REQ_Q structure.  'srb_ptr' was stored as a
	 * 32-bit virtual address cookie when the request was issued.
	 */
	reqp = (adv_req_t *)ADV_U32_TO_VADDR(scsiqp->srb_ptr);
	ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp);
	if (reqp == NULL) {
		ASC_PRINT("adv_isr_callback: reqp is NULL\n");
		return;
	}

	/*
	 * Get the struct scsi_cmnd structure and Scsi_Host structure for the
	 * command that has been completed.
	 *
	 * Note: The adv_req_t request structure and adv_sgblk_t structure,
	 * if any, are dropped, because a board structure pointer can not be
	 * determined.
	 */
	scp = reqp->cmndp;
	ASC_DBG(1, "scp 0x%p\n", scp);
	if (scp == NULL) {
		ASC_PRINT
		    ("adv_isr_callback: scp is NULL; adv_req_t dropped.\n");
		return;
	}
	ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);

	shost = scp->device->host;
	ASC_STATS(shost, callback);
	ASC_DBG(1, "shost 0x%p\n", shost);

	boardp = shost_priv(shost);
	BUG_ON(adv_dvc_varp != &boardp->dvc_var.adv_dvc_var);

	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiqp->done_status) {
	case QD_NO_ERROR:
		ASC_DBG(2, "QD_NO_ERROR\n");
		scp->result = 0;

		/*
		 * Check for an underrun condition.
		 *
		 * If there was no error and an underrun condition, then
		 * then return the number of underrun bytes.  'data_cnt'
		 * arrives little-endian from the firmware.
		 */
		resid_cnt = le32_to_cpu(scsiqp->data_cnt);
		if (scsi_bufflen(scp) != 0 && resid_cnt != 0 &&
		    resid_cnt <= scsi_bufflen(scp)) {
			ASC_DBG(1, "underrun condition %lu bytes\n",
				(ulong)resid_cnt);
			scsi_set_resid(scp, resid_cnt);
		}
		break;

	case QD_WITH_ERROR:
		ASC_DBG(2, "QD_WITH_ERROR\n");
		switch (scsiqp->host_status) {
		case QHSTA_NO_ERROR:
			if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
				ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
				ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
						  SCSI_SENSE_BUFFERSIZE);
				/*
				 * Note: The 'status_byte()' macro used by
				 * target drivers defined in scsi.h shifts the
				 * status byte returned by host drivers right
				 * by 1 bit. This is why target drivers also
				 * use right shifted status byte definitions.
				 * For instance target drivers use
				 * CHECK_CONDITION, defined to 0x1, instead of
				 * the SCSI defined check condition value of
				 * 0x2. Host drivers are supposed to return
				 * the status byte as it is defined by SCSI.
				 */
				scp->result = DRIVER_BYTE(DRIVER_SENSE) |
				    STATUS_BYTE(scsiqp->scsi_status);
			} else {
				scp->result = STATUS_BYTE(scsiqp->scsi_status);
			}
			break;

		default:
			/* Some other QHSTA error occurred. */
			ASC_DBG(1, "host_status 0x%x\n", scsiqp->host_status);
			scp->result = HOST_BYTE(DID_BAD_TARGET);
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
		scp->result =
		    HOST_BYTE(DID_ABORT) | STATUS_BYTE(scsiqp->scsi_status);
		break;

	default:
		ASC_DBG(1, "done_status 0x%x\n", scsiqp->done_status);
		scp->result =
		    HOST_BYTE(DID_ERROR) | STATUS_BYTE(scsiqp->scsi_status);
		break;
	}

	/*
	 * If the 'init_tidmask' bit isn't already set for the target and the
	 * current request finished normally, then set the bit for the target
	 * to indicate that a device is present.
	 */
	if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 &&
	    scsiqp->done_status == QD_NO_ERROR &&
	    scsiqp->host_status == QHSTA_NO_ERROR) {
		boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id);
	}

	asc_scsi_done(scp);

	/*
	 * Free all 'adv_sgblk_t' structures allocated for the request.
	 */
	while ((sgblkp = reqp->sgblkp) != NULL) {
		/* Remove 'sgblkp' from the request list. */
		reqp->sgblkp = sgblkp->next_sgblkp;

		/* Add 'sgblkp' to the board free list. */
		sgblkp->next_sgblkp = boardp->adv_sgblkp;
		boardp->adv_sgblkp = sgblkp;
	}

	/*
	 * Free the adv_req_t structure used with the command by adding
	 * it back to the board free list.
	 */
	reqp->next_reqp = boardp->adv_reqp;
	boardp->adv_reqp = reqp;

	ASC_DBG(1, "done\n");
}

/*
 * Adv Library Interrupt Service Routine
 *
 * This function is called by a driver's interrupt service routine.
 * The function disables and re-enables interrupts.
 *
 * When a microcode idle command is completed, the ADV_DVC_VAR
 * 'idle_cmd_done' field is set to ADV_TRUE.
 *
 * Note: AdvISR() can be called when interrupts are disabled or even
 * when there is no hardware interrupt condition present.
It will * always check for completed idle commands and microcode requests. * This is an important feature that shouldn't be changed because it * allows commands to be completed from polling mode loops. * * Return: * ADV_TRUE(1) - interrupt was pending * ADV_FALSE(0) - no interrupt was pending */ static int AdvISR(ADV_DVC_VAR *asc_dvc) { AdvPortAddr iop_base; uchar int_stat; ushort target_bit; ADV_CARR_T *free_carrp; ADV_VADDR irq_next_vpa; ADV_SCSI_REQ_Q *scsiq; iop_base = asc_dvc->iop_base; /* Reading the register clears the interrupt. */ int_stat = AdvReadByteRegister(iop_base, IOPB_INTR_STATUS_REG); if ((int_stat & (ADV_INTR_STATUS_INTRA | ADV_INTR_STATUS_INTRB | ADV_INTR_STATUS_INTRC)) == 0) { return ADV_FALSE; } /* * Notify the driver of an asynchronous microcode condition by * calling the adv_async_callback function. The function * is passed the microcode ASC_MC_INTRB_CODE byte value. */ if (int_stat & ADV_INTR_STATUS_INTRB) { uchar intrb_code; AdvReadByteLram(iop_base, ASC_MC_INTRB_CODE, intrb_code); if (asc_dvc->chip_type == ADV_CHIP_ASC3550 || asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { if (intrb_code == ADV_ASYNC_CARRIER_READY_FAILURE && asc_dvc->carr_pending_cnt != 0) { AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_A); if (asc_dvc->chip_type == ADV_CHIP_ASC3550) { AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP); } } } adv_async_callback(asc_dvc, intrb_code); } /* * Check if the IRQ stopper carrier contains a completed request. */ while (((irq_next_vpa = le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ASC_RQ_DONE) != 0) { /* * Get a pointer to the newly completed ADV_SCSI_REQ_Q structure. * The RISC will have set 'areq_vpa' to a virtual address. * * The firmware will have copied the ASC_SCSI_REQ_Q.scsiq_ptr * field to the carrier ADV_CARR_T.areq_vpa field. The conversion * below complements the conversion of ASC_SCSI_REQ_Q.scsiq_ptr' * in AdvExeScsiQueue(). 
*/ scsiq = (ADV_SCSI_REQ_Q *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->areq_vpa)); /* * Request finished with good status and the queue was not * DMAed to host memory by the firmware. Set all status fields * to indicate good status. */ if ((irq_next_vpa & ASC_RQ_GOOD) != 0) { scsiq->done_status = QD_NO_ERROR; scsiq->host_status = scsiq->scsi_status = 0; scsiq->data_cnt = 0L; } /* * Advance the stopper pointer to the next carrier * ignoring the lower four bits. Free the previous * stopper carrier. */ free_carrp = asc_dvc->irq_sp; asc_dvc->irq_sp = (ADV_CARR_T *) ADV_U32_TO_VADDR(ASC_GET_CARRP(irq_next_vpa)); free_carrp->next_vpa = cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist)); asc_dvc->carr_freelist = free_carrp; asc_dvc->carr_pending_cnt--; target_bit = ADV_TID_TO_TIDMASK(scsiq->target_id); /* * Clear request microcode control flag. */ scsiq->cntl = 0; /* * Notify the driver of the completed request by passing * the ADV_SCSI_REQ_Q pointer to its callback function. */ scsiq->a_flag |= ADV_SCSIQ_DONE; adv_isr_callback(asc_dvc, scsiq); /* * Note: After the driver callback function is called, 'scsiq' * can no longer be referenced. * * Fall through and continue processing other completed * requests... 
	 */
	}

	return ADV_TRUE;
}

/*
 * Latch the first error code seen into 'asc_dvc->err_code' and mirror
 * it into chip local RAM (ASCV_ASCDVC_ERR_CODE_W) so the microcode side
 * can see it too.  Later errors do not overwrite the first one
 * recorded.
 *
 * Returns the error code that was passed in.
 */
static int AscSetLibErrorCode(ASC_DVC_VAR *asc_dvc, ushort err_code)
{
	if (asc_dvc->err_code == 0) {
		asc_dvc->err_code = err_code;
		AscWriteLramWord(asc_dvc->iop_base, ASCV_ASCDVC_ERR_CODE_W,
				 err_code);
	}
	return err_code;
}

/*
 * Acknowledge an interrupt from the narrow (Asc) chip.
 *
 * The RISC's interrupt-generate flag is polled until it drops (the loop
 * is bounded at 0x7FFF iterations so a wedged chip cannot hang the
 * host), the ACK_INT bit is set in the host-flag LRAM byte, and
 * CIW_INT_ACK is written to the chip status register.  The ack is
 * re-issued up to four times while the chip still reports
 * CSW_INT_PENDING, then the original host-flag value (with ACK_INT
 * cleared) is restored.
 */
static void AscAckInterrupt(PortAddr iop_base)
{
	uchar host_flag;
	uchar risc_flag;
	ushort loop;

	loop = 0;
	do {
		risc_flag = AscReadLramByte(iop_base, ASCV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ASC_RISC_FLAG_GEN_INT) != 0);
	host_flag =
	    AscReadLramByte(iop_base,
			    ASCV_HOST_FLAG_B) & (~ASC_HOST_FLAG_ACK_INT);
	AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
			 (uchar)(host_flag | ASC_HOST_FLAG_ACK_INT));
	AscSetChipStatus(iop_base, CIW_INT_ACK);
	loop = 0;
	while (AscGetChipStatus(iop_base) & CSW_INT_PENDING) {
		AscSetChipStatus(iop_base, CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}
	AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
}

/*
 * Map a synchronous transfer period value to an index into the board's
 * SDTR period table 'sdtr_period_tbl' (the scan returns the first
 * entry >= 'syn_time', which assumes the table is ascending in period
 * value -- confirm against the table initialization).
 *
 * Returns max_sdtr_index + 1 when 'syn_time' is slower than the
 * slowest table entry; callers such as AscCalSDTRData() treat an
 * out-of-range index as "cannot negotiate".
 *
 * NOTE(review): the scan stops at max_index - 2, so a period matching
 * only one of the last two entries falls through to 'max_index' --
 * verify this collapsing of the slowest entries is intended.
 */
static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time)
{
	const uchar *period_table;
	int max_index;
	int min_index;
	int i;

	period_table = asc_dvc->sdtr_period_tbl;
	max_index = (int)asc_dvc->max_sdtr_index;
	min_index = (int)asc_dvc->min_sdtr_index;
	if ((syn_time <= period_table[max_index])) {
		for (i = min_index; i < (max_index - 1); i++) {
			if (syn_time <= period_table[i]) {
				return (uchar)i;
			}
		}
		return (uchar)max_index;
	} else {
		return (uchar)(max_index + 1);
	}
}

/*
 * Build an extended SDTR message from 'sdtr_period'/'sdtr_offset' and
 * copy it into the microcode message-out area (ASCV_MSGOUT_BEG) in
 * LRAM for transmission to the target.
 */
static uchar AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period,
			   uchar sdtr_offset)
{
	EXT_MSG sdtr_buf;
	uchar sdtr_period_index;
	PortAddr iop_base;

	iop_base = asc_dvc->iop_base;
	sdtr_buf.msg_type = EXTENDED_MESSAGE;
	sdtr_buf.msg_len = MS_SDTR_LEN;
	sdtr_buf.msg_req = EXTENDED_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ASC_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
	if (sdtr_period_index <= asc_dvc->max_sdtr_index) {
		AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
					(uchar *)&sdtr_buf,
					sizeof(EXT_MSG) >> 1);
		return ((sdtr_period_index << 4) | sdtr_offset);
	} else {
		/*
		 * Period is outside the board's table: send a
		 * zero-offset (asynchronous) SDTR message instead.
		 */
		sdtr_buf.req_ack_offset = 0;
		AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
					(uchar *)&sdtr_buf,
					sizeof(EXT_MSG) >> 1);
		return 0;
	}
}

/*
 * Pack an SDTR period/offset pair into the chip's combined "SDTR data"
 * byte: period table index in the high nibble, REQ/ACK offset in the
 * low nibble.
 *
 * Returns 0xFF when the period does not fit the board's period table.
 */
static uchar AscCalSDTRData(ASC_DVC_VAR *asc_dvc, uchar sdtr_period,
			    uchar syn_offset)
{
	uchar byte;
	uchar sdtr_period_ix;

	sdtr_period_ix = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
	if (sdtr_period_ix > asc_dvc->max_sdtr_index)
		return 0xFF;
	byte = (sdtr_period_ix << 4) | (syn_offset & ASC_SYN_MAX_OFFSET);
	return byte;
}

/*
 * Write 'sdtr_data' to the chip's synchronous transfer register for
 * target 'id'.
 *
 * The device-ID register (register bank 1) selects which target the
 * SYN register (bank 0) refers to, so the original ID is saved
 * (decoded from its one-hot encoding back to an index), temporarily
 * replaced by 'id', and restored afterwards.  Both the ID and SYN
 * writes are verified by reading back.
 *
 * Returns TRUE on success, FALSE if either register did not read back
 * as written.
 */
static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data)
{
	ASC_SCSI_BIT_ID_TYPE org_id;
	int i;
	int sta = TRUE;

	AscSetBank(iop_base, 1);
	org_id = AscReadChipDvcID(iop_base);
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if (org_id == (0x01 << i))
			break;
	}
	org_id = (ASC_SCSI_BIT_ID_TYPE) i;
	AscWriteChipDvcID(iop_base, id);
	if (AscReadChipDvcID(iop_base) == (0x01 << id)) {
		AscSetBank(iop_base, 0);
		AscSetChipSyn(iop_base, sdtr_data);
		if (AscGetChipSyn(iop_base) != sdtr_data) {
			sta = FALSE;
		}
	} else {
		sta = FALSE;
	}
	/* Restore the original device ID and return to bank 0. */
	AscSetBank(iop_base, 1);
	AscWriteChipDvcID(iop_base, org_id);
	AscSetBank(iop_base, 0);
	return (sta);
}

/*
 * Apply 'sdtr_data' for target 'tid_no' both to the chip register and
 * to the microcode's per-target "SDTR done" copy in LRAM so hardware
 * and firmware agree on the negotiated parameters.
 */
static void AscSetChipSDTR(PortAddr iop_base, uchar sdtr_data, uchar tid_no)
{
	AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
	AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data);
}

/*
 * Service a RISC halt condition (AscISR() calls this when the chip is
 * halted with single-step enabled).
 *
 * The halt reason is read from ASCV_HALTCODE_W, and the halted
 * request's queue number/address and target are read from LRAM.  Each
 * recognized halt code -- async-fix enable/disable, extended message
 * in (SDTR/WDTR), check condition, SDTR rejected, queue full, and
 * (with CC_VERY_LONG_SG_LIST) long-SG continuation -- is serviced and
 * the halt code word is cleared to zero to let the RISC resume.
 */
static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
{
	EXT_MSG ext_msg;
	EXT_MSG out_msg;
	ushort halt_q_addr;
	int sdtr_accept;
	ushort int_halt_code;
	ASC_SCSI_BIT_ID_TYPE scsi_busy;
	ASC_SCSI_BIT_ID_TYPE target_id;
	PortAddr iop_base;
	uchar tag_code;
	uchar q_status;
	uchar halt_qp;
	uchar sdtr_data;
	uchar target_ix;
	uchar q_cntl, tid_no;
	uchar cur_dvc_qng;
	uchar asyn_sdtr;
	uchar scsi_status;
	struct asc_board *boardp;

	BUG_ON(!asc_dvc->drv_ptr);
	boardp = asc_dvc->drv_ptr;

	iop_base = asc_dvc->iop_base;
	int_halt_code = AscReadLramWord(iop_base, ASCV_HALTCODE_W);

	halt_qp = AscReadLramByte(iop_base, ASCV_CURCDB_B);
	halt_q_addr = ASC_QNO_TO_QADDR(halt_qp);
	target_ix =
AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_TARGET_IX)); q_cntl = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL)); tid_no = ASC_TIX_TO_TID(target_ix); target_id = (uchar)ASC_TID_TO_TARGET_ID(tid_no); if (asc_dvc->pci_fix_asyn_xfer & target_id) { asyn_sdtr = ASYN_SDTR_DATA_FIX_PCI_REV_AB; } else { asyn_sdtr = 0; } if (int_halt_code == ASC_HALT_DISABLE_ASYN_USE_SYN_FIX) { if (asc_dvc->pci_fix_asyn_xfer & target_id) { AscSetChipSDTR(iop_base, 0, tid_no); boardp->sdtr_data[tid_no] = 0; } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) { if (asc_dvc->pci_fix_asyn_xfer & target_id) { AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (int_halt_code == ASC_HALT_EXTMSG_IN) { AscMemWordCopyPtrFromLram(iop_base, ASCV_MSGIN_BEG, (uchar *)&ext_msg, sizeof(EXT_MSG) >> 1); if (ext_msg.msg_type == EXTENDED_MESSAGE && ext_msg.msg_req == EXTENDED_SDTR && ext_msg.msg_len == MS_SDTR_LEN) { sdtr_accept = TRUE; if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) { sdtr_accept = FALSE; ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET; } if ((ext_msg.xfer_period < asc_dvc->sdtr_period_tbl[asc_dvc->min_sdtr_index]) || (ext_msg.xfer_period > asc_dvc->sdtr_period_tbl[asc_dvc-> max_sdtr_index])) { sdtr_accept = FALSE; ext_msg.xfer_period = asc_dvc->sdtr_period_tbl[asc_dvc-> min_sdtr_index]; } if (sdtr_accept) { sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period, ext_msg.req_ack_offset); if ((sdtr_data == 0xFF)) { q_cntl |= QC_MSG_OUT; asc_dvc->init_sdtr &= ~target_id; asc_dvc->sdtr_done &= ~target_id; AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } } if (ext_msg.req_ack_offset == 0) { q_cntl &= ~QC_MSG_OUT; asc_dvc->init_sdtr &= ~target_id; asc_dvc->sdtr_done &= ~target_id; AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); } else { 
if (sdtr_accept && (q_cntl & QC_MSG_OUT)) { q_cntl &= ~QC_MSG_OUT; asc_dvc->sdtr_done |= target_id; asc_dvc->init_sdtr |= target_id; asc_dvc->pci_fix_asyn_xfer &= ~target_id; sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period, ext_msg. req_ack_offset); AscSetChipSDTR(iop_base, sdtr_data, tid_no); boardp->sdtr_data[tid_no] = sdtr_data; } else { q_cntl |= QC_MSG_OUT; AscMsgOutSDTR(asc_dvc, ext_msg.xfer_period, ext_msg.req_ack_offset); asc_dvc->pci_fix_asyn_xfer &= ~target_id; sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period, ext_msg. req_ack_offset); AscSetChipSDTR(iop_base, sdtr_data, tid_no); boardp->sdtr_data[tid_no] = sdtr_data; asc_dvc->sdtr_done |= target_id; asc_dvc->init_sdtr |= target_id; } } AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (ext_msg.msg_type == EXTENDED_MESSAGE && ext_msg.msg_req == EXTENDED_WDTR && ext_msg.msg_len == MS_WDTR_LEN) { ext_msg.wdtr_width = 0; AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&ext_msg, sizeof(EXT_MSG) >> 1); q_cntl |= QC_MSG_OUT; AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else { ext_msg.msg_type = MESSAGE_REJECT; AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&ext_msg, sizeof(EXT_MSG) >> 1); q_cntl |= QC_MSG_OUT; AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } } else if (int_halt_code == ASC_HALT_CHK_CONDITION) { q_cntl |= QC_REQ_SENSE; if ((asc_dvc->init_sdtr & target_id) != 0) { asc_dvc->sdtr_done &= ~target_id; sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no); q_cntl |= QC_MSG_OUT; AscMsgOutSDTR(asc_dvc, asc_dvc-> sdtr_period_tbl[(sdtr_data >> 4) & (uchar)(asc_dvc-> max_sdtr_index - 1)], (uchar)(sdtr_data & (uchar) ASC_SYN_MAX_OFFSET)); } 
AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); tag_code = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE)); tag_code &= 0xDC; if ((asc_dvc->pci_fix_asyn_xfer & target_id) && !(asc_dvc->pci_fix_asyn_xfer_always & target_id) ) { tag_code |= (ASC_TAG_FLAG_DISABLE_DISCONNECT | ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX); } AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_TAG_CODE), tag_code); q_status = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort) ASC_SCSIQ_B_STATUS)); q_status |= (QS_READY | QS_BUSY); AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_STATUS), q_status); scsi_busy = AscReadLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B); scsi_busy &= ~target_id; AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) { AscMemWordCopyPtrFromLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&out_msg, sizeof(EXT_MSG) >> 1); if ((out_msg.msg_type == EXTENDED_MESSAGE) && (out_msg.msg_len == MS_SDTR_LEN) && (out_msg.msg_req == EXTENDED_SDTR)) { asc_dvc->init_sdtr &= ~target_id; asc_dvc->sdtr_done &= ~target_id; AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } q_cntl &= ~QC_MSG_OUT; AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) { scsi_status = AscReadLramByte(iop_base, (ushort)((ushort)halt_q_addr + (ushort) ASC_SCSIQ_SCSI_STATUS)); cur_dvc_qng = AscReadLramByte(iop_base, (ushort)((ushort)ASC_QADR_BEG + (ushort)target_ix)); if ((cur_dvc_qng > 0) && (asc_dvc->cur_dvc_qng[tid_no] > 0)) { scsi_busy = AscReadLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B); scsi_busy |= target_id; AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); asc_dvc->queue_full_or_busy |= 
target_id; if (scsi_status == SAM_STAT_TASK_SET_FULL) { if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) { cur_dvc_qng -= 1; asc_dvc->max_dvc_qng[tid_no] = cur_dvc_qng; AscWriteLramByte(iop_base, (ushort)((ushort) ASCV_MAX_DVC_QNG_BEG + (ushort) tid_no), cur_dvc_qng); /* * Set the device queue depth to the * number of active requests when the * QUEUE FULL condition was encountered. */ boardp->queue_full |= target_id; boardp->queue_full_cnt[tid_no] = cur_dvc_qng; } } } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } #if CC_VERY_LONG_SG_LIST else if (int_halt_code == ASC_HALT_HOST_COPY_SG_LIST_TO_RISC) { uchar q_no; ushort q_addr; uchar sg_wk_q_no; uchar first_sg_wk_q_no; ASC_SCSI_Q *scsiq; /* Ptr to driver request. */ ASC_SG_HEAD *sg_head; /* Ptr to driver SG request. */ ASC_SG_LIST_Q scsi_sg_q; /* Structure written to queue. */ ushort sg_list_dwords; ushort sg_entry_cnt; uchar next_qp; int i; q_no = AscReadLramByte(iop_base, (ushort)ASCV_REQ_SG_LIST_QP); if (q_no == ASC_QLINK_END) return 0; q_addr = ASC_QNO_TO_QADDR(q_no); /* * Convert the request's SRB pointer to a host ASC_SCSI_REQ * structure pointer using a macro provided by the driver. * The ASC_SCSI_REQ pointer provides a pointer to the * host ASC_SG_HEAD structure. */ /* Read request's SRB pointer. */ scsiq = (ASC_SCSI_Q *) ASC_SRB2SCSIQ(ASC_U32_TO_VADDR(AscReadLramDWord(iop_base, (ushort) (q_addr + ASC_SCSIQ_D_SRBPTR)))); /* * Get request's first and working SG queue. */ sg_wk_q_no = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_SG_WK_QP)); first_sg_wk_q_no = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FIRST_SG_WK_QP)); /* * Reset request's working SG queue back to the * first SG queue. */ AscWriteLramByte(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_SG_WK_QP), first_sg_wk_q_no); sg_head = scsiq->sg_head; /* * Set sg_entry_cnt to the number of SG elements * that will be completed on this interrupt. * * Note: The allocated SG queues contain ASC_MAX_SG_LIST - 1 * SG elements. 
The data_cnt and data_addr fields which * add 1 to the SG element capacity are not used when * restarting SG handling after a halt. */ if (scsiq->remain_sg_entry_cnt > (ASC_MAX_SG_LIST - 1)) { sg_entry_cnt = ASC_MAX_SG_LIST - 1; /* * Keep track of remaining number of SG elements that * will need to be handled on the next interrupt. */ scsiq->remain_sg_entry_cnt -= (ASC_MAX_SG_LIST - 1); } else { sg_entry_cnt = scsiq->remain_sg_entry_cnt; scsiq->remain_sg_entry_cnt = 0; } /* * Copy SG elements into the list of allocated SG queues. * * Last index completed is saved in scsiq->next_sg_index. */ next_qp = first_sg_wk_q_no; q_addr = ASC_QNO_TO_QADDR(next_qp); scsi_sg_q.sg_head_qp = q_no; scsi_sg_q.cntl = QCSG_SG_XFER_LIST; for (i = 0; i < sg_head->queue_cnt; i++) { scsi_sg_q.seq_no = i + 1; if (sg_entry_cnt > ASC_SG_LIST_PER_Q) { sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2); sg_entry_cnt -= ASC_SG_LIST_PER_Q; /* * After very first SG queue RISC FW uses next * SG queue first element then checks sg_list_cnt * against zero and then decrements, so set * sg_list_cnt 1 less than number of SG elements * in each SG queue. */ scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1; scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q - 1; } else { /* * This is the last SG queue in the list of * allocated SG queues. If there are more * SG elements than will fit in the allocated * queues, then set the QCSG_SG_XFER_MORE flag. 
*/ if (scsiq->remain_sg_entry_cnt != 0) { scsi_sg_q.cntl |= QCSG_SG_XFER_MORE; } else { scsi_sg_q.cntl |= QCSG_SG_XFER_END; } /* equals sg_entry_cnt * 2 */ sg_list_dwords = sg_entry_cnt << 1; scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1; scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1; sg_entry_cnt = 0; } scsi_sg_q.q_no = next_qp; AscMemWordCopyPtrToLram(iop_base, q_addr + ASC_SCSIQ_SGHD_CPY_BEG, (uchar *)&scsi_sg_q, sizeof(ASC_SG_LIST_Q) >> 1); AscMemDWordCopyPtrToLram(iop_base, q_addr + ASC_SGQ_LIST_BEG, (uchar *)&sg_head-> sg_list[scsiq->next_sg_index], sg_list_dwords); scsiq->next_sg_index += ASC_SG_LIST_PER_Q; /* * If the just completed SG queue contained the * last SG element, then no more SG queues need * to be written. */ if (scsi_sg_q.cntl & QCSG_SG_XFER_END) { break; } next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FWD)); q_addr = ASC_QNO_TO_QADDR(next_qp); } /* * Clear the halt condition so the RISC will be restarted * after the return. */ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } #endif /* CC_VERY_LONG_SG_LIST */ return (0); } /* * void * DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words) * * Calling/Exit State: * none * * Description: * Input an ASC_QDONE_INFO structure from the chip */ static void DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words) { int i; ushort word; AscSetChipLramAddr(iop_base, s_addr); for (i = 0; i < 2 * words; i += 2) { if (i == 10) { continue; } word = inpw(iop_base + IOP_RAM_DATA); inbuf[i] = word & 0xff; inbuf[i + 1] = (word >> 8) & 0xff; } ASC_DBG_PRT_HEX(2, "DvcGetQinfo", inbuf, 2 * words); } static uchar _AscCopyLramScsiDoneQ(PortAddr iop_base, ushort q_addr, ASC_QDONE_INFO *scsiq, ASC_DCNT max_dma_count) { ushort _val; uchar sg_queue_cnt; DvcGetQinfo(iop_base, q_addr + ASC_SCSIQ_DONE_INFO_BEG, (uchar *)scsiq, (sizeof(ASC_SCSIQ_2) + sizeof(ASC_SCSIQ_3)) / 2); _val = AscReadLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS)); 
scsiq->q_status = (uchar)_val; scsiq->q_no = (uchar)(_val >> 8); _val = AscReadLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_CNTL)); scsiq->cntl = (uchar)_val; sg_queue_cnt = (uchar)(_val >> 8); _val = AscReadLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_SENSE_LEN)); scsiq->sense_len = (uchar)_val; scsiq->extra_bytes = (uchar)(_val >> 8); /* * Read high word of remain bytes from alternate location. */ scsiq->remain_bytes = (((ADV_DCNT)AscReadLramWord(iop_base, (ushort)(q_addr + (ushort) ASC_SCSIQ_W_ALT_DC1))) << 16); /* * Read low word of remain bytes from original location. */ scsiq->remain_bytes += AscReadLramWord(iop_base, (ushort)(q_addr + (ushort) ASC_SCSIQ_DW_REMAIN_XFER_CNT)); scsiq->remain_bytes &= max_dma_count; return sg_queue_cnt; } /* * asc_isr_callback() - Second Level Interrupt Handler called by AscISR(). * * Interrupt callback function for the Narrow SCSI Asc Library. */ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep) { struct asc_board *boardp; struct scsi_cmnd *scp; struct Scsi_Host *shost; ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep); ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep); scp = advansys_srb_to_ptr(asc_dvc_varp, qdonep->d2.srb_ptr); if (!scp) return; ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); shost = scp->device->host; ASC_STATS(shost, callback); ASC_DBG(1, "shost 0x%p\n", shost); boardp = shost_priv(shost); BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var); dma_unmap_single(boardp->dev, scp->SCp.dma_handle, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); /* * 'qdonep' contains the command's ending status. */ switch (qdonep->d3.done_stat) { case QD_NO_ERROR: ASC_DBG(2, "QD_NO_ERROR\n"); scp->result = 0; /* * Check for an underrun condition. * * If there was no error and an underrun condition, then * return the number of underrun bytes. 
*/ if (scsi_bufflen(scp) != 0 && qdonep->remain_bytes != 0 && qdonep->remain_bytes <= scsi_bufflen(scp)) { ASC_DBG(1, "underrun condition %u bytes\n", (unsigned)qdonep->remain_bytes); scsi_set_resid(scp, qdonep->remain_bytes); } break; case QD_WITH_ERROR: ASC_DBG(2, "QD_WITH_ERROR\n"); switch (qdonep->d3.host_stat) { case QHSTA_NO_ERROR: if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) { ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n"); ASC_DBG_PRT_SENSE(2, scp->sense_buffer, SCSI_SENSE_BUFFERSIZE); /* * Note: The 'status_byte()' macro used by * target drivers defined in scsi.h shifts the * status byte returned by host drivers right * by 1 bit. This is why target drivers also * use right shifted status byte definitions. * For instance target drivers use * CHECK_CONDITION, defined to 0x1, instead of * the SCSI defined check condition value of * 0x2. Host drivers are supposed to return * the status byte as it is defined by SCSI. */ scp->result = DRIVER_BYTE(DRIVER_SENSE) | STATUS_BYTE(qdonep->d3.scsi_stat); } else { scp->result = STATUS_BYTE(qdonep->d3.scsi_stat); } break; default: /* QHSTA error occurred */ ASC_DBG(1, "host_stat 0x%x\n", qdonep->d3.host_stat); scp->result = HOST_BYTE(DID_BAD_TARGET); break; } break; case QD_ABORTED_BY_HOST: ASC_DBG(1, "QD_ABORTED_BY_HOST\n"); scp->result = HOST_BYTE(DID_ABORT) | MSG_BYTE(qdonep->d3. scsi_msg) | STATUS_BYTE(qdonep->d3.scsi_stat); break; default: ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat); scp->result = HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3. scsi_msg) | STATUS_BYTE(qdonep->d3.scsi_stat); break; } /* * If the 'init_tidmask' bit isn't already set for the target and the * current request finished normally, then set the bit for the target * to indicate that a device is present. 
*/ if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 && qdonep->d3.done_stat == QD_NO_ERROR && qdonep->d3.host_stat == QHSTA_NO_ERROR) { boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id); } asc_scsi_done(scp); } static int AscIsrQDone(ASC_DVC_VAR *asc_dvc) { uchar next_qp; uchar n_q_used; uchar sg_list_qp; uchar sg_queue_cnt; uchar q_cnt; uchar done_q_tail; uchar tid_no; ASC_SCSI_BIT_ID_TYPE scsi_busy; ASC_SCSI_BIT_ID_TYPE target_id; PortAddr iop_base; ushort q_addr; ushort sg_q_addr; uchar cur_target_qng; ASC_QDONE_INFO scsiq_buf; ASC_QDONE_INFO *scsiq; int false_overrun; iop_base = asc_dvc->iop_base; n_q_used = 1; scsiq = (ASC_QDONE_INFO *)&scsiq_buf; done_q_tail = (uchar)AscGetVarDoneQTail(iop_base); q_addr = ASC_QNO_TO_QADDR(done_q_tail); next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_FWD)); if (next_qp != ASC_QLINK_END) { AscPutVarDoneQTail(iop_base, next_qp); q_addr = ASC_QNO_TO_QADDR(next_qp); sg_queue_cnt = _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count); AscWriteLramByte(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS), (uchar)(scsiq-> q_status & (uchar)~(QS_READY | QS_ABORTED))); tid_no = ASC_TIX_TO_TID(scsiq->d2.target_ix); target_id = ASC_TIX_TO_TARGET_ID(scsiq->d2.target_ix); if ((scsiq->cntl & QC_SG_HEAD) != 0) { sg_q_addr = q_addr; sg_list_qp = next_qp; for (q_cnt = 0; q_cnt < sg_queue_cnt; q_cnt++) { sg_list_qp = AscReadLramByte(iop_base, (ushort)(sg_q_addr + (ushort) ASC_SCSIQ_B_FWD)); sg_q_addr = ASC_QNO_TO_QADDR(sg_list_qp); if (sg_list_qp == ASC_QLINK_END) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SG_Q_LINKS); scsiq->d3.done_stat = QD_WITH_ERROR; scsiq->d3.host_stat = QHSTA_D_QDONE_SG_LIST_CORRUPTED; goto FATAL_ERR_QDONE; } AscWriteLramByte(iop_base, (ushort)(sg_q_addr + (ushort) ASC_SCSIQ_B_STATUS), QS_FREE); } n_q_used = sg_queue_cnt + 1; AscPutVarDoneQTail(iop_base, sg_list_qp); } if (asc_dvc->queue_full_or_busy & target_id) { cur_target_qng = 
AscReadLramByte(iop_base, (ushort)((ushort) ASC_QADR_BEG + (ushort) scsiq->d2. target_ix)); if (cur_target_qng < asc_dvc->max_dvc_qng[tid_no]) { scsi_busy = AscReadLramByte(iop_base, (ushort) ASCV_SCSIBUSY_B); scsi_busy &= ~target_id; AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); asc_dvc->queue_full_or_busy &= ~target_id; } } if (asc_dvc->cur_total_qng >= n_q_used) { asc_dvc->cur_total_qng -= n_q_used; if (asc_dvc->cur_dvc_qng[tid_no] != 0) { asc_dvc->cur_dvc_qng[tid_no]--; } } else { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CUR_QNG); scsiq->d3.done_stat = QD_WITH_ERROR; goto FATAL_ERR_QDONE; } if ((scsiq->d2.srb_ptr == 0UL) || ((scsiq->q_status & QS_ABORTED) != 0)) { return (0x11); } else if (scsiq->q_status == QS_DONE) { false_overrun = FALSE; if (scsiq->extra_bytes != 0) { scsiq->remain_bytes += (ADV_DCNT)scsiq->extra_bytes; } if (scsiq->d3.done_stat == QD_WITH_ERROR) { if (scsiq->d3.host_stat == QHSTA_M_DATA_OVER_RUN) { if ((scsiq-> cntl & (QC_DATA_IN | QC_DATA_OUT)) == 0) { scsiq->d3.done_stat = QD_NO_ERROR; scsiq->d3.host_stat = QHSTA_NO_ERROR; } else if (false_overrun) { scsiq->d3.done_stat = QD_NO_ERROR; scsiq->d3.host_stat = QHSTA_NO_ERROR; } } else if (scsiq->d3.host_stat == QHSTA_M_HUNG_REQ_SCSI_BUS_RESET) { AscStopChip(iop_base); AscSetChipControl(iop_base, (uchar)(CC_SCSI_RESET | CC_HALT)); udelay(60); AscSetChipControl(iop_base, CC_HALT); AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT); AscSetChipStatus(iop_base, 0); AscSetChipControl(iop_base, 0); } } if ((scsiq->cntl & QC_NO_CALLBACK) == 0) { asc_isr_callback(asc_dvc, scsiq); } else { if ((AscReadLramByte(iop_base, (ushort)(q_addr + (ushort) ASC_SCSIQ_CDB_BEG)) == START_STOP)) { asc_dvc->unit_not_ready &= ~target_id; if (scsiq->d3.done_stat != QD_NO_ERROR) { asc_dvc->start_motor &= ~target_id; } } } return (1); } else { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_Q_STATUS); FATAL_ERR_QDONE: if ((scsiq->cntl & QC_NO_CALLBACK) == 0) { asc_isr_callback(asc_dvc, scsiq); } return (0x80); } } 
	return (0);
}

/*
 * Interrupt service routine for the narrow (Asc) library.
 *
 * Returns TRUE if an interrupt was serviced, FALSE if none was
 * pending, or ERR on early-interrupt / critical-section / re-entry
 * conditions or when AscIsrQDone() reports a fatal (0x80) status.
 */
static int AscISR(ASC_DVC_VAR *asc_dvc)
{
	ASC_CS_TYPE chipstat;
	PortAddr iop_base;
	ushort saved_ram_addr;
	uchar ctrl_reg;
	uchar saved_ctrl_reg;
	int int_pending;
	int status;
	uchar host_flag;

	iop_base = asc_dvc->iop_base;
	int_pending = FALSE;

	if (AscIsIntPending(iop_base) == 0)
		return int_pending;

	/*
	 * Reject interrupts that arrive before the microcode finished
	 * loading, while library code is in a critical section, or
	 * re-entrantly.
	 */
	if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) {
		return ERR;
	}
	if (asc_dvc->in_critical_cnt != 0) {
		AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL);
		return ERR;
	}
	if (asc_dvc->is_in_int) {
		AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY);
		return ERR;
	}
	asc_dvc->is_in_int = TRUE;
	ctrl_reg = AscGetChipControl(iop_base);
	/* Mask off control bits that must not be written back at exit. */
	saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET |
				       CC_SINGLE_STEP | CC_DIAG | CC_TEST));
	chipstat = AscGetChipStatus(iop_base);
	if (chipstat & CSW_SCSI_RESET_LATCH) {
		if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) {
			/*
			 * Wait up to ~1 s (10 x 100 ms) for the SCSI
			 * reset to deassert, then clear the latched
			 * reset interrupt.
			 */
			int i = 10;
			int_pending = TRUE;
			asc_dvc->sdtr_done = 0;
			saved_ctrl_reg &= (uchar)(~CC_HALT);
			while ((AscGetChipStatus(iop_base) &
				CSW_SCSI_RESET_ACTIVE) && (i-- > 0)) {
				mdelay(100);
			}
			AscSetChipControl(iop_base, (CC_CHIP_RESET | CC_HALT));
			AscSetChipControl(iop_base, CC_HALT);
			AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
			AscSetChipStatus(iop_base, 0);
			chipstat = AscGetChipStatus(iop_base);
		}
	}
	/* Mark the ISR active in the host-flag LRAM byte; restored below. */
	saved_ram_addr = AscGetChipLramAddr(iop_base);
	host_flag = AscReadLramByte(iop_base,
				    ASCV_HOST_FLAG_B) &
	    (uchar)(~ASC_HOST_FLAG_IN_ISR);
	AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
			 (uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR));
	if ((chipstat & CSW_INT_PENDING) || (int_pending)) {
		AscAckInterrupt(iop_base);
		int_pending = TRUE;
		if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) {
			if (AscIsrChipHalted(asc_dvc) == ERR) {
				goto ISR_REPORT_QDONE_FATAL_ERROR;
			} else {
				saved_ctrl_reg &= (uchar)(~CC_HALT);
			}
		} else {
 ISR_REPORT_QDONE_FATAL_ERROR:
			/*
			 * Drain the done queue.  AscIsrQDone() returns
			 * 1 or 0x11 while entries are being consumed,
			 * 0 when the queue is empty, and sets 0x80 on
			 * fatal errors.
			 */
			if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) {
				while (((status =
					 AscIsrQDone(asc_dvc)) & 0x01) != 0) {
				}
			} else {
				do {
					if ((status =
					     AscIsrQDone(asc_dvc)) == 1) {
						break;
					}
				} while (status == 0x11);
			}
			if ((status & 0x80) != 0)
				int_pending = ERR;
		}
	}
	/* Restore host flag, LRAM address, and control register. */
	AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
	AscSetChipLramAddr(iop_base, saved_ram_addr);
	AscSetChipControl(iop_base, saved_ctrl_reg);
	asc_dvc->is_in_int = FALSE;
	return int_pending;
}

/*
 * advansys_reset()
 *
 * Reset the bus associated with the command 'scp'.
 *
 * This function runs its own thread. Interrupts must be blocked but
 * sleeping is allowed and no locking other than for host structures is
 * required. Returns SUCCESS or FAILED.
 */
static int advansys_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *shost = scp->device->host;
	struct asc_board *boardp = shost_priv(shost);
	unsigned long flags;
	int status;
	int ret = SUCCESS;

	ASC_DBG(1, "0x%p\n", scp);

	ASC_STATS(shost, reset);

	scmd_printk(KERN_INFO, scp, "SCSI bus reset started...\n");

	if (ASC_NARROW_BOARD(boardp)) {
		ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;

		/* Reset the chip and SCSI bus. */
		ASC_DBG(1, "before AscInitAsc1000Driver()\n");
		status = AscInitAsc1000Driver(asc_dvc);

		/* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */
		if (asc_dvc->err_code || !asc_dvc->overrun_dma) {
			scmd_printk(KERN_INFO, scp, "SCSI bus reset error: "
				    "0x%x, status: 0x%x\n",
				    asc_dvc->err_code, status);
			ret = FAILED;
		} else if (status) {
			scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: "
				    "0x%x\n", status);
		} else {
			scmd_printk(KERN_INFO, scp, "SCSI bus reset "
				    "successful\n");
		}

		ASC_DBG(1, "after AscInitAsc1000Driver()\n");
		spin_lock_irqsave(shost->host_lock, flags);
	} else {
		/*
		 * If the suggest reset bus flags are set, then reset the bus.
		 * Otherwise only reset the device.
		 */
		ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;

		/*
		 * Reset the target's SCSI bus.
		 */
		ASC_DBG(1, "before AdvResetChipAndSB()\n");
		switch (AdvResetChipAndSB(adv_dvc)) {
		case ASC_TRUE:
			scmd_printk(KERN_INFO, scp, "SCSI bus reset "
				    "successful\n");
			break;
		case ASC_FALSE:
		default:
			scmd_printk(KERN_INFO, scp, "SCSI bus reset error\n");
			ret = FAILED;
			break;
		}
		spin_lock_irqsave(shost->host_lock, flags);
		/* Service any completions generated by the reset. */
		AdvISR(adv_dvc);
	}

	/* Save the time of the most recently completed reset. */
	boardp->last_reset = jiffies;

	spin_unlock_irqrestore(shost->host_lock, flags);

	ASC_DBG(1, "ret %d\n", ret);

	return ret;
}

/*
 * advansys_biosparam()
 *
 * Translate disk drive geometry if the "BIOS greater than 1 GB"
 * support is enabled for a drive.
 *
 * ip (information pointer) is an int array with the following definition:
 * ip[0]: heads
 * ip[1]: sectors
 * ip[2]: cylinders
 */
static int
advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		   sector_t capacity, int ip[])
{
	struct asc_board *boardp = shost_priv(sdev->host);

	ASC_DBG(1, "begin\n");
	ASC_STATS(sdev->host, biosparam);
	/*
	 * 0x200000 sectors * 512 bytes = 1 GB.  Past that, use the
	 * extended 255-head/63-sector translation when enabled in the
	 * board configuration; otherwise 64 heads / 32 sectors.
	 */
	if (ASC_NARROW_BOARD(boardp)) {
		if ((boardp->dvc_var.asc_dvc_var.dvc_cntl &
		     ASC_CNTL_BIOS_GT_1GB) && capacity > 0x200000) {
			ip[0] = 255;
			ip[1] = 63;
		} else {
			ip[0] = 64;
			ip[1] = 32;
		}
	} else {
		if ((boardp->dvc_var.adv_dvc_var.bios_ctrl &
		     BIOS_CTRL_EXTENDED_XLAT) && capacity > 0x200000) {
			ip[0] = 255;
			ip[1] = 63;
		} else {
			ip[0] = 64;
			ip[1] = 32;
		}
	}
	ip[2] = (unsigned long)capacity / (ip[0] * ip[1]);
	ASC_DBG(1, "end\n");
	return 0;
}

/*
 * First-level interrupt handler.
 *
 * 'dev_id' is a pointer to the interrupting adapter's Scsi_Host.
 */
static irqreturn_t advansys_interrupt(int irq, void *dev_id)
{
	struct Scsi_Host *shost = dev_id;
	struct asc_board *boardp = shost_priv(shost);
	irqreturn_t result = IRQ_NONE;

	ASC_DBG(2, "boardp 0x%p\n", boardp);
	spin_lock(shost->host_lock);
	if (ASC_NARROW_BOARD(boardp)) {
		/*
		 * Narrow board: check for a pending interrupt before
		 * entering the Asc library ISR.
		 */
		if (AscIsIntPending(shost->io_port)) {
			result = IRQ_HANDLED;
			ASC_STATS(shost, interrupt);
			ASC_DBG(1, "before AscISR()\n");
			AscISR(&boardp->dvc_var.asc_dvc_var);
		}
	} else {
		ASC_DBG(1, "before AdvISR()\n");
		/* Wide board: AdvISR() reports whether one was pending. */
		if (AdvISR(&boardp->dvc_var.adv_dvc_var)) {
			result = IRQ_HANDLED;
			ASC_STATS(shost, interrupt);
		}
	}
	spin_unlock(shost->host_lock);
	ASC_DBG(1, "end\n");
	return result;
}

/*
 * Ask the running RISC to halt.
 *
 * Saves the current stop code, writes a host halt request to
 * ASCV_STOP_CODE_B, and polls for up to ~2 seconds (20 x 100 ms) for
 * the chip to report halted; the saved stop code is restored before
 * returning.
 *
 * Returns 1 if the chip is (or becomes) halted, otherwise 0.
 */
static int AscHostReqRiscHalt(PortAddr iop_base)
{
	int count = 0;
	int sta = 0;
	uchar saved_stop_code;

	if (AscIsChipHalted(iop_base))
		return (1);
	saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
	AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
			 ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP);
	do {
		if (AscIsChipHalted(iop_base)) {
			sta = 1;
			break;
		}
		mdelay(100);
	} while (count++ < 20);
	AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code);
	return (sta);
}

/*
 * Program the per-target synchronous transfer register while the RISC
 * is running: halt the RISC, write the register, then restart the
 * chip.  Returns the result of AscSetChipSynRegAtID(), or FALSE if the
 * RISC could not be halted.
 */
static int AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no,
				   uchar sdtr_data)
{
	int sta = FALSE;

	if (AscHostReqRiscHalt(iop_base)) {
		sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
		AscStartChip(iop_base);
	}
	return sta;
}

/*
 * Apply the "asynchronous transfer uses synchronous fix" workaround
 * (ASC_BUG_FIX_ASYN_USE_SYN) for a device.
 *
 * Devices already negotiating SDTR are left alone.  HP devices
 * reporting TYPE_ROM are marked to always receive the fix; processor,
 * scanner, ROM, and tape devices are otherwise excluded.  When the fix
 * applies, the chip's SYN register for the target is programmed with
 * ASYN_SDTR_DATA_FIX_PCI_REV_AB -- presumably a value for early PCI
 * chip revisions (rev A/B), as the name suggests; confirm against the
 * chip errata.
 */
static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev)
{
	char type = sdev->type;
	ASC_SCSI_BIT_ID_TYPE tid_bits = 1 << sdev->id;

	if (!(asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ASYN_USE_SYN))
		return;
	if (asc_dvc->init_sdtr & tid_bits)
		return;

	if ((type == TYPE_ROM) &&
	    (strncmp(sdev->vendor, "HP ", 3) == 0))
		asc_dvc->pci_fix_asyn_xfer_always |= tid_bits;

	asc_dvc->pci_fix_asyn_xfer |= tid_bits;
	if ((type == TYPE_PROCESSOR) || (type == TYPE_SCANNER) ||
	    (type == TYPE_ROM) || (type == TYPE_TAPE))
		asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;

	if (asc_dvc->pci_fix_asyn_xfer & tid_bits)
		AscSetRunChipSynRegAtID(asc_dvc->iop_base,
					sdev->id,
					ASYN_SDTR_DATA_FIX_PCI_REV_AB);
}

/*
 * Per-device setup for narrow (Asc) boards, called at slave-configure
 * time.
 *
 * Per-target state (SDTR enable, tagged queuing masks) is only changed
 * for LUN 0; the queue depth is adjusted for every LUN.  When the
 * use_tagged_qng mask changes, the updated disconnect/tag masks and
 * the per-target maximum queue depth are written to chip LRAM.
 */
static void
advansys_narrow_slave_configure(struct scsi_device *sdev,
				ASC_DVC_VAR *asc_dvc)
{
	ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id;
	ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng;

	if (sdev->lun == 0) {
		ASC_SCSI_BIT_ID_TYPE orig_init_sdtr = asc_dvc->init_sdtr;
		if ((asc_dvc->cfg->sdtr_enable & tid_bit) && sdev->sdtr) {
			asc_dvc->init_sdtr |= tid_bit;
		} else {
			asc_dvc->init_sdtr &= ~tid_bit;
		}

		/* SDTR eligibility changed: re-evaluate the async fix. */
		if (orig_init_sdtr != asc_dvc->init_sdtr)
			AscAsyncFix(asc_dvc, sdev);
	}

	if (sdev->tagged_supported) {
		if (asc_dvc->cfg->cmd_qng_enabled & tid_bit) {
			if (sdev->lun == 0) {
				asc_dvc->cfg->can_tagged_qng |= tid_bit;
				asc_dvc->use_tagged_qng |= tid_bit;
			}
			scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
						asc_dvc->
						max_dvc_qng[sdev->id]);
		}
	} else {
		if (sdev->lun == 0) {
			asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
			asc_dvc->use_tagged_qng &= ~tid_bit;
		}
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	}

	if ((sdev->lun == 0) &&
	    (orig_use_tagged_qng != asc_dvc->use_tagged_qng)) {
		/* Push the updated masks and queue depth to the microcode. */
		AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B,
				 asc_dvc->cfg->disc_enable);
		AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B,
				 asc_dvc->use_tagged_qng);
		AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B,
				 asc_dvc->cfg->can_tagged_qng);

		asc_dvc->max_dvc_qng[sdev->id] =
		    asc_dvc->cfg->max_tag_qng[sdev->id];
		AscWriteLramByte(asc_dvc->iop_base,
				 (ushort)(ASCV_MAX_DVC_QNG_BEG + sdev->id),
				 asc_dvc->max_dvc_qng[sdev->id]);
	}
}

/*
 * Wide Transfers
 *
 * If the EEPROM enabled WDTR for the device and the device supports wide
 * bus (16 bit) transfers, then turn on the device's 'wdtr_able' bit and
 * write the new value to the microcode.
 */
static void advansys_wide_enable_wdtr(AdvPortAddr iop_base,
                                      unsigned short tidmask)
{
    unsigned short cfg_word;

    AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
    if ((cfg_word & tidmask) != 0)
        return;             /* already enabled for this target */

    cfg_word |= tidmask;
    AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);

    /*
     * Clear the microcode SDTR and WDTR negotiation done indicators for
     * the target to cause it to negotiate with the new setting set above.
     * WDTR when accepted causes the target to enter asynchronous mode, so
     * SDTR must be negotiated.
     */
    AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
    cfg_word &= ~tidmask;
    AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);

    AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
    cfg_word &= ~tidmask;
    AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
}

/*
 * Synchronous Transfers
 *
 * If the EEPROM enabled SDTR for the device and the device
 * supports synchronous transfers, then turn on the device's
 * 'sdtr_able' bit. Write the new value to the microcode.
 */
static void advansys_wide_enable_sdtr(AdvPortAddr iop_base,
                                      unsigned short tidmask)
{
    unsigned short cfg_word;

    AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
    if ((cfg_word & tidmask) != 0)
        return;             /* already enabled for this target */

    cfg_word |= tidmask;
    AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);

    /*
     * Clear the microcode "SDTR negotiation" done indicator for the
     * target to cause it to negotiate with the new setting set above.
     */
    AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
    cfg_word &= ~tidmask;
    AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
}

/*
 * PPR (Parallel Protocol Request) Capable
 *
 * If the device supports DT mode, then it must be PPR capable.
 * The PPR message will be used in place of the SDTR and WDTR
 * messages to negotiate synchronous speed and offset, transfer
 * width, and protocol options.
 */
static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc,
                                     AdvPortAddr iop_base,
                                     unsigned short tidmask)
{
    /* Cache the microcode's PPR-able mask in the driver as well. */
    AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
    adv_dvc->ppr_able |= tidmask;
    AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
}

/*
 * Per-device configuration for wide boards: enable WDTR, SDTR, PPR and
 * tagged queuing in the microcode when both the EEPROM setting and the
 * device capability allow it, and set the mid-layer queue depth.
 */
static void advansys_wide_slave_configure(struct scsi_device *sdev,
                                          ADV_DVC_VAR *adv_dvc)
{
    AdvPortAddr iop_base = adv_dvc->iop_base;
    unsigned short tidmask = 1 << sdev->id;

    if (sdev->lun == 0) {
        /*
         * Handle WDTR, SDTR, and Tag Queuing. If the feature
         * is enabled in the EEPROM and the device supports the
         * feature, then enable it in the microcode.
         */
        if ((adv_dvc->wdtr_able & tidmask) && sdev->wdtr)
            advansys_wide_enable_wdtr(iop_base, tidmask);
        if ((adv_dvc->sdtr_able & tidmask) && sdev->sdtr)
            advansys_wide_enable_sdtr(iop_base, tidmask);
        if (adv_dvc->chip_type == ADV_CHIP_ASC38C1600 && sdev->ppr)
            advansys_wide_enable_ppr(adv_dvc, iop_base, tidmask);

        /*
         * Tag Queuing is disabled for the BIOS which runs in polled
         * mode and would see no benefit from Tag Queuing. Also by
         * disabling Tag Queuing in the BIOS devices with Tag Queuing
         * bugs will at least work with the BIOS.
         */
        if ((adv_dvc->tagqng_able & tidmask) &&
            sdev->tagged_supported) {
            unsigned short cfg_word;
            AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word);
            cfg_word |= tidmask;
            AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
                             cfg_word);

            AdvWriteByteLram(iop_base,
                             ASC_MC_NUMBER_OF_MAX_CMD + sdev->id,
                             adv_dvc->max_dvc_qng);
        }
    }

    if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) {
        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
                                adv_dvc->max_dvc_qng);
    } else {
        scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
    }
}

/*
 * Set the number of commands to queue per device for the
 * specified host adapter.
 */
static int advansys_slave_configure(struct scsi_device *sdev)
{
    struct asc_board *boardp = shost_priv(sdev->host);

    /* Dispatch to the narrow- or wide-board variant. Always succeeds. */
    if (ASC_NARROW_BOARD(boardp))
        advansys_narrow_slave_configure(sdev,
                                        &boardp->dvc_var.asc_dvc_var);
    else
        advansys_wide_slave_configure(sdev,
                                      &boardp->dvc_var.adv_dvc_var);

    return 0;
}

/*
 * Map the command's sense buffer for device access and return the bus
 * address as a little-endian 32-bit value. The handle is stashed in
 * scp->SCp.dma_handle.
 *
 * NOTE(review): the dma_map_single() result is not checked with
 * dma_mapping_error() before use — verify against the DMA API rules.
 */
static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp)
{
    struct asc_board *board = shost_priv(scp->device->host);

    scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
                                         SCSI_SENSE_BUFFERSIZE,
                                         DMA_FROM_DEVICE);
    dma_cache_sync(board->dev, scp->sense_buffer,
                   SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
    return cpu_to_le32(scp->SCp.dma_handle);
}

/*
 * Build an ASC_SCSI_Q (narrow board) request from a struct scsi_cmnd.
 *
 * On success the scatter-gather head (if any) is kzalloc'd and attached
 * to asc_scsi_q->sg_head; the caller frees it (asc_execute_scsi_cmnd()
 * does kfree(asc_scsi_q.sg_head) after queuing).
 *
 * Return: ASC_NOERROR on success, ASC_ERROR on failure (scp->result is
 * set accordingly). This function does not return ASC_BUSY.
 */
static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
                         struct asc_scsi_q *asc_scsi_q)
{
    struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var;
    int use_sg;

    memset(asc_scsi_q, 0, sizeof(*asc_scsi_q));

    /*
     * Point the ASC_SCSI_Q to the 'struct scsi_cmnd'.
     */
    asc_scsi_q->q2.srb_ptr = advansys_ptr_to_srb(asc_dvc, scp);
    if (asc_scsi_q->q2.srb_ptr == BAD_SRB) {
        scp->result = HOST_BYTE(DID_SOFT_ERROR);
        return ASC_ERROR;
    }

    /*
     * Build the ASC_SCSI_Q request.
     */
    asc_scsi_q->cdbptr = &scp->cmnd[0];
    asc_scsi_q->q2.cdb_len = scp->cmd_len;
    asc_scsi_q->q1.target_id = ASC_TID_TO_TARGET_ID(scp->device->id);
    asc_scsi_q->q1.target_lun = scp->device->lun;
    asc_scsi_q->q2.target_ix =
        ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun);
    asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp);
    asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE;

    /*
     * If there are any outstanding requests for the current target,
     * then every 255th request send an ORDERED request. This heuristic
     * tries to retain the benefit of request sorting while preventing
     * request starvation. 255 is the max number of tags or pending commands
     * a device may have outstanding.
     *
     * The request count is incremented below for every successfully
     * started request.
     *
     */
    if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) &&
        (boardp->reqcnt[scp->device->id] % 255) == 0) {
        asc_scsi_q->q2.tag_code = MSG_ORDERED_TAG;
    } else {
        asc_scsi_q->q2.tag_code = MSG_SIMPLE_TAG;
    }

    /* Build ASC_SCSI_Q */
    use_sg = scsi_dma_map(scp);
    /*
     * NOTE(review): scsi_dma_map() can return a negative value on
     * mapping failure; that case falls into the SG path below —
     * confirm the intended handling.
     */
    if (use_sg != 0) {
        int sgcnt;
        struct scatterlist *slp;
        struct asc_sg_head *asc_sg_head;

        if (use_sg > scp->device->host->sg_tablesize) {
            scmd_printk(KERN_ERR, scp, "use_sg %d > "
                        "sg_tablesize %d\n", use_sg,
                        scp->device->host->sg_tablesize);
            scsi_dma_unmap(scp);
            scp->result = HOST_BYTE(DID_ERROR);
            return ASC_ERROR;
        }

        /*
         * NOTE(review): sizeof(asc_scsi_q->sg_head) looks like the
         * size of the 'sg_head' member, not necessarily of
         * struct asc_sg_head — verify against the structure
         * declaration.
         */
        asc_sg_head = kzalloc(sizeof(asc_scsi_q->sg_head) +
                              use_sg * sizeof(struct asc_sg_list),
                              GFP_ATOMIC);
        if (!asc_sg_head) {
            scsi_dma_unmap(scp);
            scp->result = HOST_BYTE(DID_SOFT_ERROR);
            return ASC_ERROR;
        }

        asc_scsi_q->q1.cntl |= QC_SG_HEAD;
        asc_scsi_q->sg_head = asc_sg_head;
        asc_scsi_q->q1.data_cnt = 0;
        asc_scsi_q->q1.data_addr = 0;
        /* This is a byte value, otherwise it would need to be swapped. */
        asc_sg_head->entry_cnt = asc_scsi_q->q1.sg_queue_cnt = use_sg;
        ASC_STATS_ADD(scp->device->host, xfer_elem,
                      asc_sg_head->entry_cnt);

        /*
         * Convert scatter-gather list into ASC_SG_HEAD list.
         */
        scsi_for_each_sg(scp, slp, use_sg, sgcnt) {
            asc_sg_head->sg_list[sgcnt].addr =
                cpu_to_le32(sg_dma_address(slp));
            asc_sg_head->sg_list[sgcnt].bytes =
                cpu_to_le32(sg_dma_len(slp));
            ASC_STATS_ADD(scp->device->host, xfer_sect,
                          DIV_ROUND_UP(sg_dma_len(slp), 512));
        }
    }

    ASC_STATS(scp->device->host, xfer_cnt);

    ASC_DBG_PRT_ASC_SCSI_Q(2, asc_scsi_q);
    ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);

    return ASC_NOERROR;
}

/*
 * Build scatter-gather list for Adv Library (Wide Board).
 *
 * Additional ADV_SG_BLOCK structures will need to be allocated
 * if the total number of scatter-gather elements exceeds
 * NO_OF_SG_PER_BLOCK (15). The ADV_SG_BLOCK structures are
 * assumed to be physically contiguous.
 *
 * Return:
 *      ADV_SUCCESS(1) - SG List successfully created
 *      ADV_ERROR(-1) - SG List creation failed
 */
static int
adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp,
               struct scsi_cmnd *scp, int use_sg)
{
    adv_sgblk_t *sgblkp;
    ADV_SCSI_REQ_Q *scsiqp;
    struct scatterlist *slp;
    int sg_elem_cnt;
    ADV_SG_BLOCK *sg_block, *prev_sg_block;
    ADV_PADDR sg_block_paddr;
    int i;

    scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);
    slp = scsi_sglist(scp);
    sg_elem_cnt = use_sg;
    prev_sg_block = NULL;
    reqp->sgblkp = NULL;

    for (;;) {
        /*
         * Allocate a 'adv_sgblk_t' structure from the board free
         * list. One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK
         * (15) scatter-gather elements.
         */
        if ((sgblkp = boardp->adv_sgblkp) == NULL) {
            ASC_DBG(1, "no free adv_sgblk_t\n");
            ASC_STATS(scp->device->host, adv_build_nosg);

            /*
             * Allocation failed. Free 'adv_sgblk_t' structures
             * already allocated for the request.
             */
            while ((sgblkp = reqp->sgblkp) != NULL) {
                /* Remove 'sgblkp' from the request list. */
                reqp->sgblkp = sgblkp->next_sgblkp;

                /* Add 'sgblkp' to the board free list. */
                sgblkp->next_sgblkp = boardp->adv_sgblkp;
                boardp->adv_sgblkp = sgblkp;
            }
            return ASC_BUSY;
        }

        /* Complete 'adv_sgblk_t' board allocation. */
        boardp->adv_sgblkp = sgblkp->next_sgblkp;
        sgblkp->next_sgblkp = NULL;

        /*
         * Get 8 byte aligned virtual and physical addresses
         * for the allocated ADV_SG_BLOCK structure.
         */
        sg_block = (ADV_SG_BLOCK *)ADV_8BALIGN(&sgblkp->sg_block);
        sg_block_paddr = virt_to_bus(sg_block);

        /*
         * Check if this is the first 'adv_sgblk_t' for the
         * request.
         */
        if (reqp->sgblkp == NULL) {
            /* Request's first scatter-gather block. */
            reqp->sgblkp = sgblkp;

            /*
             * Set ADV_SCSI_REQ_T ADV_SG_BLOCK virtual and physical
             * address pointers.
             */
            scsiqp->sg_list_ptr = sg_block;
            scsiqp->sg_real_addr = cpu_to_le32(sg_block_paddr);
        } else {
            /* Request's second or later scatter-gather block.
             */
            sgblkp->next_sgblkp = reqp->sgblkp;
            reqp->sgblkp = sgblkp;

            /*
             * Point the previous ADV_SG_BLOCK structure to
             * the newly allocated ADV_SG_BLOCK structure.
             */
            prev_sg_block->sg_ptr = cpu_to_le32(sg_block_paddr);
        }

        for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
            sg_block->sg_list[i].sg_addr =
                cpu_to_le32(sg_dma_address(slp));
            sg_block->sg_list[i].sg_count =
                cpu_to_le32(sg_dma_len(slp));
            ASC_STATS_ADD(scp->device->host, xfer_sect,
                          DIV_ROUND_UP(sg_dma_len(slp), 512));

            if (--sg_elem_cnt == 0) {
                /* Last ADV_SG_BLOCK and scatter-gather entry. */
                sg_block->sg_cnt = i + 1;
                sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
                return ADV_SUCCESS;
            }
            slp++;
        }
        sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
        prev_sg_block = sg_block;
    }
}

/*
 * Build a request structure for the Adv Library (Wide Board).
 *
 * If an adv_req_t can not be allocated to issue the request,
 * then return ASC_BUSY. If an error occurs, then return ASC_ERROR.
 *
 * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the
 * microcode for DMA addresses or math operations are byte swapped
 * to little-endian order.
 */
static int adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
                         ADV_SCSI_REQ_Q **adv_scsiqpp)
{
    adv_req_t *reqp;
    ADV_SCSI_REQ_Q *scsiqp;
    int i;
    int ret;
    int use_sg;

    /*
     * Allocate an adv_req_t structure from the board to execute
     * the command.
     */
    if (boardp->adv_reqp == NULL) {
        ASC_DBG(1, "no free adv_req_t\n");
        ASC_STATS(scp->device->host, adv_build_noreq);
        return ASC_BUSY;
    } else {
        reqp = boardp->adv_reqp;
        boardp->adv_reqp = reqp->next_reqp;
        reqp->next_reqp = NULL;
    }

    /*
     * Get 32-byte aligned ADV_SCSI_REQ_Q and ADV_SG_BLOCK pointers.
     */
    scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);

    /*
     * Initialize the structure.
     */
    scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0;

    /*
     * Set the ADV_SCSI_REQ_Q 'srb_ptr' to point to the adv_req_t structure.
     */
    scsiqp->srb_ptr = ADV_VADDR_TO_U32(reqp);

    /*
     * Set the adv_req_t 'cmndp' to point to the struct scsi_cmnd structure.
     */
    reqp->cmndp = scp;

    /*
     * Build the ADV_SCSI_REQ_Q request.
     */

    /* Set CDB length and copy it to the request structure.  */
    scsiqp->cdb_len = scp->cmd_len;
    /* Copy first 12 CDB bytes to cdb[]. */
    for (i = 0; i < scp->cmd_len && i < 12; i++) {
        scsiqp->cdb[i] = scp->cmnd[i];
    }
    /* Copy last 4 CDB bytes, if present, to cdb16[]. */
    for (; i < scp->cmd_len; i++) {
        scsiqp->cdb16[i - 12] = scp->cmnd[i];
    }

    scsiqp->target_id = scp->device->id;
    scsiqp->target_lun = scp->device->lun;

    scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0]));
    scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;

    /* Build ADV_SCSI_REQ_Q */
    use_sg = scsi_dma_map(scp);
    if (use_sg == 0) {
        /* Zero-length transfer */
        reqp->sgblkp = NULL;
        scsiqp->data_cnt = 0;
        scsiqp->vdata_addr = NULL;

        scsiqp->data_addr = 0;
        scsiqp->sg_list_ptr = NULL;
        scsiqp->sg_real_addr = 0;
    } else {
        /*
         * NOTE(review): the message text says ADV_MAX_SG_LIST but
         * prints sg_tablesize — the two may differ; verify.
         */
        if (use_sg > ADV_MAX_SG_LIST) {
            scmd_printk(KERN_ERR, scp, "use_sg %d > "
                        "ADV_MAX_SG_LIST %d\n", use_sg,
                        scp->device->host->sg_tablesize);
            scsi_dma_unmap(scp);
            scp->result = HOST_BYTE(DID_ERROR);

            /*
             * Free the 'adv_req_t' structure by adding it back
             * to the board free list.
             */
            reqp->next_reqp = boardp->adv_reqp;
            boardp->adv_reqp = reqp;

            return ASC_ERROR;
        }

        scsiqp->data_cnt = cpu_to_le32(scsi_bufflen(scp));

        ret = adv_get_sglist(boardp, reqp, scp, use_sg);
        if (ret != ADV_SUCCESS) {
            /*
             * Free the adv_req_t structure by adding it back to
             * the board free list.
 */
            reqp->next_reqp = boardp->adv_reqp;
            boardp->adv_reqp = reqp;

            return ret;
        }

        ASC_STATS_ADD(scp->device->host, xfer_elem, use_sg);
    }

    ASC_STATS(scp->device->host, xfer_cnt);

    ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
    ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);

    *adv_scsiqpp = scsiqp;

    return ASC_NOERROR;
}

/*
 * Number of microcode queues needed for 'sg_list' scatter-gather
 * elements: one head queue plus ceil((sg_list - 1) / ASC_SG_LIST_PER_Q)
 * SG-list queues (the first element lives in the head queue).
 */
static int AscSgListToQueue(int sg_list)
{
    int n_sg_list_qs;

    n_sg_list_qs = ((sg_list - 1) / ASC_SG_LIST_PER_Q);
    if (((sg_list - 1) % ASC_SG_LIST_PER_Q) != 0)
        n_sg_list_qs++;
    return n_sg_list_qs + 1;
}

/*
 * Return the number of free microcode queues available for the target,
 * or 0 if the target is not ready / queue-full, the per-device queue
 * limit is reached, or fewer than n_qs queues remain.
 *
 * For a multi-queue request that cannot be satisfied, remember the
 * shortage in 'last_q_shortage' so single-queue requests reserve room
 * for it.
 */
static uint
AscGetNumOfFreeQueue(ASC_DVC_VAR *asc_dvc, uchar target_ix, uchar n_qs)
{
    uint cur_used_qs;
    uint cur_free_qs;
    ASC_SCSI_BIT_ID_TYPE target_id;
    uchar tid_no;

    target_id = ASC_TIX_TO_TARGET_ID(target_ix);
    tid_no = ASC_TIX_TO_TID(target_ix);
    if ((asc_dvc->unit_not_ready & target_id) ||
        (asc_dvc->queue_full_or_busy & target_id)) {
        return 0;
    }
    if (n_qs == 1) {
        cur_used_qs = (uint) asc_dvc->cur_total_qng +
            (uint) asc_dvc->last_q_shortage + (uint) ASC_MIN_FREE_Q;
    } else {
        cur_used_qs = (uint) asc_dvc->cur_total_qng +
            (uint) ASC_MIN_FREE_Q;
    }
    if ((uint) (cur_used_qs + n_qs) <= (uint) asc_dvc->max_total_qng) {
        cur_free_qs = (uint) asc_dvc->max_total_qng - cur_used_qs;
        if (asc_dvc->cur_dvc_qng[tid_no] >=
            asc_dvc->max_dvc_qng[tid_no]) {
            return 0;
        }
        return cur_free_qs;
    }
    if (n_qs > 1) {
        if ((n_qs > asc_dvc->last_q_shortage) &&
            (n_qs <= (asc_dvc->max_total_qng - ASC_MIN_FREE_Q))) {
            asc_dvc->last_q_shortage = n_qs;
        }
    }
    return 0;
}

/*
 * Follow the free-queue list one step from 'free_q_head'. Returns the
 * next queue number if the current queue is not READY and a successor
 * exists, otherwise ASC_QLINK_END.
 */
static uchar AscAllocFreeQueue(PortAddr iop_base, uchar free_q_head)
{
    ushort q_addr;
    uchar next_qp;
    uchar q_status;

    q_addr = ASC_QNO_TO_QADDR(free_q_head);
    q_status = (uchar)AscReadLramByte(iop_base,
                                      (ushort)(q_addr + ASC_SCSIQ_B_STATUS));
    next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FWD));
    if (((q_status & QS_READY) == 0) && (next_qp != ASC_QLINK_END))
        return next_qp;
    return ASC_QLINK_END;
}

/*
 * Walk 'n_free_q' steps along the free-queue list; returns the final
 * queue number, or ASC_QLINK_END if the chain runs out early.
 */
static uchar
AscAllocMultipleFreeQueue(PortAddr iop_base, uchar free_q_head,
                          uchar n_free_q)
{
    uchar i;

    for (i = 0; i < n_free_q; i++) {
        free_q_head = AscAllocFreeQueue(iop_base, free_q_head);
        if (free_q_head == ASC_QLINK_END)
            break;
    }
    return free_q_head;
}

/*
 * void
 * DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words)
 *
 * Calling/Exit State:
 *      none
 *
 * Description:
 *      Output an ASC_SCSI_Q structure to the chip.
 *
 *      Byte offsets 4-5 and 20-21 of the buffer are skipped (not
 *      written) — presumably host-only fields within the structure;
 *      verify against the ASC_SCSI_Q layout.
 */
static void
DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words)
{
    int i;

    ASC_DBG_PRT_HEX(2, "DvcPutScsiQ", outbuf, 2 * words);
    AscSetChipLramAddr(iop_base, s_addr);
    for (i = 0; i < 2 * words; i += 2) {
        if (i == 4 || i == 20) {
            continue;
        }
        outpw(iop_base + IOP_RAM_DATA,
              ((ushort)outbuf[i + 1] << 8) | outbuf[i]);
    }
}

/*
 * Copy a single ASC_SCSI_Q into microcode LRAM queue 'q_no' and mark it
 * READY. If the target needs (re)negotiation of SDTR, an SDTR message
 * is queued first and QC_MSG_OUT is set.
 */
static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
{
    ushort q_addr;
    uchar tid_no;
    uchar sdtr_data;
    uchar syn_period_ix;
    uchar syn_offset;
    PortAddr iop_base;

    iop_base = asc_dvc->iop_base;
    if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) &&
        ((asc_dvc->sdtr_done & scsiq->q1.target_id) == 0)) {
        tid_no = ASC_TIX_TO_TID(scsiq->q2.target_ix);

        sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);

        syn_period_ix =
            (sdtr_data >> 4) & (asc_dvc->max_sdtr_index - 1);
        syn_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
        AscMsgOutSDTR(asc_dvc,
                      asc_dvc->sdtr_period_tbl[syn_period_ix],
                      syn_offset);
        scsiq->q1.cntl |= QC_MSG_OUT;
    }
    q_addr = ASC_QNO_TO_QADDR(q_no);
    if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
        scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
    }
    scsiq->q1.status = QS_FREE;
    AscMemWordCopyPtrToLram(iop_base,
                            q_addr + ASC_SCSIQ_CDB_BEG,
                            (uchar *)scsiq->cdbptr, scsiq->q2.cdb_len >> 1);

    DvcPutScsiQ(iop_base,
                q_addr + ASC_SCSIQ_CPY_BEG,
                (uchar *)&scsiq->q1.cntl,
                ((sizeof(ASC_SCSIQ_1) + sizeof(ASC_SCSIQ_2)) / 2) - 1);

    /* Write status last: q_no in the high byte, QS_READY in the low. */
    AscWriteLramWord(iop_base,
                     (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS),
                     (ushort)(((ushort)scsiq->q1.
q_no << 8) | (ushort)QS_READY));
    return 1;
}

/*
 * Copy a scatter-gather ASC_SCSI_Q into microcode LRAM: the first SG
 * element goes into the head queue's data_addr/data_cnt, the remaining
 * elements are distributed across ASC_SG_LIST_PER_Q-sized SG-list
 * queues linked from the head queue. The request's original
 * data_addr/data_cnt are restored before returning.
 */
static int
AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
{
    int sta;
    int i;
    ASC_SG_HEAD *sg_head;
    ASC_SG_LIST_Q scsi_sg_q;
    ASC_DCNT saved_data_addr;
    ASC_DCNT saved_data_cnt;
    PortAddr iop_base;
    ushort sg_list_dwords;
    ushort sg_index;
    ushort sg_entry_cnt;
    ushort q_addr;
    uchar next_qp;

    iop_base = asc_dvc->iop_base;
    sg_head = scsiq->sg_head;
    saved_data_addr = scsiq->q1.data_addr;
    saved_data_cnt = scsiq->q1.data_cnt;
    scsiq->q1.data_addr = (ASC_PADDR) sg_head->sg_list[0].addr;
    scsiq->q1.data_cnt = (ASC_DCNT) sg_head->sg_list[0].bytes;
#if CC_VERY_LONG_SG_LIST
    /*
     * If sg_head->entry_cnt is greater than ASC_MAX_SG_LIST
     * then not all SG elements will fit in the allocated queues.
     * The rest of the SG elements will be copied when the RISC
     * completes the SG elements that fit and halts.
     */
    if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
        /*
         * Set sg_entry_cnt to be the number of SG elements that
         * will fit in the allocated SG queues. It is minus 1, because
         * the first SG element is handled above. ASC_MAX_SG_LIST is
         * already inflated by 1 to account for this. For example it
         * may be 50 which is 1 + 7 queues * 7 SG elements.
         */
        sg_entry_cnt = ASC_MAX_SG_LIST - 1;

        /*
         * Keep track of remaining number of SG elements that will
         * need to be handled from a_isr.c.
         */
        scsiq->remain_sg_entry_cnt =
            sg_head->entry_cnt - ASC_MAX_SG_LIST;
    } else {
#endif /* CC_VERY_LONG_SG_LIST */
        /*
         * Set sg_entry_cnt to be the number of SG elements that
         * will fit in the allocated SG queues. It is minus 1, because
         * the first SG element is handled above.
         */
        sg_entry_cnt = sg_head->entry_cnt - 1;
#if CC_VERY_LONG_SG_LIST
    }
#endif /* CC_VERY_LONG_SG_LIST */
    if (sg_entry_cnt != 0) {
        scsiq->q1.cntl |= QC_SG_HEAD;
        q_addr = ASC_QNO_TO_QADDR(q_no);
        sg_index = 1;
        scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
        scsi_sg_q.sg_head_qp = q_no;
        scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
        for (i = 0; i < sg_head->queue_cnt; i++) {
            scsi_sg_q.seq_no = i + 1;
            if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
                sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2);
                sg_entry_cnt -= ASC_SG_LIST_PER_Q;
                if (i == 0) {
                    scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q;
                    scsi_sg_q.sg_cur_list_cnt =
                        ASC_SG_LIST_PER_Q;
                } else {
                    scsi_sg_q.sg_list_cnt =
                        ASC_SG_LIST_PER_Q - 1;
                    scsi_sg_q.sg_cur_list_cnt =
                        ASC_SG_LIST_PER_Q - 1;
                }
            } else {
#if CC_VERY_LONG_SG_LIST
                /*
                 * This is the last SG queue in the list of
                 * allocated SG queues. If there are more
                 * SG elements than will fit in the allocated
                 * queues, then set the QCSG_SG_XFER_MORE flag.
                 */
                if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
                    scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
                } else {
#endif /* CC_VERY_LONG_SG_LIST */
                    scsi_sg_q.cntl |= QCSG_SG_XFER_END;
#if CC_VERY_LONG_SG_LIST
                }
#endif /* CC_VERY_LONG_SG_LIST */
                sg_list_dwords = sg_entry_cnt << 1;
                if (i == 0) {
                    scsi_sg_q.sg_list_cnt = sg_entry_cnt;
                    scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt;
                } else {
                    scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1;
                    scsi_sg_q.sg_cur_list_cnt =
                        sg_entry_cnt - 1;
                }
                sg_entry_cnt = 0;
            }
            next_qp = AscReadLramByte(iop_base,
                                      (ushort)(q_addr +
                                               ASC_SCSIQ_B_FWD));
            scsi_sg_q.q_no = next_qp;
            q_addr = ASC_QNO_TO_QADDR(next_qp);
            AscMemWordCopyPtrToLram(iop_base,
                                    q_addr + ASC_SCSIQ_SGHD_CPY_BEG,
                                    (uchar *)&scsi_sg_q,
                                    sizeof(ASC_SG_LIST_Q) >> 1);
            AscMemDWordCopyPtrToLram(iop_base,
                                     q_addr + ASC_SGQ_LIST_BEG,
                                     (uchar *)&sg_head->
                                     sg_list[sg_index],
                                     sg_list_dwords);
            sg_index += ASC_SG_LIST_PER_Q;
            scsiq->next_sg_index = sg_index;
        }
    } else {
        scsiq->q1.cntl &= ~QC_SG_HEAD;
    }
    sta = AscPutReadyQueue(asc_dvc, scsiq, q_no);
    scsiq->q1.data_addr = saved_data_addr;
    scsiq->q1.data_cnt =
saved_data_cnt;
    return (sta);
}

/*
 * Allocate 'n_q_required' microcode queues and copy the request into
 * them (SG-list variant when more than one queue is needed). On
 * success (sta == 1) advance the free-queue head and bump the total and
 * per-target outstanding-request counters.
 */
static int
AscSendScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar n_q_required)
{
    PortAddr iop_base;
    uchar free_q_head;
    uchar next_qp;
    uchar tid_no;
    uchar target_ix;
    int sta;

    iop_base = asc_dvc->iop_base;
    target_ix = scsiq->q2.target_ix;
    tid_no = ASC_TIX_TO_TID(target_ix);
    sta = 0;
    free_q_head = (uchar)AscGetVarFreeQHead(iop_base);
    if (n_q_required > 1) {
        next_qp = AscAllocMultipleFreeQueue(iop_base, free_q_head,
                                            (uchar)n_q_required);
        if (next_qp != ASC_QLINK_END) {
            asc_dvc->last_q_shortage = 0;
            scsiq->sg_head->queue_cnt = n_q_required - 1;
            scsiq->q1.q_no = free_q_head;
            sta = AscPutReadySgListQueue(asc_dvc, scsiq,
                                         free_q_head);
        }
    } else if (n_q_required == 1) {
        next_qp = AscAllocFreeQueue(iop_base, free_q_head);
        if (next_qp != ASC_QLINK_END) {
            scsiq->q1.q_no = free_q_head;
            sta = AscPutReadyQueue(asc_dvc, scsiq, free_q_head);
        }
    }
    if (sta == 1) {
        AscPutVarFreeQHead(iop_base, next_qp);
        asc_dvc->cur_total_qng += n_q_required;
        asc_dvc->cur_dvc_qng[tid_no]++;
    }
    return sta;
}

#define ASC_SYN_OFFSET_ONE_DISABLE_LIST 16

/*
 * SCSI commands for which the "sync offset one" fix is disabled;
 * 0xFF entries are list terminators/padding.
 */
static uchar _syn_offset_one_disable_cmd[ASC_SYN_OFFSET_ONE_DISABLE_LIST] = {
    INQUIRY,
    REQUEST_SENSE,
    READ_CAPACITY,
    READ_TOC,
    MODE_SELECT,
    MODE_SENSE,
    MODE_SELECT_10,
    MODE_SENSE_10,
    0xFF,
    0xFF,
    0xFF,
    0xFF,
    0xFF,
    0xFF,
    0xFF,
    0xFF
};

/*
 * Queue an ASC_SCSI_Q request to the narrow-board microcode.
 *
 * Applies the async-transfer and extra-bytes chip workarounds, decides
 * how many microcode queues the request needs, and sends it when enough
 * free queues exist (or the request is urgent). Guarded against
 * re-entry via in_critical_cnt.
 *
 * Return: 1 if queued, 0 if not enough free queues (busy), ERR on error.
 */
static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
{
    PortAddr iop_base;
    int sta;
    int n_q_required;
    int disable_syn_offset_one_fix;
    int i;
    ASC_PADDR addr;
    ushort sg_entry_cnt = 0;
    ushort sg_entry_cnt_minus_one = 0;
    uchar target_ix;
    uchar tid_no;
    uchar sdtr_data;
    uchar extra_bytes;
    uchar scsi_cmd;
    uchar disable_cmd;
    ASC_SG_HEAD *sg_head;
    ASC_DCNT data_cnt;

    iop_base = asc_dvc->iop_base;
    sg_head = scsiq->sg_head;
    if (asc_dvc->err_code != 0)
        return (ERR);
    scsiq->q1.q_no = 0;
    if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) {
        scsiq->q1.extra_bytes = 0;
    }
    sta = 0;
    target_ix = scsiq->q2.target_ix;
    tid_no = ASC_TIX_TO_TID(target_ix);
    n_q_required = 1;
    /*
     * For REQUEST SENSE on an SDTR-enabled target, force SDTR
     * renegotiation and send the request urgently.
     */
    if (scsiq->cdbptr[0] == REQUEST_SENSE) {
        if ((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) {
            asc_dvc->sdtr_done &= ~scsiq->q1.target_id;
            sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
            AscMsgOutSDTR(asc_dvc,
                          asc_dvc->
                          sdtr_period_tbl[(sdtr_data >> 4) &
                                          (uchar)(asc_dvc->
                                                  max_sdtr_index -
                                                  1)],
                          (uchar)(sdtr_data & (uchar)
                                  ASC_SYN_MAX_OFFSET));
            scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
        }
    }
    if (asc_dvc->in_critical_cnt != 0) {
        AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY);
        return (ERR);
    }
    asc_dvc->in_critical_cnt++;
    if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
        if ((sg_entry_cnt = sg_head->entry_cnt) == 0) {
            asc_dvc->in_critical_cnt--;
            return (ERR);
        }
#if !CC_VERY_LONG_SG_LIST
        if (sg_entry_cnt > ASC_MAX_SG_LIST) {
            asc_dvc->in_critical_cnt--;
            return (ERR);
        }
#endif /* !CC_VERY_LONG_SG_LIST */
        /* A single-element SG list is collapsed to a plain transfer. */
        if (sg_entry_cnt == 1) {
            scsiq->q1.data_addr =
                (ADV_PADDR)sg_head->sg_list[0].addr;
            scsiq->q1.data_cnt =
                (ADV_DCNT)sg_head->sg_list[0].bytes;
            scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE);
        }
        sg_entry_cnt_minus_one = sg_entry_cnt - 1;
    }
    scsi_cmd = scsiq->cdbptr[0];
    disable_syn_offset_one_fix = FALSE;
    /*
     * The async-fix is skipped for short transfers (< 512 bytes) and
     * for the commands listed in _syn_offset_one_disable_cmd[].
     */
    if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) &&
        !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) {
        if (scsiq->q1.cntl & QC_SG_HEAD) {
            data_cnt = 0;
            for (i = 0; i < sg_entry_cnt; i++) {
                data_cnt +=
                    (ADV_DCNT)le32_to_cpu(sg_head->sg_list[i].
                                          bytes);
            }
        } else {
            data_cnt = le32_to_cpu(scsiq->q1.data_cnt);
        }
        if (data_cnt != 0UL) {
            if (data_cnt < 512UL) {
                disable_syn_offset_one_fix = TRUE;
            } else {
                for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST;
                     i++) {
                    disable_cmd =
                        _syn_offset_one_disable_cmd[i];
                    if (disable_cmd == 0xFF) {
                        break;
                    }
                    if (scsi_cmd == disable_cmd) {
                        disable_syn_offset_one_fix =
                            TRUE;
                        break;
                    }
                }
            }
        }
    }
    if (disable_syn_offset_one_fix) {
        scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
        scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
                               ASC_TAG_FLAG_DISABLE_DISCONNECT);
    } else {
        scsiq->q2.tag_code &= 0x27;
    }
    if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
        if (asc_dvc->bug_fix_cntl) {
            if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
                if ((scsi_cmd == READ_6) ||
                    (scsi_cmd == READ_10)) {
                    /*
                     * If the transfer does not end on a
                     * 4-byte boundary, trim the trailing
                     * bytes from the last SG element and
                     * record them in extra_bytes.
                     */
                    addr =
                        (ADV_PADDR)le32_to_cpu(sg_head->
                                               sg_list
                                               [sg_entry_cnt_minus_one].
                                               addr) +
                        (ADV_DCNT)le32_to_cpu(sg_head->
                                              sg_list
                                              [sg_entry_cnt_minus_one].
                                              bytes);
                    extra_bytes =
                        (uchar)((ushort)addr & 0x0003);
                    if ((extra_bytes != 0)
                        &&
                        ((scsiq->q2.
                          tag_code &
                          ASC_TAG_FLAG_EXTRA_BYTES)
                         == 0)) {
                        scsiq->q2.tag_code |=
                            ASC_TAG_FLAG_EXTRA_BYTES;
                        scsiq->q1.extra_bytes =
                            extra_bytes;
                        data_cnt =
                            le32_to_cpu(sg_head->
                                        sg_list
                                        [sg_entry_cnt_minus_one].
                                        bytes);
                        data_cnt -=
                            (ASC_DCNT) extra_bytes;
                        sg_head->
                            sg_list
                            [sg_entry_cnt_minus_one].
                            bytes =
                            cpu_to_le32(data_cnt);
                    }
                }
            }
        }
        sg_head->entry_to_copy = sg_head->entry_cnt;
#if CC_VERY_LONG_SG_LIST
        /*
         * Set the sg_entry_cnt to the maximum possible. The rest of
         * the SG elements will be copied when the RISC completes the
         * SG elements that fit and halts.
         */
        if (sg_entry_cnt > ASC_MAX_SG_LIST) {
            sg_entry_cnt = ASC_MAX_SG_LIST;
        }
#endif /* CC_VERY_LONG_SG_LIST */
        n_q_required = AscSgListToQueue(sg_entry_cnt);
        if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >=
             (uint) n_q_required) ||
            ((scsiq->q1.cntl & QC_URGENT) != 0)) {
            if ((sta =
                 AscSendScsiQueue(asc_dvc, scsiq,
                                  n_q_required)) == 1) {
                asc_dvc->in_critical_cnt--;
                return (sta);
            }
        }
    } else {
        if (asc_dvc->bug_fix_cntl) {
            if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
                if ((scsi_cmd == READ_6) ||
                    (scsi_cmd == READ_10)) {
                    /*
                     * Same boundary fix as above for the
                     * non-SG (single buffer) case; only
                     * applied when the count is a multiple
                     * of 512.
                     */
                    addr =
                        le32_to_cpu(scsiq->q1.data_addr) +
                        le32_to_cpu(scsiq->q1.data_cnt);
                    extra_bytes =
                        (uchar)((ushort)addr & 0x0003);
                    if ((extra_bytes != 0)
                        &&
                        ((scsiq->q2.
                          tag_code &
                          ASC_TAG_FLAG_EXTRA_BYTES)
                         == 0)) {
                        data_cnt =
                            le32_to_cpu(scsiq->q1.
                                        data_cnt);
                        if (((ushort)data_cnt & 0x01FF)
                            == 0) {
                            scsiq->q2.tag_code |=
                                ASC_TAG_FLAG_EXTRA_BYTES;
                            data_cnt -=
                                (ASC_DCNT)
                                extra_bytes;
                            scsiq->q1.data_cnt =
                                cpu_to_le32
                                (data_cnt);
                            scsiq->q1.extra_bytes =
                                extra_bytes;
                        }
                    }
                }
            }
        }
        n_q_required = 1;
        if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, 1) >= 1) ||
            ((scsiq->q1.cntl & QC_URGENT) != 0)) {
            if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
                                        n_q_required)) == 1) {
                asc_dvc->in_critical_cnt--;
                return (sta);
            }
        }
    }
    asc_dvc->in_critical_cnt--;
    return (sta);
}

/*
 * AdvExeScsiQueue() - Send a request to the RISC microcode program.
 *
 * Allocate a carrier structure, point the carrier to the ADV_SCSI_REQ_Q,
 * add the carrier to the ICQ (Initiator Command Queue), and tickle the
 * RISC to notify it a new command is ready to be executed.
 *
 * If 'done_status' is not set to QD_DO_RETRY, then 'error_retry' will be
 * set to SCSI_MAX_RETRY.
 *
 * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the microcode
 * for DMA addresses or math operations are byte swapped to little-endian
 * order.
 *
 * Return:
 *      ADV_SUCCESS(1) - The request was successfully queued.
 *      ADV_BUSY(0) - Resource unavailable; Retry again after pending
 *      request completes.
 *      ADV_ERROR(-1) - Invalid ADV_SCSI_REQ_Q request structure
 *      host IC error.
 */
static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
{
    AdvPortAddr iop_base;
    ADV_PADDR req_paddr;
    ADV_CARR_T *new_carrp;

    /*
     * The ADV_SCSI_REQ_Q 'target_id' field should never exceed ADV_MAX_TID.
     */
    if (scsiq->target_id > ADV_MAX_TID) {
        scsiq->host_status = QHSTA_M_INVALID_DEVICE;
        scsiq->done_status = QD_WITH_ERROR;
        return ADV_ERROR;
    }

    iop_base = asc_dvc->iop_base;

    /*
     * Allocate a carrier ensuring at least one carrier always
     * remains on the freelist and initialize fields.
     */
    if ((new_carrp = asc_dvc->carr_freelist) == NULL) {
        return ADV_BUSY;
    }
    asc_dvc->carr_freelist = (ADV_CARR_T *)
        ADV_U32_TO_VADDR(le32_to_cpu(new_carrp->next_vpa));
    asc_dvc->carr_pending_cnt++;

    /*
     * Set the carrier to be a stopper by setting 'next_vpa'
     * to the stopper value. The current stopper will be changed
     * below to point to the new stopper.
     */
    new_carrp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);

    /*
     * Clear the ADV_SCSI_REQ_Q done flag.
     */
    scsiq->a_flag &= ~ADV_SCSIQ_DONE;

    req_paddr = virt_to_bus(scsiq);
    BUG_ON(req_paddr & 31);
    /* Wait for assertion before making little-endian */
    req_paddr = cpu_to_le32(req_paddr);

    /* Save virtual and physical address of ADV_SCSI_REQ_Q and carrier. */
    scsiq->scsiq_ptr = cpu_to_le32(ADV_VADDR_TO_U32(scsiq));
    scsiq->scsiq_rptr = req_paddr;

    scsiq->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->icq_sp));
    /*
     * Every ADV_CARR_T.carr_pa is byte swapped to little-endian
     * order during initialization.
     */
    scsiq->carr_pa = asc_dvc->icq_sp->carr_pa;

    /*
     * Use the current stopper to send the ADV_SCSI_REQ_Q command to
     * the microcode. The newly allocated stopper will become the new
     * stopper.
     */
    asc_dvc->icq_sp->areq_vpa = req_paddr;

    /*
     * Set the 'next_vpa' pointer for the old stopper to be the
     * physical address of the new stopper. The RISC can only
     * follow physical addresses.
     */
    asc_dvc->icq_sp->next_vpa = new_carrp->carr_pa;

    /*
     * Set the host adapter stopper pointer to point to the new carrier.
     */
    asc_dvc->icq_sp = new_carrp;

    if (asc_dvc->chip_type == ADV_CHIP_ASC3550 ||
        asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
        /*
         * Tickle the RISC to tell it to read its Command Queue Head pointer.
         */
        AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_A);
        if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
            /*
             * Clear the tickle value. In the ASC-3550 the RISC flag
             * command 'clr_tickle_a' does not work unless the host
             * value is cleared.
             */
            AdvWriteByteRegister(iop_base, IOPB_TICKLE,
                                 ADV_TICKLE_NOP);
        }
    } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
        /*
         * Notify the RISC a carrier is ready by writing the physical
         * address of the new carrier stopper to the COMMA register.
         */
        AdvWriteDWordRegister(iop_base, IOPDW_COMMA,
                              le32_to_cpu(new_carrp->carr_pa));
    }

    return ADV_SUCCESS;
}

/*
 * Execute a single 'Scsi_Cmnd'.
 */
static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
{
    int ret, err_code;
    struct asc_board *boardp = shost_priv(scp->device->host);

    ASC_DBG(1, "scp 0x%p\n", scp);

    if (ASC_NARROW_BOARD(boardp)) {
        ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;
        struct asc_scsi_q asc_scsi_q;

        /* asc_build_req() can not return ASC_BUSY. */
        ret = asc_build_req(boardp, scp, &asc_scsi_q);
        if (ret == ASC_ERROR) {
            ASC_STATS(scp->device->host, build_error);
            return ASC_ERROR;
        }

        ret = AscExeScsiQueue(asc_dvc, &asc_scsi_q);
        /* asc_build_req() allocated the SG head; free it here. */
        kfree(asc_scsi_q.sg_head);
        err_code = asc_dvc->err_code;
    } else {
        ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;
        ADV_SCSI_REQ_Q *adv_scsiqp;

        switch (adv_build_req(boardp, scp, &adv_scsiqp)) {
        case ASC_NOERROR:
            ASC_DBG(3, "adv_build_req ASC_NOERROR\n");
            break;
        case ASC_BUSY:
            ASC_DBG(1, "adv_build_req ASC_BUSY\n");
            /*
             * The asc_stats fields 'adv_build_noreq' and
             * 'adv_build_nosg' count wide board busy conditions.
             * They are updated in adv_build_req and
             * adv_get_sglist, respectively.
 */
            return ASC_BUSY;
        case ASC_ERROR:
        default:
            ASC_DBG(1, "adv_build_req ASC_ERROR\n");
            ASC_STATS(scp->device->host, build_error);
            return ASC_ERROR;
        }

        ret = AdvExeScsiQueue(adv_dvc, adv_scsiqp);
        err_code = adv_dvc->err_code;
    }

    switch (ret) {
    case ASC_NOERROR:
        ASC_STATS(scp->device->host, exe_noerror);
        /*
         * Increment monotonically increasing per device
         * successful request counter. Wrapping doesn't matter.
         */
        boardp->reqcnt[scp->device->id]++;
        ASC_DBG(1, "ExeScsiQueue() ASC_NOERROR\n");
        break;
    case ASC_BUSY:
        ASC_STATS(scp->device->host, exe_busy);
        break;
    case ASC_ERROR:
        scmd_printk(KERN_ERR, scp, "ExeScsiQueue() ASC_ERROR, "
                    "err_code 0x%x\n", err_code);
        ASC_STATS(scp->device->host, exe_error);
        scp->result = HOST_BYTE(DID_ERROR);
        break;
    default:
        scmd_printk(KERN_ERR, scp, "ExeScsiQueue() unknown, "
                    "err_code 0x%x\n", err_code);
        ASC_STATS(scp->device->host, exe_unknown);
        scp->result = HOST_BYTE(DID_ERROR);
        break;
    }

    ASC_DBG(1, "end\n");
    return ret;
}

/*
 * advansys_queuecommand() - interrupt-driven I/O entrypoint.
 *
 * This function always returns 0. Command return status is saved
 * in the 'scp' result field.
 */
static int
advansys_queuecommand_lck(struct scsi_cmnd *scp,
                          void (*done)(struct scsi_cmnd *))
{
    struct Scsi_Host *shost = scp->device->host;
    int asc_res, result = 0;

    ASC_STATS(shost, queuecommand);
    scp->scsi_done = done;

    asc_res = asc_execute_scsi_cmnd(scp);

    switch (asc_res) {
    case ASC_NOERROR:
        break;
    case ASC_BUSY:
        /* Ask the mid-layer to retry after pending commands finish. */
        result = SCSI_MLQUEUE_HOST_BUSY;
        break;
    case ASC_ERROR:
    default:
        /* Error already recorded in scp->result; complete it now. */
        asc_scsi_done(scp);
        break;
    }

    return result;
}

static DEF_SCSI_QCMD(advansys_queuecommand)

/*
 * Read the EISA configuration word for the board's slot.
 */
static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base)
{
    PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
        (PortAddr) (ASC_EISA_CFG_IOP_MASK);
    return inpw(eisa_cfg_iop);
}

/*
 * Return the BIOS address of the adapter at the specified
 * I/O port and with the specified bus type.
*/
/*
 * Return the BIOS base address of the adapter at 'iop_base' for the
 * given bus type, or 0 when it cannot be determined (PCI).
 */
static unsigned short __devinit AscGetChipBiosAddress(PortAddr iop_base,
						      unsigned short bus_type)
{
	unsigned short cfg_lsw;
	unsigned short bios_addr;

	/*
	 * The PCI BIOS is re-located by the motherboard BIOS. Because
	 * of this the driver can not determine where a PCI BIOS is
	 * loaded and executes.
	 */
	if (bus_type & ASC_IS_PCI)
		return 0;

	if ((bus_type & ASC_IS_EISA) != 0) {
		/* EISA: low nibble of the EISA config word selects the bank. */
		cfg_lsw = AscGetEisaChipCfg(iop_base);
		cfg_lsw &= 0x000F;
		bios_addr = ASC_BIOS_MIN_ADDR + cfg_lsw * ASC_BIOS_BANK_SIZE;
		return bios_addr;
	}

	cfg_lsw = AscGetChipCfgLsw(iop_base);

	/*
	 * ISA PnP uses the top bit as the 32K BIOS flag
	 */
	if (bus_type == ASC_IS_ISAPNP)
		cfg_lsw &= 0x7FFF;
	/* ISA/VL: top nibble of the config LSW selects the BIOS bank. */
	bios_addr = ASC_BIOS_MIN_ADDR + (cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE;
	return bios_addr;
}

/*
 * Program the chip's SCSI host ID and return the ID read back from the
 * chip; a no-op when the chip already holds 'new_host_id'.
 */
static uchar __devinit AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
{
	ushort cfg_lsw;

	if (AscGetChipScsiID(iop_base) == new_host_id) {
		return (new_host_id);
	}
	cfg_lsw = AscGetChipCfgLsw(iop_base);
	/* The SCSI ID field occupies bits 10:8 of the config LSW. */
	cfg_lsw &= 0xF8FF;
	cfg_lsw |= (ushort)((new_host_id & ASC_MAX_TID) << 8);
	AscSetChipCfgLsw(iop_base, cfg_lsw);
	return (AscGetChipScsiID(iop_base));
}

/* Read the SCSI control register; it lives in register bank 1. */
static unsigned char __devinit AscGetChipScsiCtrl(PortAddr iop_base)
{
	unsigned char sc;

	AscSetBank(iop_base, 1);
	sc = inp(iop_base + IOP_REG_SC);
	AscSetBank(iop_base, 0);
	return sc;
}

/*
 * Return the chip version number.  EISA boards expose a revision byte in
 * EISA slot space; other buses report it via AscGetChipVerNo().
 */
static unsigned char __devinit AscGetChipVersion(PortAddr iop_base,
						 unsigned short bus_type)
{
	if (bus_type & ASC_IS_EISA) {
		PortAddr eisa_iop;
		unsigned char revision;
		eisa_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
		    (PortAddr) ASC_EISA_REV_IOP_MASK;
		revision = inp(eisa_iop);
		return ASC_CHIP_MIN_VER_EISA - 1 + revision;
	}
	return AscGetChipVerNo(iop_base);
}

#ifdef CONFIG_ISA
/*
 * Set up the given ISA DMA channel through the motherboard DMA
 * controller I/O ports (0x0A/0x0B for channels 0-3, 0xD4/0xD6 for 4-7).
 */
static void __devinit AscEnableIsaDma(uchar dma_channel)
{
	if (dma_channel < 4) {
		outp(0x000B, (ushort)(0xC0 | dma_channel));
		outp(0x000A, dma_channel);
	} else if (dma_channel < 8) {
		/* Channels 4-7 are addressed as 0-3 on the second controller. */
		outp(0x00D6, (ushort)(0xC0 | (dma_channel - 4)));
		outp(0x00D4, (ushort)(dma_channel - 4));
	}
}
#endif /* CONFIG_ISA */

static int
AscStopQueueExe(PortAddr iop_base)
{
	int count = 0;

	/*
	 * If no stop code is pending, request a RISC stop via LRAM and poll
	 * (100 ms steps, up to ~2 s) for the acknowledge bit.  Returns 1 on
	 * acknowledge, 0 on timeout or when a stop code was already set.
	 */
	if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) == 0) {
		AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
				 ASC_STOP_REQ_RISC_STOP);
		do {
			if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) &
			    ASC_STOP_ACK_RISC_STOP) {
				return (1);
			}
			mdelay(100);
		} while (count++ < 20);
	}
	return (0);
}

/* Maximum single-transfer DMA byte count supported by the host bus type. */
static ASC_DCNT __devinit AscGetMaxDmaCount(ushort bus_type)
{
	if (bus_type & ASC_IS_ISA)
		return ASC_MAX_ISA_DMA_COUNT;
	else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
		return ASC_MAX_VL_DMA_COUNT;
	return ASC_MAX_PCI_DMA_COUNT;
}

#ifdef CONFIG_ISA
/*
 * Decode the ISA DMA channel from the two low bits of the config LSW:
 * 0x00 -> channel 7, 0x03 -> 0, otherwise bits + 4 (channels 5-6).
 */
static ushort __devinit AscGetIsaDmaChannel(PortAddr iop_base)
{
	ushort channel;

	channel = AscGetChipCfgLsw(iop_base) & 0x0003;
	if (channel == 0x03)
		return (0);
	else if (channel == 0x00)
		return (7);
	return (channel + 4);
}

/*
 * Program the ISA DMA channel (only 5-7 accepted; channel 7 is encoded
 * as 0) and return the channel read back, or 0 for a rejected request.
 */
static ushort __devinit AscSetIsaDmaChannel(PortAddr iop_base,
					    ushort dma_channel)
{
	ushort cfg_lsw;
	uchar value;

	if ((dma_channel >= 5) && (dma_channel <= 7)) {
		if (dma_channel == 7)
			value = 0x00;
		else
			value = dma_channel - 4;
		cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
		cfg_lsw |= value;
		AscSetChipCfgLsw(iop_base, cfg_lsw);
		return (AscGetIsaDmaChannel(iop_base));
	}
	return 0;
}

/* Read the 3-bit ISA DMA speed setting (register bank 1). */
static uchar __devinit AscGetIsaDmaSpeed(PortAddr iop_base)
{
	uchar speed_value;

	AscSetBank(iop_base, 1);
	speed_value = AscReadChipDmaSpeed(iop_base);
	speed_value &= 0x07;
	AscSetBank(iop_base, 0);
	return speed_value;
}

/* Write the 3-bit ISA DMA speed setting and return the value read back. */
static uchar __devinit AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
{
	speed_value &= 0x07;
	AscSetBank(iop_base, 1);
	AscWriteChipDmaSpeed(iop_base, speed_value);
	AscSetBank(iop_base, 0);
	return AscGetIsaDmaSpeed(iop_base);
}
#endif /* CONFIG_ISA */

/*
 * Initialize the narrow-board ASC_DVC_VAR runtime state to driver
 * defaults and apply chip-version/bus-type specific setup.  Returns a
 * warning code; hard failures are accumulated in asc_dvc->err_code.
 */
static ushort __devinit AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
{
	int i;
	PortAddr iop_base;
	ushort warn_code;
	uchar chip_version;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	asc_dvc->err_code = 0;
	/* Exactly one supported host bus type must be set. */
	if ((asc_dvc->bus_type &
	     (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
		asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
	}
	/* Halt the RISC before touching configuration state. */
	AscSetChipControl(iop_base, CC_HALT);
	AscSetChipStatus(iop_base, 0);

	/* Reset all per-adapter runtime bookkeeping to defaults. */
	asc_dvc->bug_fix_cntl = 0;
	asc_dvc->pci_fix_asyn_xfer = 0;
	asc_dvc->pci_fix_asyn_xfer_always = 0;
	/* asc_dvc->init_state initialized in AscInitGetConfig(). */
	asc_dvc->sdtr_done = 0;
	asc_dvc->cur_total_qng = 0;
	asc_dvc->is_in_int = 0;
	asc_dvc->in_critical_cnt = 0;
	asc_dvc->last_q_shortage = 0;
	asc_dvc->use_tagged_qng = 0;
	asc_dvc->no_scam = 0;
	asc_dvc->unit_not_ready = 0;
	asc_dvc->queue_full_or_busy = 0;
	asc_dvc->redo_scam = 0;
	asc_dvc->res2 = 0;
	asc_dvc->min_sdtr_index = 0;
	asc_dvc->cfg->can_tagged_qng = 0;
	asc_dvc->cfg->cmd_qng_enabled = 0;
	asc_dvc->dvc_cntl = ASC_DEF_DVC_CNTL;
	asc_dvc->init_sdtr = 0;
	asc_dvc->max_total_qng = ASC_DEF_MAX_TOTAL_QNG;
	asc_dvc->scsi_reset_wait = 3;
	asc_dvc->start_motor = ASC_SCSI_WIDTH_BIT_SET;
	asc_dvc->max_dma_count = AscGetMaxDmaCount(asc_dvc->bus_type);
	asc_dvc->cfg->sdtr_enable = ASC_SCSI_WIDTH_BIT_SET;
	asc_dvc->cfg->disc_enable = ASC_SCSI_WIDTH_BIT_SET;
	asc_dvc->cfg->chip_scsi_id = ASC_DEF_CHIP_SCSI_ID;

	chip_version = AscGetChipVersion(iop_base, asc_dvc->bus_type);
	asc_dvc->cfg->chip_version = chip_version;
	/* Default synchronous transfer table; overridden below for Ultra. */
	asc_dvc->sdtr_period_tbl = asc_syn_xfer_period;
	asc_dvc->max_sdtr_index = 7;
	if ((asc_dvc->bus_type & ASC_IS_PCI) &&
	    (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3150)) {
		/* Ultra-capable PCI chip: wider/faster SDTR table. */
		asc_dvc->bus_type = ASC_IS_PCI_ULTRA;
		asc_dvc->sdtr_period_tbl = asc_syn_ultra_xfer_period;
		asc_dvc->max_sdtr_index = 15;
		if (chip_version == ASC_CHIP_VER_PCI_ULTRA_3150) {
			AscSetExtraControl(iop_base,
					   (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
		} else if (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3050) {
			AscSetExtraControl(iop_base,
					   (SEC_ACTIVE_NEGATE |
					    SEC_ENABLE_FILTER));
		}
	}
	if (asc_dvc->bus_type == ASC_IS_PCI) {
		AscSetExtraControl(iop_base,
				   (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
	}

	asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
#ifdef CONFIG_ISA
	if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
		/* Newer ISA chips support Plug and Play. */
		if (chip_version >= ASC_CHIP_MIN_VER_ISA_PNP) {
			AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
			asc_dvc->bus_type = ASC_IS_ISAPNP;
		}
		asc_dvc->cfg->isa_dma_channel =
		    (uchar)AscGetIsaDmaChannel(iop_base);
	}
#endif /* CONFIG_ISA */

	/* Per-target queueing state starts empty with SCSI-1 queue depth. */
	for (i = 0; i <= ASC_MAX_TID; i++) {
		asc_dvc->cur_dvc_qng[i] = 0;
		asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
		asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q *)0L;
		asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *)0L;
		asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG;
	}
	return warn_code;
}

/*
 * Write 'cmd_reg' to the EEPROM command register, verifying by read-back
 * with up to ASC_EEP_MAX_RETRY attempts.  Returns 1 on success, 0 on
 * failure.
 */
static int __devinit AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
{
	int retry;

	for (retry = 0; retry < ASC_EEP_MAX_RETRY; retry++) {
		unsigned char read_back;
		AscSetChipEEPCmd(iop_base, cmd_reg);
		mdelay(1);
		read_back = AscGetChipEEPCmd(iop_base);
		if (read_back == cmd_reg)
			return 1;
	}
	return 0;
}

/* Settle delay between EEPROM read operations. */
static void __devinit AscWaitEEPRead(void)
{
	mdelay(1);
}

/* Read one 16-bit word from the serial EEPROM at word address 'addr'. */
static ushort __devinit AscReadEEPWord(PortAddr iop_base, uchar addr)
{
	ushort read_wval;
	uchar cmd_reg;

	/* Make sure writes are disabled before issuing the read command. */
	AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
	AscWaitEEPRead();
	cmd_reg = addr | ASC_EEP_CMD_READ;
	AscWriteEEPCmdReg(iop_base, cmd_reg);
	AscWaitEEPRead();
	read_wval = AscGetChipEEPData(iop_base);
	AscWaitEEPRead();
	return read_wval;
}

/*
 * Read the EEPROM configuration into 'cfg_buf' and return the running
 * word checksum for the caller to compare against the stored checksum.
 * The config word range differs between VL and other bus types.
 */
static ushort __devinit
AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
{
	ushort wval;
	ushort sum;
	ushort *wbuf;
	int cfg_beg;
	int cfg_end;
	/* Words up to here hold char fields that need byte re-swapping. */
	int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2;
	int s_addr;

	wbuf = (ushort *)cfg_buf;
	sum = 0;
	/* Read two config words; Byte-swapping done by AscReadEEPWord(). */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = AscReadEEPWord(iop_base, (uchar)s_addr);
		sum += *wbuf;
	}
	if (bus_type & ASC_IS_VL) {
		cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
		cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
	} else {
		cfg_beg = ASC_EEP_DVC_CFG_BEG;
		cfg_end = ASC_EEP_MAX_DVC_ADDR;
	}
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		wval = AscReadEEPWord(iop_base, (uchar)s_addr);
		if (s_addr <= uchar_end_in_config) {
			/*
			 * Swap all char fields - must unswap bytes already swapped
			 * by AscReadEEPWord().
			 */
			*wbuf = le16_to_cpu(wval);
		} else {
			/* Don't swap word field at the end - cntl field. */
			*wbuf = wval;
		}
		sum += wval;	/* Checksum treats all EEPROM data as words. */
	}
	/*
	 * Read the checksum word which will be compared against 'sum'
	 * by the caller. Word field already swapped.
	 */
	*wbuf = AscReadEEPWord(iop_base, (uchar)s_addr);
	return sum;
}

/*
 * Detect external local RAM by writing a 0x55AA test pattern to a probe
 * address and reading it back.  Returns 1 when the pattern sticks (the
 * saved word is then restored), otherwise 0.
 */
static int __devinit AscTestExternalLram(ASC_DVC_VAR *asc_dvc)
{
	PortAddr iop_base;
	ushort q_addr;
	ushort saved_word;
	int sta;

	iop_base = asc_dvc->iop_base;
	sta = 0;
	q_addr = ASC_QNO_TO_QADDR(241);
	saved_word = AscReadLramWord(iop_base, q_addr);
	AscSetChipLramAddr(iop_base, q_addr);
	AscSetChipLramData(iop_base, 0x55AA);
	mdelay(10);
	AscSetChipLramAddr(iop_base, q_addr);
	if (AscGetChipLramData(iop_base) == 0x55AA) {
		sta = 1;
		AscWriteLramWord(iop_base, q_addr, saved_word);
	}
	return (sta);
}

/* Settle delay after an EEPROM write command. */
static void __devinit AscWaitEEPWrite(void)
{
	mdelay(20);
}

/*
 * Write 'data_reg' to the EEPROM data register, verifying by read-back
 * with up to ASC_EEP_MAX_RETRY attempts.  Returns 1 on success, 0 on
 * failure.
 */
static int __devinit AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
{
	ushort read_back;
	int retry;

	retry = 0;
	while (TRUE) {
		AscSetChipEEPData(iop_base, data_reg);
		mdelay(1);
		read_back = AscGetChipEEPData(iop_base);
		if (read_back == data_reg) {
			return (1);
		}
		if (retry++ > ASC_EEP_MAX_RETRY) {
			return (0);
		}
	}
}

/*
 * Write one 16-bit word to the EEPROM at word address 'addr', skipping
 * the write when the EEPROM already holds 'word_val' (EEPROM writes are
 * slow and wear the part).  Returns the word read back from the EEPROM.
 */
static ushort __devinit
AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val)
{
	ushort read_wval;

	read_wval = AscReadEEPWord(iop_base, addr);
	if (read_wval != word_val) {
		/* Enable writes, load data, issue write, then re-disable. */
		AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_ABLE);
		AscWaitEEPRead();
		AscWriteEEPDataReg(iop_base, word_val);
		AscWaitEEPRead();
		AscWriteEEPCmdReg(iop_base,
				  (uchar)((uchar)ASC_EEP_CMD_WRITE | addr));
		AscWaitEEPWrite();
		AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
		AscWaitEEPRead();
		return (AscReadEEPWord(iop_base, addr));
	}
	return (read_wval);
}

/*
 * Write the configuration in 'cfg_buf' to the EEPROM (one attempt),
 * append the computed checksum word, then read everything back and
 * compare.  Returns the number of verify mismatches (0 == success).
 */
static int __devinit
AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
{
	int n_error;
	ushort *wbuf;
	ushort word;
	ushort sum;
	int s_addr;
	int cfg_beg;
	int cfg_end;
	/* Words up to here hold char fields that need byte pre-swapping. */
	int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR
- 2; wbuf = (ushort *)cfg_buf; n_error = 0; sum = 0; /* Write two config words; AscWriteEEPWord() will swap bytes. */ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { sum += *wbuf; if (*wbuf != AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) { n_error++; } } if (bus_type & ASC_IS_VL) { cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; } else { cfg_beg = ASC_EEP_DVC_CFG_BEG; cfg_end = ASC_EEP_MAX_DVC_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { if (s_addr <= uchar_end_in_config) { /* * This is a char field. Swap char fields before they are * swapped again by AscWriteEEPWord(). */ word = cpu_to_le16(*wbuf); if (word != AscWriteEEPWord(iop_base, (uchar)s_addr, word)) { n_error++; } } else { /* Don't swap word field at the end - cntl field. */ if (*wbuf != AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) { n_error++; } } sum += *wbuf; /* Checksum calculated from word values. */ } /* Write checksum word. It will be swapped by AscWriteEEPWord(). */ *wbuf = sum; if (sum != AscWriteEEPWord(iop_base, (uchar)s_addr, sum)) { n_error++; } /* Read EEPROM back again. */ wbuf = (ushort *)cfg_buf; /* * Read two config words; Byte-swapping done by AscReadEEPWord(). */ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { if (*wbuf != AscReadEEPWord(iop_base, (uchar)s_addr)) { n_error++; } } if (bus_type & ASC_IS_VL) { cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; } else { cfg_beg = ASC_EEP_DVC_CFG_BEG; cfg_end = ASC_EEP_MAX_DVC_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { if (s_addr <= uchar_end_in_config) { /* * Swap all char fields. Must unswap bytes already swapped * by AscReadEEPWord(). */ word = le16_to_cpu(AscReadEEPWord (iop_base, (uchar)s_addr)); } else { /* Don't swap word field at the end - cntl field. */ word = AscReadEEPWord(iop_base, (uchar)s_addr); } if (*wbuf != word) { n_error++; } } /* Read checksum; Byte swapping not needed. 
*/ if (AscReadEEPWord(iop_base, (uchar)s_addr) != sum) { n_error++; } return n_error; } static int __devinit AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type) { int retry; int n_error; retry = 0; while (TRUE) { if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf, bus_type)) == 0) { break; } if (++retry > ASC_EEP_MAX_RETRY) { break; } } return n_error; } static ushort __devinit AscInitFromEEP(ASC_DVC_VAR *asc_dvc) { ASCEEP_CONFIG eep_config_buf; ASCEEP_CONFIG *eep_config; PortAddr iop_base; ushort chksum; ushort warn_code; ushort cfg_msw, cfg_lsw; int i; int write_eep = 0; iop_base = asc_dvc->iop_base; warn_code = 0; AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE); AscStopQueueExe(iop_base); if ((AscStopChip(iop_base) == FALSE) || (AscGetChipScsiCtrl(iop_base) != 0)) { asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE; AscResetChipAndScsiBus(asc_dvc); mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */ } if (AscIsChipHalted(iop_base) == FALSE) { asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; return (warn_code); } AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR); if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) { asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR; return (warn_code); } eep_config = (ASCEEP_CONFIG *)&eep_config_buf; cfg_msw = AscGetChipCfgMsw(iop_base); cfg_lsw = AscGetChipCfgLsw(iop_base); if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) { cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; warn_code |= ASC_WARN_CFG_MSW_RECOVER; AscSetChipCfgMsw(iop_base, cfg_msw); } chksum = AscGetEEPConfig(iop_base, eep_config, asc_dvc->bus_type); ASC_DBG(1, "chksum 0x%x\n", chksum); if (chksum == 0) { chksum = 0xaa55; } if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) { warn_code |= ASC_WARN_AUTO_CONFIG; if (asc_dvc->cfg->chip_version == 3) { if (eep_config->cfg_lsw != cfg_lsw) { warn_code |= ASC_WARN_EEPROM_RECOVER; eep_config->cfg_lsw = AscGetChipCfgLsw(iop_base); } if (eep_config->cfg_msw != cfg_msw) { warn_code |= ASC_WARN_EEPROM_RECOVER; 
eep_config->cfg_msw = AscGetChipCfgMsw(iop_base); } } } eep_config->cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; eep_config->cfg_lsw |= ASC_CFG0_HOST_INT_ON; ASC_DBG(1, "eep_config->chksum 0x%x\n", eep_config->chksum); if (chksum != eep_config->chksum) { if (AscGetChipVersion(iop_base, asc_dvc->bus_type) == ASC_CHIP_VER_PCI_ULTRA_3050) { ASC_DBG(1, "chksum error ignored; EEPROM-less board\n"); eep_config->init_sdtr = 0xFF; eep_config->disc_enable = 0xFF; eep_config->start_motor = 0xFF; eep_config->use_cmd_qng = 0; eep_config->max_total_qng = 0xF0; eep_config->max_tag_qng = 0x20; eep_config->cntl = 0xBFFF; ASC_EEP_SET_CHIP_ID(eep_config, 7); eep_config->no_scam = 0; eep_config->adapter_info[0] = 0; eep_config->adapter_info[1] = 0; eep_config->adapter_info[2] = 0; eep_config->adapter_info[3] = 0; eep_config->adapter_info[4] = 0; /* Indicate EEPROM-less board. */ eep_config->adapter_info[5] = 0xBB; } else { ASC_PRINT ("AscInitFromEEP: EEPROM checksum error; Will try to re-write EEPROM.\n"); write_eep = 1; warn_code |= ASC_WARN_EEPROM_CHKSUM; } } asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr; asc_dvc->cfg->disc_enable = eep_config->disc_enable; asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng; asc_dvc->cfg->isa_dma_speed = ASC_EEP_GET_DMA_SPD(eep_config); asc_dvc->start_motor = eep_config->start_motor; asc_dvc->dvc_cntl = eep_config->cntl; asc_dvc->no_scam = eep_config->no_scam; asc_dvc->cfg->adapter_info[0] = eep_config->adapter_info[0]; asc_dvc->cfg->adapter_info[1] = eep_config->adapter_info[1]; asc_dvc->cfg->adapter_info[2] = eep_config->adapter_info[2]; asc_dvc->cfg->adapter_info[3] = eep_config->adapter_info[3]; asc_dvc->cfg->adapter_info[4] = eep_config->adapter_info[4]; asc_dvc->cfg->adapter_info[5] = eep_config->adapter_info[5]; if (!AscTestExternalLram(asc_dvc)) { if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA)) { eep_config->max_total_qng = ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG; eep_config->max_tag_qng = ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG; } else { 
eep_config->cfg_msw |= 0x0800; cfg_msw |= 0x0800; AscSetChipCfgMsw(iop_base, cfg_msw); eep_config->max_total_qng = ASC_MAX_PCI_INRAM_TOTAL_QNG; eep_config->max_tag_qng = ASC_MAX_INRAM_TAG_QNG; } } else { } if (eep_config->max_total_qng < ASC_MIN_TOTAL_QNG) { eep_config->max_total_qng = ASC_MIN_TOTAL_QNG; } if (eep_config->max_total_qng > ASC_MAX_TOTAL_QNG) { eep_config->max_total_qng = ASC_MAX_TOTAL_QNG; } if (eep_config->max_tag_qng > eep_config->max_total_qng) { eep_config->max_tag_qng = eep_config->max_total_qng; } if (eep_config->max_tag_qng < ASC_MIN_TAG_Q_PER_DVC) { eep_config->max_tag_qng = ASC_MIN_TAG_Q_PER_DVC; } asc_dvc->max_total_qng = eep_config->max_total_qng; if ((eep_config->use_cmd_qng & eep_config->disc_enable) != eep_config->use_cmd_qng) { eep_config->disc_enable = eep_config->use_cmd_qng; warn_code |= ASC_WARN_CMD_QNG_CONFLICT; } ASC_EEP_SET_CHIP_ID(eep_config, ASC_EEP_GET_CHIP_ID(eep_config) & ASC_MAX_TID); asc_dvc->cfg->chip_scsi_id = ASC_EEP_GET_CHIP_ID(eep_config); if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) && !(asc_dvc->dvc_cntl & ASC_CNTL_SDTR_ENABLE_ULTRA)) { asc_dvc->min_sdtr_index = ASC_SDTR_ULTRA_PCI_10MB_INDEX; } for (i = 0; i <= ASC_MAX_TID; i++) { asc_dvc->dos_int13_table[i] = eep_config->dos_int13_table[i]; asc_dvc->cfg->max_tag_qng[i] = eep_config->max_tag_qng; asc_dvc->cfg->sdtr_period_offset[i] = (uchar)(ASC_DEF_SDTR_OFFSET | (asc_dvc->min_sdtr_index << 4)); } eep_config->cfg_msw = AscGetChipCfgMsw(iop_base); if (write_eep) { if ((i = AscSetEEPConfig(iop_base, eep_config, asc_dvc->bus_type)) != 0) { ASC_PRINT1 ("AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i); } else { ASC_PRINT ("AscInitFromEEP: Successfully re-wrote EEPROM.\n"); } } return (warn_code); } static int __devinit AscInitGetConfig(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var; unsigned short warn_code = 0; asc_dvc->init_state = 
ASC_INIT_STATE_BEG_GET_CFG; if (asc_dvc->err_code != 0) return asc_dvc->err_code; if (AscFindSignature(asc_dvc->iop_base)) { warn_code |= AscInitAscDvcVar(asc_dvc); warn_code |= AscInitFromEEP(asc_dvc); asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG; if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT) asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT; } else { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; } switch (warn_code) { case 0: /* No error */ break; case ASC_WARN_IO_PORT_ROTATE: shost_printk(KERN_WARNING, shost, "I/O port address " "modified\n"); break; case ASC_WARN_AUTO_CONFIG: shost_printk(KERN_WARNING, shost, "I/O port increment switch " "enabled\n"); break; case ASC_WARN_EEPROM_CHKSUM: shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n"); break; case ASC_WARN_IRQ_MODIFIED: shost_printk(KERN_WARNING, shost, "IRQ modified\n"); break; case ASC_WARN_CMD_QNG_CONFLICT: shost_printk(KERN_WARNING, shost, "tag queuing enabled w/o " "disconnects\n"); break; default: shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n", warn_code); break; } if (asc_dvc->err_code != 0) shost_printk(KERN_ERR, shost, "error 0x%x at init_state " "0x%x\n", asc_dvc->err_code, asc_dvc->init_state); return asc_dvc->err_code; } static int __devinit AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var; PortAddr iop_base = asc_dvc->iop_base; unsigned short cfg_msw; unsigned short warn_code = 0; asc_dvc->init_state |= ASC_INIT_STATE_BEG_SET_CFG; if (asc_dvc->err_code != 0) return asc_dvc->err_code; if (!AscFindSignature(asc_dvc->iop_base)) { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; return asc_dvc->err_code; } cfg_msw = AscGetChipCfgMsw(iop_base); if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) { cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; warn_code |= ASC_WARN_CFG_MSW_RECOVER; AscSetChipCfgMsw(iop_base, cfg_msw); } if ((asc_dvc->cfg->cmd_qng_enabled & 
asc_dvc->cfg->disc_enable) != asc_dvc->cfg->cmd_qng_enabled) { asc_dvc->cfg->disc_enable = asc_dvc->cfg->cmd_qng_enabled; warn_code |= ASC_WARN_CMD_QNG_CONFLICT; } if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) { warn_code |= ASC_WARN_AUTO_CONFIG; } #ifdef CONFIG_PCI if (asc_dvc->bus_type & ASC_IS_PCI) { cfg_msw &= 0xFFC0; AscSetChipCfgMsw(iop_base, cfg_msw); if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { } else { if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) || (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) { asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB; asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN; } } } else #endif /* CONFIG_PCI */ if (asc_dvc->bus_type == ASC_IS_ISAPNP) { if (AscGetChipVersion(iop_base, asc_dvc->bus_type) == ASC_CHIP_VER_ASYN_BUG) { asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN; } } if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) != asc_dvc->cfg->chip_scsi_id) { asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID; } #ifdef CONFIG_ISA if (asc_dvc->bus_type & ASC_IS_ISA) { AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel); AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed); } #endif /* CONFIG_ISA */ asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG; switch (warn_code) { case 0: /* No error. 
*/ break; case ASC_WARN_IO_PORT_ROTATE: shost_printk(KERN_WARNING, shost, "I/O port address " "modified\n"); break; case ASC_WARN_AUTO_CONFIG: shost_printk(KERN_WARNING, shost, "I/O port increment switch " "enabled\n"); break; case ASC_WARN_EEPROM_CHKSUM: shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n"); break; case ASC_WARN_IRQ_MODIFIED: shost_printk(KERN_WARNING, shost, "IRQ modified\n"); break; case ASC_WARN_CMD_QNG_CONFLICT: shost_printk(KERN_WARNING, shost, "tag queuing w/o " "disconnects\n"); break; default: shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n", warn_code); break; } if (asc_dvc->err_code != 0) shost_printk(KERN_ERR, shost, "error 0x%x at init_state " "0x%x\n", asc_dvc->err_code, asc_dvc->init_state); return asc_dvc->err_code; } /* * EEPROM Configuration. * * All drivers should use this structure to set the default EEPROM * configuration. The BIOS now uses this structure when it is built. * Additional structure information can be found in a_condor.h where * the structure is defined. * * The *_Field_IsChar structs are needed to correct for endianness. * These values are read from the board 16 bits at a time directly * into the structs. Because some fields are char, the values will be * in the wrong order. The *_Field_IsChar tells when to flip the * bytes. Data read and written to PCI memory is automatically swapped * on big-endian platforms so char fields read as words are actually being * unswapped on big-endian platforms. 
*/ static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config __devinitdata = { ADV_EEPROM_BIOS_ENABLE, /* cfg_lsw */ 0x0000, /* cfg_msw */ 0xFFFF, /* disc_enable */ 0xFFFF, /* wdtr_able */ 0xFFFF, /* sdtr_able */ 0xFFFF, /* start_motor */ 0xFFFF, /* tagqng_able */ 0xFFFF, /* bios_scan */ 0, /* scam_tolerant */ 7, /* adapter_scsi_id */ 0, /* bios_boot_delay */ 3, /* scsi_reset_delay */ 0, /* bios_id_lun */ 0, /* termination */ 0, /* reserved1 */ 0xFFE7, /* bios_ctrl */ 0xFFFF, /* ultra_able */ 0, /* reserved2 */ ASC_DEF_MAX_HOST_QNG, /* max_host_qng */ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ 0, /* dvc_cntl */ 0, /* bug_fix */ 0, /* serial_number_word1 */ 0, /* serial_number_word2 */ 0, /* serial_number_word3 */ 0, /* check_sum */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , /* oem_name[16] */ 0, /* dvc_err_code */ 0, /* adv_err_code */ 0, /* adv_err_addr */ 0, /* saved_dvc_err_code */ 0, /* saved_adv_err_code */ 0, /* saved_adv_err_addr */ 0 /* num_of_err */ }; static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar __devinitdata = { 0, /* cfg_lsw */ 0, /* cfg_msw */ 0, /* -disc_enable */ 0, /* wdtr_able */ 0, /* sdtr_able */ 0, /* start_motor */ 0, /* tagqng_able */ 0, /* bios_scan */ 0, /* scam_tolerant */ 1, /* adapter_scsi_id */ 1, /* bios_boot_delay */ 1, /* scsi_reset_delay */ 1, /* bios_id_lun */ 1, /* termination */ 1, /* reserved1 */ 0, /* bios_ctrl */ 0, /* ultra_able */ 0, /* reserved2 */ 1, /* max_host_qng */ 1, /* max_dvc_qng */ 0, /* dvc_cntl */ 0, /* bug_fix */ 0, /* serial_number_word1 */ 0, /* serial_number_word2 */ 0, /* serial_number_word3 */ 0, /* check_sum */ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , /* oem_name[16] */ 0, /* dvc_err_code */ 0, /* adv_err_code */ 0, /* adv_err_addr */ 0, /* saved_dvc_err_code */ 0, /* saved_adv_err_code */ 0, /* saved_adv_err_addr */ 0 /* num_of_err */ }; static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config __devinitdata = { ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */ 0x0000, /* 01 cfg_msw */ 
0xFFFF, /* 02 disc_enable */ 0xFFFF, /* 03 wdtr_able */ 0x4444, /* 04 sdtr_speed1 */ 0xFFFF, /* 05 start_motor */ 0xFFFF, /* 06 tagqng_able */ 0xFFFF, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 7, /* 09 adapter_scsi_id */ 0, /* bios_boot_delay */ 3, /* 10 scsi_reset_delay */ 0, /* bios_id_lun */ 0, /* 11 termination_se */ 0, /* termination_lvd */ 0xFFE7, /* 12 bios_ctrl */ 0x4444, /* 13 sdtr_speed2 */ 0x4444, /* 14 sdtr_speed3 */ ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0x4444, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, /* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ PCI_VENDOR_ID_ASP, /* 58 subsysvid */ PCI_DEVICE_ID_38C0800_REV1, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar __devinitdata = { 0, /* 00 cfg_lsw */ 0, /* 01 cfg_msw */ 0, /* 02 disc_enable */ 0, /* 03 wdtr_able */ 0, /* 04 sdtr_speed1 */ 0, /* 05 start_motor */ 0, /* 06 tagqng_able */ 0, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 1, /* 09 adapter_scsi_id */ 1, /* bios_boot_delay */ 1, /* 10 scsi_reset_delay */ 1, /* bios_id_lun */ 1, /* 11 termination_se */ 1, /* 
termination_lvd */ 0, /* 12 bios_ctrl */ 0, /* 13 sdtr_speed2 */ 0, /* 14 sdtr_speed3 */ 1, /* 15 max_host_qng */ 1, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, /* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ 0, /* 58 subsysvid */ 0, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config __devinitdata = { ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */ 0x0000, /* 01 cfg_msw */ 0xFFFF, /* 02 disc_enable */ 0xFFFF, /* 03 wdtr_able */ 0x5555, /* 04 sdtr_speed1 */ 0xFFFF, /* 05 start_motor */ 0xFFFF, /* 06 tagqng_able */ 0xFFFF, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 7, /* 09 adapter_scsi_id */ 0, /* bios_boot_delay */ 3, /* 10 scsi_reset_delay */ 0, /* bios_id_lun */ 0, /* 11 termination_se */ 0, /* termination_lvd */ 0xFFE7, /* 12 bios_ctrl */ 0x5555, /* 13 sdtr_speed2 */ 0x5555, /* 14 sdtr_speed3 */ ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0x5555, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, /* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ PCI_VENDOR_ID_ASP, /* 58 subsysvid */ PCI_DEVICE_ID_38C1600_REV1, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar __devinitdata = { 0, /* 00 cfg_lsw */ 0, /* 01 cfg_msw */ 0, /* 02 disc_enable */ 0, /* 03 wdtr_able */ 0, /* 04 sdtr_speed1 */ 0, /* 05 start_motor */ 0, /* 06 tagqng_able */ 0, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 1, /* 09 adapter_scsi_id */ 1, /* bios_boot_delay */ 1, /* 10 scsi_reset_delay */ 1, /* bios_id_lun */ 1, /* 11 termination_se */ 1, /* termination_lvd */ 0, /* 12 bios_ctrl */ 0, /* 13 sdtr_speed2 */ 0, /* 14 sdtr_speed3 */ 1, /* 15 max_host_qng */ 1, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 
reserved */
	0,			/* 45 reserved */
	0,			/* 46 reserved */
	0,			/* 47 reserved */
	0,			/* 48 reserved */
	0,			/* 49 reserved */
	0,			/* 50 reserved */
	0,			/* 51 reserved */
	0,			/* 52 reserved */
	0,			/* 53 reserved */
	0,			/* 54 reserved */
	0,			/* 55 reserved */
	0,			/* 56 cisptr_lsw */
	0,			/* 57 cisprt_msw */
	0,			/* 58 subsysvid */
	0,			/* 59 subsysid */
	0,			/* 60 reserved */
	0,			/* 61 reserved */
	0,			/* 62 reserved */
	0			/* 63 reserved */
};

#ifdef CONFIG_PCI
/*
 * Wait for EEPROM command to complete.
 *
 * Polls the EE command register for up to ADV_EEP_DELAY_MS milliseconds
 * (1 ms per iteration).  If the done bit never asserts the EEPROM/chip is
 * considered fatally broken and the kernel BUG()s.
 */
static void __devinit AdvWaitEEPCmd(AdvPortAddr iop_base)
{
	int eep_delay_ms;

	for (eep_delay_ms = 0; eep_delay_ms < ADV_EEP_DELAY_MS;
	     eep_delay_ms++) {
		if (AdvReadWordRegister(iop_base, IOPW_EE_CMD) &
		    ASC_EEP_CMD_DONE) {
			break;
		}
		mdelay(1);
	}
	/* Hard failure: the EEPROM never signalled completion. */
	if ((AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE) ==
	    0)
		BUG();
}

/*
 * Read one 16-bit word of the EEPROM from the specified word address.
 */
static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
{
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
			     ASC_EEP_CMD_READ | eep_word_addr);
	AdvWaitEEPCmd(iop_base);
	return AdvReadWordRegister(iop_base, IOPW_EE_DATA);
}

/*
 * Write the EEPROM from 'cfg_buf' (ASC-3550 layout).
 *
 * 'charfields' is a parallel structure whose non-zero words mark the
 * config words that hold character data; those are stored little-endian
 * in the EEPROM (cpu_to_le16), while plain numeric words are written
 * as-is.  The checksum covers only words 0..20 and is written at word 21;
 * the OEM name (words 22..29) is written outside the checksum.
 */
static void __devinit
AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
{
	ushort *wbuf;
	ushort addr, chksum;
	ushort *charfields;

	wbuf = (ushort *)cfg_buf;
	charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar;
	chksum = 0;

	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
	AdvWaitEEPCmd(iop_base);

	/*
	 * Write EEPROM from word 0 to word 20.
	 */
	for (addr = ADV_EEP_DVC_CFG_BEGIN;
	     addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		chksum += *wbuf;	/* Checksum is calculated from word values. */
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
		mdelay(ADV_EEP_DELAY_MS);
	}

	/*
	 * Write EEPROM checksum at word 21 ('addr' is ADV_EEP_DVC_CFG_END
	 * after the loop above).
	 */
	AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
	AdvWaitEEPCmd(iop_base);
	/* Skip past the checksum word in both parallel structures. */
	wbuf++;
	charfields++;

	/*
	 * Write EEPROM OEM name at words 22 to 29.
	 */
	for (addr = ADV_EEP_DVC_CTL_BEGIN;
	     addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
	}
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
	AdvWaitEEPCmd(iop_base);
}

/*
 * Write the EEPROM from 'cfg_buf' (ASC-38C0800 layout).
 *
 * Same word/checksum/OEM-name sequence as AdvSet3550EEPConfig, using the
 * 38C0800 char-field map.
 */
static void __devinit
AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
{
	ushort *wbuf;
	ushort *charfields;
	ushort addr, chksum;

	wbuf = (ushort *)cfg_buf;
	charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar;
	chksum = 0;

	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
	AdvWaitEEPCmd(iop_base);

	/*
	 * Write EEPROM from word 0 to word 20.
	 */
	for (addr = ADV_EEP_DVC_CFG_BEGIN;
	     addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		chksum += *wbuf;	/* Checksum is calculated from word values. */
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
		mdelay(ADV_EEP_DELAY_MS);
	}

	/*
	 * Write EEPROM checksum at word 21.
	 */
	AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
	AdvWaitEEPCmd(iop_base);
	wbuf++;
	charfields++;

	/*
	 * Write EEPROM OEM name at words 22 to 29.
	 */
	for (addr = ADV_EEP_DVC_CTL_BEGIN;
	     addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
	}
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
	AdvWaitEEPCmd(iop_base);
}

/*
 * Write the EEPROM from 'cfg_buf' (ASC-38C1600 layout).
 *
 * Same word/checksum/OEM-name sequence as AdvSet3550EEPConfig, using the
 * 38C1600 char-field map.
 */
static void __devinit
AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
{
	ushort *wbuf;
	ushort *charfields;
	ushort addr, chksum;

	wbuf = (ushort *)cfg_buf;
	charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar;
	chksum = 0;

	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
	AdvWaitEEPCmd(iop_base);

	/*
	 * Write EEPROM from word 0 to word 20.
	 */
	for (addr = ADV_EEP_DVC_CFG_BEGIN;
	     addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		chksum += *wbuf;	/* Checksum is calculated from word values. */
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
		mdelay(ADV_EEP_DELAY_MS);
	}

	/*
	 * Write EEPROM checksum at word 21.
	 */
	AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
	AdvWaitEEPCmd(iop_base);
	wbuf++;
	charfields++;

	/*
	 * Write EEPROM OEM name at words 22 to 29.
	 */
	for (addr = ADV_EEP_DVC_CTL_BEGIN;
	     addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
	}
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
	AdvWaitEEPCmd(iop_base);
}

/*
 * Read EEPROM configuration into the specified buffer (ASC-3550 layout).
 *
 * Character-data words (per the char-field map) are byte-swapped from
 * the EEPROM's little-endian storage to host order.  The returned
 * checksum is accumulated only over words 0..20; the caller compares it
 * against the stored check_sum word.
 */
static ushort __devinit
AdvGet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
{
	ushort wval, chksum;
	ushort *wbuf;
	int eep_addr;
	ushort *charfields;

	charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar;
	wbuf = (ushort *)cfg_buf;
	chksum = 0;

	for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
	     eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
		wval = AdvReadEEPWord(iop_base, eep_addr);
		chksum += wval;	/* Checksum is calculated from word values. */
		if (*charfields++) {
			*wbuf = le16_to_cpu(wval);
		} else {
			*wbuf = wval;
		}
	}
	/* Read checksum word. */
	*wbuf = AdvReadEEPWord(iop_base, eep_addr);
	wbuf++;
	charfields++;

	/* Read rest of EEPROM not covered by the checksum. */
	for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
	     eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
		*wbuf = AdvReadEEPWord(iop_base, eep_addr);
		if (*charfields++) {
			*wbuf = le16_to_cpu(*wbuf);
		}
	}
	return chksum;
}

/*
 * Read EEPROM configuration into the specified buffer.
 *
 * Return a checksum based on the EEPROM configuration read.
 */
static ushort __devinit
AdvGet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
{
	ushort wval, chksum;
	ushort *wbuf;
	int eep_addr;
	ushort *charfields;

	charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar;
	wbuf = (ushort *)cfg_buf;
	chksum = 0;

	/*
	 * Words 0..20: accumulate the checksum over raw EEPROM word values,
	 * byte-swapping only the words the char-field map marks as character
	 * data (EEPROM stores those little-endian).
	 */
	for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
	     eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
		wval = AdvReadEEPWord(iop_base, eep_addr);
		chksum += wval;	/* Checksum is calculated from word values. */
		if (*charfields++) {
			*wbuf = le16_to_cpu(wval);
		} else {
			*wbuf = wval;
		}
	}
	/* Read checksum word (word 21, not included in 'chksum'). */
	*wbuf = AdvReadEEPWord(iop_base, eep_addr);
	wbuf++;
	charfields++;

	/* Read rest of EEPROM not covered by the checksum. */
	for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
	     eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
		*wbuf = AdvReadEEPWord(iop_base, eep_addr);
		if (*charfields++) {
			*wbuf = le16_to_cpu(*wbuf);
		}
	}
	return chksum;
}

/*
 * Read EEPROM configuration into the specified buffer (ASC-38C1600
 * layout).  Identical sequence to AdvGet38C0800EEPConfig with the
 * 38C1600 char-field map.
 *
 * Return a checksum based on the EEPROM configuration read.
 */
static ushort __devinit
AdvGet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
{
	ushort wval, chksum;
	ushort *wbuf;
	int eep_addr;
	ushort *charfields;

	charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar;
	wbuf = (ushort *)cfg_buf;
	chksum = 0;

	for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
	     eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
		wval = AdvReadEEPWord(iop_base, eep_addr);
		chksum += wval;	/* Checksum is calculated from word values. */
		if (*charfields++) {
			*wbuf = le16_to_cpu(wval);
		} else {
			*wbuf = wval;
		}
	}
	/* Read checksum word. */
	*wbuf = AdvReadEEPWord(iop_base, eep_addr);
	wbuf++;
	charfields++;

	/* Read rest of EEPROM not covered by the checksum. */
	for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
	     eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
		*wbuf = AdvReadEEPWord(iop_base, eep_addr);
		if (*charfields++) {
			*wbuf = le16_to_cpu(*wbuf);
		}
	}
	return chksum;
}

/*
 * Read the board's EEPROM configuration.
Set fields in ADV_DVC_VAR and
 * ADV_DVC_CFG based on the EEPROM settings.  The chip is stopped while
 * all of this is done.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code.  If there are no warnings
 * then 0 is returned.
 *
 * Note: Chip is stopped on entry.
 */
static int __devinit AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc)
{
	AdvPortAddr iop_base;
	ushort warn_code;
	ADVEEP_3550_CONFIG eep_config;

	iop_base = asc_dvc->iop_base;

	warn_code = 0;

	/*
	 * Read the board's EEPROM configuration.
	 *
	 * Set default values if a bad checksum is found.
	 */
	if (AdvGet3550EEPConfig(iop_base, &eep_config) != eep_config.check_sum) {
		warn_code |= ASC_WARN_EEPROM_CHKSUM;

		/*
		 * Set EEPROM default values.
		 */
		memcpy(&eep_config, &Default_3550_EEPROM_Config,
		       sizeof(ADVEEP_3550_CONFIG));

		/*
		 * Assume the 6 byte board serial number that was read from
		 * EEPROM is correct even if the EEPROM checksum failed.
		 */
		eep_config.serial_number_word3 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1);

		eep_config.serial_number_word2 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2);

		eep_config.serial_number_word1 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3);

		/* Write the defaults (plus preserved serial) back. */
		AdvSet3550EEPConfig(iop_base, &eep_config);
	}
	/*
	 * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the
	 * EEPROM configuration that was read.
	 *
	 * This is the mapping of EEPROM fields to Adv Library fields.
	 */
	asc_dvc->wdtr_able = eep_config.wdtr_able;
	asc_dvc->sdtr_able = eep_config.sdtr_able;
	asc_dvc->ultra_able = eep_config.ultra_able;
	asc_dvc->tagqng_able = eep_config.tagqng_able;
	asc_dvc->cfg->disc_enable = eep_config.disc_enable;
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
	asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID);
	asc_dvc->start_motor = eep_config.start_motor;
	asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
	asc_dvc->bios_ctrl = eep_config.bios_ctrl;
	asc_dvc->no_scam = eep_config.scam_tolerant;
	asc_dvc->cfg->serial1 = eep_config.serial_number_word1;
	asc_dvc->cfg->serial2 = eep_config.serial_number_word2;
	asc_dvc->cfg->serial3 = eep_config.serial_number_word3;

	/*
	 * Set the host maximum queuing (max. 253, min. 16) and the per device
	 * maximum queuing (max. 63, min. 4).
	 */
	if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) {
		eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
	} else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_host_qng == 0) {
			eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
		} else {
			eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
		}
	}

	if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) {
		eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
	} else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_dvc_qng == 0) {
			eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
		} else {
			eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
		}
	}

	/*
	 * If 'max_dvc_qng' is greater than 'max_host_qng', then
	 * set 'max_dvc_qng' to 'max_host_qng'.
	 */
	if (eep_config.max_dvc_qng > eep_config.max_host_qng) {
		eep_config.max_dvc_qng = eep_config.max_host_qng;
	}

	/*
	 * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng'
	 * values based on possibly adjusted EEPROM values.
	 */
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;

	/*
	 * If the EEPROM 'termination' field is set to automatic (0), then set
	 * the ADV_DVC_CFG 'termination' field to automatic also.
	 *
	 * If the termination is specified with a non-zero 'termination'
	 * value check that a legal value is set and set the ADV_DVC_CFG
	 * 'termination' field appropriately.
	 */
	if (eep_config.termination == 0) {
		asc_dvc->cfg->termination = 0;	/* auto termination */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination == 1) {
			asc_dvc->cfg->termination = TERM_CTL_SEL;

			/* Enable manual control with low off / high on. */
		} else if (eep_config.termination == 2) {
			asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination == 3) {
			asc_dvc->cfg->termination =
			    TERM_CTL_SEL | TERM_CTL_H | TERM_CTL_L;
		} else {
			/*
			 * The EEPROM 'termination' field contains a bad
			 * value.  Use automatic termination instead.
			 */
			asc_dvc->cfg->termination = 0;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	return warn_code;
}

/*
 * Read the board's EEPROM configuration.  Set fields in ADV_DVC_VAR and
 * ADV_DVC_CFG based on the EEPROM settings.  The chip is stopped while
 * all of this is done.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code.  If there are no warnings
 * then 0 is returned.
 *
 * Note: Chip is stopped on entry.
 */
static int __devinit AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc)
{
	AdvPortAddr iop_base;
	ushort warn_code;
	ADVEEP_38C0800_CONFIG eep_config;
	uchar tid, termination;
	ushort sdtr_speed = 0;

	iop_base = asc_dvc->iop_base;

	warn_code = 0;

	/*
	 * Read the board's EEPROM configuration.
	 *
	 * Set default values if a bad checksum is found.
	 */
	if (AdvGet38C0800EEPConfig(iop_base, &eep_config) !=
	    eep_config.check_sum) {
		warn_code |= ASC_WARN_EEPROM_CHKSUM;

		/*
		 * Set EEPROM default values.
		 */
		memcpy(&eep_config, &Default_38C0800_EEPROM_Config,
		       sizeof(ADVEEP_38C0800_CONFIG));

		/*
		 * Assume the 6 byte board serial number that was read from
		 * EEPROM is correct even if the EEPROM checksum failed.
		 */
		eep_config.serial_number_word3 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1);

		eep_config.serial_number_word2 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2);

		eep_config.serial_number_word1 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3);

		AdvSet38C0800EEPConfig(iop_base, &eep_config);
	}
	/*
	 * Set ADV_DVC_VAR and ADV_DVC_CFG variables from the
	 * EEPROM configuration that was read.
	 *
	 * This is the mapping of EEPROM fields to Adv Library fields.
	 */
	asc_dvc->wdtr_able = eep_config.wdtr_able;
	asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1;
	asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2;
	asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3;
	asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4;
	asc_dvc->tagqng_able = eep_config.tagqng_able;
	asc_dvc->cfg->disc_enable = eep_config.disc_enable;
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
	asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID);
	asc_dvc->start_motor = eep_config.start_motor;
	asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
	asc_dvc->bios_ctrl = eep_config.bios_ctrl;
	asc_dvc->no_scam = eep_config.scam_tolerant;
	asc_dvc->cfg->serial1 = eep_config.serial_number_word1;
	asc_dvc->cfg->serial2 = eep_config.serial_number_word2;
	asc_dvc->cfg->serial3 = eep_config.serial_number_word3;

	/*
	 * For every Target ID if any of its 'sdtr_speed[1234]' bits
	 * are set, then set an 'sdtr_able' bit for it.
	 *
	 * Each sdtr_speed word packs four 4-bit speed nibbles (TIDs 0-3,
	 * 4-7, 8-11, 12-15).  ADV_MAX_TID doubles here as the 4-bit nibble
	 * mask (assumes ADV_MAX_TID == 0xf — holds for 16-device wide SCSI).
	 */
	asc_dvc->sdtr_able = 0;
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		if (tid == 0) {
			sdtr_speed = asc_dvc->sdtr_speed1;
		} else if (tid == 4) {
			sdtr_speed = asc_dvc->sdtr_speed2;
		} else if (tid == 8) {
			sdtr_speed = asc_dvc->sdtr_speed3;
		} else if (tid == 12) {
			sdtr_speed = asc_dvc->sdtr_speed4;
		}
		if (sdtr_speed & ADV_MAX_TID) {
			asc_dvc->sdtr_able |= (1 << tid);
		}
		sdtr_speed >>= 4;
	}

	/*
	 * Set the host maximum queuing (max. 253, min. 16) and the per device
	 * maximum queuing (max. 63, min. 4).
	 */
	if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) {
		eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
	} else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_host_qng == 0) {
			eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
		} else {
			eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
		}
	}

	if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) {
		eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
	} else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_dvc_qng == 0) {
			eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
		} else {
			eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
		}
	}

	/*
	 * If 'max_dvc_qng' is greater than 'max_host_qng', then
	 * set 'max_dvc_qng' to 'max_host_qng'.
	 */
	if (eep_config.max_dvc_qng > eep_config.max_host_qng) {
		eep_config.max_dvc_qng = eep_config.max_host_qng;
	}

	/*
	 * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng'
	 * values based on possibly adjusted EEPROM values.
	 */
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;

	/*
	 * If the EEPROM 'termination' field is set to automatic (0), then set
	 * the ADV_DVC_CFG 'termination' field to automatic also.
	 *
	 * If the termination is specified with a non-zero 'termination'
	 * value check that a legal value is set and set the ADV_DVC_CFG
	 * 'termination' field appropriately.
	 */
	if (eep_config.termination_se == 0) {
		termination = 0;	/* auto termination for SE */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination_se == 1) {
			termination = 0;

			/* Enable manual control with low off / high on. */
		} else if (eep_config.termination_se == 2) {
			termination = TERM_SE_HI;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination_se == 3) {
			termination = TERM_SE;
		} else {
			/*
			 * The EEPROM 'termination_se' field contains a bad
			 * value.  Use automatic termination instead.
			 */
			termination = 0;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	if (eep_config.termination_lvd == 0) {
		asc_dvc->cfg->termination = termination;	/* auto termination for LVD */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination_lvd == 1) {
			asc_dvc->cfg->termination = termination;

			/* Enable manual control with low off / high on. */
		} else if (eep_config.termination_lvd == 2) {
			asc_dvc->cfg->termination = termination | TERM_LVD_HI;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination_lvd == 3) {
			asc_dvc->cfg->termination = termination | TERM_LVD;
		} else {
			/*
			 * The EEPROM 'termination_lvd' field contains a bad
			 * value.  Use automatic termination instead.
			 */
			asc_dvc->cfg->termination = termination;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	return warn_code;
}

/*
 * Read the board's EEPROM configuration.  Set fields in ASC_DVC_VAR and
 * ASC_DVC_CFG based on the EEPROM settings.  The chip is stopped while
 * all of this is done.
 *
 * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code.  If there are no warnings
 * then 0 is returned.
 *
 * Note: Chip is stopped on entry.
 */
static int __devinit AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc)
{
	AdvPortAddr iop_base;
	ushort warn_code;
	ADVEEP_38C1600_CONFIG eep_config;
	uchar tid, termination;
	ushort sdtr_speed = 0;

	iop_base = asc_dvc->iop_base;

	warn_code = 0;

	/*
	 * Read the board's EEPROM configuration.
	 *
	 * Set default values if a bad checksum is found.
	 */
	if (AdvGet38C1600EEPConfig(iop_base, &eep_config) !=
	    eep_config.check_sum) {
		struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc);

		warn_code |= ASC_WARN_EEPROM_CHKSUM;

		/*
		 * Set EEPROM default values.
		 */
		memcpy(&eep_config, &Default_38C1600_EEPROM_Config,
		       sizeof(ADVEEP_38C1600_CONFIG));

		if (PCI_FUNC(pdev->devfn) != 0) {
			u8 ints;
			/*
			 * Disable Bit 14 (BIOS_ENABLE) to fix SPARC Ultra 60
			 * and old Mac system booting problem. The Expansion
			 * ROM must be disabled in Function 1 for these systems
			 */
			eep_config.cfg_lsw &= ~ADV_EEPROM_BIOS_ENABLE;
			/*
			 * Clear the INTAB (bit 11) if the GPIO 0 input
			 * indicates the Function 1 interrupt line is wired
			 * to INTB.
			 *
			 * Set/Clear Bit 11 (INTAB) from the GPIO bit 0 input:
			 *   1 - Function 1 interrupt line wired to INT A.
			 *   0 - Function 1 interrupt line wired to INT B.
			 *
			 * Note: Function 0 is always wired to INTA.
			 * Put all 5 GPIO bits in input mode and then read
			 * their input values.
			 */
			AdvWriteByteRegister(iop_base, IOPB_GPIO_CNTL, 0);
			ints = AdvReadByteRegister(iop_base, IOPB_GPIO_DATA);
			if ((ints & 0x01) == 0)
				eep_config.cfg_lsw &= ~ADV_EEPROM_INTAB;
		}

		/*
		 * Assume the 6 byte board serial number that was read from
		 * EEPROM is correct even if the EEPROM checksum failed.
		 */
		eep_config.serial_number_word3 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1);
		eep_config.serial_number_word2 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2);
		eep_config.serial_number_word1 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3);

		AdvSet38C1600EEPConfig(iop_base, &eep_config);
	}
	/*
	 * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the
	 * EEPROM configuration that was read.
	 *
	 * This is the mapping of EEPROM fields to Adv Library fields.
	 */
	asc_dvc->wdtr_able = eep_config.wdtr_able;
	asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1;
	asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2;
	asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3;
	asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4;
	asc_dvc->ppr_able = 0;
	asc_dvc->tagqng_able = eep_config.tagqng_able;
	asc_dvc->cfg->disc_enable = eep_config.disc_enable;
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
	asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ASC_MAX_TID);
	asc_dvc->start_motor = eep_config.start_motor;
	asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
	asc_dvc->bios_ctrl = eep_config.bios_ctrl;
	asc_dvc->no_scam = eep_config.scam_tolerant;

	/*
	 * For every Target ID if any of its 'sdtr_speed[1234]' bits
	 * are set, then set an 'sdtr_able' bit for it.
	 *
	 * ASC_MAX_TID doubles here as the 4-bit per-target speed-nibble
	 * mask (assumes ASC_MAX_TID == 0xf — holds for 16-device wide SCSI).
	 */
	asc_dvc->sdtr_able = 0;
	for (tid = 0; tid <= ASC_MAX_TID; tid++) {
		if (tid == 0) {
			sdtr_speed = asc_dvc->sdtr_speed1;
		} else if (tid == 4) {
			sdtr_speed = asc_dvc->sdtr_speed2;
		} else if (tid == 8) {
			sdtr_speed = asc_dvc->sdtr_speed3;
		} else if (tid == 12) {
			sdtr_speed = asc_dvc->sdtr_speed4;
		}
		if (sdtr_speed & ASC_MAX_TID) {
			asc_dvc->sdtr_able |= (1 << tid);
		}
		sdtr_speed >>= 4;
	}

	/*
	 * Set the host maximum queuing (max. 253, min. 16) and the per device
	 * maximum queuing (max. 63, min. 4).
	 */
	if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) {
		eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
	} else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_host_qng == 0) {
			eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
		} else {
			eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
		}
	}

	if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) {
		eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
	} else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_dvc_qng == 0) {
			eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
		} else {
			eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
		}
	}

	/*
	 * If 'max_dvc_qng' is greater than 'max_host_qng', then
	 * set 'max_dvc_qng' to 'max_host_qng'.
	 */
	if (eep_config.max_dvc_qng > eep_config.max_host_qng) {
		eep_config.max_dvc_qng = eep_config.max_host_qng;
	}

	/*
	 * Set ASC_DVC_VAR 'max_host_qng' and ASC_DVC_VAR 'max_dvc_qng'
	 * values based on possibly adjusted EEPROM values.
	 */
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;

	/*
	 * If the EEPROM 'termination' field is set to automatic (0), then set
	 * the ASC_DVC_CFG 'termination' field to automatic also.
	 *
	 * If the termination is specified with a non-zero 'termination'
	 * value check that a legal value is set and set the ASC_DVC_CFG
	 * 'termination' field appropriately.
	 */
	if (eep_config.termination_se == 0) {
		termination = 0;	/* auto termination for SE */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination_se == 1) {
			termination = 0;

			/* Enable manual control with low off / high on. */
		} else if (eep_config.termination_se == 2) {
			termination = TERM_SE_HI;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination_se == 3) {
			termination = TERM_SE;
		} else {
			/*
			 * The EEPROM 'termination_se' field contains a bad
			 * value.  Use automatic termination instead.
			 */
			termination = 0;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	if (eep_config.termination_lvd == 0) {
		asc_dvc->cfg->termination = termination;	/* auto termination for LVD */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination_lvd == 1) {
			asc_dvc->cfg->termination = termination;

			/* Enable manual control with low off / high on. */
		} else if (eep_config.termination_lvd == 2) {
			asc_dvc->cfg->termination = termination | TERM_LVD_HI;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination_lvd == 3) {
			asc_dvc->cfg->termination = termination | TERM_LVD;
		} else {
			/*
			 * The EEPROM 'termination_lvd' field contains a bad
			 * value.  Use automatic termination instead.
			 */
			asc_dvc->cfg->termination = termination;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	return warn_code;
}

/*
 * Initialize the ADV_DVC_VAR structure.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code.  If there are no warnings
 * then 0 is returned.
 */
static int __devinit
AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
{
	struct asc_board *board = shost_priv(shost);
	ADV_DVC_VAR *asc_dvc = &board->dvc_var.adv_dvc_var;
	unsigned short warn_code = 0;
	AdvPortAddr iop_base = asc_dvc->iop_base;
	u16 cmd;
	int status;

	asc_dvc->err_code = 0;

	/*
	 * Save the state of the PCI Configuration Command Register
	 * "Parity Error Response Control" Bit. If the bit is clear (0),
	 * in AdvInitAsc3550/38C0800Driver() tell the microcode to ignore
	 * DMA parity errors.
	 */
	asc_dvc->cfg->control_flag = 0;
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if ((cmd & PCI_COMMAND_PARITY) == 0)
		asc_dvc->cfg->control_flag |= CONTROL_FLAG_IGNORE_PERR;

	asc_dvc->cfg->chip_version =
	    AdvGetChipVersion(iop_base, asc_dvc->bus_type);

	ASC_DBG(1, "iopb_chip_id_1: 0x%x 0x%x\n",
		(ushort)AdvReadByteRegister(iop_base, IOPB_CHIP_ID_1),
		(ushort)ADV_CHIP_ID_BYTE);

	ASC_DBG(1, "iopw_chip_id_0: 0x%x 0x%x\n",
		(ushort)AdvReadWordRegister(iop_base, IOPW_CHIP_ID_0),
		(ushort)ADV_CHIP_ID_WORD);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (AdvFindSignature(iop_base) == 0) {
		asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
		return ADV_ERROR;
	} else {
		/*
		 * The caller must set 'chip_type' to a valid setting.
		 */
		if (asc_dvc->chip_type != ADV_CHIP_ASC3550 &&
		    asc_dvc->chip_type != ADV_CHIP_ASC38C0800 &&
		    asc_dvc->chip_type != ADV_CHIP_ASC38C1600) {
			asc_dvc->err_code |= ASC_IERR_BAD_CHIPTYPE;
			return ADV_ERROR;
		}

		/*
		 * Reset Chip.
		 */
		AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
				     ADV_CTRL_REG_CMD_RESET);
		mdelay(100);
		AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
				     ADV_CTRL_REG_CMD_WR_IO_REG);

		/* Load per-chip configuration from the on-board EEPROM. */
		if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
			status = AdvInitFrom38C1600EEP(asc_dvc);
		} else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
			status = AdvInitFrom38C0800EEP(asc_dvc);
		} else {
			status = AdvInitFrom3550EEP(asc_dvc);
		}
		warn_code |= status;
	}

	if (warn_code != 0)
		shost_printk(KERN_WARNING, shost, "warning: 0x%x\n",
			     warn_code);

	if (asc_dvc->err_code)
		shost_printk(KERN_ERR, shost, "error code 0x%x\n",
			     asc_dvc->err_code);

	return asc_dvc->err_code;
}
#endif

static struct scsi_host_template advansys_template = {
	.proc_name = DRV_NAME,
#ifdef CONFIG_PROC_FS
	.proc_info = advansys_proc_info,
#endif
	.name = DRV_NAME,
	.info = advansys_info,
	.queuecommand = advansys_queuecommand,
	.eh_bus_reset_handler = advansys_reset,
	.bios_param = advansys_biosparam,
	.slave_configure = advansys_slave_configure,
	/*
	 * Because the driver may control an ISA adapter 'unchecked_isa_dma'
	 * must be set. The flag will be cleared in advansys_board_found
	 * for non-ISA adapters.
	 */
	.unchecked_isa_dma = 1,
	/*
	 * All adapters controlled by this driver are capable of large
	 * scatter-gather lists. According to the mid-level SCSI documentation
	 * this obviates any performance gain provided by setting
	 * 'use_clustering'. But empirically while CPU utilization is increased
	 * by enabling clustering, I/O throughput increases as well.
	 */
	.use_clustering = ENABLE_CLUSTERING,
};

/*
 * Allocate the wide (Adv) board's carrier buffer, request structures and
 * scatter-gather blocks, then run the chip-specific microcode init.
 *
 * Returns 0 on success, ADV_ERROR on allocation failure, or the chip
 * init error code.  Partially allocated memory is NOT freed here; the
 * caller is expected to clean up via advansys_wide_free_mem() —
 * TODO confirm against the (out-of-view) caller.
 */
static int __devinit advansys_wide_init_chip(struct Scsi_Host *shost)
{
	struct asc_board *board = shost_priv(shost);
	struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
	int req_cnt = 0;
	adv_req_t *reqp = NULL;
	int sg_cnt = 0;
	adv_sgblk_t *sgp;
	int warn_code, err_code;

	/*
	 * Allocate buffer carrier structures. The total size
	 * is about 4 KB, so allocate all at once.
	 */
	adv_dvc->carrier_buf = kmalloc(ADV_CARRIER_BUFSIZE, GFP_KERNEL);
	ASC_DBG(1, "carrier_buf 0x%p\n", adv_dvc->carrier_buf);

	if (!adv_dvc->carrier_buf)
		goto kmalloc_failed;

	/*
	 * Allocate up to 'max_host_qng' request structures for the Wide
	 * board. The total size is about 16 KB, so allocate all at once.
	 * If the allocation fails decrement and try again.
	 */
	for (req_cnt = adv_dvc->max_host_qng; req_cnt > 0; req_cnt--) {
		reqp = kmalloc(sizeof(adv_req_t) * req_cnt, GFP_KERNEL);

		ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", reqp, req_cnt,
			(ulong)sizeof(adv_req_t) * req_cnt);

		if (reqp)
			break;
	}

	if (!reqp)
		goto kmalloc_failed;

	adv_dvc->orig_reqp = reqp;

	/*
	 * Allocate up to ADV_TOT_SG_BLOCK request structures for
	 * the Wide board. Each structure is about 136 bytes.
	 * They are kept on a singly-linked free list headed by
	 * board->adv_sgblkp.
	 */
	board->adv_sgblkp = NULL;
	for (sg_cnt = 0; sg_cnt < ADV_TOT_SG_BLOCK; sg_cnt++) {
		sgp = kmalloc(sizeof(adv_sgblk_t), GFP_KERNEL);

		if (!sgp)
			break;

		sgp->next_sgblkp = board->adv_sgblkp;
		board->adv_sgblkp = sgp;

	}

	ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", sg_cnt, sizeof(adv_sgblk_t),
		sizeof(adv_sgblk_t) * sg_cnt);

	if (!board->adv_sgblkp)
		goto kmalloc_failed;

	/*
	 * Point 'adv_reqp' to the request structures and
	 * link them together.
	 */
	req_cnt--;
	reqp[req_cnt].next_reqp = NULL;
	for (; req_cnt > 0; req_cnt--) {
		reqp[req_cnt - 1].next_reqp = &reqp[req_cnt];
	}
	board->adv_reqp = &reqp[0];

	if (adv_dvc->chip_type == ADV_CHIP_ASC3550) {
		ASC_DBG(2, "AdvInitAsc3550Driver()\n");
		warn_code = AdvInitAsc3550Driver(adv_dvc);
	} else if (adv_dvc->chip_type == ADV_CHIP_ASC38C0800) {
		ASC_DBG(2, "AdvInitAsc38C0800Driver()\n");
		warn_code = AdvInitAsc38C0800Driver(adv_dvc);
	} else {
		ASC_DBG(2, "AdvInitAsc38C1600Driver()\n");
		warn_code = AdvInitAsc38C1600Driver(adv_dvc);
	}
	err_code = adv_dvc->err_code;

	if (warn_code || err_code) {
		shost_printk(KERN_WARNING, shost, "error: warn 0x%x, error "
			     "0x%x\n", warn_code, err_code);
	}

	goto exit;

kmalloc_failed:
	shost_printk(KERN_ERR, shost, "error: kmalloc() failed\n");
	err_code = ADV_ERROR;
exit:
	return err_code;
}

/*
 * Free everything advansys_wide_init_chip() allocated: the carrier
 * buffer, the request array and the scatter-gather block free list.
 * Safe to call with partially-initialized state (kfree(NULL) is a no-op).
 */
static void advansys_wide_free_mem(struct asc_board *board)
{
	struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
	kfree(adv_dvc->carrier_buf);
	adv_dvc->carrier_buf = NULL;
	kfree(adv_dvc->orig_reqp);
	adv_dvc->orig_reqp = board->adv_reqp = NULL;
	while (board->adv_sgblkp) {
		adv_sgblk_t *sgp = board->adv_sgblkp;
		board->adv_sgblkp = sgp->next_sgblkp;
		kfree(sgp);
	}
}

static int __devinit advansys_board_found(struct Scsi_Host *shost,
					  unsigned int iop, int bus_type)
{
	struct pci_dev *pdev;
	struct asc_board *boardp = shost_priv(shost);
	ASC_DVC_VAR *asc_dvc_varp = NULL;
	ADV_DVC_VAR *adv_dvc_varp = NULL;
	int share_irq, warn_code, ret;

	pdev = (bus_type == ASC_IS_PCI) ?
to_pci_dev(boardp->dev) : NULL; if (ASC_NARROW_BOARD(boardp)) { ASC_DBG(1, "narrow board\n"); asc_dvc_varp = &boardp->dvc_var.asc_dvc_var; asc_dvc_varp->bus_type = bus_type; asc_dvc_varp->drv_ptr = boardp; asc_dvc_varp->cfg = &boardp->dvc_cfg.asc_dvc_cfg; asc_dvc_varp->iop_base = iop; } else { #ifdef CONFIG_PCI adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; adv_dvc_varp->drv_ptr = boardp; adv_dvc_varp->cfg = &boardp->dvc_cfg.adv_dvc_cfg; if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW) { ASC_DBG(1, "wide board ASC-3550\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC3550; } else if (pdev->device == PCI_DEVICE_ID_38C0800_REV1) { ASC_DBG(1, "wide board ASC-38C0800\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800; } else { ASC_DBG(1, "wide board ASC-38C1600\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC38C1600; } boardp->asc_n_io_port = pci_resource_len(pdev, 1); boardp->ioremap_addr = pci_ioremap_bar(pdev, 1); if (!boardp->ioremap_addr) { shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) " "returned NULL\n", (long)pci_resource_start(pdev, 1), boardp->asc_n_io_port); ret = -ENODEV; goto err_shost; } adv_dvc_varp->iop_base = (AdvPortAddr)boardp->ioremap_addr; ASC_DBG(1, "iop_base: 0x%p\n", adv_dvc_varp->iop_base); /* * Even though it isn't used to access wide boards, other * than for the debug line below, save I/O Port address so * that it can be reported. */ boardp->ioport = iop; ASC_DBG(1, "iopb_chip_id_1 0x%x, iopw_chip_id_0 0x%x\n", (ushort)inp(iop + 1), (ushort)inpw(iop)); #endif /* CONFIG_PCI */ } #ifdef CONFIG_PROC_FS /* * Allocate buffer for printing information from * /proc/scsi/advansys/[0...]. */ boardp->prtbuf = kmalloc(ASC_PRTBUF_SIZE, GFP_KERNEL); if (!boardp->prtbuf) { shost_printk(KERN_ERR, shost, "kmalloc(%d) returned NULL\n", ASC_PRTBUF_SIZE); ret = -ENOMEM; goto err_unmap; } #endif /* CONFIG_PROC_FS */ if (ASC_NARROW_BOARD(boardp)) { /* * Set the board bus type and PCI IRQ before * calling AscInitGetConfig(). 
*/ switch (asc_dvc_varp->bus_type) { #ifdef CONFIG_ISA case ASC_IS_ISA: shost->unchecked_isa_dma = TRUE; share_irq = 0; break; case ASC_IS_VL: shost->unchecked_isa_dma = FALSE; share_irq = 0; break; case ASC_IS_EISA: shost->unchecked_isa_dma = FALSE; share_irq = IRQF_SHARED; break; #endif /* CONFIG_ISA */ #ifdef CONFIG_PCI case ASC_IS_PCI: shost->unchecked_isa_dma = FALSE; share_irq = IRQF_SHARED; break; #endif /* CONFIG_PCI */ default: shost_printk(KERN_ERR, shost, "unknown adapter type: " "%d\n", asc_dvc_varp->bus_type); shost->unchecked_isa_dma = TRUE; share_irq = 0; break; } /* * NOTE: AscInitGetConfig() may change the board's * bus_type value. The bus_type value should no * longer be used. If the bus_type field must be * referenced only use the bit-wise AND operator "&". */ ASC_DBG(2, "AscInitGetConfig()\n"); ret = AscInitGetConfig(shost) ? -ENODEV : 0; } else { #ifdef CONFIG_PCI /* * For Wide boards set PCI information before calling * AdvInitGetConfig(). */ shost->unchecked_isa_dma = FALSE; share_irq = IRQF_SHARED; ASC_DBG(2, "AdvInitGetConfig()\n"); ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0; #endif /* CONFIG_PCI */ } if (ret) goto err_free_proc; /* * Save the EEPROM configuration so that it can be displayed * from /proc/scsi/advansys/[0...]. */ if (ASC_NARROW_BOARD(boardp)) { ASCEEP_CONFIG *ep; /* * Set the adapter's target id bit in the 'init_tidmask' field. */ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(asc_dvc_varp->cfg->chip_scsi_id); /* * Save EEPROM settings for the board. 
*/ ep = &boardp->eep_config.asc_eep; ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable; ep->disc_enable = asc_dvc_varp->cfg->disc_enable; ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled; ASC_EEP_SET_DMA_SPD(ep, asc_dvc_varp->cfg->isa_dma_speed); ep->start_motor = asc_dvc_varp->start_motor; ep->cntl = asc_dvc_varp->dvc_cntl; ep->no_scam = asc_dvc_varp->no_scam; ep->max_total_qng = asc_dvc_varp->max_total_qng; ASC_EEP_SET_CHIP_ID(ep, asc_dvc_varp->cfg->chip_scsi_id); /* 'max_tag_qng' is set to the same value for every device. */ ep->max_tag_qng = asc_dvc_varp->cfg->max_tag_qng[0]; ep->adapter_info[0] = asc_dvc_varp->cfg->adapter_info[0]; ep->adapter_info[1] = asc_dvc_varp->cfg->adapter_info[1]; ep->adapter_info[2] = asc_dvc_varp->cfg->adapter_info[2]; ep->adapter_info[3] = asc_dvc_varp->cfg->adapter_info[3]; ep->adapter_info[4] = asc_dvc_varp->cfg->adapter_info[4]; ep->adapter_info[5] = asc_dvc_varp->cfg->adapter_info[5]; /* * Modify board configuration. */ ASC_DBG(2, "AscInitSetConfig()\n"); ret = AscInitSetConfig(pdev, shost) ? -ENODEV : 0; if (ret) goto err_free_proc; } else { ADVEEP_3550_CONFIG *ep_3550; ADVEEP_38C0800_CONFIG *ep_38C0800; ADVEEP_38C1600_CONFIG *ep_38C1600; /* * Save Wide EEP Configuration Information. 
*/ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { ep_3550 = &boardp->eep_config.adv_3550_eep; ep_3550->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; ep_3550->max_host_qng = adv_dvc_varp->max_host_qng; ep_3550->max_dvc_qng = adv_dvc_varp->max_dvc_qng; ep_3550->termination = adv_dvc_varp->cfg->termination; ep_3550->disc_enable = adv_dvc_varp->cfg->disc_enable; ep_3550->bios_ctrl = adv_dvc_varp->bios_ctrl; ep_3550->wdtr_able = adv_dvc_varp->wdtr_able; ep_3550->sdtr_able = adv_dvc_varp->sdtr_able; ep_3550->ultra_able = adv_dvc_varp->ultra_able; ep_3550->tagqng_able = adv_dvc_varp->tagqng_able; ep_3550->start_motor = adv_dvc_varp->start_motor; ep_3550->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait; ep_3550->serial_number_word1 = adv_dvc_varp->cfg->serial1; ep_3550->serial_number_word2 = adv_dvc_varp->cfg->serial2; ep_3550->serial_number_word3 = adv_dvc_varp->cfg->serial3; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { ep_38C0800 = &boardp->eep_config.adv_38C0800_eep; ep_38C0800->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; ep_38C0800->max_host_qng = adv_dvc_varp->max_host_qng; ep_38C0800->max_dvc_qng = adv_dvc_varp->max_dvc_qng; ep_38C0800->termination_lvd = adv_dvc_varp->cfg->termination; ep_38C0800->disc_enable = adv_dvc_varp->cfg->disc_enable; ep_38C0800->bios_ctrl = adv_dvc_varp->bios_ctrl; ep_38C0800->wdtr_able = adv_dvc_varp->wdtr_able; ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C0800->sdtr_speed1 = adv_dvc_varp->sdtr_speed1; ep_38C0800->sdtr_speed2 = adv_dvc_varp->sdtr_speed2; ep_38C0800->sdtr_speed3 = adv_dvc_varp->sdtr_speed3; ep_38C0800->sdtr_speed4 = adv_dvc_varp->sdtr_speed4; ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C0800->start_motor = adv_dvc_varp->start_motor; ep_38C0800->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait; ep_38C0800->serial_number_word1 = adv_dvc_varp->cfg->serial1; ep_38C0800->serial_number_word2 = adv_dvc_varp->cfg->serial2; ep_38C0800->serial_number_word3 = 
adv_dvc_varp->cfg->serial3; } else { ep_38C1600 = &boardp->eep_config.adv_38C1600_eep; ep_38C1600->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; ep_38C1600->max_host_qng = adv_dvc_varp->max_host_qng; ep_38C1600->max_dvc_qng = adv_dvc_varp->max_dvc_qng; ep_38C1600->termination_lvd = adv_dvc_varp->cfg->termination; ep_38C1600->disc_enable = adv_dvc_varp->cfg->disc_enable; ep_38C1600->bios_ctrl = adv_dvc_varp->bios_ctrl; ep_38C1600->wdtr_able = adv_dvc_varp->wdtr_able; ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C1600->sdtr_speed1 = adv_dvc_varp->sdtr_speed1; ep_38C1600->sdtr_speed2 = adv_dvc_varp->sdtr_speed2; ep_38C1600->sdtr_speed3 = adv_dvc_varp->sdtr_speed3; ep_38C1600->sdtr_speed4 = adv_dvc_varp->sdtr_speed4; ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C1600->start_motor = adv_dvc_varp->start_motor; ep_38C1600->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait; ep_38C1600->serial_number_word1 = adv_dvc_varp->cfg->serial1; ep_38C1600->serial_number_word2 = adv_dvc_varp->cfg->serial2; ep_38C1600->serial_number_word3 = adv_dvc_varp->cfg->serial3; } /* * Set the adapter's target id bit in the 'init_tidmask' field. */ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(adv_dvc_varp->chip_scsi_id); } /* * Channels are numbered beginning with 0. For AdvanSys one host * structure supports one channel. Multi-channel boards have a * separate host structure for each channel. */ shost->max_channel = 0; if (ASC_NARROW_BOARD(boardp)) { shost->max_id = ASC_MAX_TID + 1; shost->max_lun = ASC_MAX_LUN + 1; shost->max_cmd_len = ASC_MAX_CDB_LEN; shost->io_port = asc_dvc_varp->iop_base; boardp->asc_n_io_port = ASC_IOADR_GAP; shost->this_id = asc_dvc_varp->cfg->chip_scsi_id; /* Set maximum number of queues the adapter can handle. 
*/ shost->can_queue = asc_dvc_varp->max_total_qng; } else { shost->max_id = ADV_MAX_TID + 1; shost->max_lun = ADV_MAX_LUN + 1; shost->max_cmd_len = ADV_MAX_CDB_LEN; /* * Save the I/O Port address and length even though * I/O ports are not used to access Wide boards. * Instead the Wide boards are accessed with * PCI Memory Mapped I/O. */ shost->io_port = iop; shost->this_id = adv_dvc_varp->chip_scsi_id; /* Set maximum number of queues the adapter can handle. */ shost->can_queue = adv_dvc_varp->max_host_qng; } /* * Following v1.3.89, 'cmd_per_lun' is no longer needed * and should be set to zero. * * But because of a bug introduced in v1.3.89 if the driver is * compiled as a module and 'cmd_per_lun' is zero, the Mid-Level * SCSI function 'allocate_device' will panic. To allow the driver * to work as a module in these kernels set 'cmd_per_lun' to 1. * * Note: This is wrong. cmd_per_lun should be set to the depth * you want on untagged devices always. #ifdef MODULE */ shost->cmd_per_lun = 1; /* #else shost->cmd_per_lun = 0; #endif */ /* * Set the maximum number of scatter-gather elements the * adapter can handle. */ if (ASC_NARROW_BOARD(boardp)) { /* * Allow two commands with 'sg_tablesize' scatter-gather * elements to be executed simultaneously. This value is * the theoretical hardware limit. It may be decreased * below. */ shost->sg_tablesize = (((asc_dvc_varp->max_total_qng - 2) / 2) * ASC_SG_LIST_PER_Q) + 1; } else { shost->sg_tablesize = ADV_MAX_SG_LIST; } /* * The value of 'sg_tablesize' can not exceed the SCSI * mid-level driver definition of SG_ALL. SG_ALL also * must not be exceeded, because it is used to define the * size of the scatter-gather table in 'struct asc_sg_head'. */ if (shost->sg_tablesize > SG_ALL) { shost->sg_tablesize = SG_ALL; } ASC_DBG(1, "sg_tablesize: %d\n", shost->sg_tablesize); /* BIOS start address. 
*/ if (ASC_NARROW_BOARD(boardp)) { shost->base = AscGetChipBiosAddress(asc_dvc_varp->iop_base, asc_dvc_varp->bus_type); } else { /* * Fill-in BIOS board variables. The Wide BIOS saves * information in LRAM that is used by the driver. */ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_SIGNATURE, boardp->bios_signature); AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_VERSION, boardp->bios_version); AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODESEG, boardp->bios_codeseg); AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODELEN, boardp->bios_codelen); ASC_DBG(1, "bios_signature 0x%x, bios_version 0x%x\n", boardp->bios_signature, boardp->bios_version); ASC_DBG(1, "bios_codeseg 0x%x, bios_codelen 0x%x\n", boardp->bios_codeseg, boardp->bios_codelen); /* * If the BIOS saved a valid signature, then fill in * the BIOS code segment base address. */ if (boardp->bios_signature == 0x55AA) { /* * Convert x86 realmode code segment to a linear * address by shifting left 4. */ shost->base = ((ulong)boardp->bios_codeseg << 4); } else { shost->base = 0; } } /* * Register Board Resources - I/O Port, DMA, IRQ */ /* Register DMA Channel for Narrow boards. */ shost->dma_channel = NO_ISA_DMA; /* Default to no ISA DMA. */ #ifdef CONFIG_ISA if (ASC_NARROW_BOARD(boardp)) { /* Register DMA channel for ISA bus. */ if (asc_dvc_varp->bus_type & ASC_IS_ISA) { shost->dma_channel = asc_dvc_varp->cfg->isa_dma_channel; ret = request_dma(shost->dma_channel, DRV_NAME); if (ret) { shost_printk(KERN_ERR, shost, "request_dma() " "%d failed %d\n", shost->dma_channel, ret); goto err_free_proc; } AscEnableIsaDma(shost->dma_channel); } } #endif /* CONFIG_ISA */ /* Register IRQ Number. 
*/ ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost); ret = request_irq(boardp->irq, advansys_interrupt, share_irq, DRV_NAME, shost); if (ret) { if (ret == -EBUSY) { shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " "already in use\n", boardp->irq); } else if (ret == -EINVAL) { shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " "not valid\n", boardp->irq); } else { shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " "failed with %d\n", boardp->irq, ret); } goto err_free_dma; } /* * Initialize board RISC chip and enable interrupts. */ if (ASC_NARROW_BOARD(boardp)) { ASC_DBG(2, "AscInitAsc1000Driver()\n"); asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); if (!asc_dvc_varp->overrun_buf) { ret = -ENOMEM; goto err_free_irq; } warn_code = AscInitAsc1000Driver(asc_dvc_varp); if (warn_code || asc_dvc_varp->err_code) { shost_printk(KERN_ERR, shost, "error: init_state 0x%x, " "warn 0x%x, error 0x%x\n", asc_dvc_varp->init_state, warn_code, asc_dvc_varp->err_code); if (!asc_dvc_varp->overrun_dma) { ret = -ENODEV; goto err_free_mem; } } } else { if (advansys_wide_init_chip(shost)) { ret = -ENODEV; goto err_free_mem; } } ASC_DBG_PRT_SCSI_HOST(2, shost); ret = scsi_add_host(shost, boardp->dev); if (ret) goto err_free_mem; scsi_scan_host(shost); return 0; err_free_mem: if (ASC_NARROW_BOARD(boardp)) { if (asc_dvc_varp->overrun_dma) dma_unmap_single(boardp->dev, asc_dvc_varp->overrun_dma, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); kfree(asc_dvc_varp->overrun_buf); } else advansys_wide_free_mem(boardp); err_free_irq: free_irq(boardp->irq, shost); err_free_dma: #ifdef CONFIG_ISA if (shost->dma_channel != NO_ISA_DMA) free_dma(shost->dma_channel); #endif err_free_proc: kfree(boardp->prtbuf); err_unmap: if (boardp->ioremap_addr) iounmap(boardp->ioremap_addr); err_shost: return ret; } /* * advansys_release() * * Release resources allocated for a single AdvanSys adapter. 
*/ static int advansys_release(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DBG(1, "begin\n"); scsi_remove_host(shost); free_irq(board->irq, shost); #ifdef CONFIG_ISA if (shost->dma_channel != NO_ISA_DMA) { ASC_DBG(1, "free_dma()\n"); free_dma(shost->dma_channel); } #endif if (ASC_NARROW_BOARD(board)) { dma_unmap_single(board->dev, board->dvc_var.asc_dvc_var.overrun_dma, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); kfree(board->dvc_var.asc_dvc_var.overrun_buf); } else { iounmap(board->ioremap_addr); advansys_wide_free_mem(board); } kfree(board->prtbuf); scsi_host_put(shost); ASC_DBG(1, "end\n"); return 0; } #define ASC_IOADR_TABLE_MAX_IX 11 static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = { 0x100, 0x0110, 0x120, 0x0130, 0x140, 0x0150, 0x0190, 0x0210, 0x0230, 0x0250, 0x0330 }; /* * The ISA IRQ number is found in bits 2 and 3 of the CfgLsw. It decodes as: * 00: 10 * 01: 11 * 10: 12 * 11: 15 */ static unsigned int __devinit advansys_isa_irq_no(PortAddr iop_base) { unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base); unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10; if (chip_irq == 13) chip_irq = 15; return chip_irq; } static int __devinit advansys_isa_probe(struct device *dev, unsigned int id) { int err = -ENODEV; PortAddr iop_base = _asc_def_iop_base[id]; struct Scsi_Host *shost; struct asc_board *board; if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) { ASC_DBG(1, "I/O port 0x%x busy\n", iop_base); return -ENODEV; } ASC_DBG(1, "probing I/O port 0x%x\n", iop_base); if (!AscFindSignature(iop_base)) goto release_region; if (!(AscGetChipVersion(iop_base, ASC_IS_ISA) & ASC_CHIP_VER_ISA_BIT)) goto release_region; err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = advansys_isa_irq_no(iop_base); board->dev = dev; err = advansys_board_found(shost, iop_base, ASC_IS_ISA); if (err) goto free_host; dev_set_drvdata(dev, shost); return 0; 
free_host: scsi_host_put(shost); release_region: release_region(iop_base, ASC_IOADR_GAP); return err; } static int __devexit advansys_isa_remove(struct device *dev, unsigned int id) { int ioport = _asc_def_iop_base[id]; advansys_release(dev_get_drvdata(dev)); release_region(ioport, ASC_IOADR_GAP); return 0; } static struct isa_driver advansys_isa_driver = { .probe = advansys_isa_probe, .remove = __devexit_p(advansys_isa_remove), .driver = { .owner = THIS_MODULE, .name = DRV_NAME, }, }; /* * The VLB IRQ number is found in bits 2 to 4 of the CfgLsw. It decodes as: * 000: invalid * 001: 10 * 010: 11 * 011: 12 * 100: invalid * 101: 14 * 110: 15 * 111: invalid */ static unsigned int __devinit advansys_vlb_irq_no(PortAddr iop_base) { unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base); unsigned int chip_irq = ((cfg_lsw >> 2) & 0x07) + 9; if ((chip_irq < 10) || (chip_irq == 13) || (chip_irq > 15)) return 0; return chip_irq; } static int __devinit advansys_vlb_probe(struct device *dev, unsigned int id) { int err = -ENODEV; PortAddr iop_base = _asc_def_iop_base[id]; struct Scsi_Host *shost; struct asc_board *board; if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) { ASC_DBG(1, "I/O port 0x%x busy\n", iop_base); return -ENODEV; } ASC_DBG(1, "probing I/O port 0x%x\n", iop_base); if (!AscFindSignature(iop_base)) goto release_region; /* * I don't think this condition can actually happen, but the old * driver did it, and the chances of finding a VLB setup in 2007 * to do testing with is slight to none. 
*/ if (AscGetChipVersion(iop_base, ASC_IS_VL) > ASC_CHIP_MAX_VER_VL) goto release_region; err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = advansys_vlb_irq_no(iop_base); board->dev = dev; err = advansys_board_found(shost, iop_base, ASC_IS_VL); if (err) goto free_host; dev_set_drvdata(dev, shost); return 0; free_host: scsi_host_put(shost); release_region: release_region(iop_base, ASC_IOADR_GAP); return -ENODEV; } static struct isa_driver advansys_vlb_driver = { .probe = advansys_vlb_probe, .remove = __devexit_p(advansys_isa_remove), .driver = { .owner = THIS_MODULE, .name = "advansys_vlb", }, }; static struct eisa_device_id advansys_eisa_table[] __devinitdata = { { "ABP7401" }, { "ABP7501" }, { "" } }; MODULE_DEVICE_TABLE(eisa, advansys_eisa_table); /* * EISA is a little more tricky than PCI; each EISA device may have two * channels, and this driver is written to make each channel its own Scsi_Host */ struct eisa_scsi_data { struct Scsi_Host *host[2]; }; /* * The EISA IRQ number is found in bits 8 to 10 of the CfgLsw. 
It decodes as: * 000: 10 * 001: 11 * 010: 12 * 011: invalid * 100: 14 * 101: 15 * 110: invalid * 111: invalid */ static unsigned int __devinit advansys_eisa_irq_no(struct eisa_device *edev) { unsigned short cfg_lsw = inw(edev->base_addr + 0xc86); unsigned int chip_irq = ((cfg_lsw >> 8) & 0x07) + 10; if ((chip_irq == 13) || (chip_irq > 15)) return 0; return chip_irq; } static int __devinit advansys_eisa_probe(struct device *dev) { int i, ioport, irq = 0; int err; struct eisa_device *edev = to_eisa_device(dev); struct eisa_scsi_data *data; err = -ENOMEM; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto fail; ioport = edev->base_addr + 0xc30; err = -ENODEV; for (i = 0; i < 2; i++, ioport += 0x20) { struct asc_board *board; struct Scsi_Host *shost; if (!request_region(ioport, ASC_IOADR_GAP, DRV_NAME)) { printk(KERN_WARNING "Region %x-%x busy\n", ioport, ioport + ASC_IOADR_GAP - 1); continue; } if (!AscFindSignature(ioport)) { release_region(ioport, ASC_IOADR_GAP); continue; } /* * I don't know why we need to do this for EISA chips, but * not for any others. It looks to be equivalent to * AscGetChipCfgMsw, but I may have overlooked something, * so I'm not converting it until I get an EISA board to * test with. 
*/ inw(ioport + 4); if (!irq) irq = advansys_eisa_irq_no(edev); err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = irq; board->dev = dev; err = advansys_board_found(shost, ioport, ASC_IS_EISA); if (!err) { data->host[i] = shost; continue; } scsi_host_put(shost); release_region: release_region(ioport, ASC_IOADR_GAP); break; } if (err) goto free_data; dev_set_drvdata(dev, data); return 0; free_data: kfree(data->host[0]); kfree(data->host[1]); kfree(data); fail: return err; } static __devexit int advansys_eisa_remove(struct device *dev) { int i; struct eisa_scsi_data *data = dev_get_drvdata(dev); for (i = 0; i < 2; i++) { int ioport; struct Scsi_Host *shost = data->host[i]; if (!shost) continue; ioport = shost->io_port; advansys_release(shost); release_region(ioport, ASC_IOADR_GAP); } kfree(data); return 0; } static struct eisa_driver advansys_eisa_driver = { .id_table = advansys_eisa_table, .driver = { .name = DRV_NAME, .probe = advansys_eisa_probe, .remove = __devexit_p(advansys_eisa_remove), } }; /* PCI Devices supported by this driver */ static struct pci_device_id advansys_pci_tbl[] __devinitdata = { {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {} }; MODULE_DEVICE_TABLE(pci, advansys_pci_tbl); static void __devinit advansys_set_latency(struct pci_dev *pdev) { if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) || (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) { pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0); } else { u8 latency; 
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency); if (latency < 0x20) pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20); } } static int __devinit advansys_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err, ioport; struct Scsi_Host *shost; struct asc_board *board; err = pci_enable_device(pdev); if (err) goto fail; err = pci_request_regions(pdev, DRV_NAME); if (err) goto disable_device; pci_set_master(pdev); advansys_set_latency(pdev); err = -ENODEV; if (pci_resource_len(pdev, 0) == 0) goto release_region; ioport = pci_resource_start(pdev, 0); err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = pdev->irq; board->dev = &pdev->dev; if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW || pdev->device == PCI_DEVICE_ID_38C0800_REV1 || pdev->device == PCI_DEVICE_ID_38C1600_REV1) { board->flags |= ASC_IS_WIDE_BOARD; } err = advansys_board_found(shost, ioport, ASC_IS_PCI); if (err) goto free_host; pci_set_drvdata(pdev, shost); return 0; free_host: scsi_host_put(shost); release_region: pci_release_regions(pdev); disable_device: pci_disable_device(pdev); fail: return err; } static void __devexit advansys_pci_remove(struct pci_dev *pdev) { advansys_release(pci_get_drvdata(pdev)); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver advansys_pci_driver = { .name = DRV_NAME, .id_table = advansys_pci_tbl, .probe = advansys_pci_probe, .remove = __devexit_p(advansys_pci_remove), }; static int __init advansys_init(void) { int error; error = isa_register_driver(&advansys_isa_driver, ASC_IOADR_TABLE_MAX_IX); if (error) goto fail; error = isa_register_driver(&advansys_vlb_driver, ASC_IOADR_TABLE_MAX_IX); if (error) goto unregister_isa; error = eisa_driver_register(&advansys_eisa_driver); if (error) goto unregister_vlb; error = pci_register_driver(&advansys_pci_driver); if (error) goto unregister_eisa; return 0; unregister_eisa: 
eisa_driver_unregister(&advansys_eisa_driver); unregister_vlb: isa_unregister_driver(&advansys_vlb_driver); unregister_isa: isa_unregister_driver(&advansys_isa_driver); fail: return error; } static void __exit advansys_exit(void) { pci_unregister_driver(&advansys_pci_driver); eisa_driver_unregister(&advansys_eisa_driver); isa_unregister_driver(&advansys_vlb_driver); isa_unregister_driver(&advansys_isa_driver); } module_init(advansys_init); module_exit(advansys_exit); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("advansys/mcode.bin"); MODULE_FIRMWARE("advansys/3550.bin"); MODULE_FIRMWARE("advansys/38C0800.bin"); MODULE_FIRMWARE("advansys/38C1600.bin");
gpl-2.0
EPDCenter/android_kernel_rikomagic_mk808
drivers/crypto/omap-aes.c
3181
22910
/* * Cryptographic API. * * Support for OMAP AES HW acceleration. * * Copyright (c) 2010 Nokia Corporation * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/crypto.h> #include <linux/interrupt.h> #include <crypto/scatterwalk.h> #include <crypto/aes.h> #include <plat/cpu.h> #include <plat/dma.h> /* OMAP TRM gives bitfields as start:end, where start is the higher bit number. For example 7:0 */ #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) #define AES_REG_KEY(x) (0x1C - ((x ^ 0x01) * 0x04)) #define AES_REG_IV(x) (0x20 + ((x) * 0x04)) #define AES_REG_CTRL 0x30 #define AES_REG_CTRL_CTR_WIDTH (1 << 7) #define AES_REG_CTRL_CTR (1 << 6) #define AES_REG_CTRL_CBC (1 << 5) #define AES_REG_CTRL_KEY_SIZE (3 << 3) #define AES_REG_CTRL_DIRECTION (1 << 2) #define AES_REG_CTRL_INPUT_READY (1 << 1) #define AES_REG_CTRL_OUTPUT_READY (1 << 0) #define AES_REG_DATA 0x34 #define AES_REG_DATA_N(x) (0x34 + ((x) * 0x04)) #define AES_REG_REV 0x44 #define AES_REG_REV_MAJOR 0xF0 #define AES_REG_REV_MINOR 0x0F #define AES_REG_MASK 0x48 #define AES_REG_MASK_SIDLE (1 << 6) #define AES_REG_MASK_START (1 << 5) #define AES_REG_MASK_DMA_OUT_EN (1 << 3) #define AES_REG_MASK_DMA_IN_EN (1 << 2) #define AES_REG_MASK_SOFTRESET (1 << 1) #define AES_REG_AUTOIDLE (1 << 0) #define AES_REG_SYSSTATUS 0x4C #define AES_REG_SYSSTATUS_RESETDONE (1 << 0) #define DEFAULT_TIMEOUT (5*HZ) #define 
FLAGS_MODE_MASK 0x000f #define FLAGS_ENCRYPT BIT(0) #define FLAGS_CBC BIT(1) #define FLAGS_GIV BIT(2) #define FLAGS_INIT BIT(4) #define FLAGS_FAST BIT(5) #define FLAGS_BUSY BIT(6) struct omap_aes_ctx { struct omap_aes_dev *dd; int keylen; u32 key[AES_KEYSIZE_256 / sizeof(u32)]; unsigned long flags; }; struct omap_aes_reqctx { unsigned long mode; }; #define OMAP_AES_QUEUE_LENGTH 1 #define OMAP_AES_CACHE_SIZE 0 struct omap_aes_dev { struct list_head list; unsigned long phys_base; void __iomem *io_base; struct clk *iclk; struct omap_aes_ctx *ctx; struct device *dev; unsigned long flags; int err; spinlock_t lock; struct crypto_queue queue; struct tasklet_struct done_task; struct tasklet_struct queue_task; struct ablkcipher_request *req; size_t total; struct scatterlist *in_sg; size_t in_offset; struct scatterlist *out_sg; size_t out_offset; size_t buflen; void *buf_in; size_t dma_size; int dma_in; int dma_lch_in; dma_addr_t dma_addr_in; void *buf_out; int dma_out; int dma_lch_out; dma_addr_t dma_addr_out; }; /* keep registered devices data here */ static LIST_HEAD(dev_list); static DEFINE_SPINLOCK(list_lock); static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) { return __raw_readl(dd->io_base + offset); } static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, u32 value) { __raw_writel(value, dd->io_base + offset); } static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset, u32 value, u32 mask) { u32 val; val = omap_aes_read(dd, offset); val &= ~mask; val |= value; omap_aes_write(dd, offset, val); } static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset, u32 *value, int count) { for (; count--; value++, offset += 4) omap_aes_write(dd, offset, *value); } static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit) { unsigned long timeout = jiffies + DEFAULT_TIMEOUT; while (!(omap_aes_read(dd, offset) & bit)) { if (time_is_before_jiffies(timeout)) { dev_err(dd->dev, "omap-aes timeout\n"); return 
-ETIMEDOUT; } } return 0; } static int omap_aes_hw_init(struct omap_aes_dev *dd) { /* * clocks are enabled when request starts and disabled when finished. * It may be long delays between requests. * Device might go to off mode to save power. */ clk_enable(dd->iclk); if (!(dd->flags & FLAGS_INIT)) { /* is it necessary to reset before every operation? */ omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET, AES_REG_MASK_SOFTRESET); /* * prevent OCP bus error (SRESP) in case an access to the module * is performed while the module is coming out of soft reset */ __asm__ __volatile__("nop"); __asm__ __volatile__("nop"); if (omap_aes_wait(dd, AES_REG_SYSSTATUS, AES_REG_SYSSTATUS_RESETDONE)) return -ETIMEDOUT; dd->flags |= FLAGS_INIT; dd->err = 0; } return 0; } static int omap_aes_write_ctrl(struct omap_aes_dev *dd) { unsigned int key32; int i, err; u32 val, mask; err = omap_aes_hw_init(dd); if (err) return err; val = 0; if (dd->dma_lch_out >= 0) val |= AES_REG_MASK_DMA_OUT_EN; if (dd->dma_lch_in >= 0) val |= AES_REG_MASK_DMA_IN_EN; mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN; omap_aes_write_mask(dd, AES_REG_MASK, val, mask); key32 = dd->ctx->keylen / sizeof(u32); /* it seems a key should always be set even if it has not changed */ for (i = 0; i < key32; i++) { omap_aes_write(dd, AES_REG_KEY(i), __le32_to_cpu(dd->ctx->key[i])); } if ((dd->flags & FLAGS_CBC) && dd->req->info) omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4); val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); if (dd->flags & FLAGS_CBC) val |= AES_REG_CTRL_CBC; if (dd->flags & FLAGS_ENCRYPT) val |= AES_REG_CTRL_DIRECTION; mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | AES_REG_CTRL_KEY_SIZE; omap_aes_write_mask(dd, AES_REG_CTRL, val, mask); /* IN */ omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, dd->phys_base + AES_REG_DATA, 0, 4); omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); omap_set_dma_src_burst_mode(dd->dma_lch_in, 
OMAP_DMA_DATA_BURST_4); /* OUT */ omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, dd->phys_base + AES_REG_DATA, 0, 4); omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); return 0; } static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) { struct omap_aes_dev *dd = NULL, *tmp; spin_lock_bh(&list_lock); if (!ctx->dd) { list_for_each_entry(tmp, &dev_list, list) { /* FIXME: take fist available aes core */ dd = tmp; break; } ctx->dd = dd; } else { /* already found before */ dd = ctx->dd; } spin_unlock_bh(&list_lock); return dd; } static void omap_aes_dma_callback(int lch, u16 ch_status, void *data) { struct omap_aes_dev *dd = data; if (ch_status != OMAP_DMA_BLOCK_IRQ) { pr_err("omap-aes DMA error status: 0x%hx\n", ch_status); dd->err = -EIO; dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ } else if (lch == dd->dma_lch_in) { return; } /* dma_lch_out - completed */ tasklet_schedule(&dd->done_task); } static int omap_aes_dma_init(struct omap_aes_dev *dd) { int err = -ENOMEM; dd->dma_lch_out = -1; dd->dma_lch_in = -1; dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE; dd->buflen &= ~(AES_BLOCK_SIZE - 1); if (!dd->buf_in || !dd->buf_out) { dev_err(dd->dev, "unable to alloc pages.\n"); goto err_alloc; } /* MAP here */ dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen, DMA_TO_DEVICE); if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); err = -EINVAL; goto err_map_in; } dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen, DMA_FROM_DEVICE); if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); err = -EINVAL; goto err_map_out; } err = omap_request_dma(dd->dma_in, 
"omap-aes-rx", omap_aes_dma_callback, dd, &dd->dma_lch_in); if (err) { dev_err(dd->dev, "Unable to request DMA channel\n"); goto err_dma_in; } err = omap_request_dma(dd->dma_out, "omap-aes-tx", omap_aes_dma_callback, dd, &dd->dma_lch_out); if (err) { dev_err(dd->dev, "Unable to request DMA channel\n"); goto err_dma_out; } return 0; err_dma_out: omap_free_dma(dd->dma_lch_in); err_dma_in: dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, DMA_FROM_DEVICE); err_map_out: dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); err_map_in: free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); err_alloc: if (err) pr_err("error: %d\n", err); return err; } static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) { omap_free_dma(dd->dma_lch_out); omap_free_dma(dd->dma_lch_in); dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, DMA_FROM_DEVICE); dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); } static void sg_copy_buf(void *buf, struct scatterlist *sg, unsigned int start, unsigned int nbytes, int out) { struct scatter_walk walk; if (!nbytes) return; scatterwalk_start(&walk, sg); scatterwalk_advance(&walk, start); scatterwalk_copychunks(buf, &walk, nbytes, out); scatterwalk_done(&walk, out, 0); } static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, size_t buflen, size_t total, int out) { unsigned int count, off = 0; while (buflen && total) { count = min((*sg)->length - *offset, total); count = min(count, buflen); if (!count) return off; /* * buflen and total are AES_BLOCK_SIZE size aligned, * so count should be also aligned */ sg_copy_buf(buf + off, *sg, *offset, count, out); off += count; buflen -= count; *offset += count; total -= count; if (*offset == (*sg)->length) { *sg = sg_next(*sg); if (*sg) *offset = 0; else total = 
0; } } return off; } static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length) { struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); struct omap_aes_dev *dd = ctx->dd; int len32; pr_debug("len: %d\n", length); dd->dma_size = length; if (!(dd->flags & FLAGS_FAST)) dma_sync_single_for_device(dd->dev, dma_addr_in, length, DMA_TO_DEVICE); len32 = DIV_ROUND_UP(length, sizeof(u32)); /* IN */ omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32, len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in, OMAP_DMA_DST_SYNC); omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC, dma_addr_in, 0, 0); /* OUT */ omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32, len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_out, OMAP_DMA_SRC_SYNC); omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC, dma_addr_out, 0, 0); omap_start_dma(dd->dma_lch_in); omap_start_dma(dd->dma_lch_out); /* start DMA or disable idle mode */ omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, AES_REG_MASK_START); return 0; } static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm( crypto_ablkcipher_reqtfm(dd->req)); int err, fast = 0, in, out; size_t count; dma_addr_t addr_in, addr_out; pr_debug("total: %d\n", dd->total); if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) { /* check for alignment */ in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)); out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)); fast = in && out; } if (fast) { count = min(dd->total, sg_dma_len(dd->in_sg)); count = min(count, sg_dma_len(dd->out_sg)); if (count != dd->total) { pr_err("request length != buffer length\n"); return -EINVAL; } pr_debug("fast\n"); err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); if (!err) { dev_err(dd->dev, "dma_map_sg() error\n"); return -EINVAL; } err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); if (!err) { dev_err(dd->dev, "dma_map_sg() 
error\n"); dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); return -EINVAL; } addr_in = sg_dma_address(dd->in_sg); addr_out = sg_dma_address(dd->out_sg); dd->flags |= FLAGS_FAST; } else { /* use cache buffers */ count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in, dd->buflen, dd->total, 0); addr_in = dd->dma_addr_in; addr_out = dd->dma_addr_out; dd->flags &= ~FLAGS_FAST; } dd->total -= count; err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count); if (err) { dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); } return err; } static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) { struct ablkcipher_request *req = dd->req; pr_debug("err: %d\n", err); clk_disable(dd->iclk); dd->flags &= ~FLAGS_BUSY; req->base.complete(&req->base, err); } static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) { int err = 0; size_t count; pr_debug("total: %d\n", dd->total); omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START); omap_stop_dma(dd->dma_lch_in); omap_stop_dma(dd->dma_lch_out); if (dd->flags & FLAGS_FAST) { dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); } else { dma_sync_single_for_device(dd->dev, dd->dma_addr_out, dd->dma_size, DMA_FROM_DEVICE); /* copy data */ count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out, dd->buflen, dd->dma_size, 1); if (count != dd->dma_size) { err = -EINVAL; pr_err("not all data converted: %u\n", count); } } return err; } static int omap_aes_handle_queue(struct omap_aes_dev *dd, struct ablkcipher_request *req) { struct crypto_async_request *async_req, *backlog; struct omap_aes_ctx *ctx; struct omap_aes_reqctx *rctx; unsigned long flags; int err, ret = 0; spin_lock_irqsave(&dd->lock, flags); if (req) ret = ablkcipher_enqueue_request(&dd->queue, req); if (dd->flags & FLAGS_BUSY) { spin_unlock_irqrestore(&dd->lock, flags); return ret; } backlog = crypto_get_backlog(&dd->queue); async_req = 
crypto_dequeue_request(&dd->queue); if (async_req) dd->flags |= FLAGS_BUSY; spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) return ret; if (backlog) backlog->complete(backlog, -EINPROGRESS); req = ablkcipher_request_cast(async_req); /* assign new request to device */ dd->req = req; dd->total = req->nbytes; dd->in_offset = 0; dd->in_sg = req->src; dd->out_offset = 0; dd->out_sg = req->dst; rctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); rctx->mode &= FLAGS_MODE_MASK; dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; dd->ctx = ctx; ctx->dd = dd; err = omap_aes_write_ctrl(dd); if (!err) err = omap_aes_crypt_dma_start(dd); if (err) { /* aes_task will not finish it, so do it here */ omap_aes_finish_req(dd, err); tasklet_schedule(&dd->queue_task); } return ret; /* return ret, which is enqueue return value */ } static void omap_aes_done_task(unsigned long data) { struct omap_aes_dev *dd = (struct omap_aes_dev *)data; int err; pr_debug("enter\n"); err = omap_aes_crypt_dma_stop(dd); err = dd->err ? : err; if (dd->total && !err) { err = omap_aes_crypt_dma_start(dd); if (!err) return; /* DMA started. Not fininishing. 
*/ } omap_aes_finish_req(dd, err); omap_aes_handle_queue(dd, NULL); pr_debug("exit\n"); } static void omap_aes_queue_task(unsigned long data) { struct omap_aes_dev *dd = (struct omap_aes_dev *)data; omap_aes_handle_queue(dd, NULL); } static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); struct omap_aes_dev *dd; pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { pr_err("request size is not exact amount of AES blocks\n"); return -EINVAL; } dd = omap_aes_find_dev(ctx); if (!dd) return -ENODEV; rctx->mode = mode; return omap_aes_handle_queue(dd, req); } /* ********************** ALG API ************************************ */ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; pr_debug("enter, keylen: %d\n", keylen); memcpy(ctx->key, key, keylen); ctx->keylen = keylen; return 0; } static int omap_aes_ecb_encrypt(struct ablkcipher_request *req) { return omap_aes_crypt(req, FLAGS_ENCRYPT); } static int omap_aes_ecb_decrypt(struct ablkcipher_request *req) { return omap_aes_crypt(req, 0); } static int omap_aes_cbc_encrypt(struct ablkcipher_request *req) { return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); } static int omap_aes_cbc_decrypt(struct ablkcipher_request *req) { return omap_aes_crypt(req, FLAGS_CBC); } static int omap_aes_cra_init(struct crypto_tfm *tfm) { pr_debug("enter\n"); tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); return 0; } static void omap_aes_cra_exit(struct crypto_tfm *tfm) { pr_debug("enter\n"); } /* ********************** ALGS ************************************ 
*/ static struct crypto_alg algs[] = { { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-omap", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_aes_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = omap_aes_cra_init, .cra_exit = omap_aes_cra_exit, .cra_u.ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = omap_aes_setkey, .encrypt = omap_aes_ecb_encrypt, .decrypt = omap_aes_ecb_decrypt, } }, { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-omap", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_aes_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = omap_aes_cra_init, .cra_exit = omap_aes_cra_exit, .cra_u.ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = omap_aes_setkey, .encrypt = omap_aes_cbc_encrypt, .decrypt = omap_aes_cbc_decrypt, } } }; static int omap_aes_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct omap_aes_dev *dd; struct resource *res; int err = -ENOMEM, i, j; u32 reg; dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL); if (dd == NULL) { dev_err(dev, "unable to alloc data struct.\n"); goto err_data; } dd->dev = dev; platform_set_drvdata(pdev, dd); spin_lock_init(&dd->lock); crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH); /* Get the base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "invalid resource type\n"); err = -ENODEV; goto err_res; } dd->phys_base = res->start; /* Get the DMA */ res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) dev_info(dev, "no DMA info\n"); else dd->dma_out = res->start; /* Get the DMA */ res = platform_get_resource(pdev, 
IORESOURCE_DMA, 1); if (!res) dev_info(dev, "no DMA info\n"); else dd->dma_in = res->start; /* Initializing the clock */ dd->iclk = clk_get(dev, "ick"); if (IS_ERR(dd->iclk)) { dev_err(dev, "clock intialization failed.\n"); err = PTR_ERR(dd->iclk); goto err_res; } dd->io_base = ioremap(dd->phys_base, SZ_4K); if (!dd->io_base) { dev_err(dev, "can't ioremap\n"); err = -ENOMEM; goto err_io; } clk_enable(dd->iclk); reg = omap_aes_read(dd, AES_REG_REV); dev_info(dev, "OMAP AES hw accel rev: %u.%u\n", (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR); clk_disable(dd->iclk); tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd); tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); err = omap_aes_dma_init(dd); if (err) goto err_dma; INIT_LIST_HEAD(&dd->list); spin_lock(&list_lock); list_add_tail(&dd->list, &dev_list); spin_unlock(&list_lock); for (i = 0; i < ARRAY_SIZE(algs); i++) { pr_debug("i: %d\n", i); INIT_LIST_HEAD(&algs[i].cra_list); err = crypto_register_alg(&algs[i]); if (err) goto err_algs; } pr_info("probe() done\n"); return 0; err_algs: for (j = 0; j < i; j++) crypto_unregister_alg(&algs[j]); omap_aes_dma_cleanup(dd); err_dma: tasklet_kill(&dd->done_task); tasklet_kill(&dd->queue_task); iounmap(dd->io_base); err_io: clk_put(dd->iclk); err_res: kfree(dd); dd = NULL; err_data: dev_err(dev, "initialization failed.\n"); return err; } static int omap_aes_remove(struct platform_device *pdev) { struct omap_aes_dev *dd = platform_get_drvdata(pdev); int i; if (!dd) return -ENODEV; spin_lock(&list_lock); list_del(&dd->list); spin_unlock(&list_lock); for (i = 0; i < ARRAY_SIZE(algs); i++) crypto_unregister_alg(&algs[i]); tasklet_kill(&dd->done_task); tasklet_kill(&dd->queue_task); omap_aes_dma_cleanup(dd); iounmap(dd->io_base); clk_put(dd->iclk); kfree(dd); dd = NULL; return 0; } static struct platform_driver omap_aes_driver = { .probe = omap_aes_probe, .remove = omap_aes_remove, .driver = { .name = "omap-aes", .owner = THIS_MODULE, 
}, }; static int __init omap_aes_mod_init(void) { pr_info("loading %s driver\n", "omap-aes"); if (!cpu_class_is_omap2() || omap_type() != OMAP2_DEVICE_TYPE_SEC) { pr_err("Unsupported cpu\n"); return -ENODEV; } return platform_driver_register(&omap_aes_driver); } static void __exit omap_aes_mod_exit(void) { platform_driver_unregister(&omap_aes_driver); } module_init(omap_aes_mod_init); module_exit(omap_aes_mod_exit); MODULE_DESCRIPTION("OMAP AES hw acceleration support."); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Dmitry Kasatkin");
gpl-2.0
uberlaggydarwin/htc-bfam-caf
drivers/sbus/char/bbc_i2c.c
4461
9713
/* bbc_i2c.c: I2C low-level driver for BBC device on UltraSPARC-III * platforms. * * Copyright (C) 2001, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/bbc.h> #include <asm/io.h> #include "bbc_i2c.h" /* Convert this driver to use i2c bus layer someday... */ #define I2C_PCF_PIN 0x80 #define I2C_PCF_ESO 0x40 #define I2C_PCF_ES1 0x20 #define I2C_PCF_ES2 0x10 #define I2C_PCF_ENI 0x08 #define I2C_PCF_STA 0x04 #define I2C_PCF_STO 0x02 #define I2C_PCF_ACK 0x01 #define I2C_PCF_START (I2C_PCF_PIN | I2C_PCF_ESO | I2C_PCF_ENI | I2C_PCF_STA | I2C_PCF_ACK) #define I2C_PCF_STOP (I2C_PCF_PIN | I2C_PCF_ESO | I2C_PCF_STO | I2C_PCF_ACK) #define I2C_PCF_REPSTART ( I2C_PCF_ESO | I2C_PCF_STA | I2C_PCF_ACK) #define I2C_PCF_IDLE (I2C_PCF_PIN | I2C_PCF_ESO | I2C_PCF_ACK) #define I2C_PCF_INI 0x40 /* 1 if not initialized */ #define I2C_PCF_STS 0x20 #define I2C_PCF_BER 0x10 #define I2C_PCF_AD0 0x08 #define I2C_PCF_LRB 0x08 #define I2C_PCF_AAS 0x04 #define I2C_PCF_LAB 0x02 #define I2C_PCF_BB 0x01 /* The BBC devices have two I2C controllers. The first I2C controller * connects mainly to configuration proms (NVRAM, cpu configuration, * dimm types, etc.). Whereas the second I2C controller connects to * environmental control devices such as fans and temperature sensors. * The second controller also connects to the smartcard reader, if present. 
*/ static void set_device_claimage(struct bbc_i2c_bus *bp, struct platform_device *op, int val) { int i; for (i = 0; i < NUM_CHILDREN; i++) { if (bp->devs[i].device == op) { bp->devs[i].client_claimed = val; return; } } } #define claim_device(BP,ECHILD) set_device_claimage(BP,ECHILD,1) #define release_device(BP,ECHILD) set_device_claimage(BP,ECHILD,0) struct platform_device *bbc_i2c_getdev(struct bbc_i2c_bus *bp, int index) { struct platform_device *op = NULL; int curidx = 0, i; for (i = 0; i < NUM_CHILDREN; i++) { if (!(op = bp->devs[i].device)) break; if (curidx == index) goto out; op = NULL; curidx++; } out: if (curidx == index) return op; return NULL; } struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct platform_device *op) { struct bbc_i2c_client *client; const u32 *reg; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) return NULL; client->bp = bp; client->op = op; reg = of_get_property(op->dev.of_node, "reg", NULL); if (!reg) { kfree(client); return NULL; } client->bus = reg[0]; client->address = reg[1]; claim_device(bp, op); return client; } void bbc_i2c_detach(struct bbc_i2c_client *client) { struct bbc_i2c_bus *bp = client->bp; struct platform_device *op = client->op; release_device(bp, op); kfree(client); } static int wait_for_pin(struct bbc_i2c_bus *bp, u8 *status) { DECLARE_WAITQUEUE(wait, current); int limit = 32; int ret = 1; bp->waiting = 1; add_wait_queue(&bp->wq, &wait); while (limit-- > 0) { long val; val = wait_event_interruptible_timeout( bp->wq, (((*status = readb(bp->i2c_control_regs + 0)) & I2C_PCF_PIN) == 0), msecs_to_jiffies(250)); if (val > 0) { ret = 0; break; } } remove_wait_queue(&bp->wq, &wait); bp->waiting = 0; return ret; } int bbc_i2c_writeb(struct bbc_i2c_client *client, unsigned char val, int off) { struct bbc_i2c_bus *bp = client->bp; int address = client->address; u8 status; int ret = -1; if (bp->i2c_bussel_reg != NULL) writeb(client->bus, bp->i2c_bussel_reg); writeb(address, bp->i2c_control_regs + 
0x1); writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); if (wait_for_pin(bp, &status)) goto out; writeb(off, bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status) || (status & I2C_PCF_LRB) != 0) goto out; writeb(val, bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status)) goto out; ret = 0; out: writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); return ret; } int bbc_i2c_readb(struct bbc_i2c_client *client, unsigned char *byte, int off) { struct bbc_i2c_bus *bp = client->bp; unsigned char address = client->address, status; int ret = -1; if (bp->i2c_bussel_reg != NULL) writeb(client->bus, bp->i2c_bussel_reg); writeb(address, bp->i2c_control_regs + 0x1); writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); if (wait_for_pin(bp, &status)) goto out; writeb(off, bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status) || (status & I2C_PCF_LRB) != 0) goto out; writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); address |= 0x1; /* READ */ writeb(address, bp->i2c_control_regs + 0x1); writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); if (wait_for_pin(bp, &status)) goto out; /* Set PIN back to one so the device sends the first * byte. 
*/ (void) readb(bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status)) goto out; writeb(I2C_PCF_ESO | I2C_PCF_ENI, bp->i2c_control_regs + 0x0); *byte = readb(bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status)) goto out; ret = 0; out: writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); (void) readb(bp->i2c_control_regs + 0x1); return ret; } int bbc_i2c_write_buf(struct bbc_i2c_client *client, char *buf, int len, int off) { int ret = 0; while (len > 0) { ret = bbc_i2c_writeb(client, *buf, off); if (ret < 0) break; len--; buf++; off++; } return ret; } int bbc_i2c_read_buf(struct bbc_i2c_client *client, char *buf, int len, int off) { int ret = 0; while (len > 0) { ret = bbc_i2c_readb(client, buf, off); if (ret < 0) break; len--; buf++; off++; } return ret; } EXPORT_SYMBOL(bbc_i2c_getdev); EXPORT_SYMBOL(bbc_i2c_attach); EXPORT_SYMBOL(bbc_i2c_detach); EXPORT_SYMBOL(bbc_i2c_writeb); EXPORT_SYMBOL(bbc_i2c_readb); EXPORT_SYMBOL(bbc_i2c_write_buf); EXPORT_SYMBOL(bbc_i2c_read_buf); static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id) { struct bbc_i2c_bus *bp = dev_id; /* PIN going from set to clear is the only event which * makes the i2c assert an interrupt. 
*/ if (bp->waiting && !(readb(bp->i2c_control_regs + 0x0) & I2C_PCF_PIN)) wake_up_interruptible(&bp->wq); return IRQ_HANDLED; } static void __init reset_one_i2c(struct bbc_i2c_bus *bp) { writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); writeb(bp->own, bp->i2c_control_regs + 0x1); writeb(I2C_PCF_PIN | I2C_PCF_ES1, bp->i2c_control_regs + 0x0); writeb(bp->clock, bp->i2c_control_regs + 0x1); writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); } static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index) { struct bbc_i2c_bus *bp; struct device_node *dp; int entry; bp = kzalloc(sizeof(*bp), GFP_KERNEL); if (!bp) return NULL; bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs"); if (!bp->i2c_control_regs) goto fail; bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); if (!bp->i2c_bussel_reg) goto fail; bp->waiting = 0; init_waitqueue_head(&bp->wq); if (request_irq(op->archdata.irqs[0], bbc_i2c_interrupt, IRQF_SHARED, "bbc_i2c", bp)) goto fail; bp->index = index; bp->op = op; spin_lock_init(&bp->lock); entry = 0; for (dp = op->dev.of_node->child; dp && entry < 8; dp = dp->sibling, entry++) { struct platform_device *child_op; child_op = of_find_device_by_node(dp); bp->devs[entry].device = child_op; bp->devs[entry].client_claimed = 0; } writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); bp->own = readb(bp->i2c_control_regs + 0x01); writeb(I2C_PCF_PIN | I2C_PCF_ES1, bp->i2c_control_regs + 0x0); bp->clock = readb(bp->i2c_control_regs + 0x01); printk(KERN_INFO "i2c-%d: Regs at %p, %d devices, own %02x, clock %02x.\n", bp->index, bp->i2c_control_regs, entry, bp->own, bp->clock); reset_one_i2c(bp); return bp; fail: if (bp->i2c_bussel_reg) of_iounmap(&op->resource[1], bp->i2c_bussel_reg, 1); if (bp->i2c_control_regs) of_iounmap(&op->resource[0], bp->i2c_control_regs, 2); kfree(bp); return NULL; } extern int bbc_envctrl_init(struct bbc_i2c_bus *bp); extern void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp); 
static int __devinit bbc_i2c_probe(struct platform_device *op) { struct bbc_i2c_bus *bp; int err, index = 0; bp = attach_one_i2c(op, index); if (!bp) return -EINVAL; err = bbc_envctrl_init(bp); if (err) { free_irq(op->archdata.irqs[0], bp); if (bp->i2c_bussel_reg) of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); if (bp->i2c_control_regs) of_iounmap(&op->resource[1], bp->i2c_control_regs, 2); kfree(bp); } else { dev_set_drvdata(&op->dev, bp); } return err; } static int __devexit bbc_i2c_remove(struct platform_device *op) { struct bbc_i2c_bus *bp = dev_get_drvdata(&op->dev); bbc_envctrl_cleanup(bp); free_irq(op->archdata.irqs[0], bp); if (bp->i2c_bussel_reg) of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); if (bp->i2c_control_regs) of_iounmap(&op->resource[1], bp->i2c_control_regs, 2); kfree(bp); return 0; } static const struct of_device_id bbc_i2c_match[] = { { .name = "i2c", .compatible = "SUNW,bbc-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, bbc_i2c_match); static struct platform_driver bbc_i2c_driver = { .driver = { .name = "bbc_i2c", .owner = THIS_MODULE, .of_match_table = bbc_i2c_match, }, .probe = bbc_i2c_probe, .remove = __devexit_p(bbc_i2c_remove), }; module_platform_driver(bbc_i2c_driver); MODULE_LICENSE("GPL");
gpl-2.0
links234/qr-linux-kernel
drivers/mfd/lm3533-ctrlbank.c
4973
3774
/* * lm3533-ctrlbank.c -- LM3533 Generic Control Bank interface * * Copyright (C) 2011-2012 Texas Instruments * * Author: Johan Hovold <jhovold@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/device.h> #include <linux/module.h> #include <linux/mfd/lm3533.h> #define LM3533_MAX_CURRENT_MIN 5000 #define LM3533_MAX_CURRENT_MAX 29800 #define LM3533_MAX_CURRENT_STEP 800 #define LM3533_BRIGHTNESS_MAX 255 #define LM3533_PWM_MAX 0x3f #define LM3533_REG_PWM_BASE 0x14 #define LM3533_REG_MAX_CURRENT_BASE 0x1f #define LM3533_REG_CTRLBANK_ENABLE 0x27 #define LM3533_REG_BRIGHTNESS_BASE 0x40 static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base) { return base + cb->id; } int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb) { u8 mask; int ret; dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id); mask = 1 << cb->id; ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE, mask, mask); if (ret) dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id); return ret; } EXPORT_SYMBOL_GPL(lm3533_ctrlbank_enable); int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb) { u8 mask; int ret; dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id); mask = 1 << cb->id; ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE, 0, mask); if (ret) dev_err(cb->dev, "failed to disable ctrlbank %d\n", cb->id); return ret; } EXPORT_SYMBOL_GPL(lm3533_ctrlbank_disable); /* * Full-scale current. 
* * imax 5000 - 29800 uA (800 uA step) */ int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax) { u8 reg; u8 val; int ret; if (imax < LM3533_MAX_CURRENT_MIN || imax > LM3533_MAX_CURRENT_MAX) return -EINVAL; val = (imax - LM3533_MAX_CURRENT_MIN) / LM3533_MAX_CURRENT_STEP; reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_MAX_CURRENT_BASE); ret = lm3533_write(cb->lm3533, reg, val); if (ret) dev_err(cb->dev, "failed to set max current\n"); return ret; } EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current); #define lm3533_ctrlbank_set(_name, _NAME) \ int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val) \ { \ u8 reg; \ int ret; \ \ if (val > LM3533_##_NAME##_MAX) \ return -EINVAL; \ \ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \ ret = lm3533_write(cb->lm3533, reg, val); \ if (ret) \ dev_err(cb->dev, "failed to set " #_name "\n"); \ \ return ret; \ } \ EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name); #define lm3533_ctrlbank_get(_name, _NAME) \ int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val) \ { \ u8 reg; \ int ret; \ \ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \ ret = lm3533_read(cb->lm3533, reg, val); \ if (ret) \ dev_err(cb->dev, "failed to get " #_name "\n"); \ \ return ret; \ } \ EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name); lm3533_ctrlbank_set(brightness, BRIGHTNESS); lm3533_ctrlbank_get(brightness, BRIGHTNESS); /* * PWM-input control mask: * * bit 5 - PWM-input enabled in Zone 4 * bit 4 - PWM-input enabled in Zone 3 * bit 3 - PWM-input enabled in Zone 2 * bit 2 - PWM-input enabled in Zone 1 * bit 1 - PWM-input enabled in Zone 0 * bit 0 - PWM-input enabled */ lm3533_ctrlbank_set(pwm, PWM); lm3533_ctrlbank_get(pwm, PWM); MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>"); MODULE_DESCRIPTION("LM3533 Control Bank interface"); MODULE_LICENSE("GPL");
gpl-2.0
kaldaris/WIDzard-A850K
drivers/mfd/t7l66xb.c
4973
11181
/* * * Toshiba T7L66XB core mfd support * * Copyright (c) 2005, 2007, 2008 Ian Molton * Copyright (c) 2008 Dmitry Baryshkov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * T7L66 features: * * Supported in this driver: * SD/MMC * SM/NAND flash controller * * As yet not supported * GPIO interface (on NAND pins) * Serial interface * TFT 'interface converter' * PCMCIA interface logic */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/mfd/core.h> #include <linux/mfd/tmio.h> #include <linux/mfd/t7l66xb.h> enum { T7L66XB_CELL_NAND, T7L66XB_CELL_MMC, }; static const struct resource t7l66xb_mmc_resources[] = { { .start = 0x800, .end = 0x9ff, .flags = IORESOURCE_MEM, }, { .start = IRQ_T7L66XB_MMC, .end = IRQ_T7L66XB_MMC, .flags = IORESOURCE_IRQ, }, }; #define SCR_REVID 0x08 /* b Revision ID */ #define SCR_IMR 0x42 /* b Interrupt Mask */ #define SCR_DEV_CTL 0xe0 /* b Device control */ #define SCR_ISR 0xe1 /* b Interrupt Status */ #define SCR_GPO_OC 0xf0 /* b GPO output control */ #define SCR_GPO_OS 0xf1 /* b GPO output enable */ #define SCR_GPI_S 0xf2 /* w GPI status */ #define SCR_APDC 0xf8 /* b Active pullup down ctrl */ #define SCR_DEV_CTL_USB BIT(0) /* USB enable */ #define SCR_DEV_CTL_MMC BIT(1) /* MMC enable */ /*--------------------------------------------------------------------------*/ struct t7l66xb { void __iomem *scr; /* Lock to protect registers requiring read/modify/write ops. 
*/ spinlock_t lock; struct resource rscr; struct clk *clk48m; struct clk *clk32k; int irq; int irq_base; }; /*--------------------------------------------------------------------------*/ static int t7l66xb_mmc_enable(struct platform_device *mmc) { struct platform_device *dev = to_platform_device(mmc->dev.parent); struct t7l66xb *t7l66xb = platform_get_drvdata(dev); unsigned long flags; u8 dev_ctl; clk_enable(t7l66xb->clk32k); spin_lock_irqsave(&t7l66xb->lock, flags); dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL); dev_ctl |= SCR_DEV_CTL_MMC; tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL); spin_unlock_irqrestore(&t7l66xb->lock, flags); tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0, t7l66xb_mmc_resources[0].start & 0xfffe); return 0; } static int t7l66xb_mmc_disable(struct platform_device *mmc) { struct platform_device *dev = to_platform_device(mmc->dev.parent); struct t7l66xb *t7l66xb = platform_get_drvdata(dev); unsigned long flags; u8 dev_ctl; spin_lock_irqsave(&t7l66xb->lock, flags); dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL); dev_ctl &= ~SCR_DEV_CTL_MMC; tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL); spin_unlock_irqrestore(&t7l66xb->lock, flags); clk_disable(t7l66xb->clk32k); return 0; } static void t7l66xb_mmc_pwr(struct platform_device *mmc, int state) { struct platform_device *dev = to_platform_device(mmc->dev.parent); struct t7l66xb *t7l66xb = platform_get_drvdata(dev); tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state); } static void t7l66xb_mmc_clk_div(struct platform_device *mmc, int state) { struct platform_device *dev = to_platform_device(mmc->dev.parent); struct t7l66xb *t7l66xb = platform_get_drvdata(dev); tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state); } /*--------------------------------------------------------------------------*/ static struct tmio_mmc_data t7166xb_mmc_data = { .hclk = 24000000, .set_pwr = t7l66xb_mmc_pwr, .set_clk_div = t7l66xb_mmc_clk_div, }; static const struct resource t7l66xb_nand_resources[] = { { 
.start = 0xc00, .end = 0xc07, .flags = IORESOURCE_MEM, }, { .start = 0x0100, .end = 0x01ff, .flags = IORESOURCE_MEM, }, { .start = IRQ_T7L66XB_NAND, .end = IRQ_T7L66XB_NAND, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell t7l66xb_cells[] = { [T7L66XB_CELL_MMC] = { .name = "tmio-mmc", .enable = t7l66xb_mmc_enable, .disable = t7l66xb_mmc_disable, .platform_data = &t7166xb_mmc_data, .pdata_size = sizeof(t7166xb_mmc_data), .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources), .resources = t7l66xb_mmc_resources, }, [T7L66XB_CELL_NAND] = { .name = "tmio-nand", .num_resources = ARRAY_SIZE(t7l66xb_nand_resources), .resources = t7l66xb_nand_resources, }, }; /*--------------------------------------------------------------------------*/ /* Handle the T7L66XB interrupt mux */ static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) { struct t7l66xb *t7l66xb = irq_get_handler_data(irq); unsigned int isr; unsigned int i, irq_base; irq_base = t7l66xb->irq_base; while ((isr = tmio_ioread8(t7l66xb->scr + SCR_ISR) & ~tmio_ioread8(t7l66xb->scr + SCR_IMR))) for (i = 0; i < T7L66XB_NR_IRQS; i++) if (isr & (1 << i)) generic_handle_irq(irq_base + i); } static void t7l66xb_irq_mask(struct irq_data *data) { struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data); unsigned long flags; u8 imr; spin_lock_irqsave(&t7l66xb->lock, flags); imr = tmio_ioread8(t7l66xb->scr + SCR_IMR); imr |= 1 << (data->irq - t7l66xb->irq_base); tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); spin_unlock_irqrestore(&t7l66xb->lock, flags); } static void t7l66xb_irq_unmask(struct irq_data *data) { struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data); unsigned long flags; u8 imr; spin_lock_irqsave(&t7l66xb->lock, flags); imr = tmio_ioread8(t7l66xb->scr + SCR_IMR); imr &= ~(1 << (data->irq - t7l66xb->irq_base)); tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); spin_unlock_irqrestore(&t7l66xb->lock, flags); } static struct irq_chip t7l66xb_chip = { .name = "t7l66xb", .irq_ack = t7l66xb_irq_mask, .irq_mask = 
t7l66xb_irq_mask, .irq_unmask = t7l66xb_irq_unmask, }; /*--------------------------------------------------------------------------*/ /* Install the IRQ handler */ static void t7l66xb_attach_irq(struct platform_device *dev) { struct t7l66xb *t7l66xb = platform_get_drvdata(dev); unsigned int irq, irq_base; irq_base = t7l66xb->irq_base; for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq); irq_set_chip_data(irq, t7l66xb); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); #endif } irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING); irq_set_handler_data(t7l66xb->irq, t7l66xb); irq_set_chained_handler(t7l66xb->irq, t7l66xb_irq); } static void t7l66xb_detach_irq(struct platform_device *dev) { struct t7l66xb *t7l66xb = platform_get_drvdata(dev); unsigned int irq, irq_base; irq_base = t7l66xb->irq_base; irq_set_chained_handler(t7l66xb->irq, NULL); irq_set_handler_data(t7l66xb->irq, NULL); for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { #ifdef CONFIG_ARM set_irq_flags(irq, 0); #endif irq_set_chip(irq, NULL); irq_set_chip_data(irq, NULL); } } /*--------------------------------------------------------------------------*/ #ifdef CONFIG_PM static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state) { struct t7l66xb *t7l66xb = platform_get_drvdata(dev); struct t7l66xb_platform_data *pdata = dev->dev.platform_data; if (pdata && pdata->suspend) pdata->suspend(dev); clk_disable(t7l66xb->clk48m); return 0; } static int t7l66xb_resume(struct platform_device *dev) { struct t7l66xb *t7l66xb = platform_get_drvdata(dev); struct t7l66xb_platform_data *pdata = dev->dev.platform_data; clk_enable(t7l66xb->clk48m); if (pdata && pdata->resume) pdata->resume(dev); tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0, t7l66xb_mmc_resources[0].start & 0xfffe); return 0; } #else #define t7l66xb_suspend NULL #define t7l66xb_resume NULL #endif 
/*--------------------------------------------------------------------------*/ static int t7l66xb_probe(struct platform_device *dev) { struct t7l66xb_platform_data *pdata = dev->dev.platform_data; struct t7l66xb *t7l66xb; struct resource *iomem, *rscr; int ret; if (pdata == NULL) return -EINVAL; iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!iomem) return -EINVAL; t7l66xb = kzalloc(sizeof *t7l66xb, GFP_KERNEL); if (!t7l66xb) return -ENOMEM; spin_lock_init(&t7l66xb->lock); platform_set_drvdata(dev, t7l66xb); ret = platform_get_irq(dev, 0); if (ret >= 0) t7l66xb->irq = ret; else goto err_noirq; t7l66xb->irq_base = pdata->irq_base; t7l66xb->clk32k = clk_get(&dev->dev, "CLK_CK32K"); if (IS_ERR(t7l66xb->clk32k)) { ret = PTR_ERR(t7l66xb->clk32k); goto err_clk32k_get; } t7l66xb->clk48m = clk_get(&dev->dev, "CLK_CK48M"); if (IS_ERR(t7l66xb->clk48m)) { ret = PTR_ERR(t7l66xb->clk48m); goto err_clk48m_get; } rscr = &t7l66xb->rscr; rscr->name = "t7l66xb-core"; rscr->start = iomem->start; rscr->end = iomem->start + 0xff; rscr->flags = IORESOURCE_MEM; ret = request_resource(iomem, rscr); if (ret) goto err_request_scr; t7l66xb->scr = ioremap(rscr->start, resource_size(rscr)); if (!t7l66xb->scr) { ret = -ENOMEM; goto err_ioremap; } clk_enable(t7l66xb->clk48m); if (pdata && pdata->enable) pdata->enable(dev); /* Mask all interrupts */ tmio_iowrite8(0xbf, t7l66xb->scr + SCR_IMR); printk(KERN_INFO "%s rev %d @ 0x%08lx, irq %d\n", dev->name, tmio_ioread8(t7l66xb->scr + SCR_REVID), (unsigned long)iomem->start, t7l66xb->irq); t7l66xb_attach_irq(dev); t7l66xb_cells[T7L66XB_CELL_NAND].platform_data = pdata->nand_data; t7l66xb_cells[T7L66XB_CELL_NAND].pdata_size = sizeof(*pdata->nand_data); ret = mfd_add_devices(&dev->dev, dev->id, t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells), iomem, t7l66xb->irq_base); if (!ret) return 0; t7l66xb_detach_irq(dev); iounmap(t7l66xb->scr); err_ioremap: release_resource(&t7l66xb->rscr); err_request_scr: clk_put(t7l66xb->clk48m); err_clk48m_get: 
clk_put(t7l66xb->clk32k); err_clk32k_get: err_noirq: kfree(t7l66xb); return ret; } static int t7l66xb_remove(struct platform_device *dev) { struct t7l66xb_platform_data *pdata = dev->dev.platform_data; struct t7l66xb *t7l66xb = platform_get_drvdata(dev); int ret; ret = pdata->disable(dev); clk_disable(t7l66xb->clk48m); clk_put(t7l66xb->clk48m); clk_disable(t7l66xb->clk32k); clk_put(t7l66xb->clk32k); t7l66xb_detach_irq(dev); iounmap(t7l66xb->scr); release_resource(&t7l66xb->rscr); mfd_remove_devices(&dev->dev); platform_set_drvdata(dev, NULL); kfree(t7l66xb); return ret; } static struct platform_driver t7l66xb_platform_driver = { .driver = { .name = "t7l66xb", .owner = THIS_MODULE, }, .suspend = t7l66xb_suspend, .resume = t7l66xb_resume, .probe = t7l66xb_probe, .remove = t7l66xb_remove, }; /*--------------------------------------------------------------------------*/ module_platform_driver(t7l66xb_platform_driver); MODULE_DESCRIPTION("Toshiba T7L66XB core driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Ian Molton"); MODULE_ALIAS("platform:t7l66xb");
gpl-2.0
kangsterizer/linux-3.1.y-rsbac
arch/mips/dec/wbflush.c
4973
2109
/* * Setup the right wbflush routine for the different DECstations. * * Created with information from: * DECstation 3100 Desktop Workstation Functional Specification * DECstation 5000/200 KN02 System Module Functional Specification * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Harald Koerfgen * Copyright (C) 2002 Maciej W. Rozycki */ #include <linux/init.h> #include <asm/bootinfo.h> #include <asm/system.h> #include <asm/wbflush.h> static void wbflush_kn01(void); static void wbflush_kn210(void); static void wbflush_mips(void); void (*__wbflush) (void); void __init wbflush_setup(void) { switch (mips_machtype) { case MACH_DS23100: case MACH_DS5000_200: /* DS5000 3max */ __wbflush = wbflush_kn01; break; case MACH_DS5100: /* DS5100 MIPSMATE */ __wbflush = wbflush_kn210; break; case MACH_DS5000_1XX: /* DS5000/100 3min */ case MACH_DS5000_XX: /* Personal DS5000/2x */ case MACH_DS5000_2X0: /* DS5000/240 3max+ */ case MACH_DS5900: /* DS5900 bigmax */ default: __wbflush = wbflush_mips; break; } } /* * For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions * as part of Coprocessor 0. */ static void wbflush_kn01(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "1:\tbc0f\t1b\n\t" "nop\n\t" ".set\tpop"); } /* * For the DS5100 the writeback buffer seems to be a part of Coprocessor 3. * But CP3 has to enabled first. */ static void wbflush_kn210(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "mfc0\t$2,$12\n\t" "lui\t$3,0x8000\n\t" "or\t$3,$2,$3\n\t" "mtc0\t$3,$12\n\t" "nop\n" "1:\tbc3f\t1b\n\t" "nop\n\t" "mtc0\t$2,$12\n\t" "nop\n\t" ".set\tpop" : : : "$2", "$3"); } /* * I/O ASIC systems use a standard writeback buffer that gets flushed * upon an uncached read. 
*/ static void wbflush_mips(void) { __fast_iob(); } #include <linux/module.h> EXPORT_SYMBOL(__wbflush);
gpl-2.0
KylinUI/android_kernel_samsung_t1
arch/mips/dec/wbflush.c
4973
2109
/* * Setup the right wbflush routine for the different DECstations. * * Created with information from: * DECstation 3100 Desktop Workstation Functional Specification * DECstation 5000/200 KN02 System Module Functional Specification * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Harald Koerfgen * Copyright (C) 2002 Maciej W. Rozycki */ #include <linux/init.h> #include <asm/bootinfo.h> #include <asm/system.h> #include <asm/wbflush.h> static void wbflush_kn01(void); static void wbflush_kn210(void); static void wbflush_mips(void); void (*__wbflush) (void); void __init wbflush_setup(void) { switch (mips_machtype) { case MACH_DS23100: case MACH_DS5000_200: /* DS5000 3max */ __wbflush = wbflush_kn01; break; case MACH_DS5100: /* DS5100 MIPSMATE */ __wbflush = wbflush_kn210; break; case MACH_DS5000_1XX: /* DS5000/100 3min */ case MACH_DS5000_XX: /* Personal DS5000/2x */ case MACH_DS5000_2X0: /* DS5000/240 3max+ */ case MACH_DS5900: /* DS5900 bigmax */ default: __wbflush = wbflush_mips; break; } } /* * For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions * as part of Coprocessor 0. */ static void wbflush_kn01(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "1:\tbc0f\t1b\n\t" "nop\n\t" ".set\tpop"); } /* * For the DS5100 the writeback buffer seems to be a part of Coprocessor 3. * But CP3 has to enabled first. */ static void wbflush_kn210(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "mfc0\t$2,$12\n\t" "lui\t$3,0x8000\n\t" "or\t$3,$2,$3\n\t" "mtc0\t$3,$12\n\t" "nop\n" "1:\tbc3f\t1b\n\t" "nop\n\t" "mtc0\t$2,$12\n\t" "nop\n\t" ".set\tpop" : : : "$2", "$3"); } /* * I/O ASIC systems use a standard writeback buffer that gets flushed * upon an uncached read. 
*/ static void wbflush_mips(void) { __fast_iob(); } #include <linux/module.h> EXPORT_SYMBOL(__wbflush);
gpl-2.0
Garcia98/kernel-amami
drivers/net/wireless/ath/ath9k/eeprom_4k.c
4973
33600
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <asm/unaligned.h> #include "hw.h" #include "ar9002_phy.h" static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) { return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF); } static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) { return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); } #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data = (u16 *)&ah->eeprom.map4k; int addr, eep_start_loc = 64; for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { ath_dbg(common, EEPROM, "Unable to read eeprom region\n"); return false; } eep_data++; } return true; } static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah) { u16 *eep_data = (u16 *)&ah->eeprom.map4k; ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K); return true; } static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) return 
__ath9k_hw_usb_4k_fill_eeprom(ah); else return __ath9k_hw_4k_fill_eeprom(ah); } #if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_4k_header *modal_hdr) { PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]); PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Switch Settle", modal_hdr->switchSettling); PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]); PR_EEP("Chain0 RxTxMargin", modal_hdr->rxTxMarginCh[0]); PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize); PR_EEP("PGA Desired size", modal_hdr->pgaDesiredSize); PR_EEP("Chain0 xlna Gain", modal_hdr->xlnaGainCh[0]); PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff); PR_EEP("txEndToRxOn", modal_hdr->txEndToRxOn); PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn); PR_EEP("CCA Threshold)", modal_hdr->thresh62); PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]); PR_EEP("xpdGain", modal_hdr->xpdGain); PR_EEP("External PD", modal_hdr->xpd); PR_EEP("Chain0 I Coefficient", modal_hdr->iqCalICh[0]); PR_EEP("Chain0 Q Coefficient", modal_hdr->iqCalQCh[0]); PR_EEP("pdGainOverlap", modal_hdr->pdGainOverlap); PR_EEP("O/D Bias Version", modal_hdr->version); PR_EEP("CCK OutputBias", modal_hdr->ob_0); PR_EEP("BPSK OutputBias", modal_hdr->ob_1); PR_EEP("QPSK OutputBias", modal_hdr->ob_2); PR_EEP("16QAM OutputBias", modal_hdr->ob_3); PR_EEP("64QAM OutputBias", modal_hdr->ob_4); PR_EEP("CCK Driver1_Bias", modal_hdr->db1_0); PR_EEP("BPSK Driver1_Bias", modal_hdr->db1_1); PR_EEP("QPSK Driver1_Bias", modal_hdr->db1_2); PR_EEP("16QAM Driver1_Bias", modal_hdr->db1_3); PR_EEP("64QAM Driver1_Bias", modal_hdr->db1_4); PR_EEP("CCK Driver2_Bias", modal_hdr->db2_0); PR_EEP("BPSK Driver2_Bias", modal_hdr->db2_1); PR_EEP("QPSK Driver2_Bias", modal_hdr->db2_2); PR_EEP("16QAM Driver2_Bias", modal_hdr->db2_3); PR_EEP("64QAM Driver2_Bias", 
modal_hdr->db2_4); PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl); PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart); PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn); PR_EEP("HT40 Power Inc.", modal_hdr->ht40PowerIncForPdadc); PR_EEP("Chain0 bswAtten", modal_hdr->bswAtten[0]); PR_EEP("Chain0 bswMargin", modal_hdr->bswMargin[0]); PR_EEP("HT40 Switch Settle", modal_hdr->swSettleHt40); PR_EEP("Chain0 xatten2Db", modal_hdr->xatten2Db[0]); PR_EEP("Chain0 xatten2Margin", modal_hdr->xatten2Margin[0]); PR_EEP("Ant. Diversity ctl1", modal_hdr->antdiv_ctl1); PR_EEP("Ant. Diversity ctl2", modal_hdr->antdiv_ctl2); PR_EEP("TX Diversity", modal_hdr->tx_diversity); return len; } static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, u8 *buf, u32 len, u32 size) { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct base_eep_header_4k *pBase = &eep->baseEepHeader; if (!dump_base_hdr) { len += snprintf(buf + len, size - len, "%20s :\n", "2GHz modal Header"); len += ath9k_dump_4k_modal_eeprom(buf, len, size, &eep->modalHeader); goto out; } PR_EEP("Major Version", pBase->version >> 12); PR_EEP("Minor Version", pBase->version & 0xFFF); PR_EEP("Checksum", pBase->checksum); PR_EEP("Length", pBase->length); PR_EEP("RegDomain1", pBase->regDmn[0]); PR_EEP("RegDomain2", pBase->regDmn[1]); PR_EEP("TX Mask", pBase->txMask); PR_EEP("RX Mask", pBase->rxMask); PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); PR_EEP("Allow 2GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11G)); PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20)); PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40)); PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01)); PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); PR_EEP("Cal Bin Minor Ver", 
(pBase->binBuildNumber >> 16) & 0xFF); PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); PR_EEP("TX Gain type", pBase->txGainType); len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", pBase->macAddr); out: if (len > size) len = size; return len; } #else static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, u8 *buf, u32 len, u32 size) { return 0; } #endif #undef SIZE_EEPROM_4K static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) { #define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) struct ath_common *common = ath9k_hw_common(ah); struct ar5416_eeprom_4k *eep = (struct ar5416_eeprom_4k *) &ah->eeprom.map4k; u16 *eepdata, temp, magic, magic2; u32 sum = 0, el; bool need_swap = false; int i, addr; if (!ath9k_hw_use_flash(ah)) { if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { ath_err(common, "Reading Magic # failed\n"); return false; } ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); if (magic2 == AR5416_EEPROM_MAGIC) { need_swap = true; eepdata = (u16 *) (&ah->eeprom); for (addr = 0; addr < EEPROM_4K_SIZE; addr++) { temp = swab16(*eepdata); *eepdata = temp; eepdata++; } } else { ath_err(common, "Invalid EEPROM Magic. Endianness mismatch.\n"); return -EINVAL; } } } ath_dbg(common, EEPROM, "need_swap = %s\n", need_swap ? "True" : "False"); if (need_swap) el = swab16(ah->eeprom.map4k.baseEepHeader.length); else el = ah->eeprom.map4k.baseEepHeader.length; if (el > sizeof(struct ar5416_eeprom_4k)) el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); else el = el / sizeof(u16); eepdata = (u16 *)(&ah->eeprom); for (i = 0; i < el; i++) sum ^= *eepdata++; if (need_swap) { u32 integer; u16 word; ath_dbg(common, EEPROM, "EEPROM Endianness is not native.. 
Changing\n"); word = swab16(eep->baseEepHeader.length); eep->baseEepHeader.length = word; word = swab16(eep->baseEepHeader.checksum); eep->baseEepHeader.checksum = word; word = swab16(eep->baseEepHeader.version); eep->baseEepHeader.version = word; word = swab16(eep->baseEepHeader.regDmn[0]); eep->baseEepHeader.regDmn[0] = word; word = swab16(eep->baseEepHeader.regDmn[1]); eep->baseEepHeader.regDmn[1] = word; word = swab16(eep->baseEepHeader.rfSilent); eep->baseEepHeader.rfSilent = word; word = swab16(eep->baseEepHeader.blueToothOptions); eep->baseEepHeader.blueToothOptions = word; word = swab16(eep->baseEepHeader.deviceCap); eep->baseEepHeader.deviceCap = word; integer = swab32(eep->modalHeader.antCtrlCommon); eep->modalHeader.antCtrlCommon = integer; for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { integer = swab32(eep->modalHeader.antCtrlChain[i]); eep->modalHeader.antCtrlChain[i] = integer; } for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { word = swab16(eep->modalHeader.spurChans[i].spurChan); eep->modalHeader.spurChans[i].spurChan = word; } } if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n", sum, ah->eep_ops->get_eeprom_ver(ah)); return -EINVAL; } return 0; #undef EEPROM_4K_SIZE } static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &eep->modalHeader; struct base_eep_header_4k *pBase = &eep->baseEepHeader; u16 ver_minor; ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK; switch (param) { case EEP_NFTHRESH_2: return pModal->noiseFloorThreshCh[0]; case EEP_MAC_LSW: return get_unaligned_be16(pBase->macAddr); case EEP_MAC_MID: return get_unaligned_be16(pBase->macAddr + 2); case EEP_MAC_MSW: return get_unaligned_be16(pBase->macAddr + 4); case EEP_REG_0: return pBase->regDmn[0]; case EEP_OP_CAP: return 
pBase->deviceCap; case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: return pBase->rfSilent; case EEP_OB_2: return pModal->ob_0; case EEP_DB_2: return pModal->db1_1; case EEP_MINOR_REV: return ver_minor; case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: return pBase->rxMask; case EEP_FRAC_N_5G: return 0; case EEP_PWR_TABLE_OFFSET: return AR5416_PWR_TABLE_OFFSET_DB; case EEP_MODAL_VER: return pModal->version; case EEP_ANT_DIV_CTL1: return pModal->antdiv_ctl1; case EEP_TXGAIN_TYPE: return pBase->txGainType; case EEP_ANTENNA_GAIN_2G: return pModal->antennaGainCh[0]; default: return 0; } } static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct cal_data_per_freq_4k *pRawDataset; u8 *pCalBChans = NULL; u16 pdGainOverlap_t2; static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; u16 numPiers, i, j; u16 numXpdGain, xpdMask; u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 }; u32 reg32, regOffset, regChainOffset; xpdMask = pEepData->modalHeader.xpdGain; if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; } else { pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); } pCalBChans = pEepData->calFreqPier2G; numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS; numXpdGain = 0; for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) { if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) { if (numXpdGain >= AR5416_EEP4K_NUM_PD_GAINS) break; xpdGainValues[numXpdGain] = (u16)(AR5416_PD_GAINS_IN_MASK - i); numXpdGain++; } } REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, (numXpdGain - 1) & 0x3); REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, xpdGainValues[0]); REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, xpdGainValues[1]); 
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0); for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { regChainOffset = i * 0x1000; if (pEepData->baseEepHeader.txMask & (1 << i)) { pRawDataset = pEepData->calPierData2G[i]; ath9k_hw_get_gain_boundaries_pdadcs(ah, chan, pRawDataset, pCalBChans, numPiers, pdGainOverlap_t2, gainBoundaries, pdadcValues, numXpdGain); ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset, SM(pdGainOverlap_t2, AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | SM(gainBoundaries[0], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) | SM(gainBoundaries[1], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) | SM(gainBoundaries[2], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) | SM(gainBoundaries[3], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4)); regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset; for (j = 0; j < 32; j++) { reg32 = get_unaligned_le32(&pdadcValues[4 * j]); REG_WRITE(ah, regOffset, reg32); ath_dbg(common, EEPROM, "PDADC (%d,%4x): %4.4x %8.8x\n", i, regChainOffset, regOffset, reg32); ath_dbg(common, EEPROM, "PDADC: Chain %d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d |\n", i, 4 * j, pdadcValues[4 * j], 4 * j + 1, pdadcValues[4 * j + 1], 4 * j + 2, pdadcValues[4 * j + 2], 4 * j + 3, pdadcValues[4 * j + 3]); regOffset += 4; } REGWRITE_BUFFER_FLUSH(ah); } } } static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, struct ath9k_channel *chan, int16_t *ratesArray, u16 cfgCtl, u16 antenna_reduction, u16 powerLimit) { #define CMP_TEST_GRP \ (((cfgCtl & ~CTL_MODE_M)| (pCtlMode[ctlMode] & CTL_MODE_M)) == \ pEepData->ctlIndex[i]) \ || (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL)) int i; u16 twiceMinEdgePower; u16 twiceMaxEdgePower; u16 scaledPower = 0, minCtlPower; u16 numCtlModes; const u16 *pCtlMode; u16 ctlMode, freq; struct chan_centers centers; struct cal_ctl_data_4k *rep; struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct 
cal_target_power_leg targetPowerOfdm, targetPowerCck = { 0, { 0, 0, 0, 0} }; struct cal_target_power_leg targetPowerOfdmExt = { 0, { 0, 0, 0, 0} }, targetPowerCckExt = { 0, { 0, 0, 0, 0 } }; struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { 0, {0, 0, 0, 0} }; static const u16 ctlModesFor11g[] = { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40 }; ath9k_hw_get_channel_centers(ah, chan, &centers); scaledPower = powerLimit - antenna_reduction; numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; pCtlMode = ctlModesFor11g; ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPowerCck, AR5416_NUM_2G_CCK_TARGET_POWERS, &targetPowerCck, 4, false); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPower2G, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerOfdm, 4, false); ath9k_hw_get_target_powers(ah, chan, pEepData->calTargetPower2GHT20, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerHt20, 8, false); if (IS_CHAN_HT40(chan)) { numCtlModes = ARRAY_SIZE(ctlModesFor11g); ath9k_hw_get_target_powers(ah, chan, pEepData->calTargetPower2GHT40, AR5416_NUM_2G_40_TARGET_POWERS, &targetPowerHt40, 8, true); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPowerCck, AR5416_NUM_2G_CCK_TARGET_POWERS, &targetPowerCckExt, 4, true); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPower2G, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerOfdmExt, 4, true); } for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || (pCtlMode[ctlMode] == CTL_2GHT40); if (isHt40CtlMode) freq = centers.synth_center; else if (pCtlMode[ctlMode] & EXT_ADDITIVE) freq = centers.ext_center; else freq = centers.ctl_center; twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { if (CMP_TEST_GRP) { rep = &(pEepData->ctlData[i]); twiceMinEdgePower = ath9k_hw_get_max_edge_power( freq, rep->ctlEdges[ 
ar5416_get_ntxchains(ah->txchainmask) - 1], IS_CHAN_2GHZ(chan), AR5416_EEP4K_NUM_BAND_EDGES); if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) { twiceMaxEdgePower = min(twiceMaxEdgePower, twiceMinEdgePower); } else { twiceMaxEdgePower = twiceMinEdgePower; break; } } } minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); switch (pCtlMode[ctlMode]) { case CTL_11B: for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) { targetPowerCck.tPow2x[i] = min((u16)targetPowerCck.tPow2x[i], minCtlPower); } break; case CTL_11G: for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) { targetPowerOfdm.tPow2x[i] = min((u16)targetPowerOfdm.tPow2x[i], minCtlPower); } break; case CTL_2GHT20: for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) { targetPowerHt20.tPow2x[i] = min((u16)targetPowerHt20.tPow2x[i], minCtlPower); } break; case CTL_11B_EXT: targetPowerCckExt.tPow2x[0] = min((u16)targetPowerCckExt.tPow2x[0], minCtlPower); break; case CTL_11G_EXT: targetPowerOfdmExt.tPow2x[0] = min((u16)targetPowerOfdmExt.tPow2x[0], minCtlPower); break; case CTL_2GHT40: for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { targetPowerHt40.tPow2x[i] = min((u16)targetPowerHt40.tPow2x[i], minCtlPower); } break; default: break; } } ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] = ratesArray[rate18mb] = ratesArray[rate24mb] = targetPowerOfdm.tPow2x[0]; ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3]; ratesArray[rateXr] = targetPowerOfdm.tPow2x[0]; for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i]; ratesArray[rate1l] = targetPowerCck.tPow2x[0]; ratesArray[rate2s] = ratesArray[rate2l] = targetPowerCck.tPow2x[1]; ratesArray[rate5_5s] = ratesArray[rate5_5l] = targetPowerCck.tPow2x[2]; ratesArray[rate11s] = ratesArray[rate11l] = targetPowerCck.tPow2x[3]; if (IS_CHAN_HT40(chan)) { for (i = 0; i < 
ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { ratesArray[rateHt40_0 + i] = targetPowerHt40.tPow2x[i]; } ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0]; } #undef CMP_TEST_GRP } static void ath9k_hw_4k_set_txpower(struct ath_hw *ah, struct ath9k_channel *chan, u16 cfgCtl, u8 twiceAntennaReduction, u8 powerLimit, bool test) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &pEepData->modalHeader; int16_t ratesArray[Ar5416RateSize]; u8 ht40PowerIncForPdadc = 2; int i; memset(ratesArray, 0, sizeof(ratesArray)); if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; } ath9k_hw_set_4k_power_per_rate_table(ah, chan, &ratesArray[0], cfgCtl, twiceAntennaReduction, powerLimit); ath9k_hw_set_4k_power_cal_table(ah, chan); regulatory->max_power_level = 0; for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { if (ratesArray[i] > MAX_RATE_POWER) ratesArray[i] = MAX_RATE_POWER; if (ratesArray[i] > regulatory->max_power_level) regulatory->max_power_level = ratesArray[i]; } if (test) return; for (i = 0; i < Ar5416RateSize; i++) ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2; ENABLE_REGWRITE_BUFFER(ah); /* OFDM power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, ATH9K_POW_SM(ratesArray[rate18mb], 24) | ATH9K_POW_SM(ratesArray[rate12mb], 16) | ATH9K_POW_SM(ratesArray[rate9mb], 8) | ATH9K_POW_SM(ratesArray[rate6mb], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE2, ATH9K_POW_SM(ratesArray[rate54mb], 24) | ATH9K_POW_SM(ratesArray[rate48mb], 16) | ATH9K_POW_SM(ratesArray[rate36mb], 8) | ATH9K_POW_SM(ratesArray[rate24mb], 0)); /* CCK power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, ATH9K_POW_SM(ratesArray[rate2s], 24) | 
ATH9K_POW_SM(ratesArray[rate2l], 16) | ATH9K_POW_SM(ratesArray[rateXr], 8) | ATH9K_POW_SM(ratesArray[rate1l], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE4, ATH9K_POW_SM(ratesArray[rate11s], 24) | ATH9K_POW_SM(ratesArray[rate11l], 16) | ATH9K_POW_SM(ratesArray[rate5_5s], 8) | ATH9K_POW_SM(ratesArray[rate5_5l], 0)); /* HT20 power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, ATH9K_POW_SM(ratesArray[rateHt20_3], 24) | ATH9K_POW_SM(ratesArray[rateHt20_2], 16) | ATH9K_POW_SM(ratesArray[rateHt20_1], 8) | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE6, ATH9K_POW_SM(ratesArray[rateHt20_7], 24) | ATH9K_POW_SM(ratesArray[rateHt20_6], 16) | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)); /* HT40 power per rate */ if (IS_CHAN_HT40(chan)) { REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, ATH9K_POW_SM(ratesArray[rateHt40_3] + ht40PowerIncForPdadc, 24) | ATH9K_POW_SM(ratesArray[rateHt40_2] + ht40PowerIncForPdadc, 16) | ATH9K_POW_SM(ratesArray[rateHt40_1] + ht40PowerIncForPdadc, 8) | ATH9K_POW_SM(ratesArray[rateHt40_0] + ht40PowerIncForPdadc, 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE8, ATH9K_POW_SM(ratesArray[rateHt40_7] + ht40PowerIncForPdadc, 24) | ATH9K_POW_SM(ratesArray[rateHt40_6] + ht40PowerIncForPdadc, 16) | ATH9K_POW_SM(ratesArray[rateHt40_5] + ht40PowerIncForPdadc, 8) | ATH9K_POW_SM(ratesArray[rateHt40_4] + ht40PowerIncForPdadc, 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) | ATH9K_POW_SM(ratesArray[rateExtCck], 16) | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); } REGWRITE_BUFFER_FLUSH(ah); } static void ath9k_hw_4k_set_gain(struct ath_hw *ah, struct modal_eep_4k_header *pModal, struct ar5416_eeprom_4k *eep, u8 txRxAttenLocal) { REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0, pModal->antCtrlChain[0]); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) & ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) 
| SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_3) { txRxAttenLocal = pModal->txRxAttenCh[0]; REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, pModal->xatten2Margin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); /* Set the block 1 value to block 0 value */ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, pModal->xatten2Margin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); } REG_RMW_FIELD(ah, AR_PHY_RXGAIN, AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); REG_RMW_FIELD(ah, AR_PHY_RXGAIN, AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); } /* * Read EEPROM header info and program the device for correct operation * given the channel value. 
*/
/*
 * ath9k_hw_4k_set_board_values - program board-specific analog and baseband
 * parameters from the 4K-map EEPROM (AR9285/AR9271 family).
 *
 * @ah:   hardware state; source of the parsed 4K EEPROM image.
 * @chan: current channel; only consulted for the HT40 switch-settling value.
 *
 * Writes antenna-switch, antenna-diversity, output-bias (ob) and driver-bias
 * (db1/db2) values, TX/RX turnaround timings, CCA thresholds and optional
 * BB desired-scale power-control scaling, all derived from the modal EEPROM
 * header. No return value; effects are purely register writes.
 */
static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
					 struct ath9k_channel *chan)
{
	struct modal_eep_4k_header *pModal;
	struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
	struct base_eep_header_4k *pBase = &eep->baseEepHeader;
	u8 txRxAttenLocal;
	u8 ob[5], db1[5], db2[5];	/* per-rate-group bias values */
	u8 ant_div_control1, ant_div_control2;
	u8 bb_desired_scale;
	u32 regVal;

	pModal = &eep->modalHeader;
	/* fixed local attenuation; 4K map stores no per-chain override here */
	txRxAttenLocal = 23;

	REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);

	/* Single chain for 4K EEPROM*/
	ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal);

	/* Initialize Ant Diversity settings from EEPROM */
	if (pModal->version >= 3) {
		ant_div_control1 = pModal->antdiv_ctl1;
		ant_div_control2 = pModal->antdiv_ctl2;

		/* read-modify-write the diversity control fields only */
		regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
		regVal &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL));

		regVal |= SM(ant_div_control1,
			     AR_PHY_9285_ANT_DIV_CTL);
		regVal |= SM(ant_div_control2,
			     AR_PHY_9285_ANT_DIV_ALT_LNACONF);
		regVal |= SM((ant_div_control2 >> 2),
			     AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
		regVal |= SM((ant_div_control1 >> 1),
			     AR_PHY_9285_ANT_DIV_ALT_GAINTB);
		regVal |= SM((ant_div_control1 >> 2),
			     AR_PHY_9285_ANT_DIV_MAIN_GAINTB);
		REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal);

		/*
		 * NOTE(review): the value of this read-back is discarded;
		 * presumably it forces the preceding write to post — confirm
		 * against the hardware programming guide before removing.
		 */
		regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
		regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
		regVal &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
		regVal |= SM((ant_div_control1 >> 3),
			     AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);

		REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal);
		/* discarded read-back again — see note above */
		regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
	}

	/*
	 * Populate the ob/db1/db2 tables. Modal version >= 2 carries five
	 * distinct entries; version 1 carries two (first entry + a shared
	 * value); older images carry a single value replicated everywhere.
	 */
	if (pModal->version >= 2) {
		ob[0] = pModal->ob_0;
		ob[1] = pModal->ob_1;
		ob[2] = pModal->ob_2;
		ob[3] = pModal->ob_3;
		ob[4] = pModal->ob_4;

		db1[0] = pModal->db1_0;
		db1[1] = pModal->db1_1;
		db1[2] = pModal->db1_2;
		db1[3] = pModal->db1_3;
		db1[4] = pModal->db1_4;

		db2[0] = pModal->db2_0;
		db2[1] = pModal->db2_1;
		db2[2] = pModal->db2_2;
		db2[3] = pModal->db2_3;
		db2[4] = pModal->db2_4;
	} else if (pModal->version == 1) {
		ob[0] = pModal->ob_0;
		ob[1] = ob[2] = ob[3] = ob[4] = pModal->ob_1;
		db1[0] = pModal->db1_0;
		db1[1] = db1[2] = db1[3] = db1[4] = pModal->db1_1;
		db2[0] = pModal->db2_0;
		db2[1] = db2[2] = db2[3] = db2[4] = pModal->db2_1;
	} else {
		int i;

		for (i = 0; i < 5; i++) {
			ob[i] = pModal->ob_0;
			db1[i] = pModal->db1_0;
			/*
			 * NOTE(review): db2 is seeded from db1_0, not db2_0.
			 * This matches the historical driver source but looks
			 * like a possible copy/paste slip — verify against
			 * the EEPROM layout before changing.
			 */
			db2[i] = pModal->db1_0;
		}
	}

	if (AR_SREV_9271(ah)) {
		/* AR9271: per-modulation bias fields (cck/psk/qam) */
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9271_AN_RF2G3_OB_cck,
					  AR9271_AN_RF2G3_OB_cck_S,
					  ob[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9271_AN_RF2G3_OB_psk,
					  AR9271_AN_RF2G3_OB_psk_S,
					  ob[1]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9271_AN_RF2G3_OB_qam,
					  AR9271_AN_RF2G3_OB_qam_S,
					  ob[2]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9271_AN_RF2G3_DB_1,
					  AR9271_AN_RF2G3_DB_1_S,
					  db1[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9271_AN_RF2G4_DB_2,
					  AR9271_AN_RF2G4_DB_2_S,
					  db2[0]);
	} else {
		/* AR9285: five indexed ob/db1/db2 fields across RF2G3/RF2G4 */
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_0,
					  AR9285_AN_RF2G3_OB_0_S,
					  ob[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_1,
					  AR9285_AN_RF2G3_OB_1_S,
					  ob[1]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_2,
					  AR9285_AN_RF2G3_OB_2_S,
					  ob[2]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_3,
					  AR9285_AN_RF2G3_OB_3_S,
					  ob[3]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_4,
					  AR9285_AN_RF2G3_OB_4_S,
					  ob[4]);

		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_DB1_0,
					  AR9285_AN_RF2G3_DB1_0_S,
					  db1[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_DB1_1,
					  AR9285_AN_RF2G3_DB1_1_S,
					  db1[1]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_DB1_2,
					  AR9285_AN_RF2G3_DB1_2_S,
					  db1[2]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB1_3,
					  AR9285_AN_RF2G4_DB1_3_S,
					  db1[3]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB1_4,
					  AR9285_AN_RF2G4_DB1_4_S,
					  db1[4]);

		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_0,
					  AR9285_AN_RF2G4_DB2_0_S,
					  db2[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_1,
					  AR9285_AN_RF2G4_DB2_1_S,
					  db2[1]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_2,
					  AR9285_AN_RF2G4_DB2_2_S,
					  db2[2]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_3,
					  AR9285_AN_RF2G4_DB2_3_S,
					  db2[3]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_4,
					  AR9285_AN_RF2G4_DB2_4_S,
					  db2[4]);
	}

	/* RX settling time and ADC desired size */
	REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
		      pModal->switchSettling);
	REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
		      pModal->adcDesiredSize);

	/* TX frame / external PA (XPA) turn-on and turn-off timing */
	REG_WRITE(ah, AR_PHY_RF_CTL4,
		  SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
		  SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
		  SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
		  SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));

	REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
		      pModal->txEndToRxOn);

	/*
	 * NOTE(review): this repeats the write just above for AR9271 rev 1.0
	 * only. Presumably a silicon workaround (double-write) — confirm
	 * before deduplicating.
	 */
	if (AR_SREV_9271_10(ah))
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
			      pModal->txEndToRxOn);

	/* CCA thresholds for primary and extension channels */
	REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
		      pModal->thresh62);
	REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
		      pModal->thresh62);

	/* EEPROM minor version 2 added the frame-to-data/PA-on timings */
	if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_2) {
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START,
			      pModal->txFrameToDataStart);
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
			      pModal->txFrameToPaOn);
	}

	/* EEPROM minor version 3 added a separate HT40 settling value */
	if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_3) {
		if (IS_CHAN_HT40(chan))
			REG_RMW_FIELD(ah, AR_PHY_SETTLING,
				      AR_PHY_SETTLING_SWITCH,
				      pModal->swSettleHt40);
	}

	/*
	 * Optional BB desired-scale adjustment: replicate the 5-bit scale
	 * value into each 5-bit lane of the TX power-control registers by
	 * multiplying a lane mask (one bit set per lane) by the scale.
	 */
	bb_desired_scale = (pModal->bb_scale_smrt_antenna &
			EEP_4K_BB_DESIRED_SCALE_MASK);
	if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
		u32 pwrctrl, mask, clr;

		mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
		pwrctrl = mask * bb_desired_scale;
		clr = mask * 0x1f;	/* clear all six 5-bit lanes */
		REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
		REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
		REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);

		mask = BIT(0)|BIT(5)|BIT(15);
		pwrctrl = mask * bb_desired_scale;
		clr = mask * 0x1f;
		REG_RMW(ah, AR_PHY_TX_PWRCTRL9, pwrctrl, clr);

		mask = BIT(0)|BIT(5);
		pwrctrl = mask * bb_desired_scale;
		clr = mask * 0x1f;
		REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
		REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
	}
}

/*
 * ath9k_hw_4k_get_spur_channel - look up the i-th spur-mitigation channel.
 *
 * @ah:     hardware state.
 * @i:      spur channel index into the config / EEPROM tables.
 * @is2GHz: band selector for the driver-config table.
 *
 * Returns the spur channel value from either the runtime configuration
 * (SPUR_ENABLE_IOCTL) or the EEPROM modal header (SPUR_ENABLE_EEPROM),
 * or AR_NO_SPUR when spur mitigation is disabled.
 */
static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
{
#define EEP_MAP4K_SPURCHAN \
	(ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
	struct ath_common *common = ath9k_hw_common(ah);
	u16 spur_val = AR_NO_SPUR;

	ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
		i, is2GHz, ah->config.spurchans[i][is2GHz]);

	switch (ah->config.spurmode) {
	case SPUR_DISABLE:
		/* returns AR_NO_SPUR */
		break;
	case SPUR_ENABLE_IOCTL:
		spur_val = ah->config.spurchans[i][is2GHz];
		ath_dbg(common, ANI,
			"Getting spur val from new loc. %d\n", spur_val);
		break;
	case SPUR_ENABLE_EEPROM:
		spur_val = EEP_MAP4K_SPURCHAN;
		break;
	}

	return spur_val;
#undef EEP_MAP4K_SPURCHAN
}

/* EEPROM operations vector for the 4K map (AR9285/AR9271) */
const struct eeprom_ops eep_4k_ops = {
	.check_eeprom		= ath9k_hw_4k_check_eeprom,
	.get_eeprom		= ath9k_hw_4k_get_eeprom,
	.fill_eeprom		= ath9k_hw_4k_fill_eeprom,
	.dump_eeprom		= ath9k_hw_4k_dump_eeprom,
	.get_eeprom_ver		= ath9k_hw_4k_get_eeprom_ver,
	.get_eeprom_rev		= ath9k_hw_4k_get_eeprom_rev,
	.set_board_values	= ath9k_hw_4k_set_board_values,
	.set_txpower		= ath9k_hw_4k_set_txpower,
	.get_spur_channel	= ath9k_hw_4k_get_spur_channel
};
gpl-2.0
javelinanddart/android_kernel_lge_hamerhead
lib/mpi/mpi-cmp.c
4973
1647
/* mpi-cmp.c - MPI functions
 * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

#include "mpi-internal.h"

/*
 * mpi_cmp_ui - compare an MPI against an unsigned long.
 *
 * Returns <0 if u < v, 0 if u == v, >0 if u > v.
 * A negative MPI always compares below any unsigned value.
 */
int mpi_cmp_ui(MPI u, unsigned long v)
{
	mpi_limb_t limb = v;

	mpi_normalize(u);

	/*
	 * Handle a zero-limb (value 0) operand explicitly: the original
	 * code fell through to reading u->d[0], which is not a valid limb
	 * when nlimbs == 0.
	 */
	if (!u->nlimbs)
		return limb ? -1 : 0;
	if (u->sign)
		return -1;
	if (u->nlimbs > 1)
		return 1;

	if (u->d[0] == limb)
		return 0;
	else if (u->d[0] > limb)
		return 1;
	else
		return -1;
}

/*
 * mpi_cmp - compare two MPIs, taking signs into account.
 *
 * Returns <0 if u < v, 0 if u == v, >0 if u > v.
 */
int mpi_cmp(MPI u, MPI v)
{
	mpi_size_t usize, vsize;
	int cmp;

	mpi_normalize(u);
	mpi_normalize(v);

	usize = u->nlimbs;
	vsize = v->nlimbs;

	/* Differing signs decide immediately (non-negative > negative). */
	if (!u->sign && v->sign)
		return 1;
	if (u->sign && !v->sign)
		return -1;

	/* Same sign, different limb counts: more limbs = larger magnitude. */
	if (usize != vsize && !u->sign && !v->sign)
		return usize - vsize;
	/*
	 * BUGFIX: was "return vsize + usize", which is always positive and
	 * therefore claimed u > v even when u had the larger magnitude
	 * (i.e. was more negative). For two negatives the ordering is
	 * reversed, so the correct result is vsize - usize.
	 */
	if (usize != vsize && u->sign && v->sign)
		return vsize - usize;

	if (!usize)
		return 0;

	/* Same sign and size: compare magnitudes limb-by-limb. */
	cmp = mpihelp_cmp(u->d, v->d, usize);
	if (!cmp)
		return 0;
	/* For negatives the magnitude comparison is inverted by the sign. */
	if ((cmp < 0 ? 1 : 0) == (u->sign ? 1 : 0))
		return 1;

	return -1;
}
gpl-2.0
mali1/NST-kernel_115
arch/arm/nwfpe/extended_cpdo.c
14957
4135
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999

    Direct questions, comments to Scott Bambrough <scottb@netwinder.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/*
 * Extended-precision (80-bit) CPDO (coprocessor data operation) dispatch
 * for the FPA11 emulator: converts operands to floatx80, looks up the
 * arithmetic opcode in a monadic or dyadic function table, and stores the
 * extended result into the destination register.
 */

#include "fpa11.h"
#include "softfloat.h"
#include "fpopcode.h"

/* Transcendental helpers declared but implemented elsewhere (if at all). */
floatx80 floatx80_exp(floatx80 Fm);
floatx80 floatx80_ln(floatx80 Fm);
floatx80 floatx80_sin(floatx80 rFm);
floatx80 floatx80_cos(floatx80 rFm);
floatx80 floatx80_arcsin(floatx80 rFm);
floatx80 floatx80_arctan(floatx80 rFm);
floatx80 floatx80_log(floatx80 rFm);
floatx80 floatx80_tan(floatx80 rFm);
floatx80 floatx80_arccos(floatx80 rFm);
floatx80 floatx80_pow(floatx80 rFn, floatx80 rFm);
floatx80 floatx80_pol(floatx80 rFn, floatx80 rFm);

/* Reverse subtract: computes rFm - rFn (operand order swapped). */
static floatx80 floatx80_rsf(struct roundingData *roundData, floatx80 rFn, floatx80 rFm)
{
	return floatx80_sub(roundData, rFm, rFn);
}

/* Reverse divide: computes rFm / rFn (operand order swapped). */
static floatx80 floatx80_rdv(struct roundingData *roundData, floatx80 rFn, floatx80 rFm)
{
	return floatx80_div(roundData, rFm, rFn);
}

/*
 * Dyadic dispatch table, indexed by the arithmetic opcode field
 * (opcode bits [23:20], hence the ">> 20"). NULL entries are
 * unimplemented operations and make ExtendedCPDO() return 0.
 */
static floatx80 (*const dyadic_extended[16])(struct roundingData*, floatx80 rFn, floatx80 rFm) = {
	[ADF_CODE >> 20] = floatx80_add,
	[MUF_CODE >> 20] = floatx80_mul,
	[SUF_CODE >> 20] = floatx80_sub,
	[RSF_CODE >> 20] = floatx80_rsf,
	[DVF_CODE >> 20] = floatx80_div,
	[RDF_CODE >> 20] = floatx80_rdv,
	[RMF_CODE >> 20] = floatx80_rem,

	/* strictly, these opcodes should not be implemented */
	[FML_CODE >> 20] = floatx80_mul,
	[FDV_CODE >> 20] = floatx80_div,
	[FRD_CODE >> 20] = floatx80_rdv,
};

/*
 * Move: identity. The unused roundData parameter keeps the signature
 * uniform with the other table entries.
 */
static floatx80 floatx80_mvf(struct roundingData *roundData, floatx80 rFm)
{
	return rFm;
}

/* Move negated: flip the sign bit in the combined sign/exponent word. */
static floatx80 floatx80_mnf(struct roundingData *roundData, floatx80 rFm)
{
	rFm.high ^= 0x8000;
	return rFm;
}

/* Absolute value: clear the sign bit. */
static floatx80 floatx80_abs(struct roundingData *roundData, floatx80 rFm)
{
	rFm.high &= 0x7fff;
	return rFm;
}

/*
 * Monadic dispatch table; same indexing scheme as dyadic_extended.
 * NRM (normalize) is a no-op for extended precision, hence mvf.
 */
static floatx80 (*const monadic_extended[16])(struct roundingData*, floatx80 rFm) = {
	[MVF_CODE >> 20] = floatx80_mvf,
	[MNF_CODE >> 20] = floatx80_mnf,
	[ABS_CODE >> 20] = floatx80_abs,
	[RND_CODE >> 20] = floatx80_round_to_int,
	[URD_CODE >> 20] = floatx80_round_to_int,
	[SQT_CODE >> 20] = floatx80_sqrt,
	[NRM_CODE >> 20] = floatx80_mvf,
};

/*
 * ExtendedCPDO - execute one extended-precision data-processing opcode.
 *
 * @roundData: rounding mode/exception accumulator for softfloat.
 * @opcode:    the ARM FPA instruction word.
 * @rFd:       destination register; fExtended is written on success.
 *
 * Source registers holding single or double values are widened to
 * floatx80 first. Returns 1 on success, 0 for unimplemented opcodes or
 * unrecognized source register types.
 */
unsigned int ExtendedCPDO(struct roundingData *roundData, const unsigned int opcode, FPREG * rFd)
{
	FPA11 *fpa11 = GET_FPA11();
	floatx80 rFm;
	unsigned int Fm, opc_mask_shift;

	Fm = getFm(opcode);
	if (CONSTANT_FM(opcode)) {
		/* immediate-constant operand encoded in the Fm field */
		rFm = getExtendedConstant(Fm);
	} else {
		switch (fpa11->fType[Fm]) {
		case typeSingle:
			rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle);
			break;

		case typeDouble:
			rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble);
			break;

		case typeExtended:
			rFm = fpa11->fpreg[Fm].fExtended;
			break;

		default:
			return 0;
		}
	}

	opc_mask_shift = (opcode & MASK_ARITHMETIC_OPCODE) >> 20;
	if (!MONADIC_INSTRUCTION(opcode)) {
		unsigned int Fn = getFn(opcode);
		floatx80 rFn;

		/* widen the second (Fn) operand the same way */
		switch (fpa11->fType[Fn]) {
		case typeSingle:
			rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle);
			break;

		case typeDouble:
			rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble);
			break;

		case typeExtended:
			rFn = fpa11->fpreg[Fn].fExtended;
			break;

		default:
			return 0;
		}

		if (dyadic_extended[opc_mask_shift]) {
			rFd->fExtended = dyadic_extended[opc_mask_shift](roundData, rFn, rFm);
		} else {
			return 0;
		}
	} else {
		if (monadic_extended[opc_mask_shift]) {
			rFd->fExtended = monadic_extended[opc_mask_shift](roundData, rFm);
		} else {
			return 0;
		}
	}

	return 1;
}
gpl-2.0
CyanogenMod/htc-kernel-liberty
arch/arm/nwfpe/extended_cpdo.c
14957
4135
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999

    Direct questions, comments to Scott Bambrough <scottb@netwinder.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/*
 * Extended-precision (80-bit) CPDO (coprocessor data operation) dispatch
 * for the FPA11 emulator: converts operands to floatx80, looks up the
 * arithmetic opcode in a monadic or dyadic function table, and stores the
 * extended result into the destination register.
 */

#include "fpa11.h"
#include "softfloat.h"
#include "fpopcode.h"

/* Transcendental helpers declared but implemented elsewhere (if at all). */
floatx80 floatx80_exp(floatx80 Fm);
floatx80 floatx80_ln(floatx80 Fm);
floatx80 floatx80_sin(floatx80 rFm);
floatx80 floatx80_cos(floatx80 rFm);
floatx80 floatx80_arcsin(floatx80 rFm);
floatx80 floatx80_arctan(floatx80 rFm);
floatx80 floatx80_log(floatx80 rFm);
floatx80 floatx80_tan(floatx80 rFm);
floatx80 floatx80_arccos(floatx80 rFm);
floatx80 floatx80_pow(floatx80 rFn, floatx80 rFm);
floatx80 floatx80_pol(floatx80 rFn, floatx80 rFm);

/* Reverse subtract: computes rFm - rFn (operand order swapped). */
static floatx80 floatx80_rsf(struct roundingData *roundData, floatx80 rFn, floatx80 rFm)
{
	return floatx80_sub(roundData, rFm, rFn);
}

/* Reverse divide: computes rFm / rFn (operand order swapped). */
static floatx80 floatx80_rdv(struct roundingData *roundData, floatx80 rFn, floatx80 rFm)
{
	return floatx80_div(roundData, rFm, rFn);
}

/*
 * Dyadic dispatch table, indexed by the arithmetic opcode field
 * (opcode bits [23:20], hence the ">> 20"). NULL entries are
 * unimplemented operations and make ExtendedCPDO() return 0.
 */
static floatx80 (*const dyadic_extended[16])(struct roundingData*, floatx80 rFn, floatx80 rFm) = {
	[ADF_CODE >> 20] = floatx80_add,
	[MUF_CODE >> 20] = floatx80_mul,
	[SUF_CODE >> 20] = floatx80_sub,
	[RSF_CODE >> 20] = floatx80_rsf,
	[DVF_CODE >> 20] = floatx80_div,
	[RDF_CODE >> 20] = floatx80_rdv,
	[RMF_CODE >> 20] = floatx80_rem,

	/* strictly, these opcodes should not be implemented */
	[FML_CODE >> 20] = floatx80_mul,
	[FDV_CODE >> 20] = floatx80_div,
	[FRD_CODE >> 20] = floatx80_rdv,
};

/*
 * Move: identity. The unused roundData parameter keeps the signature
 * uniform with the other table entries.
 */
static floatx80 floatx80_mvf(struct roundingData *roundData, floatx80 rFm)
{
	return rFm;
}

/* Move negated: flip the sign bit in the combined sign/exponent word. */
static floatx80 floatx80_mnf(struct roundingData *roundData, floatx80 rFm)
{
	rFm.high ^= 0x8000;
	return rFm;
}

/* Absolute value: clear the sign bit. */
static floatx80 floatx80_abs(struct roundingData *roundData, floatx80 rFm)
{
	rFm.high &= 0x7fff;
	return rFm;
}

/*
 * Monadic dispatch table; same indexing scheme as dyadic_extended.
 * NRM (normalize) is a no-op for extended precision, hence mvf.
 */
static floatx80 (*const monadic_extended[16])(struct roundingData*, floatx80 rFm) = {
	[MVF_CODE >> 20] = floatx80_mvf,
	[MNF_CODE >> 20] = floatx80_mnf,
	[ABS_CODE >> 20] = floatx80_abs,
	[RND_CODE >> 20] = floatx80_round_to_int,
	[URD_CODE >> 20] = floatx80_round_to_int,
	[SQT_CODE >> 20] = floatx80_sqrt,
	[NRM_CODE >> 20] = floatx80_mvf,
};

/*
 * ExtendedCPDO - execute one extended-precision data-processing opcode.
 *
 * @roundData: rounding mode/exception accumulator for softfloat.
 * @opcode:    the ARM FPA instruction word.
 * @rFd:       destination register; fExtended is written on success.
 *
 * Source registers holding single or double values are widened to
 * floatx80 first. Returns 1 on success, 0 for unimplemented opcodes or
 * unrecognized source register types.
 */
unsigned int ExtendedCPDO(struct roundingData *roundData, const unsigned int opcode, FPREG * rFd)
{
	FPA11 *fpa11 = GET_FPA11();
	floatx80 rFm;
	unsigned int Fm, opc_mask_shift;

	Fm = getFm(opcode);
	if (CONSTANT_FM(opcode)) {
		/* immediate-constant operand encoded in the Fm field */
		rFm = getExtendedConstant(Fm);
	} else {
		switch (fpa11->fType[Fm]) {
		case typeSingle:
			rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle);
			break;

		case typeDouble:
			rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble);
			break;

		case typeExtended:
			rFm = fpa11->fpreg[Fm].fExtended;
			break;

		default:
			return 0;
		}
	}

	opc_mask_shift = (opcode & MASK_ARITHMETIC_OPCODE) >> 20;
	if (!MONADIC_INSTRUCTION(opcode)) {
		unsigned int Fn = getFn(opcode);
		floatx80 rFn;

		/* widen the second (Fn) operand the same way */
		switch (fpa11->fType[Fn]) {
		case typeSingle:
			rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle);
			break;

		case typeDouble:
			rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble);
			break;

		case typeExtended:
			rFn = fpa11->fpreg[Fn].fExtended;
			break;

		default:
			return 0;
		}

		if (dyadic_extended[opc_mask_shift]) {
			rFd->fExtended = dyadic_extended[opc_mask_shift](roundData, rFn, rFm);
		} else {
			return 0;
		}
	} else {
		if (monadic_extended[opc_mask_shift]) {
			rFd->fExtended = monadic_extended[opc_mask_shift](roundData, rFm);
		} else {
			return 0;
		}
	}

	return 1;
}
gpl-2.0
Lyanzh/linux
drivers/media/usb/em28xx/em28xx-audio.c
110
26987
/* * Empiatech em28x1 audio extension * * Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com> * * Copyright (C) 2007-2014 Mauro Carvalho Chehab * - Port to work with the in-kernel driver * - Cleanups, fixes, alsa-controls, etc. * * This driver is based on my previous au600 usb pstn audio driver * and inherits all the copyrights * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/usb.h> #include <linux/init.h> #include <linux/sound.h> #include <linux/spinlock.h> #include <linux/soundcard.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/initval.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/ac97_codec.h> #include <media/v4l2-common.h> #include "em28xx.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "activates debug info"); #define EM28XX_MAX_AUDIO_BUFS 5 #define EM28XX_MIN_AUDIO_PACKETS 64 #define dprintk(fmt, arg...) 
do { \ if (debug) \ printk(KERN_INFO "em28xx-audio %s: " fmt, \ __func__, ##arg); \ } while (0) static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static int em28xx_deinit_isoc_audio(struct em28xx *dev) { int i; dprintk("Stopping isoc\n"); for (i = 0; i < dev->adev.num_urb; i++) { struct urb *urb = dev->adev.urb[i]; if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); } return 0; } static void em28xx_audio_isocirq(struct urb *urb) { struct em28xx *dev = urb->context; int i; unsigned int oldptr; int period_elapsed = 0; int status; unsigned char *cp; unsigned int stride; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; if (dev->disconnected) { dprintk("device disconnected while streaming. URB status=%d.\n", urb->status); atomic_set(&dev->adev.stream_started, 0); return; } switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dprintk("urb completition error %d.\n", urb->status); break; } if (atomic_read(&dev->adev.stream_started) == 0) return; if (dev->adev.capture_pcm_substream) { substream = dev->adev.capture_pcm_substream; runtime = substream->runtime; stride = runtime->frame_bits >> 3; for (i = 0; i < urb->number_of_packets; i++) { int length = urb->iso_frame_desc[i].actual_length / stride; cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (!length) continue; oldptr = dev->adev.hwptr_done_capture; if (oldptr + length >= runtime->buffer_size) { unsigned int cnt = runtime->buffer_size - oldptr; memcpy(runtime->dma_area + oldptr * stride, cp, cnt * stride); memcpy(runtime->dma_area, cp + cnt * stride, length * stride - cnt * stride); } else { memcpy(runtime->dma_area + oldptr * stride, cp, length * stride); } snd_pcm_stream_lock(substream); dev->adev.hwptr_done_capture += length; if (dev->adev.hwptr_done_capture >= runtime->buffer_size) dev->adev.hwptr_done_capture -= runtime->buffer_size; 
dev->adev.capture_transfer_done += length; if (dev->adev.capture_transfer_done >= runtime->period_size) { dev->adev.capture_transfer_done -= runtime->period_size; period_elapsed = 1; } snd_pcm_stream_unlock(substream); } if (period_elapsed) snd_pcm_period_elapsed(substream); } urb->status = 0; status = usb_submit_urb(urb, GFP_ATOMIC); if (status < 0) em28xx_errdev("resubmit of audio urb failed (error=%i)\n", status); return; } static int em28xx_init_audio_isoc(struct em28xx *dev) { int i, errCode; dprintk("Starting isoc transfers\n"); /* Start streaming */ for (i = 0; i < dev->adev.num_urb; i++) { memset(dev->adev.transfer_buffer[i], 0x80, dev->adev.urb[i]->transfer_buffer_length); errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC); if (errCode) { em28xx_errdev("submit of audio urb failed (error=%i)\n", errCode); em28xx_deinit_isoc_audio(dev); atomic_set(&dev->adev.stream_started, 0); return errCode; } } return 0; } static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t size) { struct snd_pcm_runtime *runtime = subs->runtime; dprintk("Allocating vbuffer\n"); if (runtime->dma_area) { if (runtime->dma_bytes > size) return 0; vfree(runtime->dma_area); } runtime->dma_area = vmalloc(size); if (!runtime->dma_area) return -ENOMEM; runtime->dma_bytes = size; return 0; } static struct snd_pcm_hardware snd_em28xx_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */ /* * The period is 12.288 bytes. Allow a 10% of variation along its * value, in order to avoid overruns/underruns due to some clock * drift. * * FIXME: This period assumes 64 packets, and a 48000 PCM rate. * Calculate it dynamically. 
*/ .period_bytes_min = 11059, .period_bytes_max = 13516, .periods_min = 2, .periods_max = 98, /* 12544, */ }; static int snd_em28xx_capture_open(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int nonblock, ret = 0; if (!dev) { em28xx_err("BUG: em28xx can't find device struct." " Can't proceed with open\n"); return -ENODEV; } if (dev->disconnected) return -ENODEV; dprintk("opening device and trying to acquire exclusive lock\n"); nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); runtime->hw = snd_em28xx_hw_capture; if (dev->adev.users == 0) { if (dev->alt == 0 || dev->is_audio_only) { if (dev->is_audio_only) /* audio is on a separate interface */ dev->alt = 1; else /* audio is on the same interface as video */ dev->alt = 7; /* * FIXME: The intention seems to be to select * the alt setting with the largest * wMaxPacketSize for the video endpoint. * At least dev->alt should be used instead, but * we should probably not touch it at all if it * is already >0, because wMaxPacketSize of the * audio endpoints seems to be the same for all. 
*/ dprintk("changing alternate number on interface %d to %d\n", dev->ifnum, dev->alt); usb_set_interface(dev->udev, dev->ifnum, dev->alt); } /* Sets volume, mute, etc */ dev->mute = 0; ret = em28xx_audio_analog_set(dev); if (ret < 0) goto err; } kref_get(&dev->ref); dev->adev.users++; mutex_unlock(&dev->lock); /* Dynamically adjust the period size */ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, dev->adev.period * 95 / 100, dev->adev.period * 105 / 100); dev->adev.capture_pcm_substream = substream; return 0; err: mutex_unlock(&dev->lock); em28xx_err("Error while configuring em28xx mixer\n"); return ret; } static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); dprintk("closing device\n"); dev->mute = 1; mutex_lock(&dev->lock); dev->adev.users--; if (atomic_read(&dev->adev.stream_started) > 0) { atomic_set(&dev->adev.stream_started, 0); schedule_work(&dev->adev.wq_trigger); } em28xx_audio_analog_set(dev); if (substream->runtime->dma_area) { dprintk("freeing\n"); vfree(substream->runtime->dma_area); substream->runtime->dma_area = NULL; } mutex_unlock(&dev->lock); kref_put(&dev->ref, em28xx_free_device); return 0; } static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int ret; struct em28xx *dev = snd_pcm_substream_chip(substream); if (dev->disconnected) return -ENODEV; dprintk("Setting capture parameters\n"); ret = snd_pcm_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); if (ret < 0) return ret; #if 0 /* TODO: set up em28xx audio chip to deliver the correct audio format, current default is 48000hz multiplexed => 96000hz mono which shouldn't matter since analogue TV only supports mono */ unsigned int channels, rate, format; format = params_format(hw_params); rate = params_rate(hw_params); channels = params_channels(hw_params); 
#endif return 0; } static int snd_em28xx_hw_capture_free(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); struct em28xx_audio *adev = &dev->adev; dprintk("Stop capture, if needed\n"); if (atomic_read(&adev->stream_started) > 0) { atomic_set(&adev->stream_started, 0); schedule_work(&adev->wq_trigger); } return 0; } static int snd_em28xx_prepare(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); if (dev->disconnected) return -ENODEV; dev->adev.hwptr_done_capture = 0; dev->adev.capture_transfer_done = 0; return 0; } static void audio_trigger(struct work_struct *work) { struct em28xx_audio *adev = container_of(work, struct em28xx_audio, wq_trigger); struct em28xx *dev = container_of(adev, struct em28xx, adev); if (atomic_read(&adev->stream_started)) { dprintk("starting capture"); em28xx_init_audio_isoc(dev); } else { dprintk("stopping capture"); em28xx_deinit_isoc_audio(dev); } } static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct em28xx *dev = snd_pcm_substream_chip(substream); int retval = 0; if (dev->disconnected) return -ENODEV; switch (cmd) { case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */ case SNDRV_PCM_TRIGGER_START: atomic_set(&dev->adev.stream_started, 1); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */ case SNDRV_PCM_TRIGGER_STOP: atomic_set(&dev->adev.stream_started, 0); break; default: retval = -EINVAL; } schedule_work(&dev->adev.wq_trigger); return retval; } static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream *substream) { unsigned long flags; struct em28xx *dev; snd_pcm_uframes_t hwptr_done; dev = snd_pcm_substream_chip(substream); if (dev->disconnected) return SNDRV_PCM_POS_XRUN; spin_lock_irqsave(&dev->adev.slock, flags); hwptr_done = dev->adev.hwptr_done_capture; 
spin_unlock_irqrestore(&dev->adev.slock, flags); return hwptr_done; } static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs, unsigned long offset) { void *pageptr = subs->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } /* * AC97 volume control support */ static int em28xx_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); if (dev->disconnected) return -ENODEV; info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0; info->value.integer.max = 0x1f; return 0; } static int em28xx_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) | (0x1f - (value->value.integer.value[1] & 0x1f)) << 8; int nonblock = 0; int rc; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); rc = em28xx_read_ac97(dev, kcontrol->private_value); if (rc < 0) goto err; val |= rc & 0x8000; /* Preserve the mute flag */ rc = em28xx_write_ac97(dev, kcontrol->private_value, val); if (rc < 0) goto err; dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n", (val & 0x8000) ? 
"muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); err: mutex_unlock(&dev->lock); return rc; } static int em28xx_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; int nonblock = 0; int val; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); val = em28xx_read_ac97(dev, kcontrol->private_value); mutex_unlock(&dev->lock); if (val < 0) return val; dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); value->value.integer.value[0] = 0x1f - (val & 0x1f); value->value.integer.value[1] = 0x1f - ((val << 8) & 0x1f); return 0; } static int em28xx_vol_put_mute(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); u16 val = value->value.integer.value[0]; struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; int nonblock = 0; int rc; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); rc = em28xx_read_ac97(dev, kcontrol->private_value); if (rc < 0) goto err; if (val) rc &= 0x1f1f; else rc |= 0x8000; rc = em28xx_write_ac97(dev, kcontrol->private_value, rc); if (rc < 0) goto err; dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n", (val & 0x8000) ? 
"muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); err: mutex_unlock(&dev->lock); return rc; } static int em28xx_vol_get_mute(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; int nonblock = 0; int val; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); val = em28xx_read_ac97(dev, kcontrol->private_value); mutex_unlock(&dev->lock); if (val < 0) return val; if (val & 0x8000) value->value.integer.value[0] = 0; else value->value.integer.value[0] = 1; dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); return 0; } static const DECLARE_TLV_DB_SCALE(em28xx_db_scale, -3450, 150, 0); static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev, char *name, int id) { int err; char ctl_name[44]; struct snd_kcontrol *kctl; struct snd_kcontrol_new tmp; memset (&tmp, 0, sizeof(tmp)); tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER, tmp.private_value = id, tmp.name = ctl_name, /* Add Mute Control */ sprintf(ctl_name, "%s Switch", name); tmp.get = em28xx_vol_get_mute; tmp.put = em28xx_vol_put_mute; tmp.info = snd_ctl_boolean_mono_info; kctl = snd_ctl_new1(&tmp, dev); err = snd_ctl_add(card, kctl); if (err < 0) return err; dprintk("Added control %s for ac97 volume control 0x%04x\n", ctl_name, id); memset (&tmp, 0, sizeof(tmp)); tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER, tmp.private_value = id, tmp.name = ctl_name, /* Add Volume Control */ sprintf(ctl_name, "%s Volume", name); tmp.get = em28xx_vol_get; tmp.put = em28xx_vol_put; tmp.info = em28xx_vol_info; tmp.tlv.p = em28xx_db_scale, kctl = snd_ctl_new1(&tmp, dev); 
err = snd_ctl_add(card, kctl); if (err < 0) return err; dprintk("Added control %s for ac97 volume control 0x%04x\n", ctl_name, id); return 0; } /* * register/unregister code and data */ static struct snd_pcm_ops snd_em28xx_pcm_capture = { .open = snd_em28xx_capture_open, .close = snd_em28xx_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_em28xx_hw_capture_params, .hw_free = snd_em28xx_hw_capture_free, .prepare = snd_em28xx_prepare, .trigger = snd_em28xx_capture_trigger, .pointer = snd_em28xx_capture_pointer, .page = snd_pcm_get_vmalloc_page, }; static void em28xx_audio_free_urb(struct em28xx *dev) { int i; for (i = 0; i < dev->adev.num_urb; i++) { struct urb *urb = dev->adev.urb[i]; if (!urb) continue; usb_free_coherent(dev->udev, urb->transfer_buffer_length, dev->adev.transfer_buffer[i], urb->transfer_dma); usb_free_urb(urb); } kfree(dev->adev.urb); kfree(dev->adev.transfer_buffer); dev->adev.num_urb = 0; } /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ static int em28xx_audio_ep_packet_size(struct usb_device *udev, struct usb_endpoint_descriptor *e) { int size = le16_to_cpu(e->wMaxPacketSize); if (udev->speed == USB_SPEED_HIGH) return (size & 0x7ff) * (1 + (((size) >> 11) & 0x03)); return size & 0x7ff; } static int em28xx_audio_urb_init(struct em28xx *dev) { struct usb_interface *intf; struct usb_endpoint_descriptor *e, *ep = NULL; int i, ep_size, interval, num_urb, npackets; int urb_size, bytes_per_transfer; u8 alt; if (dev->ifnum) alt = 1; else alt = 7; intf = usb_ifnum_to_if(dev->udev, dev->ifnum); if (intf->num_altsetting <= alt) { em28xx_errdev("alt %d doesn't exist on interface %d\n", dev->ifnum, alt); return -ENODEV; } for (i = 0; i < intf->altsetting[alt].desc.bNumEndpoints; i++) { e = &intf->altsetting[alt].endpoint[i].desc; if (!usb_endpoint_dir_in(e)) continue; if (e->bEndpointAddress == EM28XX_EP_AUDIO) { ep = e; break; } } if (!ep) { em28xx_errdev("Couldn't find an audio endpoint"); return -ENODEV; } ep_size = 
em28xx_audio_ep_packet_size(dev->udev, ep); interval = 1 << (ep->bInterval - 1); em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n", EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed), dev->ifnum, alt, interval, ep_size); /* Calculate the number and size of URBs to better fit the audio samples */ /* * Estimate the number of bytes per DMA transfer. * * This is given by the bit rate (for now, only 48000 Hz) multiplied * by 2 channels and 2 bytes/sample divided by the number of microframe * intervals and by the microframe rate (125 us) */ bytes_per_transfer = DIV_ROUND_UP(48000 * 2 * 2, 125 * interval); /* * Estimate the number of transfer URBs. Don't let it go past the * maximum number of URBs that is known to be supported by the device. */ num_urb = DIV_ROUND_UP(bytes_per_transfer, ep_size); if (num_urb > EM28XX_MAX_AUDIO_BUFS) num_urb = EM28XX_MAX_AUDIO_BUFS; /* * Now that we know the number of bytes per transfer and the number of * URBs, estimate the typical size of an URB, in order to adjust the * minimal number of packets. */ urb_size = bytes_per_transfer / num_urb; /* * Now, calculate the amount of audio packets to be filled on each * URB. In order to preserve the old behaviour, use a minimal * threshold for this value. 
*/ npackets = EM28XX_MIN_AUDIO_PACKETS; if (urb_size > ep_size * npackets) npackets = DIV_ROUND_UP(urb_size, ep_size); em28xx_info("Number of URBs: %d, with %d packets and %d size", num_urb, npackets, urb_size); /* Estimate the bytes per period */ dev->adev.period = urb_size * npackets; /* Allocate space to store the number of URBs to be used */ dev->adev.transfer_buffer = kcalloc(num_urb, sizeof(*dev->adev.transfer_buffer), GFP_ATOMIC); if (!dev->adev.transfer_buffer) { return -ENOMEM; } dev->adev.urb = kcalloc(num_urb, sizeof(*dev->adev.urb), GFP_ATOMIC); if (!dev->adev.urb) { kfree(dev->adev.transfer_buffer); return -ENOMEM; } /* Alloc memory for each URB and for each transfer buffer */ dev->adev.num_urb = num_urb; for (i = 0; i < num_urb; i++) { struct urb *urb; int j, k; void *buf; urb = usb_alloc_urb(npackets, GFP_ATOMIC); if (!urb) { em28xx_errdev("usb_alloc_urb failed!\n"); em28xx_audio_free_urb(dev); return -ENOMEM; } dev->adev.urb[i] = urb; buf = usb_alloc_coherent(dev->udev, npackets * ep_size, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { em28xx_errdev("usb_alloc_coherent failed!\n"); em28xx_audio_free_urb(dev); return -ENOMEM; } dev->adev.transfer_buffer[i] = buf; urb->dev = dev->udev; urb->context = dev; urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->transfer_buffer = buf; urb->interval = interval; urb->complete = em28xx_audio_isocirq; urb->number_of_packets = npackets; urb->transfer_buffer_length = ep_size * npackets; for (j = k = 0; j < npackets; j++, k += ep_size) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = ep_size; } } return 0; } static int em28xx_audio_init(struct em28xx *dev) { struct em28xx_audio *adev = &dev->adev; struct snd_pcm *pcm; struct snd_card *card; static int devnr; int err; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) { /* This device does not support the extension (in this case the device is expecting the snd-usb-audio 
module or doesn't have analog audio support at all) */ return 0; } em28xx_info("Binding audio extension\n"); kref_get(&dev->ref); printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus " "Rechberger\n"); printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2007-2014 Mauro Carvalho Chehab\n"); err = snd_card_new(&dev->udev->dev, index[devnr], "Em28xx Audio", THIS_MODULE, 0, &card); if (err < 0) return err; spin_lock_init(&adev->slock); adev->sndcard = card; adev->udev = dev->udev; err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm); if (err < 0) goto card_free; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture); pcm->info_flags = 0; pcm->private_data = dev; strcpy(pcm->name, "Empia 28xx Capture"); strcpy(card->driver, "Em28xx-Audio"); strcpy(card->shortname, "Em28xx Audio"); strcpy(card->longname, "Empia Em28xx Audio"); INIT_WORK(&adev->wq_trigger, audio_trigger); if (dev->audio_mode.ac97 != EM28XX_NO_AC97) { em28xx_cvol_new(card, dev, "Video", AC97_VIDEO); em28xx_cvol_new(card, dev, "Line In", AC97_LINE); em28xx_cvol_new(card, dev, "Phone", AC97_PHONE); em28xx_cvol_new(card, dev, "Microphone", AC97_MIC); em28xx_cvol_new(card, dev, "CD", AC97_CD); em28xx_cvol_new(card, dev, "AUX", AC97_AUX); em28xx_cvol_new(card, dev, "PCM", AC97_PCM); em28xx_cvol_new(card, dev, "Master", AC97_MASTER); em28xx_cvol_new(card, dev, "Line", AC97_HEADPHONE); em28xx_cvol_new(card, dev, "Mono", AC97_MASTER_MONO); em28xx_cvol_new(card, dev, "LFE", AC97_CENTER_LFE_MASTER); em28xx_cvol_new(card, dev, "Surround", AC97_SURROUND_MASTER); } err = em28xx_audio_urb_init(dev); if (err) goto card_free; err = snd_card_register(card); if (err < 0) goto urb_free; em28xx_info("Audio extension successfully initialized\n"); return 0; urb_free: em28xx_audio_free_urb(dev); card_free: snd_card_free(card); adev->sndcard = NULL; return err; } static int em28xx_audio_fini(struct em28xx *dev) { if (dev == NULL) return 0; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) { /* This device 
does not support the extension (in this case the device is expecting the snd-usb-audio module or doesn't have analog audio support at all) */ return 0; } em28xx_info("Closing audio extension"); if (dev->adev.sndcard) { snd_card_disconnect(dev->adev.sndcard); flush_work(&dev->adev.wq_trigger); em28xx_audio_free_urb(dev); snd_card_free(dev->adev.sndcard); dev->adev.sndcard = NULL; } kref_put(&dev->ref, em28xx_free_device); return 0; } static int em28xx_audio_suspend(struct em28xx *dev) { if (dev == NULL) return 0; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) return 0; em28xx_info("Suspending audio extension"); em28xx_deinit_isoc_audio(dev); atomic_set(&dev->adev.stream_started, 0); return 0; } static int em28xx_audio_resume(struct em28xx *dev) { if (dev == NULL) return 0; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) return 0; em28xx_info("Resuming audio extension"); /* Nothing to do other than schedule_work() ?? */ schedule_work(&dev->adev.wq_trigger); return 0; } static struct em28xx_ops audio_ops = { .id = EM28XX_AUDIO, .name = "Em28xx Audio Extension", .init = em28xx_audio_init, .fini = em28xx_audio_fini, .suspend = em28xx_audio_suspend, .resume = em28xx_audio_resume, }; static int __init em28xx_alsa_register(void) { return em28xx_register_extension(&audio_ops); } static void __exit em28xx_alsa_unregister(void) { em28xx_unregister_extension(&audio_ops); } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_DESCRIPTION(DRIVER_DESC " - audio interface"); MODULE_VERSION(EM28XX_VERSION); module_init(em28xx_alsa_register); module_exit(em28xx_alsa_unregister);
gpl-2.0
adis1313-test/android_kernel_samsung_wave-1
drivers/media/video/samsung/tv20/s5pv210/tv_clock_s5pv210.c
110
6579
/* linux/drivers/media/video/samsung/tv20/s5pv210/tv_clock_s5pc110.c * * Copyright (c) 2010 Samsung Electronics * http://www.samsung.com/ * * clock raw ftn file for Samsung TVOut driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/uaccess.h> #include <linux/io.h> #include <mach/map.h> #include <mach/regs-clock.h> #include "tv_out_s5pv210.h" #include "regs/regs-clock_extra.h" #ifdef COFIG_TVOUT_RAW_DBG #define S5P_TVOUT_CLK_DEBUG 1 #endif #ifdef S5P_TVOUT_CLK_DEBUG #define TVCLKPRINTK(fmt, args...) \ printk(KERN_INFO "\t\t[TVCLK] %s: " fmt, __func__ , ## args) #else #define TVCLKPRINTK(fmt, args...) #endif void __s5p_tv_clk_init_hpll(unsigned int lock_time, bool vsel, unsigned int mdiv, unsigned int pdiv, unsigned int sdiv) { u32 temp; TVCLKPRINTK("%d,%d,%d,%d\n\r", lock_time, mdiv, pdiv, sdiv); temp = readl(S5P_VPLL_CON); temp &= ~VPLL_ENABLE; writel(temp, S5P_VPLL_CON); temp = 0; if (vsel) temp |= VCO_FREQ_SEL; temp |= VPLL_ENABLE; temp |= MDIV(mdiv) | PDIV(pdiv) | SDIV(sdiv); writel(VPLL_LOCKTIME(lock_time), S5P_VPLL_LOCK); writel(temp, S5P_VPLL_CON); while (!VPLL_LOCKED(readl(S5P_VPLL_CON))); TVCLKPRINTK("0x%08x,0x%08x\n\r", readl(S5P_VPLL_LOCK), \ readl(S5P_VPLL_CON)); } void __s5p_tv_clk_hpll_onoff(bool en) { } s5p_tv_clk_err __s5p_tv_clk_init_href(s5p_tv_clk_hpll_ref hpll_ref) { return S5P_TV_CLK_ERR_NO_ERROR; } /* prevent hdmi hang-up when reboot */ int __s5p_tv_clk_change_internal(void) { u32 reg = readl(S5P_CLK_SRC1); /* set to SCLK_DAC */ reg &= HDMI_SEL_MASK; /* set to SCLK_PIXEL */ reg &= VMIXER_SEL_MASK; writel(reg, S5P_CLK_SRC1); return 0; } s5p_tv_clk_err __s5p_tv_clk_init_mout_hpll(s5p_tv_clk_mout_hpll mout_hpll) { TVCLKPRINTK("(%d)\n\r", mout_hpll); 
writel(readl(S5P_CLK_SRC1) | HDMI_SEL_HDMIPHY, S5P_CLK_SRC1); TVCLKPRINTK("S5P_CLK_SRC1 :0x%08x\n", readl(S5P_CLK_SRC1)); return S5P_TV_CLK_ERR_NO_ERROR; } s5p_tv_clk_err __s5p_tv_clk_init_video_mixer(s5p_tv_clk_vmiexr_srcclk src_clk) { switch (src_clk) { /* for analog tv out 0:SCLK_DAC */ case TVOUT_CLK_VMIXER_SRCCLK_VCLK_54: writel(readl(S5P_CLK_SRC1) & VMIXER_SEL_MASK, S5P_CLK_SRC1); break; /* for digital hdmi_phy 1: SCLK_HDMI */ case TVOUT_CLK_VMIXER_SRCCLK_MOUT_HPLL: writel(readl(S5P_CLK_SRC1) | VMIXER_SEL_MOUT_VPLL, \ S5P_CLK_SRC1); break; default: TVCLKPRINTK("[ERR] invalid src_clk parameter = %d\n", src_clk); return S5P_TV_CLK_ERR_INVALID_PARAM; break; } TVCLKPRINTK("S5P_CLK_SRC1 :0x%08x\n", readl(S5P_CLK_SRC1)); return S5P_TV_CLK_ERR_NO_ERROR; } void __s5p_tv_clk_init_hdmi_ratio(unsigned int clk_div) { TVCLKPRINTK("(%d)\n\r", clk_div); writel((readl(S5P_CLK_DIV1) & HDMI_DIV_RATIO_MASK) | \ HDMI_DIV_RATIO(clk_div), S5P_CLK_DIV1); TVCLKPRINTK("(0x%08x)\n\r", readl(S5P_CLK_DIV3)); } /* * hclk gating */ /* VP */ void __s5p_tv_clk_set_vp_clk_onoff(bool clk_on) { /* TVCLKPRINTK("VP hclk : %s\n\r", clk_on ? "on":"off"); if (clk_on) bit_add_l(S5P_CLKGATE_IP1_VP, S5P_CLKGATE_IP1); else bit_del_l(S5P_CLKGATE_IP1_VP, S5P_CLKGATE_IP1); TVCLKPRINTK("S5P_CLKGATE_MAIN1 :0x%08x\n\r", readl(S5P_CLKGATE_MAIN1)); */ } /* MIXER */ void __s5p_tv_clk_set_vmixer_hclk_onoff(bool clk_on) { /* TVCLKPRINTK("MIXER hclk : %s\n\r", clk_on ? "on":"off"); if (clk_on) bit_add_l(S5P_CLKGATE_IP1_MIXER, S5P_CLKGATE_IP1); else bit_del_l(S5P_CLKGATE_IP1_MIXER, S5P_CLKGATE_IP1); TVCLKPRINTK("S5P_CLKGATE_MAIN1 :0x%08x\n\r", readl(S5P_CLKGATE_MAIN1)); */ } /* TVENC */ void __s5p_tv_clk_set_sdout_hclk_onoff(bool clk_on) { /* TVCLKPRINTK("TVENC hclk : %s\n\r", clk_on ? 
"on":"off"); if (clk_on) bit_add_l(S5P_CLKGATE_IP1_TVENC, S5P_CLKGATE_IP1); else bit_del_l(S5P_CLKGATE_IP1_TVENC, S5P_CLKGATE_IP1); */ } /* HDMI */ void __s5p_tv_clk_set_hdmi_hclk_onoff(bool clk_on) { /* TVCLKPRINTK("HDMI hclk : %s\n\r", clk_on ? "on":"off"); if (clk_on) { bit_add_l(S5P_CLKGATE_IP1_HDMI, S5P_CLKGATE_IP1); bit_add_l(VMIXER_OUT_SEL_HDMI, S5P_MIXER_OUT_SEL); } else bit_del_l(S5P_CLKGATE_IP1_HDMI, S5P_CLKGATE_IP1); TVCLKPRINTK("S5P_CLKGATE_PERI1 :0x%08x\n\r", readl(S5P_CLKGATE_PERI1)); TVCLKPRINTK("clk output is %s\n\r", readl(S5P_MIXER_OUT_SEL) ? "HDMI":"SDOUT"); */ } /* * sclk gating */ /* MIXER */ void __s5p_tv_clk_set_vmixer_sclk_onoff(bool clk_on) { #if 0 TVCLKPRINTK("MIXER sclk : %s\n\r", clk_on ? "on":"off"); if (clk_on) bit_add_l(CLK_SCLK_VMIXER_PASS, S5P_SCLKGATE0); else bit_del_l(CLK_SCLK_VMIXER_PASS, S5P_SCLKGATE0); TVCLKPRINTK("S5P_SCLKGATE0 :0x%08x\n\r", readl(S5P_SCLKGATE0)); #endif } /* TVENC */ void __s5p_tv_clk_set_sdout_sclk_onoff(bool clk_on) { #if 0 TVCLKPRINTK("TVENC sclk : %s\n\r", clk_on ? "on":"off"); if (clk_on) bit_add_l(CLK_SCLK_TV54_PASS | CLK_SCLK_VDAC54_PASS, S5P_SCLKGATE0); else bit_del_l(CLK_SCLK_TV54_PASS | CLK_SCLK_VDAC54_PASS, S5P_SCLKGATE0); TVCLKPRINTK("S5P_SCLKGATE0 :0x%08x\n\r", readl(S5P_SCLKGATE0)); #endif } /* HDMI */ void __s5p_tv_clk_set_hdmi_sclk_onoff(bool clk_on) { #if 0 TVCLKPRINTK("HDMI sclk : %s\n\r", clk_on ? 
"on":"off"); if (clk_on) bit_add_l(CLK_SCLK_HDMI_PASS, S5P_SCLKGATE0); else bit_del_l(CLK_SCLK_HDMI_PASS, S5P_SCLKGATE0); TVCLKPRINTK("S5P_SCLKGATE0 :0x%08x\n\r", readl(S5P_SCLKGATE0)); #endif } void __s5p_tv_clk_start(bool vp, bool sdout, bool hdmi) { __s5p_tv_clk_set_vp_clk_onoff(vp); __s5p_tv_clk_set_sdout_hclk_onoff(sdout); __s5p_tv_clk_set_sdout_sclk_onoff(sdout); __s5p_tv_clk_set_hdmi_hclk_onoff(hdmi); __s5p_tv_clk_set_vmixer_hclk_onoff(true); __s5p_tv_clk_set_vmixer_sclk_onoff(true); if (hdmi) __s5p_tv_clk_hpll_onoff(true); } void __s5p_tv_clk_stop(void) { __s5p_tv_clk_set_sdout_sclk_onoff(false); __s5p_tv_clk_set_sdout_hclk_onoff(false); __s5p_tv_clk_set_hdmi_sclk_onoff(false); __s5p_tv_clk_set_hdmi_hclk_onoff(false); __s5p_tv_clk_set_vp_clk_onoff(false); __s5p_tv_clk_set_vmixer_sclk_onoff(false); __s5p_tv_clk_set_vmixer_hclk_onoff(false); __s5p_tv_clk_hpll_onoff(false); } int __init __s5p_tvclk_probe(struct platform_device *pdev, u32 res_num) { return 0; } int __init __s5p_tvclk_release(struct platform_device *pdev) { return 0; }
gpl-2.0
hyperdriveguy/limbo-android
jni/qemu/roms/openbios/libopenbios/font_8x16.c
110
95894
/**********************************************/ /* */ /* Font file generated by cpi2fnt */ /* * * * * originally from the Linux distribution * * * **********************************************/ #include "libopenbios/fontdata.h" const unsigned char fontdata_8x16[FONTDATAMAX_8X16] = { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 1 0x01 '^A' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x81, /* 10000001 */ 0xa5, /* 10100101 */ 0x81, /* 10000001 */ 0x81, /* 10000001 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0x81, /* 10000001 */ 0x81, /* 10000001 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 2 0x02 '^B' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xdb, /* 11011011 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 3 0x03 '^C' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 4 0x04 '^D' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, 
/* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 5 0x05 '^E' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0xe7, /* 11100111 */ 0xe7, /* 11100111 */ 0xe7, /* 11100111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 6 0x06 '^F' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 7 0x07 '^G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 8 0x08 '^H' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xe7, /* 11100111 */ 0xc3, /* 11000011 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 9 0x09 '^I' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x42, /* 01000010 */ 0x42, /* 01000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 10 0x0a '^J' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0x99, /* 10011001 */ 0xbd, 
/* 10111101 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0xc3, /* 11000011 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 11 0x0b '^K' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1e, /* 00011110 */ 0x0e, /* 00001110 */ 0x1a, /* 00011010 */ 0x32, /* 00110010 */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 12 0x0c '^L' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 13 0x0d '^M' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x33, /* 00110011 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x70, /* 01110000 */ 0xf0, /* 11110000 */ 0xe0, /* 11100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 14 0x0e '^N' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x67, /* 01100111 */ 0xe7, /* 11100111 */ 0xe6, /* 11100110 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 15 0x0f '^O' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xdb, /* 11011011 */ 0x3c, /* 00111100 */ 0xe7, /* 11100111 */ 0x3c, /* 00111100 */ 0xdb, /* 11011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 16 0x10 '^P' */ 0x00, /* 00000000 */ 
0x80, /* 10000000 */ 0xc0, /* 11000000 */ 0xe0, /* 11100000 */ 0xf0, /* 11110000 */ 0xf8, /* 11111000 */ 0xfe, /* 11111110 */ 0xf8, /* 11111000 */ 0xf0, /* 11110000 */ 0xe0, /* 11100000 */ 0xc0, /* 11000000 */ 0x80, /* 10000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 17 0x11 '^Q' */ 0x00, /* 00000000 */ 0x02, /* 00000010 */ 0x06, /* 00000110 */ 0x0e, /* 00001110 */ 0x1e, /* 00011110 */ 0x3e, /* 00111110 */ 0xfe, /* 11111110 */ 0x3e, /* 00111110 */ 0x1e, /* 00011110 */ 0x0e, /* 00001110 */ 0x06, /* 00000110 */ 0x02, /* 00000010 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 18 0x12 '^R' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 19 0x13 '^S' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 20 0x14 '^T' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7f, /* 01111111 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7b, /* 01111011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 21 0x15 '^U' */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0xc6, /* 
11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 22 0x16 '^V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 23 0x17 '^W' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 24 0x18 '^X' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 25 0x19 '^Y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 26 0x1a '^Z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 27 0x1b '^[' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xfe, /* 11111110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 28 0x1c '^\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 29 0x1d '^]' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 30 0x1e '^^' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 31 0x1f '^_' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 32 0x20 ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ /* 33 0x21 '!' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 34 0x22 '"' */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x24, /* 00100100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 35 0x23 '#' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 36 0x24 '$' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x86, /* 10000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 37 0x25 '%' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc2, /* 11000010 */ 0xc6, /* 11000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0x86, /* 10000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 38 0x26 '&' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xcc, /* 11001100 */ 0xcc, /* 
11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 39 0x27 ''' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 40 0x28 '(' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 41 0x29 ')' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 42 0x2a '*' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0xff, /* 11111111 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 43 0x2b '+' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 44 0x2c ',' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 45 0x2d '-' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 46 0x2e '.' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 47 0x2f '/' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x02, /* 00000010 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x80, /* 10000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 48 0x30 '0' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 49 0x31 '1' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x38, /* 00111000 */ 0x78, /* 01111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, 
/* 00000000 */ 0x00, /* 00000000 */ /* 50 0x32 '2' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 51 0x33 '3' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x3c, /* 00111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 52 0x34 '4' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x1c, /* 00011100 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x1e, /* 00011110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 53 0x35 '5' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 54 0x36 '6' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 55 0x37 '7' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, 
/* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 56 0x38 '8' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 57 0x39 '9' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 58 0x3a ':' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 59 0x3b ';' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 60 0x3c '<' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 61 0x3d '=' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, 
/* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 62 0x3e '>' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 63 0x3f '?' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 64 0x40 '@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xdc, /* 11011100 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 65 0x41 'A' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 66 0x42 'B' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 67 0x43 'C' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc2, /* 11000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 68 0x44 'D' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 69 0x45 'E' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x66, /* 01100110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x60, /* 01100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 70 0x46 'F' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x66, /* 01100110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 71 0x47 'G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xde, /* 11011110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x66, /* 01100110 */ 0x3a, /* 00111010 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 72 0x48 'H' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 
0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 73 0x49 'I' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 74 0x4a 'J' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1e, /* 00011110 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 75 0x4b 'K' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe6, /* 11100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x78, /* 01111000 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 76 0x4c 'L' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 77 0x4d 'M' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xee, /* 11101110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 78 0x4e 'N' */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xfe, /* 11111110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 79 0x4f 'O' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 80 0x50 'P' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 81 0x51 'Q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xde, /* 11011110 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0x0e, /* 00001110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 82 0x52 'R' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 83 0x53 'S' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 84 0x54 'T' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x5a, /* 01011010 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 85 0x55 'U' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 86 0x56 'V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 87 0x57 'W' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0xee, /* 11101110 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 88 0x58 'X' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 89 0x59 'Y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 
*/ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 90 0x5a 'Z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x86, /* 10000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc2, /* 11000010 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 91 0x5b '[' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 92 0x5c '\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x80, /* 10000000 */ 0xc0, /* 11000000 */ 0xe0, /* 11100000 */ 0x70, /* 01110000 */ 0x38, /* 00111000 */ 0x1c, /* 00011100 */ 0x0e, /* 00001110 */ 0x06, /* 00000110 */ 0x02, /* 00000010 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 93 0x5d ']' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 94 0x5e '^' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 95 0x5f '_' 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 96 0x60 '`' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 97 0x61 'a' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 98 0x62 'b' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 99 0x63 'c' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 100 0x64 'd' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1c, /* 00011100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 
11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 101 0x65 'e' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 102 0x66 'f' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1c, /* 00011100 */ 0x36, /* 00110110 */ 0x32, /* 00110010 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 103 0x67 'g' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ /* 104 0x68 'h' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x6c, /* 01101100 */ 0x76, /* 01110110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 105 0x69 'i' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 106 0x6a 'j' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 
0x00, /* 00000000 */ 0x0e, /* 00001110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 107 0x6b 'k' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x78, /* 01111000 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 108 0x6c 'l' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 109 0x6d 'm' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xec, /* 11101100 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 110 0x6e 'n' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 111 0x6f 'o' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ /* 112 0x70 'p' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ /* 113 0x71 'q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x1e, /* 00011110 */ 0x00, /* 00000000 */ /* 114 0x72 'r' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x76, /* 01110110 */ 0x66, /* 01100110 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 115 0x73 's' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 116 0x74 't' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0xfc, /* 11111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x36, /* 00110110 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 117 0x75 'u' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 
0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 118 0x76 'v' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 119 0x77 'w' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 120 0x78 'x' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 121 0x79 'y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ /* 122 0x7a 'z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xcc, /* 11001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 123 0x7b '{' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 124 0x7c '|' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 125 0x7d '}' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 126 0x7e '~' */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 127 0x7f '' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 128 0x80 '€' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc2, /* 11000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 
0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 129 0x81 '' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 130 0x82 '‚' */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 131 0x83 'ƒ' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 132 0x84 '„' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 133 0x85 '…' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 134 0x86 '†' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x78, /* 
01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 135 0x87 '‡' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 136 0x88 'ˆ' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 137 0x89 '‰' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 138 0x8a 'Š' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 139 0x8b '‹' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 
140 0x8c 'Œ' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 141 0x8d '' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 142 0x8e 'Ž' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 143 0x8f '' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 144 0x90 '' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x66, /* 01100110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 145 0x91 '‘' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xec, /* 11101100 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 
*/ 0xd8, /* 11011000 */ 0x6e, /* 01101110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 146 0x92 '’' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3e, /* 00111110 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xce, /* 11001110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 147 0x93 '“' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 148 0x94 '”' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 149 0x95 '•' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 150 0x96 '–' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 151 0x97 '—' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 
00011000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 152 0x98 '˜' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ /* 153 0x99 '™' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 154 0x9a 'š' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 155 0x9b '›' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 156 0x9c 'œ' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x64, /* 01100100 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xe6, /* 11100110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 157 0x9d '' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 158 0x9e 'ž' */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xf8, /* 11111000 */ 0xc4, /* 11000100 */ 0xcc, /* 11001100 */ 0xde, /* 11011110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 159 0x9f 'Ÿ' */ 0x00, /* 00000000 */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 160 0xa0 ' ' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 161 0xa1 '¡' */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 162 0xa2 '¢' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 
11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 163 0xa3 '£' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 164 0xa4 '¤' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 165 0xa5 '¥' */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xfe, /* 11111110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 166 0xa6 '¦' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 167 0xa7 '§' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 168 0xa8 '¨' */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 169 0xa9 '©' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 170 0xaa 'ª' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 171 0xab '«' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0xe0, /* 11100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xdc, /* 11011100 */ 0x86, /* 10000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 172 0xac '¬' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0xe0, /* 11100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x66, /* 01100110 */ 0xce, /* 11001110 */ 0x9a, /* 10011010 */ 0x3f, /* 00111111 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 173 0xad '­' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 
00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 174 0xae '®' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x36, /* 00110110 */ 0x6c, /* 01101100 */ 0xd8, /* 11011000 */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 175 0xaf '¯' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xd8, /* 11011000 */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x6c, /* 01101100 */ 0xd8, /* 11011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 176 0xb0 '°' */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ /* 177 0xb1 '±' */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ /* 178 0xb2 '²' */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ /* 179 0xb3 '³' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 
0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 180 0xb4 '´' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 181 0xb5 'µ' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 182 0xb6 '¶' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 183 0xb7 '·' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 184 0xb8 '¸' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 
00011000 */ /* 185 0xb9 '¹' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 186 0xba 'º' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 187 0xbb '»' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 188 0xbc '¼' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 189 0xbd '½' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 190 0xbe '¾' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 191 0xbf '¿' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 192 0xc0 'À' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 193 0xc1 'Á' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 194 0xc2 'Â' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 195 0xc3 'Ã' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 196 0xc4 'Ä' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 197 0xc5 'Å' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 198 0xc6 'Æ' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 199 0xc7 'Ç' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 200 0xc8 'È' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 201 0xc9 'É' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 
0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 202 0xca 'Ê' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 203 0xcb 'Ë' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 204 0xcc 'Ì' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 205 0xcd 'Í' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 206 0xce 'Î' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 207 0xcf 'Ï' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 
00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 208 0xd0 'Ð' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 209 0xd1 'Ñ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 210 0xd2 'Ò' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 211 0xd3 'Ó' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 212 0xd4 'Ô' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 213 0xd5 'Õ' */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 214 0xd6 'Ö' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 215 0xd7 '×' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 216 0xd8 'Ø' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 217 0xd9 'Ù' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 218 0xda 'Ú' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 
00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 219 0xdb 'Û' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 220 0xdc 'Ü' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 221 0xdd 'Ý' */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ /* 222 0xde 'Þ' */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ /* 223 0xdf 'ß' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 224 0xe0 'à' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xdc, /* 11011100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 225 0xe1 'á' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xd8, /* 11011000 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 226 0xe2 'â' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 227 0xe3 'ã' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 228 0xe4 'ä' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 229 0xe5 'å' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ /* 230 0xe6 'æ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ /* 231 0xe7 'ç' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 232 0xe8 'è' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 233 0xe9 'é' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 234 0xea 'ê' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xee, /* 11101110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 235 0xeb 'ë' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1e, /* 00011110 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x3e, /* 00111110 */ 0x66, /* 01100110 */ 
0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 236 0xec 'ì' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 237 0xed 'í' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x03, /* 00000011 */ 0x06, /* 00000110 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0xf3, /* 11110011 */ 0x7e, /* 01111110 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 238 0xee 'î' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1c, /* 00011100 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 239 0xef 'ï' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 240 0xf0 'ð' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 241 0xf1 'ñ' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 242 0xf2 'ò' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 243 0xf3 'ó' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 244 0xf4 'ô' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 245 0xf5 'õ' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 246 0xf6 'ö' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 247 0xf7 '÷' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 248 0xf8 'ø' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 249 0xf9 'ù' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 250 0xfa 'ú' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 251 0xfb 'û' */ 0x00, /* 00000000 */ 0x0f, /* 00001111 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0xec, /* 11101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x3c, /* 00111100 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 252 0xfc 'ü' */ 0x00, /* 00000000 */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 
00110110 */ 0x36, /* 00110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 253 0xfd 'ý' */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x32, /* 00110010 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 254 0xfe 'þ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 255 0xff 'ÿ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ };
gpl-2.0
xiaowei942/tiny210-uboot
board/sixnet/fpgadata.c
366
73917
0xff, 0x87, 0xff, 0x88, 0x7f, 0xff, 0xf9, 0xff, 0xff, 0xf5, 0xff, 0x8f, 0xff, 0xf0, 0x8f, 0xf9, 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf1, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x7f, 0x7b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x86, 0xf6, 0xf0, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x0f, 0x7f, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xf6, 0xf0, 0xff, 0xff, 0x7f, 0x8f, 0x7f, 0xf0, 0xff, 0x0f, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xff, 0xf8, 0xf7, 0x8f, 0xcf, 0xf0, 0xf6, 0xff, 0xff, 0xef, 0xff, 0xfb, 0x7f, 0x2f, 0x1f, 0x71, 0xf5, 0xff, 0xff, 0xef, 0x7f, 0xff, 0x7f, 0xff, 0xf7, 0xf6, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x7f, 0x77, 0xf7, 0xff, 0xfb, 0x0f, 0xff, 0xf0, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xfe, 0xff, 0x8f, 0x7f, 0xf1, 0xff, 0xff, 0xfa, 0xce, 0xff, 0xfd, 0xff, 0xff, 0x9f, 0xff, 0x8e, 0xff, 0xf0, 0xbf, 0x7f, 0xf5, 0xff, 0xef, 0x9f, 0xfd, 0x81, 0xff, 0xf9, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xef, 0x9f, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0x77, 0xfa, 0xb6, 0xff, 0x78, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xfd, 0x0f, 0x7f, 0xf1, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf6, 0xf7, 0xf6, 0x7f, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xef, 0xbf, 0xf2, 0x7f, 0xef, 0xff, 0xfe, 0xfb, 0xff, 0xef, 0xff, 0xff, 0xf7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xf7, 0xcf, 0x8f, 0xff, 0xf0, 0xef, 0xf9, 0xfb, 0xff, 0xff, 0xff, 0x9f, 0x0f, 0x65, 0xe1, 0xfb, 0x7b, 0xf3, 0xff, 0xf7, 0xf6, 0xfe, 0xff, 0x8f, 0xf6, 0xe8, 0xf6, 0xf1, 0xff, 0xff, 0xff, 0xf9, 0xff, 0xff, 0x6f, 0x61, 0xf1, 0xfb, 0xff, 0xff, 0xde, 0x8f, 0x8f, 0xf0, 0xf0, 0xff, 0xff, 0xf7, 0xbf, 0xff, 0xd4, 0x8f, 0x0f, 0x71, 0xc1, 0x6f, 0xd1, 
0xeb, 0x5f, 0xfd, 0xff, 0x9f, 0xff, 0xfb, 0xff, 0x8f, 0x9f, 0xf7, 0x9f, 0xff, 0xf4, 0xb7, 0xfd, 0xff, 0xfe, 0x8f, 0xbf, 0x71, 0x1f, 0xff, 0x7f, 0xff, 0xfd, 0x87, 0x87, 0xf0, 0x70, 0x1f, 0xf7, 0xbf, 0xff, 0xff, 0xff, 0x8f, 0x0f, 0x71, 0x81, 0xbf, 0x3e, 0x7f, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0x07, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0x8d, 0x7f, 0xf1, 0xff, 0xff, 0x9f, 0x6f, 0xf1, 0xff, 0xbf, 0x71, 0x87, 0xfe, 0xf0, 0x8f, 0x8f, 0xf0, 0xfb, 0xcb, 0xff, 0xf0, 0x8f, 0x7f, 0xf1, 0x8f, 0x1e, 0xe1, 0x7e, 0x91, 0x7f, 0xbf, 0x1a, 0xff, 0x71, 0xff, 0x9f, 0x8f, 0xf6, 0xf8, 0xdf, 0xf7, 0xf4, 0xff, 0xff, 0xff, 0x8f, 0x1f, 0xf0, 0x7f, 0x97, 0xff, 0xbf, 0x97, 0xff, 0xfb, 0xbf, 0xdf, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaf, 0xdf, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xf1, 0xff, 0xff, 0x9f, 0xfc, 0xfb, 0xff, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0x9d, 0xff, 0xf4, 0xcf, 0xff, 0x7f, 0xf7, 0xff, 0xff, 0xff, 0xcf, 0xff, 0x97, 0xff, 0xfa, 0xff, 0x8f, 0xf8, 0xf0, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xfd, 0xff, 0x0f, 0x7f, 0xe1, 0xff, 0xf1, 0xff, 0xff, 0x83, 0x7f, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x6f, 0x7f, 0x77, 0x7d, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x6f, 0xf1, 0xff, 0xd7, 0xff, 0xfe, 0xff, 0xff, 0x9f, 0xfd, 0x78, 0xef, 0xff, 0xbf, 0xff, 0xf5, 0xff, 0xff, 0xbf, 0x0f, 0x79, 0xd1, 0xff, 0xff, 0xd2, 0xff, 0x72, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xfe, 0x70, 0x9d, 0xff, 0xf4, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xbf, 0x7f, 0xff, 0x07, 0xff, 0x78, 0xff, 0x9f, 0xff, 0xfe, 0xff, 0x77, 0x7f, 0x8f, 0x7f, 0xf0, 0xff, 0x8f, 0x7f, 0xe1, 0x0f, 0x71, 0xf1, 0xff, 0xfe, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xfd, 0xff, 0xba, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xef, 0x7f, 0xa1, 0x7f, 0xff, 0xbd, 0x7f, 0xf7, 0xf9, 0xfd, 0xfb, 0xff, 0xff, 0x8f, 0xbf, 0xb7, 0x8f, 0xaf, 0xdf, 
0xff, 0xff, 0xff, 0xff, 0x5f, 0xeb, 0xbf, 0xfd, 0xf8, 0xff, 0xff, 0xfb, 0xff, 0xfb, 0xff, 0xf7, 0xcf, 0xfb, 0xf0, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xef, 0x7f, 0xab, 0xff, 0xfd, 0xfa, 0xbf, 0x8f, 0xbf, 0xca, 0xfe, 0xff, 0xff, 0xdf, 0x6f, 0xd4, 0xf6, 0x0f, 0x3f, 0x11, 0xf9, 0xff, 0x7f, 0x8b, 0xbf, 0xff, 0x8f, 0xff, 0xc0, 0xfb, 0xf5, 0xef, 0xf7, 0x7f, 0xff, 0xff, 0xfb, 0x7f, 0xff, 0x7f, 0xff, 0x6f, 0xff, 0xff, 0xff, 0xbf, 0x87, 0xbb, 0xf8, 0xfb, 0xcf, 0xfe, 0xfe, 0xff, 0xef, 0xff, 0xfb, 0x7f, 0xff, 0xff, 0x8f, 0xff, 0xe1, 0x7f, 0x7b, 0xff, 0xbf, 0x80, 0x89, 0x88, 0xb0, 0xf5, 0xf0, 0xff, 0xf7, 0xdf, 0xfe, 0x7c, 0x8f, 0x0f, 0x71, 0xe1, 0xff, 0xf1, 0xe5, 0x0e, 0x2b, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xcf, 0xf5, 0x9f, 0xff, 0xff, 0xfe, 0xff, 0x8f, 0x7f, 0x71, 0x8f, 0xff, 0x91, 0x7f, 0xfb, 0xff, 0xff, 0x7f, 0x7f, 0xcf, 0x8a, 0xff, 0xf0, 0xff, 0x57, 0xfe, 0xfb, 0x8f, 0xff, 0xf0, 0xff, 0x7e, 0xff, 0xff, 0x9a, 0xff, 0xf1, 0xff, 0xff, 0xcf, 0xb7, 0xce, 0xff, 0xf4, 0xff, 0xff, 0x7f, 0xf7, 0xfb, 0xff, 0xfe, 0xff, 0x7f, 0xff, 0xfd, 0xfe, 0x75, 0xfd, 0xff, 0xef, 0xcf, 0xff, 0xf5, 0xff, 0xf5, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x7f, 0xff, 0xcf, 0x7f, 0x31, 0x7f, 0xff, 0x3f, 0x78, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x0f, 0xf1, 0xf1, 0xdf, 0xff, 0xff, 0xff, 0x9f, 0xff, 0x84, 0x0e, 0xff, 0xf8, 0x7f, 0xf7, 0x7f, 0xff, 0xff, 0x8f, 0x8f, 0x80, 0xf1, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x9f, 0x8e, 0x05, 0x71, 0xbf, 0xf8, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xf1, 0xf1, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x8f, 0x0f, 0xff, 0xf8, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xf1, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x0f, 0x71, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xf7, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf0, 0xff, 0xff, 0x7f, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 
0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff, 0x8f, 0x7e, 0xbf, 0xff, 0x78, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0x8f, 0xff, 0x87, 0x7f, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x7f, 0xf1, 0xff, 0xf8, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0x3f, 0xff, 0xf8, 0xff, 0x8f, 0x7f, 0xf0, 0x8f, 0xff, 0xf0, 0x0f, 0xff, 0x70, 0xff, 0x8f, 0x7e, 0xf1, 0xdf, 0xff, 0xfb, 0x8e, 0xff, 0x80, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaf, 0x7f, 0x84, 0xff, 0xf1, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0x8f, 0x7f, 0xff, 0x80, 0xff, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0x7f, 0x8f, 0xff, 0x81, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf0, 0xdf, 0xdf, 0xff, 0xdf, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf1, 0xff, 0xfd, 0xff, 0xff, 0xff, 0x0f, 0xff, 0x80, 0xff, 0xf0, 0xff, 0xff, 0xdf, 0xff, 0xdf, 0x8e, 0x0f, 0x01, 0x71, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xbf, 0x87, 0xf8, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0x8f, 0x8f, 0xd0, 0xf0, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xfe, 0xff, 0xdf, 0xff, 0xfb, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaf, 0xfe, 0xf5, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x8f, 0xf0, 0x80, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xaf, 0x71, 0xa7, 0x6f, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0x77, 0x79, 0x8f, 0xff, 0xf0, 0x8f, 0xff, 0x00, 
0xff, 0xd0, 0x4f, 0x3d, 0xf0, 0xf7, 0xfd, 0x8f, 0x7f, 0x81, 0x7f, 0xd1, 0xff, 0xcd, 0xff, 0xff, 0x8f, 0x0f, 0x70, 0xf0, 0xff, 0x7f, 0x7f, 0xff, 0xff, 0xdb, 0x8d, 0x4b, 0x73, 0xf9, 0xff, 0xdf, 0xff, 0x3f, 0xfc, 0xff, 0x8f, 0xff, 0xf2, 0x8f, 0x8f, 0x70, 0x7a, 0x3f, 0xbc, 0xf7, 0xdb, 0xff, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xee, 0xff, 0xe8, 0xf7, 0x8f, 0xfd, 0x80, 0xff, 0xf0, 0x9f, 0xa5, 0x7a, 0xf4, 0x6f, 0x3f, 0xcf, 0x07, 0x6a, 0xe1, 0xff, 0x8f, 0x7f, 0xff, 0xff, 0x77, 0xf1, 0x8f, 0x8f, 0xf0, 0xf0, 0xbf, 0xff, 0xe7, 0x7f, 0x8f, 0x24, 0x03, 0x77, 0xf3, 0xff, 0xfe, 0xff, 0xff, 0xbf, 0x9f, 0x77, 0x8b, 0xff, 0xf0, 0xff, 0xef, 0x7d, 0x7f, 0xff, 0x9f, 0xeb, 0x3d, 0xff, 0xf7, 0xff, 0xfb, 0xfe, 0xff, 0xdf, 0xff, 0xff, 0x77, 0xff, 0x8f, 0x8f, 0xf0, 0xf0, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x5d, 0xf5, 0xbb, 0xef, 0xff, 0xff, 0xff, 0x7f, 0x8f, 0x8f, 0xf0, 0xf8, 0xff, 0xff, 0xf7, 0x7f, 0xff, 0xff, 0xaf, 0xbf, 0x75, 0xb7, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x87, 0x7f, 0xf8, 0xff, 0xf7, 0xf7, 0x8f, 0xff, 0xf0, 0x7f, 0xf7, 0xff, 0xad, 0xff, 0xf7, 0xee, 0x9f, 0xff, 0xf5, 0xff, 0xf8, 0x07, 0xff, 0x80, 0x8f, 0x80, 0x80, 0xf0, 0x8f, 0x7f, 0x70, 0x4f, 0x0f, 0x79, 0xf1, 0xfd, 0xff, 0xef, 0x8f, 0x7f, 0xbf, 0x7f, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xd0, 0xbf, 0xdb, 0xe5, 0x3b, 0xfe, 0xf7, 0xff, 0x8f, 0xff, 0xff, 0x8f, 0x77, 0x80, 0xff, 0xf0, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xbd, 0xef, 0x07, 0x7f, 0xf1, 0xfe, 0xff, 0xfe, 0xff, 0x7f, 0x7f, 0xff, 0xf7, 0xf7, 0xff, 0xf7, 0x8f, 0xbf, 0x70, 0xf5, 0x7f, 0xff, 0xef, 0x3f, 0x7d, 0xf7, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0x97, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x7e, 0xff, 0xff, 0x9f, 0xdf, 0xf7, 0x3b, 0xff, 0xf7, 0xff, 0x7f, 0xfe, 0xff, 0x3f, 0x78, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x1f, 0xf1, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80, 0x0e, 0xff, 0xf8, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0x8f, 0x9f, 0x80, 0xe1, 0xf1, 0xff, 0xff, 0xef, 0xff, 0xfe, 0x9f, 0x0e, 0x01, 0x71, 0xbf, 0xf8, 0xf8, 0xff, 0x7f, 0xff, 
0xff, 0xff, 0xff, 0x8f, 0x8f, 0xf1, 0xf1, 0xff, 0xff, 0xef, 0xfe, 0xef, 0xff, 0x8f, 0x0f, 0xff, 0xf8, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0x8f, 0xf1, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x0f, 0x71, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xff, 0xef, 0xfe, 0xef, 0xef, 0xff, 0xff, 0xef, 0xff, 0xff, 0xf7, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xe0, 0xff, 0xff, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7e, 0xbf, 0xff, 0x78, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xef, 0xff, 0xff, 0xff, 0xff, 0xee, 0xef, 0x9f, 0xff, 0x07, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0x8f, 0xff, 0xe0, 0xff, 0xff, 0xff, 0xef, 0x8e, 0x7f, 0xf1, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xfe, 0x8f, 0x7f, 0x3f, 0xff, 0xf8, 0xff, 0x8f, 0x7f, 0xf0, 0xdf, 0xff, 0xf0, 0x0f, 0xff, 0x70, 0xff, 0x8f, 0x7e, 0xe1, 0xdf, 0xff, 0xf7, 0x8e, 0xff, 0x80, 0x7f, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0x80, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xef, 0xfe, 0x8f, 0x7f, 0xff, 0x80, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x8f, 0xff, 0x81, 0x7f, 0xf0, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xff, 0x7f, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7e, 0xf1, 0xff, 0xff, 0xff, 0xf7, 0xff, 0x0f, 0x8f, 0x80, 0xf7, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9e, 0x6f, 0x91, 0x71, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xef, 0xff, 0xff, 0xbf, 0x87, 0xf8, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xf0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x8f, 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xfe, 0xff, 0xef, 0xff, 0xd7, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0x8f, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xff, 0xfe, 0xf9, 0xdf, 0xff, 0xff, 0xff, 0x8f, 0xbf, 0xf7, 0x9f, 0xf8, 0xf0, 0xff, 0xff, 0x77, 0xff, 0x0e, 0x1f, 0x61, 0x81, 0x7f, 0xf1, 0xfe, 0xff, 0xff, 0xff, 0x7f, 0xb9, 0xcf, 0xff, 0xff, 0x0f, 0xff, 0x00, 0xff, 0xd0, 0x7f, 0x75, 0x8b, 0x7f, 0xf1, 0x8f, 0x7f, 0x80, 0x7e, 0x91, 0xff, 0xbf, 0xdf, 0xff, 0xa7, 0x47, 0x70, 0xf7, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0x0f, 0x61, 0xf1, 0xef, 0xff, 0xff, 0x7f, 0xfe, 0xef, 0x5f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xb7, 0xfc, 0xeb, 0x9f, 0x7f, 0xf1, 0x9f, 0x0f, 0x71, 0xf1, 0xee, 0xff, 0xf0, 0xf7, 0x3f, 0xef, 0x97, 0xf8, 0xe8, 0xff, 0x9f, 0x7f, 0xf0, 0x7f, 0x9f, 0x6f, 0x91, 0x7e, 0xf1, 0x9f, 0x8f, 0x57, 0xff, 0xff, 0x26, 0xb9, 0xb8, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xf7, 0x7f, 0x6f, 0xf4, 0x9f, 0x1f, 0x71, 0xe1, 0xfe, 0x7f, 0xff, 0xbf, 0xff, 0x71, 0xbb, 0xe8, 0xff, 0xff, 0xf8, 0xbf, 0xff, 0xaf, 0xff, 0xf8, 0x9d, 0x6f, 0xf1, 0xbf, 0xff, 0xb7, 0xff, 0xbd, 0xbf, 0xff, 0xff, 0xdf, 0x97, 0xc7, 0xf7, 0xf0, 0xff, 0xff, 0x93, 0xff, 0xff, 0xef, 0xcf, 0x5f, 0xf1, 0xf7, 0xdf, 0xf5, 0x9f, 0xff, 0xff, 0x87, 0xbf, 0xe0, 0xbf, 0xf7, 0xff, 0xf7, 0x7f, 0xff, 0xff, 0x8f, 0x5f, 0x21, 0xb1, 0xff, 0x6d, 0xff, 0xef, 0xff, 0xff, 0xff, 0xd7, 0xff, 0xb8, 0xff, 0xff, 0xff, 0x3f, 0xef, 0xf0, 0x7f, 0xd7, 0x7f, 0xf1, 0xff, 0xef, 0xee, 0xbf, 0x7f, 0xf1, 0xff, 0xf8, 0x47, 0x0f, 0xc7, 0xf0, 0x7f, 0xf0, 0xf0, 0x90, 0x7f, 0x70, 0x8f, 0x2f, 0xc1, 0x0f, 0x11, 0x1f, 0xef, 0xaf, 0x7f, 0xbf, 0x7f, 0xf0, 0x9f, 0xe7, 0xf7, 0x38, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf0, 0xaf, 0xff, 0xff, 0xbf, 0xfe, 0xfd, 0xdf, 0x8f, 0xff, 0xff, 0xbf, 0xf7, 0x8f, 0xff, 0xf7, 0xff, 0xeb, 0xff, 0xff, 0xff, 0x8d, 0x3f, 0x81, 0x7f, 0xd1, 0xfe, 0xdf, 0xfe, 0xff, 
0x7f, 0xff, 0xff, 0xdf, 0xa8, 0xff, 0xf0, 0xff, 0xff, 0xf0, 0xf7, 0xff, 0xff, 0xff, 0xef, 0xef, 0xef, 0x9f, 0x7f, 0x7e, 0xfe, 0xff, 0xff, 0xef, 0xff, 0xa7, 0x77, 0xff, 0xff, 0xef, 0xff, 0xff, 0xdf, 0xff, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0x3f, 0x78, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0x0f, 0x0f, 0xf1, 0xe1, 0xff, 0xff, 0xef, 0xef, 0xff, 0xff, 0x8e, 0x0e, 0xff, 0xf8, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0x8f, 0x8f, 0x80, 0xf1, 0xf1, 0xef, 0xaf, 0xaf, 0xff, 0xee, 0xdf, 0x0e, 0x01, 0x71, 0xbf, 0xf8, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xef, 0x8f, 0x9f, 0xf1, 0xe1, 0xff, 0xaf, 0xef, 0xfe, 0xff, 0xff, 0x8f, 0x0f, 0xff, 0xf8, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0x9f, 0xf1, 0xf1, 0xef, 0xff, 0xaf, 0xff, 0xff, 0xff, 0x8e, 0x0f, 0x71, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xbf, 0xef, 0xff, 0xef, 0xbf, 0xff, 0xef, 0xff, 0xff, 0xf7, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xef, 0xff, 0xef, 0xfe, 0xcf, 0x3f, 0xf0, 0xff, 0xff, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0x88, 0xff, 0xf0, 0xff, 0xff, 0xef, 0xfe, 0xff, 0xff, 0xff, 0x8f, 0x6e, 0xbf, 0xff, 0x78, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xe0, 0xff, 0xef, 0xff, 0xff, 0xff, 0xee, 0xef, 0x9f, 0xff, 0x8f, 0x7f, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0x8f, 0xff, 0xa0, 0xff, 0xfe, 0xff, 0xbf, 0x8e, 0x6f, 0xf1, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x6f, 0x3f, 0xff, 0xf8, 0xff, 0x8f, 0x7f, 0xf0, 0xcf, 0xff, 0xb0, 0x0f, 0xaf, 0x70, 0xff, 0x8f, 0x7e, 0xf1, 0xff, 0xff, 0xf1, 0x9e, 0xff, 0x80, 0x7f, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xef, 0x8f, 0x7f, 0x90, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xaf, 0xfe, 0x8f, 0x7f, 0xff, 0x80, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xdf, 0xff, 0x81, 0x7f, 0xf0, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xbf, 0x7e, 0xf1, 0xff, 0xff, 0x7f, 0xff, 0xff, 0x0f, 0xaf, 0x80, 0xf0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0x0f, 0x91, 0x7f, 0xf1, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xff, 0xfe, 0xff, 0xff, 0xbf, 0xff, 0xfb, 0xbf, 0x87, 0xf8, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xdf, 0x8f, 0x8f, 0xf0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xdf, 0xbf, 0xff, 0xef, 0xff, 0xff, 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xaf, 0xff, 0xf0, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xdf, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x8f, 0xf0, 0x8f, 0xff, 0xf0, 0xf9, 0xff, 0xf7, 0xff, 0x0f, 0x5f, 0x29, 0x89, 0x77, 0xf1, 0xfa, 0xff, 0xde, 0xff, 0xc3, 0x3f, 0x4b, 0x7f, 0xe9, 0x0f, 0xff, 0x00, 0xff, 0x90, 0x0f, 0xd7, 0xff, 0x7f, 0xf9, 0x8f, 0x7f, 0x81, 0x7f, 0x81, 0xff, 0xff, 0xfb, 0x7d, 0x80, 0x46, 0x76, 0xf0, 0xff, 0xff, 0x6f, 0xff, 0xff, 0xad, 0xcf, 0x3f, 0x71, 0xf9, 0xff, 0xff, 0xff, 0x3f, 0xba, 0xff, 0xc7, 0xf7, 0xb9, 0xcf, 0xde, 0x77, 0xb7, 0x77, 0xfe, 0xff, 0xbf, 0x6f, 0xf9, 0xff, 0x7e, 0x79, 0xb9, 0xfe, 0xff, 0xe4, 0xf7, 0x8f, 0xfe, 0x07, 0xfe, 0xf8, 0xff, 0x89, 0x7f, 0xe8, 0x7f, 0xd7, 0x7f, 0x99, 0x76, 0xf1, 0xff, 0x0f, 0x7b, 0xbf, 0xff, 0xb6, 0xb9, 0x8f, 0xdf, 0xf6, 0xff, 0xff, 0xf7, 0xff, 0xff, 0x8f, 0xdd, 0x87, 0x7f, 0x71, 0xf1, 0xfe, 0xff, 0xff, 0xff, 0x7f, 0xf1, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xcf, 0xfb, 0xe8, 0x9d, 0x77, 0xa9, 0xff, 0x77, 0xda, 0x7f, 0xff, 0xbf, 0xff, 0xf7, 0xf7, 0x86, 0xe5, 0xf0, 0xe0, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xef, 0x8f, 0x7f, 0xbd, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xef, 0x86, 0x8f, 0xf0, 0xff, 0xf6, 0x9f, 0xff, 0x7f, 0xff, 0xff, 0xcf, 0x1f, 0x71, 
0xdd, 0x7f, 0xe1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xf7, 0xb9, 0xff, 0xff, 0xfa, 0x3f, 0xef, 0xf0, 0xff, 0xef, 0x7f, 0xd5, 0xff, 0xfb, 0xff, 0xf7, 0x6e, 0xf1, 0xff, 0xfc, 0xc7, 0xbf, 0xc8, 0xc0, 0x59, 0xff, 0xdf, 0xff, 0x7b, 0xf0, 0xa7, 0x1f, 0xa9, 0x77, 0x79, 0x71, 0x11, 0xff, 0x79, 0xbf, 0xfb, 0x70, 0xbf, 0xff, 0xf9, 0x37, 0xbe, 0xff, 0xff, 0x8f, 0x7f, 0xf4, 0x9f, 0xff, 0xff, 0xd7, 0x7f, 0xff, 0xff, 0xaf, 0xff, 0xff, 0x9e, 0xf7, 0x9f, 0xfe, 0xe4, 0xff, 0xcf, 0xcf, 0xff, 0xff, 0xdf, 0x7f, 0x8d, 0x7f, 0xf9, 0xfa, 0xdf, 0x9f, 0xef, 0x7f, 0xef, 0xff, 0xff, 0xbe, 0xfd, 0xd2, 0xdf, 0xff, 0x7e, 0xf7, 0xff, 0xff, 0xab, 0x97, 0xef, 0xf3, 0xfe, 0x7f, 0x71, 0xfe, 0xff, 0x9f, 0xff, 0xff, 0xb6, 0xfb, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xbf, 0xff, 0xb7, 0xdb, 0xff, 0xbb, 0xef, 0xff, 0xff, 0xff, 0x3f, 0x68, 0xfe, 0xfd, 0xfb, 0xff, 0xff, 0xef, 0xf1, 0x1e, 0x1b, 0xf1, 0xf5, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xfb, 0x9a, 0x36, 0xff, 0xfc, 0x7d, 0xff, 0x73, 0xf7, 0xff, 0xaf, 0x9f, 0x94, 0xfd, 0xf5, 0xff, 0xf7, 0xff, 0xfb, 0xfe, 0xef, 0x3e, 0x07, 0x4d, 0xbf, 0xe8, 0xf8, 0xff, 0x7f, 0xff, 0xf7, 0xf7, 0xf1, 0x8f, 0xaf, 0xd1, 0xf7, 0xf9, 0xfd, 0xff, 0xf8, 0xdf, 0xfb, 0x8f, 0x2f, 0xff, 0xf8, 0x7f, 0xff, 0xf7, 0xf7, 0xff, 0xff, 0xa7, 0xaf, 0xf7, 0xf3, 0xdf, 0xff, 0xfd, 0xff, 0xfd, 0xff, 0xae, 0x0f, 0x71, 0xff, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xf3, 0xf3, 0xff, 0xf3, 0xff, 0xf7, 0xfb, 0xf3, 0xff, 0xff, 0xff, 0xeb, 0xff, 0xf3, 0xdb, 0xff, 0xeb, 0x7b, 0xfb, 0xf7, 0xff, 0x8b, 0xf7, 0xfc, 0xf7, 0xfb, 0xff, 0xfb, 0xf3, 0xff, 0xff, 0x8b, 0x7f, 0xd4, 0xfb, 0xff, 0x7f, 0xec, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xf7, 0xff, 0x8e, 0xff, 0xf8, 0xf7, 0xfb, 0xfd, 0xff, 0xfd, 0x9f, 0xf7, 0x9f, 0x7e, 0xbf, 0xfb, 0x7c, 0xff, 0xf7, 0xff, 0xff, 0xfb, 0xfb, 0xf1, 0x8f, 0xf3, 0xdc, 0xf7, 0xfd, 0xff, 0xe9, 0xeb, 0xef, 0xc3, 0xb7, 0xff, 0x07, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xf7, 0x8f, 0xff, 0xf4, 0x8f, 0xfb, 0xfc, 0xff, 0xef, 0xff, 0xf7, 0x8f, 0x7f, 0xd1, 0xff, 0xfa, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x89, 0xef, 0xf8, 
0xff, 0xf7, 0xff, 0xef, 0xef, 0xf7, 0xf3, 0xab, 0x7f, 0x3f, 0xf9, 0x7e, 0xf9, 0x8f, 0x7f, 0xf0, 0xef, 0xff, 0xfc, 0x1b, 0xff, 0x7c, 0xff, 0x8f, 0x6e, 0xf1, 0xf7, 0x73, 0xff, 0xa6, 0xff, 0x80, 0x7f, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xf9, 0x8f, 0x7f, 0x84, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x8f, 0x7f, 0xff, 0x96, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0x57, 0xaf, 0xfb, 0x85, 0x7f, 0xf4, 0xff, 0xfe, 0xef, 0xff, 0xef, 0xbf, 0x53, 0xff, 0x7d, 0xff, 0xff, 0xe3, 0xff, 0xff, 0xff, 0x97, 0x71, 0xf8, 0xff, 0xff, 0xff, 0xdb, 0xef, 0xef, 0xe7, 0x97, 0x72, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x0f, 0xe3, 0x86, 0xf0, 0xf4, 0xfb, 0xff, 0xdf, 0xff, 0xfb, 0x8e, 0x0b, 0xa5, 0x72, 0xf9, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xf7, 0xff, 0xf3, 0xff, 0xf7, 0xff, 0xf3, 0xff, 0xff, 0xff, 0xfb, 0xee, 0xfb, 0xff, 0xef, 0xff, 0xff, 0xbf, 0x82, 0xf8, 0xf8, 0xf7, 0x7f, 0xf7, 0xff, 0xff, 0xef, 0x87, 0x87, 0xf0, 0xf0, 0xfb, 0xff, 0xfb, 0xf7, 0xef, 0xef, 0x87, 0xff, 0xf6, 0xff, 0xfa, 0xf1, 0xef, 0xf3, 0xf7, 0x7f, 0xff, 0xff, 0xef, 0xff, 0xf7, 0xff, 0xff, 0xfb, 0xf7, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xfb, 0xf2, 0xf3, 0xff, 0xf1, 0xf7, 0xff, 0xef, 0xf7, 0xef, 0xf7, 0xf7, 0xff, 0xfe, 0xff, 0xff, 0xef, 0xff, 0xe7, 0xff, 0xfb, 0xfb, 0xff, 0xf5, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x77, 0xff, 0xff, 0xfe, 0xff, 0xf7, 0xff, 0xef, 0xef, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xef, 0xe5, 0xff, 0xfe, 0x61, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xef, 0xef, 0xf3, 0xf7, 0xff, 0xff, 0x0f, 0x9f, 0xfa, 0x87, 0xff, 0xf6, 0xeb, 0xff, 0xff, 0xef, 0x0f, 0x6f, 0xfd, 0x0d, 0x53, 0xf1, 0xf3, 0xff, 0xff, 0xbf, 0x1b, 0x7f, 0x96, 0xfe, 0xff, 0x8f, 0xfb, 0x00, 0xff, 0xb0, 0x17, 0x7c, 0x8f, 0xff, 0xfd, 0x8f, 0x7f, 0x81, 0x7e, 0xf1, 0xff, 0xfd, 0xed, 0xee, 0x9e, 0x0b, 0x79, 0xff, 0xfb, 0x77, 0x5b, 0xff, 0x9f, 0xff, 0x4f, 0x0f, 0x71, 0xf0, 0xdb, 0xff, 0xf7, 0x7f, 0xe7, 0xef, 0x18, 0xff, 0xff, 0x9d, 0x8e, 0x67, 0xbf, 0x4f, 0xff, 0xff, 0xae, 0xff, 0xf1, 0xeb, 0xef, 0xfd, 0xad, 0xf6, 0xff, 0xfc, 0xf7, 0x1f, 0xff, 0x9f, 0xfb, 0xfc, 0xff, 
0x8f, 0x77, 0xec, 0x5f, 0x6f, 0xdf, 0x25, 0x7e, 0xd9, 0xe6, 0x97, 0x3f, 0xff, 0xf7, 0x67, 0xec, 0x92, 0xbe, 0xf1, 0xfb, 0xff, 0x7f, 0xdf, 0x7b, 0x5e, 0x7d, 0xe7, 0x5f, 0xf1, 0xf1, 0xfb, 0xff, 0xf7, 0xbf, 0xf7, 0x71, 0x9a, 0xfd, 0xff, 0xf7, 0xfb, 0x5f, 0x7f, 0xaf, 0xdf, 0xf9, 0xe7, 0x77, 0xdd, 0x6f, 0xf7, 0xbb, 0xff, 0x8b, 0xbf, 0xff, 0x77, 0xff, 0x93, 0xfe, 0xf8, 0xfe, 0xbf, 0xfe, 0xbf, 0xff, 0xff, 0xbf, 0xab, 0x7f, 0xfd, 0xff, 0xcf, 0x67, 0xff, 0xff, 0x7f, 0x07, 0x9f, 0xe4, 0xdb, 0xff, 0xf1, 0xf7, 0x7f, 0xff, 0xff, 0x8f, 0x6f, 0xd1, 0x6d, 0x73, 0xff, 0xff, 0xfb, 0xff, 0xff, 0x6f, 0x9f, 0x7b, 0xfd, 0xff, 0xf6, 0xfd, 0x27, 0xff, 0xfc, 0xff, 0xaf, 0xff, 0xfd, 0xfe, 0x7f, 0xdf, 0xff, 0x7f, 0xef, 0xff, 0xfe, 0x81, 0xe7, 0x93, 0x91, 0x83, 0x85, 0xef, 0x8f, 0x7f, 0x74, 0x8d, 0x1b, 0x2d, 0xe2, 0xcd, 0xe5, 0xb5, 0x9f, 0x77, 0xbf, 0x7f, 0xe4, 0xef, 0xff, 0xf7, 0xdb, 0xfd, 0x7f, 0xfe, 0xab, 0x7f, 0xfc, 0xbf, 0xff, 0xde, 0x77, 0xfb, 0xdf, 0xef, 0xbf, 0xff, 0xff, 0x1e, 0x7f, 0x8f, 0xff, 0x92, 0xf3, 0xdf, 0x7b, 0xff, 0x7b, 0xff, 0xdb, 0x3d, 0x5f, 0xf9, 0xf6, 0xff, 0xf2, 0xf7, 0x7f, 0x7f, 0xff, 0xff, 0xef, 0xd2, 0xf0, 0xb7, 0xfb, 0x7f, 0xfc, 0x77, 0xd7, 0x3f, 0xc7, 0x7f, 0xf3, 0xe7, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xef, 0x7b, 0xef, 0xf5, 0xda, 0xff, 0x7c, 0xff, 0xff, 0xff, 0xff, 0x7b, 0xeb, 0xfb, 0xef, 0xff, 0xef, 0xff, 0xff, 0x3f, 0x60, 0xfc, 0xfb, 0xf7, 0xff, 0xff, 0xff, 0xfb, 0x00, 0x0f, 0xf1, 0xf5, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x86, 0x3e, 0xff, 0xf8, 0x7f, 0xfb, 0x73, 0xff, 0xff, 0x9f, 0xab, 0x8c, 0xf5, 0xd1, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xeb, 0x36, 0x0d, 0x49, 0xbf, 0xf0, 0xfc, 0xfb, 0x73, 0xff, 0xf3, 0xff, 0xff, 0xab, 0xa7, 0xf1, 0xf9, 0xff, 0xf7, 0xdf, 0xfa, 0xfb, 0xff, 0xa7, 0x3f, 0xff, 0xf8, 0x7f, 0xff, 0xfb, 0xfb, 0xfb, 0xff, 0xaf, 0x8f, 0xf9, 0xf9, 0xdf, 0xdf, 0xf7, 0xdb, 0xff, 0xff, 0xba, 0x2f, 0x69, 0xff, 0xe7, 0xfb, 0xfb, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xfb, 0xd7, 0xff, 0xdf, 0xf7, 0xd7, 0xdf, 0xf3, 0xdb, 0xff, 0xdb, 0xff, 0xff, 0xe3, 0x7b, 0xf9, 0xfb, 0xff, 
0x8f, 0xfb, 0xf8, 0xff, 0xff, 0xef, 0xdf, 0xf3, 0xd7, 0xdf, 0xa3, 0x5b, 0xc4, 0xfb, 0xef, 0x7f, 0xe0, 0xfd, 0xfb, 0xfb, 0xff, 0xfb, 0xeb, 0xff, 0x8c, 0xeb, 0xf0, 0xd3, 0xff, 0xd7, 0xff, 0xf7, 0xbb, 0x7f, 0x8f, 0x7e, 0xbf, 0xfb, 0x6c, 0xfb, 0xfb, 0xff, 0xfb, 0xff, 0xfb, 0xf3, 0x8b, 0xf3, 0xf4, 0xf7, 0xd7, 0xff, 0xf3, 0xff, 0xfe, 0xc2, 0xbf, 0xff, 0x87, 0x7f, 0xfa, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0x8f, 0xff, 0xf4, 0xff, 0xdf, 0xff, 0xfb, 0x8f, 0x7f, 0xc5, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf3, 0x87, 0xef, 0xfc, 0xfd, 0xfb, 0xff, 0xff, 0xdf, 0xff, 0xfb, 0xab, 0x7f, 0x3f, 0xf3, 0xfa, 0xf9, 0x8f, 0x7f, 0xf0, 0xeb, 0xfb, 0xec, 0x1f, 0xcf, 0x7e, 0xff, 0x8f, 0x5e, 0xd1, 0xbf, 0xff, 0xfe, 0xaa, 0xff, 0x80, 0x7d, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xf7, 0x8f, 0x5f, 0x8c, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x9f, 0x6f, 0xff, 0x9a, 0xfd, 0xfc, 0xff, 0xff, 0xff, 0xff, 0x6f, 0xbf, 0xd7, 0x89, 0x7f, 0xf4, 0xff, 0xfe, 0xff, 0xff, 0xdf, 0xbf, 0x6f, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xff, 0xfb, 0xff, 0x2b, 0x73, 0xf0, 0xf3, 0xff, 0xff, 0xc3, 0xff, 0xff, 0xff, 0x8b, 0x62, 0xfd, 0xff, 0xef, 0xff, 0xff, 0xfb, 0x0f, 0x8b, 0x8e, 0xf0, 0xdc, 0xf7, 0xff, 0xff, 0xff, 0xfb, 0xae, 0x43, 0xa9, 0x73, 0xf9, 0xfb, 0x7f, 0xf9, 0xff, 0xff, 0xfd, 0xff, 0xf9, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf3, 0xfe, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x87, 0xf8, 0xf8, 0xf9, 0x7f, 0xf9, 0xff, 0xff, 0x7f, 0x8f, 0x8f, 0xf0, 0xf0, 0xf3, 0xff, 0xf3, 0xfb, 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xff, 0xfb, 0xef, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xf7, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xfe, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xff, 0xf1, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xfe, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xf1, 0xff, 0x85, 0xff, 0xfe, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xf3, 0xde, 0xff, 0xf3, 0xff, 0xbf, 0xff, 0x0f, 
0x9f, 0xfa, 0x9f, 0xeb, 0xf2, 0xe7, 0xff, 0x7b, 0xff, 0x4f, 0x73, 0x31, 0x81, 0x5f, 0xf1, 0xfe, 0xff, 0xbf, 0xff, 0xaf, 0x7f, 0x94, 0xfb, 0xfe, 0x8f, 0xff, 0x00, 0xff, 0xf0, 0xef, 0xef, 0x5f, 0xfb, 0xf5, 0x8f, 0x7f, 0x81, 0x5e, 0xf1, 0xff, 0xf9, 0xff, 0xef, 0x86, 0x0f, 0x71, 0xf6, 0xff, 0x7f, 0x7f, 0x97, 0xcf, 0xfd, 0xbf, 0x5f, 0xf9, 0xf1, 0xf3, 0xff, 0xff, 0x3f, 0xdb, 0xed, 0x1e, 0xff, 0xf6, 0x95, 0x9a, 0x6f, 0x3d, 0xff, 0xf8, 0xfb, 0xdf, 0xf7, 0xfd, 0xfb, 0xf7, 0xfd, 0xed, 0xde, 0x7f, 0xf0, 0xf7, 0x87, 0x7f, 0x9b, 0xff, 0xec, 0x9f, 0xbf, 0x7f, 0xcd, 0x7f, 0xf7, 0x3b, 0xad, 0x7e, 0xf8, 0xff, 0xbb, 0x79, 0xff, 0xff, 0xe3, 0x7c, 0x01, 0x8d, 0xf5, 0xfb, 0xe7, 0xf7, 0xff, 0xff, 0x9e, 0x7d, 0x0f, 0x7f, 0xf1, 0xcd, 0xfe, 0xf7, 0xff, 0x3f, 0xd7, 0xf4, 0x9a, 0xf7, 0xed, 0xff, 0xf3, 0xb7, 0xff, 0xef, 0xff, 0xbd, 0xe7, 0x5f, 0xbd, 0xff, 0xef, 0xfe, 0x7f, 0xf1, 0x3f, 0xff, 0xe7, 0xff, 0xcf, 0xfa, 0xf8, 0xff, 0xff, 0xdf, 0xbf, 0xfe, 0xdf, 0xff, 0xd3, 0x1f, 0xfd, 0xef, 0x7f, 0xff, 0xcf, 0x7f, 0xff, 0x93, 0xdf, 0xf0, 0xef, 0xf3, 0xd4, 0x77, 0x6f, 0xff, 0xff, 0xbf, 0x7f, 0x7d, 0xfd, 0x7f, 0x7d, 0xff, 0xff, 0xf7, 0xff, 0xf7, 0xdf, 0xfb, 0xbc, 0xef, 0xff, 0xfd, 0xff, 0xff, 0xfc, 0x7f, 0xb7, 0xff, 0xfd, 0x5f, 0xcf, 0xff, 0xef, 0x7f, 0xfd, 0xff, 0xee, 0x87, 0xef, 0x92, 0xf0, 0x7e, 0xe5, 0xbf, 0x8f, 0x7f, 0x60, 0xd9, 0xdb, 0x71, 0xb3, 0x2d, 0x49, 0x6c, 0x29, 0x7f, 0xbf, 0xff, 0xe4, 0x6f, 0xf3, 0xfa, 0x57, 0xfd, 0xff, 0xfe, 0xb7, 0x7f, 0xfc, 0xff, 0x73, 0xdf, 0xf3, 0x7f, 0xfd, 0xff, 0xbf, 0xff, 0xef, 0x8b, 0x7f, 0x8f, 0xff, 0xf2, 0xff, 0xff, 0xf7, 0xfb, 0xff, 0xff, 0xdf, 0xed, 0xef, 0xf1, 0xf7, 0xfd, 0xdf, 0xf7, 0xff, 0xff, 0xff, 0xf7, 0xe7, 0xe6, 0xf1, 0xff, 0xdf, 0xfb, 0xe9, 0xfe, 0xbf, 0xff, 0xbf, 0x5f, 0xff, 0xbf, 0x0e, 0x75, 0xfa, 0xff, 0xff, 0xff, 0x6f, 0xfb, 0xf9, 0xff, 0xff, 0xf3, 0xff, 0xfb, 0xbf, 0xef, 0xff, 0xf3, 0x7f, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0x38, 0xf8, 0xf7, 0xff, 0xff, 0xdf, 0x9f, 0xf7, 0x0b, 0x0f, 0xf5, 0xf5, 0xff, 0xff, 0xff, 0xbf, 0xf7, 0xf3, 0x8e, 0x0e, 
0xbf, 0xe8, 0x6f, 0xef, 0x7f, 0xff, 0xdf, 0xdf, 0xef, 0x88, 0xf5, 0x91, 0xfb, 0xff, 0xff, 0xbf, 0xfe, 0xbf, 0xa6, 0x81, 0x71, 0xff, 0xf0, 0xf8, 0xff, 0x67, 0xef, 0xff, 0xb7, 0xf7, 0x8f, 0x2f, 0xd1, 0x41, 0xff, 0xcf, 0x5f, 0xfe, 0xff, 0x7b, 0x8f, 0x9f, 0xff, 0xf8, 0x6f, 0xef, 0xf7, 0xe7, 0xff, 0xff, 0xbf, 0x8f, 0xd1, 0xf1, 0xcf, 0xdf, 0xcf, 0xdf, 0xff, 0xff, 0x9f, 0x8f, 0xe1, 0xff, 0xe7, 0xff, 0xf7, 0xe7, 0x6f, 0xf7, 0xe7, 0xe7, 0x77, 0xef, 0xef, 0x6f, 0xff, 0xff, 0xdf, 0xff, 0xdf, 0xdf, 0xff, 0xff, 0xff, 0xa7, 0x6f, 0xff, 0xf7, 0xef, 0x97, 0xe7, 0xf0, 0xef, 0x7f, 0xaf, 0x4f, 0xff, 0xff, 0xdf, 0xbf, 0x5f, 0xe0, 0x7f, 0xef, 0x7f, 0xa0, 0xef, 0xff, 0xe7, 0xff, 0xf7, 0xf7, 0xff, 0x8b, 0xbf, 0xf8, 0xdf, 0xff, 0xcf, 0x7e, 0xff, 0xdf, 0x7f, 0x8e, 0x5f, 0xff, 0xff, 0x38, 0xff, 0xf7, 0xff, 0xf7, 0xf7, 0xf7, 0xf7, 0x8f, 0xf7, 0xf8, 0xf7, 0xcf, 0xff, 0xff, 0xff, 0xfe, 0xcb, 0x3f, 0x3f, 0x9f, 0x7f, 0xf8, 0xff, 0xef, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xaf, 0xff, 0xf0, 0xff, 0xdf, 0xff, 0xff, 0xae, 0x7f, 0xc1, 0x7f, 0xf0, 0x7f, 0xff, 0xff, 0xff, 0xef, 0xff, 0xf7, 0xbf, 0xbf, 0xd0, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xff, 0x9b, 0xff, 0x7f, 0xcf, 0xf8, 0xff, 0x8f, 0x6f, 0xe0, 0xd7, 0xf7, 0xf7, 0xff, 0xfe, 0xf0, 0xfe, 0x8f, 0x5e, 0xd1, 0xff, 0xdf, 0xdf, 0xbe, 0xff, 0x84, 0x7f, 0xf8, 0xff, 0x7f, 0xdf, 0xff, 0xff, 0xaf, 0x7f, 0x81, 0x7f, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x9f, 0x3f, 0xff, 0xd8, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x0f, 0xff, 0x85, 0x7f, 0xf0, 0xff, 0xfe, 0xbf, 0xff, 0xdf, 0x6f, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xaf, 0xff, 0xf7, 0xdf, 0xf7, 0x47, 0xf4, 0xff, 0xef, 0xff, 0xdf, 0x7f, 0xff, 0xbf, 0xcf, 0x5a, 0xf1, 0xff, 0xbf, 0xbf, 0xff, 0xff, 0x3f, 0x8f, 0xc0, 0xf3, 0xd1, 0xff, 0xfb, 0xef, 0xff, 0xdf, 0xbe, 0x0f, 0x25, 0xe9, 0xd1, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, 0xf7, 0xf7, 0x2f, 0xaf, 0xf3, 0xfb, 0xef, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xcf, 0xbf, 0xfb, 0xbf, 0x87, 0xf8, 0xf8, 0xdf, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xe0, 0xf0, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xfe, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xf7, 0xff, 0xff, 0xcf, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xfe, 0xcf, 0xff, 0x7b, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xdf, 0xff, 0xbf, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xef, 0xff, 0xfb, 0xbe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xd4, 0xbf, 0xf0, 0xbf, 0xff, 0xff, 0xff, 0x93, 0x2f, 0xfd, 0xad, 0xf7, 0x75, 0xff, 0xff, 0xfe, 0xbf, 0x7f, 0xff, 0x9a, 0xff, 0xf4, 0x0f, 0xff, 0x00, 0xde, 0xf0, 0xf3, 0xf9, 0xbf, 0x7d, 0xff, 0x8f, 0x7f, 0x81, 0x0f, 0xd1, 0xff, 0xfb, 0xdf, 0xee, 0x8b, 0x0b, 0x78, 0xf0, 0xff, 0xfa, 0x7f, 0xbf, 0xff, 0xd5, 0x8f, 0x8f, 0xe1, 0xf7, 0xfb, 0xfb, 0xff, 0x7f, 0xb7, 0x99, 0xef, 0xdf, 0xf4, 0xff, 0xff, 0xe4, 0xf4, 0x5d, 0xf6, 0xef, 0x9f, 0xef, 0xf7, 0x3b, 0x3f, 0xdf, 0xbf, 0xec, 0xff, 0xec, 0xf7, 0xb9, 0x6b, 0xbc, 0xfb, 0xf7, 0xef, 0xff, 0x7e, 0xfd, 0x7e, 0xbb, 0xdf, 0x85, 0xfe, 0xf7, 0xff, 0x7b, 0x7f, 0xff, 0xff, 0xa7, 0xee, 0xe7, 0x5f, 0xe0, 0xf0, 0xff, 0xff, 0xff, 0x5f, 0xe6, 0x6f, 0x81, 0x8d, 0xd5, 0xf7, 0xbf, 0xef, 0xb6, 0xff, 0xd7, 0xf4, 0xee, 0xb7, 0x7c, 0xff, 0xd7, 0xaf, 0x7f, 0xed, 0x9f, 0xe5, 0xbf, 0xf7, 0x7d, 0xfb, 0xb7, 0xad, 0xd7, 0xfd, 0xbf, 0xff, 0xff, 0xc7, 0x8b, 0xff, 0xf0, 0xf6, 0xff, 0xfd, 0xfb, 0xff, 0xdf, 0xbe, 0x0f, 0x7f, 0xd5, 0xf7, 0xff, 0xf2, 0xfe, 0xff, 0xff, 0xc5, 0xff, 0xf0, 0x7c, 0xff, 0xad, 0x7f, 0x7f, 0xef, 0xff, 0xcf, 0x4f, 0xf1, 0xf5, 0x7b, 0xdd, 0xff, 0xdf, 0xff, 0xff, 0x77, 0xef, 0xff, 0xd8, 0xbf, 0xf7, 0xf3, 0x5f, 0xfb, 0xf9, 0x7f, 0xe7, 0xff, 0xd7, 0x7f, 0xad, 0xff, 0xfb, 0xfb, 0xf3, 0xff, 0xcc, 0x95, 0x8f, 0xd8, 0xf3, 0xfc, 0xbc, 0xdc, 0xdf, 0xbb, 0x44, 0x8b, 0xcb, 0x87, 0xb1, 0xb7, 0xa7, 0x97, 0xee, 0xf3, 0xff, 0x7f, 0xb4, 0xbf, 0xff, 0xc7, 0x7f, 0xcb, 0xfd, 0xbf, 0x7f, 0x7f, 0x74, 0xe7, 0xdf, 
0xf5, 0xbb, 0xcf, 0xed, 0xfe, 0xfd, 0xff, 0x3f, 0xfb, 0x77, 0xcc, 0xbb, 0xf0, 0xfb, 0xff, 0xef, 0xbe, 0xff, 0xcf, 0xff, 0x85, 0x3f, 0xb5, 0xff, 0xf7, 0x37, 0x7f, 0x3f, 0xf7, 0xbf, 0xcf, 0x9f, 0xd7, 0xf7, 0xef, 0xff, 0x78, 0xe7, 0xff, 0xff, 0xff, 0x1f, 0x7f, 0x65, 0xbf, 0xbf, 0xff, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xf7, 0xdf, 0xff, 0x77, 0x7f, 0xff, 0xff, 0xbf, 0xbf, 0xde, 0x77, 0xdd, 0xff, 0xff, 0xfe, 0xff, 0xbf, 0x68, 0xf8, 0xff, 0xf7, 0xff, 0xcf, 0xcf, 0xf3, 0x17, 0x3f, 0xd5, 0xdd, 0xf7, 0xff, 0xff, 0xcf, 0xdf, 0x73, 0x95, 0x3f, 0xff, 0xac, 0x6f, 0xef, 0x77, 0xdf, 0xff, 0xf7, 0xbb, 0x85, 0xdd, 0xe1, 0xf7, 0xfb, 0x7b, 0xdf, 0xfe, 0xff, 0xb7, 0x9f, 0x79, 0xff, 0xd8, 0xac, 0xfb, 0x47, 0xaf, 0xeb, 0xf7, 0xff, 0xaf, 0x2e, 0x70, 0xd9, 0xf7, 0xfb, 0xdf, 0xea, 0xfb, 0xfb, 0x1b, 0x5f, 0xff, 0xf8, 0x6f, 0xaf, 0xd7, 0xb7, 0xeb, 0xff, 0xe7, 0xaf, 0x7c, 0x70, 0xfb, 0xdf, 0xff, 0x7b, 0xfb, 0xff, 0xda, 0x9f, 0xf9, 0xff, 0x95, 0xbb, 0xfd, 0xc7, 0xcf, 0xfb, 0x83, 0xef, 0xf3, 0xbf, 0xcf, 0x47, 0xf7, 0xe7, 0x1f, 0xd7, 0x8b, 0x6f, 0x33, 0xbe, 0xff, 0xc7, 0x6f, 0xfd, 0x97, 0x2f, 0xeb, 0xb7, 0xdc, 0x77, 0xd7, 0x1f, 0x67, 0xf7, 0xe7, 0x9e, 0xe7, 0xdb, 0x34, 0xdb, 0xfb, 0x7f, 0xa8, 0xef, 0xff, 0xe7, 0xef, 0xff, 0xf7, 0xff, 0x8b, 0xbf, 0xd8, 0xef, 0xff, 0xe7, 0xdf, 0xf7, 0xfb, 0xf7, 0x9f, 0x66, 0xff, 0xfb, 0x6c, 0xff, 0xb7, 0x9f, 0xcf, 0xcb, 0xbb, 0x93, 0xaf, 0xff, 0xa8, 0xff, 0xc7, 0x3f, 0xa7, 0xcf, 0xfe, 0xe3, 0x3f, 0x3f, 0xdf, 0x7b, 0xfa, 0xff, 0xff, 0xff, 0xf7, 0x8f, 0x3f, 0xd0, 0xd3, 0x7f, 0xfc, 0xff, 0x8e, 0xff, 0xf3, 0x8f, 0x4e, 0xe4, 0x7f, 0xb8, 0xff, 0xff, 0xff, 0xef, 0x8f, 0xdf, 0xf3, 0xbb, 0x3f, 0xe0, 0xf3, 0xff, 0xff, 0xef, 0x8f, 0xd7, 0xf3, 0xab, 0xef, 0x7f, 0x8f, 0xf8, 0xfb, 0x8f, 0x6f, 0xa0, 0xff, 0xff, 0xdc, 0xff, 0x5f, 0xfc, 0xf3, 0x8f, 0x6e, 0xb1, 0xf7, 0xf7, 0xf7, 0x3e, 0xff, 0x90, 0x7b, 0xf8, 0xff, 0x7f, 0xdf, 0xff, 0xfb, 0x9f, 0x7f, 0xa0, 0xf3, 0xf1, 0xff, 0xff, 0xdf, 0xff, 0xdb, 0xbf, 0x3f, 0xff, 0xdc, 0xfb, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x77, 0xaf, 0xff, 0xad, 
0xf3, 0xf8, 0xff, 0xfe, 0xef, 0xff, 0xff, 0x6f, 0xbf, 0xff, 0xff, 0xcf, 0xff, 0xa3, 0xff, 0xaf, 0xcf, 0x93, 0xc3, 0x74, 0xef, 0xdf, 0xff, 0xab, 0x2f, 0xe7, 0xc7, 0xf3, 0x73, 0x79, 0xff, 0xff, 0xcf, 0xff, 0xc3, 0x7f, 0x83, 0xe4, 0xd3, 0xbc, 0x7b, 0xdb, 0xdf, 0x7f, 0x8b, 0x8e, 0x83, 0x01, 0x51, 0xd5, 0x7b, 0xff, 0xfb, 0xdf, 0xff, 0xc3, 0xef, 0xb3, 0xff, 0xb7, 0xff, 0xfe, 0xbf, 0xdf, 0xff, 0x8b, 0x6e, 0xf3, 0xfb, 0xb7, 0x1f, 0xfe, 0xbf, 0x82, 0xc8, 0xf8, 0xd3, 0x7f, 0xf3, 0xfb, 0xff, 0xef, 0x87, 0x87, 0xd0, 0x70, 0x8b, 0xff, 0xf3, 0xff, 0xef, 0xef, 0x87, 0xff, 0xff, 0xff, 0xff, 0x47, 0xff, 0xf3, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xdf, 0x7f, 0xcb, 0xef, 0xf2, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xef, 0xf2, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xf7, 0xef, 0xdf, 0xf7, 0xca, 0x7f, 0xf3, 0xf7, 0xef, 0xff, 0xf6, 0xff, 0xfa, 0xfb, 0xff, 0xe7, 0xff, 0xf7, 0xff, 0xff, 0xef, 0xff, 0xf7, 0x57, 0x7f, 0xca, 0xef, 0xf3, 0xff, 0xff, 0xef, 0xef, 0xff, 0xf6, 0xeb, 0xfa, 0xf7, 0xff, 0xf7, 0x8f, 0xff, 0xe3, 0xf7, 0xef, 0xd7, 0xf7, 0xcb, 0x7f, 0xf3, 0x8f, 0x6c, 0xf2, 0xe7, 0xff, 0xff, 0x2f, 0xff, 0xf1, 0x9d, 0x9e, 0xf4, 0xff, 0xff, 0xff, 0xef, 0x0f, 0xff, 0xf1, 0x09, 0x3f, 0xf9, 0xbf, 0xf7, 0xfb, 0xff, 0xef, 0x7f, 0xf6, 0xfb, 0xf5, 0x0f, 0xdf, 0x00, 0xff, 0xd0, 0xbf, 0xc0, 0xbf, 0xf9, 0xff, 0x8f, 0x7f, 0x81, 0x6f, 0xe1, 0xff, 0xff, 0x9e, 0xaf, 0xf7, 0x0f, 0x18, 0xd9, 0xbf, 0x6f, 0x37, 0xef, 0x8f, 0xff, 0x9e, 0x06, 0x75, 0xf7, 0xf6, 0xff, 0xef, 0x7f, 0xf7, 0xcf, 0xbb, 0xfb, 0x6d, 0xfb, 0xef, 0x7d, 0xe9, 0xff, 0xff, 0xbf, 0xc2, 0xf7, 0x6f, 0xff, 0xdc, 0xff, 0xf3, 0xfa, 0x7f, 0x6f, 0xf7, 0xf9, 0xff, 0x6d, 0xfe, 0x9c, 0xbf, 0xbf, 0x7d, 0xe2, 0x7f, 0x77, 0x9f, 0xcd, 0xb7, 0xb5, 0xff, 0xff, 0x7f, 0x7f, 0xff, 0x87, 0xae, 0x86, 0xdf, 0xc0, 0xfd, 0xfb, 0xfa, 0xff, 0xff, 0x8e, 0x6d, 0xd5, 0x3d, 0xf1, 0xff, 0xfe, 0xef, 0xff, 0xff, 0x3f, 0xd2, 0xa4, 0xfe, 0xd9, 0xf7, 0xfe, 0xdf, 0xff, 0xcb, 0xff, 0xdd, 0x7e, 0xbb, 0xdd, 0x5f, 0xf7, 0xbf, 0xff, 0xed, 0xff, 0xfe, 0xf5, 0xff, 0xb7, 0xf6, 0xb4, 0xae, 0xfe, 
0xef, 0xf7, 0xff, 0xff, 0x8f, 0x07, 0x7b, 0xfb, 0xff, 0x7f, 0xff, 0xf7, 0xff, 0x7b, 0xa3, 0xbf, 0xe3, 0xff, 0xff, 0xf7, 0xf7, 0xfd, 0xdf, 0xff, 0x8f, 0x7f, 0x9b, 0xdf, 0xfb, 0xef, 0xfe, 0xff, 0x3f, 0xff, 0x6f, 0xff, 0x7f, 0xb3, 0x7f, 0xdf, 0xbd, 0x78, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xef, 0xff, 0xdf, 0xee, 0x9d, 0xfd, 0xef, 0xff, 0xf8, 0x05, 0x8e, 0xb0, 0x58, 0xf7, 0xfc, 0xa4, 0x85, 0xdd, 0xbc, 0x0b, 0x05, 0x61, 0xf8, 0xb7, 0xff, 0xeb, 0xef, 0x7f, 0xbf, 0xff, 0xc7, 0xbb, 0xd8, 0x6f, 0x79, 0xde, 0xff, 0xff, 0xcf, 0xff, 0xba, 0xaf, 0xd9, 0x7b, 0xfd, 0xff, 0xf5, 0xdf, 0xbf, 0xff, 0xff, 0xaf, 0x7f, 0x88, 0x7f, 0xf0, 0xea, 0xfe, 0x7f, 0xf2, 0xff, 0xdf, 0xd7, 0x4f, 0x7f, 0xe3, 0xde, 0xff, 0xff, 0xf7, 0xff, 0x7d, 0x6f, 0x5f, 0xab, 0xff, 0x7a, 0xb6, 0xbf, 0x78, 0xdd, 0x7f, 0xde, 0xef, 0x4b, 0x9b, 0xeb, 0x7f, 0x76, 0x9f, 0xac, 0xff, 0xcf, 0xff, 0xff, 0xb7, 0x7f, 0xae, 0xef, 0xdb, 0xef, 0xff, 0xf7, 0xff, 0xfb, 0xe7, 0xfe, 0xbf, 0x7e, 0xfb, 0xf7, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xcf, 0xbf, 0xff, 0xff, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xef, 0xd7, 0xff, 0xe9, 0xef, 0xf2, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0x8d, 0xaf, 0xf2, 0x71, 0xff, 0xfe, 0xff, 0xff, 0x7f, 0xff, 0xcf, 0x0f, 0x75, 0xf1, 0xff, 0xff, 0x3f, 0x78, 0xbc, 0xfb, 0xfe, 0xff, 0xff, 0xbf, 0xf3, 0x0f, 0x3f, 0xc9, 0xe9, 0x7f, 0xf7, 0xdf, 0xff, 0xff, 0x57, 0x82, 0x2e, 0xff, 0xf8, 0x7f, 0xfb, 0x7a, 0xfb, 0xff, 0xff, 0xeb, 0x87, 0x9b, 0xf1, 0xe5, 0x7f, 0x75, 0x7f, 0xfe, 0xff, 0xbe, 0x39, 0x79, 0xff, 0xcd, 0xbf, 0xfd, 0x7e, 0xff, 0xfb, 0xf4, 0xf7, 0xed, 0x6f, 0xf3, 0x6b, 0xf7, 0xd7, 0xbf, 0xfe, 0xfd, 0xf7, 0x8f, 0xef, 0xff, 0xf8, 0x7a, 0xff, 0xfa, 0xf0, 0xff, 0xff, 0xe6, 0x8f, 0x9b, 0xf1, 0xad, 0xbf, 0xf7, 0xfd, 0xbf, 0xfd, 0xef, 0x8f, 0x3f, 0xbf, 0xff, 0xad, 0xfb, 0xf4, 0xbf, 0xf3, 0x90, 0xdd, 0xf0, 0xfa, 0xcf, 0xe7, 0xf2, 0xf7, 0x7f, 0xff, 0xad, 0xff, 0xf5, 0xdf, 0xff, 0xcb, 0x7b, 0xfa, 0xd2, 0x7f, 0xc7, 0xf3, 0xa9, 0xf7, 0xe7, 0x8b, 0xe7, 0xf1, 0xf7, 0x3f, 0x8f, 0x7f, 0xb0, 0xdf, 0xfd, 0x7f, 0xf8, 0xff, 0xfb, 0xf2, 0xff, 
0xd2, 0xe3, 0xdf, 0x87, 0xd3, 0xf0, 0xed, 0xff, 0xf7, 0x7f, 0xff, 0xef, 0x77, 0xaa, 0x7f, 0xbf, 0xea, 0xfc, 0xfb, 0xb3, 0xbf, 0xb3, 0xff, 0xd8, 0x93, 0xab, 0xf1, 0xac, 0xff, 0xf7, 0x3f, 0xaf, 0xef, 0xed, 0xc7, 0xad, 0xff, 0xd6, 0xff, 0xfd, 0xff, 0xdf, 0xff, 0xf4, 0x8f, 0xbf, 0xb1, 0xed, 0xf5, 0xfb, 0xff, 0xef, 0xff, 0xf5, 0x8f, 0xdf, 0xf1, 0xff, 0xf8, 0xfe, 0xfb, 0xff, 0xdf, 0x9f, 0xf8, 0xf0, 0xef, 0xff, 0xd0, 0xfd, 0xf7, 0xff, 0xef, 0xaf, 0xf5, 0xf7, 0xce, 0x7f, 0x7f, 0x83, 0x7d, 0xfa, 0x8f, 0x5f, 0xd0, 0xcb, 0xe9, 0xbb, 0x76, 0x9f, 0x7b, 0xf7, 0x8f, 0x6e, 0xb1, 0xd5, 0xf7, 0x5f, 0xc6, 0xff, 0xa3, 0x7f, 0xfc, 0xff, 0x7f, 0xff, 0xff, 0xdf, 0x9f, 0x7f, 0xa8, 0x77, 0xf5, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0x7f, 0x2d, 0xff, 0x96, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0x37, 0x6f, 0xfd, 0xaf, 0x77, 0xf8, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0x4b, 0xff, 0x72, 0xaf, 0xff, 0xe5, 0xdf, 0x99, 0xfc, 0x10, 0x63, 0xf0, 0xef, 0xef, 0x7f, 0x8b, 0xef, 0xaf, 0xe7, 0xf5, 0xf7, 0x7d, 0xff, 0xef, 0x8f, 0xff, 0xa1, 0x4f, 0x81, 0xe7, 0xb0, 0xfa, 0xfe, 0xd7, 0xcf, 0xff, 0xca, 0xcf, 0x0b, 0x85, 0x71, 0xfa, 0xff, 0xff, 0xdd, 0xef, 0xff, 0xa4, 0xdf, 0xb1, 0xdc, 0xd3, 0xff, 0xf8, 0xff, 0xcf, 0x7f, 0xcb, 0x7f, 0xff, 0xf5, 0xff, 0xbf, 0xfd, 0xbf, 0x82, 0xc8, 0xf8, 0xe1, 0x7f, 0xf0, 0xdf, 0xff, 0xef, 0x87, 0x87, 0xc0, 0xf0, 0xca, 0x2f, 0xfb, 0xff, 0xef, 0xee, 0x97, 0xff, 0xff, 0x8f, 0xfa, 0xa5, 0xdf, 0xd1, 0xf7, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x77, 0xdb, 0xef, 0xff, 0xf5, 0xfd, 0xff, 0xff, 0xff, 0xf6, 0x8b, 0xf2, 0x94, 0xff, 0xf1, 0xf7, 0xff, 0xff, 0xf7, 0xef, 0xd7, 0xf7, 0xdf, 0xee, 0xfb, 0xff, 0xef, 0xff, 0xf7, 0xff, 0xf3, 0xcb, 0xff, 0xe6, 0xff, 0xf1, 0xef, 0xff, 0xef, 0xff, 0xe7, 0xd7, 0x7f, 0xdb, 0x7e, 0xfd, 0xf7, 0xff, 0xef, 0xef, 0xff, 0xfe, 0xcf, 0xff, 0xc4, 0xff, 0xf2, 0x9f, 0xff, 0xe0, 0xf7, 0xff, 0xdf, 0xff, 0xdf, 0x7f, 0xdb, 0xac, 0xef, 0xf1, 0xf7, 0x7f, 0xef, 0x0f, 0x5f, 0xb4, 0x8f, 0xff, 0xf6, 0xfd, 0xff, 0x6f, 0xff, 0x8f, 0xff, 0xe9, 0x8d, 0x7f, 0xf1, 0xd1, 0xf7, 0xfe, 0xff, 0xe7, 0x7f, 
0x87, 0xfd, 0xe7, 0x8f, 0x9b, 0x00, 0xff, 0xb0, 0x7d, 0xfd, 0xcf, 0xfb, 0xfd, 0x8f, 0x7f, 0x81, 0x6d, 0xd1, 0xff, 0xbc, 0xed, 0xff, 0x86, 0x6e, 0x10, 0xf1, 0xf4, 0x5f, 0x7f, 0xfe, 0x9f, 0x37, 0xc3, 0x8f, 0xf7, 0xe5, 0xfb, 0xff, 0xff, 0x7f, 0xfe, 0xff, 0xfd, 0x97, 0xff, 0xfb, 0xff, 0x3f, 0xff, 0xf9, 0xc3, 0x1f, 0xf8, 0xff, 0xb5, 0x5f, 0xef, 0xdc, 0xff, 0xbe, 0xff, 0xcb, 0xe7, 0xfd, 0x69, 0xe7, 0xfc, 0xb6, 0xef, 0x9a, 0x77, 0xb6, 0x67, 0xdf, 0xef, 0xf7, 0xfe, 0xdf, 0xff, 0xf5, 0x7f, 0xff, 0xf7, 0x97, 0xbe, 0xf4, 0xff, 0xf1, 0xba, 0xfe, 0xff, 0x97, 0x7f, 0xbf, 0x77, 0x55, 0xdf, 0xd9, 0xf1, 0xff, 0xdf, 0xff, 0xbf, 0xbf, 0x72, 0xf2, 0xdd, 0xff, 0xe4, 0xff, 0x7f, 0xef, 0xff, 0xf7, 0xfe, 0xfb, 0xb9, 0xff, 0xff, 0xf5, 0xff, 0xfb, 0x96, 0xff, 0x7f, 0xf7, 0xef, 0x83, 0xf7, 0xf7, 0xff, 0xdf, 0xff, 0xf7, 0xfd, 0xd5, 0x9f, 0x0f, 0x7f, 0xf0, 0xfb, 0xae, 0xff, 0xec, 0xff, 0x7b, 0x85, 0xf7, 0xf3, 0xef, 0xff, 0xf7, 0xff, 0xed, 0xff, 0xe7, 0x8f, 0x7f, 0xc7, 0x97, 0x3f, 0xfc, 0xff, 0xf7, 0xde, 0xff, 0xfd, 0x8b, 0xff, 0xf7, 0x1f, 0xfb, 0xff, 0x2f, 0xfb, 0xf7, 0xff, 0xbf, 0xff, 0xff, 0x6f, 0xf7, 0xf9, 0xe7, 0xff, 0x33, 0xff, 0x9e, 0xe7, 0x8b, 0xf0, 0x50, 0xf1, 0xde, 0xff, 0x9f, 0x1b, 0x28, 0x1b, 0x8d, 0xb4, 0xe1, 0xf5, 0xf3, 0xfe, 0xef, 0xfa, 0xff, 0x7f, 0xf6, 0xff, 0xfe, 0x07, 0xec, 0xfb, 0x7f, 0xff, 0xfd, 0xff, 0xd6, 0x8f, 0xff, 0xf9, 0xff, 0x37, 0x7f, 0xfb, 0xdd, 0xff, 0xff, 0xff, 0x63, 0xef, 0xdf, 0xfa, 0xf1, 0xff, 0xfc, 0xfe, 0xdf, 0xfb, 0xff, 0x8f, 0x7f, 0xf9, 0xeb, 0xef, 0xfd, 0xff, 0x7f, 0x7f, 0xfb, 0xe7, 0x9f, 0x9b, 0xe5, 0xcf, 0xfd, 0xf7, 0xcf, 0xff, 0xf3, 0xbf, 0x5f, 0x5d, 0x67, 0x9f, 0xdf, 0x7f, 0xf9, 0xff, 0xff, 0xff, 0x1f, 0xfe, 0xff, 0xf5, 0xdf, 0x5f, 0xfb, 0xfb, 0xbf, 0xcf, 0xdf, 0xfb, 0x7f, 0xb7, 0xff, 0xfb, 0xff, 0xf7, 0x3f, 0x78, 0xfc, 0xf3, 0xff, 0xff, 0xdf, 0xbf, 0xf3, 0x05, 0x3f, 0xc1, 0xf1, 0xff, 0xf7, 0xef, 0xef, 0xff, 0xfb, 0x97, 0x1f, 0xff, 0xd8, 0x7f, 0xfb, 0x7b, 0xfb, 0xf7, 0xbf, 0xbb, 0x8e, 0xd3, 0xe1, 0xff, 0xfd, 0xf7, 0xed, 0xfe, 0x8b, 0x5e, 0x0f, 0x69, 
0xbf, 0xf8, 0xfc, 0xfb, 0x7b, 0xff, 0xdf, 0xed, 0xf7, 0x8f, 0xbf, 0xce, 0xfe, 0xff, 0xf3, 0xff, 0xf6, 0xff, 0xfe, 0x8f, 0x0f, 0xff, 0xfc, 0x7d, 0xff, 0xff, 0xfd, 0xf3, 0xff, 0xbf, 0x8f, 0xe3, 0xf0, 0xfe, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xae, 0x0f, 0x5d, 0xff, 0xff, 0xff, 0xf3, 0xff, 0xff, 0xff, 0xd1, 0xfd, 0xf1, 0xd3, 0xdd, 0xfd, 0xf1, 0xfb, 0xff, 0xfb, 0xfd, 0xff, 0xfd, 0xf6, 0xff, 0xff, 0x7f, 0xfe, 0xfd, 0xff, 0x87, 0xff, 0xcc, 0xf3, 0xdf, 0xcb, 0xff, 0xfb, 0xfb, 0xff, 0x8b, 0x7f, 0xfa, 0xff, 0xff, 0x7f, 0xf8, 0xfe, 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xe8, 0xff, 0xff, 0xf9, 0xff, 0xfb, 0xaf, 0x7f, 0x8f, 0x7e, 0xbf, 0xfb, 0x78, 0xff, 0xff, 0xff, 0xd7, 0xd3, 0xfd, 0xc3, 0x95, 0xc5, 0xf8, 0xff, 0xfb, 0xff, 0xfb, 0xff, 0xfc, 0xf2, 0x8f, 0xff, 0x8b, 0x7b, 0xff, 0xff, 0xdf, 0xff, 0xfd, 0x8f, 0xef, 0xf4, 0xb7, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0x8f, 0x6f, 0xf4, 0xff, 0xfd, 0xff, 0xfe, 0xff, 0xdf, 0xdf, 0xfd, 0xf1, 0xb3, 0xff, 0xd8, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfd, 0xf5, 0x87, 0x7f, 0x3f, 0xef, 0xff, 0xff, 0x8f, 0x5f, 0xf0, 0xff, 0xf1, 0xef, 0x39, 0xc7, 0x7e, 0xf3, 0x8f, 0x7e, 0xf1, 0xbd, 0xed, 0xfa, 0x9a, 0xff, 0x98, 0x7f, 0xfa, 0xff, 0x7f, 0xff, 0xff, 0xf3, 0x8f, 0x5f, 0x8a, 0xfb, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xaf, 0x5f, 0xff, 0x86, 0xff, 0xfc, 0xff, 0xff, 0xdf, 0xff, 0x7f, 0xbf, 0xcb, 0xbd, 0x77, 0xf2, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xbf, 0x77, 0xff, 0xff, 0xef, 0xff, 0xed, 0xdf, 0xff, 0xdd, 0x11, 0x73, 0xfc, 0xfc, 0xef, 0xff, 0xfb, 0xff, 0xfd, 0xfd, 0x9d, 0xf3, 0xff, 0xff, 0xf9, 0xef, 0xff, 0xf3, 0x0f, 0x83, 0x9e, 0xf0, 0xf0, 0xff, 0xed, 0xef, 0xff, 0xe9, 0x8e, 0x7b, 0x9d, 0x70, 0xe9, 0xf7, 0xff, 0xff, 0xef, 0xff, 0xef, 0xff, 0xf3, 0xf9, 0xf7, 0xff, 0xfb, 0xf3, 0xef, 0xfd, 0xeb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x3f, 0x87, 0xec, 0xf8, 0xe5, 0x7f, 0xfb, 0xff, 0xff, 0xff, 0x8f, 0x87, 0xe8, 0xf0, 0xeb, 0xff, 0xff, 0xfd, 0xff, 0xff, 0x97, 0xff, 0xff, 0xef, 0xff, 0xed, 0xdf, 0xd3, 0xbf, 0x7f, 0xfe, 0xff, 0xff, 0xef, 0xff, 0xe3, 0xef, 0xff, 0xff, 
0xff, 0xfe, 0xff, 0xff, 0x77, 0xef, 0xfa, 0xe7, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xef, 0xef, 0xef, 0xf7, 0xea, 0xfe, 0xf3, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf7, 0xeb, 0xf3, 0xef, 0xff, 0xf3, 0xff, 0xf1, 0x7f, 0xff, 0xe7, 0xe7, 0xf7, 0xea, 0xee, 0xf7, 0xff, 0xf1, 0xff, 0xe7, 0xff, 0xfb, 0x6f, 0xf6, 0xe5, 0xff, 0xeb, 0xdf, 0xef, 0xff, 0xff, 0xf7, 0xef, 0xff, 0xe3, 0xef, 0xff, 0xfe, 0xff, 0xfe, 0xf7, 0x7f, 0xff, 0x4f, 0x5f, 0xe1, 0xc7, 0xef, 0xf1, 0xfe, 0x7f, 0x7b, 0xff, 0x6f, 0xff, 0x93, 0x0b, 0x7f, 0xf1, 0xfa, 0xdf, 0xff, 0xff, 0xfb, 0x7f, 0xdf, 0xf7, 0xef, 0x8f, 0xff, 0x00, 0xef, 0xf0, 0xdf, 0x7f, 0xef, 0xff, 0xfb, 0x8f, 0x7f, 0x81, 0x6f, 0xd1, 0xff, 0xde, 0xff, 0xef, 0xb9, 0x49, 0x74, 0xf3, 0xef, 0x7b, 0x7f, 0xff, 0xeb, 0xf7, 0x85, 0x67, 0xf1, 0xf0, 0xe1, 0xff, 0xf7, 0x3f, 0xab, 0xff, 0xc4, 0xbb, 0xff, 0x8c, 0x9d, 0x7e, 0x3a, 0xb5, 0xbb, 0xe3, 0xfb, 0xf3, 0xcd, 0xe3, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xdc, 0xf7, 0xf8, 0x77, 0x8f, 0xf7, 0xfe, 0x9f, 0x97, 0x7a, 0xf2, 0x7f, 0xfb, 0x8f, 0x1f, 0x7d, 0xfd, 0xef, 0xb1, 0x7d, 0xff, 0xef, 0xa6, 0xef, 0x98, 0x9d, 0xf0, 0xf4, 0xf4, 0xff, 0xff, 0x7f, 0x8f, 0x7f, 0x89, 0x7f, 0xe7, 0xff, 0xff, 0xf7, 0xfb, 0xbf, 0xa7, 0xb7, 0xdf, 0xba, 0xfd, 0xfe, 0xeb, 0xff, 0xff, 0xc4, 0xef, 0x8f, 0x7c, 0xf7, 0x8f, 0x7f, 0xf9, 0xfb, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xeb, 0x87, 0xfd, 0xf4, 0xf7, 0x6f, 0xff, 0xbf, 0xff, 0xff, 0xef, 0xef, 0xdf, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0x85, 0xdb, 0xf2, 0xbf, 0xd7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0x8f, 0x7f, 0xf1, 0xaf, 0x7d, 0xff, 0xf7, 0xeb, 0xff, 0xbf, 0xfd, 0x8f, 0xff, 0xfa, 0x3f, 0xff, 0xf6, 0xb7, 0xff, 0xfe, 0xfb, 0x8f, 0x7f, 0xff, 0xff, 0xf3, 0xee, 0xbf, 0x7f, 0xff, 0xff, 0x67, 0xc6, 0xaf, 0xc3, 0x74, 0xf7, 0xfe, 0xee, 0x8a, 0x37, 0x6e, 0xec, 0x87, 0x71, 0x91, 0x13, 0x7d, 0xec, 0x87, 0xff, 0xbf, 0x7b, 0xf0, 0xef, 0xfb, 0x3f, 0xb7, 0xfc, 0xff, 0xff, 0x97, 0x7d, 0xe8, 0xef, 0x9d, 0x77, 0xfd, 0xfb, 0xff, 0xfb, 0xbf, 0xff, 0xff, 0xde, 0x77, 0xcd, 0xff, 0xf1, 0xfb, 0xff, 0xff, 0xf9, 0xe3, 0xff, 0xff, 0xef, 
0xff, 0xff, 0xfc, 0xcf, 0xff, 0xf3, 0x7f, 0xff, 0xfe, 0x77, 0xaf, 0xf7, 0xf8, 0xef, 0xff, 0x76, 0xfa, 0xff, 0x99, 0x6d, 0x9f, 0x6f, 0xf1, 0xbf, 0x7f, 0x7f, 0xfc, 0xff, 0xff, 0xef, 0xbf, 0xeb, 0xfa, 0xdd, 0xef, 0xbc, 0xfd, 0xfd, 0xdf, 0xff, 0xf7, 0xff, 0xff, 0xd1, 0xfe, 0xff, 0xfb, 0xff, 0x3f, 0x70, 0xf8, 0xff, 0xf5, 0xff, 0xff, 0x9f, 0xf3, 0x09, 0x1f, 0xe1, 0xf3, 0xfd, 0xfd, 0xff, 0xef, 0xff, 0xf7, 0x8e, 0x1e, 0xff, 0xf8, 0x7f, 0xff, 0x77, 0xff, 0xff, 0x9b, 0x8f, 0x8e, 0xfb, 0xf1, 0xef, 0xe5, 0xfd, 0xef, 0xfe, 0x9f, 0x16, 0x03, 0x61, 0xbf, 0xf8, 0xfa, 0xfd, 0x73, 0xff, 0xff, 0xf7, 0xf7, 0x8d, 0x9f, 0xe1, 0xf1, 0xff, 0xef, 0xff, 0xfc, 0xff, 0xff, 0x8f, 0x0f, 0xff, 0xf8, 0x7f, 0xff, 0xf7, 0xf7, 0xfd, 0xff, 0x9f, 0x8f, 0xe1, 0xf1, 0xef, 0xff, 0xff, 0xff, 0xfd, 0xff, 0x8e, 0x0f, 0x7d, 0xff, 0xf7, 0xf9, 0xfe, 0xf3, 0x7f, 0xff, 0xf7, 0xf7, 0xf3, 0xfd, 0xef, 0xff, 0xf9, 0xed, 0xff, 0xff, 0xef, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0x7e, 0xfe, 0xf7, 0xff, 0x8b, 0xf7, 0xfc, 0xeb, 0xeb, 0xed, 0xeb, 0xf9, 0xfd, 0xff, 0x8f, 0x7f, 0xf2, 0xfe, 0xed, 0x7f, 0xf8, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0xf7, 0x8d, 0xff, 0xe8, 0xff, 0xfd, 0xef, 0xfe, 0xfd, 0xdf, 0xf6, 0x8f, 0x6f, 0xbf, 0xff, 0x78, 0xff, 0xf7, 0xff, 0xfb, 0xff, 0xff, 0xe3, 0x9f, 0xe5, 0xea, 0xf5, 0xfd, 0xff, 0xed, 0xef, 0xff, 0xf7, 0x8f, 0xff, 0x07, 0xff, 0xf9, 0xff, 0xff, 0xff, 0xf7, 0x8f, 0xff, 0xf3, 0x93, 0xff, 0xff, 0xff, 0xef, 0xff, 0xf7, 0x8f, 0x7f, 0xf3, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x8b, 0xff, 0xe8, 0xfb, 0xff, 0xff, 0xef, 0xef, 0xf7, 0xf5, 0x8e, 0x7f, 0x3f, 0xec, 0xf9, 0xfc, 0x8f, 0x7f, 0xf0, 0xef, 0xef, 0xff, 0x1d, 0xed, 0x7e, 0xfd, 0x8f, 0x6e, 0xf1, 0xd7, 0xf7, 0xde, 0x8c, 0xff, 0x80, 0x7f, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xfb, 0x8f, 0x6f, 0x90, 0xff, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x8f, 0x7f, 0xff, 0x87, 0xfb, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x9f, 0xff, 0x93, 0x7f, 0xf2, 0xff, 0xfe, 0xef, 0xff, 0xff, 0x9f, 0x69, 0xff, 0xfb, 0xef, 0xff, 0xf6, 0xff, 0xff, 0xff, 0x1f, 0x73, 0xfe, 0xf6, 
0xff, 0xff, 0xff, 0xef, 0xef, 0xe7, 0x97, 0x77, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xe7, 0x1f, 0x86, 0x95, 0xf0, 0xf6, 0xf7, 0xff, 0xff, 0xff, 0xfe, 0x8f, 0x6d, 0x93, 0x71, 0xf8, 0xfd, 0xff, 0xfe, 0xef, 0xfd, 0xf2, 0xff, 0xf6, 0xff, 0x6f, 0xef, 0xf7, 0xfd, 0xff, 0xff, 0xfd, 0xff, 0xfd, 0xff, 0xef, 0xff, 0xf5, 0x3f, 0x82, 0xf8, 0xf8, 0xe6, 0x7f, 0xf2, 0xff, 0xff, 0xff, 0x87, 0x87, 0xf0, 0xf0, 0xfc, 0xef, 0xfd, 0xf7, 0xef, 0xee, 0x87, 0xff, 0xff, 0xef, 0xff, 0xe7, 0xff, 0xf6, 0xf7, 0x7f, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xf2, 0xe6, 0xff, 0xf5, 0xf7, 0xff, 0xff, 0xff, 0xe7, 0xff, 0xf7, 0xff, 0xfe, 0xff, 0xf7, 0xef, 0xff, 0xf7, 0xff, 0xfa, 0xeb, 0xff, 0xe3, 0xff, 0xf5, 0xff, 0xff, 0xff, 0xf7, 0xef, 0xf7, 0xff, 0xfd, 0xee, 0xfd, 0xff, 0xff, 0xef, 0xef, 0xff, 0xf6, 0xfb, 0x7a, 0xe7, 0xff, 0x91, 0xef, 0xff, 0xe2, 0xff, 0xf7, 0xf7, 0xf7, 0xfd, 0xff, 0xfd, 0xef, 0xeb, 0xf5, 0xe7, 0xff, 0xff, 0x8f, 0xbe, 0xf1, 0x93, 0xfd, 0xf1, 0xff, 0xff, 0xff, 0xff, 0x2e, 0x2f, 0x59, 0x8f, 0x6f, 0xf9, 0xf7, 0xff, 0xfa, 0xff, 0x4f, 0xbf, 0xc4, 0xfb, 0xf5, 0x0f, 0xff, 0x00, 0xff, 0xd0, 0x7f, 0x70, 0xaf, 0x7f, 0xff, 0x8f, 0x7f, 0x81, 0x7f, 0xb1, 0xff, 0xff, 0xda, 0xff, 0x2f, 0x4f, 0x7e, 0xf9, 0xfb, 0xff, 0x5f, 0xef, 0xff, 0xff, 0x99, 0x29, 0x71, 0xf1, 0xed, 0xff, 0xff, 0x3f, 0xb7, 0xef, 0xdf, 0xff, 0xf9, 0xfe, 0xfd, 0xff, 0xff, 0xb7, 0x9f, 0xff, 0xab, 0x73, 0xfd, 0xad, 0x3c, 0x6b, 0xff, 0xfa, 0xff, 0xe8, 0xf7, 0xbf, 0x6f, 0x8f, 0xff, 0xfe, 0xff, 0x87, 0x7f, 0xe0, 0x7f, 0xbf, 0x5f, 0x93, 0x7e, 0xe4, 0xf6, 0x97, 0x7b, 0xff, 0xdf, 0x27, 0xaf, 0xa7, 0xff, 0xf4, 0xf6, 0xff, 0xff, 0xff, 0x5f, 0xdf, 0xfb, 0x87, 0x1f, 0x70, 0xf3, 0xfe, 0x7f, 0xfb, 0xbf, 0xb7, 0x61, 0xb5, 0xfc, 0xff, 0xf7, 0xff, 0xbf, 0xff, 0xa2, 0xff, 0xd8, 0xa1, 0x77, 0xdf, 0xe6, 0xff, 0xff, 0x7e, 0xc4, 0xff, 0xff, 0xf6, 0xd7, 0x97, 0xdb, 0xe8, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xb9, 0x3b, 0xf8, 0xff, 0xef, 0xe7, 0xbe, 0xff, 0xff, 0x86, 0xbf, 0xe2, 0xad, 0xff, 0xff, 0xff, 
0xff, 0x9f, 0xff, 0x8f, 0x3f, 0x63, 0xcf, 0x7b, 0xfe, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xdf, 0xff, 0xba, 0x7f, 0xf7, 0xee, 0x17, 0xf7, 0xee, 0xef, 0x97, 0x7f, 0xf7, 0xff, 0xff, 0xfd, 0x9e, 0x77, 0xf3, 0xff, 0xfb, 0xc3, 0x8f, 0xc1, 0x70, 0x71, 0xff, 0xf7, 0x9f, 0x77, 0x7e, 0xb6, 0x4f, 0xb9, 0x01, 0x1f, 0x1b, 0x7a, 0x9a, 0x7e, 0xbf, 0xff, 0xf0, 0x9f, 0xef, 0x79, 0x3f, 0xf6, 0xff, 0xfd, 0x9d, 0x7b, 0xf2, 0xff, 0xc9, 0xf3, 0xff, 0x7d, 0xfb, 0xfd, 0xbf, 0xff, 0xfd, 0xbf, 0x77, 0x8f, 0xff, 0xf1, 0xf7, 0xff, 0xff, 0xff, 0xfd, 0xbb, 0x7f, 0xbf, 0x6f, 0xf3, 0xfe, 0xff, 0xe7, 0xbf, 0x7f, 0xff, 0xff, 0x5f, 0xaf, 0xf1, 0xfc, 0xf7, 0xff, 0x77, 0xfe, 0xff, 0xdf, 0xff, 0xdf, 0xff, 0xe3, 0x9e, 0x7f, 0x7a, 0xe3, 0xff, 0xff, 0x6f, 0xff, 0xaf, 0xfc, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xf3, 0xfb, 0xf7, 0xff, 0xf9, 0xfe, 0xff, 0x3f, 0x70, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x0f, 0xf1, 0xf1, 0xff, 0xf7, 0xff, 0xef, 0xff, 0xf7, 0x8c, 0x1e, 0xff, 0xf8, 0x7f, 0xff, 0x7f, 0xf7, 0xff, 0x8f, 0x8f, 0x88, 0xf1, 0xf1, 0xef, 0xff, 0xf7, 0xff, 0xee, 0x97, 0x1e, 0x01, 0x61, 0xbf, 0xf0, 0xfa, 0xfd, 0x75, 0xff, 0xf5, 0xff, 0xff, 0x8f, 0x9f, 0xe1, 0xf1, 0xff, 0xf7, 0xef, 0xfe, 0xff, 0xff, 0x8f, 0x1f, 0xff, 0xfa, 0x7f, 0xff, 0xfd, 0xff, 0xfd, 0xff, 0x9f, 0x9f, 0xf9, 0xf9, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0x9e, 0x0f, 0x75, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xf7, 0xf7, 0xff, 0xef, 0xf7, 0xe7, 0xef, 0xef, 0xef, 0xff, 0xff, 0xef, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf8, 0xf7, 0xff, 0xf7, 0xff, 0xf7, 0xe7, 0xef, 0x9f, 0x6f, 0xf0, 0xff, 0xff, 0x7f, 0xf8, 0xff, 0xff, 0xfd, 0xff, 0xfd, 0x8f, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xf7, 0xef, 0xf7, 0xff, 0xff, 0x9d, 0x7e, 0xbf, 0xff, 0x78, 0xff, 0xfd, 0xff, 0xfd, 0xf7, 0xff, 0xf7, 0x8f, 0xf7, 0xf0, 0xf7, 0xf7, 0xff, 0xf7, 0xef, 0xfe, 0xe7, 0x9f, 0xff, 0x8f, 0x7f, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf8, 0x8f, 0xff, 0xf8, 0xff, 0xef, 0xff, 0xff, 0x8e, 0x6f, 0xe1, 0xff, 0xf8, 0x7f, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xf7, 0x87, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xef, 0xef, 0xff, 0x9f, 0x7f, 0x3f, 0xff, 0xf8, 0xff, 0x8f, 0x7f, 0xf0, 0x9f, 0xf7, 0xfb, 0x07, 0xe7, 0x78, 0xf7, 0x8f, 0x7e, 0xf1, 0x9f, 0x7f, 0xff, 0x9e, 0xff, 0x80, 0x7f, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xf7, 0x8f, 0x6f, 0x80, 0xff, 0xf1, 0xff, 0xff, 0xef, 0xff, 0xfe, 0x9f, 0x7f, 0xff, 0x88, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x9f, 0xef, 0x81, 0x7f, 0xf0, 0xff, 0xfe, 0xff, 0xff, 0xef, 0x9f, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x97, 0x77, 0xf8, 0xff, 0xef, 0xff, 0xf7, 0xff, 0xef, 0xef, 0x8f, 0x76, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x0f, 0xf7, 0x88, 0xf7, 0xf0, 0xff, 0xff, 0xef, 0xff, 0xef, 0x8e, 0x07, 0x99, 0x61, 0xe1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xf7, 0xff, 0xff, 0xf7, 0xf7, 0xef, 0xff, 0xe7, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x87, 0xf8, 0xf8, 0xff, 0x7f, 0xf7, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xe0, 0xf0, 0xef, 0xff, 0xf7, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xef, 0xff, 0xe7, 0xff, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xef, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0xff, 0xe7, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xef, 0xff, 0xef, 0xff, 0x97, 0xee, 0xf9, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80, 0x88, 0xff, 0xf8, 0xff, 0xff, 0xf7, 0xff, 0x0f, 0x1f, 0x79, 0x9b, 0x7f, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0x4f, 0x7b, 0xf7, 0xf7, 0xf8, 0x0f, 0xff, 0x00, 0xff, 0xd0, 0x4f, 0x75, 0x8c, 0x79, 0xff, 0x8f, 0x7f, 0x81, 0x7e, 0xb1, 0xff, 0xf7, 0xff, 0xfe, 0x8f, 0x7f, 0x70, 0xf0, 0xff, 0xf7, 0x7f, 0xff, 0xf7, 0xff, 0x84, 0x0e, 0x73, 0xff, 0xf7, 0xff, 0xff, 0x3f, 0xbd, 0xfd, 0xfb, 0xf7, 0xe8, 0xff, 0xff, 0x78, 0xf0, 0x3f, 0xbf, 0xe3, 0x9b, 0x6f, 0xff, 0x97, 0x1f, 0x7f, 0xcf, 0xfe, 0xff, 0xf8, 0xf7, 
0xff, 0x8f, 0x80, 0xff, 0xf8, 0xff, 0x8f, 0x78, 0xf0, 0x7f, 0x9f, 0x7f, 0x9b, 0x7f, 0xf1, 0xff, 0x97, 0x7f, 0xff, 0xff, 0x07, 0xff, 0x87, 0x7f, 0xf0, 0xf8, 0xff, 0x7f, 0xfb, 0x7f, 0xee, 0xfb, 0x99, 0x19, 0x73, 0xef, 0xf7, 0xef, 0xff, 0xbf, 0x87, 0x04, 0xfc, 0xff, 0x08, 0xf7, 0xff, 0x7b, 0x7f, 0x8e, 0x8f, 0xf9, 0x98, 0x6f, 0xf3, 0xff, 0xef, 0xff, 0xff, 0xdf, 0xff, 0xff, 0x73, 0xff, 0xf7, 0x9f, 0xf8, 0xfb, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x3d, 0x61, 0xff, 0xef, 0xff, 0xff, 0xff, 0x7f, 0x07, 0xff, 0xf8, 0x78, 0xff, 0xee, 0xfb, 0xff, 0xff, 0xff, 0x87, 0x1f, 0x71, 0xe3, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x77, 0x8b, 0x7f, 0xf8, 0x7f, 0xff, 0xff, 0x07, 0xe7, 0xfa, 0x77, 0x8f, 0x7f, 0xff, 0xef, 0xf1, 0xef, 0x9f, 0x77, 0xf9, 0xff, 0xf8, 0x73, 0x8f, 0xf8, 0x0f, 0x88, 0x88, 0xf8, 0x98, 0x77, 0x78, 0x8f, 0x6f, 0x87, 0x7b, 0xf7, 0xff, 0xef, 0x87, 0x6f, 0xbf, 0x7f, 0xf0, 0xff, 0x8f, 0x7f, 0xff, 0xff, 0x7e, 0xff, 0x9f, 0x7f, 0xf0, 0xff, 0xff, 0xeb, 0xef, 0xff, 0xfb, 0xff, 0x9f, 0xff, 0xff, 0xff, 0x77, 0xf8, 0x7f, 0xf8, 0xf7, 0xff, 0x7f, 0xf7, 0xff, 0x9f, 0x7f, 0x9b, 0x6f, 0xf1, 0xfe, 0xff, 0xfe, 0xff, 0x7f, 0x7f, 0x8f, 0xff, 0x8f, 0xff, 0x7f, 0x8f, 0xff, 0x70, 0xfb, 0x6f, 0xff, 0xff, 0xff, 0xf5, 0xf9, 0xff, 0xff, 0xff, 0xf4, 0xff, 0x87, 0xff, 0x74, 0xff, 0x7f, 0xff, 0xff, 0x7e, 0xff, 0xff, 0x8f, 0xff, 0xf1, 0xff, 0xff, 0xe9, 0xff, 0xf7, 0xff, 0xff, 0xbf, 0x78, 0xf8, 0x7f, 0xf7, 0xff, 0xff, 0xff, 0xf7, 0x07, 0x1f, 0xe1, 0xf1, 0xff, 0xf7, 0xff, 0xef, 0xef, 0xff, 0x8e, 0x0f, 0xff, 0xf8, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x87, 0x9f, 0x88, 0xf1, 0xe1, 0xff, 0xef, 0xf7, 0xef, 0xfe, 0x9f, 0x0f, 0x09, 0x71, 0xbf, 0xf0, 0xf8, 0xff, 0x77, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xf0, 0xf0, 0xff, 0xf7, 0xff, 0xfe, 0xf7, 0xff, 0x8e, 0x1f, 0x7f, 0xf8, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xf9, 0xf8, 0xfe, 0xff, 0xe7, 0xf7, 0xff, 0xff, 0x96, 0x0f, 0x61, 0x7f, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xef, 0xff, 0xf7, 0xe7, 0xef, 0xff, 0xe7, 0xff, 0xf7, 0xfe, 
0xff, 0xf7, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf8, 0xef, 0xef, 0xef, 0xff, 0xf7, 0xf7, 0xff, 0x9f, 0x67, 0xf0, 0xff, 0xff, 0x7f, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0x8f, 0xff, 0xe0, 0xf7, 0xff, 0xf7, 0xfe, 0xf7, 0xf7, 0xff, 0x8f, 0x6e, 0x3f, 0xfb, 0x7c, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xe7, 0x9f, 0xef, 0xf0, 0xff, 0xe7, 0xef, 0xf7, 0xef, 0xf6, 0xf6, 0x87, 0xff, 0x07, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf4, 0x9f, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf0, 0x7f, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x87, 0xff, 0xe8, 0xff, 0xf7, 0xff, 0xff, 0xef, 0xe7, 0xf7, 0x8f, 0x6f, 0x3f, 0xff, 0xfc, 0xff, 0x8f, 0x7f, 0xf0, 0xcf, 0xef, 0xe8, 0x1b, 0xef, 0x7c, 0xff, 0x8f, 0x7e, 0xe1, 0x97, 0x77, 0xff, 0x9e, 0xff, 0x80, 0x7f, 0xfc, 0xff, 0x7f, 0xff, 0xff, 0xf7, 0x8f, 0x7f, 0x80, 0xff, 0xf1, 0xff, 0xfe, 0xef, 0xff, 0xff, 0x9f, 0x7f, 0xff, 0x84, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x9f, 0xff, 0x91, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xff, 0x1f, 0x77, 0xf8, 0xf7, 0xef, 0xff, 0xf7, 0xff, 0xf7, 0xef, 0x87, 0x67, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x8b, 0x8c, 0xf0, 0xf8, 0xf7, 0xff, 0xef, 0xff, 0xe7, 0x9e, 0x67, 0x89, 0x77, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xef, 0xfb, 0xff, 0xef, 0xff, 0xe7, 0xff, 0xf7, 0xf7, 0xff, 0xff, 0xf7, 0xbf, 0x87, 0xf8, 0xf8, 0xf7, 0x7f, 0xfb, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xe0, 0xf0, 0xe7, 0xfe, 0xf7, 0xff, 0xff, 0xff, 0x8e, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xfb, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xfb, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xf7, 0xff, 0x8b, 0xff, 0xfc, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 
0xff, 0xf6, 0xff, 0xff, 0xf7, 0x8f, 0xbf, 0xf9, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0x7f, 0xff, 0x1f, 0x1f, 0x71, 0x89, 0x7f, 0xf1, 0xff, 0xff, 0xff, 0xff, 0x4f, 0xb8, 0xc6, 0xfb, 0xfd, 0x0f, 0xff, 0x00, 0xff, 0xd0, 0x7f, 0x79, 0x90, 0x7f, 0xf9, 0x8f, 0x7f, 0x81, 0x7f, 0x81, 0xff, 0xff, 0xcf, 0xff, 0xb0, 0x4f, 0x77, 0xf4, 0xff, 0xff, 0x7f, 0xe7, 0xee, 0xff, 0x97, 0x07, 0x69, 0xf1, 0xf7, 0xff, 0xff, 0x3f, 0xba, 0xff, 0x4d, 0xfe, 0xe5, 0xff, 0xff, 0x7f, 0xef, 0xbf, 0x9f, 0xe7, 0x9f, 0x77, 0xe9, 0x9f, 0x1f, 0x79, 0xc1, 0xfe, 0xff, 0xf8, 0xf7, 0x3f, 0xff, 0x9f, 0xf7, 0xf8, 0xff, 0x87, 0x7f, 0xf0, 0x7f, 0x97, 0x7f, 0x89, 0x7e, 0xf1, 0xff, 0x9f, 0x7f, 0xff, 0xff, 0x37, 0xbf, 0xb3, 0x7f, 0xf2, 0xff, 0xff, 0xf7, 0xff, 0x7f, 0x7b, 0xfe, 0x87, 0x1f, 0x71, 0xf1, 0xff, 0x7f, 0xfb, 0xff, 0xff, 0x70, 0xb7, 0xfe, 0x7b, 0xff, 0xf3, 0xbf, 0xf7, 0xff, 0xff, 0xf9, 0x9f, 0x7f, 0xf1, 0xff, 0xf7, 0xff, 0xff, 0xfb, 0xbf, 0xff, 0xf4, 0xcf, 0x87, 0xd8, 0xf8, 0xf8, 0xff, 0xff, 0x8f, 0xff, 0xe9, 0xff, 0x8f, 0x1f, 0x71, 0xff, 0xef, 0xff, 0x8d, 0xff, 0xff, 0x87, 0xbf, 0xf9, 0x3f, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x9f, 0x1f, 0x71, 0xf9, 0xff, 0x7b, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xcb, 0xff, 0xbd, 0x7f, 0xfb, 0xff, 0x8f, 0xff, 0xf8, 0x7f, 0x9f, 0x7f, 0xf1, 0xef, 0xff, 0xf7, 0x8f, 0x7f, 0xf9, 0xff, 0xf8, 0x43, 0x8f, 0xc4, 0x75, 0x7d, 0xff, 0xff, 0x97, 0x7f, 0x78, 0x8f, 0x6f, 0x99, 0x17, 0x19, 0x71, 0xf9, 0x8f, 0x6f, 0xbf, 0x7f, 0xf0, 0x8f, 0xf6, 0x7d, 0x37, 0xff, 0xff, 0xff, 0x9f, 0x7f, 0xe0, 0xff, 0xff, 0xef, 0xff, 0xf7, 0xfb, 0xff, 0x8f, 0xff, 0xff, 0xbf, 0x77, 0x8e, 0xff, 0xf0, 0xff, 0xff, 0xf7, 0xf7, 0x7f, 0x97, 0x7f, 0x99, 0x7f, 0xf9, 0xfe, 0xff, 0xfe, 0xff, 0x7f, 0xff, 0xff, 0xcf, 0xbf, 0xf9, 0xfa, 0xff, 0xff, 0xff, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x8f, 0x7f, 0x71, 0xf6, 0xff, 0xff, 0xff, 0x7f, 0xb7, 0xfe, 0xfb, 0xff, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff, 0x3f, 0x78, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0x08, 0x0f, 0xf1, 0xf1, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0x8e, 0x0e, 0xff, 0xf8, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0x8f, 0x8f, 0x80, 0xf1, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x8f, 0x0e, 0x01, 0x71, 0xbf, 0xf8, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xf1, 0xf1, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x8f, 0x0f, 0xff, 0xf8, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xf1, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x0f, 0x71, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf0, 0xff, 0xff, 0x7f, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0x88, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7e, 0xbf, 0xff, 0x78, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x8f, 0xf7, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0x8f, 0xff, 0x87, 0x7f, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf8, 0x8f, 0xf7, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x7f, 0xf1, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x87, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0x3f, 0xff, 0xf8, 0xff, 0x8f, 0x7f, 0xf0, 0xdf, 0xff, 0xf6, 0x07, 0xff, 0x78, 0xff, 0x8f, 0x7e, 0xf1, 0x9f, 0x7f, 0xfd, 0x8e, 0xff, 0x80, 0x7f, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xf7, 0x8f, 0x7f, 0x80, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x8f, 0x7f, 0xff, 0x80, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x8f, 0xff, 0x81, 0x7f, 0xf0, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x77, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x6e, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x8f, 0x88, 0xf0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x7f, 0x81, 0x7f, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x87, 0xf8, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x8f, 
0xf0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0x87, 0xff, 0xf8, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x8f, 0xf0, 0x85, 0xff, 0xf0, 0xff, 0xff, 0xff, 0x7f, 0x0d, 0x0f, 0x71, 0x81, 0x7e, 0xf1, 0xfe, 0xff, 0xff, 0xff, 0x5f, 0x3d, 0xf0, 0xf5, 0xfa, 0x0f, 0xff, 0x00, 0xff, 0x80, 0x0f, 0xf1, 0x88, 0x7f, 0xf1, 0x8f, 0x7e, 0x81, 0x7e, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xba, 0x4f, 0x75, 0xfa, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0x0f, 0x71, 0xf1, 0xff, 0xff, 0xff, 0x3f, 0xbf, 0xff, 0xcf, 0xff, 0xfa, 0xff, 0xfe, 0x7f, 0xdf, 0x7f, 0xf4, 0xff, 0x8f, 0x7f, 0xf1, 0x8f, 0x0f, 0x71, 0xf1, 0xbe, 0xff, 0xf8, 0xf7, 0xbb, 0x7f, 0x85, 0xff, 0xf0, 0xff, 0x8f, 0x7f, 0xf0, 0x7f, 0x87, 0x7f, 0x81, 0x7e, 0xf1, 0x8f, 0x0f, 0x71, 0xff, 0xff, 0x87, 0x8f, 0x8d, 0xfd, 0xf5, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xf7, 0xfe, 0x8f, 0x0f, 0x71, 0xf1, 0xfe, 0x7f, 0xf7, 0xbf, 0x07, 0xc0, 0xbf, 0xf7, 0xfd, 0xff, 0xff, 0xbf, 0xff, 0x85, 0x8f, 0xf9, 0x8f, 0x7f, 0xf1, 0xff, 0x7f, 0xf6, 0xff, 0x83, 0xff, 0xff, 0xf4, 0xff, 0x8f, 0xda, 0xf0, 0xb6, 0xdf, 0xfd, 0xf6, 0xff, 0xf9, 0xff, 0x8f, 0x1f, 0x71, 0xfd, 0x8f, 0x7b, 0xfd, 0xff, 0xff, 0xb7, 0x8f, 0xf0, 0xbd, 0xff, 0xfd, 0xf7, 0xff, 0xff, 0xff, 0x8f, 0x0f, 0x71, 0xf1, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xba, 0x7f, 0xf5, 0xf7, 0x0f, 0xf7, 0xf0, 0x7f, 0x87, 0x7f, 0xf1, 0xff, 0xff, 0xfe, 0x8f, 0x7f, 0xf1, 0xff, 0xf8, 0x47, 0x8f, 0xca, 0x70, 0x7a, 0xff, 0xff, 0x8f, 0x7f, 0x70, 0x8f, 0x7f, 0x81, 0x0e, 0x01, 0x01, 0x71, 0x81, 0x7f, 0xbf, 0x7f, 0xf0, 0x8f, 0xff, 0x70, 0x3f, 0xff, 0xfd, 
0xfe, 0x8f, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfb, 0xff, 0x8f, 0xff, 0xff, 0xbf, 0x7f, 0x85, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0x81, 0x7f, 0x81, 0xfe, 0xf1, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xcf, 0x87, 0xfa, 0x75, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0x70, 0xfe, 0xff, 0x87, 0xff, 0x4b, 0xbf, 0x7f, 0xfd, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0x3f, 0x78, 0xf8, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x0f, 0x71, 0xf1, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x0e, 0xff, 0xf8, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0x8f, 0x8f, 0x00, 0x71, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x8f, 0x0e, 0x01, 0x71, 0xbf, 0xf8, 0xf8, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0x8f, 0x0f, 0x71, 0xf1, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x8f, 0x0f, 0xff, 0xf8, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x0f, 0x71, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x0f, 0x71, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0x7f, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf0, 0xff, 0xff, 0x7f, 0xf8, 0xff, 0xff, 0xff, 0x7f, 0x7f, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7e, 0xbf, 0xff, 0x78, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0x8f, 0xff, 0x07, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x7f, 0xf1, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0x3f, 0xff, 0xf8, 0xff, 0x8f, 0x7f, 0xf0, 0x8f, 0xff, 0xfc, 0x8f, 0xff, 0xf0, 0xff, 0x8f, 0x7e, 0xf1, 0x8f, 0x7f, 0xf3, 0x8e, 0xff, 0x80, 0x7f, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0x80, 0x7f, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x8f, 0x7f, 0xff, 0x80, 0xff, 0xf8, 0xff, 0xff, 
0xff, 0xff, 0x7f, 0x8f, 0x7f, 0x81, 0x7f, 0xf0, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0xff, 0x80, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x8e, 0x0f, 0x01, 0x71, 0xf0, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbf, 0x87, 0xf8, 0xf8, 0xff, 0x7f, 0xff, 0xff, 0x7f, 0x7f, 0x8f, 0x8f, 0xf0, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0x7e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0x7e, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0x7f, 0x8f, 0xfe, 0xf0, 0xff, 0xff, 0xbf, 0xff, 0x0e, 0x8f, 0x70, 0x80, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x7f, 0xf1, 0x0f, 0x7f, 0xf1, 0xfe, 0xff, 0x9f, 0xbf, 0x1f, 0xfb, 0x0c, 0xff, 0xf0, 0x8f, 0xff, 0x00, 0xff, 0xb0, 0x3f, 0x71, 0x80, 0xff, 0xf1, 0x8f, 0x7f, 0x81, 0x7e, 0xc1, 0xff, 0xff, 0xff, 0x7f, 0x8f, 0x0f, 0x70, 0xf0, 0x8f, 0x7f, 0x70, 0xcf, 0x8f, 0xff, 0x71, 0x0f, 0x7e, 0xf1, 0x8f, 0x7f, 0xf1, 0x7f, 0xff, 0x7f, 0x0f, 0xff, 0x78, 0x8f, 0x8f, 0x70, 0x70, 0x7f, 0xfe, 0xff, 0x8f, 0xff, 0x70, 0xff, 0xfe, 0xff, 0xff, 0xbe, 0xff, 0xf8, 0xf7, 0x0b, 0xf7, 0x88, 0xf7, 0xf0, 0x8f, 0x8f, 0x70, 0xf0, 0x7e, 0x73, 0xff, 0x8f, 0x7e, 0xf0, 0xff, 0x8f, 0x7f, 0xff, 0x78, 0x77, 0xff, 0x8f, 0x8f, 0xf0, 0xf0, 0xff, 0x8f, 0x77, 0x70, 0x77, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0xfe, 0x8f, 0x7f, 0xff, 0xff, 0x77, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0x77, 0x7f, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x7f, 0xff, 0xbf, 0xbf, 0xff, 0x74, 
0xff, 0xff, 0x8f, 0xff, 0xfc, 0x0f, 0xf3, 0x8c, 0xff, 0xfd, 0xff, 0xff, 0x8f, 0x7f, 0xf1, 0x8f, 0x7d, 0x83, 0xff, 0x7f, 0x77, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf7, 0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x78, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x8f, 0x70, 0x70, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x0f, 0x71, 0xff, 0xf8, 0xf7, 0x7f, 0xff, 0x8f, 0x8f, 0x80, 0x80, 0x81, 0x70, 0x70, 0x7f, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x8f, 0x0f, 0x71, 0xbf, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0xff, 0x9f, 0xff, 0x70, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xfe, 0x8f, 0xff, 0x87, 0x7f, 0x78, 0xfe, 0xff, 0x9e, 0xff, 0xf2, 0x7f, 0xff, 0x0f, 0xff, 0xf0, 0xff, 0xef, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0xfe, 0x8f, 0xff, 0x70, 0xff, 0x7f, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, 0xf7, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x7f, 0x8f, 0x8f, 0xf0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xaf, 0x0f, 0x70, 0xd1, 0xff, 0xf8, 0xfe, 0xff, 0xff, 0xff, 0xf7, 0xaf, 0x8f, 0xfa, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0x8f, 0x0f, 0x79, 0xe1, 0xff, 0xfe, 0xff, 0xbf, 0xff, 0xff, 0x7a, 0x8f, 0xff, 0xf0, 0xef, 0xff, 0xfb, 0xff, 0xaf, 0xff, 0xfb, 0x8f, 0x7f, 0xf1, 0xdf, 0xff, 0xf9, 0xff, 0xbf, 0xff, 0xff, 0xf7, 0xff, 0xaf, 0xff, 0xba, 0xaf, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xef, 0xdf, 0xff, 0xfd, 0xbe, 0xf7, 0xf5, 0xff, 0xf8, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xff, 0x8f, 0xf7, 0xf0, 0xff, 0xff, 0x8f, 0x3f, 0x11, 0xeb, 0xdb, 0xcf, 0x7f, 0xbf, 0xf0, 0x8f, 0x87, 0xf0, 0xf0, 0xff, 0xbf, 0xff, 0x8f, 0xff, 0xf1, 0x8f, 0x0f, 0x31, 0xf1, 0x9f, 0xdf, 0xf9, 0x85, 0x7f, 0xbf, 0xff, 0x8f, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xee, 0xff, 0xfb, 0xf7, 0x8f, 0x7f, 0xb1, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xfd, 0xff, 0xff, 0xff, 0x8f, 0x8f, 0xf0, 0x80, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x2f, 0x71, 0xc1, 0x7f, 0xf1, 0xff, 0xff, 
0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xd1, 0x9f, 0xff, 0x7b, 0xb7, 0xff, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x8f, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xb7, 0xff, 0xff, 0xa4, 0xf7, 0x88, 0xff, 0xf0, 0xaf, 0xbf, 0xfb, 0xef, 0xff, 0xf7, 0xff, 0x8f, 0x7f, 0xf1, 0xff, 0xdd, 0xf7, 0x97, 0x7f, 0xff, 0xff, 0xf7, 0xff, 0x8f, 0xff, 0xf0, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xaf, 0x0f, 0xa0, 0xf0, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x0f, 0x71, 0xb1, 0x37, 0xf7, 0xff, 0xff, 0xb8, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x7f, 0xbf, 0xff, 0xf8, 0xf7, 0xff, 0xa7, 0xff, 0xf0, 0xff, 0xff, 0x8f, 0xfe, 0xf0, 0xff, 0xff, 0x8f, 0x7f, 0xf1, 0xff, 0xff, 0xcf, 0xff, 0xff, 0xf7, 0x9f, 0xf7, 0xf3, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xbf, 0x7f, 0xf9, 0xff, 0xfe, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xbf, 0xff, 0xff, 0xff, 0xaf, 0xff, 0xf0, 0xff, 0xff, 0x9f, 0xfe, 0xf1, 0xff, 0xff, 0xcf, 0x7f, 0xf1, 0xff, 0xff, 0xdf, 0xff, 0xf1, 0xbf, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf0, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xf8, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0xff, 0xff, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xef, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x3f, 0xff, 0xdf, 0xf7, 0xff, 0x0f, 0xfe, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xdf, 0x8e, 0x7f, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xbf, 
0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff, 0xbf, 0x7f, 0xe1, 0xff, 0xdf, 0xff, 0x7f, 0xff, 0xbf, 0xff, 0xa7, 0xff, 0x88, 0xff, 0xf1, 0xfe, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xf0, 0xcf, 0xb1, 0xff, 0xef, 0xff, 0x7f, 0xff,
gpl-2.0
siskin/bluetooth-next
drivers/input/keyboard/pmic8xxx-keypad.c
622
17481
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/regmap.h> #include <linux/of.h> #include <linux/input/matrix_keypad.h> #define PM8XXX_MAX_ROWS 18 #define PM8XXX_MAX_COLS 8 #define PM8XXX_ROW_SHIFT 3 #define PM8XXX_MATRIX_MAX_SIZE (PM8XXX_MAX_ROWS * PM8XXX_MAX_COLS) #define PM8XXX_MIN_ROWS 5 #define PM8XXX_MIN_COLS 5 #define MAX_SCAN_DELAY 128 #define MIN_SCAN_DELAY 1 /* in nanoseconds */ #define MAX_ROW_HOLD_DELAY 122000 #define MIN_ROW_HOLD_DELAY 30500 #define MAX_DEBOUNCE_TIME 20 #define MIN_DEBOUNCE_TIME 5 #define KEYP_CTRL 0x148 #define KEYP_CTRL_EVNTS BIT(0) #define KEYP_CTRL_EVNTS_MASK 0x3 #define KEYP_CTRL_SCAN_COLS_SHIFT 5 #define KEYP_CTRL_SCAN_COLS_MIN 5 #define KEYP_CTRL_SCAN_COLS_BITS 0x3 #define KEYP_CTRL_SCAN_ROWS_SHIFT 2 #define KEYP_CTRL_SCAN_ROWS_MIN 5 #define KEYP_CTRL_SCAN_ROWS_BITS 0x7 #define KEYP_CTRL_KEYP_EN BIT(7) #define KEYP_SCAN 0x149 #define KEYP_SCAN_READ_STATE BIT(0) #define KEYP_SCAN_DBOUNCE_SHIFT 1 #define KEYP_SCAN_PAUSE_SHIFT 3 #define KEYP_SCAN_ROW_HOLD_SHIFT 6 #define KEYP_TEST 0x14A #define KEYP_TEST_CLEAR_RECENT_SCAN BIT(6) #define KEYP_TEST_CLEAR_OLD_SCAN BIT(5) #define KEYP_TEST_READ_RESET BIT(4) #define KEYP_TEST_DTEST_EN BIT(3) #define KEYP_TEST_ABORT_READ BIT(0) #define KEYP_TEST_DBG_SELECT_SHIFT 1 /* bits of 
these registers represent * '0' for key press * '1' for key release */ #define KEYP_RECENT_DATA 0x14B #define KEYP_OLD_DATA 0x14C #define KEYP_CLOCK_FREQ 32768 /** * struct pmic8xxx_kp - internal keypad data structure * @num_cols - number of columns of keypad * @num_rows - number of row of keypad * @input - input device pointer for keypad * @regmap - regmap handle * @key_sense_irq - key press/release irq number * @key_stuck_irq - key stuck notification irq number * @keycodes - array to hold the key codes * @dev - parent device pointer * @keystate - present key press/release state * @stuckstate - present state when key stuck irq * @ctrl_reg - control register value */ struct pmic8xxx_kp { unsigned int num_rows; unsigned int num_cols; struct input_dev *input; struct regmap *regmap; int key_sense_irq; int key_stuck_irq; unsigned short keycodes[PM8XXX_MATRIX_MAX_SIZE]; struct device *dev; u16 keystate[PM8XXX_MAX_ROWS]; u16 stuckstate[PM8XXX_MAX_ROWS]; u8 ctrl_reg; }; static u8 pmic8xxx_col_state(struct pmic8xxx_kp *kp, u8 col) { /* all keys pressed on that particular row? */ if (col == 0x00) return 1 << kp->num_cols; else return col & ((1 << kp->num_cols) - 1); } /* * Synchronous read protocol for RevB0 onwards: * * 1. Write '1' to ReadState bit in KEYP_SCAN register * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode * synchronously * 3. Read rows in old array first if events are more than one * 4. Read rows in recent array * 5. Wait 4*32KHz clocks * 6. Write '0' to ReadState bit of KEYP_SCAN register so that hw can * synchronously exit read mode. 
*/ static int pmic8xxx_chk_sync_read(struct pmic8xxx_kp *kp) { int rc; unsigned int scan_val; rc = regmap_read(kp->regmap, KEYP_SCAN, &scan_val); if (rc < 0) { dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc); return rc; } scan_val |= 0x1; rc = regmap_write(kp->regmap, KEYP_SCAN, scan_val); if (rc < 0) { dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc); return rc; } /* 2 * 32KHz clocks */ udelay((2 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1); return rc; } static int pmic8xxx_kp_read_data(struct pmic8xxx_kp *kp, u16 *state, u16 data_reg, int read_rows) { int rc, row; unsigned int val; for (row = 0; row < read_rows; row++) { rc = regmap_read(kp->regmap, data_reg, &val); if (rc) return rc; dev_dbg(kp->dev, "%d = %d\n", row, val); state[row] = pmic8xxx_col_state(kp, val); } return 0; } static int pmic8xxx_kp_read_matrix(struct pmic8xxx_kp *kp, u16 *new_state, u16 *old_state) { int rc, read_rows; unsigned int scan_val; if (kp->num_rows < PM8XXX_MIN_ROWS) read_rows = PM8XXX_MIN_ROWS; else read_rows = kp->num_rows; pmic8xxx_chk_sync_read(kp); if (old_state) { rc = pmic8xxx_kp_read_data(kp, old_state, KEYP_OLD_DATA, read_rows); if (rc < 0) { dev_err(kp->dev, "Error reading KEYP_OLD_DATA, rc=%d\n", rc); return rc; } } rc = pmic8xxx_kp_read_data(kp, new_state, KEYP_RECENT_DATA, read_rows); if (rc < 0) { dev_err(kp->dev, "Error reading KEYP_RECENT_DATA, rc=%d\n", rc); return rc; } /* 4 * 32KHz clocks */ udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1); rc = regmap_read(kp->regmap, KEYP_SCAN, &scan_val); if (rc < 0) { dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc); return rc; } scan_val &= 0xFE; rc = regmap_write(kp->regmap, KEYP_SCAN, scan_val); if (rc < 0) dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc); return rc; } static void __pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, u16 *new_state, u16 *old_state) { int row, col, code; for (row = 0; row < kp->num_rows; row++) { int bits_changed = 
new_state[row] ^ old_state[row]; if (!bits_changed) continue; for (col = 0; col < kp->num_cols; col++) { if (!(bits_changed & (1 << col))) continue; dev_dbg(kp->dev, "key [%d:%d] %s\n", row, col, !(new_state[row] & (1 << col)) ? "pressed" : "released"); code = MATRIX_SCAN_CODE(row, col, PM8XXX_ROW_SHIFT); input_event(kp->input, EV_MSC, MSC_SCAN, code); input_report_key(kp->input, kp->keycodes[code], !(new_state[row] & (1 << col))); input_sync(kp->input); } } } static bool pmic8xxx_detect_ghost_keys(struct pmic8xxx_kp *kp, u16 *new_state) { int row, found_first = -1; u16 check, row_state; check = 0; for (row = 0; row < kp->num_rows; row++) { row_state = (~new_state[row]) & ((1 << kp->num_cols) - 1); if (hweight16(row_state) > 1) { if (found_first == -1) found_first = row; if (check & row_state) { dev_dbg(kp->dev, "detected ghost key on row[%d]" " and row[%d]\n", found_first, row); return true; } } check |= row_state; } return false; } static int pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, unsigned int events) { u16 new_state[PM8XXX_MAX_ROWS]; u16 old_state[PM8XXX_MAX_ROWS]; int rc; switch (events) { case 0x1: rc = pmic8xxx_kp_read_matrix(kp, new_state, NULL); if (rc < 0) return rc; /* detecting ghost key is not an error */ if (pmic8xxx_detect_ghost_keys(kp, new_state)) return 0; __pmic8xxx_kp_scan_matrix(kp, new_state, kp->keystate); memcpy(kp->keystate, new_state, sizeof(new_state)); break; case 0x3: /* two events - eventcounter is gray-coded */ rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state); if (rc < 0) return rc; __pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate); __pmic8xxx_kp_scan_matrix(kp, new_state, old_state); memcpy(kp->keystate, new_state, sizeof(new_state)); break; case 0x2: dev_dbg(kp->dev, "Some key events were lost\n"); rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state); if (rc < 0) return rc; __pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate); __pmic8xxx_kp_scan_matrix(kp, new_state, old_state); memcpy(kp->keystate, new_state, 
sizeof(new_state)); break; default: rc = -EINVAL; } return rc; } /* * NOTE: We are reading recent and old data registers blindly * whenever key-stuck interrupt happens, because events counter doesn't * get updated when this interrupt happens due to key stuck doesn't get * considered as key state change. * * We are not using old data register contents after they are being read * because it might report the key which was pressed before the key being stuck * as stuck key because it's pressed status is stored in the old data * register. */ static irqreturn_t pmic8xxx_kp_stuck_irq(int irq, void *data) { u16 new_state[PM8XXX_MAX_ROWS]; u16 old_state[PM8XXX_MAX_ROWS]; int rc; struct pmic8xxx_kp *kp = data; rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state); if (rc < 0) { dev_err(kp->dev, "failed to read keypad matrix\n"); return IRQ_HANDLED; } __pmic8xxx_kp_scan_matrix(kp, new_state, kp->stuckstate); return IRQ_HANDLED; } static irqreturn_t pmic8xxx_kp_irq(int irq, void *data) { struct pmic8xxx_kp *kp = data; unsigned int ctrl_val, events; int rc; rc = regmap_read(kp->regmap, KEYP_CTRL, &ctrl_val); if (rc < 0) { dev_err(kp->dev, "failed to read keyp_ctrl register\n"); return IRQ_HANDLED; } events = ctrl_val & KEYP_CTRL_EVNTS_MASK; rc = pmic8xxx_kp_scan_matrix(kp, events); if (rc < 0) dev_err(kp->dev, "failed to scan matrix\n"); return IRQ_HANDLED; } static int pmic8xxx_kpd_init(struct pmic8xxx_kp *kp, struct platform_device *pdev) { const struct device_node *of_node = pdev->dev.of_node; unsigned int scan_delay_ms; unsigned int row_hold_ns; unsigned int debounce_ms; int bits, rc, cycles; u8 scan_val = 0, ctrl_val = 0; static const u8 row_bits[] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, }; /* Find column bits */ if (kp->num_cols < KEYP_CTRL_SCAN_COLS_MIN) bits = 0; else bits = kp->num_cols - KEYP_CTRL_SCAN_COLS_MIN; ctrl_val = (bits & KEYP_CTRL_SCAN_COLS_BITS) << KEYP_CTRL_SCAN_COLS_SHIFT; /* Find row bits */ if (kp->num_rows < KEYP_CTRL_SCAN_ROWS_MIN) bits = 0; 
else bits = row_bits[kp->num_rows - KEYP_CTRL_SCAN_ROWS_MIN]; ctrl_val |= (bits << KEYP_CTRL_SCAN_ROWS_SHIFT); rc = regmap_write(kp->regmap, KEYP_CTRL, ctrl_val); if (rc < 0) { dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc); return rc; } if (of_property_read_u32(of_node, "scan-delay", &scan_delay_ms)) scan_delay_ms = MIN_SCAN_DELAY; if (scan_delay_ms > MAX_SCAN_DELAY || scan_delay_ms < MIN_SCAN_DELAY || !is_power_of_2(scan_delay_ms)) { dev_err(&pdev->dev, "invalid keypad scan time supplied\n"); return -EINVAL; } if (of_property_read_u32(of_node, "row-hold", &row_hold_ns)) row_hold_ns = MIN_ROW_HOLD_DELAY; if (row_hold_ns > MAX_ROW_HOLD_DELAY || row_hold_ns < MIN_ROW_HOLD_DELAY || ((row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) { dev_err(&pdev->dev, "invalid keypad row hold time supplied\n"); return -EINVAL; } if (of_property_read_u32(of_node, "debounce", &debounce_ms)) debounce_ms = MIN_DEBOUNCE_TIME; if (((debounce_ms % 5) != 0) || debounce_ms > MAX_DEBOUNCE_TIME || debounce_ms < MIN_DEBOUNCE_TIME) { dev_err(&pdev->dev, "invalid debounce time supplied\n"); return -EINVAL; } bits = (debounce_ms / 5) - 1; scan_val |= (bits << KEYP_SCAN_DBOUNCE_SHIFT); bits = fls(scan_delay_ms) - 1; scan_val |= (bits << KEYP_SCAN_PAUSE_SHIFT); /* Row hold time is a multiple of 32KHz cycles. 
*/ cycles = (row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC; scan_val |= (cycles << KEYP_SCAN_ROW_HOLD_SHIFT); rc = regmap_write(kp->regmap, KEYP_SCAN, scan_val); if (rc) dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc); return rc; } static int pmic8xxx_kp_enable(struct pmic8xxx_kp *kp) { int rc; kp->ctrl_reg |= KEYP_CTRL_KEYP_EN; rc = regmap_write(kp->regmap, KEYP_CTRL, kp->ctrl_reg); if (rc < 0) dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc); return rc; } static int pmic8xxx_kp_disable(struct pmic8xxx_kp *kp) { int rc; kp->ctrl_reg &= ~KEYP_CTRL_KEYP_EN; rc = regmap_write(kp->regmap, KEYP_CTRL, kp->ctrl_reg); if (rc < 0) return rc; return rc; } static int pmic8xxx_kp_open(struct input_dev *dev) { struct pmic8xxx_kp *kp = input_get_drvdata(dev); return pmic8xxx_kp_enable(kp); } static void pmic8xxx_kp_close(struct input_dev *dev) { struct pmic8xxx_kp *kp = input_get_drvdata(dev); pmic8xxx_kp_disable(kp); } /* * keypad controller should be initialized in the following sequence * only, otherwise it might get into FSM stuck state. * * - Initialize keypad control parameters, like no. of rows, columns, * timing values etc., * - configure rows and column gpios pull up/down. * - set irq edge type. * - enable the keypad controller. 
*/ static int pmic8xxx_kp_probe(struct platform_device *pdev) { unsigned int rows, cols; bool repeat; bool wakeup; struct pmic8xxx_kp *kp; int rc; unsigned int ctrl_val; rc = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols); if (rc) return rc; if (cols > PM8XXX_MAX_COLS || rows > PM8XXX_MAX_ROWS || cols < PM8XXX_MIN_COLS) { dev_err(&pdev->dev, "invalid platform data\n"); return -EINVAL; } repeat = !of_property_read_bool(pdev->dev.of_node, "linux,input-no-autorepeat"); wakeup = of_property_read_bool(pdev->dev.of_node, "linux,keypad-wakeup"); kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL); if (!kp) return -ENOMEM; kp->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!kp->regmap) return -ENODEV; platform_set_drvdata(pdev, kp); kp->num_rows = rows; kp->num_cols = cols; kp->dev = &pdev->dev; kp->input = devm_input_allocate_device(&pdev->dev); if (!kp->input) { dev_err(&pdev->dev, "unable to allocate input device\n"); return -ENOMEM; } kp->key_sense_irq = platform_get_irq(pdev, 0); if (kp->key_sense_irq < 0) { dev_err(&pdev->dev, "unable to get keypad sense irq\n"); return kp->key_sense_irq; } kp->key_stuck_irq = platform_get_irq(pdev, 1); if (kp->key_stuck_irq < 0) { dev_err(&pdev->dev, "unable to get keypad stuck irq\n"); return kp->key_stuck_irq; } kp->input->name = "PMIC8XXX keypad"; kp->input->phys = "pmic8xxx_keypad/input0"; kp->input->id.bustype = BUS_I2C; kp->input->id.version = 0x0001; kp->input->id.product = 0x0001; kp->input->id.vendor = 0x0001; kp->input->open = pmic8xxx_kp_open; kp->input->close = pmic8xxx_kp_close; rc = matrix_keypad_build_keymap(NULL, NULL, PM8XXX_MAX_ROWS, PM8XXX_MAX_COLS, kp->keycodes, kp->input); if (rc) { dev_err(&pdev->dev, "failed to build keymap\n"); return rc; } if (repeat) __set_bit(EV_REP, kp->input->evbit); input_set_capability(kp->input, EV_MSC, MSC_SCAN); input_set_drvdata(kp->input, kp); /* initialize keypad state */ memset(kp->keystate, 0xff, sizeof(kp->keystate)); memset(kp->stuckstate, 0xff, 
sizeof(kp->stuckstate)); rc = pmic8xxx_kpd_init(kp, pdev); if (rc < 0) { dev_err(&pdev->dev, "unable to initialize keypad controller\n"); return rc; } rc = devm_request_any_context_irq(&pdev->dev, kp->key_sense_irq, pmic8xxx_kp_irq, IRQF_TRIGGER_RISING, "pmic-keypad", kp); if (rc < 0) { dev_err(&pdev->dev, "failed to request keypad sense irq\n"); return rc; } rc = devm_request_any_context_irq(&pdev->dev, kp->key_stuck_irq, pmic8xxx_kp_stuck_irq, IRQF_TRIGGER_RISING, "pmic-keypad-stuck", kp); if (rc < 0) { dev_err(&pdev->dev, "failed to request keypad stuck irq\n"); return rc; } rc = regmap_read(kp->regmap, KEYP_CTRL, &ctrl_val); if (rc < 0) { dev_err(&pdev->dev, "failed to read KEYP_CTRL register\n"); return rc; } kp->ctrl_reg = ctrl_val; rc = input_register_device(kp->input); if (rc < 0) { dev_err(&pdev->dev, "unable to register keypad input device\n"); return rc; } device_init_wakeup(&pdev->dev, wakeup); return 0; } #ifdef CONFIG_PM_SLEEP static int pmic8xxx_kp_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); struct input_dev *input_dev = kp->input; if (device_may_wakeup(dev)) { enable_irq_wake(kp->key_sense_irq); } else { mutex_lock(&input_dev->mutex); if (input_dev->users) pmic8xxx_kp_disable(kp); mutex_unlock(&input_dev->mutex); } return 0; } static int pmic8xxx_kp_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); struct input_dev *input_dev = kp->input; if (device_may_wakeup(dev)) { disable_irq_wake(kp->key_sense_irq); } else { mutex_lock(&input_dev->mutex); if (input_dev->users) pmic8xxx_kp_enable(kp); mutex_unlock(&input_dev->mutex); } return 0; } #endif static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops, pmic8xxx_kp_suspend, pmic8xxx_kp_resume); static const struct of_device_id pm8xxx_match_table[] = { { .compatible = "qcom,pm8058-keypad" }, { .compatible = "qcom,pm8921-keypad" }, { } }; 
MODULE_DEVICE_TABLE(of, pm8xxx_match_table); static struct platform_driver pmic8xxx_kp_driver = { .probe = pmic8xxx_kp_probe, .driver = { .name = "pm8xxx-keypad", .pm = &pm8xxx_kp_pm_ops, .of_match_table = pm8xxx_match_table, }, }; module_platform_driver(pmic8xxx_kp_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC8XXX keypad driver"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:pmic8xxx_keypad"); MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>");
gpl-2.0
Michael-Pizzileo/sunxi-2.6.36
drivers/staging/rt2860/rt_main_dev.c
878
19674
/* ************************************************************************* * Ralink Tech Inc. * 5F., No.36, Taiyuan St., Jhubei City, * Hsinchu County 302, * Taiwan, R.O.C. * * (c) Copyright 2002-2007, Ralink Technology, Inc. * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * ************************************************************************* Module Name: rt_main_dev.c Abstract: Create and register network interface. 
Revision History: Who When What -------- ---------- ---------------------------------------------- */ #include "rt_config.h" /*---------------------------------------------------------------------*/ /* Private Variables Used */ /*---------------------------------------------------------------------*/ char *mac = ""; /* default 00:00:00:00:00:00 */ char *hostname = ""; /* default CMPC */ module_param(mac, charp, 0); MODULE_PARM_DESC(mac, "rt28xx: wireless mac addr"); /*---------------------------------------------------------------------*/ /* Prototypes of Functions Used */ /*---------------------------------------------------------------------*/ /* public function prototype */ int rt28xx_close(IN struct net_device *net_dev); int rt28xx_open(struct net_device *net_dev); /* private function prototype */ static int rt28xx_send_packets(IN struct sk_buff *skb_p, IN struct net_device *net_dev); static struct net_device_stats *RT28xx_get_ether_stats(IN struct net_device *net_dev); /* ======================================================================== Routine Description: Close raxx interface. Arguments: *net_dev the raxx interface pointer Return Value: 0 Open OK otherwise Open Fail Note: 1. if open fail, kernel will not call the close function. 2. 
Free memory for (1) Mlme Memory Handler: MlmeHalt() (2) TX & RX: RTMPFreeTxRxRingMemory() (3) BA Reordering: ba_reordering_resource_release() ======================================================================== */ int MainVirtualIF_close(IN struct net_device *net_dev) { struct rt_rtmp_adapter *pAd = NULL; GET_PAD_FROM_NET_DEV(pAd, net_dev); /* Sanity check for pAd */ if (pAd == NULL) return 0; /* close ok */ netif_carrier_off(pAd->net_dev); netif_stop_queue(pAd->net_dev); { BOOLEAN Cancelled; if (INFRA_ON(pAd) && (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST))) { struct rt_mlme_disassoc_req DisReq; struct rt_mlme_queue_elem *MsgElem = (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem), MEM_ALLOC_FLAG); if (MsgElem) { COPY_MAC_ADDR(DisReq.Addr, pAd->CommonCfg.Bssid); DisReq.Reason = REASON_DEAUTH_STA_LEAVING; MsgElem->Machine = ASSOC_STATE_MACHINE; MsgElem->MsgType = MT2_MLME_DISASSOC_REQ; MsgElem->MsgLen = sizeof(struct rt_mlme_disassoc_req); NdisMoveMemory(MsgElem->Msg, &DisReq, sizeof (struct rt_mlme_disassoc_req)); /* Prevent to connect AP again in STAMlmePeriodicExec */ pAd->MlmeAux.AutoReconnectSsidLen = 32; NdisZeroMemory(pAd->MlmeAux.AutoReconnectSsid, pAd->MlmeAux. AutoReconnectSsidLen); pAd->Mlme.CntlMachine.CurrState = CNTL_WAIT_OID_DISASSOC; MlmeDisassocReqAction(pAd, MsgElem); kfree(MsgElem); } RTMPusecDelay(1000); } RTMPCancelTimer(&pAd->StaCfg.StaQuickResponeForRateUpTimer, &Cancelled); RTMPCancelTimer(&pAd->StaCfg.WpaDisassocAndBlockAssocTimer, &Cancelled); } VIRTUAL_IF_DOWN(pAd); RT_MOD_DEC_USE_COUNT(); return 0; /* close ok */ } /* ======================================================================== Routine Description: Open raxx interface. Arguments: *net_dev the raxx interface pointer Return Value: 0 Open OK otherwise Open Fail Note: 1. if open fail, kernel will not call the close function. 2. 
Free memory for (1) Mlme Memory Handler: MlmeHalt() (2) TX & RX: RTMPFreeTxRxRingMemory() (3) BA Reordering: ba_reordering_resource_release() ======================================================================== */ int MainVirtualIF_open(IN struct net_device *net_dev) { struct rt_rtmp_adapter *pAd = NULL; GET_PAD_FROM_NET_DEV(pAd, net_dev); /* Sanity check for pAd */ if (pAd == NULL) return 0; /* close ok */ if (VIRTUAL_IF_UP(pAd) != 0) return -1; /* increase MODULE use count */ RT_MOD_INC_USE_COUNT(); netif_start_queue(net_dev); netif_carrier_on(net_dev); netif_wake_queue(net_dev); return 0; } /* ======================================================================== Routine Description: Close raxx interface. Arguments: *net_dev the raxx interface pointer Return Value: 0 Open OK otherwise Open Fail Note: 1. if open fail, kernel will not call the close function. 2. Free memory for (1) Mlme Memory Handler: MlmeHalt() (2) TX & RX: RTMPFreeTxRxRingMemory() (3) BA Reordering: ba_reordering_resource_release() ======================================================================== */ int rt28xx_close(struct net_device *dev) { struct net_device *net_dev = (struct net_device *)dev; struct rt_rtmp_adapter *pAd = NULL; BOOLEAN Cancelled; u32 i = 0; #ifdef RTMP_MAC_USB DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); DECLARE_WAITQUEUE(wait, current); #endif /* RTMP_MAC_USB // */ GET_PAD_FROM_NET_DEV(pAd, net_dev); DBGPRINT(RT_DEBUG_TRACE, ("===> rt28xx_close\n")); Cancelled = FALSE; /* Sanity check for pAd */ if (pAd == NULL) return 0; /* close ok */ { #ifdef RTMP_MAC_PCI RTMPPCIeLinkCtrlValueRestore(pAd, RESTORE_CLOSE); #endif /* RTMP_MAC_PCI // */ /* If dirver doesn't wake up firmware here, */ /* NICLoadFirmware will hang forever when interface is up again. 
*/ if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE)) { AsicForceWakeup(pAd, TRUE); } #ifdef RTMP_MAC_USB RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_REMOVE_IN_PROGRESS); #endif /* RTMP_MAC_USB // */ MlmeRadioOff(pAd); #ifdef RTMP_MAC_PCI pAd->bPCIclkOff = FALSE; #endif /* RTMP_MAC_PCI // */ } RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS); for (i = 0; i < NUM_OF_TX_RING; i++) { while (pAd->DeQueueRunning[i] == TRUE) { DBGPRINT(RT_DEBUG_TRACE, ("Waiting for TxQueue[%d] done..........\n", i)); RTMPusecDelay(1000); } } #ifdef RTMP_MAC_USB /* ensure there are no more active urbs. */ add_wait_queue(&unlink_wakeup, &wait); pAd->wait = &unlink_wakeup; /* maybe wait for deletions to finish. */ i = 0; /*while((i < 25) && atomic_read(&pAd->PendingRx) > 0) */ while (i < 25) { unsigned long IrqFlags; RTMP_IRQ_LOCK(&pAd->BulkInLock, IrqFlags); if (pAd->PendingRx == 0) { RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags); break; } RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags); msleep(UNLINK_TIMEOUT_MS); /*Time in millisecond */ i++; } pAd->wait = NULL; remove_wait_queue(&unlink_wakeup, &wait); #endif /* RTMP_MAC_USB // */ /* Stop Mlme state machine */ MlmeHalt(pAd); /* Close net tasklets */ RtmpNetTaskExit(pAd); { MacTableReset(pAd); } MeasureReqTabExit(pAd); TpcReqTabExit(pAd); /* Close kernel threads */ RtmpMgmtTaskExit(pAd); #ifdef RTMP_MAC_PCI { BOOLEAN brc; /* unsigned long Value; */ if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE)) { RTMP_ASIC_INTERRUPT_DISABLE(pAd); } /* Receive packets to clear DMA index after disable interrupt. */ /*RTMPHandleRxDoneInterrupt(pAd); */ /* put to radio off to save power when driver unload. After radiooff, can't write /read register. So need to finish all */ /* register access before Radio off. 
*/ brc = RT28xxPciAsicRadioOff(pAd, RTMP_HALT, 0); /*In solution 3 of 3090F, the bPCIclkOff will be set to TRUE after calling RT28xxPciAsicRadioOff */ pAd->bPCIclkOff = FALSE; if (brc == FALSE) { DBGPRINT(RT_DEBUG_ERROR, ("%s call RT28xxPciAsicRadioOff fail!\n", __func__)); } } /* if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE)) { RTMP_ASIC_INTERRUPT_DISABLE(pAd); } // Disable Rx, register value supposed will remain after reset NICIssueReset(pAd); */ #endif /* RTMP_MAC_PCI // */ /* Free IRQ */ if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE)) { #ifdef RTMP_MAC_PCI /* Deregister interrupt function */ RtmpOSIRQRelease(net_dev); #endif /* RTMP_MAC_PCI // */ RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE); } /* Free Ring or USB buffers */ RTMPFreeTxRxRingMemory(pAd); RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS); /* Free BA reorder resource */ ba_reordering_resource_release(pAd); RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_START_UP); /*+++Modify by woody to solve the bulk fail+++*/ { } DBGPRINT(RT_DEBUG_TRACE, ("<=== rt28xx_close\n")); return 0; /* close ok */ } /* End of rt28xx_close */ /* ======================================================================== Routine Description: Open raxx interface. 
Arguments: *net_dev the raxx interface pointer Return Value: 0 Open OK otherwise Open Fail Note: ======================================================================== */ int rt28xx_open(struct net_device *dev) { struct net_device *net_dev = (struct net_device *)dev; struct rt_rtmp_adapter *pAd = NULL; int retval = 0; /*struct os_cookie *pObj; */ GET_PAD_FROM_NET_DEV(pAd, net_dev); /* Sanity check for pAd */ if (pAd == NULL) { /* if 1st open fail, pAd will be free; So the net_dev->ml_priv will be NULL in 2rd open */ return -1; } if (net_dev->priv_flags == INT_MAIN) { if (pAd->OpMode == OPMODE_STA) net_dev->wireless_handlers = (struct iw_handler_def *)&rt28xx_iw_handler_def; } /* Request interrupt service routine for PCI device */ /* register the interrupt routine with the os */ RtmpOSIRQRequest(net_dev); /* Init IRQ parameters stored in pAd */ RTMP_IRQ_INIT(pAd); /* Chip & other init */ if (rt28xx_init(pAd, mac, hostname) == FALSE) goto err; /* Enable Interrupt */ RTMP_IRQ_ENABLE(pAd); /* Now Enable RxTx */ RTMPEnableRxTx(pAd); RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_START_UP); { u32 reg = 0; RTMP_IO_READ32(pAd, 0x1300, &reg); /* clear garbage interrupts */ printk("0x1300 = %08x\n", reg); } { /* u32 reg; */ /* u8 byte; */ /* u16 tmp; */ /* RTMP_IO_READ32(pAd, XIFS_TIME_CFG, &reg); */ /* tmp = 0x0805; */ /* reg = (reg & 0xffff0000) | tmp; */ /* RTMP_IO_WRITE32(pAd, XIFS_TIME_CFG, reg); */ } #ifdef RTMP_MAC_PCI RTMPInitPCIeLinkCtrlValue(pAd); #endif /* RTMP_MAC_PCI // */ return retval; err: /*+++Add by shiang, move from rt28xx_init() to here. */ RtmpOSIRQRelease(net_dev); /*---Add by shiang, move from rt28xx_init() to here. 
*/ return -1; } /* End of rt28xx_open */ static const struct net_device_ops rt2860_netdev_ops = { .ndo_open = MainVirtualIF_open, .ndo_stop = MainVirtualIF_close, .ndo_do_ioctl = rt28xx_sta_ioctl, .ndo_get_stats = RT28xx_get_ether_stats, .ndo_validate_addr = NULL, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, .ndo_start_xmit = rt28xx_send_packets, }; struct net_device *RtmpPhyNetDevInit(struct rt_rtmp_adapter *pAd, struct rt_rtmp_os_netdev_op_hook *pNetDevHook) { struct net_device *net_dev = NULL; /* int Status; */ net_dev = RtmpOSNetDevCreate(pAd, INT_MAIN, 0, sizeof(struct rt_rtmp_adapter *), INF_MAIN_DEV_NAME); if (net_dev == NULL) { printk ("RtmpPhyNetDevInit(): creation failed for main physical net device!\n"); return NULL; } NdisZeroMemory((unsigned char *)pNetDevHook, sizeof(struct rt_rtmp_os_netdev_op_hook)); pNetDevHook->netdev_ops = &rt2860_netdev_ops; pNetDevHook->priv_flags = INT_MAIN; pNetDevHook->needProtcted = FALSE; net_dev->ml_priv = (void *)pAd; pAd->net_dev = net_dev; netif_stop_queue(net_dev); return net_dev; } /* ======================================================================== Routine Description: The entry point for Linux kernel sent packet to our driver. Arguments: sk_buff *skb the pointer refer to a sk_buffer. Return Value: 0 Note: This function is the entry point of Tx Path for Os delivery packet to our driver. You only can put OS-depened & STA/AP common handle procedures in here. 
======================================================================== */ int rt28xx_packet_xmit(struct sk_buff *skb) { struct net_device *net_dev = skb->dev; struct rt_rtmp_adapter *pAd = NULL; int status = NETDEV_TX_OK; void *pPacket = (void *)skb; GET_PAD_FROM_NET_DEV(pAd, net_dev); /* RT2870STA does this in RTMPSendPackets() */ { /* Drop send request since we are in monitor mode */ if (MONITOR_ON(pAd)) { RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE); goto done; } } /* EapolStart size is 18 */ if (skb->len < 14) { /*printk("bad packet size: %d\n", pkt->len); */ hex_dump("bad packet", skb->data, skb->len); RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE); goto done; } RTMP_SET_PACKET_5VT(pPacket, 0); STASendPackets((void *)pAd, (void **)&pPacket, 1); status = NETDEV_TX_OK; done: return status; } /* ======================================================================== Routine Description: Send a packet to WLAN. Arguments: skb_p points to our adapter dev_p which WLAN network interface Return Value: 0: transmit successfully otherwise: transmit fail Note: ======================================================================== */ static int rt28xx_send_packets(IN struct sk_buff *skb_p, IN struct net_device *net_dev) { struct rt_rtmp_adapter *pAd = NULL; GET_PAD_FROM_NET_DEV(pAd, net_dev); if (!(net_dev->flags & IFF_UP)) { RELEASE_NDIS_PACKET(pAd, (void *)skb_p, NDIS_STATUS_FAILURE); return NETDEV_TX_OK; } NdisZeroMemory((u8 *)&skb_p->cb[CB_OFF], 15); RTMP_SET_PACKET_NET_DEVICE_MBSSID(skb_p, MAIN_MBSSID); return rt28xx_packet_xmit(skb_p); } /* This function will be called when query /proc */ struct iw_statistics *rt28xx_get_wireless_stats(IN struct net_device *net_dev) { struct rt_rtmp_adapter *pAd = NULL; GET_PAD_FROM_NET_DEV(pAd, net_dev); DBGPRINT(RT_DEBUG_TRACE, ("rt28xx_get_wireless_stats --->\n")); pAd->iw_stats.status = 0; /* Status - device dependent for now */ /* link quality */ if (pAd->OpMode == OPMODE_STA) pAd->iw_stats.qual.qual = 
((pAd->Mlme.ChannelQuality * 12) / 10 + 10); if (pAd->iw_stats.qual.qual > 100) pAd->iw_stats.qual.qual = 100; if (pAd->OpMode == OPMODE_STA) { pAd->iw_stats.qual.level = RTMPMaxRssi(pAd, pAd->StaCfg.RssiSample.LastRssi0, pAd->StaCfg.RssiSample.LastRssi1, pAd->StaCfg.RssiSample.LastRssi2); } pAd->iw_stats.qual.noise = pAd->BbpWriteLatch[66]; /* noise level (dBm) */ pAd->iw_stats.qual.noise += 256 - 143; pAd->iw_stats.qual.updated = 1; /* Flags to know if updated */ #ifdef IW_QUAL_DBM pAd->iw_stats.qual.updated |= IW_QUAL_DBM; /* Level + Noise are dBm */ #endif /* IW_QUAL_DBM // */ pAd->iw_stats.discard.nwid = 0; /* Rx : Wrong nwid/essid */ pAd->iw_stats.miss.beacon = 0; /* Missed beacons/superframe */ DBGPRINT(RT_DEBUG_TRACE, ("<--- rt28xx_get_wireless_stats\n")); return &pAd->iw_stats; } void tbtt_tasklet(unsigned long data) { /*#define MAX_TX_IN_TBTT (16) */ } /* ======================================================================== Routine Description: return ethernet statistics counter Arguments: net_dev Pointer to net_device Return Value: net_device_stats* Note: ======================================================================== */ static struct net_device_stats *RT28xx_get_ether_stats(IN struct net_device *net_dev) { struct rt_rtmp_adapter *pAd = NULL; if (net_dev) GET_PAD_FROM_NET_DEV(pAd, net_dev); if (pAd) { pAd->stats.rx_packets = pAd->WlanCounters.ReceivedFragmentCount.QuadPart; pAd->stats.tx_packets = pAd->WlanCounters.TransmittedFragmentCount.QuadPart; pAd->stats.rx_bytes = pAd->RalinkCounters.ReceivedByteCount; pAd->stats.tx_bytes = pAd->RalinkCounters.TransmittedByteCount; pAd->stats.rx_errors = pAd->Counters8023.RxErrors; pAd->stats.tx_errors = pAd->Counters8023.TxErrors; pAd->stats.rx_dropped = 0; pAd->stats.tx_dropped = 0; pAd->stats.multicast = pAd->WlanCounters.MulticastReceivedFrameCount.QuadPart; /* multicast packets received */ pAd->stats.collisions = pAd->Counters8023.OneCollision + pAd->Counters8023.MoreCollisions; /* Collision 
			   packets */
		pAd->stats.rx_length_errors = 0;
		pAd->stats.rx_over_errors = pAd->Counters8023.RxNoBuffer;	/* receiver ring buff overflow */
		pAd->stats.rx_crc_errors = 0;	/*pAd->WlanCounters.FCSErrorCount;     // recved pkt with crc error */
		pAd->stats.rx_frame_errors = pAd->Counters8023.RcvAlignmentErrors;	/* recv'd frame alignment error */
		pAd->stats.rx_fifo_errors = pAd->Counters8023.RxNoBuffer;	/* recv'r fifo overrun */
		pAd->stats.rx_missed_errors = 0;	/* receiver missed packet */

		/* detailed tx_errors: the hardware does not report these
		 * individually, so they are all hard-wired to zero. */
		pAd->stats.tx_aborted_errors = 0;
		pAd->stats.tx_carrier_errors = 0;
		pAd->stats.tx_fifo_errors = 0;
		pAd->stats.tx_heartbeat_errors = 0;
		pAd->stats.tx_window_errors = 0;	/* for cslip etc */

		pAd->stats.rx_compressed = 0;
		pAd->stats.tx_compressed = 0;

		return &pAd->stats;
	} else
		return NULL;	/* net_dev carried no adapter pointer */
}

/* Detach the main physical net device from the OS.  Counterpart of
 * RtmpPhyNetDevInit(); always returns TRUE. */
BOOLEAN RtmpPhyNetDevExit(struct rt_rtmp_adapter *pAd, struct net_device *net_dev)
{

	/* Unregister network device */
	if (net_dev != NULL) {
		printk
		    ("RtmpOSNetDevDetach(): RtmpOSNetDeviceDetach(), dev->name=%s!\n",
		     net_dev->name);
		RtmpOSNetDevDetach(net_dev);
	}

	return TRUE;
}

/*
========================================================================
Routine Description:
    Allocate memory for adapter control block.

Arguments:
    handle				OS cookie stored into the new block
    ppAd				Out: receives the vmalloc'ed adapter block

Return Value:
    NDIS_STATUS_SUCCESS
    NDIS_STATUS_FAILURE
    NDIS_STATUS_RESOURCES

Note:
    The block is zeroed before use.  NOTE(review): presumably freed by
    the matching adapter-release path via vfree() -- verify the caller.
========================================================================
*/
int AdapterBlockAllocateMemory(void *handle, void ** ppAd)
{

	*ppAd = (void *)vmalloc(sizeof(struct rt_rtmp_adapter));	/*pci_alloc_consistent(pci_dev, sizeof(struct rt_rtmp_adapter), phy_addr); */

	if (*ppAd) {
		NdisZeroMemory(*ppAd, sizeof(struct rt_rtmp_adapter));
		((struct rt_rtmp_adapter *)*ppAd)->OS_Cookie = handle;
		return NDIS_STATUS_SUCCESS;
	} else {
		return NDIS_STATUS_FAILURE;
	}
}
gpl-2.0
Outernet-Project/outernetrx-linux
drivers/usb/gadget/mv_udc_core.c
2158
58068
/* * Copyright (C) 2011 Marvell International Ltd. All rights reserved. * Author: Chao Xie <chao.xie@marvell.com> * Neil Zhang <zhangwm@marvell.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> #include <linux/pm.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/platform_data/mv_usb.h> #include <asm/unaligned.h> #include "mv_udc.h" #define DRIVER_DESC "Marvell PXA USB Device Controller driver" #define DRIVER_VERSION "8 Nov 2010" #define ep_dir(ep) (((ep)->ep_num == 0) ? 
\ ((ep)->udc->ep0_dir) : ((ep)->direction)) /* timeout value -- usec */ #define RESET_TIMEOUT 10000 #define FLUSH_TIMEOUT 10000 #define EPSTATUS_TIMEOUT 10000 #define PRIME_TIMEOUT 10000 #define READSAFE_TIMEOUT 1000 #define LOOPS_USEC_SHIFT 1 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT) #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT) static DECLARE_COMPLETION(release_done); static const char driver_name[] = "mv_udc"; static const char driver_desc[] = DRIVER_DESC; static void nuke(struct mv_ep *ep, int status); static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver); /* for endpoint 0 operations */ static const struct usb_endpoint_descriptor mv_ep0_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 0, .bmAttributes = USB_ENDPOINT_XFER_CONTROL, .wMaxPacketSize = EP0_MAX_PKT_SIZE, }; static void ep0_reset(struct mv_udc *udc) { struct mv_ep *ep; u32 epctrlx; int i = 0; /* ep0 in and out */ for (i = 0; i < 2; i++) { ep = &udc->eps[i]; ep->udc = udc; /* ep0 dQH */ ep->dqh = &udc->ep_dqh[i]; /* configure ep0 endpoint capabilities in dQH */ ep->dqh->max_packet_length = (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) | EP_QUEUE_HEAD_IOS; ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE; epctrlx = readl(&udc->op_regs->epctrlx[0]); if (i) { /* TX */ epctrlx |= EPCTRL_TX_ENABLE | (USB_ENDPOINT_XFER_CONTROL << EPCTRL_TX_EP_TYPE_SHIFT); } else { /* RX */ epctrlx |= EPCTRL_RX_ENABLE | (USB_ENDPOINT_XFER_CONTROL << EPCTRL_RX_EP_TYPE_SHIFT); } writel(epctrlx, &udc->op_regs->epctrlx[0]); } } /* protocol ep0 stall, will automatically be cleared on new transaction */ static void ep0_stall(struct mv_udc *udc) { u32 epctrlx; /* set TX and RX to stall */ epctrlx = readl(&udc->op_regs->epctrlx[0]); epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL; writel(epctrlx, &udc->op_regs->epctrlx[0]); /* update ep0 state */ udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = EP_DIR_OUT; } static int 
process_ep_req(struct mv_udc *udc, int index, struct mv_req *curr_req) { struct mv_dtd *curr_dtd; struct mv_dqh *curr_dqh; int td_complete, actual, remaining_length; int i, direction; int retval = 0; u32 errors; u32 bit_pos; curr_dqh = &udc->ep_dqh[index]; direction = index % 2; curr_dtd = curr_req->head; td_complete = 0; actual = curr_req->req.length; for (i = 0; i < curr_req->dtd_count; i++) { if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) { dev_dbg(&udc->dev->dev, "%s, dTD not completed\n", udc->eps[index].name); return 1; } errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK; if (!errors) { remaining_length = (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS; actual -= remaining_length; if (remaining_length) { if (direction) { dev_dbg(&udc->dev->dev, "TX dTD remains data\n"); retval = -EPROTO; break; } else break; } } else { dev_info(&udc->dev->dev, "complete_tr error: ep=%d %s: error = 0x%x\n", index >> 1, direction ? "SEND" : "RECV", errors); if (errors & DTD_STATUS_HALTED) { /* Clear the errors and Halt condition */ curr_dqh->size_ioc_int_sts &= ~errors; retval = -EPIPE; } else if (errors & DTD_STATUS_DATA_BUFF_ERR) { retval = -EPROTO; } else if (errors & DTD_STATUS_TRANSACTION_ERR) { retval = -EILSEQ; } } if (i != curr_req->dtd_count - 1) curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt; } if (retval) return retval; if (direction == EP_DIR_OUT) bit_pos = 1 << curr_req->ep->ep_num; else bit_pos = 1 << (16 + curr_req->ep->ep_num); while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) { if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) { while (readl(&udc->op_regs->epstatus) & bit_pos) udelay(1); break; } udelay(1); } curr_req->req.actual = actual; return 0; } /* * done() - retire a request; caller blocked irqs * @status : request status to be set, only works when * request is still in progress. 
 */
/* Caller must hold udc->lock with IRQs disabled; the lock is dropped and
 * re-taken around the gadget-layer completion callback (see the
 * __releases/__acquires annotations). */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Removed the req from fsl_ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request: walk the virtual dTD chain and return
	 * each descriptor to the DMA pool. */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);
	/*
	 * complete() is from gadget layer,
	 * eg fsg->bulk_in_complete()
	 */
	if (req->req.complete)
		req->req.complete(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}

/* Link a new request's dTD chain into the endpoint queue head and prime
 * the endpoint.  Uses the ATDTW tripwire protocol when appending to a
 * non-empty queue. */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ?
0 : 16) + ep->ep_num); /* check if the pipe is empty */ if (!(list_empty(&ep->queue))) { struct mv_req *lastreq; lastreq = list_entry(ep->queue.prev, struct mv_req, queue); lastreq->tail->dtd_next = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; wmb(); if (readl(&udc->op_regs->epprime) & bit_pos) goto done; loops = LOOPS(READSAFE_TIMEOUT); while (1) { /* start with setting the semaphores */ usbcmd = readl(&udc->op_regs->usbcmd); usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET; writel(usbcmd, &udc->op_regs->usbcmd); /* read the endpoint status */ epstatus = readl(&udc->op_regs->epstatus) & bit_pos; /* * Reread the ATDTW semaphore bit to check if it is * cleared. When hardware see a hazard, it will clear * the bit or else we remain set to 1 and we can * proceed with priming of endpoint if not already * primed. */ if (readl(&udc->op_regs->usbcmd) & USBCMD_ATDTW_TRIPWIRE_SET) break; loops--; if (loops == 0) { dev_err(&udc->dev->dev, "Timeout for ATDTW_TRIPWIRE...\n"); retval = -ETIME; goto done; } udelay(LOOPS_USEC); } /* Clear the semaphore */ usbcmd = readl(&udc->op_regs->usbcmd); usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR; writel(usbcmd, &udc->op_regs->usbcmd); if (epstatus) goto done; } /* Write dQH next pointer and terminate bit to 0 */ dqh->next_dtd_ptr = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; /* clear active and halt bit, in case set from a previous error */ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED); /* Ensure that updates to the QH will occure before priming. */ wmb(); /* Prime the Endpoint */ writel(bit_pos, &udc->op_regs->epprime); done: return retval; } static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length, dma_addr_t *dma, int *is_last) { struct mv_dtd *dtd; struct mv_udc *udc; struct mv_dqh *dqh; u32 temp, mult = 0; /* how big will this transfer be? 
*/ if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) { dqh = req->ep->dqh; mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS) & 0x3; *length = min(req->req.length - req->req.actual, (unsigned)(mult * req->ep->ep.maxpacket)); } else *length = min(req->req.length - req->req.actual, (unsigned)EP_MAX_LENGTH_TRANSFER); udc = req->ep->udc; /* * Be careful that no _GFP_HIGHMEM is set, * or we can not use dma_to_virt */ dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma); if (dtd == NULL) return dtd; dtd->td_dma = *dma; /* initialize buffer page pointers */ temp = (u32)(req->req.dma + req->req.actual); dtd->buff_ptr0 = cpu_to_le32(temp); temp &= ~0xFFF; dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000); dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000); dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000); dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000); req->req.actual += *length; /* zlp is needed if req->req.zero is set */ if (req->req.zero) { if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) *is_last = 1; else *is_last = 0; } else if (req->req.length == req->req.actual) *is_last = 1; else *is_last = 0; /* Fill in the transfer size; set active bit */ temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE); /* Enable interrupt for the last dtd of a request */ if (*is_last && !req->req.no_interrupt) temp |= DTD_IOC; temp |= mult << 10; dtd->size_ioc_sts = temp; mb(); return dtd; } /* generate dTD linked list for a request */ static int req_to_dtd(struct mv_req *req) { unsigned count; int is_last, is_first = 1; struct mv_dtd *dtd, *last_dtd = NULL; struct mv_udc *udc; dma_addr_t dma; udc = req->ep->udc; do { dtd = build_dtd(req, &count, &dma, &is_last); if (dtd == NULL) return -ENOMEM; if (is_first) { is_first = 0; req->head = dtd; } else { last_dtd->dtd_next = dma; last_dtd->next_dtd_virt = dtd; } last_dtd = dtd; req->dtd_count++; } while (!is_last); /* set terminate bit to 1 for the last dTD */ dtd->dtd_next = DTD_NEXT_TERMINATE; req->tail = dtd; return 0; } static int 
mv_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct mv_udc *udc; struct mv_ep *ep; struct mv_dqh *dqh; u16 max = 0; u32 bit_pos, epctrlx, direction; unsigned char zlt = 0, ios = 0, mult = 0; unsigned long flags; ep = container_of(_ep, struct mv_ep, ep); udc = ep->udc; if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) return -EINVAL; if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; direction = ep_dir(ep); max = usb_endpoint_maxp(desc); /* * disable HW zero length termination select * driver handles zero length packet through req->req.zero */ zlt = 1; bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num); /* Check if the Endpoint is Primed */ if ((readl(&udc->op_regs->epprime) & bit_pos) || (readl(&udc->op_regs->epstatus) & bit_pos)) { dev_info(&udc->dev->dev, "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x," " ENDPTSTATUS=0x%x, bit_pos=0x%x\n", (unsigned)ep->ep_num, direction ? "SEND" : "RECV", (unsigned)readl(&udc->op_regs->epprime), (unsigned)readl(&udc->op_regs->epstatus), (unsigned)bit_pos); goto en_done; } /* Set the max packet length, interrupt on Setup and Mult fields */ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { case USB_ENDPOINT_XFER_BULK: zlt = 1; mult = 0; break; case USB_ENDPOINT_XFER_CONTROL: ios = 1; case USB_ENDPOINT_XFER_INT: mult = 0; break; case USB_ENDPOINT_XFER_ISOC: /* Calculate transactions needed for high bandwidth iso */ mult = (unsigned char)(1 + ((max >> 11) & 0x03)); max = max & 0x7ff; /* bit 0~10 */ /* 3 transactions at most */ if (mult > 3) goto en_done; break; default: goto en_done; } spin_lock_irqsave(&udc->lock, flags); /* Get the endpoint queue head address */ dqh = ep->dqh; dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) | (mult << EP_QUEUE_HEAD_MULT_POS) | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0) | (ios ? 
EP_QUEUE_HEAD_IOS : 0); dqh->next_dtd_ptr = 1; dqh->size_ioc_int_sts = 0; ep->ep.maxpacket = max; ep->ep.desc = desc; ep->stopped = 0; /* Enable the endpoint for Rx or Tx and set the endpoint type */ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if (direction == EP_DIR_IN) { epctrlx &= ~EPCTRL_TX_ALL_MASK; epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) << EPCTRL_TX_EP_TYPE_SHIFT); } else { epctrlx &= ~EPCTRL_RX_ALL_MASK; epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) << EPCTRL_RX_EP_TYPE_SHIFT); } writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); /* * Implement Guideline (GL# USB-7) The unused endpoint type must * be programmed to bulk. */ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if ((epctrlx & EPCTRL_RX_ENABLE) == 0) { epctrlx |= (USB_ENDPOINT_XFER_BULK << EPCTRL_RX_EP_TYPE_SHIFT); writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); } epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if ((epctrlx & EPCTRL_TX_ENABLE) == 0) { epctrlx |= (USB_ENDPOINT_XFER_BULK << EPCTRL_TX_EP_TYPE_SHIFT); writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); } spin_unlock_irqrestore(&udc->lock, flags); return 0; en_done: return -EINVAL; } static int mv_ep_disable(struct usb_ep *_ep) { struct mv_udc *udc; struct mv_ep *ep; struct mv_dqh *dqh; u32 bit_pos, epctrlx, direction; unsigned long flags; ep = container_of(_ep, struct mv_ep, ep); if ((_ep == NULL) || !ep->ep.desc) return -EINVAL; udc = ep->udc; /* Get the endpoint queue head address */ dqh = ep->dqh; spin_lock_irqsave(&udc->lock, flags); direction = ep_dir(ep); bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num); /* Reset the max packet length and the interrupt on Setup */ dqh->max_packet_length = 0; /* Disable the endpoint for Rx or Tx and reset the endpoint type */ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); epctrlx &= ~((direction == EP_DIR_IN) ? 
(EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE) : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE)); writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); /* nuke all pending requests (does flush) */ nuke(ep, -ESHUTDOWN); ep->ep.desc = NULL; ep->stopped = 1; spin_unlock_irqrestore(&udc->lock, flags); return 0; } static struct usb_request * mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct mv_req *req = NULL; req = kzalloc(sizeof *req, gfp_flags); if (!req) return NULL; req->req.dma = DMA_ADDR_INVALID; INIT_LIST_HEAD(&req->queue); return &req->req; } static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req) { struct mv_req *req = NULL; req = container_of(_req, struct mv_req, req); if (_req) kfree(req); } static void mv_ep_fifo_flush(struct usb_ep *_ep) { struct mv_udc *udc; u32 bit_pos, direction; struct mv_ep *ep; unsigned int loops; if (!_ep) return; ep = container_of(_ep, struct mv_ep, ep); if (!ep->ep.desc) return; udc = ep->udc; direction = ep_dir(ep); if (ep->ep_num == 0) bit_pos = (1 << 16) | 1; else if (direction == EP_DIR_OUT) bit_pos = 1 << ep->ep_num; else bit_pos = 1 << (16 + ep->ep_num); loops = LOOPS(EPSTATUS_TIMEOUT); do { unsigned int inter_loops; if (loops == 0) { dev_err(&udc->dev->dev, "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n", (unsigned)readl(&udc->op_regs->epstatus), (unsigned)bit_pos); return; } /* Write 1 to the Flush register */ writel(bit_pos, &udc->op_regs->epflush); /* Wait until flushing completed */ inter_loops = LOOPS(FLUSH_TIMEOUT); while (readl(&udc->op_regs->epflush)) { /* * ENDPTFLUSH bit should be cleared to indicate this * operation is complete */ if (inter_loops == 0) { dev_err(&udc->dev->dev, "TIMEOUT for ENDPTFLUSH=0x%x," "bit_pos=0x%x\n", (unsigned)readl(&udc->op_regs->epflush), (unsigned)bit_pos); return; } inter_loops--; udelay(LOOPS_USEC); } loops--; } while (readl(&udc->op_regs->epstatus) & bit_pos); } /* queues (submits) an I/O request to an endpoint */ static int mv_ep_queue(struct usb_ep *_ep, struct usb_request 
*_req, gfp_t gfp_flags) { struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); struct mv_req *req = container_of(_req, struct mv_req, req); struct mv_udc *udc = ep->udc; unsigned long flags; int retval; /* catch various bogus parameters */ if (!_req || !req->req.complete || !req->req.buf || !list_empty(&req->queue)) { dev_err(&udc->dev->dev, "%s, bad params", __func__); return -EINVAL; } if (unlikely(!_ep || !ep->ep.desc)) { dev_err(&udc->dev->dev, "%s, bad ep", __func__); return -EINVAL; } udc = ep->udc; if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; req->ep = ep; /* map virtual address to hardware */ retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep)); if (retval) return retval; req->req.status = -EINPROGRESS; req->req.actual = 0; req->dtd_count = 0; spin_lock_irqsave(&udc->lock, flags); /* build dtds and push them to device queue */ if (!req_to_dtd(req)) { retval = queue_dtd(ep, req); if (retval) { spin_unlock_irqrestore(&udc->lock, flags); dev_err(&udc->dev->dev, "Failed to queue dtd\n"); goto err_unmap_dma; } } else { spin_unlock_irqrestore(&udc->lock, flags); dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n"); retval = -ENOMEM; goto err_unmap_dma; } /* Update ep0 state */ if (ep->ep_num == 0) udc->ep0_state = DATA_STATE_XMIT; /* irq handler advances the queue */ list_add_tail(&req->queue, &ep->queue); spin_unlock_irqrestore(&udc->lock, flags); return 0; err_unmap_dma: usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep)); return retval; } static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req) { struct mv_dqh *dqh = ep->dqh; u32 bit_pos; /* Write dQH next pointer and terminate bit to 0 */ dqh->next_dtd_ptr = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; /* clear active and halt bit, in case set from a previous error */ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED); /* Ensure that updates to the QH will occure before priming. 
*/ wmb(); bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num); /* Prime the Endpoint */ writel(bit_pos, &ep->udc->op_regs->epprime); } /* dequeues (cancels, unlinks) an I/O request from an endpoint */ static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); struct mv_req *req; struct mv_udc *udc = ep->udc; unsigned long flags; int stopped, ret = 0; u32 epctrlx; if (!_ep || !_req) return -EINVAL; spin_lock_irqsave(&ep->udc->lock, flags); stopped = ep->stopped; /* Stop the ep before we deal with the queue */ ep->stopped = 1; epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if (ep_dir(ep) == EP_DIR_IN) epctrlx &= ~EPCTRL_TX_ENABLE; else epctrlx &= ~EPCTRL_RX_ENABLE; writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); /* make sure it's actually queued on this endpoint */ list_for_each_entry(req, &ep->queue, queue) { if (&req->req == _req) break; } if (&req->req != _req) { ret = -EINVAL; goto out; } /* The request is in progress, or completed but not dequeued */ if (ep->queue.next == &req->queue) { _req->status = -ECONNRESET; mv_ep_fifo_flush(_ep); /* flush current transfer */ /* The request isn't the last request in this ep queue */ if (req->queue.next != &ep->queue) { struct mv_req *next_req; next_req = list_entry(req->queue.next, struct mv_req, queue); /* Point the QH to the first TD of next request */ mv_prime_ep(ep, next_req); } else { struct mv_dqh *qh; qh = ep->dqh; qh->next_dtd_ptr = 1; qh->size_ioc_int_sts = 0; } /* The request hasn't been processed, patch up the TD chain */ } else { struct mv_req *prev_req; prev_req = list_entry(req->queue.prev, struct mv_req, queue); writel(readl(&req->tail->dtd_next), &prev_req->tail->dtd_next); } done(ep, req, -ECONNRESET); /* Enable EP */ out: epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if (ep_dir(ep) == EP_DIR_IN) epctrlx |= EPCTRL_TX_ENABLE; else epctrlx |= EPCTRL_RX_ENABLE; writel(epctrlx, 
&udc->op_regs->epctrlx[ep->ep_num]); ep->stopped = stopped; spin_unlock_irqrestore(&ep->udc->lock, flags); return ret; } static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall) { u32 epctrlx; epctrlx = readl(&udc->op_regs->epctrlx[ep_num]); if (stall) { if (direction == EP_DIR_IN) epctrlx |= EPCTRL_TX_EP_STALL; else epctrlx |= EPCTRL_RX_EP_STALL; } else { if (direction == EP_DIR_IN) { epctrlx &= ~EPCTRL_TX_EP_STALL; epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST; } else { epctrlx &= ~EPCTRL_RX_EP_STALL; epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST; } } writel(epctrlx, &udc->op_regs->epctrlx[ep_num]); } static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction) { u32 epctrlx; epctrlx = readl(&udc->op_regs->epctrlx[ep_num]); if (direction == EP_DIR_OUT) return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0; else return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0; } static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge) { struct mv_ep *ep; unsigned long flags = 0; int status = 0; struct mv_udc *udc; ep = container_of(_ep, struct mv_ep, ep); udc = ep->udc; if (!_ep || !ep->ep.desc) { status = -EINVAL; goto out; } if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { status = -EOPNOTSUPP; goto out; } /* * Attempt to halt IN ep will fail if any transfer requests * are still queue */ if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) { status = -EAGAIN; goto out; } spin_lock_irqsave(&ep->udc->lock, flags); ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt); if (halt && wedge) ep->wedge = 1; else if (!halt) ep->wedge = 0; spin_unlock_irqrestore(&ep->udc->lock, flags); if (ep->ep_num == 0) { udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = EP_DIR_OUT; } out: return status; } static int mv_ep_set_halt(struct usb_ep *_ep, int halt) { return mv_ep_set_halt_wedge(_ep, halt, 0); } static int mv_ep_set_wedge(struct usb_ep *_ep) { return mv_ep_set_halt_wedge(_ep, 1, 1); } static struct usb_ep_ops mv_ep_ops = { .enable = mv_ep_enable, .disable = 
mv_ep_disable, .alloc_request = mv_alloc_request, .free_request = mv_free_request, .queue = mv_ep_queue, .dequeue = mv_ep_dequeue, .set_wedge = mv_ep_set_wedge, .set_halt = mv_ep_set_halt, .fifo_flush = mv_ep_fifo_flush, /* flush fifo */ }; static void udc_clock_enable(struct mv_udc *udc) { clk_prepare_enable(udc->clk); } static void udc_clock_disable(struct mv_udc *udc) { clk_disable_unprepare(udc->clk); } static void udc_stop(struct mv_udc *udc) { u32 tmp; /* Disable interrupts */ tmp = readl(&udc->op_regs->usbintr); tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN | USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN); writel(tmp, &udc->op_regs->usbintr); udc->stopped = 1; /* Reset the Run the bit in the command register to stop VUSB */ tmp = readl(&udc->op_regs->usbcmd); tmp &= ~USBCMD_RUN_STOP; writel(tmp, &udc->op_regs->usbcmd); } static void udc_start(struct mv_udc *udc) { u32 usbintr; usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN | USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND; /* Enable interrupts */ writel(usbintr, &udc->op_regs->usbintr); udc->stopped = 0; /* Set the Run bit in the command register */ writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd); } static int udc_reset(struct mv_udc *udc) { unsigned int loops; u32 tmp, portsc; /* Stop the controller */ tmp = readl(&udc->op_regs->usbcmd); tmp &= ~USBCMD_RUN_STOP; writel(tmp, &udc->op_regs->usbcmd); /* Reset the controller to get default values */ writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd); /* wait for reset to complete */ loops = LOOPS(RESET_TIMEOUT); while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) { if (loops == 0) { dev_err(&udc->dev->dev, "Wait for RESET completed TIMEOUT\n"); return -ETIMEDOUT; } loops--; udelay(LOOPS_USEC); } /* set controller to device mode */ tmp = readl(&udc->op_regs->usbmode); tmp |= USBMODE_CTRL_MODE_DEVICE; /* turn setup lockout off, require setup tripwire in usbcmd */ tmp |= USBMODE_SETUP_LOCK_OFF; writel(tmp, &udc->op_regs->usbmode); 
writel(0x0, &udc->op_regs->epsetupstat); /* Configure the Endpoint List Address */ writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK, &udc->op_regs->eplistaddr); portsc = readl(&udc->op_regs->portsc[0]); if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC) portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER); if (udc->force_fs) portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT; else portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT); writel(portsc, &udc->op_regs->portsc[0]); tmp = readl(&udc->op_regs->epctrlx[0]); tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL); writel(tmp, &udc->op_regs->epctrlx[0]); return 0; } static int mv_udc_enable_internal(struct mv_udc *udc) { int retval; if (udc->active) return 0; dev_dbg(&udc->dev->dev, "enable udc\n"); udc_clock_enable(udc); if (udc->pdata->phy_init) { retval = udc->pdata->phy_init(udc->phy_regs); if (retval) { dev_err(&udc->dev->dev, "init phy error %d\n", retval); udc_clock_disable(udc); return retval; } } udc->active = 1; return 0; } static int mv_udc_enable(struct mv_udc *udc) { if (udc->clock_gating) return mv_udc_enable_internal(udc); return 0; } static void mv_udc_disable_internal(struct mv_udc *udc) { if (udc->active) { dev_dbg(&udc->dev->dev, "disable udc\n"); if (udc->pdata->phy_deinit) udc->pdata->phy_deinit(udc->phy_regs); udc_clock_disable(udc); udc->active = 0; } } static void mv_udc_disable(struct mv_udc *udc) { if (udc->clock_gating) mv_udc_disable_internal(udc); } static int mv_udc_get_frame(struct usb_gadget *gadget) { struct mv_udc *udc; u16 retval; if (!gadget) return -ENODEV; udc = container_of(gadget, struct mv_udc, gadget); retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS; return retval; } /* Tries to wake up the host connected to this gadget */ static int mv_udc_wakeup(struct usb_gadget *gadget) { struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget); u32 portsc; /* Remote wakeup feature not enabled by host */ if (!udc->remote_wakeup) return -ENOTSUPP; portsc = 
readl(&udc->op_regs->portsc); /* not suspended? */ if (!(portsc & PORTSCX_PORT_SUSPEND)) return 0; /* trigger force resume */ portsc |= PORTSCX_PORT_FORCE_RESUME; writel(portsc, &udc->op_regs->portsc[0]); return 0; } static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active) { struct mv_udc *udc; unsigned long flags; int retval = 0; udc = container_of(gadget, struct mv_udc, gadget); spin_lock_irqsave(&udc->lock, flags); udc->vbus_active = (is_active != 0); dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", __func__, udc->softconnect, udc->vbus_active); if (udc->driver && udc->softconnect && udc->vbus_active) { retval = mv_udc_enable(udc); if (retval == 0) { /* Clock is disabled, need re-init registers */ udc_reset(udc); ep0_reset(udc); udc_start(udc); } } else if (udc->driver && udc->softconnect) { if (!udc->active) goto out; /* stop all the transfer in queue*/ stop_activity(udc, udc->driver); udc_stop(udc); mv_udc_disable(udc); } out: spin_unlock_irqrestore(&udc->lock, flags); return retval; } static int mv_udc_pullup(struct usb_gadget *gadget, int is_on) { struct mv_udc *udc; unsigned long flags; int retval = 0; udc = container_of(gadget, struct mv_udc, gadget); spin_lock_irqsave(&udc->lock, flags); udc->softconnect = (is_on != 0); dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", __func__, udc->softconnect, udc->vbus_active); if (udc->driver && udc->softconnect && udc->vbus_active) { retval = mv_udc_enable(udc); if (retval == 0) { /* Clock is disabled, need re-init registers */ udc_reset(udc); ep0_reset(udc); udc_start(udc); } } else if (udc->driver && udc->vbus_active) { /* stop all the transfer in queue*/ stop_activity(udc, udc->driver); udc_stop(udc); mv_udc_disable(udc); } spin_unlock_irqrestore(&udc->lock, flags); return retval; } static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *); static int mv_udc_stop(struct usb_gadget *, struct usb_gadget_driver *); /* device controller usb_gadget_ops 
structure */ static const struct usb_gadget_ops mv_ops = { /* returns the current frame number */ .get_frame = mv_udc_get_frame, /* tries to wake up the host connected to this gadget */ .wakeup = mv_udc_wakeup, /* notify controller that VBUS is powered or not */ .vbus_session = mv_udc_vbus_session, /* D+ pullup, software-controlled connect/disconnect to USB host */ .pullup = mv_udc_pullup, .udc_start = mv_udc_start, .udc_stop = mv_udc_stop, }; static int eps_init(struct mv_udc *udc) { struct mv_ep *ep; char name[14]; int i; /* initialize ep0 */ ep = &udc->eps[0]; ep->udc = udc; strncpy(ep->name, "ep0", sizeof(ep->name)); ep->ep.name = ep->name; ep->ep.ops = &mv_ep_ops; ep->wedge = 0; ep->stopped = 0; ep->ep.maxpacket = EP0_MAX_PKT_SIZE; ep->ep_num = 0; ep->ep.desc = &mv_ep0_desc; INIT_LIST_HEAD(&ep->queue); ep->ep_type = USB_ENDPOINT_XFER_CONTROL; /* initialize other endpoints */ for (i = 2; i < udc->max_eps * 2; i++) { ep = &udc->eps[i]; if (i % 2) { snprintf(name, sizeof(name), "ep%din", i / 2); ep->direction = EP_DIR_IN; } else { snprintf(name, sizeof(name), "ep%dout", i / 2); ep->direction = EP_DIR_OUT; } ep->udc = udc; strncpy(ep->name, name, sizeof(ep->name)); ep->ep.name = ep->name; ep->ep.ops = &mv_ep_ops; ep->stopped = 0; ep->ep.maxpacket = (unsigned short) ~0; ep->ep_num = i / 2; INIT_LIST_HEAD(&ep->queue); list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); ep->dqh = &udc->ep_dqh[i]; } return 0; } /* delete all endpoint requests, called with spinlock held */ static void nuke(struct mv_ep *ep, int status) { /* called with spinlock held */ ep->stopped = 1; /* endpoint fifo flush */ mv_ep_fifo_flush(&ep->ep); while (!list_empty(&ep->queue)) { struct mv_req *req = NULL; req = list_entry(ep->queue.next, struct mv_req, queue); done(ep, req, status); } } /* stop all USB activities */ static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver) { struct mv_ep *ep; nuke(&udc->eps[0], -ESHUTDOWN); list_for_each_entry(ep, &udc->gadget.ep_list, 
ep.ep_list) { nuke(ep, -ESHUTDOWN); } /* report disconnect; the driver is already quiesced */ if (driver) { spin_unlock(&udc->lock); driver->disconnect(&udc->gadget); spin_lock(&udc->lock); } } static int mv_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct mv_udc *udc; int retval = 0; unsigned long flags; udc = container_of(gadget, struct mv_udc, gadget); if (udc->driver) return -EBUSY; spin_lock_irqsave(&udc->lock, flags); /* hook up the driver ... */ driver->driver.bus = NULL; udc->driver = driver; udc->usb_state = USB_STATE_ATTACHED; udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = EP_DIR_OUT; spin_unlock_irqrestore(&udc->lock, flags); if (udc->transceiver) { retval = otg_set_peripheral(udc->transceiver->otg, &udc->gadget); if (retval) { dev_err(&udc->dev->dev, "unable to register peripheral to otg\n"); udc->driver = NULL; return retval; } } /* pullup is always on */ mv_udc_pullup(&udc->gadget, 1); /* When boot with cable attached, there will be no vbus irq occurred */ if (udc->qwork) queue_work(udc->qwork, &udc->vbus_work); return 0; } static int mv_udc_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct mv_udc *udc; unsigned long flags; udc = container_of(gadget, struct mv_udc, gadget); spin_lock_irqsave(&udc->lock, flags); mv_udc_enable(udc); udc_stop(udc); /* stop all usb activities */ udc->gadget.speed = USB_SPEED_UNKNOWN; stop_activity(udc, driver); mv_udc_disable(udc); spin_unlock_irqrestore(&udc->lock, flags); /* unbind gadget driver */ udc->driver = NULL; return 0; } static void mv_set_ptc(struct mv_udc *udc, u32 mode) { u32 portsc; portsc = readl(&udc->op_regs->portsc[0]); portsc |= mode << 16; writel(portsc, &udc->op_regs->portsc[0]); } static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req) { struct mv_ep *mvep = container_of(ep, struct mv_ep, ep); struct mv_req *req = container_of(_req, struct mv_req, req); struct mv_udc *udc; unsigned long flags; udc = mvep->udc; 
dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode); spin_lock_irqsave(&udc->lock, flags); if (req->test_mode) { mv_set_ptc(udc, req->test_mode); req->test_mode = 0; } spin_unlock_irqrestore(&udc->lock, flags); } static int udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty) { int retval = 0; struct mv_req *req; struct mv_ep *ep; ep = &udc->eps[0]; udc->ep0_dir = direction; udc->ep0_state = WAIT_FOR_OUT_STATUS; req = udc->status_req; /* fill in the reqest structure */ if (empty == false) { *((u16 *) req->req.buf) = cpu_to_le16(status); req->req.length = 2; } else req->req.length = 0; req->ep = ep; req->req.status = -EINPROGRESS; req->req.actual = 0; if (udc->test_mode) { req->req.complete = prime_status_complete; req->test_mode = udc->test_mode; udc->test_mode = 0; } else req->req.complete = NULL; req->dtd_count = 0; if (req->req.dma == DMA_ADDR_INVALID) { req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, req->req.buf, req->req.length, ep_dir(ep) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); req->mapped = 1; } /* prime the data phase */ if (!req_to_dtd(req)) { retval = queue_dtd(ep, req); if (retval) { dev_err(&udc->dev->dev, "Failed to queue dtd when prime status\n"); goto out; } } else{ /* no mem */ retval = -ENOMEM; dev_err(&udc->dev->dev, "Failed to dma_pool_alloc when prime status\n"); goto out; } list_add_tail(&req->queue, &ep->queue); return 0; out: usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep)); return retval; } static void mv_udc_testmode(struct mv_udc *udc, u16 index) { if (index <= TEST_FORCE_EN) { udc->test_mode = index; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); } else dev_err(&udc->dev->dev, "This test mode(%d) is not supported\n", index); } static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup) { udc->dev_addr = (u8)setup->wValue; /* update usb state */ udc->usb_state = USB_STATE_ADDRESS; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); } static void ch9getstatus(struct mv_udc *udc, u8 ep_num, struct usb_ctrlrequest *setup) { u16 status = 0; int retval; if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) != (USB_DIR_IN | USB_TYPE_STANDARD)) return; if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) { status = 1 << USB_DEVICE_SELF_POWERED; status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP; } else if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_INTERFACE) { /* get interface status */ status = 0; } else if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) { u8 ep_num, direction; ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) ? 
EP_DIR_IN : EP_DIR_OUT; status = ep_is_stall(udc, ep_num, direction) << USB_ENDPOINT_HALT; } retval = udc_prime_status(udc, EP_DIR_IN, status, false); if (retval) ep0_stall(udc); else udc->ep0_state = DATA_STATE_XMIT; } static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) { u8 ep_num; u8 direction; struct mv_ep *ep; if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { switch (setup->wValue) { case USB_DEVICE_REMOTE_WAKEUP: udc->remote_wakeup = 0; break; default: goto out; } } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { switch (setup->wValue) { case USB_ENDPOINT_HALT: ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) ? EP_DIR_IN : EP_DIR_OUT; if (setup->wValue != 0 || setup->wLength != 0 || ep_num > udc->max_eps) goto out; ep = &udc->eps[ep_num * 2 + direction]; if (ep->wedge == 1) break; spin_unlock(&udc->lock); ep_set_stall(udc, ep_num, direction, 0); spin_lock(&udc->lock); break; default: goto out; } } else goto out; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); out: return; } static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) { u8 ep_num; u8 direction; if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { switch (setup->wValue) { case USB_DEVICE_REMOTE_WAKEUP: udc->remote_wakeup = 1; break; case USB_DEVICE_TEST_MODE: if (setup->wIndex & 0xFF || udc->gadget.speed != USB_SPEED_HIGH) ep0_stall(udc); if (udc->usb_state != USB_STATE_CONFIGURED && udc->usb_state != USB_STATE_ADDRESS && udc->usb_state != USB_STATE_DEFAULT) ep0_stall(udc); mv_udc_testmode(udc, (setup->wIndex >> 8)); goto out; default: goto out; } } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { switch (setup->wValue) { case 
USB_ENDPOINT_HALT: ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) ? EP_DIR_IN : EP_DIR_OUT; if (setup->wValue != 0 || setup->wLength != 0 || ep_num > udc->max_eps) goto out; spin_unlock(&udc->lock); ep_set_stall(udc, ep_num, direction, 1); spin_lock(&udc->lock); break; default: goto out; } } else goto out; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); out: return; } static void handle_setup_packet(struct mv_udc *udc, u8 ep_num, struct usb_ctrlrequest *setup) __releases(&ep->udc->lock) __acquires(&ep->udc->lock) { bool delegate = false; nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN); dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", setup->bRequestType, setup->bRequest, setup->wValue, setup->wIndex, setup->wLength); /* We process some stardard setup requests here */ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (setup->bRequest) { case USB_REQ_GET_STATUS: ch9getstatus(udc, ep_num, setup); break; case USB_REQ_SET_ADDRESS: ch9setaddress(udc, setup); break; case USB_REQ_CLEAR_FEATURE: ch9clearfeature(udc, setup); break; case USB_REQ_SET_FEATURE: ch9setfeature(udc, setup); break; default: delegate = true; } } else delegate = true; /* delegate USB standard requests to the gadget driver */ if (delegate == true) { /* USB requests handled by gadget */ if (setup->wLength) { /* DATA phase from gadget, STATUS phase from udc */ udc->ep0_dir = (setup->bRequestType & USB_DIR_IN) ? EP_DIR_IN : EP_DIR_OUT; spin_unlock(&udc->lock); if (udc->driver->setup(&udc->gadget, &udc->local_setup_buff) < 0) ep0_stall(udc); spin_lock(&udc->lock); udc->ep0_state = (setup->bRequestType & USB_DIR_IN) ? 
DATA_STATE_XMIT : DATA_STATE_RECV; } else { /* no DATA phase, IN STATUS phase from gadget */ udc->ep0_dir = EP_DIR_IN; spin_unlock(&udc->lock); if (udc->driver->setup(&udc->gadget, &udc->local_setup_buff) < 0) ep0_stall(udc); spin_lock(&udc->lock); udc->ep0_state = WAIT_FOR_OUT_STATUS; } } } /* complete DATA or STATUS phase of ep0 prime status phase if needed */ static void ep0_req_complete(struct mv_udc *udc, struct mv_ep *ep0, struct mv_req *req) { u32 new_addr; if (udc->usb_state == USB_STATE_ADDRESS) { /* set the new address */ new_addr = (u32)udc->dev_addr; writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT, &udc->op_regs->deviceaddr); } done(ep0, req, 0); switch (udc->ep0_state) { case DATA_STATE_XMIT: /* receive status phase */ if (udc_prime_status(udc, EP_DIR_OUT, 0, true)) ep0_stall(udc); break; case DATA_STATE_RECV: /* send status phase */ if (udc_prime_status(udc, EP_DIR_IN, 0 , true)) ep0_stall(udc); break; case WAIT_FOR_OUT_STATUS: udc->ep0_state = WAIT_FOR_SETUP; break; case WAIT_FOR_SETUP: dev_err(&udc->dev->dev, "unexpect ep0 packets\n"); break; default: ep0_stall(udc); break; } } static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr) { u32 temp; struct mv_dqh *dqh; dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT]; /* Clear bit in ENDPTSETUPSTAT */ writel((1 << ep_num), &udc->op_regs->epsetupstat); /* while a hazard exists when setup package arrives */ do { /* Set Setup Tripwire */ temp = readl(&udc->op_regs->usbcmd); writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); /* Copy the setup packet to local buffer */ memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8); } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET)); /* Clear Setup Tripwire */ temp = readl(&udc->op_regs->usbcmd); writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); } static void irq_process_tr_complete(struct mv_udc *udc) { u32 tmp, bit_pos; int i, ep_num = 0, direction = 0; struct mv_ep *curr_ep; struct mv_req *curr_req, *temp_req; 
int status; /* * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE * because the setup packets are to be read ASAP */ /* Process all Setup packet received interrupts */ tmp = readl(&udc->op_regs->epsetupstat); if (tmp) { for (i = 0; i < udc->max_eps; i++) { if (tmp & (1 << i)) { get_setup_data(udc, i, (u8 *)(&udc->local_setup_buff)); handle_setup_packet(udc, i, &udc->local_setup_buff); } } } /* Don't clear the endpoint setup status register here. * It is cleared as a setup packet is read out of the buffer */ /* Process non-setup transaction complete interrupts */ tmp = readl(&udc->op_regs->epcomplete); if (!tmp) return; writel(tmp, &udc->op_regs->epcomplete); for (i = 0; i < udc->max_eps * 2; i++) { ep_num = i >> 1; direction = i % 2; bit_pos = 1 << (ep_num + 16 * direction); if (!(bit_pos & tmp)) continue; if (i == 1) curr_ep = &udc->eps[0]; else curr_ep = &udc->eps[i]; /* process the req queue until an uncomplete request */ list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue, queue) { status = process_ep_req(udc, i, curr_req); if (status) break; /* write back status to req */ curr_req->req.status = status; /* ep0 request completion */ if (ep_num == 0) { ep0_req_complete(udc, curr_ep, curr_req); break; } else { done(curr_ep, curr_req, status); } } } } static void irq_process_reset(struct mv_udc *udc) { u32 tmp; unsigned int loops; udc->ep0_dir = EP_DIR_OUT; udc->ep0_state = WAIT_FOR_SETUP; udc->remote_wakeup = 0; /* default to 0 on reset */ /* The address bits are past bit 25-31. 
Set the address */ tmp = readl(&udc->op_regs->deviceaddr); tmp &= ~(USB_DEVICE_ADDRESS_MASK); writel(tmp, &udc->op_regs->deviceaddr); /* Clear all the setup token semaphores */ tmp = readl(&udc->op_regs->epsetupstat); writel(tmp, &udc->op_regs->epsetupstat); /* Clear all the endpoint complete status bits */ tmp = readl(&udc->op_regs->epcomplete); writel(tmp, &udc->op_regs->epcomplete); /* wait until all endptprime bits cleared */ loops = LOOPS(PRIME_TIMEOUT); while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) { if (loops == 0) { dev_err(&udc->dev->dev, "Timeout for ENDPTPRIME = 0x%x\n", readl(&udc->op_regs->epprime)); break; } loops--; udelay(LOOPS_USEC); } /* Write 1s to the Flush register */ writel((u32)~0, &udc->op_regs->epflush); if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) { dev_info(&udc->dev->dev, "usb bus reset\n"); udc->usb_state = USB_STATE_DEFAULT; /* reset all the queues, stop all USB activities */ stop_activity(udc, udc->driver); } else { dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n", readl(&udc->op_regs->portsc)); /* * re-initialize * controller reset */ udc_reset(udc); /* reset all the queues, stop all USB activities */ stop_activity(udc, udc->driver); /* reset ep0 dQH and endptctrl */ ep0_reset(udc); /* enable interrupt and set controller to run state */ udc_start(udc); udc->usb_state = USB_STATE_ATTACHED; } } static void handle_bus_resume(struct mv_udc *udc) { udc->usb_state = udc->resume_state; udc->resume_state = 0; /* report resume to the driver */ if (udc->driver) { if (udc->driver->resume) { spin_unlock(&udc->lock); udc->driver->resume(&udc->gadget); spin_lock(&udc->lock); } } } static void irq_process_suspend(struct mv_udc *udc) { udc->resume_state = udc->usb_state; udc->usb_state = USB_STATE_SUSPENDED; if (udc->driver->suspend) { spin_unlock(&udc->lock); udc->driver->suspend(&udc->gadget); spin_lock(&udc->lock); } } static void irq_process_port_change(struct mv_udc *udc) { u32 portsc; portsc = 
readl(&udc->op_regs->portsc[0]); if (!(portsc & PORTSCX_PORT_RESET)) { /* Get the speed */ u32 speed = portsc & PORTSCX_PORT_SPEED_MASK; switch (speed) { case PORTSCX_PORT_SPEED_HIGH: udc->gadget.speed = USB_SPEED_HIGH; break; case PORTSCX_PORT_SPEED_FULL: udc->gadget.speed = USB_SPEED_FULL; break; case PORTSCX_PORT_SPEED_LOW: udc->gadget.speed = USB_SPEED_LOW; break; default: udc->gadget.speed = USB_SPEED_UNKNOWN; break; } } if (portsc & PORTSCX_PORT_SUSPEND) { udc->resume_state = udc->usb_state; udc->usb_state = USB_STATE_SUSPENDED; if (udc->driver->suspend) { spin_unlock(&udc->lock); udc->driver->suspend(&udc->gadget); spin_lock(&udc->lock); } } if (!(portsc & PORTSCX_PORT_SUSPEND) && udc->usb_state == USB_STATE_SUSPENDED) { handle_bus_resume(udc); } if (!udc->resume_state) udc->usb_state = USB_STATE_DEFAULT; } static void irq_process_error(struct mv_udc *udc) { /* Increment the error count */ udc->errors++; } static irqreturn_t mv_udc_irq(int irq, void *dev) { struct mv_udc *udc = (struct mv_udc *)dev; u32 status, intr; /* Disable ISR when stopped bit is set */ if (udc->stopped) return IRQ_NONE; spin_lock(&udc->lock); status = readl(&udc->op_regs->usbsts); intr = readl(&udc->op_regs->usbintr); status &= intr; if (status == 0) { spin_unlock(&udc->lock); return IRQ_NONE; } /* Clear all the interrupts occurred */ writel(status, &udc->op_regs->usbsts); if (status & USBSTS_ERR) irq_process_error(udc); if (status & USBSTS_RESET) irq_process_reset(udc); if (status & USBSTS_PORT_CHANGE) irq_process_port_change(udc); if (status & USBSTS_INT) irq_process_tr_complete(udc); if (status & USBSTS_SUSPEND) irq_process_suspend(udc); spin_unlock(&udc->lock); return IRQ_HANDLED; } static irqreturn_t mv_udc_vbus_irq(int irq, void *dev) { struct mv_udc *udc = (struct mv_udc *)dev; /* polling VBUS and init phy may cause too much time*/ if (udc->qwork) queue_work(udc->qwork, &udc->vbus_work); return IRQ_HANDLED; } static void mv_udc_vbus_work(struct work_struct *work) { struct mv_udc 
*udc; unsigned int vbus; udc = container_of(work, struct mv_udc, vbus_work); if (!udc->pdata->vbus) return; vbus = udc->pdata->vbus->poll(); dev_info(&udc->dev->dev, "vbus is %d\n", vbus); if (vbus == VBUS_HIGH) mv_udc_vbus_session(&udc->gadget, 1); else if (vbus == VBUS_LOW) mv_udc_vbus_session(&udc->gadget, 0); } /* release device structure */ static void gadget_release(struct device *_dev) { struct mv_udc *udc; udc = dev_get_drvdata(_dev); complete(udc->done); } static int mv_udc_remove(struct platform_device *pdev) { struct mv_udc *udc; udc = platform_get_drvdata(pdev); usb_del_gadget_udc(&udc->gadget); if (udc->qwork) { flush_workqueue(udc->qwork); destroy_workqueue(udc->qwork); } /* free memory allocated in probe */ if (udc->dtd_pool) dma_pool_destroy(udc->dtd_pool); if (udc->ep_dqh) dma_free_coherent(&pdev->dev, udc->ep_dqh_size, udc->ep_dqh, udc->ep_dqh_dma); mv_udc_disable(udc); /* free dev, wait for the release() finished */ wait_for_completion(udc->done); return 0; } static int mv_udc_probe(struct platform_device *pdev) { struct mv_usb_platform_data *pdata = pdev->dev.platform_data; struct mv_udc *udc; int retval = 0; struct resource *r; size_t size; if (pdata == NULL) { dev_err(&pdev->dev, "missing platform_data\n"); return -ENODEV; } udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL); if (udc == NULL) { dev_err(&pdev->dev, "failed to allocate memory for udc\n"); return -ENOMEM; } udc->done = &release_done; udc->pdata = pdev->dev.platform_data; spin_lock_init(&udc->lock); udc->dev = pdev; if (pdata->mode == MV_USB_MODE_OTG) { udc->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2); if (IS_ERR(udc->transceiver)) { retval = PTR_ERR(udc->transceiver); if (retval == -ENXIO) return retval; udc->transceiver = NULL; return -EPROBE_DEFER; } } /* udc only have one sysclk. 
*/ udc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(udc->clk)) return PTR_ERR(udc->clk); r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs"); if (r == NULL) { dev_err(&pdev->dev, "no I/O memory resource defined\n"); return -ENODEV; } udc->cap_regs = (struct mv_cap_regs __iomem *) devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (udc->cap_regs == NULL) { dev_err(&pdev->dev, "failed to map I/O memory\n"); return -EBUSY; } r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs"); if (r == NULL) { dev_err(&pdev->dev, "no phy I/O memory resource defined\n"); return -ENODEV; } udc->phy_regs = ioremap(r->start, resource_size(r)); if (udc->phy_regs == NULL) { dev_err(&pdev->dev, "failed to map phy I/O memory\n"); return -EBUSY; } /* we will acces controller register, so enable the clk */ retval = mv_udc_enable_internal(udc); if (retval) return retval; udc->op_regs = (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs + (readl(&udc->cap_regs->caplength_hciversion) & CAPLENGTH_MASK)); udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK; /* * some platform will use usb to download image, it may not disconnect * usb gadget before loading kernel. So first stop udc here. 
*/ udc_stop(udc); writel(0xFFFFFFFF, &udc->op_regs->usbsts); size = udc->max_eps * sizeof(struct mv_dqh) *2; size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1); udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size, &udc->ep_dqh_dma, GFP_KERNEL); if (udc->ep_dqh == NULL) { dev_err(&pdev->dev, "allocate dQH memory failed\n"); retval = -ENOMEM; goto err_disable_clock; } udc->ep_dqh_size = size; /* create dTD dma_pool resource */ udc->dtd_pool = dma_pool_create("mv_dtd", &pdev->dev, sizeof(struct mv_dtd), DTD_ALIGNMENT, DMA_BOUNDARY); if (!udc->dtd_pool) { retval = -ENOMEM; goto err_free_dma; } size = udc->max_eps * sizeof(struct mv_ep) *2; udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); if (udc->eps == NULL) { dev_err(&pdev->dev, "allocate ep memory failed\n"); retval = -ENOMEM; goto err_destroy_dma; } /* initialize ep0 status request structure */ udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req), GFP_KERNEL); if (!udc->status_req) { dev_err(&pdev->dev, "allocate status_req memory failed\n"); retval = -ENOMEM; goto err_destroy_dma; } INIT_LIST_HEAD(&udc->status_req->queue); /* allocate a small amount of memory to get valid address */ udc->status_req->req.buf = kzalloc(8, GFP_KERNEL); udc->status_req->req.dma = DMA_ADDR_INVALID; udc->resume_state = USB_STATE_NOTATTACHED; udc->usb_state = USB_STATE_POWERED; udc->ep0_dir = EP_DIR_OUT; udc->remote_wakeup = 0; r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0); if (r == NULL) { dev_err(&pdev->dev, "no IRQ resource defined\n"); retval = -ENODEV; goto err_destroy_dma; } udc->irq = r->start; if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq, IRQF_SHARED, driver_name, udc)) { dev_err(&pdev->dev, "Request irq %d for UDC failed\n", udc->irq); retval = -ENODEV; goto err_destroy_dma; } /* initialize gadget structure */ udc->gadget.ops = &mv_ops; /* usb_gadget_ops */ udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */ INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */ udc->gadget.speed = 
USB_SPEED_UNKNOWN; /* speed */ udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */ /* the "gadget" abstracts/virtualizes the controller */ udc->gadget.name = driver_name; /* gadget name */ eps_init(udc); /* VBUS detect: we can disable/enable clock on demand.*/ if (udc->transceiver) udc->clock_gating = 1; else if (pdata->vbus) { udc->clock_gating = 1; retval = devm_request_threaded_irq(&pdev->dev, pdata->vbus->irq, NULL, mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc); if (retval) { dev_info(&pdev->dev, "Can not request irq for VBUS, " "disable clock gating\n"); udc->clock_gating = 0; } udc->qwork = create_singlethread_workqueue("mv_udc_queue"); if (!udc->qwork) { dev_err(&pdev->dev, "cannot create workqueue\n"); retval = -ENOMEM; goto err_destroy_dma; } INIT_WORK(&udc->vbus_work, mv_udc_vbus_work); } /* * When clock gating is supported, we can disable clk and phy. * If not, it means that VBUS detection is not supported, we * have to enable vbus active all the time to let controller work. */ if (udc->clock_gating) mv_udc_disable_internal(udc); else udc->vbus_active = 1; retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget, gadget_release); if (retval) goto err_create_workqueue; platform_set_drvdata(pdev, udc); dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n", udc->clock_gating ? 
"with" : "without"); return 0; err_create_workqueue: destroy_workqueue(udc->qwork); err_destroy_dma: dma_pool_destroy(udc->dtd_pool); err_free_dma: dma_free_coherent(&pdev->dev, udc->ep_dqh_size, udc->ep_dqh, udc->ep_dqh_dma); err_disable_clock: mv_udc_disable_internal(udc); return retval; } #ifdef CONFIG_PM static int mv_udc_suspend(struct device *dev) { struct mv_udc *udc; udc = dev_get_drvdata(dev); /* if OTG is enabled, the following will be done in OTG driver*/ if (udc->transceiver) return 0; if (udc->pdata->vbus && udc->pdata->vbus->poll) if (udc->pdata->vbus->poll() == VBUS_HIGH) { dev_info(&udc->dev->dev, "USB cable is connected!\n"); return -EAGAIN; } /* * only cable is unplugged, udc can suspend. * So do not care about clock_gating == 1. */ if (!udc->clock_gating) { udc_stop(udc); spin_lock_irq(&udc->lock); /* stop all usb activities */ stop_activity(udc, udc->driver); spin_unlock_irq(&udc->lock); mv_udc_disable_internal(udc); } return 0; } static int mv_udc_resume(struct device *dev) { struct mv_udc *udc; int retval; udc = dev_get_drvdata(dev); /* if OTG is enabled, the following will be done in OTG driver*/ if (udc->transceiver) return 0; if (!udc->clock_gating) { retval = mv_udc_enable_internal(udc); if (retval) return retval; if (udc->driver && udc->softconnect) { udc_reset(udc); ep0_reset(udc); udc_start(udc); } } return 0; } static const struct dev_pm_ops mv_udc_pm_ops = { .suspend = mv_udc_suspend, .resume = mv_udc_resume, }; #endif static void mv_udc_shutdown(struct platform_device *pdev) { struct mv_udc *udc; u32 mode; udc = platform_get_drvdata(pdev); /* reset controller mode to IDLE */ mv_udc_enable(udc); mode = readl(&udc->op_regs->usbmode); mode &= ~3; writel(mode, &udc->op_regs->usbmode); mv_udc_disable(udc); } static struct platform_driver udc_driver = { .probe = mv_udc_probe, .remove = mv_udc_remove, .shutdown = mv_udc_shutdown, .driver = { .owner = THIS_MODULE, .name = "mv-udc", #ifdef CONFIG_PM .pm = &mv_udc_pm_ops, #endif }, }; 
module_platform_driver(udc_driver); MODULE_ALIAS("platform:mv-udc"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>"); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
Cardinal97/android_kernel_msm8939
drivers/iio/dac/ad5360.c
2158
13761
/*
 * Analog devices AD5360, AD5361, AD5362, AD5363, AD5370, AD5371, AD5372, AD5373
 * multi-channel Digital to Analog Converters driver
 *
 * Copyright 2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>

/* 24-bit SPI word layout: cmd[23:22] | addr[21:16] | data[15:0] */
#define AD5360_CMD(x)				((x) << 22)
#define AD5360_ADDR(x)				((x) << 16)
#define AD5360_READBACK_TYPE(x)			((x) << 13)
#define AD5360_READBACK_ADDR(x)			((x) << 7)
/* Per-channel register addresses start at 0x8 */
#define AD5360_CHAN_ADDR(chan)			((chan) + 0x8)

#define AD5360_CMD_WRITE_DATA			0x3
#define AD5360_CMD_WRITE_OFFSET			0x2
#define AD5360_CMD_WRITE_GAIN			0x1
#define AD5360_CMD_SPECIAL_FUNCTION		0x0

/* Special function register addresses */
#define AD5360_REG_SF_NOP			0x0
#define AD5360_REG_SF_CTRL			0x1
#define AD5360_REG_SF_OFS(x)			(0x2 + (x))
#define AD5360_REG_SF_READBACK			0x5

#define AD5360_SF_CTRL_PWR_DOWN			BIT(0)

#define AD5360_READBACK_X1A			0x0
#define AD5360_READBACK_X1B			0x1
#define AD5360_READBACK_OFFSET			0x2
#define AD5360_READBACK_GAIN			0x3
#define AD5360_READBACK_SF			0x4

/**
 * struct ad5360_chip_info - chip specific information
 * @channel_template:	channel specification template
 * @num_channels:	number of channels
 * @channels_per_group:	number of channels per group
 * @num_vrefs:		number of vref supplies for the chip
 */
struct ad5360_chip_info {
	struct iio_chan_spec	channel_template;
	unsigned int		num_channels;
	unsigned int		channels_per_group;
	unsigned int		num_vrefs;
};

/**
 * struct ad5360_state - driver instance specific data
 * @spi:		spi_device
 * @chip_info:		chip model specific constants, available modes etc
 * @vref_reg:		vref supply regulators
 * @ctrl:		control register cache
 * @data:		spi transfer buffers
 */
struct ad5360_state {
	struct spi_device		*spi;
	const struct ad5360_chip_info	*chip_info;
	struct regulator_bulk_data	vref_reg[3];
	unsigned int			ctrl;

	/*
	 * DMA (thus cache coherency maintenance) requires the
	 * transfer buffers to live in their own cache lines.
	 */
	union {
		__be32 d32;
		u8 d8[4];
	} data[2] ____cacheline_aligned;
};

enum ad5360_type {
	ID_AD5360,
	ID_AD5361,
	ID_AD5362,
	ID_AD5363,
	ID_AD5370,
	ID_AD5371,
	ID_AD5372,
	ID_AD5373,
};

#define AD5360_CHANNEL(bits) {					\
	.type = IIO_VOLTAGE,					\
	.indexed = 1,						\
	.output = 1,						\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
		BIT(IIO_CHAN_INFO_SCALE) |			\
		BIT(IIO_CHAN_INFO_OFFSET) |			\
		BIT(IIO_CHAN_INFO_CALIBSCALE) |			\
		BIT(IIO_CHAN_INFO_CALIBBIAS),			\
	.scan_type = IIO_ST('u', (bits), 16, 16 - (bits))	\
}

static const struct ad5360_chip_info ad5360_chip_info_tbl[] = {
	[ID_AD5360] = {
		.channel_template = AD5360_CHANNEL(16),
		.num_channels = 16,
		.channels_per_group = 8,
		.num_vrefs = 2,
	},
	[ID_AD5361] = {
		.channel_template = AD5360_CHANNEL(14),
		.num_channels = 16,
		.channels_per_group = 8,
		.num_vrefs = 2,
	},
	[ID_AD5362] = {
		.channel_template = AD5360_CHANNEL(16),
		.num_channels = 8,
		.channels_per_group = 4,
		.num_vrefs = 2,
	},
	[ID_AD5363] = {
		.channel_template = AD5360_CHANNEL(14),
		.num_channels = 8,
		.channels_per_group = 4,
		.num_vrefs = 2,
	},
	[ID_AD5370] = {
		.channel_template = AD5360_CHANNEL(16),
		.num_channels = 40,
		.channels_per_group = 8,
		.num_vrefs = 2,
	},
	[ID_AD5371] = {
		.channel_template = AD5360_CHANNEL(14),
		.num_channels = 40,
		.channels_per_group = 8,
		.num_vrefs = 3,
	},
	[ID_AD5372] = {
		.channel_template = AD5360_CHANNEL(16),
		.num_channels = 32,
		.channels_per_group = 8,
		.num_vrefs = 2,
	},
	[ID_AD5373] = {
		.channel_template = AD5360_CHANNEL(14),
		.num_channels = 32,
		.channels_per_group = 8,
		.num_vrefs = 2,
	},
};

/*
 * Map a channel number to the index of the vref supply that feeds it.
 * The first groups have their own vref, while the remaining groups
 * share the last vref.
 */
static unsigned int ad5360_get_channel_vref_index(struct ad5360_state *st,
	unsigned int channel)
{
	unsigned int i;

	i = channel / st->chip_info->channels_per_group;
	if (i >= st->chip_info->num_vrefs)
		i = st->chip_info->num_vrefs - 1;

	return i;
}

/* Return the vref voltage (in uV) for the given channel, or -errno. */
static int ad5360_get_channel_vref(struct ad5360_state *st,
	unsigned int channel)
{
	unsigned int i = ad5360_get_channel_vref_index(st, channel);

	return regulator_get_voltage(st->vref_reg[i].consumer);
}

/*
 * Send one 24-bit command word to the device. Caller must hold the
 * device lock (see ad5360_read()/ad5360_update_ctrl() for lock usage).
 * Only the low three bytes of the big-endian buffer are transferred.
 */
static int ad5360_write_unlocked(struct iio_dev *indio_dev,
	unsigned int cmd, unsigned int addr, unsigned int val,
	unsigned int shift)
{
	struct ad5360_state *st = iio_priv(indio_dev);

	val <<= shift;
	val |= AD5360_CMD(cmd) | AD5360_ADDR(addr);
	st->data[0].d32 = cpu_to_be32(val);

	return spi_write(st->spi, &st->data[0].d8[1], 3);
}

/* Locked wrapper around ad5360_write_unlocked(). */
static int ad5360_write(struct iio_dev *indio_dev, unsigned int cmd,
	unsigned int addr, unsigned int val, unsigned int shift)
{
	int ret;

	mutex_lock(&indio_dev->mlock);
	ret = ad5360_write_unlocked(indio_dev, cmd, addr, val, shift);
	mutex_unlock(&indio_dev->mlock);

	return ret;
}

/*
 * Read back a register via the special function readback register.
 * Two 3-byte transfers are needed: the first selects the register to
 * read, the second clocks the value out. Returns the 16-bit register
 * value on success or a negative error code.
 */
static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
	unsigned int addr)
{
	struct ad5360_state *st = iio_priv(indio_dev);
	int ret;
	struct spi_transfer t[] = {
		{
			.tx_buf = &st->data[0].d8[1],
			.len = 3,
			.cs_change = 1,
		}, {
			.rx_buf = &st->data[1].d8[1],
			.len = 3,
		},
	};

	mutex_lock(&indio_dev->mlock);

	st->data[0].d32 = cpu_to_be32(AD5360_CMD(AD5360_CMD_SPECIAL_FUNCTION) |
		AD5360_ADDR(AD5360_REG_SF_READBACK) |
		AD5360_READBACK_TYPE(type) |
		AD5360_READBACK_ADDR(addr));

	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
	if (ret >= 0)
		ret = be32_to_cpu(st->data[1].d32) & 0xffff;

	mutex_unlock(&indio_dev->mlock);

	return ret;
}

/* sysfs: show whether the global power-down bit is set in the cached ctrl. */
static ssize_t ad5360_read_dac_powerdown(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct ad5360_state *st = iio_priv(indio_dev);

	return sprintf(buf, "%d\n", (bool)(st->ctrl & AD5360_SF_CTRL_PWR_DOWN));
}

/*
 * Update the cached control register and write it to the device.
 * @set: bits to set, @clr: bits to clear.
 */
static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
	unsigned int clr)
{
	struct ad5360_state *st = iio_priv(indio_dev);
	/*
	 * Must be a signed int: ad5360_write_unlocked() may return a
	 * negative error code (e.g. from spi_write()), which would be
	 * mangled if stored in an unsigned variable.
	 */
	int ret;

	mutex_lock(&indio_dev->mlock);

	st->ctrl |= set;
	st->ctrl &= ~clr;

	ret = ad5360_write_unlocked(indio_dev, AD5360_CMD_SPECIAL_FUNCTION,
			AD5360_REG_SF_CTRL, st->ctrl, 0);

	mutex_unlock(&indio_dev->mlock);

	return ret;
}

/* sysfs: set or clear the global power-down bit. */
static ssize_t ad5360_write_dac_powerdown(struct device *dev,
	 struct device_attribute *attr, const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool pwr_down;
	int ret;

	ret = strtobool(buf, &pwr_down);
	if (ret)
		return ret;

	if (pwr_down)
		ret = ad5360_update_ctrl(indio_dev, AD5360_SF_CTRL_PWR_DOWN, 0);
	else
		ret = ad5360_update_ctrl(indio_dev, 0, AD5360_SF_CTRL_PWR_DOWN);

	return ret ? ret : len;
}

static IIO_DEVICE_ATTR(out_voltage_powerdown,
			S_IRUGO | S_IWUSR,
			ad5360_read_dac_powerdown,
			ad5360_write_dac_powerdown, 0);

static struct attribute *ad5360_attributes[] = {
	&iio_dev_attr_out_voltage_powerdown.dev_attr.attr,
	NULL,
};

static const struct attribute_group ad5360_attribute_group = {
	.attrs = ad5360_attributes,
};

static int ad5360_write_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       int val,
			       int val2,
			       long mask)
{
	struct ad5360_state *st = iio_priv(indio_dev);
	int max_val = (1 << chan->scan_type.realbits);
	unsigned int ofs_index;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		if (val >= max_val || val < 0)
			return -EINVAL;

		return ad5360_write(indio_dev, AD5360_CMD_WRITE_DATA,
				 chan->address, val, chan->scan_type.shift);

	case IIO_CHAN_INFO_CALIBBIAS:
		if (val >= max_val || val < 0)
			return -EINVAL;

		return ad5360_write(indio_dev, AD5360_CMD_WRITE_OFFSET,
				 chan->address, val, chan->scan_type.shift);

	case IIO_CHAN_INFO_CALIBSCALE:
		if (val >= max_val || val < 0)
			return -EINVAL;

		return ad5360_write(indio_dev, AD5360_CMD_WRITE_GAIN,
				 chan->address, val, chan->scan_type.shift);

	case IIO_CHAN_INFO_OFFSET:
		if (val <= -max_val || val > 0)
			return -EINVAL;

		val = -val;

		/* offset is supposed to have the same scale as raw, but it
		 * is always 14bits wide, so on a chip where the raw value has
		 * more bits, we need to shift offset. */
		val >>= (chan->scan_type.realbits - 14);

		/* There is one DAC offset register per vref. Changing one
		 * channels offset will also change the offset for all other
		 * channels which share the same vref supply. */
		ofs_index = ad5360_get_channel_vref_index(st, chan->channel);
		return ad5360_write(indio_dev, AD5360_CMD_SPECIAL_FUNCTION,
				 AD5360_REG_SF_OFS(ofs_index), val, 0);
	default:
		break;
	}

	return -EINVAL;
}

static int ad5360_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val,
			   int *val2,
			   long m)
{
	struct ad5360_state *st = iio_priv(indio_dev);
	unsigned int ofs_index;
	int scale_uv;
	int ret;

	switch (m) {
	case IIO_CHAN_INFO_RAW:
		ret = ad5360_read(indio_dev, AD5360_READBACK_X1A,
			chan->address);
		if (ret < 0)
			return ret;
		*val = ret >> chan->scan_type.shift;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		/* vout = 4 * vref * dac_code */
		scale_uv = ad5360_get_channel_vref(st, chan->channel) * 4 * 100;
		if (scale_uv < 0)
			return scale_uv;

		scale_uv >>= (chan->scan_type.realbits);
		*val =  scale_uv / 100000;
		*val2 = (scale_uv % 100000) * 10;
		return IIO_VAL_INT_PLUS_MICRO;
	case IIO_CHAN_INFO_CALIBBIAS:
		ret = ad5360_read(indio_dev, AD5360_READBACK_OFFSET,
			chan->address);
		if (ret < 0)
			return ret;
		*val = ret;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_CALIBSCALE:
		ret = ad5360_read(indio_dev, AD5360_READBACK_GAIN,
			chan->address);
		if (ret < 0)
			return ret;
		*val = ret;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_OFFSET:
		ofs_index = ad5360_get_channel_vref_index(st, chan->channel);
		ret = ad5360_read(indio_dev, AD5360_READBACK_SF,
			AD5360_REG_SF_OFS(ofs_index));
		if (ret < 0)
			return ret;

		/* Offset register is always 14 bits; rescale to raw width. */
		ret <<= (chan->scan_type.realbits - 14);
		*val = -ret;
		return IIO_VAL_INT;
	}

	return -EINVAL;
}

static const struct iio_info ad5360_info = {
	.read_raw = ad5360_read_raw,
	.write_raw = ad5360_write_raw,
	.attrs = &ad5360_attribute_group,
	.driver_module = THIS_MODULE,
};

static const char * const ad5360_vref_name[] = {
	 "vref0", "vref1", "vref2"
};

/*
 * Build the channel spec array from the chip's channel template.
 * The array is freed with kfree() in the probe error path and in remove.
 */
static int ad5360_alloc_channels(struct iio_dev *indio_dev)
{
	struct ad5360_state *st = iio_priv(indio_dev);
	struct iio_chan_spec *channels;
	unsigned int i;

	channels = kcalloc(st->chip_info->num_channels,
			   sizeof(struct iio_chan_spec), GFP_KERNEL);

	if (!channels)
		return -ENOMEM;

	for (i = 0; i < st->chip_info->num_channels; ++i) {
		channels[i] = st->chip_info->channel_template;
		channels[i].channel = i;
		channels[i].address = AD5360_CHAN_ADDR(i);
	}

	indio_dev->channels = channels;

	return 0;
}

static int ad5360_probe(struct spi_device *spi)
{
	enum ad5360_type type = spi_get_device_id(spi)->driver_data;
	struct iio_dev *indio_dev;
	struct ad5360_state *st;
	unsigned int i;
	int ret;

	indio_dev = iio_device_alloc(sizeof(*st));
	if (indio_dev == NULL) {
		dev_err(&spi->dev, "Failed to allocate iio device\n");
		return  -ENOMEM;
	}

	st = iio_priv(indio_dev);
	spi_set_drvdata(spi, indio_dev);

	st->chip_info = &ad5360_chip_info_tbl[type];
	st->spi = spi;

	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->info = &ad5360_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->num_channels = st->chip_info->num_channels;

	ret = ad5360_alloc_channels(indio_dev);
	if (ret) {
		dev_err(&spi->dev, "Failed to allocate channel spec: %d\n", ret);
		goto error_free;
	}

	for (i = 0; i < st->chip_info->num_vrefs; ++i)
		st->vref_reg[i].supply = ad5360_vref_name[i];

	ret = regulator_bulk_get(&st->spi->dev, st->chip_info->num_vrefs,
		st->vref_reg);
	if (ret) {
		dev_err(&spi->dev, "Failed to request vref regulators: %d\n",
			ret);
		goto error_free_channels;
	}

	ret = regulator_bulk_enable(st->chip_info->num_vrefs, st->vref_reg);
	if (ret) {
		dev_err(&spi->dev, "Failed to enable vref regulators: %d\n",
			ret);
		goto error_free_reg;
	}

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
		goto error_disable_reg;
	}

	return 0;

error_disable_reg:
	regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
error_free_reg:
	regulator_bulk_free(st->chip_info->num_vrefs, st->vref_reg);
error_free_channels:
	kfree(indio_dev->channels);
error_free:
	iio_device_free(indio_dev);

	return ret;
}

static int ad5360_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad5360_state *st = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);

	kfree(indio_dev->channels);

	regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
	regulator_bulk_free(st->chip_info->num_vrefs, st->vref_reg);

	iio_device_free(indio_dev);

	return 0;
}

static const struct spi_device_id ad5360_ids[] = {
	{ "ad5360", ID_AD5360 },
	{ "ad5361", ID_AD5361 },
	{ "ad5362", ID_AD5362 },
	{ "ad5363", ID_AD5363 },
	{ "ad5370", ID_AD5370 },
	{ "ad5371", ID_AD5371 },
	{ "ad5372", ID_AD5372 },
	{ "ad5373", ID_AD5373 },
	{}
};
MODULE_DEVICE_TABLE(spi, ad5360_ids);

static struct spi_driver ad5360_driver = {
	.driver = {
		   .name = "ad5360",
		   .owner = THIS_MODULE,
	},
	.probe = ad5360_probe,
	.remove = ad5360_remove,
	.id_table = ad5360_ids,
};
module_spi_driver(ad5360_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5360/61/62/63/70/71/72/73 DAC");
MODULE_LICENSE("GPL v2");
gpl-2.0
yemingxing/X9180_kernel
drivers/video/msm/vidc/common/vcd/vcd_client_sm.c
2158
43714
/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <media/msm/vidc_type.h> #include "vcd.h" static const struct vcd_clnt_state_table *vcd_clnt_state_table[]; void vcd_clnt_handle_device_err_fatal(struct vcd_clnt_ctxt *cctxt, u32 event) { if (cctxt->clnt_state.state == VCD_CLIENT_STATE_NULL) { cctxt->callback(VCD_EVT_RESP_OPEN, VCD_ERR_HW_FATAL, NULL, 0, cctxt, cctxt->client_data); vcd_destroy_client_context(cctxt); return; } if (event == VCD_EVT_RESP_BASE) event = VCD_EVT_IND_HWERRFATAL; if (cctxt->clnt_state.state != VCD_CLIENT_STATE_INVALID) { cctxt->callback(event, VCD_ERR_HW_FATAL, NULL, 0, cctxt, cctxt->client_data); vcd_flush_buffers_in_err_fatal(cctxt); vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_INVALID, CLIENT_STATE_EVENT_NUMBER(clnt_cb)); } } static u32 vcd_close_in_open(struct vcd_clnt_ctxt *cctxt) { u32 rc = VCD_S_SUCCESS; VCD_MSG_LOW("vcd_close_in_open:"); if (cctxt->in_buf_pool.allocated || cctxt->out_buf_pool.allocated) { VCD_MSG_ERROR("\n Allocated buffers are not freed yet"); return VCD_ERR_ILLEGAL_OP; } vcd_destroy_client_context(cctxt); return rc; } static u32 vcd_close_in_invalid(struct vcd_clnt_ctxt *cctxt) { VCD_MSG_LOW("vcd_close_in_invalid:"); if (cctxt->in_buf_pool.allocated || cctxt->out_buf_pool.allocated){ VCD_MSG_ERROR("Allocated buffers are not freed yet"); return VCD_ERR_ILLEGAL_OP; } if (cctxt->status.mask & VCD_CLEANING_UP) cctxt->status.mask |= VCD_CLOSE_PENDING; else vcd_destroy_client_context(cctxt); return VCD_S_SUCCESS; } static u32 
vcd_start_in_run_cmn(struct vcd_clnt_ctxt *cctxt) { VCD_MSG_LOW("vcd_start_in_run_cmn:"); cctxt->callback(VCD_EVT_RESP_START, VCD_S_SUCCESS, NULL, 0, cctxt, cctxt->client_data); return VCD_S_SUCCESS; } static u32 vcd_encode_start_in_open(struct vcd_clnt_ctxt *cctxt) { u32 rc = VCD_S_SUCCESS; struct vcd_property_hdr prop_hdr; struct vcd_property_vop_timing timing; VCD_MSG_LOW("vcd_encode_start_in_open:"); if (cctxt->decoding) { VCD_MSG_ERROR("vcd_encode_init for decoder client"); return VCD_ERR_ILLEGAL_OP; } if ((!cctxt->meta_mode && !cctxt->in_buf_pool.entries) || !cctxt->out_buf_pool.entries || (!cctxt->meta_mode && cctxt->in_buf_pool.validated != cctxt->in_buf_pool.count) || cctxt->out_buf_pool.validated != cctxt->out_buf_pool.count) { VCD_MSG_HIGH("%s: Buffer pool is not completely setup yet", __func__); } rc = vcd_sched_add_client(cctxt); VCD_FAILED_RETURN(rc, "Failed: vcd_sched_add_client"); prop_hdr.prop_id = VCD_I_VOP_TIMING; prop_hdr.sz = sizeof(struct vcd_property_vop_timing); rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &timing); VCD_FAILED_RETURN(rc, "Failed: Get VCD_I_VOP_TIMING"); if (!timing.vop_time_resolution) { VCD_MSG_ERROR("Vop_time_resolution value is zero"); return VCD_ERR_FAIL; } cctxt->time_resoln = timing.vop_time_resolution; rc = vcd_process_cmd_sess_start(cctxt); if (!VCD_FAILED(rc)) { vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_STARTING, CLIENT_STATE_EVENT_NUMBER (encode_start)); } return rc; } static u32 vcd_encode_start_in_run(struct vcd_clnt_ctxt *cctxt) { VCD_MSG_LOW("vcd_encode_start_in_run:"); (void) vcd_start_in_run_cmn(cctxt); return VCD_S_SUCCESS; } static u32 vcd_encode_frame_cmn(struct vcd_clnt_ctxt *cctxt, struct vcd_frame_data *input_frame) { VCD_MSG_LOW("vcd_encode_frame_cmn in %d:", cctxt->clnt_state.state); if (cctxt->decoding) { VCD_MSG_ERROR("vcd_encode_frame for decoder client"); return VCD_ERR_ILLEGAL_OP; } return vcd_handle_input_frame(cctxt, input_frame); } static u32 vcd_decode_start_in_open 
(struct vcd_clnt_ctxt *cctxt, struct vcd_sequence_hdr *seq_hdr) { u32 rc = VCD_S_SUCCESS; VCD_MSG_LOW("vcd_decode_start_in_open:"); if (!cctxt->decoding) { VCD_MSG_ERROR("vcd_decode_init for encoder client"); return VCD_ERR_ILLEGAL_OP; } if (seq_hdr) { VCD_MSG_HIGH("Seq hdr supplied. len = %d", seq_hdr->sequence_header_len); rc = vcd_store_seq_hdr(cctxt, seq_hdr); } else { VCD_MSG_HIGH("Seq hdr not supplied"); cctxt->seq_hdr.sequence_header_len = 0; cctxt->seq_hdr.sequence_header = NULL; } VCD_FAILED_RETURN(rc, "Err processing seq hdr"); rc = vcd_process_cmd_sess_start(cctxt); if (!VCD_FAILED(rc)) { vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_STARTING, CLIENT_STATE_EVENT_NUMBER (decode_start)); } return rc; } static u32 vcd_decode_start_in_run(struct vcd_clnt_ctxt *cctxt, struct vcd_sequence_hdr *seqhdr) { VCD_MSG_LOW("vcd_decode_start_in_run:"); (void) vcd_start_in_run_cmn(cctxt); return VCD_S_SUCCESS; } static u32 vcd_decode_frame_cmn (struct vcd_clnt_ctxt *cctxt, struct vcd_frame_data *input_frame) { VCD_MSG_LOW("vcd_decode_frame_cmn in %d:", cctxt->clnt_state.state); if (!cctxt->decoding) { VCD_MSG_ERROR("Decode_frame api called for Encoder client"); return VCD_ERR_ILLEGAL_OP; } return vcd_handle_input_frame(cctxt, input_frame); } static u32 vcd_pause_cmn(struct vcd_clnt_ctxt *cctxt) { u32 rc = VCD_S_SUCCESS; VCD_MSG_LOW("vcd_pause_cmn:"); if (cctxt->sched_clnt_hdl) { rc = vcd_sched_suspend_resume_clnt(cctxt, false); VCD_FAILED_RETURN(rc, "Failed: vcd_sched_suspend_resume_clnt"); } if (cctxt->status.frame_submitted > 0) { vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_PAUSING, CLIENT_STATE_EVENT_NUMBER (pause)); } else { VCD_MSG_HIGH("No client frames are currently being processed"); vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_PAUSED, CLIENT_STATE_EVENT_NUMBER (pause)); cctxt->callback(VCD_EVT_RESP_PAUSE, VCD_S_SUCCESS, NULL, 0, cctxt, cctxt->client_data); rc = vcd_power_event(cctxt->dev_ctxt, cctxt, VCD_EVT_PWR_CLNT_PAUSE); if 
(VCD_FAILED(rc)) VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_PAUSE_END failed"); } return VCD_S_SUCCESS; } static u32 vcd_resume_in_paused(struct vcd_clnt_ctxt *cctxt) { struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; u32 rc = VCD_S_SUCCESS; VCD_MSG_LOW("vcd_resume_in_paused:"); if (cctxt->sched_clnt_hdl) { rc = vcd_power_event(cctxt->dev_ctxt, cctxt, VCD_EVT_PWR_CLNT_RESUME); if (VCD_FAILED(rc)) { VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_RESUME failed"); } else { rc = vcd_sched_suspend_resume_clnt(cctxt, true); if (VCD_FAILED(rc)) { VCD_MSG_ERROR ("rc = 0x%x. Failed: " "vcd_sched_suspend_resume_clnt", rc); } } if (!VCD_FAILED(rc)) { vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_RUN, CLIENT_STATE_EVENT_NUMBER (resume)); vcd_try_submit_frame(dev_ctxt); } } else { vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_RUN, CLIENT_STATE_EVENT_NUMBER (resume)); } return rc; } static u32 vcd_flush_cmn(struct vcd_clnt_ctxt *cctxt, u32 mode) { u32 rc = VCD_S_SUCCESS; VCD_MSG_LOW("vcd_flush_cmn in %d:", cctxt->clnt_state.state); rc = vcd_flush_buffers(cctxt, mode); VCD_FAILED_RETURN(rc, "Failed: vcd_flush_buffers"); if (cctxt->status.frame_submitted > 0) { vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_FLUSHING, CLIENT_STATE_EVENT_NUMBER (flush)); } else { VCD_MSG_HIGH("All buffers are flushed"); cctxt->status.mask |= (mode & VCD_FLUSH_ALL); vcd_send_flush_done(cctxt, VCD_S_SUCCESS); } return rc; } static u32 vcd_flush_inopen(struct vcd_clnt_ctxt *cctxt, u32 mode) { VCD_MSG_LOW("vcd_flush_inopen:"); cctxt->status.mask |= (mode & VCD_FLUSH_ALL); vcd_send_flush_done(cctxt, VCD_S_SUCCESS); return VCD_S_SUCCESS; } static u32 vcd_flush_in_flushing (struct vcd_clnt_ctxt *cctxt, u32 mode) { u32 rc = VCD_S_SUCCESS; VCD_MSG_LOW("vcd_flush_in_flushing:"); rc = vcd_flush_buffers(cctxt, mode); return rc; } static u32 vcd_flush_in_eos(struct vcd_clnt_ctxt *cctxt, u32 mode) { u32 rc = VCD_S_SUCCESS; VCD_MSG_LOW("vcd_flush_in_eos:"); if (mode > VCD_FLUSH_ALL || !mode) { 
VCD_MSG_ERROR("Invalid flush mode %d", mode); return VCD_ERR_ILLEGAL_PARM; } VCD_MSG_MED("Flush mode requested %d", mode); if (!(cctxt->status.frame_submitted) && (!cctxt->decoding)) { rc = vcd_flush_buffers(cctxt, mode); if (!VCD_FAILED(rc)) { VCD_MSG_HIGH("All buffers are flushed"); cctxt->status.mask |= (mode & VCD_FLUSH_ALL); vcd_send_flush_done(cctxt, VCD_S_SUCCESS); } } else cctxt->status.mask |= (mode & VCD_FLUSH_ALL); return rc; } static u32 vcd_flush_in_invalid(struct vcd_clnt_ctxt *cctxt, u32 mode) { u32 rc = VCD_S_SUCCESS; VCD_MSG_LOW("vcd_flush_in_invalid:"); if (!(cctxt->status.mask & VCD_CLEANING_UP)) { rc = vcd_flush_buffers(cctxt, mode); if (!VCD_FAILED(rc)) { VCD_MSG_HIGH("All buffers are flushed"); cctxt->status.mask |= (mode & VCD_FLUSH_ALL); vcd_send_flush_done(cctxt, VCD_S_SUCCESS); } } else cctxt->status.mask |= (mode & VCD_FLUSH_ALL); return rc; } static u32 vcd_stop_cmn(struct vcd_clnt_ctxt *cctxt) { struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; u32 rc = VCD_S_SUCCESS; struct vcd_transc *transc; VCD_MSG_LOW("vcd_stop_cmn in %d:", cctxt->clnt_state.state); rc = vcd_flush_buffers(cctxt, VCD_FLUSH_ALL); VCD_FAILED_RETURN(rc, "Failed: vcd_flush_buffers"); if (!cctxt->status.frame_submitted) { if (vcd_get_command_channel(dev_ctxt, &transc)) { rc = vcd_power_event(dev_ctxt, cctxt, VCD_EVT_PWR_CLNT_CMD_BEGIN); if (!VCD_FAILED(rc)) { transc->type = VCD_CMD_CODEC_STOP; transc->cctxt = cctxt; rc = vcd_submit_cmd_sess_end(transc); } else { VCD_MSG_ERROR("Failed:" " VCD_EVT_PWR_CLNT_CMD_BEGIN"); } if (VCD_FAILED(rc)) { vcd_release_command_channel(dev_ctxt, transc); } } else { vcd_client_cmd_flush_and_en_q(cctxt, VCD_CMD_CODEC_STOP); } } if (VCD_FAILED(rc)) { (void)vcd_power_event(dev_ctxt, cctxt, VCD_EVT_PWR_CLNT_CMD_FAIL); } else { vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_STOPPING, CLIENT_STATE_EVENT_NUMBER (stop)); } return rc; } static u32 vcd_stop_inopen(struct vcd_clnt_ctxt *cctxt) { VCD_MSG_LOW("vcd_stop_inopen:"); 
	cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL, 0, cctxt,
			cctxt->client_data);
	return VCD_S_SUCCESS;
}

/*
 * Stop request received while in RUN state: issue the common stop path
 * and, if at least one input frame was ever received, raise the
 * "last frame" power event so the power layer can wind down.
 */
static u32 vcd_stop_in_run(struct vcd_clnt_ctxt *cctxt)
{
	u32 rc = VCD_S_SUCCESS;

	VCD_MSG_LOW("vcd_stop_in_run:");

	rc = vcd_stop_cmn(cctxt);

	if (!VCD_FAILED(rc) && (cctxt->status.mask & VCD_FIRST_IP_RCVD)) {
		rc = vcd_power_event(cctxt->dev_ctxt, cctxt,
				     VCD_EVT_PWR_CLNT_LAST_FRAME);
	}

	return rc;
}

/*
 * Stop request received while in EOS state.  If we are merely waiting
 * for an output buffer to carry the EOS, stop immediately and clear the
 * wait flag; otherwise defer the stop until EOS processing completes by
 * setting VCD_STOP_PENDING.
 */
static u32 vcd_stop_in_eos(struct vcd_clnt_ctxt *cctxt)
{
	u32 rc = VCD_S_SUCCESS;

	VCD_MSG_LOW("vcd_stop_in_eos:");

	if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF) {
		rc = vcd_stop_cmn(cctxt);
		if (!VCD_FAILED(rc)) {
			rc = vcd_power_event(cctxt->dev_ctxt, cctxt,
					     VCD_EVT_PWR_CLNT_LAST_FRAME);
			cctxt->status.mask &= ~VCD_EOS_WAIT_OP_BUF;
		}
	} else
		cctxt->status.mask |= VCD_STOP_PENDING;

	return rc;
}

/*
 * Stop request in INVALID state.  If cleanup is already running, just
 * mark the stop as pending; otherwise flush all buffers and complete
 * the stop synchronously via the client callback.
 */
static u32 vcd_stop_in_invalid(struct vcd_clnt_ctxt *cctxt)
{
	VCD_MSG_LOW("vcd_stop_in_invalid:");

	if (cctxt->status.mask & VCD_CLEANING_UP) {
		cctxt->status.mask |= VCD_STOP_PENDING;
	} else {
		(void) vcd_flush_buffers(cctxt, VCD_FLUSH_ALL);
		cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL, 0,
				cctxt, cctxt->client_data);
	}

	return VCD_S_SUCCESS;
}

/*
 * Common set-property entry shared by several states.  The property is
 * first offered to the DDL layer; failures are tolerated only for
 * VCD_I_VOP_TIMING_CONSTANT_DELTA, which VCD handles locally.  A subset
 * of properties is then mirrored into the client context (live mode,
 * frame rate/size, intra period, perf level, constant frame delta).
 */
static u32 vcd_set_property_cmn
    (struct vcd_clnt_ctxt *cctxt,
     struct vcd_property_hdr *prop_hdr, void *prop_val)
{
	u32 rc;

	VCD_MSG_LOW("vcd_set_property_cmn in %d:", cctxt->clnt_state.state);
	VCD_MSG_LOW("property Id = %d", prop_hdr->prop_id);
	if (!prop_hdr->sz || !prop_hdr->prop_id) {
		VCD_MSG_MED("Bad parameters");
		return VCD_ERR_ILLEGAL_PARM;
	}

	rc = ddl_set_property(cctxt->ddl_handle, prop_hdr, prop_val);
	if (rc) {
		/* Some properties aren't known to ddl that we can handle */
		if (prop_hdr->prop_id != VCD_I_VOP_TIMING_CONSTANT_DELTA)
			VCD_FAILED_RETURN(rc, "Failed: ddl_set_property");
	}

	switch (prop_hdr->prop_id) {
	case VCD_I_META_BUFFER_MODE:
		{
			/*
			 * NOTE(review): payload is cast to
			 * struct vcd_property_live here as well — presumably
			 * the meta-mode property shares the same single-u32
			 * layout; confirm against the property definitions.
			 */
			struct vcd_property_live *live =
				(struct vcd_property_live *)prop_val;
			cctxt->meta_mode = live->live;
			break;
		}
	case VCD_I_LIVE:
		{
			struct vcd_property_live *live =
				(struct vcd_property_live *)prop_val;
			cctxt->live = live->live;
			break;
		}
	case VCD_I_FRAME_RATE:
		{
			/* Only forwarded once a scheduler client exists. */
			if (cctxt->sched_clnt_hdl) {
				rc = vcd_set_frame_rate(cctxt,
					(struct vcd_property_frame_rate *)
					prop_val);
			}
			break;
		}
	case VCD_I_FRAME_SIZE:
		{
			if (cctxt->sched_clnt_hdl) {
				rc = vcd_set_frame_size(cctxt,
					(struct vcd_property_frame_size *)
					prop_val);
			}
			break;
		}
	case VCD_I_SET_TURBO_CLK:
		{
			if (cctxt->sched_clnt_hdl)
				rc = vcd_set_perf_turbo_level(cctxt);
			break;
		}
	case VCD_I_INTRA_PERIOD:
		{
			struct vcd_property_i_period *iperiod =
				(struct vcd_property_i_period *)prop_val;
			cctxt->bframe = iperiod->b_frames;
			break;
		}
	case VCD_REQ_PERF_LEVEL:
		rc = vcd_req_perf_level(cctxt,
			(struct vcd_property_perf_level *)prop_val);
		break;
	case VCD_I_VOP_TIMING_CONSTANT_DELTA:
		{
			struct vcd_property_vop_timing_constant_delta *delta =
				prop_val;
			/* Handled by VCD itself; overrides any ddl rc. */
			if (delta->constant_delta > 0) {
				cctxt->time_frame_delta = delta->constant_delta;
				rc = VCD_S_SUCCESS;
			} else {
				VCD_MSG_ERROR("Frame delta must be positive");
				rc = VCD_ERR_ILLEGAL_PARM;
			}
			break;
		}
	default:
		{
			break;
		}
	}
	return rc;
}

/*
 * Common get-property entry.  Queries the DDL layer first; only
 * VCD_I_VOP_TIMING_CONSTANT_DELTA is answered locally from the client
 * context when ddl does not know it.
 */
static u32 vcd_get_property_cmn
    (struct vcd_clnt_ctxt *cctxt,
     struct vcd_property_hdr *prop_hdr, void *prop_val)
{
	int rc;

	VCD_MSG_LOW("vcd_get_property_cmn in %d:", cctxt->clnt_state.state);
	VCD_MSG_LOW("property Id = %d", prop_hdr->prop_id);
	if (!prop_hdr->sz || !prop_hdr->prop_id) {
		VCD_MSG_MED("Bad parameters");
		return VCD_ERR_ILLEGAL_PARM;
	}
	rc = ddl_get_property(cctxt->ddl_handle, prop_hdr, prop_val);
	if (rc) {
		/* Some properties aren't known to ddl that we can handle */
		/*
		 * NOTE(review): the message below says "ddl_set_property"
		 * although this is the get path — looks like a copy/paste
		 * slip in the log text; left unchanged here.
		 */
		if (prop_hdr->prop_id != VCD_I_VOP_TIMING_CONSTANT_DELTA)
			VCD_FAILED_RETURN(rc, "Failed: ddl_set_property");
	}
	switch (prop_hdr->prop_id) {
	case VCD_I_VOP_TIMING_CONSTANT_DELTA:
		{
			struct vcd_property_vop_timing_constant_delta *delta =
				(struct vcd_property_vop_timing_constant_delta *)
				prop_val;
			delta->constant_delta = cctxt->time_frame_delta;
			rc = VCD_S_SUCCESS;
		}
	}
	return rc;
}

/*
 * Set buffer requirements for the input or output pool.  Rejected when
 * buffers are still allocated/validated or when the data path is
 * already active (first frame of that direction already received).
 * On success any stale pool entries are released so the pool can be
 * repopulated to the new requirement.
 */
static u32 vcd_set_buffer_requirements_cmn
    (struct vcd_clnt_ctxt *cctxt,
     enum vcd_buffer_type buffer,
     struct vcd_buffer_requirement *buffer_req)
{
	struct vcd_property_hdr Prop_hdr;
	u32 rc = VCD_S_SUCCESS;
	struct vcd_buffer_pool *buf_pool;
	u32 first_frm_recvd = 0;

	VCD_MSG_LOW("vcd_set_buffer_requirements_cmn in %d:",
		    cctxt->clnt_state.state);

	/* Encoders may only change requirements while still in OPEN. */
	if (!cctxt->decoding &&
	    cctxt->clnt_state.state != VCD_CLIENT_STATE_OPEN) {
		VCD_MSG_ERROR("Bad state (%d) for encoder",
			      cctxt->clnt_state.state);
		return VCD_ERR_BAD_STATE;
	}

	VCD_MSG_MED("Buffer type = %d", buffer);

	if (buffer == VCD_BUFFER_INPUT) {
		Prop_hdr.prop_id = DDL_I_INPUT_BUF_REQ;
		buf_pool = &cctxt->in_buf_pool;
		first_frm_recvd = VCD_FIRST_IP_RCVD;
	} else if (buffer == VCD_BUFFER_OUTPUT) {
		Prop_hdr.prop_id = DDL_I_OUTPUT_BUF_REQ;
		buf_pool = &cctxt->out_buf_pool;
		first_frm_recvd = VCD_FIRST_OP_RCVD;
	} else {
		rc = VCD_ERR_ILLEGAL_PARM;
	}

	VCD_FAILED_RETURN(rc, "Invalid buffer type provided");

	if (buf_pool->validated > 0) {
		VCD_MSG_ERROR("Need to free allocated buffers");
		return VCD_ERR_ILLEGAL_OP;
	}

	first_frm_recvd &= cctxt->status.mask;
	if (first_frm_recvd) {
		VCD_MSG_ERROR("VCD SetBufReq called when data path is active");
		return VCD_ERR_BAD_STATE;
	}

	Prop_hdr.sz = sizeof(*buffer_req);
	rc = ddl_set_property(cctxt->ddl_handle, &Prop_hdr, buffer_req);
	VCD_FAILED_RETURN(rc, "Failed: ddl_set_property");
	if (buf_pool->entries) {
		VCD_MSG_MED("Resetting buffer requirements");
		vcd_free_buffer_pool_entries(buf_pool);
	}
	return rc;
}

/*
 * Query buffer requirements for the given direction straight from the
 * DDL layer.
 */
static u32 vcd_get_buffer_requirements_cmn
    (struct vcd_clnt_ctxt *cctxt,
     enum vcd_buffer_type buffer,
     struct vcd_buffer_requirement *buffer_req)
{
	struct vcd_property_hdr Prop_hdr;
	u32 rc = VCD_S_SUCCESS;

	VCD_MSG_LOW("vcd_get_buffer_requirements_cmn in %d:",
		    cctxt->clnt_state.state);

	VCD_MSG_MED("Buffer type = %d", buffer);

	if (buffer == VCD_BUFFER_INPUT)
		Prop_hdr.prop_id = DDL_I_INPUT_BUF_REQ;
	else if (buffer == VCD_BUFFER_OUTPUT)
		Prop_hdr.prop_id = DDL_I_OUTPUT_BUF_REQ;
	else
		rc = VCD_ERR_ILLEGAL_PARM;

	VCD_FAILED_RETURN(rc, "Invalid buffer type provided");

	Prop_hdr.sz = sizeof(*buffer_req);

	return ddl_get_property(cctxt->ddl_handle, &Prop_hdr, buffer_req);
}

/*
 * Register a caller-allocated buffer with the appropriate pool:
 * validate/locate the pool, then hand off to the internal setter.
 */
static u32 vcd_set_buffer_cmn
    (struct vcd_clnt_ctxt *cctxt,
     enum vcd_buffer_type buffer_type, u8 *buffer, u32 buf_size)
{
	u32 rc;
	struct vcd_buffer_pool *buf_pool;

	VCD_MSG_LOW("vcd_set_buffer_cmn in %d:", cctxt->clnt_state.state);

	rc = vcd_common_allocate_set_buffer(cctxt, buffer_type, buf_size,
					    &buf_pool);

	if (!VCD_FAILED(rc)) {
		rc = vcd_set_buffer_internal(cctxt, buf_pool, buffer,
					     buf_size);
	}

	return rc;
}

/*
 * Allocate a buffer on behalf of the client and return its virtual and
 * physical addresses.  Pool selection/validation is shared with
 * vcd_set_buffer_cmn via vcd_common_allocate_set_buffer.
 */
static u32 vcd_allocate_buffer_cmn
    (struct vcd_clnt_ctxt *cctxt,
     enum vcd_buffer_type buffer,
     u32 buf_size, u8 **vir_buf_addr, u8 **phy_buf_addr)
{
	u32 rc;
	struct vcd_buffer_pool *buf_pool;

	VCD_MSG_LOW("vcd_allocate_buffer_cmn in %d:",
		    cctxt->clnt_state.state);

	rc = vcd_common_allocate_set_buffer(cctxt, buffer, buf_size,
					    &buf_pool);

	if (!VCD_FAILED(rc)) {
		rc = vcd_allocate_buffer_internal(cctxt, buf_pool, buf_size,
						  vir_buf_addr, phy_buf_addr);
	}

	return rc;
}

/* Release a single previously set/allocated buffer. */
static u32 vcd_free_buffer_cmn
    (struct vcd_clnt_ctxt *cctxt,
     enum vcd_buffer_type buffer_type, u8 *buffer)
{
	VCD_MSG_LOW("vcd_free_buffer_cmn in %d:", cctxt->clnt_state.state);

	return vcd_free_one_buffer_internal(cctxt, buffer_type, buffer);
}

/*
 * Common fill-output-buffer path.  During reconfig the buffer is
 * returned to the client empty right away.  Otherwise the buffer is
 * validated, the first-output-buffer special case is handled, the
 * entry is queued on the output pool and handed to the hardware; a
 * scheduler token is credited so a pending frame may be submitted.
 *
 * NOTE(review): "Inavlid" in the error string below is a typo in a
 * runtime message; left as-is since it is program output.
 */
static u32 vcd_fill_output_buffer_cmn
    (struct vcd_clnt_ctxt *cctxt, struct vcd_frame_data *buffer)
{
	u32 rc = VCD_S_SUCCESS;
	struct vcd_buffer_entry *buf_entry;
	u32 result = true;
	u32 handled = true;

	if (!cctxt || !buffer) {
		VCD_MSG_ERROR("%s(): Inavlid params cctxt %p buffer %p",
			      __func__, cctxt, buffer);
		return VCD_ERR_BAD_POINTER;
	}
	VCD_MSG_LOW("vcd_fill_output_buffer_cmn in %d:",
		    cctxt->clnt_state.state);

	if (cctxt->status.mask & VCD_IN_RECONFIG) {
		buffer->time_stamp = 0;
		buffer->data_len = 0;
		VCD_MSG_LOW("In reconfig: Return output buffer");
		cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS,
				buffer, sizeof(struct vcd_frame_data),
				cctxt, cctxt->client_data);
		return rc;
	}

	buf_entry = vcd_check_fill_output_buffer(cctxt, buffer);
	if (!buf_entry)
		return VCD_ERR_BAD_POINTER;

	if (!(cctxt->status.mask & VCD_FIRST_OP_RCVD)) {
		rc = vcd_handle_first_fill_output_buffer(cctxt, buffer,
							 &handled);
		VCD_FAILED_RETURN(rc,
			"Failed: vcd_handle_first_fill_output_buffer");
		if (handled)
			return rc;
	}

	result = vcd_buffer_pool_entry_en_q(&cctxt->out_buf_pool, buf_entry);

	/* En-queue failure is fatal for encoders only. */
	if (!result && !cctxt->decoding) {
		VCD_MSG_ERROR("Failed: vcd_buffer_pool_entry_en_q");
		return VCD_ERR_FAIL;
	}

	buf_entry->frame = *buffer;
	rc = vcd_return_op_buffer_to_hw(cctxt, buf_entry);
	if (!VCD_FAILED(rc) && cctxt->sched_clnt_hdl) {
		cctxt->sched_clnt_hdl->tkns++;
		vcd_try_submit_frame(cctxt->dev_ctxt);
	}
	return rc;
}

/*
 * Fill-output-buffer while in EOS state.  If we were blocked waiting
 * for an output buffer to carry the EOS marker, stamp this buffer with
 * the saved EOS tag/timestamp, deliver it empty with the EOS flag, and
 * transition back to RUN; otherwise fall through to the common path.
 */
static u32 vcd_fill_output_buffer_in_eos
    (struct vcd_clnt_ctxt *cctxt, struct vcd_frame_data *buffer)
{
	u32 rc = VCD_S_SUCCESS;
	struct vcd_buffer_entry *buf_entry;

	VCD_MSG_LOW("vcd_fill_output_buffer_in_eos:");

	buf_entry = vcd_check_fill_output_buffer(cctxt, buffer);
	if (!buf_entry)
		return VCD_ERR_BAD_POINTER;

	if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF) {
		VCD_MSG_HIGH("Got an output buffer we were waiting for");

		buf_entry->frame = *buffer;

		buf_entry->frame.data_len = 0;
		buf_entry->frame.flags |= VCD_FRAME_FLAG_EOS;
		buf_entry->frame.ip_frm_tag =
			cctxt->status.eos_trig_ip_frm.ip_frm_tag;
		buf_entry->frame.time_stamp =
			cctxt->status.eos_trig_ip_frm.time_stamp;

		cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS,
				&buf_entry->frame,
				sizeof(struct vcd_frame_data),
				cctxt, cctxt->client_data);

		cctxt->status.mask &= ~VCD_EOS_WAIT_OP_BUF;

		vcd_do_client_state_transition(cctxt,
					       VCD_CLIENT_STATE_RUN,
					       CLIENT_STATE_EVENT_NUMBER
					       (fill_output_buffer));
	} else {
		rc = vcd_fill_output_buffer_cmn(cctxt, buffer);
	}

	return rc;
}

/*
 * DDL callback while in STARTING state: handles start/stop completion
 * and fatal hardware errors during start-up.
 *
 * NOTE(review): the handle-mismatch message says "vcd_clnt_cb_in_initing"
 * although this handler is for the STARTING state — looks like a stale
 * copy of an older function name in the log text; left unchanged.
 */
static void vcd_clnt_cb_in_starting
    (struct vcd_clnt_ctxt *cctxt,
     u32 event, u32 status, void *payload, size_t sz,
     u32 *ddl_handle, void *const client_data)
{
	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
	struct vcd_transc *transc = (struct vcd_transc *)client_data;

	VCD_MSG_LOW("vcd_clnt_cb_in_starting:");
	if (cctxt->ddl_handle != ddl_handle) {
		VCD_MSG_ERROR("vcd_clnt_cb_in_initing: Wrong DDL handle %p",
			      ddl_handle);
		return;
	}

	switch (event) {
	case VCD_EVT_RESP_START:
		{
			vcd_handle_start_done(cctxt,
					      (struct vcd_transc *)
					      client_data, status);
			break;
		}
	case VCD_EVT_RESP_STOP:
		{
			vcd_handle_stop_done_in_starting(cctxt,
							 (struct vcd_transc *)
							 client_data, status);
			break;
		}
	case VCD_EVT_IND_HWERRFATAL:
		{
			cctxt->status.cmd_submitted--;
			vcd_mark_command_channel(cctxt->dev_ctxt, transc);
			vcd_handle_err_fatal(cctxt, VCD_EVT_RESP_START,
					     status);
			break;
		}
	default:
		{
			VCD_MSG_ERROR("Unexpected callback event=%d status=%d "
				      "from DDL", event, status);
			dev_ctxt->command_continue = false;
			break;
		}
	}
}

/*
 * DDL callback while in RUN state: the normal steady-state dispatch
 * for input/output completions, reconfig indications and fatal errors.
 * When a frame-level event ends a transaction, the frame channel is
 * released.
 */
static void vcd_clnt_cb_in_run
    (struct vcd_clnt_ctxt *cctxt,
     u32 event, u32 status, void *payload, size_t sz,
     u32 *ddl_handle, void *const client_data)
{
	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
	u32 rc = VCD_S_SUCCESS;

	if (cctxt->ddl_handle != ddl_handle) {
		VCD_MSG_ERROR("ddl_handle mismatch");
		return;
	}

	switch (event) {
	case VCD_EVT_RESP_INPUT_DONE:
		{
			rc = vcd_handle_input_done(cctxt, payload, event,
						   status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_DONE:
		{
			rc = vcd_handle_frame_done(cctxt, payload, event,
						   status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_REQ:
		{
			rc = vcd_handle_output_required(cctxt, payload,
							status);
			break;
		}
	case VCD_EVT_IND_OUTPUT_RECONFIG:
		{
			rc = vcd_handle_ind_output_reconfig(cctxt, payload,
							    status);
			break;
		}
	case VCD_EVT_RESP_TRANSACTION_PENDING:
		{
			vcd_handle_trans_pending(cctxt);
			break;
		}
	case VCD_EVT_IND_HWERRFATAL:
		{
			vcd_handle_ind_hw_err_fatal(cctxt,
						    VCD_EVT_IND_HWERRFATAL,
						    status);
			break;
		}
	case VCD_EVT_IND_INFO_OUTPUT_RECONFIG:
		{
			vcd_handle_ind_info_output_reconfig(cctxt, status);
			break;
		}
	case VCD_EVT_IND_INFO_LTRUSE_FAILED:
		{
			rc = vcd_handle_ltr_use_failed(cctxt, payload, sz,
						       status);
			break;
		}
	default:
		{
			VCD_MSG_ERROR
			    ("Unexpected callback event=%d status=%d from DDL",
			     event, status);
			dev_ctxt->command_continue = false;
			break;
		}
	}

	/* Release the frame channel when a frame transaction completes. */
	if (!VCD_FAILED(rc) &&
	    (event == VCD_EVT_RESP_INPUT_DONE ||
	     event == VCD_EVT_RESP_OUTPUT_DONE ||
	     event == VCD_EVT_RESP_OUTPUT_REQ)) {
		if (((struct ddl_frame_data_tag *)payload)->frm_trans_end)
			vcd_mark_frame_channel(cctxt->dev_ctxt);
	}
}

/*
 * DDL callback while in EOS state.  Uses the EOS-specific input/output
 * done handlers; on EOS completion or an output reconfig, releases the
 * frame channel and/or transitions back to RUN.  When the last pending
 * frame drains, vcd_handle_eos_trans_end() finishes the EOS sequence.
 */
static void vcd_clnt_cb_in_eos
    (struct vcd_clnt_ctxt *cctxt,
     u32 event, u32 status, void *payload, size_t sz,
     u32 *ddl_handle, void *const client_data)
{
	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
	struct vcd_transc *transc = NULL;
	u32 frm_trans_end = false, rc = VCD_S_SUCCESS;

	if (cctxt->ddl_handle != ddl_handle) {
		VCD_MSG_ERROR("ddl_handle mismatch");
		return;
	}

	switch (event) {
	case VCD_EVT_RESP_INPUT_DONE:
		{
			rc = vcd_handle_input_done_in_eos(cctxt, payload,
							  status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_DONE:
		{
			rc = vcd_handle_frame_done_in_eos(cctxt, payload,
							  status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_REQ:
		{
			rc = vcd_handle_output_required(cctxt, payload,
							status);
			break;
		}
	case VCD_EVT_RESP_EOS_DONE:
		{
			transc = (struct vcd_transc *)client_data;
			vcd_handle_eos_done(cctxt, transc, status);
			vcd_mark_frame_channel(cctxt->dev_ctxt);
			break;
		}
	case VCD_EVT_IND_OUTPUT_RECONFIG:
		{
			rc = vcd_handle_ind_output_reconfig(cctxt, payload,
							    status);
			if (!VCD_FAILED(rc)) {
				frm_trans_end = true;
				payload = NULL;
				/* Reconfig aborts EOS: go back to RUN. */
				vcd_do_client_state_transition(cctxt,
					VCD_CLIENT_STATE_RUN,
					CLIENT_STATE_EVENT_NUMBER(clnt_cb));
				VCD_MSG_LOW
				    ("RECONFIGinEOS:Suspending Client");
				rc = vcd_sched_suspend_resume_clnt(cctxt,
								   false);
				if (VCD_FAILED(rc)) {
					VCD_MSG_ERROR
					    ("Failed: suspend_resume_clnt. rc=0x%x",
					     rc);
				}
			}
			break;
		}
	case VCD_EVT_IND_HWERRFATAL:
		{
			vcd_handle_ind_hw_err_fatal(cctxt,
						    VCD_EVT_IND_HWERRFATAL,
						    status);
			break;
		}
	case VCD_EVT_IND_INFO_OUTPUT_RECONFIG:
		{
			vcd_handle_ind_info_output_reconfig(cctxt, status);
			break;
		}
	default:
		{
			VCD_MSG_ERROR
			    ("Unexpected callback event=%d status=%d from DDL",
			     event, status);
			dev_ctxt->command_continue = false;
			break;
		}
	}

	if (!VCD_FAILED(rc) &&
	    (event == VCD_EVT_RESP_INPUT_DONE ||
	     event == VCD_EVT_RESP_OUTPUT_DONE ||
	     event == VCD_EVT_RESP_OUTPUT_REQ ||
	     event == VCD_EVT_IND_OUTPUT_RECONFIG)) {
		if (payload &&
		    ((struct ddl_frame_data_tag *)payload)->frm_trans_end) {
			vcd_mark_frame_channel(cctxt->dev_ctxt);
			frm_trans_end = true;
		}
		if (frm_trans_end && !cctxt->status.frame_submitted)
			vcd_handle_eos_trans_end(cctxt);
	}
}

/*
 * DDL callback while in FLUSHING state.  Frame completions are reported
 * to the client as *_FLUSHED events.  When every submitted frame has
 * come back, pending input/output flushes are executed, the flush-done
 * callback is sent, interim channels released and the client returns
 * to RUN.
 */
static void vcd_clnt_cb_in_flushing
    (struct vcd_clnt_ctxt *cctxt,
     u32 event, u32 status, void *payload, size_t sz,
     u32 *ddl_handle, void *const client_data)
{
	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
	u32 rc = VCD_S_SUCCESS;
	u32 frm_trans_end = false;

	VCD_MSG_LOW("vcd_clnt_cb_in_flushing:");

	if (cctxt->ddl_handle != ddl_handle) {
		VCD_MSG_ERROR("ddl_handle mismatch");
		return;
	}

	switch (event) {
	case VCD_EVT_RESP_INPUT_DONE:
		{
			rc = vcd_handle_input_done(cctxt, payload,
						   VCD_EVT_RESP_INPUT_FLUSHED,
						   status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_DONE:
		{
			rc = vcd_handle_frame_done(cctxt, payload,
						   VCD_EVT_RESP_OUTPUT_FLUSHED,
						   status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_REQ:
		{
			rc = vcd_handle_output_required_in_flushing(cctxt,
								    payload);
			break;
		}
	case VCD_EVT_IND_OUTPUT_RECONFIG:
		{
			rc = vcd_handle_ind_output_reconfig(cctxt, payload,
							    status);
			if (!VCD_FAILED(rc)) {
				frm_trans_end = true;
				payload = NULL;
			}
			break;
		}
	case VCD_EVT_IND_HWERRFATAL:
		{
			vcd_handle_ind_hw_err_fatal(cctxt,
						    VCD_EVT_IND_HWERRFATAL,
						    status);
			break;
		}
	default:
		{
			VCD_MSG_ERROR
			    ("Unexpected callback event=%d status=%d from DDL",
			     event, status);
			dev_ctxt->command_continue = false;
			break;
		}
	}

	if (!VCD_FAILED(rc) &&
	    ((event == VCD_EVT_RESP_INPUT_DONE ||
	      event == VCD_EVT_RESP_OUTPUT_DONE ||
	      event == VCD_EVT_RESP_OUTPUT_REQ ||
	      event == VCD_EVT_IND_OUTPUT_RECONFIG))) {
		if (payload &&
		    ((struct ddl_frame_data_tag *)payload)->frm_trans_end) {
			vcd_mark_frame_channel(cctxt->dev_ctxt);
			frm_trans_end = true;
		}
		if (frm_trans_end && !cctxt->status.frame_submitted) {
			VCD_MSG_HIGH("All pending frames recvd from DDL");
			if (cctxt->status.mask & VCD_FLUSH_INPUT)
				vcd_flush_bframe_buffers(cctxt,
							 VCD_FLUSH_INPUT);
			if (cctxt->status.mask & VCD_FLUSH_OUTPUT)
				vcd_flush_output_buffers(cctxt);
			vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
			vcd_release_interim_frame_channels(dev_ctxt);
			VCD_MSG_HIGH("Flush complete");
			vcd_do_client_state_transition(cctxt,
				VCD_CLIENT_STATE_RUN,
				CLIENT_STATE_EVENT_NUMBER(clnt_cb));
		}
	}
}

/*
 * DDL callback while in STOPPING state.  Outstanding frames are drained
 * (reported as flushed); when none remain, all buffers are flushed, all
 * client frame transactions released, and the CODEC_STOP command is
 * queued to the hardware.
 */
static void vcd_clnt_cb_in_stopping
    (struct vcd_clnt_ctxt *cctxt,
     u32 event, u32 status, void *payload, size_t sz,
     u32 *ddl_handle, void *const client_data)
{
	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
	u32 rc = VCD_S_SUCCESS;
	u32 frm_trans_end = false;

	VCD_MSG_LOW("vcd_clnt_cb_in_stopping:");

	if (cctxt->ddl_handle != ddl_handle) {
		VCD_MSG_ERROR("ddl_handle mismatch");
		return;
	}

	switch (event) {
	case VCD_EVT_RESP_INPUT_DONE:
		{
			rc = vcd_handle_input_done(cctxt, payload,
						   VCD_EVT_RESP_INPUT_FLUSHED,
						   status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_DONE:
		{
			rc = vcd_handle_frame_done(cctxt, payload,
						   VCD_EVT_RESP_OUTPUT_FLUSHED,
						   status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_REQ:
		{
			rc = vcd_handle_output_required_in_flushing(cctxt,
								    payload);
			break;
		}
	case VCD_EVT_RESP_STOP:
		{
			vcd_handle_stop_done(cctxt,
					     (struct vcd_transc *)client_data,
					     status);
			break;
		}
	case VCD_EVT_IND_OUTPUT_RECONFIG:
		{
			(void) vcd_handle_ind_output_reconfig(cctxt, payload,
							      status);
			frm_trans_end = true;
			payload = NULL;
			break;
		}
	case VCD_EVT_IND_HWERRFATAL:
		{
			vcd_handle_ind_hw_err_fatal(cctxt, VCD_EVT_RESP_STOP,
						    status);
			break;
		}
	default:
		{
			VCD_MSG_ERROR
			    ("Unexpected callback event=%d status=%d from DDL",
			     event, status);
			dev_ctxt->command_continue = false;
			break;
		}
	}

	if (!VCD_FAILED(rc) &&
	    ((event == VCD_EVT_RESP_INPUT_DONE ||
	      event == VCD_EVT_RESP_OUTPUT_DONE) ||
	     event == VCD_EVT_RESP_OUTPUT_REQ ||
	     event == VCD_EVT_IND_OUTPUT_RECONFIG)) {
		if (payload &&
		    ((struct ddl_frame_data_tag *)payload)->frm_trans_end) {
			vcd_mark_frame_channel(cctxt->dev_ctxt);
			frm_trans_end = true;
		}
		if (frm_trans_end && !cctxt->status.frame_submitted) {
			VCD_MSG_HIGH("All pending frames recvd from DDL");
			vcd_flush_bframe_buffers(cctxt, VCD_FLUSH_INPUT);
			vcd_flush_output_buffers(cctxt);
			cctxt->status.mask &= ~VCD_FLUSH_ALL;
			vcd_release_all_clnt_frm_transc(cctxt);
			VCD_MSG_HIGH
			    ("All buffers flushed. Enqueuing stop cmd");
			vcd_client_cmd_flush_and_en_q(cctxt,
						      VCD_CMD_CODEC_STOP);
		}
	}
}

/*
 * DDL callback while in PAUSING state.  Normal frame events are
 * processed; once no frames remain submitted, the pause is confirmed
 * to the client, the state moves to PAUSED and the power layer is told
 * about the pause.
 */
static void vcd_clnt_cb_in_pausing
    (struct vcd_clnt_ctxt *cctxt,
     u32 event, u32 status, void *payload, size_t sz,
     u32 *ddl_handle, void *const client_data)
{
	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
	u32 rc = VCD_S_SUCCESS;
	u32 frm_trans_end = false;

	VCD_MSG_LOW("vcd_clnt_cb_in_pausing:");

	if (cctxt->ddl_handle != ddl_handle) {
		VCD_MSG_ERROR("ddl_handle mismatch");
		return;
	}

	switch (event) {
	case VCD_EVT_RESP_INPUT_DONE:
		{
			rc = vcd_handle_input_done(cctxt, payload, event,
						   status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_DONE:
		{
			rc = vcd_handle_frame_done(cctxt, payload, event,
						   status);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_REQ:
		{
			rc = vcd_handle_output_required(cctxt, payload,
							status);
			break;
		}
	case VCD_EVT_IND_OUTPUT_RECONFIG:
		{
			rc = vcd_handle_ind_output_reconfig(cctxt, payload,
							    status);
			if (!VCD_FAILED(rc)) {
				frm_trans_end = true;
				payload = NULL;
			}
			break;
		}
	case VCD_EVT_IND_HWERRFATAL:
		{
			vcd_handle_ind_hw_err_fatal(cctxt, VCD_EVT_RESP_PAUSE,
						    status);
			rc = VCD_ERR_FAIL;
			break;
		}
	default:
		{
			VCD_MSG_ERROR
			    ("Unexpected callback event=%d status=%d from DDL",
			     event, status);
			dev_ctxt->command_continue = false;
			break;
		}
	}

	if (!VCD_FAILED(rc)) {
		if (payload &&
		    ((struct ddl_frame_data_tag *)payload)->frm_trans_end) {
			vcd_mark_frame_channel(cctxt->dev_ctxt);
			frm_trans_end = true;
		}
		if (frm_trans_end && !cctxt->status.frame_submitted) {
			VCD_MSG_HIGH("All pending frames recvd from DDL");
			cctxt->callback(VCD_EVT_RESP_PAUSE, VCD_S_SUCCESS,
					NULL, 0, cctxt, cctxt->client_data);
			vcd_do_client_state_transition(cctxt,
				VCD_CLIENT_STATE_PAUSED,
				CLIENT_STATE_EVENT_NUMBER(clnt_cb));
			rc = vcd_power_event(cctxt->dev_ctxt, cctxt,
					     VCD_EVT_PWR_CLNT_PAUSE);
			if (VCD_FAILED(rc)) {
				VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_PAUSE_END"
					      "failed");
			}
		}
	}
}

/*
 * DDL callback while in INVALID state (after a fatal error).  Mainly
 * book-keeping: decrement submitted-frame counters and release frame
 * channels so cleanup can finish; stop-done is forwarded to the
 * invalid-state stop handler.
 */
static void vcd_clnt_cb_in_invalid(struct vcd_clnt_ctxt *cctxt,
				   u32 event, u32 status, void *payload,
				   size_t sz, u32 *ddl_handle,
				   void *const client_data)
{
	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;

	VCD_MSG_LOW("vcd_clnt_cb_in_invalid:");
	if (cctxt->ddl_handle != ddl_handle) {
		VCD_MSG_ERROR("ddl_handle mismatch");
		return;
	}
	switch (event) {
	case VCD_EVT_RESP_STOP:
		{
			vcd_handle_stop_done_in_invalid(cctxt,
							(struct vcd_transc *)
							client_data, status);
			break;
		}
	case VCD_EVT_RESP_INPUT_DONE:
	case VCD_EVT_RESP_OUTPUT_REQ:
		{
			if (cctxt->status.frame_submitted)
				cctxt->status.frame_submitted--;
			if (payload && ((struct ddl_frame_data_tag *)
					payload)->frm_trans_end)
				vcd_mark_frame_channel(cctxt->dev_ctxt);
			break;
		}
	case VCD_EVT_RESP_OUTPUT_DONE:
		{
			if (payload && ((struct ddl_frame_data_tag *)
					payload)->frm_trans_end)
				vcd_mark_frame_channel(cctxt->dev_ctxt);
			break;
		}
	case VCD_EVT_RESP_TRANSACTION_PENDING:
		{
			if (cctxt->status.frame_submitted)
				cctxt->status.frame_submitted--;
			vcd_mark_frame_channel(cctxt->dev_ctxt);
			break;
		}
	case VCD_EVT_IND_HWERRFATAL:
		{
			/* Only HW-fatal (not client-fatal) implies stop. */
			if (status == VCD_ERR_HW_FATAL)
				vcd_handle_stop_done_in_invalid(cctxt,
					(struct vcd_transc *)client_data,
					status);
			break;
		}
	case VCD_EVT_RESP_EOS_DONE:
		{
			vcd_mark_frame_channel(cctxt->dev_ctxt);
			break;
		}
	case VCD_EVT_IND_OUTPUT_RECONFIG:
		{
			if (cctxt->status.frame_submitted > 0)
				cctxt->status.frame_submitted--;
			else
				cctxt->status.frame_delayed--;
			vcd_mark_frame_channel(cctxt->dev_ctxt);
			break;
		}
	default:
		{
			VCD_MSG_ERROR("Unexpected callback event=%d status=%d"
				      "from DDL", event, status);
			dev_ctxt->command_continue = false;
			break;
		}
	}
}

/* State-entry hooks: log the transition; STARTING/STOPPING/PAUSING also
 * record the response event expected when the transition completes. */
static void vcd_clnt_enter_open
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Entering CLIENT_STATE_OPEN on api %d", state_event);
}

static void vcd_clnt_enter_starting
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Entering CLIENT_STATE_STARTING on api %d", state_event);
	cctxt->status.last_evt = VCD_EVT_RESP_START;
}

static void vcd_clnt_enter_run
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Entering CLIENT_STATE_RUN on api %d", state_event);
}

static void vcd_clnt_enter_flushing
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Entering CLIENT_STATE_FLUSHING on api %d", state_event);
}

static void vcd_clnt_enter_stopping
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Entering CLIENT_STATE_STOPPING on api %d", state_event);
	cctxt->status.last_evt = VCD_EVT_RESP_STOP;
}

/* Entering EOS suspends the scheduler client so no new frames start. */
static void vcd_clnt_enter_eos(struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	u32 rc;

	VCD_MSG_MED("Entering CLIENT_STATE_EOS on api %d", state_event);

	rc = vcd_sched_suspend_resume_clnt(cctxt, false);
	if (VCD_FAILED(rc))
		VCD_MSG_ERROR("Failed: vcd_sched_suspend_resume_clnt."
			      "rc=0x%x", rc);
}

static void vcd_clnt_enter_pausing
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Entering CLIENT_STATE_PAUSING on api %d", state_event);
	cctxt->status.last_evt = VCD_EVT_RESP_PAUSE;
}

static void vcd_clnt_enter_paused
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Entering CLIENT_STATE_PAUSED on api %d", state_event);
}

/* INVALID entry: the DDL handle is dead; forget first-frame progress
 * and suspend scheduling if a scheduler client still exists. */
static void vcd_clnt_enter_invalid(struct vcd_clnt_ctxt *cctxt,
				   s32 state_event)
{
	VCD_MSG_MED("Entering CLIENT_STATE_INVALID on api %d", state_event);
	cctxt->ddl_hdl_valid = false;
	cctxt->status.mask &= ~(VCD_FIRST_IP_RCVD | VCD_FIRST_OP_RCVD);
	if (cctxt->sched_clnt_hdl)
		vcd_sched_suspend_resume_clnt(cctxt, false);
}

/* State-exit hooks: log, and clear last_evt where entry had set it. */
static void vcd_clnt_exit_open
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Exiting CLIENT_STATE_OPEN on api %d", state_event);
}

static void vcd_clnt_exit_starting
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Exiting CLIENT_STATE_STARTING on api %d", state_event);
	cctxt->status.last_evt = VCD_EVT_RESP_BASE;
}

static void vcd_clnt_exit_run
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Exiting CLIENT_STATE_RUN on api %d", state_event);
}

static void vcd_clnt_exit_flushing
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Exiting CLIENT_STATE_FLUSHING on api %d", state_event);
}

static void vcd_clnt_exit_stopping
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Exiting CLIENT_STATE_STOPPING on api %d", state_event);
	cctxt->status.last_evt = VCD_EVT_RESP_BASE;
}

/* Leaving EOS resumes the scheduler client suspended on entry. */
static void vcd_clnt_exit_eos(struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	u32 rc;

	VCD_MSG_MED("Exiting CLIENT_STATE_EOS on api %d", state_event);
	rc = vcd_sched_suspend_resume_clnt(cctxt, true);
	if (VCD_FAILED(rc))
		VCD_MSG_ERROR("Failed: vcd_sched_suspend_resume_clnt. rc=0x%x",
			      rc);
}

static void vcd_clnt_exit_pausing
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Exiting CLIENT_STATE_PAUSING on api %d", state_event);
	cctxt->status.last_evt = VCD_EVT_RESP_BASE;
}

static void vcd_clnt_exit_paused
    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
	VCD_MSG_MED("Exiting CLIENT_STATE_PAUSED on api %d", state_event);
}

static void vcd_clnt_exit_invalid(struct vcd_clnt_ctxt *cctxt,
				  s32 state_event)
{
	VCD_MSG_MED("Exiting CLIENT_STATE_INVALID on api %d", state_event);
}

/*
 * Move the client to a new state: run the old state's exit hook, swap
 * the state table, then run the new state's entry hook.  A transition
 * to the current state is a no-op.  ev_code identifies the API/event
 * that triggered the transition (for logging).
 */
void vcd_do_client_state_transition(struct vcd_clnt_ctxt *cctxt,
				    enum vcd_clnt_state_enum to_state,
				    u32 ev_code)
{
	struct vcd_clnt_state_ctxt *state_ctxt;

	if (!cctxt || to_state >= VCD_CLIENT_STATE_MAX) {
		VCD_MSG_ERROR("Bad parameters. cctxt=%p, to_state=%d",
			      cctxt, to_state);
		return;
	}

	state_ctxt = &cctxt->clnt_state;

	if (state_ctxt->state == to_state) {
		VCD_MSG_HIGH("Client already in requested to_state=%d",
			     to_state);
		return;
	}

	VCD_MSG_MED("vcd_do_client_state_transition: C%d -> C%d, for api %d",
		    (int)state_ctxt->state, (int)to_state, ev_code);

	if (state_ctxt->state_table->exit)
		state_ctxt->state_table->exit(cctxt, ev_code);

	state_ctxt->state = to_state;
	state_ctxt->state_table = vcd_clnt_state_table[to_state];

	if (state_ctxt->state_table->entry)
		state_ctxt->state_table->entry(cctxt, ev_code);
}

/* Look up the operation table for a given client state. */
const struct vcd_clnt_state_table *vcd_get_client_state_table
    (enum vcd_clnt_state_enum state)
{
	return vcd_clnt_state_table[state];
}

/*
 * Per-state operation tables.  Slot order (inferred from the entries
 * below — confirm against struct vcd_clnt_state_table): close,
 * encode_start, encode_frame, decode_start, decode_frame, pause,
 * resume, flush, stop, set_property, get_property, set_buffer_req,
 * get_buffer_req, set_buffer, allocate_buffer, free_buffer,
 * fill_output_buffer, clnt_cb.  A NULL slot means the operation is not
 * permitted in that state.
 */
static const struct vcd_clnt_state_table vcd_clnt_table_open = {
	{
	 vcd_close_in_open,
	 vcd_encode_start_in_open,
	 NULL,
	 vcd_decode_start_in_open,
	 NULL,
	 NULL,
	 NULL,
	 vcd_flush_inopen,
	 vcd_stop_inopen,
	 vcd_set_property_cmn,
	 vcd_get_property_cmn,
	 vcd_set_buffer_requirements_cmn,
	 vcd_get_buffer_requirements_cmn,
	 vcd_set_buffer_cmn,
	 vcd_allocate_buffer_cmn,
	 vcd_free_buffer_cmn,
	 vcd_fill_output_buffer_cmn,
	 NULL,
	 },
	vcd_clnt_enter_open,
	vcd_clnt_exit_open
};

static const struct vcd_clnt_state_table vcd_clnt_table_starting = {
	{
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 vcd_get_property_cmn,
	 NULL,
	 vcd_get_buffer_requirements_cmn,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 vcd_clnt_cb_in_starting,
	 },
	vcd_clnt_enter_starting,
	vcd_clnt_exit_starting
};

static const struct vcd_clnt_state_table vcd_clnt_table_run = {
	{
	 NULL,
	 vcd_encode_start_in_run,
	 vcd_encode_frame_cmn,
	 vcd_decode_start_in_run,
	 vcd_decode_frame_cmn,
	 vcd_pause_cmn,
	 NULL,
	 vcd_flush_cmn,
	 vcd_stop_in_run,
	 vcd_set_property_cmn,
	 vcd_get_property_cmn,
	 vcd_set_buffer_requirements_cmn,
	 vcd_get_buffer_requirements_cmn,
	 vcd_set_buffer_cmn,
	 vcd_allocate_buffer_cmn,
	 vcd_free_buffer_cmn,
	 vcd_fill_output_buffer_cmn,
	 vcd_clnt_cb_in_run,
	 },
	vcd_clnt_enter_run,
	vcd_clnt_exit_run
};

static const struct vcd_clnt_state_table vcd_clnt_table_flushing = {
	{
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 vcd_flush_in_flushing,
	 NULL,
	 vcd_set_property_cmn,
	 vcd_get_property_cmn,
	 NULL,
	 vcd_get_buffer_requirements_cmn,
	 NULL,
	 NULL,
	 vcd_free_buffer_cmn,
	 vcd_fill_output_buffer_cmn,
	 vcd_clnt_cb_in_flushing,
	 },
	vcd_clnt_enter_flushing,
	vcd_clnt_exit_flushing
};

static const struct vcd_clnt_state_table vcd_clnt_table_stopping = {
	{
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 vcd_get_property_cmn,
	 NULL,
	 vcd_get_buffer_requirements_cmn,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 vcd_clnt_cb_in_stopping,
	 },
	vcd_clnt_enter_stopping,
	vcd_clnt_exit_stopping
};

static const struct vcd_clnt_state_table vcd_clnt_table_eos = {
	{
	 NULL,
	 NULL,
	 vcd_encode_frame_cmn,
	 NULL,
	 vcd_decode_frame_cmn,
	 vcd_pause_cmn,
	 NULL,
	 vcd_flush_in_eos,
	 vcd_stop_in_eos,
	 NULL,
	 vcd_get_property_cmn,
	 NULL,
	 vcd_get_buffer_requirements_cmn,
	 NULL,
	 NULL,
	 NULL,
	 vcd_fill_output_buffer_in_eos,
	 vcd_clnt_cb_in_eos,
	 },
	vcd_clnt_enter_eos,
	vcd_clnt_exit_eos
};

static const struct vcd_clnt_state_table vcd_clnt_table_pausing = {
	{
	 NULL,
	 NULL,
	 vcd_encode_frame_cmn,
	 NULL,
	 vcd_decode_frame_cmn,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 vcd_set_property_cmn,
	 vcd_get_property_cmn,
	 NULL,
	 vcd_get_buffer_requirements_cmn,
	 NULL,
	 NULL,
	 NULL,
	 vcd_fill_output_buffer_cmn,
	 vcd_clnt_cb_in_pausing,
	 },
	vcd_clnt_enter_pausing,
	vcd_clnt_exit_pausing
};

static const struct vcd_clnt_state_table vcd_clnt_table_paused = {
	{
	 NULL,
	 NULL,
	 vcd_encode_frame_cmn,
	 NULL,
	 vcd_decode_frame_cmn,
	 NULL,
	 vcd_resume_in_paused,
	 vcd_flush_cmn,
	 vcd_stop_cmn,
	 vcd_set_property_cmn,
	 vcd_get_property_cmn,
	 vcd_set_buffer_requirements_cmn,
	 vcd_get_buffer_requirements_cmn,
	 vcd_set_buffer_cmn,
	 vcd_allocate_buffer_cmn,
	 NULL,
	 vcd_fill_output_buffer_cmn,
	 NULL,
	 },
	vcd_clnt_enter_paused,
	vcd_clnt_exit_paused
};

static const struct vcd_clnt_state_table vcd_clnt_table_invalid = {
	{
	 vcd_close_in_invalid,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 vcd_flush_in_invalid,
	 vcd_stop_in_invalid,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 NULL,
	 vcd_free_buffer_cmn,
	 NULL,
	 vcd_clnt_cb_in_invalid,
	 },
	vcd_clnt_enter_invalid,
	vcd_clnt_exit_invalid
};

/*
 * Master state table, indexed by enum vcd_clnt_state_enum.  The leading
 * NULL presumably corresponds to a NULL/unused state value — confirm
 * against the enum definition.
 */
static const struct vcd_clnt_state_table *vcd_clnt_state_table[] = {
	NULL,
	&vcd_clnt_table_open,
	&vcd_clnt_table_starting,
	&vcd_clnt_table_run,
	&vcd_clnt_table_flushing,
	&vcd_clnt_table_pausing,
	&vcd_clnt_table_paused,
	&vcd_clnt_table_stopping,
	&vcd_clnt_table_eos,
	&vcd_clnt_table_invalid
};
gpl-2.0