repo_name
string
path
string
copies
string
size
string
content
string
license
string
nDroidProject/nDroid-kernel
drivers/clk/at91/pmc.c
338
10697
/* * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/clk/at91_pmc.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/of_irq.h> #include <asm/proc-fns.h> #include "pmc.h" void __iomem *at91_pmc_base; EXPORT_SYMBOL_GPL(at91_pmc_base); void at91rm9200_idle(void) { /* * Disable the processor clock. The processor will be automatically * re-enabled by an interrupt or by a reset. */ at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); } void at91sam9_idle(void) { at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); cpu_do_idle(); } int of_at91_get_clk_range(struct device_node *np, const char *propname, struct clk_range *range) { u32 min, max; int ret; ret = of_property_read_u32_index(np, propname, 0, &min); if (ret) return ret; ret = of_property_read_u32_index(np, propname, 1, &max); if (ret) return ret; if (range) { range->min = min; range->max = max; } return 0; } EXPORT_SYMBOL_GPL(of_at91_get_clk_range); static void pmc_irq_mask(struct irq_data *d) { struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); pmc_write(pmc, AT91_PMC_IDR, 1 << d->hwirq); } static void pmc_irq_unmask(struct irq_data *d) { struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq); } static int pmc_irq_set_type(struct irq_data *d, unsigned type) { if (type != IRQ_TYPE_LEVEL_HIGH) { pr_warn("PMC: type not supported (support only IRQ_TYPE_LEVEL_HIGH type)\n"); return -EINVAL; } return 0; } static void pmc_irq_suspend(struct irq_data *d) { struct at91_pmc *pmc = 
irq_data_get_irq_chip_data(d); pmc->imr = pmc_read(pmc, AT91_PMC_IMR); pmc_write(pmc, AT91_PMC_IDR, pmc->imr); } static void pmc_irq_resume(struct irq_data *d) { struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); pmc_write(pmc, AT91_PMC_IER, pmc->imr); } static struct irq_chip pmc_irq = { .name = "PMC", .irq_disable = pmc_irq_mask, .irq_mask = pmc_irq_mask, .irq_unmask = pmc_irq_unmask, .irq_set_type = pmc_irq_set_type, .irq_suspend = pmc_irq_suspend, .irq_resume = pmc_irq_resume, }; static struct lock_class_key pmc_lock_class; static int pmc_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct at91_pmc *pmc = h->host_data; irq_set_lockdep_class(virq, &pmc_lock_class); irq_set_chip_and_handler(virq, &pmc_irq, handle_level_irq); set_irq_flags(virq, IRQF_VALID); irq_set_chip_data(virq, pmc); return 0; } static int pmc_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) { struct at91_pmc *pmc = d->host_data; const struct at91_pmc_caps *caps = pmc->caps; if (WARN_ON(intsize < 1)) return -EINVAL; *out_hwirq = intspec[0]; if (!(caps->available_irqs & (1 << *out_hwirq))) return -EINVAL; *out_type = IRQ_TYPE_LEVEL_HIGH; return 0; } static struct irq_domain_ops pmc_irq_ops = { .map = pmc_irq_map, .xlate = pmc_irq_domain_xlate, }; static irqreturn_t pmc_irq_handler(int irq, void *data) { struct at91_pmc *pmc = (struct at91_pmc *)data; unsigned long sr; int n; sr = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR); if (!sr) return IRQ_NONE; for_each_set_bit(n, &sr, BITS_PER_LONG) generic_handle_irq(irq_find_mapping(pmc->irqdomain, n)); return IRQ_HANDLED; } static const struct at91_pmc_caps at91rm9200_caps = { .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB | AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY | AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY | AT91_PMC_PCK3RDY, }; static const struct at91_pmc_caps at91sam9260_caps = { 
.available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB | AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY | AT91_PMC_PCK1RDY, }; static const struct at91_pmc_caps at91sam9g45_caps = { .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | AT91_PMC_PCK1RDY, }; static const struct at91_pmc_caps at91sam9n12_caps = { .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB | AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY | AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS | AT91_PMC_CFDEV, }; static const struct at91_pmc_caps at91sam9x5_caps = { .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS | AT91_PMC_CFDEV, }; static const struct at91_pmc_caps sama5d3_caps = { .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY | AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS | AT91_PMC_CFDEV, }; static struct at91_pmc *__init at91_pmc_init(struct device_node *np, void __iomem *regbase, int virq, const struct at91_pmc_caps *caps) { struct at91_pmc *pmc; if (!regbase || !virq || !caps) return NULL; at91_pmc_base = regbase; pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); if (!pmc) return NULL; spin_lock_init(&pmc->lock); pmc->regbase = regbase; pmc->virq = virq; pmc->caps = caps; pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc); if (!pmc->irqdomain) goto out_free_pmc; pmc_write(pmc, AT91_PMC_IDR, 0xffffffff); if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc)) goto out_remove_irqdomain; return pmc; out_remove_irqdomain: irq_domain_remove(pmc->irqdomain); out_free_pmc: kfree(pmc); return NULL; } static const struct of_device_id pmc_clk_ids[] __initconst = { /* Slow oscillator */ { .compatible = "atmel,at91sam9260-clk-slow", .data = of_at91sam9260_clk_slow_setup, }, /* Main clock */ { .compatible 
= "atmel,at91rm9200-clk-main-osc", .data = of_at91rm9200_clk_main_osc_setup, }, { .compatible = "atmel,at91sam9x5-clk-main-rc-osc", .data = of_at91sam9x5_clk_main_rc_osc_setup, }, { .compatible = "atmel,at91rm9200-clk-main", .data = of_at91rm9200_clk_main_setup, }, { .compatible = "atmel,at91sam9x5-clk-main", .data = of_at91sam9x5_clk_main_setup, }, /* PLL clocks */ { .compatible = "atmel,at91rm9200-clk-pll", .data = of_at91rm9200_clk_pll_setup, }, { .compatible = "atmel,at91sam9g45-clk-pll", .data = of_at91sam9g45_clk_pll_setup, }, { .compatible = "atmel,at91sam9g20-clk-pllb", .data = of_at91sam9g20_clk_pllb_setup, }, { .compatible = "atmel,sama5d3-clk-pll", .data = of_sama5d3_clk_pll_setup, }, { .compatible = "atmel,at91sam9x5-clk-plldiv", .data = of_at91sam9x5_clk_plldiv_setup, }, /* Master clock */ { .compatible = "atmel,at91rm9200-clk-master", .data = of_at91rm9200_clk_master_setup, }, { .compatible = "atmel,at91sam9x5-clk-master", .data = of_at91sam9x5_clk_master_setup, }, /* System clocks */ { .compatible = "atmel,at91rm9200-clk-system", .data = of_at91rm9200_clk_sys_setup, }, /* Peripheral clocks */ { .compatible = "atmel,at91rm9200-clk-peripheral", .data = of_at91rm9200_clk_periph_setup, }, { .compatible = "atmel,at91sam9x5-clk-peripheral", .data = of_at91sam9x5_clk_periph_setup, }, /* Programmable clocks */ { .compatible = "atmel,at91rm9200-clk-programmable", .data = of_at91rm9200_clk_prog_setup, }, { .compatible = "atmel,at91sam9g45-clk-programmable", .data = of_at91sam9g45_clk_prog_setup, }, { .compatible = "atmel,at91sam9x5-clk-programmable", .data = of_at91sam9x5_clk_prog_setup, }, /* UTMI clock */ #if defined(CONFIG_HAVE_AT91_UTMI) { .compatible = "atmel,at91sam9x5-clk-utmi", .data = of_at91sam9x5_clk_utmi_setup, }, #endif /* USB clock */ #if defined(CONFIG_HAVE_AT91_USB_CLK) { .compatible = "atmel,at91rm9200-clk-usb", .data = of_at91rm9200_clk_usb_setup, }, { .compatible = "atmel,at91sam9x5-clk-usb", .data = of_at91sam9x5_clk_usb_setup, }, { 
.compatible = "atmel,at91sam9n12-clk-usb", .data = of_at91sam9n12_clk_usb_setup, }, #endif /* SMD clock */ #if defined(CONFIG_HAVE_AT91_SMD) { .compatible = "atmel,at91sam9x5-clk-smd", .data = of_at91sam9x5_clk_smd_setup, }, #endif #if defined(CONFIG_HAVE_AT91_H32MX) { .compatible = "atmel,sama5d4-clk-h32mx", .data = of_sama5d4_clk_h32mx_setup, }, #endif { /*sentinel*/ } }; static void __init of_at91_pmc_setup(struct device_node *np, const struct at91_pmc_caps *caps) { struct at91_pmc *pmc; struct device_node *childnp; void (*clk_setup)(struct device_node *, struct at91_pmc *); const struct of_device_id *clk_id; void __iomem *regbase = of_iomap(np, 0); int virq; if (!regbase) return; virq = irq_of_parse_and_map(np, 0); if (!virq) return; pmc = at91_pmc_init(np, regbase, virq, caps); if (!pmc) return; for_each_child_of_node(np, childnp) { clk_id = of_match_node(pmc_clk_ids, childnp); if (!clk_id) continue; clk_setup = clk_id->data; clk_setup(childnp, pmc); } } static void __init of_at91rm9200_pmc_setup(struct device_node *np) { of_at91_pmc_setup(np, &at91rm9200_caps); } CLK_OF_DECLARE(at91rm9200_clk_pmc, "atmel,at91rm9200-pmc", of_at91rm9200_pmc_setup); static void __init of_at91sam9260_pmc_setup(struct device_node *np) { of_at91_pmc_setup(np, &at91sam9260_caps); } CLK_OF_DECLARE(at91sam9260_clk_pmc, "atmel,at91sam9260-pmc", of_at91sam9260_pmc_setup); static void __init of_at91sam9g45_pmc_setup(struct device_node *np) { of_at91_pmc_setup(np, &at91sam9g45_caps); } CLK_OF_DECLARE(at91sam9g45_clk_pmc, "atmel,at91sam9g45-pmc", of_at91sam9g45_pmc_setup); static void __init of_at91sam9n12_pmc_setup(struct device_node *np) { of_at91_pmc_setup(np, &at91sam9n12_caps); } CLK_OF_DECLARE(at91sam9n12_clk_pmc, "atmel,at91sam9n12-pmc", of_at91sam9n12_pmc_setup); static void __init of_at91sam9x5_pmc_setup(struct device_node *np) { of_at91_pmc_setup(np, &at91sam9x5_caps); } CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc", of_at91sam9x5_pmc_setup); static void __init 
of_sama5d3_pmc_setup(struct device_node *np) { of_at91_pmc_setup(np, &sama5d3_caps); } CLK_OF_DECLARE(sama5d3_clk_pmc, "atmel,sama5d3-pmc", of_sama5d3_pmc_setup);
gpl-2.0
smallsilver/linux
drivers/staging/iio/meter/ade7758_trigger.c
338
2487
/* * ADE7758 Poly Phase Multifunction Energy Metering IC driver * * Copyright 2010-2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/export.h> #include <linux/iio/iio.h> #include <linux/iio/trigger.h> #include "ade7758.h" /** * ade7758_data_rdy_trig_poll() the event handler for the data rdy trig **/ static irqreturn_t ade7758_data_rdy_trig_poll(int irq, void *private) { disable_irq_nosync(irq); iio_trigger_poll(private, iio_get_time_ns()); return IRQ_HANDLED; } /** * ade7758_data_rdy_trigger_set_state() set datardy interrupt state **/ static int ade7758_data_rdy_trigger_set_state(struct iio_trigger *trig, bool state) { struct iio_dev *indio_dev = trig->private_data; dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state); return ade7758_set_irq(&indio_dev->dev, state); } /** * ade7758_trig_try_reen() try renabling irq for data rdy trigger * @trig: the datardy trigger **/ static int ade7758_trig_try_reen(struct iio_trigger *trig) { struct iio_dev *indio_dev = trig->private_data; struct ade7758_state *st = iio_priv(indio_dev); enable_irq(st->us->irq); /* irq reenabled so success! 
*/ return 0; } static const struct iio_trigger_ops ade7758_trigger_ops = { .owner = THIS_MODULE, .set_trigger_state = &ade7758_data_rdy_trigger_set_state, .try_reenable = &ade7758_trig_try_reen, }; int ade7758_probe_trigger(struct iio_dev *indio_dev) { struct ade7758_state *st = iio_priv(indio_dev); int ret; st->trig = iio_trigger_alloc("%s-dev%d", spi_get_device_id(st->us)->name, indio_dev->id); if (st->trig == NULL) { ret = -ENOMEM; goto error_ret; } ret = request_irq(st->us->irq, ade7758_data_rdy_trig_poll, IRQF_TRIGGER_LOW, spi_get_device_id(st->us)->name, st->trig); if (ret) goto error_free_trig; st->trig->dev.parent = &st->us->dev; st->trig->ops = &ade7758_trigger_ops; st->trig->private_data = indio_dev; ret = iio_trigger_register(st->trig); /* select default trigger */ indio_dev->trig = st->trig; if (ret) goto error_free_irq; return 0; error_free_irq: free_irq(st->us->irq, st->trig); error_free_trig: iio_trigger_free(st->trig); error_ret: return ret; } void ade7758_remove_trigger(struct iio_dev *indio_dev) { struct ade7758_state *st = iio_priv(indio_dev); iio_trigger_unregister(st->trig); free_irq(st->us->irq, st->trig); iio_trigger_free(st->trig); }
gpl-2.0
xurzcc/linux
fs/binfmt_em86.c
1106
2838
/* * linux/fs/binfmt_em86.c * * Based on linux/fs/binfmt_script.c * Copyright (C) 1996 Martin von Löwis * original #!-checking implemented by tytso. * * em86 changes Copyright (C) 1997 Jim Paradis */ #include <linux/module.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/binfmts.h> #include <linux/elf.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/errno.h> #define EM86_INTERP "/usr/bin/em86" #define EM86_I_NAME "em86" static int load_em86(struct linux_binprm *bprm) { char *interp, *i_name, *i_arg; struct file * file; int retval; struct elfhdr elf_ex; /* Make sure this is a Linux/Intel ELF executable... */ elf_ex = *((struct elfhdr *)bprm->buf); if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0) return -ENOEXEC; /* First of all, some simple consistency checks */ if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) || (!((elf_ex.e_machine == EM_386) || (elf_ex.e_machine == EM_486))) || !bprm->file->f_op->mmap) { return -ENOEXEC; } /* Need to be able to load the file after exec */ if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) return -ENOENT; allow_write_access(bprm->file); fput(bprm->file); bprm->file = NULL; /* Unlike in the script case, we don't have to do any hairy * parsing to find our interpreter... it's hardcoded! */ interp = EM86_INTERP; i_name = EM86_I_NAME; i_arg = NULL; /* We reserve the right to add an arg later */ /* * Splice in (1) the interpreter's name for argv[0] * (2) (optional) argument to interpreter * (3) filename of emulated file (replace argv[0]) * * This is done in reverse order, because of how the * user environment and arguments are stored. 
*/ remove_arg_zero(bprm); retval = copy_strings_kernel(1, &bprm->filename, bprm); if (retval < 0) return retval; bprm->argc++; if (i_arg) { retval = copy_strings_kernel(1, &i_arg, bprm); if (retval < 0) return retval; bprm->argc++; } retval = copy_strings_kernel(1, &i_name, bprm); if (retval < 0) return retval; bprm->argc++; /* * OK, now restart the process with the interpreter's inode. * Note that we use open_exec() as the name is now in kernel * space, and we don't need to copy it. */ file = open_exec(interp); if (IS_ERR(file)) return PTR_ERR(file); bprm->file = file; retval = prepare_binprm(bprm); if (retval < 0) return retval; return search_binary_handler(bprm); } static struct linux_binfmt em86_format = { .module = THIS_MODULE, .load_binary = load_em86, }; static int __init init_em86_binfmt(void) { register_binfmt(&em86_format); return 0; } static void __exit exit_em86_binfmt(void) { unregister_binfmt(&em86_format); } core_initcall(init_em86_binfmt); module_exit(exit_em86_binfmt); MODULE_LICENSE("GPL");
gpl-2.0
hakcenter/android_kernel_samsung_d710
fs/btrfs/disk-io.c
1362
84531
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/scatterlist.h> #include <linux/swap.h> #include <linux/radix-tree.h> #include <linux/writeback.h> #include <linux/buffer_head.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/crc32c.h> #include <linux/slab.h> #include <linux/migrate.h> #include <linux/ratelimit.h> #include <asm/unaligned.h> #include "compat.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" #include "volumes.h" #include "print-tree.h" #include "async-thread.h" #include "locking.h" #include "tree-log.h" #include "free-space-cache.h" #include "inode-map.h" static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); static void free_fs_root(struct btrfs_root *root); static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, int read_only); static int btrfs_destroy_ordered_operations(struct btrfs_root *root); static int btrfs_destroy_ordered_extents(struct btrfs_root *root); static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, struct btrfs_root *root); static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t); static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root); static int 
btrfs_destroy_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages, int mark); static int btrfs_destroy_pinned_extent(struct btrfs_root *root, struct extent_io_tree *pinned_extents); static int btrfs_cleanup_transaction(struct btrfs_root *root); /* * end_io_wq structs are used to do processing in task context when an IO is * complete. This is used during reads to verify checksums, and it is used * by writes to insert metadata for new file extents after IO is complete. */ struct end_io_wq { struct bio *bio; bio_end_io_t *end_io; void *private; struct btrfs_fs_info *info; int error; int metadata; struct list_head list; struct btrfs_work work; }; /* * async submit bios are used to offload expensive checksumming * onto the worker threads. They checksum file and metadata bios * just before they are sent down the IO stack. */ struct async_submit_bio { struct inode *inode; struct bio *bio; struct list_head list; extent_submit_bio_hook_t *submit_bio_start; extent_submit_bio_hook_t *submit_bio_done; int rw; int mirror_num; unsigned long bio_flags; /* * bio_offset is optional, can be used if the pages in the bio * can't tell us where in the file the bio should go */ u64 bio_offset; struct btrfs_work work; }; /* These are used to set the lockdep class on the extent buffer locks. * The class is set by the readpage_end_io_hook after the buffer has * passed csum validation but before the pages are unlocked. * * The lockdep class is also set by btrfs_init_new_buffer on freshly * allocated blocks. * * The class is based on the level in the tree block, which allows lockdep * to know that lower nodes nest inside the locks of higher nodes. * * We also add a check to make sure the highest level of the tree is * the same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this * code needs update as well. 
*/ #ifdef CONFIG_DEBUG_LOCK_ALLOC # if BTRFS_MAX_LEVEL != 8 # error # endif static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1]; static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = { /* leaf */ "btrfs-extent-00", "btrfs-extent-01", "btrfs-extent-02", "btrfs-extent-03", "btrfs-extent-04", "btrfs-extent-05", "btrfs-extent-06", "btrfs-extent-07", /* highest possible level */ "btrfs-extent-08", }; #endif /* * extents on the btree inode are pretty simple, there's one extent * that covers the entire device */ static struct extent_map *btree_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) { struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; int ret; read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); if (em) { em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; read_unlock(&em_tree->lock); goto out; } read_unlock(&em_tree->lock); em = alloc_extent_map(); if (!em) { em = ERR_PTR(-ENOMEM); goto out; } em->start = 0; em->len = (u64)-1; em->block_len = (u64)-1; em->block_start = 0; em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); if (ret == -EEXIST) { u64 failed_start = em->start; u64 failed_len = em->len; free_extent_map(em); em = lookup_extent_mapping(em_tree, start, len); if (em) { ret = 0; } else { em = lookup_extent_mapping(em_tree, failed_start, failed_len); ret = -EIO; } } else if (ret) { free_extent_map(em); em = NULL; } write_unlock(&em_tree->lock); if (ret) em = ERR_PTR(ret); out: return em; } u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len) { return crc32c(seed, data, len); } void btrfs_csum_final(u32 crc, char *result) { put_unaligned_le32(~crc, result); } /* * compute the csum for a btree block, and either verify it or write it * into the csum field of the block. 
*/ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, int verify) { u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy); char *result = NULL; unsigned long len; unsigned long cur_len; unsigned long offset = BTRFS_CSUM_SIZE; char *map_token = NULL; char *kaddr; unsigned long map_start; unsigned long map_len; int err; u32 crc = ~(u32)0; unsigned long inline_result; len = buf->len - offset; while (len > 0) { err = map_private_extent_buffer(buf, offset, 32, &map_token, &kaddr, &map_start, &map_len, KM_USER0); if (err) return 1; cur_len = min(len, map_len - (offset - map_start)); crc = btrfs_csum_data(root, kaddr + offset - map_start, crc, cur_len); len -= cur_len; offset += cur_len; unmap_extent_buffer(buf, map_token, KM_USER0); } if (csum_size > sizeof(inline_result)) { result = kzalloc(csum_size * sizeof(char), GFP_NOFS); if (!result) return 1; } else { result = (char *)&inline_result; } btrfs_csum_final(crc, result); if (verify) { if (memcmp_extent_buffer(buf, result, 0, csum_size)) { u32 val; u32 found = 0; memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); printk_ratelimited(KERN_INFO "btrfs: %s checksum verify " "failed on %llu wanted %X found %X " "level %d\n", root->fs_info->sb->s_id, (unsigned long long)buf->start, val, found, btrfs_header_level(buf)); if (result != (char *)&inline_result) kfree(result); return 1; } } else { write_extent_buffer(buf, result, 0, csum_size); } if (result != (char *)&inline_result) kfree(result); return 0; } /* * we can't consider a given block up to date unless the transid of the * block matches the transid in the parent node's pointer. This is how we * detect blocks that either didn't get written at all or got written * in the wrong place. 
*/ static int verify_parent_transid(struct extent_io_tree *io_tree, struct extent_buffer *eb, u64 parent_transid) { struct extent_state *cached_state = NULL; int ret; if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 0; lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, 0, &cached_state, GFP_NOFS); if (extent_buffer_uptodate(io_tree, eb, cached_state) && btrfs_header_generation(eb) == parent_transid) { ret = 0; goto out; } printk_ratelimited("parent transid verify failed on %llu wanted %llu " "found %llu\n", (unsigned long long)eb->start, (unsigned long long)parent_transid, (unsigned long long)btrfs_header_generation(eb)); ret = 1; clear_extent_buffer_uptodate(io_tree, eb, &cached_state); out: unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, &cached_state, GFP_NOFS); return ret; } /* * helper to read a given tree block, doing retries as required when * the checksums don't match and we have alternate mirrors to try. */ static int btree_read_extent_buffer_pages(struct btrfs_root *root, struct extent_buffer *eb, u64 start, u64 parent_transid) { struct extent_io_tree *io_tree; int ret; int num_copies = 0; int mirror_num = 0; clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; while (1) { ret = read_extent_buffer_pages(io_tree, eb, start, 1, btree_get_extent, mirror_num); if (!ret && !verify_parent_transid(io_tree, eb, parent_transid)) return ret; /* * This buffer's crc is fine, but its contents are corrupted, so * there is no reason to read the other copies, they won't be * any less wrong. */ if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) return ret; num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, eb->start, eb->len); if (num_copies == 1) return ret; mirror_num++; if (mirror_num > num_copies) return ret; } return -EIO; } /* * checksum a dirty tree block before IO. 
This has extra checks to make sure * we only fill in the checksum field in the first page of a multi-page block */ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) { struct extent_io_tree *tree; u64 start = (u64)page->index << PAGE_CACHE_SHIFT; u64 found_start; unsigned long len; struct extent_buffer *eb; int ret; tree = &BTRFS_I(page->mapping->host)->io_tree; if (page->private == EXTENT_PAGE_PRIVATE) { WARN_ON(1); goto out; } if (!page->private) { WARN_ON(1); goto out; } len = page->private >> 2; WARN_ON(len == 0); eb = alloc_extent_buffer(tree, start, len, page); if (eb == NULL) { WARN_ON(1); goto out; } ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE, btrfs_header_generation(eb)); BUG_ON(ret); WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN)); found_start = btrfs_header_bytenr(eb); if (found_start != start) { WARN_ON(1); goto err; } if (eb->first_page != page) { WARN_ON(1); goto err; } if (!PageUptodate(page)) { WARN_ON(1); goto err; } csum_tree_block(root, eb, 0); err: free_extent_buffer(eb); out: return 0; } static int check_tree_block_fsid(struct btrfs_root *root, struct extent_buffer *eb) { struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; u8 fsid[BTRFS_UUID_SIZE]; int ret = 1; read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb), BTRFS_FSID_SIZE); while (fs_devices) { if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) { ret = 0; break; } fs_devices = fs_devices->seed; } return ret; } #define CORRUPT(reason, eb, root, slot) \ printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \ "root=%llu, slot=%d\n", reason, \ (unsigned long long)btrfs_header_bytenr(eb), \ (unsigned long long)root->objectid, slot) static noinline int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf) { struct btrfs_key key; struct btrfs_key leaf_key; u32 nritems = btrfs_header_nritems(leaf); int slot; if (nritems == 0) return 0; /* Check the 0 item */ if (btrfs_item_offset_nr(leaf, 0) + 
btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root)) { CORRUPT("invalid item offset size pair", leaf, root, 0); return -EIO; } /* * Check to make sure each items keys are in the correct order and their * offsets make sense. We only have to loop through nritems-1 because * we check the current slot against the next slot, which verifies the * next slot's offset+size makes sense and that the current's slot * offset is correct. */ for (slot = 0; slot < nritems - 1; slot++) { btrfs_item_key_to_cpu(leaf, &leaf_key, slot); btrfs_item_key_to_cpu(leaf, &key, slot + 1); /* Make sure the keys are in the right order */ if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) { CORRUPT("bad key order", leaf, root, slot); return -EIO; } /* * Make sure the offset and ends are right, remember that the * item data starts at the end of the leaf and grows towards the * front. */ if (btrfs_item_offset_nr(leaf, slot) != btrfs_item_end_nr(leaf, slot + 1)) { CORRUPT("slot offset bad", leaf, root, slot); return -EIO; } /* * Check to make sure that we don't point outside of the leaf, * just incase all the items are consistent to eachother, but * all point outside of the leaf. 
*/ if (btrfs_item_end_nr(leaf, slot) > BTRFS_LEAF_DATA_SIZE(root)) { CORRUPT("slot end outside of leaf", leaf, root, slot); return -EIO; } } return 0; } #ifdef CONFIG_DEBUG_LOCK_ALLOC void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level) { lockdep_set_class_and_name(&eb->lock, &btrfs_eb_class[level], btrfs_eb_name[level]); } #endif static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state) { struct extent_io_tree *tree; u64 found_start; int found_level; unsigned long len; struct extent_buffer *eb; struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; int ret = 0; tree = &BTRFS_I(page->mapping->host)->io_tree; if (page->private == EXTENT_PAGE_PRIVATE) goto out; if (!page->private) goto out; len = page->private >> 2; WARN_ON(len == 0); eb = alloc_extent_buffer(tree, start, len, page); if (eb == NULL) { ret = -EIO; goto out; } found_start = btrfs_header_bytenr(eb); if (found_start != start) { printk_ratelimited(KERN_INFO "btrfs bad tree block start " "%llu %llu\n", (unsigned long long)found_start, (unsigned long long)eb->start); ret = -EIO; goto err; } if (eb->first_page != page) { printk(KERN_INFO "btrfs bad first page %lu %lu\n", eb->first_page->index, page->index); WARN_ON(1); ret = -EIO; goto err; } if (check_tree_block_fsid(root, eb)) { printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n", (unsigned long long)eb->start); ret = -EIO; goto err; } found_level = btrfs_header_level(eb); btrfs_set_buffer_lockdep_class(eb, found_level); ret = csum_tree_block(root, eb, 1); if (ret) { ret = -EIO; goto err; } /* * If this is a leaf block and it is corrupt, set the corrupt bit so * that we don't try and read the other copies of this block, just * return -EIO. 
*/ if (found_level == 0 && check_leaf(root, eb)) { set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); ret = -EIO; } end = min_t(u64, eb->len, PAGE_CACHE_SIZE); end = eb->start + end - 1; err: free_extent_buffer(eb); out: return ret; } static void end_workqueue_bio(struct bio *bio, int err) { struct end_io_wq *end_io_wq = bio->bi_private; struct btrfs_fs_info *fs_info; fs_info = end_io_wq->info; end_io_wq->error = err; end_io_wq->work.func = end_workqueue_fn; end_io_wq->work.flags = 0; if (bio->bi_rw & REQ_WRITE) { if (end_io_wq->metadata == 1) btrfs_queue_worker(&fs_info->endio_meta_write_workers, &end_io_wq->work); else if (end_io_wq->metadata == 2) btrfs_queue_worker(&fs_info->endio_freespace_worker, &end_io_wq->work); else btrfs_queue_worker(&fs_info->endio_write_workers, &end_io_wq->work); } else { if (end_io_wq->metadata) btrfs_queue_worker(&fs_info->endio_meta_workers, &end_io_wq->work); else btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); } } /* * For the metadata arg you want * * 0 - if data * 1 - if normal metadta * 2 - if writing to the free space cache area */ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, int metadata) { struct end_io_wq *end_io_wq; end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS); if (!end_io_wq) return -ENOMEM; end_io_wq->private = bio->bi_private; end_io_wq->end_io = bio->bi_end_io; end_io_wq->info = info; end_io_wq->error = 0; end_io_wq->bio = bio; end_io_wq->metadata = metadata; bio->bi_private = end_io_wq; bio->bi_end_io = end_workqueue_bio; return 0; } unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) { unsigned long limit = min_t(unsigned long, info->workers.max_workers, info->fs_devices->open_devices); return 256 * limit; } static void run_one_async_start(struct btrfs_work *work) { struct async_submit_bio *async; async = container_of(work, struct async_submit_bio, work); async->submit_bio_start(async->inode, async->rw, async->bio, async->mirror_num, async->bio_flags, 
async->bio_offset); } static void run_one_async_done(struct btrfs_work *work) { struct btrfs_fs_info *fs_info; struct async_submit_bio *async; int limit; async = container_of(work, struct async_submit_bio, work); fs_info = BTRFS_I(async->inode)->root->fs_info; limit = btrfs_async_submit_limit(fs_info); limit = limit * 2 / 3; atomic_dec(&fs_info->nr_async_submits); if (atomic_read(&fs_info->nr_async_submits) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); async->submit_bio_done(async->inode, async->rw, async->bio, async->mirror_num, async->bio_flags, async->bio_offset); } static void run_one_async_free(struct btrfs_work *work) { struct async_submit_bio *async; async = container_of(work, struct async_submit_bio, work); kfree(async); } int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done) { struct async_submit_bio *async; async = kmalloc(sizeof(*async), GFP_NOFS); if (!async) return -ENOMEM; async->inode = inode; async->rw = rw; async->bio = bio; async->mirror_num = mirror_num; async->submit_bio_start = submit_bio_start; async->submit_bio_done = submit_bio_done; async->work.func = run_one_async_start; async->work.ordered_func = run_one_async_done; async->work.ordered_free = run_one_async_free; async->work.flags = 0; async->bio_flags = bio_flags; async->bio_offset = bio_offset; atomic_inc(&fs_info->nr_async_submits); if (rw & REQ_SYNC) btrfs_set_work_high_prio(&async->work); btrfs_queue_worker(&fs_info->workers, &async->work); while (atomic_read(&fs_info->async_submit_draining) && atomic_read(&fs_info->nr_async_submits)) { wait_event(fs_info->async_submit_wait, (atomic_read(&fs_info->nr_async_submits) == 0)); } return 0; } static int btree_csum_one_bio(struct bio *bio) { struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; struct 
btrfs_root *root;

	WARN_ON(bio->bi_vcnt <= 0);
	while (bio_index < bio->bi_vcnt) {
		/* each page's owning root supplies the csum parameters */
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		csum_dirty_buffer(root, bvec->bv_page);
		bio_index++;
		bvec++;
	}
	return 0;
}

/*
 * Async "start" hook for metadata writes: compute checksums while we
 * are already in worker context.  Always returns 0.
 */
static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags, u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context. Just jump into btrfs_map_bio
	 */
	btree_csum_one_bio(bio);
	return 0;
}

/*
 * Async "done" hook for metadata writes: hand the (already checksummed)
 * bio to the mapping layer for actual device submission.
 */
static int __btree_submit_bio_done(struct inode *inode, int rw,
				   struct bio *bio, int mirror_num,
				   unsigned long bio_flags, u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context. Just jump into btrfs_map_bio
	 */
	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

/*
 * Entry point for all btree bios.  Reads are mapped directly (their
 * checksum verification happens later in the end_io workers); writes
 * are pushed through the async submit path so checksumming runs in
 * parallel across CPUs.
 */
static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int ret;

	/* route the end_io processing of this bio to a helper thread */
	ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, bio, 1);
	BUG_ON(ret);

	if (!(rw & REQ_WRITE)) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 0);
	}

	/*
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	 */
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num, 0,
				   bio_offset,
				   __btree_submit_bio_start,
				   __btree_submit_bio_done);
}

#ifdef CONFIG_MIGRATION
/*
 * Page-migration hook for the btree inode.  Refuses to migrate dirty
 * pages (we cannot safely write a btree page without the tree locks).
 */
static int btree_migratepage(struct address_space *mapping,
			     struct page *newpage, struct page *page)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
*/
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page);
}
#endif

/*
 * ->writepage for the btree inode.  Under memory pressure (PF_MEMALLOC)
 * we must not write btree pages; instead the page is redirtied, the
 * dirty-metadata accounting is fixed up, and 0 is returned.
 */
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct extent_buffer *eb;
	int was_dirty;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (!(current->flags & PF_MEMALLOC)) {
		return extent_write_full_page(tree, page,
					      btree_get_extent, wbc);
	}

	/* memory-pressure path: keep the page dirty, don't write it here */
	redirty_page_for_writepage(wbc, page);
	eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
	WARN_ON(!eb);

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
	if (!was_dirty) {
		/* buffer was clean: account the page we just redirtied */
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	free_extent_buffer(eb);

	unlock_page(page);
	return 0;
}

/*
 * ->writepages for the btree inode.  For non-sync writeback, skip the
 * work entirely unless enough dirty metadata (32MB) has accumulated.
 */
static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		struct btrfs_root *root = BTRFS_I(mapping->host)->root;
		u64 num_dirty;
		unsigned long thresh = 32 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

		/* this is a bit racy, but that's ok */
		num_dirty = root->fs_info->dirty_metadata_bytes;
		if (num_dirty < thresh)
			return 0;
	}
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

/* ->readpage for the btree inode: delegate to the extent_io layer. */
static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent);
}

/*
 * ->releasepage for the btree inode.  Tries to free the extent state
 * and extent buffer attached to the page; refuses while the page is
 * dirty or under writeback.
 */
static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	if (PageWriteback(page) || PageDirty(page))
		return 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;

	ret = try_release_extent_state(map, tree, page, gfp_flags);
	if
(!ret)
		return 0;

	ret = try_release_extent_buffer(tree, page);
	if (ret == 1) {
		/* buffer fully released: detach its private page reference */
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}

	return ret;
}

/*
 * ->invalidatepage for the btree inode.  Drops the extent state and,
 * if the page private survives releasepage, warns and detaches it by
 * hand so the page reference is not leaked.
 */
static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk(KERN_WARNING "btrfs warning page private not zero "
		       "on page %llu\n", (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

/* address_space operations for the btree inode */
static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
};

/*
 * Start a non-blocking read of the tree block at @bytenr purely to warm
 * the page cache.  The read result is not checked; always returns 0
 * (ret is never set to anything else here).
 */
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, 0, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

/*
 * Look up an already-cached extent buffer for the given block; returns
 * NULL if it is not in the cache.  Caller owns the returned reference.
 */
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize);
	return eb;
}

/*
 * Find or allocate the extent buffer covering the given block range.
 * Caller owns the returned reference; NULL on allocation failure.
 */
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize, NULL);
	return eb;
}

/* Kick off writeback for the pages backing one tree block. */
int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->first_page->mapping,
buf->start, buf->start + buf->len - 1);
}

/* Wait for in-flight writeback of one tree block's pages to finish. */
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	return filemap_fdatawait_range(buf->first_page->mapping,
				       buf->start, buf->start + buf->len - 1);
}

/*
 * Read the tree block at @bytenr, verifying it against @parent_transid.
 * On a clean read the buffer is marked uptodate.  Returns the buffer
 * (caller owns the reference) or NULL on allocation failure.  NOTE the
 * buffer is returned even if the read failed; callers must check the
 * uptodate state themselves.
 */
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

	if (ret == 0)
		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
	return buf;
}

/*
 * Clear the dirty bit of a buffer that belongs to the currently running
 * transaction, fixing up the dirty-metadata byte accounting.  Buffers
 * from older transactions are left alone.  Always returns 0.
 */
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			spin_lock(&root->fs_info->delalloc_lock);
			if (root->fs_info->dirty_metadata_bytes >= buf->len)
				root->fs_info->dirty_metadata_bytes -= buf->len;
			else
				WARN_ON(1);	/* accounting underflow */
			spin_unlock(&root->fs_info->delalloc_lock);
		}

		/* ugh, clear_extent_buffer_dirty needs to lock the page */
		btrfs_set_lock_blocking(buf);
		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					  buf);
	}
	return 0;
}

/*
 * Initialise every field of a freshly allocated btrfs_root to a sane
 * baseline.  Does no I/O.  Always returns 0.
 */
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			u32 stripesize, struct btrfs_root *root,
			struct btrfs_fs_info *fs_info,
			u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;
	root->in_radix = 0;
	root->orphan_item_inserted = 0;
	root->orphan_cleanup_state = 0;

	root->fs_info = fs_info;
	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv
= NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->orphan_list);
	INIT_LIST_HEAD(&root->root_list);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->accounting_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	root->log_batch = 0;
	root->log_transid = 0;
	root->last_log_commit = 0;
	extent_io_tree_init(&root->dirty_log_pages,
			    fs_info->btree_inode->i_mapping);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->root_key.objectid = objectid;
	root->anon_super.s_root = NULL;
	root->anon_super.s_dev = 0;
	INIT_LIST_HEAD(&root->anon_super.s_list);
	INIT_LIST_HEAD(&root->anon_super.s_instances);
	init_rwsem(&root->anon_super.s_umount);

	return 0;
}

/*
 * Locate the root item for @objectid in the root tree, initialise
 * @root from it, and read its top node from disk.  Returns 0 on
 * success, -ENOENT if the root item does not exist, or -EIO if the
 * node cannot be read / verified.
 */
static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;
	u32 blocksize;
	u64 generation;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	if (ret > 0)
		return -ENOENT;
	BUG_ON(ret);

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
free_extent_buffer(root->node);
		return -EIO;
	}
	root->commit_root = btrfs_root_node(root);
	return 0;
}

/*
 * Allocate and initialise a log-tree root, including its (locked,
 * dirty) root block.  Returns the new root or an ERR_PTR on allocation
 * failure.  The root block is unlocked before returning.
 */
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct extent_buffer *leaf;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
	/*
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */
	root->ref_cows = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
				      BTRFS_TREE_LOG_OBJECTID, NULL,
				      0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	/* stamp a fresh header into the new root block */
	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer(root->node, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(root->node),
			    BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

/*
 * Create the fs-wide "log root tree" that indexes all per-subvolume
 * log trees.  Returns 0 or the error from alloc_log_tree().
 */
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

/*
 * Create a log tree for one subvolume root and attach it as
 * root->log_root.  Returns 0 or the error from alloc_log_tree().
 */
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_root
*log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, root->fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	/* minimal directory-shaped inode item for the log root */
	inode_item = &log_root->root_item.inode;
	inode_item->generation = cpu_to_le64(1);
	inode_item->size = cpu_to_le64(3);
	inode_item->nlink = cpu_to_le32(1);
	inode_item->nbytes = cpu_to_le64(root->leafsize);
	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->last_log_commit = 0;
	return 0;
}

/*
 * Read a subvolume root from disk without consulting (or populating)
 * the fs_roots radix cache.  An offset of (u64)-1 means "latest root":
 * look it up via the root tree's last-root index; otherwise search for
 * the exact key.  Returns the root or an ERR_PTR (-ENOMEM, -ENOENT,
 * or a search error).
 */
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 generation;
	u32 blocksize;
	int ret = 0;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto out;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	if (!path) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret == 0) {
		l = path->nodes[0];
		read_extent_buffer(l, &root->root_item,
				   btrfs_item_ptr_offset(l, path->slots[0]),
				   sizeof(root->root_item));
		memcpy(&root->root_key, location, sizeof(*location));
	}
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		if (ret > 0)
			ret = -ENOENT;	/* key not found */
		return ERR_PTR(ret);
	}

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
root->commit_root = btrfs_root_node(root);
	BUG_ON(!root->node);
out:
	/* log trees are not reference counted; everything else is */
	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
		root->ref_cows = 1;
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

/*
 * Look up a root by key, returning the cached well-known roots
 * directly, then consulting the fs_roots radix cache, and finally
 * reading from disk and inserting into the cache.  On a racing insert
 * (-EEXIST) the freshly read root is freed and the lookup retried.
 * Returns the root or an ERR_PTR.
 */
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
again:
	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	btrfs_init_free_ino_ctl(root);
	mutex_init(&root->fs_commit_mutex);
	spin_lock_init(&root->cache_lock);
	init_waitqueue_head(&root->cache_wait);

	ret = set_anon_super(&root->anon_super, NULL);
	if (ret)
		goto fail;

	/* a root with no references is effectively deleted */
	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		root->orphan_item_inserted = 1;

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		goto fail;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		root->in_radix = 1;
	spin_unlock(&fs_info->fs_roots_radix_lock);
radix_tree_preload_end();
	if (ret) {
		if (ret == -EEXIST) {
			/* lost the insert race; use the winner's root */
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}

	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid);
	WARN_ON(ret);
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

/*
 * backing_dev_info congestion callback: report congested if ANY of the
 * filesystem's underlying devices is congested.  Walks the device list
 * under RCU.
 */
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	int err;

	bdi->capabilities = BDI_CAP_MAP_COPY;
	err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
	if (err)
		return err;

	bdi->ra_pages = default_backing_dev_info.ra_pages;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	return 0;
}

/*
 * Decide whether a metadata read bio covers a complete tree block so
 * its checksum can be verified now.  Returns 1 when the last extent
 * buffer touched by the bio is fully contained (or the relevant pages
 * are already uptodate in the io tree), 0 otherwise.
 */
static int bio_ready_for_csum(struct bio *bio)
{
	u64 length = 0;
	u64 buf_len = 0;
	u64 start = 0;
	struct page *page;
	struct extent_io_tree *io_tree = NULL;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page->private == EXTENT_PAGE_PRIVATE) {
			length += bvec->bv_len;
			continue;
		}
		if (!page->private) {
			length += bvec->bv_len;
			continue;
		}
		/* page holds an extent buffer pointer; restart the tally */
		length = bvec->bv_len;
		buf_len = page->private >> 2;
		start = page_offset(page) + bvec->bv_offset;
		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
	}
	/* are we fully contained in this bio? */
	if (buf_len <= length)
		return 1;

	ret = extent_range_uptodate(io_tree, start + length,
				    start + buf_len - 1);
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.
* This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	/* metadata bio reads are special because the whole tree block must
	 * be checksummed at once.  This makes sure the entire block is in
	 * ram and up to date before trying to verify things.  For
	 * blocksize <= pagesize, it is basically a noop
	 */
	if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
	    !bio_ready_for_csum(bio)) {
		/* block not fully readable yet: requeue ourselves and retry */
		btrfs_queue_worker(&fs_info->endio_meta_workers,
				   &end_io_wq->work);
		return;
	}
	/* restore the original end_io context and complete the bio */
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
	bio_endio(bio, error);
}

/*
 * Background thread: on each wakeup (and while writable), run delayed
 * iputs, clean old snapshots and defrag inodes, then sleep until woken
 * or stopped.
 */
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;

	do {
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);

		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
		    mutex_trylock(&root->fs_info->cleaner_mutex)) {
			btrfs_run_delayed_iputs(root);
			btrfs_clean_old_snapshots(root);
			mutex_unlock(&root->fs_info->cleaner_mutex);
			btrfs_run_defrag_inodes(root->fs_info);
		}

		if (freezing(current)) {
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * Background thread that commits the running transaction once it is
 * either blocked or old enough (~30s), sleeping between checks.
 */
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	int ret;

	do {
		delay = HZ * 30;
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		spin_lock(&root->fs_info->trans_lock);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&root->fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (!cur->blocked &&
		    (now < cur->start_time || now
- cur->start_time < 30)) { spin_unlock(&root->fs_info->trans_lock); delay = HZ * 5; goto sleep; } transid = cur->transid; spin_unlock(&root->fs_info->trans_lock); trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); if (transid == trans->transid) { ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); } else { btrfs_end_transaction(trans, root); } sleep: wake_up_process(root->fs_info->cleaner_kthread); mutex_unlock(&root->fs_info->transaction_kthread_mutex); if (freezing(current)) { refrigerator(); } else { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop() && !btrfs_transaction_blocked(root->fs_info)) schedule_timeout(delay); __set_current_state(TASK_RUNNING); } } while (!kthread_should_stop()); return 0; } struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, char *options) { u32 sectorsize; u32 nodesize; u32 leafsize; u32 blocksize; u32 stripesize; u64 generation; u64 features; struct btrfs_key location; struct buffer_head *bh; struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *tree_root = btrfs_sb(sb); struct btrfs_fs_info *fs_info = NULL; struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *log_tree_root; int ret; int err = -EINVAL; struct btrfs_super_block *disk_super; if (!extent_root || !tree_root || !tree_root->fs_info || !chunk_root || !dev_root || !csum_root) { err = -ENOMEM; goto fail; } fs_info = tree_root->fs_info; ret = init_srcu_struct(&fs_info->subvol_srcu); if (ret) { err = ret; goto fail; } ret = setup_bdi(fs_info, &fs_info->bdi); if (ret) { err = ret; goto fail_srcu; } fs_info->btree_inode = new_inode(sb); if (!fs_info->btree_inode) { err = -ENOMEM; goto fail_bdi; } fs_info->btree_inode->i_mapping->flags &= ~__GFP_FS; 
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); INIT_LIST_HEAD(&fs_info->trans_list); INIT_LIST_HEAD(&fs_info->dead_roots); INIT_LIST_HEAD(&fs_info->delayed_iputs); INIT_LIST_HEAD(&fs_info->hashers); INIT_LIST_HEAD(&fs_info->delalloc_inodes); INIT_LIST_HEAD(&fs_info->ordered_operations); INIT_LIST_HEAD(&fs_info->caching_block_groups); spin_lock_init(&fs_info->delalloc_lock); spin_lock_init(&fs_info->trans_lock); spin_lock_init(&fs_info->ref_cache_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); mutex_init(&fs_info->reloc_mutex); init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; fs_info->extent_root = extent_root; fs_info->csum_root = csum_root; fs_info->chunk_root = chunk_root; fs_info->dev_root = dev_root; fs_info->fs_devices = fs_devices; INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); INIT_LIST_HEAD(&fs_info->space_info); btrfs_mapping_init(&fs_info->mapping_tree); btrfs_init_block_rsv(&fs_info->global_block_rsv); btrfs_init_block_rsv(&fs_info->delalloc_block_rsv); btrfs_init_block_rsv(&fs_info->trans_block_rsv); btrfs_init_block_rsv(&fs_info->chunk_block_rsv); btrfs_init_block_rsv(&fs_info->empty_block_rsv); INIT_LIST_HEAD(&fs_info->durable_block_rsv_list); mutex_init(&fs_info->durable_block_rsv_mutex); atomic_set(&fs_info->nr_async_submits, 0); atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); atomic_set(&fs_info->defrag_running, 0); fs_info->sb = sb; fs_info->max_inline = 8192 * 1024; fs_info->metadata_ratio = 0; fs_info->defrag_inodes = RB_ROOT; fs_info->trans_no_join = 0; fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), GFP_NOFS); if (!fs_info->delayed_root) { err = 
-ENOMEM; goto fail_iput; } btrfs_init_delayed_root(fs_info->delayed_root); mutex_init(&fs_info->scrub_lock); atomic_set(&fs_info->scrubs_running, 0); atomic_set(&fs_info->scrub_pause_req, 0); atomic_set(&fs_info->scrubs_paused, 0); atomic_set(&fs_info->scrub_cancel_req, 0); init_waitqueue_head(&fs_info->scrub_pause_wait); init_rwsem(&fs_info->scrub_super_lock); fs_info->scrub_workers_refcnt = 0; sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); sb->s_bdi = &fs_info->bdi; fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; fs_info->btree_inode->i_nlink = 1; /* * we set the i_size on the btree inode to the max possible int. * the real end of the address space is determined by all of * the devices in the system */ fs_info->btree_inode->i_size = OFFSET_MAX; fs_info->btree_inode->i_mapping->a_ops = &btree_aops; fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi; RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, fs_info->btree_inode->i_mapping); extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree); BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; BTRFS_I(fs_info->btree_inode)->root = tree_root; memset(&BTRFS_I(fs_info->btree_inode)->location, 0, sizeof(struct btrfs_key)); BTRFS_I(fs_info->btree_inode)->dummy_inode = 1; insert_inode_hash(fs_info->btree_inode); spin_lock_init(&fs_info->block_group_cache_lock); fs_info->block_group_cache_tree = RB_ROOT; extent_io_tree_init(&fs_info->freed_extents[0], fs_info->btree_inode->i_mapping); extent_io_tree_init(&fs_info->freed_extents[1], fs_info->btree_inode->i_mapping); fs_info->pinned_extents = &fs_info->freed_extents[0]; fs_info->do_barriers = 1; mutex_init(&fs_info->ordered_operations_mutex); mutex_init(&fs_info->tree_log_mutex); mutex_init(&fs_info->chunk_mutex); mutex_init(&fs_info->transaction_kthread_mutex); mutex_init(&fs_info->cleaner_mutex); mutex_init(&fs_info->volume_mutex); 
init_rwsem(&fs_info->extent_commit_sem); init_rwsem(&fs_info->cleanup_work_sem); init_rwsem(&fs_info->subvol_sem); btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); btrfs_init_free_cluster(&fs_info->data_alloc_cluster); init_waitqueue_head(&fs_info->transaction_throttle); init_waitqueue_head(&fs_info->transaction_wait); init_waitqueue_head(&fs_info->transaction_blocked_wait); init_waitqueue_head(&fs_info->async_submit_wait); __setup_root(4096, 4096, 4096, 4096, tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); bh = btrfs_read_dev_super(fs_devices->latest_bdev); if (!bh) { err = -EINVAL; goto fail_alloc; } memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); memcpy(&fs_info->super_for_commit, &fs_info->super_copy, sizeof(fs_info->super_for_commit)); brelse(bh); memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE); disk_super = &fs_info->super_copy; if (!btrfs_super_root(disk_super)) goto fail_alloc; /* check FS state, whether FS is broken. */ fs_info->fs_state |= btrfs_super_flags(disk_super); btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); /* * In the long term, we'll store the compression type in the super * block, and it'll be used for per file compression control. 
*/ fs_info->compress_type = BTRFS_COMPRESS_ZLIB; ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super) & ~BTRFS_FEATURE_INCOMPAT_SUPP; if (features) { printk(KERN_ERR "BTRFS: couldn't mount because of " "unsupported optional features (%Lx).\n", (unsigned long long)features); err = -EINVAL; goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super); features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; btrfs_set_super_incompat_flags(disk_super, features); features = btrfs_super_compat_ro_flags(disk_super) & ~BTRFS_FEATURE_COMPAT_RO_SUPP; if (!(sb->s_flags & MS_RDONLY) && features) { printk(KERN_ERR "BTRFS: couldn't mount RDWR because of " "unsupported option features (%Lx).\n", (unsigned long long)features); err = -EINVAL; goto fail_alloc; } btrfs_init_workers(&fs_info->generic_worker, "genwork", 1, NULL); btrfs_init_workers(&fs_info->workers, "worker", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->delalloc_workers, "delalloc", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->submit_workers, "submit", min_t(u64, fs_devices->num_devices, fs_info->thread_pool_size), &fs_info->generic_worker); /* a higher idle thresh on the submit workers makes it much more * likely that bios will be send down in a sane order to the * devices */ fs_info->submit_workers.idle_thresh = 64; fs_info->workers.idle_thresh = 16; fs_info->workers.ordered = 1; fs_info->delalloc_workers.idle_thresh = 2; fs_info->delalloc_workers.ordered = 1; btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_workers, "endio", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta", fs_info->thread_pool_size, 
&fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_meta_write_workers, "endio-meta-write", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", 1, &fs_info->generic_worker); btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta", fs_info->thread_pool_size, &fs_info->generic_worker); /* * endios are largely parallel and should have a very * low idle thresh */ fs_info->endio_workers.idle_thresh = 4; fs_info->endio_meta_workers.idle_thresh = 4; fs_info->endio_write_workers.idle_thresh = 2; fs_info->endio_meta_write_workers.idle_thresh = 2; btrfs_start_workers(&fs_info->workers, 1); btrfs_start_workers(&fs_info->generic_worker, 1); btrfs_start_workers(&fs_info->submit_workers, 1); btrfs_start_workers(&fs_info->delalloc_workers, 1); btrfs_start_workers(&fs_info->fixup_workers, 1); btrfs_start_workers(&fs_info->endio_workers, 1); btrfs_start_workers(&fs_info->endio_meta_workers, 1); btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); btrfs_start_workers(&fs_info->endio_write_workers, 1); btrfs_start_workers(&fs_info->endio_freespace_worker, 1); btrfs_start_workers(&fs_info->delayed_workers, 1); fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 4 * 1024 * 1024 / PAGE_CACHE_SIZE); nodesize = btrfs_super_nodesize(disk_super); leafsize = btrfs_super_leafsize(disk_super); sectorsize = btrfs_super_sectorsize(disk_super); stripesize = btrfs_super_stripesize(disk_super); tree_root->nodesize = nodesize; tree_root->leafsize = leafsize; tree_root->sectorsize = sectorsize; tree_root->stripesize = stripesize; sb->s_blocksize = sectorsize; sb->s_blocksize_bits = blksize_bits(sectorsize); if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC, sizeof(disk_super->magic))) { printk(KERN_INFO "btrfs: valid FS not 
found on %s\n", sb->s_id); goto fail_sb_buffer; } mutex_lock(&fs_info->chunk_mutex); ret = btrfs_read_sys_array(tree_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { printk(KERN_WARNING "btrfs: failed to read the system " "array on %s\n", sb->s_id); goto fail_sb_buffer; } blocksize = btrfs_level_size(tree_root, btrfs_super_chunk_root_level(disk_super)); generation = btrfs_super_chunk_root_generation(disk_super); __setup_root(nodesize, leafsize, sectorsize, stripesize, chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); chunk_root->node = read_tree_block(chunk_root, btrfs_super_chunk_root(disk_super), blocksize, generation); BUG_ON(!chunk_root->node); if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n", sb->s_id); goto fail_chunk_root; } btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); chunk_root->commit_root = btrfs_root_node(chunk_root); read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); mutex_lock(&fs_info->chunk_mutex); ret = btrfs_read_chunk_tree(chunk_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n", sb->s_id); goto fail_chunk_root; } btrfs_close_extra_devices(fs_devices); blocksize = btrfs_level_size(tree_root, btrfs_super_root_level(disk_super)); generation = btrfs_super_generation(disk_super); tree_root->node = read_tree_block(tree_root, btrfs_super_root(disk_super), blocksize, generation); if (!tree_root->node) goto fail_chunk_root; if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { printk(KERN_WARNING "btrfs: failed to read tree root on %s\n", sb->s_id); goto fail_tree_root; } btrfs_set_root_node(&tree_root->root_item, tree_root->node); tree_root->commit_root = btrfs_root_node(tree_root); ret = find_and_setup_root(tree_root, fs_info, BTRFS_EXTENT_TREE_OBJECTID, extent_root); if (ret) 
goto fail_tree_root; extent_root->track_dirty = 1; ret = find_and_setup_root(tree_root, fs_info, BTRFS_DEV_TREE_OBJECTID, dev_root); if (ret) goto fail_extent_root; dev_root->track_dirty = 1; ret = find_and_setup_root(tree_root, fs_info, BTRFS_CSUM_TREE_OBJECTID, csum_root); if (ret) goto fail_dev_root; csum_root->track_dirty = 1; fs_info->generation = generation; fs_info->last_trans_committed = generation; fs_info->data_alloc_profile = (u64)-1; fs_info->metadata_alloc_profile = (u64)-1; fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; ret = btrfs_init_space_info(fs_info); if (ret) { printk(KERN_ERR "Failed to initial space info: %d\n", ret); goto fail_block_groups; } ret = btrfs_read_block_groups(extent_root); if (ret) { printk(KERN_ERR "Failed to read block groups: %d\n", ret); goto fail_block_groups; } fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, "btrfs-cleaner"); if (IS_ERR(fs_info->cleaner_kthread)) goto fail_block_groups; fs_info->transaction_kthread = kthread_run(transaction_kthread, tree_root, "btrfs-transaction"); if (IS_ERR(fs_info->transaction_kthread)) goto fail_cleaner; if (!btrfs_test_opt(tree_root, SSD) && !btrfs_test_opt(tree_root, NOSSD) && !fs_info->fs_devices->rotating) { printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD " "mode\n"); btrfs_set_opt(fs_info->mount_opt, SSD); } /* do not make disk changes in broken FS */ if (btrfs_super_log_root(disk_super) != 0 && !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) { u64 bytenr = btrfs_super_log_root(disk_super); if (fs_devices->rw_devices == 0) { printk(KERN_WARNING "Btrfs log replay required " "on RO media\n"); err = -EIO; goto fail_trans_kthread; } blocksize = btrfs_level_size(tree_root, btrfs_super_log_root_level(disk_super)); log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); if (!log_tree_root) { err = -ENOMEM; goto fail_trans_kthread; } __setup_root(nodesize, leafsize, sectorsize, stripesize, log_tree_root, fs_info, 
BTRFS_TREE_LOG_OBJECTID); log_tree_root->node = read_tree_block(tree_root, bytenr, blocksize, generation + 1); ret = btrfs_recover_log_trees(log_tree_root); BUG_ON(ret); if (sb->s_flags & MS_RDONLY) { ret = btrfs_commit_super(tree_root); BUG_ON(ret); } } ret = btrfs_find_orphan_roots(tree_root); BUG_ON(ret); if (!(sb->s_flags & MS_RDONLY)) { ret = btrfs_cleanup_fs_roots(fs_info); BUG_ON(ret); ret = btrfs_recover_relocation(tree_root); if (ret < 0) { printk(KERN_WARNING "btrfs: failed to recover relocation\n"); err = -EINVAL; goto fail_trans_kthread; } } location.objectid = BTRFS_FS_TREE_OBJECTID; location.type = BTRFS_ROOT_ITEM_KEY; location.offset = (u64)-1; fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); if (!fs_info->fs_root) goto fail_trans_kthread; if (IS_ERR(fs_info->fs_root)) { err = PTR_ERR(fs_info->fs_root); goto fail_trans_kthread; } if (!(sb->s_flags & MS_RDONLY)) { down_read(&fs_info->cleanup_work_sem); err = btrfs_orphan_cleanup(fs_info->fs_root); if (!err) err = btrfs_orphan_cleanup(fs_info->tree_root); up_read(&fs_info->cleanup_work_sem); if (err) { close_ctree(tree_root); return ERR_PTR(err); } } return tree_root; fail_trans_kthread: kthread_stop(fs_info->transaction_kthread); fail_cleaner: kthread_stop(fs_info->cleaner_kthread); /* * make sure we're done with the btree inode before we stop our * kthreads */ filemap_write_and_wait(fs_info->btree_inode->i_mapping); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); fail_block_groups: btrfs_free_block_groups(fs_info); free_extent_buffer(csum_root->node); free_extent_buffer(csum_root->commit_root); fail_dev_root: free_extent_buffer(dev_root->node); free_extent_buffer(dev_root->commit_root); fail_extent_root: free_extent_buffer(extent_root->node); free_extent_buffer(extent_root->commit_root); fail_tree_root: free_extent_buffer(tree_root->node); free_extent_buffer(tree_root->commit_root); fail_chunk_root: free_extent_buffer(chunk_root->node); 
free_extent_buffer(chunk_root->commit_root); fail_sb_buffer: btrfs_stop_workers(&fs_info->generic_worker); btrfs_stop_workers(&fs_info->fixup_workers); btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_meta_workers); btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); btrfs_stop_workers(&fs_info->delayed_workers); fail_alloc: kfree(fs_info->delayed_root); fail_iput: invalidate_inode_pages2(fs_info->btree_inode->i_mapping); iput(fs_info->btree_inode); btrfs_close_devices(fs_info->fs_devices); btrfs_mapping_tree_free(&fs_info->mapping_tree); fail_bdi: bdi_destroy(&fs_info->bdi); fail_srcu: cleanup_srcu_struct(&fs_info->subvol_srcu); fail: kfree(extent_root); kfree(tree_root); kfree(fs_info); kfree(chunk_root); kfree(dev_root); kfree(csum_root); return ERR_PTR(err); } static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; if (uptodate) { set_buffer_uptodate(bh); } else { printk_ratelimited(KERN_WARNING "lost page write due to " "I/O error on %s\n", bdevname(bh->b_bdev, b)); /* note, we dont' set_buffer_write_io_error because we have * our own ways of dealing with the IO errors */ clear_buffer_uptodate(bh); } unlock_buffer(bh); put_bh(bh); } struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) { struct buffer_head *bh; struct buffer_head *latest = NULL; struct btrfs_super_block *super; int i; u64 transid = 0; u64 bytenr; /* we would like to check all the supers, but that would make * a btrfs mount succeed after a mkfs from a different FS. 
* So, we need to add a special mount option to scan for * later supers, using BTRFS_SUPER_MIRROR_MAX instead */ for (i = 0; i < 1; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + 4096 >= i_size_read(bdev->bd_inode)) break; bh = __bread(bdev, bytenr / 4096, 4096); if (!bh) continue; super = (struct btrfs_super_block *)bh->b_data; if (btrfs_super_bytenr(super) != bytenr || strncmp((char *)(&super->magic), BTRFS_MAGIC, sizeof(super->magic))) { brelse(bh); continue; } if (!latest || btrfs_super_generation(super) > transid) { brelse(latest); latest = bh; transid = btrfs_super_generation(super); } else { brelse(bh); } } return latest; } /* * this should be called twice, once with wait == 0 and * once with wait == 1. When wait == 0 is done, all the buffer heads * we write are pinned. * * They are released when wait == 1 is done. * max_mirrors must be the same for both runs, and it indicates how * many supers on this one device should be written. * * max_mirrors == 0 means to write them all. */ static int write_dev_supers(struct btrfs_device *device, struct btrfs_super_block *sb, int do_barriers, int wait, int max_mirrors) { struct buffer_head *bh; int i; int ret; int errors = 0; u32 crc; u64 bytenr; int last_barrier = 0; if (max_mirrors == 0) max_mirrors = BTRFS_SUPER_MIRROR_MAX; /* make sure only the last submit_bh does a barrier */ if (do_barriers) { for (i = 0; i < max_mirrors; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) break; last_barrier = i; } } for (i = 0; i < max_mirrors; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) break; if (wait) { bh = __find_get_block(device->bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE); BUG_ON(!bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) errors++; /* drop our reference */ brelse(bh); /* drop the reference from the wait == 0 run */ brelse(bh); continue; } else { btrfs_set_super_bytenr(sb, bytenr); crc = ~(u32)0; crc = 
btrfs_csum_data(NULL, (char *)sb + BTRFS_CSUM_SIZE, crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); btrfs_csum_final(crc, sb->csum); /* * one reference for us, and we leave it for the * caller */ bh = __getblk(device->bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE); memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); /* one reference for submit_bh */ get_bh(bh); set_buffer_uptodate(bh); lock_buffer(bh); bh->b_end_io = btrfs_end_buffer_write_sync; } if (i == last_barrier && do_barriers) ret = submit_bh(WRITE_FLUSH_FUA, bh); else ret = submit_bh(WRITE_SYNC, bh); if (ret) errors++; } return errors < i ? 0 : -1; } int write_all_supers(struct btrfs_root *root, int max_mirrors) { struct list_head *head; struct btrfs_device *dev; struct btrfs_super_block *sb; struct btrfs_dev_item *dev_item; int ret; int do_barriers; int max_errors; int total_errors = 0; u64 flags; max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; do_barriers = !btrfs_test_opt(root, NOBARRIER); sb = &root->fs_info->super_for_commit; dev_item = &sb->dev_item; mutex_lock(&root->fs_info->fs_devices->device_list_mutex); head = &root->fs_info->fs_devices->devices; list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) { total_errors++; continue; } if (!dev->in_fs_metadata || !dev->writeable) continue; btrfs_set_stack_device_generation(dev_item, 0); btrfs_set_stack_device_type(dev_item, dev->type); btrfs_set_stack_device_id(dev_item, dev->devid); btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes); btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used); btrfs_set_stack_device_io_align(dev_item, dev->io_align); btrfs_set_stack_device_io_width(dev_item, dev->io_width); btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE); flags = btrfs_super_flags(sb); btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); ret = write_dev_supers(dev, sb, 
do_barriers, 0, max_mirrors); if (ret) total_errors++; } if (total_errors > max_errors) { printk(KERN_ERR "btrfs: %d errors while writing supers\n", total_errors); BUG(); } total_errors = 0; list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) continue; if (!dev->in_fs_metadata || !dev->writeable) continue; ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors); if (ret) total_errors++; } mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); if (total_errors > max_errors) { printk(KERN_ERR "btrfs: %d errors while writing supers\n", total_errors); BUG(); } return 0; } int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root *root, int max_mirrors) { int ret; ret = write_all_supers(root, max_mirrors); return ret; } int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) { spin_lock(&fs_info->fs_roots_radix_lock); radix_tree_delete(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid); spin_unlock(&fs_info->fs_roots_radix_lock); if (btrfs_root_refs(&root->root_item) == 0) synchronize_srcu(&fs_info->subvol_srcu); __btrfs_remove_free_space_cache(root->free_ino_pinned); __btrfs_remove_free_space_cache(root->free_ino_ctl); free_fs_root(root); return 0; } static void free_fs_root(struct btrfs_root *root) { iput(root->cache_inode); WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); if (root->anon_super.s_dev) { down_write(&root->anon_super.s_umount); kill_anon_super(&root->anon_super); } free_extent_buffer(root->node); free_extent_buffer(root->commit_root); kfree(root->free_ino_ctl); kfree(root->free_ino_pinned); kfree(root->name); kfree(root); } static int del_fs_roots(struct btrfs_fs_info *fs_info) { int ret; struct btrfs_root *gang[8]; int i; while (!list_empty(&fs_info->dead_roots)) { gang[0] = list_entry(fs_info->dead_roots.next, struct btrfs_root, root_list); list_del(&gang[0]->root_list); if (gang[0]->in_radix) { btrfs_free_fs_root(fs_info, gang[0]); } else { free_extent_buffer(gang[0]->node); 
free_extent_buffer(gang[0]->commit_root); kfree(gang[0]); } } while (1) { ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)gang, 0, ARRAY_SIZE(gang)); if (!ret) break; for (i = 0; i < ret; i++) btrfs_free_fs_root(fs_info, gang[i]); } return 0; } int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) { u64 root_objectid = 0; struct btrfs_root *gang[8]; int i; int ret; while (1) { ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)gang, root_objectid, ARRAY_SIZE(gang)); if (!ret) break; root_objectid = gang[ret - 1]->root_key.objectid + 1; for (i = 0; i < ret; i++) { int err; root_objectid = gang[i]->root_key.objectid; err = btrfs_orphan_cleanup(gang[i]); if (err) return err; } root_objectid++; } return 0; } int btrfs_commit_super(struct btrfs_root *root) { struct btrfs_trans_handle *trans; int ret; mutex_lock(&root->fs_info->cleaner_mutex); btrfs_run_delayed_iputs(root); btrfs_clean_old_snapshots(root); mutex_unlock(&root->fs_info->cleaner_mutex); /* wait until ongoing cleanup work done */ down_write(&root->fs_info->cleanup_work_sem); up_write(&root->fs_info->cleanup_work_sem); trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); btrfs_commit_transaction(trans, root); ret = btrfs_write_and_wait_transaction(NULL, root); BUG_ON(ret); ret = write_ctree_super(NULL, root, 0); return ret; } int close_ctree(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; int ret; fs_info->closing = 1; smp_mb(); btrfs_scrub_cancel(root); /* wait for any defraggers to finish */ wait_event(fs_info->transaction_wait, (atomic_read(&fs_info->defrag_running) == 0)); /* clear out the rbtree of defraggable inodes */ btrfs_run_defrag_inodes(root->fs_info); btrfs_put_block_group_cache(fs_info); /* * Here come 2 situations when 
btrfs is broken to flip readonly: * * 1. when btrfs flips readonly somewhere else before * btrfs_commit_super, sb->s_flags has MS_RDONLY flag, * and btrfs will skip to write sb directly to keep * ERROR state on disk. * * 2. when btrfs flips readonly just in btrfs_commit_super, * and in such case, btrfs cannot write sb via btrfs_commit_super, * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag, * btrfs will cleanup all FS resources first and write sb then. */ if (!(fs_info->sb->s_flags & MS_RDONLY)) { ret = btrfs_commit_super(root); if (ret) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { ret = btrfs_error_commit_super(root); if (ret) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } kthread_stop(root->fs_info->transaction_kthread); kthread_stop(root->fs_info->cleaner_kthread); fs_info->closing = 2; smp_mb(); if (fs_info->delalloc_bytes) { printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", (unsigned long long)fs_info->delalloc_bytes); } if (fs_info->total_ref_cache_size) { printk(KERN_INFO "btrfs: at umount reference cache size %llu\n", (unsigned long long)fs_info->total_ref_cache_size); } free_extent_buffer(fs_info->extent_root->node); free_extent_buffer(fs_info->extent_root->commit_root); free_extent_buffer(fs_info->tree_root->node); free_extent_buffer(fs_info->tree_root->commit_root); free_extent_buffer(root->fs_info->chunk_root->node); free_extent_buffer(root->fs_info->chunk_root->commit_root); free_extent_buffer(root->fs_info->dev_root->node); free_extent_buffer(root->fs_info->dev_root->commit_root); free_extent_buffer(root->fs_info->csum_root->node); free_extent_buffer(root->fs_info->csum_root->commit_root); btrfs_free_block_groups(root->fs_info); del_fs_roots(fs_info); iput(fs_info->btree_inode); kfree(fs_info->delayed_root); btrfs_stop_workers(&fs_info->generic_worker); btrfs_stop_workers(&fs_info->fixup_workers); btrfs_stop_workers(&fs_info->delalloc_workers); 
btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_meta_workers); btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); btrfs_stop_workers(&fs_info->delayed_workers); btrfs_close_devices(fs_info->fs_devices); btrfs_mapping_tree_free(&fs_info->mapping_tree); bdi_destroy(&fs_info->bdi); cleanup_srcu_struct(&fs_info->subvol_srcu); kfree(fs_info->extent_root); kfree(fs_info->tree_root); kfree(fs_info->chunk_root); kfree(fs_info->dev_root); kfree(fs_info->csum_root); kfree(fs_info); return 0; } int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid) { int ret; struct inode *btree_inode = buf->first_page->mapping->host; ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf, NULL); if (!ret) return ret; ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, parent_transid); return !ret; } int btrfs_set_buffer_uptodate(struct extent_buffer *buf) { struct inode *btree_inode = buf->first_page->mapping->host; return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf); } void btrfs_mark_buffer_dirty(struct extent_buffer *buf) { struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; u64 transid = btrfs_header_generation(buf); struct inode *btree_inode = root->fs_info->btree_inode; int was_dirty; btrfs_assert_tree_locked(buf); if (transid != root->fs_info->generation) { printk(KERN_CRIT "btrfs transid mismatch buffer %llu, " "found %llu running %llu\n", (unsigned long long)buf->start, (unsigned long long)transid, (unsigned long long)root->fs_info->generation); WARN_ON(1); } was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf); if (!was_dirty) { spin_lock(&root->fs_info->delalloc_lock); root->fs_info->dirty_metadata_bytes += buf->len; 
spin_unlock(&root->fs_info->delalloc_lock); } } void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) { /* * looks as though older kernels can get into trouble with * this code, they end up stuck in balance_dirty_pages forever */ u64 num_dirty; unsigned long thresh = 32 * 1024 * 1024; if (current->flags & PF_MEMALLOC) return; btrfs_balance_delayed_items(root); num_dirty = root->fs_info->dirty_metadata_bytes; if (num_dirty > thresh) { balance_dirty_pages_ratelimited_nr( root->fs_info->btree_inode->i_mapping, 1); } return; } void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) { /* * looks as though older kernels can get into trouble with * this code, they end up stuck in balance_dirty_pages forever */ u64 num_dirty; unsigned long thresh = 32 * 1024 * 1024; if (current->flags & PF_MEMALLOC) return; num_dirty = root->fs_info->dirty_metadata_bytes; if (num_dirty > thresh) { balance_dirty_pages_ratelimited_nr( root->fs_info->btree_inode->i_mapping, 1); } return; } int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) { struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; int ret; ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); if (ret == 0) set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags); return ret; } int btree_lock_page_hook(struct page *page) { struct inode *inode = page->mapping->host; struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_buffer *eb; unsigned long len; u64 bytenr = page_offset(page); if (page->private == EXTENT_PAGE_PRIVATE) goto out; len = page->private >> 2; eb = find_extent_buffer(io_tree, bytenr, len); if (!eb) goto out; btrfs_tree_lock(eb); btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { spin_lock(&root->fs_info->delalloc_lock); if (root->fs_info->dirty_metadata_bytes >= eb->len) root->fs_info->dirty_metadata_bytes -= 
eb->len; else WARN_ON(1); spin_unlock(&root->fs_info->delalloc_lock); } btrfs_tree_unlock(eb); free_extent_buffer(eb); out: lock_page(page); return 0; } static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, int read_only) { if (read_only) return; if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) printk(KERN_WARNING "warning: mount fs with errors, " "running btrfsck is recommended\n"); } int btrfs_error_commit_super(struct btrfs_root *root) { int ret; mutex_lock(&root->fs_info->cleaner_mutex); btrfs_run_delayed_iputs(root); mutex_unlock(&root->fs_info->cleaner_mutex); down_write(&root->fs_info->cleanup_work_sem); up_write(&root->fs_info->cleanup_work_sem); /* cleanup FS via transaction */ btrfs_cleanup_transaction(root); ret = write_ctree_super(NULL, root, 0); return ret; } static int btrfs_destroy_ordered_operations(struct btrfs_root *root) { struct btrfs_inode *btrfs_inode; struct list_head splice; INIT_LIST_HEAD(&splice); mutex_lock(&root->fs_info->ordered_operations_mutex); spin_lock(&root->fs_info->ordered_extent_lock); list_splice_init(&root->fs_info->ordered_operations, &splice); while (!list_empty(&splice)) { btrfs_inode = list_entry(splice.next, struct btrfs_inode, ordered_operations); list_del_init(&btrfs_inode->ordered_operations); btrfs_invalidate_inodes(btrfs_inode->root); } spin_unlock(&root->fs_info->ordered_extent_lock); mutex_unlock(&root->fs_info->ordered_operations_mutex); return 0; } static int btrfs_destroy_ordered_extents(struct btrfs_root *root) { struct list_head splice; struct btrfs_ordered_extent *ordered; struct inode *inode; INIT_LIST_HEAD(&splice); spin_lock(&root->fs_info->ordered_extent_lock); list_splice_init(&root->fs_info->ordered_extents, &splice); while (!list_empty(&splice)) { ordered = list_entry(splice.next, struct btrfs_ordered_extent, root_extent_list); list_del_init(&ordered->root_extent_list); atomic_inc(&ordered->refs); /* the inode may be getting freed (in sys_unlink path). 
*/ inode = igrab(ordered->inode); spin_unlock(&root->fs_info->ordered_extent_lock); if (inode) iput(inode); atomic_set(&ordered->refs, 1); btrfs_put_ordered_extent(ordered); spin_lock(&root->fs_info->ordered_extent_lock); } spin_unlock(&root->fs_info->ordered_extent_lock); return 0; } static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, struct btrfs_root *root) { struct rb_node *node; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_node *ref; int ret = 0; delayed_refs = &trans->delayed_refs; spin_lock(&delayed_refs->lock); if (delayed_refs->num_entries == 0) { spin_unlock(&delayed_refs->lock); printk(KERN_INFO "delayed_refs has NO entry\n"); return ret; } node = rb_first(&delayed_refs->root); while (node) { ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); node = rb_next(node); ref->in_tree = 0; rb_erase(&ref->rb_node, &delayed_refs->root); delayed_refs->num_entries--; atomic_set(&ref->refs, 1); if (btrfs_delayed_ref_is_head(ref)) { struct btrfs_delayed_ref_head *head; head = btrfs_delayed_node_to_head(ref); mutex_lock(&head->mutex); kfree(head->extent_op); delayed_refs->num_heads--; if (list_empty(&head->cluster)) delayed_refs->num_heads_ready--; list_del_init(&head->cluster); mutex_unlock(&head->mutex); } spin_unlock(&delayed_refs->lock); btrfs_put_delayed_ref(ref); cond_resched(); spin_lock(&delayed_refs->lock); } spin_unlock(&delayed_refs->lock); return ret; } static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t) { struct btrfs_pending_snapshot *snapshot; struct list_head splice; INIT_LIST_HEAD(&splice); list_splice_init(&t->pending_snapshots, &splice); while (!list_empty(&splice)) { snapshot = list_entry(splice.next, struct btrfs_pending_snapshot, list); list_del_init(&snapshot->list); kfree(snapshot); } return 0; } static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) { struct btrfs_inode *btrfs_inode; struct list_head splice; INIT_LIST_HEAD(&splice); 
spin_lock(&root->fs_info->delalloc_lock); list_splice_init(&root->fs_info->delalloc_inodes, &splice); while (!list_empty(&splice)) { btrfs_inode = list_entry(splice.next, struct btrfs_inode, delalloc_inodes); list_del_init(&btrfs_inode->delalloc_inodes); btrfs_invalidate_inodes(btrfs_inode->root); } spin_unlock(&root->fs_info->delalloc_lock); return 0; } static int btrfs_destroy_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages, int mark) { int ret; struct page *page; struct inode *btree_inode = root->fs_info->btree_inode; struct extent_buffer *eb; u64 start = 0; u64 end; u64 offset; unsigned long index; while (1) { ret = find_first_extent_bit(dirty_pages, start, &start, &end, mark); if (ret) break; clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); while (start <= end) { index = start >> PAGE_CACHE_SHIFT; start = (u64)(index + 1) << PAGE_CACHE_SHIFT; page = find_get_page(btree_inode->i_mapping, index); if (!page) continue; offset = page_offset(page); spin_lock(&dirty_pages->buffer_lock); eb = radix_tree_lookup( &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, offset >> PAGE_CACHE_SHIFT); spin_unlock(&dirty_pages->buffer_lock); if (eb) { ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); atomic_set(&eb->refs, 1); } if (PageWriteback(page)) end_page_writeback(page); lock_page(page); if (PageDirty(page)) { clear_page_dirty_for_io(page); spin_lock_irq(&page->mapping->tree_lock); radix_tree_tag_clear(&page->mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); spin_unlock_irq(&page->mapping->tree_lock); } page->mapping->a_ops->invalidatepage(page, 0); unlock_page(page); } } return ret; } static int btrfs_destroy_pinned_extent(struct btrfs_root *root, struct extent_io_tree *pinned_extents) { struct extent_io_tree *unpin; u64 start; u64 end; int ret; unpin = pinned_extents; while (1) { ret = find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY); if (ret) break; /* opt_discard */ if (btrfs_test_opt(root, DISCARD)) 
ret = btrfs_error_discard_extent(root, start, end + 1 - start, NULL); clear_extent_dirty(unpin, start, end, GFP_NOFS); btrfs_error_unpin_extent_range(root, start, end); cond_resched(); } return 0; } static int btrfs_cleanup_transaction(struct btrfs_root *root) { struct btrfs_transaction *t; LIST_HEAD(list); WARN_ON(1); mutex_lock(&root->fs_info->transaction_kthread_mutex); spin_lock(&root->fs_info->trans_lock); list_splice_init(&root->fs_info->trans_list, &list); root->fs_info->trans_no_join = 1; spin_unlock(&root->fs_info->trans_lock); while (!list_empty(&list)) { t = list_entry(list.next, struct btrfs_transaction, list); if (!t) break; btrfs_destroy_ordered_operations(root); btrfs_destroy_ordered_extents(root); btrfs_destroy_delayed_refs(t, root); btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, t->dirty_pages.dirty_bytes); /* FIXME: cleanup wait for commit */ t->in_commit = 1; t->blocked = 1; if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) wake_up(&root->fs_info->transaction_blocked_wait); t->blocked = 0; if (waitqueue_active(&root->fs_info->transaction_wait)) wake_up(&root->fs_info->transaction_wait); t->commit_done = 1; if (waitqueue_active(&t->commit_wait)) wake_up(&t->commit_wait); btrfs_destroy_pending_snapshots(t); btrfs_destroy_delalloc_inodes(root); spin_lock(&root->fs_info->trans_lock); root->fs_info->running_transaction = NULL; spin_unlock(&root->fs_info->trans_lock); btrfs_destroy_marked_extents(root, &t->dirty_pages, EXTENT_DIRTY); btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); atomic_set(&t->use_count, 0); list_del_init(&t->list); memset(t, 0, sizeof(*t)); kmem_cache_free(btrfs_transaction_cachep, t); } spin_lock(&root->fs_info->trans_lock); root->fs_info->trans_no_join = 0; spin_unlock(&root->fs_info->trans_lock); mutex_unlock(&root->fs_info->transaction_kthread_mutex); return 0; } static struct extent_io_ops btree_extent_io_ops = { .write_cache_pages_lock_hook = btree_lock_page_hook, 
.readpage_end_io_hook = btree_readpage_end_io_hook, .submit_bio_hook = btree_submit_bio_hook, /* note we're sharing with inode.c for the merge bio hook */ .merge_bio_hook = btrfs_merge_bio_hook, };
gpl-2.0
gabwerkz/bproj
arch/arm/mach-at91/board-carmeva.c
1618
4435
/* * linux/arch/arm/mach-at91/board-carmeva.c * * Copyright (c) 2005 Peer Georgi * Conitec Datasystems * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/gpio.h> #include "generic.h" static void __init carmeva_map_io(void) { /* Initialize processor: 20.000 MHz crystal */ at91rm9200_initialize(20000000, AT91RM9200_BGA); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART1 on ttyS1. 
(Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static void __init carmeva_init_irq(void) { at91rm9200_init_interrupts(NULL); } static struct at91_eth_data __initdata carmeva_eth_data = { .phy_irq_pin = AT91_PIN_PC4, .is_rmii = 1, }; static struct at91_usbh_data __initdata carmeva_usbh_data = { .ports = 2, }; static struct at91_udc_data __initdata carmeva_udc_data = { .vbus_pin = AT91_PIN_PD12, .pullup_pin = AT91_PIN_PD9, }; /* FIXME: user dependant */ // static struct at91_cf_data __initdata carmeva_cf_data = { // .det_pin = AT91_PIN_PB0, // .rst_pin = AT91_PIN_PC5, // .irq_pin = ... not connected // .vcc_pin = ... always powered // }; static struct at91_mmc_data __initdata carmeva_mmc_data = { .slot_b = 0, .wire4 = 1, .det_pin = AT91_PIN_PB10, .wp_pin = AT91_PIN_PC14, }; static struct spi_board_info carmeva_spi_devices[] = { { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 10 * 1000 * 1000, }, { /* User accessible spi - cs1 (250KHz) */ .modalias = "spi-cs1", .chip_select = 1, .max_speed_hz = 250 * 1000, }, { /* User accessible spi - cs2 (1MHz) */ .modalias = "spi-cs2", .chip_select = 2, .max_speed_hz = 1 * 1000 * 1000, }, { /* User accessible spi - cs3 (10MHz) */ .modalias = "spi-cs3", .chip_select = 3, .max_speed_hz = 10 * 1000 * 1000, }, }; static struct gpio_led carmeva_leds[] = { { /* "user led 1", LED9 */ .name = "led9", .gpio = AT91_PIN_PA21, .active_low = 1, .default_trigger = "heartbeat", }, { /* "user led 2", LED10 */ .name = "led10", .gpio = AT91_PIN_PA25, .active_low = 1, }, { /* "user led 3", LED11 */ .name = "led11", .gpio = AT91_PIN_PA26, .active_low = 1, }, { /* "user led 4", LED12 */ .name = "led12", .gpio = AT91_PIN_PA18, .active_low = 1, } }; static void __init carmeva_board_init(void) { /* Serial */ 
at91_add_device_serial(); /* Ethernet */ at91_add_device_eth(&carmeva_eth_data); /* USB Host */ at91_add_device_usbh(&carmeva_usbh_data); /* USB Device */ at91_add_device_udc(&carmeva_udc_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* SPI */ at91_add_device_spi(carmeva_spi_devices, ARRAY_SIZE(carmeva_spi_devices)); /* Compact Flash */ // at91_add_device_cf(&carmeva_cf_data); /* MMC */ at91_add_device_mmc(0, &carmeva_mmc_data); /* LEDs */ at91_gpio_leds(carmeva_leds, ARRAY_SIZE(carmeva_leds)); } MACHINE_START(CARMEVA, "Carmeva") /* Maintainer: Conitec Datasystems */ .phys_io = AT91_BASE_SYS, .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc, .boot_params = AT91_SDRAM_BASE + 0x100, .timer = &at91rm9200_timer, .map_io = carmeva_map_io, .init_irq = carmeva_init_irq, .init_machine = carmeva_board_init, MACHINE_END
gpl-2.0
The-Sickness/S6-MM
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
2130
20575
/* * shmob_drm_crtc.c -- SH Mobile DRM CRTCs * * Copyright (C) 2012 Renesas Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/backlight.h> #include <linux/clk.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> #include <video/sh_mobile_meram.h> #include "shmob_drm_backlight.h" #include "shmob_drm_crtc.h" #include "shmob_drm_drv.h" #include "shmob_drm_kms.h" #include "shmob_drm_plane.h" #include "shmob_drm_regs.h" /* * TODO: panel support */ /* ----------------------------------------------------------------------------- * Clock management */ static void shmob_drm_clk_on(struct shmob_drm_device *sdev) { if (sdev->clock) clk_enable(sdev->clock); #if 0 if (sdev->meram_dev && sdev->meram_dev->pdev) pm_runtime_get_sync(&sdev->meram_dev->pdev->dev); #endif } static void shmob_drm_clk_off(struct shmob_drm_device *sdev) { #if 0 if (sdev->meram_dev && sdev->meram_dev->pdev) pm_runtime_put_sync(&sdev->meram_dev->pdev->dev); #endif if (sdev->clock) clk_disable(sdev->clock); } /* ----------------------------------------------------------------------------- * CRTC */ static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc) { struct drm_crtc *crtc = &scrtc->crtc; struct shmob_drm_device *sdev = crtc->dev->dev_private; const struct shmob_drm_interface_data *idata = &sdev->pdata->iface; const struct drm_display_mode *mode = &crtc->mode; u32 value; value = sdev->ldmt1r | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL) | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL) | ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? 
LDMT1R_DWPOL : 0) | ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0) | ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0) | ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0) | ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0); lcdc_write(sdev, LDMT1R, value); if (idata->interface >= SHMOB_DRM_IFACE_SYS8A && idata->interface <= SHMOB_DRM_IFACE_SYS24) { /* Setup SYS bus. */ value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT) | (idata->sys.vsync_active_high ? LDMT2R_RSV : 0) | (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0) | (idata->sys.write_setup << LDMT2R_WCSC_SHIFT) | (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT) | (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT); lcdc_write(sdev, LDMT2R, value); value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT) | (idata->sys.read_setup << LDMT3R_RCSC_SHIFT) | (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT) | (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT); lcdc_write(sdev, LDMT3R, value); } value = ((mode->hdisplay / 8) << 16) /* HDCN */ | (mode->htotal / 8); /* HTCN */ lcdc_write(sdev, LDHCNR, value); value = (((mode->hsync_end - mode->hsync_start) / 8) << 16) /* HSYNW */ | (mode->hsync_start / 8); /* HSYNP */ lcdc_write(sdev, LDHSYNR, value); value = ((mode->hdisplay & 7) << 24) | ((mode->htotal & 7) << 16) | (((mode->hsync_end - mode->hsync_start) & 7) << 8) | (mode->hsync_start & 7); lcdc_write(sdev, LDHAJR, value); value = ((mode->vdisplay) << 16) /* VDLN */ | mode->vtotal; /* VTLN */ lcdc_write(sdev, LDVLNR, value); value = ((mode->vsync_end - mode->vsync_start) << 16) /* VSYNW */ | mode->vsync_start; /* VSYNP */ lcdc_write(sdev, LDVSYNR, value); } static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start) { struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private; u32 value; value = lcdc_read(sdev, LDCNT2R); if (start) lcdc_write(sdev, LDCNT2R, value | LDCNT2R_DO); else lcdc_write(sdev, LDCNT2R, value & ~LDCNT2R_DO); /* Wait until power is 
applied/stopped. */ while (1) { value = lcdc_read(sdev, LDPMR) & LDPMR_LPS; if ((start && value) || (!start && !value)) break; cpu_relax(); } if (!start) { /* Stop the dot clock. */ lcdc_write(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTP); } } /* * shmob_drm_crtc_start - Configure and start the LCDC * @scrtc: the SH Mobile CRTC * * Configure and start the LCDC device. External devices (clocks, MERAM, panels, * ...) are not touched by this function. */ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc) { struct drm_crtc *crtc = &scrtc->crtc; struct shmob_drm_device *sdev = crtc->dev->dev_private; const struct shmob_drm_interface_data *idata = &sdev->pdata->iface; const struct shmob_drm_format_info *format; struct drm_device *dev = sdev->ddev; struct drm_plane *plane; u32 value; if (scrtc->started) return; format = shmob_drm_format_info(crtc->fb->pixel_format); if (WARN_ON(format == NULL)) return; /* Enable clocks before accessing the hardware. */ shmob_drm_clk_on(sdev); /* Reset and enable the LCDC. */ lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR); lcdc_wait_bit(sdev, LDCNT2R, LDCNT2R_BR, 0); lcdc_write(sdev, LDCNT2R, LDCNT2R_ME); /* Stop the LCDC first and disable all interrupts. */ shmob_drm_crtc_start_stop(scrtc, false); lcdc_write(sdev, LDINTR, 0); /* Configure power supply, dot clocks and start them. */ lcdc_write(sdev, LDPMR, 0); value = sdev->lddckr; if (idata->clk_div) { /* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider * denominator. */ lcdc_write(sdev, LDDCKPAT1R, 0); lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1); if (idata->clk_div == 1) value |= LDDCKR_MOSEL; else value |= idata->clk_div; } lcdc_write(sdev, LDDCKR, value); lcdc_write(sdev, LDDCKSTPR, 0); lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0); /* TODO: Setup SYS panel */ /* Setup geometry, format, frame buffer memory and operation mode. */ shmob_drm_crtc_setup_geometry(scrtc); /* TODO: Handle YUV colorspaces. Hardcode REC709 for now. 
*/ lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1); lcdc_write(sdev, LDMLSR, scrtc->line_size); lcdc_write(sdev, LDSA1R, scrtc->dma[0]); if (format->yuv) lcdc_write(sdev, LDSA2R, scrtc->dma[1]); lcdc_write(sdev, LDSM1R, 0); /* Word and long word swap. */ switch (format->fourcc) { case DRM_FORMAT_RGB565: case DRM_FORMAT_NV21: case DRM_FORMAT_NV61: case DRM_FORMAT_NV42: value = LDDDSR_LS | LDDDSR_WS; break; case DRM_FORMAT_RGB888: case DRM_FORMAT_NV12: case DRM_FORMAT_NV16: case DRM_FORMAT_NV24: value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS; break; case DRM_FORMAT_ARGB8888: default: value = LDDDSR_LS; break; } lcdc_write(sdev, LDDDSR, value); /* Setup planes. */ list_for_each_entry(plane, &dev->mode_config.plane_list, head) { if (plane->crtc == crtc) shmob_drm_plane_setup(plane); } /* Enable the display output. */ lcdc_write(sdev, LDCNT1R, LDCNT1R_DE); shmob_drm_crtc_start_stop(scrtc, true); scrtc->started = true; } static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc) { struct drm_crtc *crtc = &scrtc->crtc; struct shmob_drm_device *sdev = crtc->dev->dev_private; if (!scrtc->started) return; /* Disable the MERAM cache. */ if (scrtc->cache) { sh_mobile_meram_cache_free(sdev->meram, scrtc->cache); scrtc->cache = NULL; } /* Stop the LCDC. */ shmob_drm_crtc_start_stop(scrtc, false); /* Disable the display output. */ lcdc_write(sdev, LDCNT1R, 0); /* Stop clocks. */ shmob_drm_clk_off(sdev); scrtc->started = false; } void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc) { shmob_drm_crtc_stop(scrtc); } void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc) { if (scrtc->dpms != DRM_MODE_DPMS_ON) return; shmob_drm_crtc_start(scrtc); } static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc, int x, int y) { struct drm_crtc *crtc = &scrtc->crtc; struct drm_framebuffer *fb = crtc->fb; struct shmob_drm_device *sdev = crtc->dev->dev_private; struct drm_gem_cma_object *gem; unsigned int bpp; bpp = scrtc->format->yuv ? 
8 : scrtc->format->bpp; gem = drm_fb_cma_get_gem_obj(fb, 0); scrtc->dma[0] = gem->paddr + fb->offsets[0] + y * fb->pitches[0] + x * bpp / 8; if (scrtc->format->yuv) { bpp = scrtc->format->bpp - 8; gem = drm_fb_cma_get_gem_obj(fb, 1); scrtc->dma[1] = gem->paddr + fb->offsets[1] + y / (bpp == 4 ? 2 : 1) * fb->pitches[1] + x * (bpp == 16 ? 2 : 1); } if (scrtc->cache) sh_mobile_meram_cache_update(sdev->meram, scrtc->cache, scrtc->dma[0], scrtc->dma[1], &scrtc->dma[0], &scrtc->dma[1]); } static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc) { struct drm_crtc *crtc = &scrtc->crtc; struct shmob_drm_device *sdev = crtc->dev->dev_private; shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y); lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]); if (scrtc->format->yuv) lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]); lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS); } #define to_shmob_crtc(c) container_of(c, struct shmob_drm_crtc, crtc) static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode) { struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); if (scrtc->dpms == mode) return; if (mode == DRM_MODE_DPMS_ON) shmob_drm_crtc_start(scrtc); else shmob_drm_crtc_stop(scrtc); scrtc->dpms = mode; } static bool shmob_drm_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc) { shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); struct shmob_drm_device *sdev = crtc->dev->dev_private; const struct sh_mobile_meram_cfg *mdata = sdev->pdata->meram; const struct shmob_drm_format_info *format; void *cache; format = shmob_drm_format_info(crtc->fb->pixel_format); if (format == NULL) { 
dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n", crtc->fb->pixel_format); return -EINVAL; } scrtc->format = format; scrtc->line_size = crtc->fb->pitches[0]; if (sdev->meram) { /* Enable MERAM cache if configured. We need to de-init * configured ICBs before we can re-initialize them. */ if (scrtc->cache) { sh_mobile_meram_cache_free(sdev->meram, scrtc->cache); scrtc->cache = NULL; } cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata, crtc->fb->pitches[0], adjusted_mode->vdisplay, format->meram, &scrtc->line_size); if (!IS_ERR(cache)) scrtc->cache = cache; } shmob_drm_crtc_compute_base(scrtc, x, y); return 0; } static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc) { shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); } static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { shmob_drm_crtc_update_base(to_shmob_crtc(crtc)); return 0; } static const struct drm_crtc_helper_funcs crtc_helper_funcs = { .dpms = shmob_drm_crtc_dpms, .mode_fixup = shmob_drm_crtc_mode_fixup, .prepare = shmob_drm_crtc_mode_prepare, .commit = shmob_drm_crtc_mode_commit, .mode_set = shmob_drm_crtc_mode_set, .mode_set_base = shmob_drm_crtc_mode_set_base, }; void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc, struct drm_file *file) { struct drm_pending_vblank_event *event; struct drm_device *dev = scrtc->crtc.dev; unsigned long flags; /* Destroy the pending vertical blanking event associated with the * pending page flip, if any, and disable vertical blanking interrupts. 
*/ spin_lock_irqsave(&dev->event_lock, flags); event = scrtc->event; if (event && event->base.file_priv == file) { scrtc->event = NULL; event->base.destroy(&event->base); drm_vblank_put(dev, 0); } spin_unlock_irqrestore(&dev->event_lock, flags); } void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) { struct drm_pending_vblank_event *event; struct drm_device *dev = scrtc->crtc.dev; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); event = scrtc->event; scrtc->event = NULL; if (event) { drm_send_vblank_event(dev, 0, event); drm_vblank_put(dev, 0); } spin_unlock_irqrestore(&dev->event_lock, flags); } static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event) { struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); struct drm_device *dev = scrtc->crtc.dev; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); if (scrtc->event != NULL) { spin_unlock_irqrestore(&dev->event_lock, flags); return -EBUSY; } spin_unlock_irqrestore(&dev->event_lock, flags); crtc->fb = fb; shmob_drm_crtc_update_base(scrtc); if (event) { event->pipe = 0; drm_vblank_get(dev, 0); spin_lock_irqsave(&dev->event_lock, flags); scrtc->event = event; spin_unlock_irqrestore(&dev->event_lock, flags); } return 0; } static const struct drm_crtc_funcs crtc_funcs = { .destroy = drm_crtc_cleanup, .set_config = drm_crtc_helper_set_config, .page_flip = shmob_drm_crtc_page_flip, }; int shmob_drm_crtc_create(struct shmob_drm_device *sdev) { struct drm_crtc *crtc = &sdev->crtc.crtc; int ret; sdev->crtc.dpms = DRM_MODE_DPMS_OFF; ret = drm_crtc_init(sdev->ddev, crtc, &crtc_funcs); if (ret < 0) return ret; drm_crtc_helper_add(crtc, &crtc_helper_funcs); return 0; } /* ----------------------------------------------------------------------------- * Encoder */ #define to_shmob_encoder(e) \ container_of(e, struct shmob_drm_encoder, encoder) static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode) { 
struct shmob_drm_encoder *senc = to_shmob_encoder(encoder); struct shmob_drm_device *sdev = encoder->dev->dev_private; struct shmob_drm_connector *scon = &sdev->connector; if (senc->dpms == mode) return; shmob_drm_backlight_dpms(scon, mode); senc->dpms = mode; } static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct shmob_drm_device *sdev = dev->dev_private; struct drm_connector *connector = &sdev->connector.connector; const struct drm_display_mode *panel_mode; if (list_empty(&connector->modes)) { dev_dbg(dev->dev, "mode_fixup: empty modes list\n"); return false; } /* The flat panel mode is fixed, just copy it to the adjusted mode. */ panel_mode = list_first_entry(&connector->modes, struct drm_display_mode, head); drm_mode_copy(adjusted_mode, panel_mode); return true; } static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder) { /* No-op, everything is handled in the CRTC code. */ } static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* No-op, everything is handled in the CRTC code. */ } static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder) { /* No-op, everything is handled in the CRTC code. 
*/ } static const struct drm_encoder_helper_funcs encoder_helper_funcs = { .dpms = shmob_drm_encoder_dpms, .mode_fixup = shmob_drm_encoder_mode_fixup, .prepare = shmob_drm_encoder_mode_prepare, .commit = shmob_drm_encoder_mode_commit, .mode_set = shmob_drm_encoder_mode_set, }; static void shmob_drm_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); } static const struct drm_encoder_funcs encoder_funcs = { .destroy = shmob_drm_encoder_destroy, }; int shmob_drm_encoder_create(struct shmob_drm_device *sdev) { struct drm_encoder *encoder = &sdev->encoder.encoder; int ret; sdev->encoder.dpms = DRM_MODE_DPMS_OFF; encoder->possible_crtcs = 1; ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs, DRM_MODE_ENCODER_LVDS); if (ret < 0) return ret; drm_encoder_helper_add(encoder, &encoder_helper_funcs); return 0; } void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable) { unsigned long flags; u32 ldintr; /* Be careful not to acknowledge any pending interrupt. 
*/ spin_lock_irqsave(&sdev->irq_lock, flags); ldintr = lcdc_read(sdev, LDINTR) | LDINTR_STATUS_MASK; if (enable) ldintr |= LDINTR_VEE; else ldintr &= ~LDINTR_VEE; lcdc_write(sdev, LDINTR, ldintr); spin_unlock_irqrestore(&sdev->irq_lock, flags); } /* ----------------------------------------------------------------------------- * Connector */ #define to_shmob_connector(c) \ container_of(c, struct shmob_drm_connector, connector) static int shmob_drm_connector_get_modes(struct drm_connector *connector) { struct shmob_drm_device *sdev = connector->dev->dev_private; struct drm_display_mode *mode; mode = drm_mode_create(connector->dev); if (mode == NULL) return 0; mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; mode->clock = sdev->pdata->panel.mode.clock; mode->hdisplay = sdev->pdata->panel.mode.hdisplay; mode->hsync_start = sdev->pdata->panel.mode.hsync_start; mode->hsync_end = sdev->pdata->panel.mode.hsync_end; mode->htotal = sdev->pdata->panel.mode.htotal; mode->vdisplay = sdev->pdata->panel.mode.vdisplay; mode->vsync_start = sdev->pdata->panel.mode.vsync_start; mode->vsync_end = sdev->pdata->panel.mode.vsync_end; mode->vtotal = sdev->pdata->panel.mode.vtotal; mode->flags = sdev->pdata->panel.mode.flags; drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); connector->display_info.width_mm = sdev->pdata->panel.width_mm; connector->display_info.height_mm = sdev->pdata->panel.height_mm; return 1; } static int shmob_drm_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { return MODE_OK; } static struct drm_encoder * shmob_drm_connector_best_encoder(struct drm_connector *connector) { struct shmob_drm_connector *scon = to_shmob_connector(connector); return scon->encoder; } static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = shmob_drm_connector_get_modes, .mode_valid = shmob_drm_connector_mode_valid, .best_encoder = shmob_drm_connector_best_encoder, }; static void 
shmob_drm_connector_destroy(struct drm_connector *connector) { struct shmob_drm_connector *scon = to_shmob_connector(connector); shmob_drm_backlight_exit(scon); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); } static enum drm_connector_status shmob_drm_connector_detect(struct drm_connector *connector, bool force) { return connector_status_connected; } static const struct drm_connector_funcs connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = shmob_drm_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = shmob_drm_connector_destroy, }; int shmob_drm_connector_create(struct shmob_drm_device *sdev, struct drm_encoder *encoder) { struct drm_connector *connector = &sdev->connector.connector; int ret; sdev->connector.encoder = encoder; connector->display_info.width_mm = sdev->pdata->panel.width_mm; connector->display_info.height_mm = sdev->pdata->panel.height_mm; ret = drm_connector_init(sdev->ddev, connector, &connector_funcs, DRM_MODE_CONNECTOR_LVDS); if (ret < 0) return ret; drm_connector_helper_add(connector, &connector_helper_funcs); ret = drm_sysfs_connector_add(connector); if (ret < 0) goto err_cleanup; ret = shmob_drm_backlight_init(&sdev->connector); if (ret < 0) goto err_sysfs; ret = drm_mode_connector_attach_encoder(connector, encoder); if (ret < 0) goto err_backlight; connector->encoder = encoder; drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); drm_object_property_set_value(&connector->base, sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); return 0; err_backlight: shmob_drm_backlight_exit(&sdev->connector); err_sysfs: drm_sysfs_connector_remove(connector); err_cleanup: drm_connector_cleanup(connector); return ret; }
gpl-2.0
networkimprov/linux
arch/ia64/kernel/kprobes.c
2130
30380
/* * Kernel Probes (KProbes) * arch/ia64/kernel/kprobes.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2002, 2004 * Copyright (C) Intel Corporation, 2005 * * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy * <anil.s.keshavamurthy@intel.com> adapted from i386 */ #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/preempt.h> #include <linux/moduleloader.h> #include <linux/kdebug.h> #include <asm/pgtable.h> #include <asm/sections.h> #include <asm/uaccess.h> extern void jprobe_inst_return(void); DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; enum instruction_type {A, I, M, F, B, L, X, u}; static enum instruction_type bundle_encoding[32][3] = { { M, I, I }, /* 00 */ { M, I, I }, /* 01 */ { M, I, I }, /* 02 */ { M, I, I }, /* 03 */ { M, L, X }, /* 04 */ { M, L, X }, /* 05 */ { u, u, u }, /* 06 */ { u, u, u }, /* 07 */ { M, M, I }, /* 08 */ { M, M, I }, /* 09 */ { M, M, I }, /* 0A */ { M, M, I }, /* 0B */ { M, F, I }, /* 0C */ { M, F, I }, /* 0D */ { M, M, F }, /* 0E */ { M, M, F }, /* 0F */ { M, I, B }, /* 10 */ { M, I, B }, /* 11 */ { M, B, B }, /* 12 */ { M, B, B }, /* 13 */ 
{ u, u, u }, /* 14 */ { u, u, u }, /* 15 */ { B, B, B }, /* 16 */ { B, B, B }, /* 17 */ { M, M, B }, /* 18 */ { M, M, B }, /* 19 */ { u, u, u }, /* 1A */ { u, u, u }, /* 1B */ { M, F, B }, /* 1C */ { M, F, B }, /* 1D */ { u, u, u }, /* 1E */ { u, u, u }, /* 1F */ }; /* Insert a long branch code */ static void __kprobes set_brl_inst(void *from, void *to) { s64 rel = ((s64) to - (s64) from) >> 4; bundle_t *brl; brl = (bundle_t *) ((u64) from & ~0xf); brl->quad0.template = 0x05; /* [MLX](stop) */ brl->quad0.slot0 = NOP_M_INST; /* nop.m 0x0 */ brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2; brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46); /* brl.cond.sptk.many.clr rel<<4 (qp=0) */ brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff); } /* * In this function we check to see if the instruction * is IP relative instruction and update the kprobe * inst flag accordingly */ static void __kprobes update_kprobe_inst_flag(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, struct kprobe *p) { p->ainsn.inst_flag = 0; p->ainsn.target_br_reg = 0; p->ainsn.slot = slot; /* Check for Break instruction * Bits 37:40 Major opcode to be zero * Bits 27:32 X6 to be zero * Bits 32:35 X3 to be zero */ if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) { /* is a break instruction */ p->ainsn.inst_flag |= INST_FLAG_BREAK_INST; return; } if (bundle_encoding[template][slot] == B) { switch (major_opcode) { case INDIRECT_CALL_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); break; case IP_RELATIVE_PREDICT_OPCODE: case IP_RELATIVE_BRANCH_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; break; case IP_RELATIVE_CALL_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); break; } } else if (bundle_encoding[template][slot] == X) { switch (major_opcode) { case 
LONG_CALL_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); break; } } return; } /* * In this function we check to see if the instruction * (qp) cmpx.crel.ctype p1,p2=r2,r3 * on which we are inserting kprobe is cmp instruction * with ctype as unc. */ static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst) { cmp_inst_t cmp_inst; uint ctype_unc = 0; if (!((bundle_encoding[template][slot] == I) || (bundle_encoding[template][slot] == M))) goto out; if (!((major_opcode == 0xC) || (major_opcode == 0xD) || (major_opcode == 0xE))) goto out; cmp_inst.l = kprobe_inst; if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) { /* Integer compare - Register Register (A6 type)*/ if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1)) ctype_unc = 1; } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) { /* Integer compare - Immediate Register (A8 type)*/ if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1)) ctype_unc = 1; } out: return ctype_unc; } /* * In this function we check to see if the instruction * on which we are inserting kprobe is supported. 
* Returns qp value if supported * Returns -EINVAL if unsupported */ static int __kprobes unsupported_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, unsigned long addr) { int qp; qp = kprobe_inst & 0x3f; if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) { if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on cmp unc " "instruction on slot 1 at <0x%lx> " "is not supported\n", addr); return -EINVAL; } qp = 0; } else if (bundle_encoding[template][slot] == I) { if (major_opcode == 0) { /* * Check for Integer speculation instruction * - Bit 33-35 to be equal to 0x1 */ if (((kprobe_inst >> 33) & 0x7) == 1) { printk(KERN_WARNING "Kprobes on speculation inst at <0x%lx> not supported\n", addr); return -EINVAL; } /* * IP relative mov instruction * - Bit 27-35 to be equal to 0x30 */ if (((kprobe_inst >> 27) & 0x1FF) == 0x30) { printk(KERN_WARNING "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n", addr); return -EINVAL; } } else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) && (kprobe_inst & (0x1UL << 12))) { /* test bit instructions, tbit,tnat,tf * bit 33-36 to be equal to 0 * bit 12 to be equal to 1 */ if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on test bit " "instruction on slot at <0x%lx> " "is not supported\n", addr); return -EINVAL; } qp = 0; } } else if (bundle_encoding[template][slot] == B) { if (major_opcode == 7) { /* IP-Relative Predict major code is 7 */ printk(KERN_WARNING "Kprobes on IP-Relative" "Predict is not supported\n"); return -EINVAL; } else if (major_opcode == 2) { /* Indirect Predict, major code is 2 * bit 27-32 to be equal to 10 or 11 */ int x6=(kprobe_inst >> 27) & 0x3F; if ((x6 == 0x10) || (x6 == 0x11)) { printk(KERN_WARNING "Kprobes on " "Indirect Predict is not supported\n"); return -EINVAL; } } } /* kernel does not use float instruction, here for safety kprobe * will judge whether it is fcmp/flass/float approximation instruction */ else if 
(unlikely(bundle_encoding[template][slot] == F)) { if ((major_opcode == 4 || major_opcode == 5) && (kprobe_inst & (0x1 << 12))) { /* fcmp/fclass unc instruction */ if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on fcmp/fclass " "instruction on slot at <0x%lx> " "is not supported\n", addr); return -EINVAL; } qp = 0; } if ((major_opcode == 0 || major_opcode == 1) && (kprobe_inst & (0x1UL << 33))) { /* float Approximation instruction */ if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on float Approx " "instr at <0x%lx> is not supported\n", addr); return -EINVAL; } qp = 0; } } return qp; } /* * In this function we override the bundle with * the break instruction at the given slot. */ static void __kprobes prepare_break_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, struct kprobe *p, int qp) { unsigned long break_inst = BREAK_INST; bundle_t *bundle = &p->opcode.bundle; /* * Copy the original kprobe_inst qualifying predicate(qp) * to the break instruction */ break_inst |= qp; switch (slot) { case 0: bundle->quad0.slot0 = break_inst; break; case 1: bundle->quad0.slot1_p0 = break_inst; bundle->quad1.slot1_p1 = break_inst >> (64-46); break; case 2: bundle->quad1.slot2 = break_inst; break; } /* * Update the instruction flag, so that we can * emulate the instruction properly after we * single step on original instruction */ update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p); } static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot, unsigned long *kprobe_inst, uint *major_opcode) { unsigned long kprobe_inst_p0, kprobe_inst_p1; unsigned int template; template = bundle->quad0.template; switch (slot) { case 0: *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT); *kprobe_inst = bundle->quad0.slot0; break; case 1: *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT); kprobe_inst_p0 = bundle->quad0.slot1_p0; kprobe_inst_p1 = bundle->quad1.slot1_p1; *kprobe_inst = kprobe_inst_p0 | 
(kprobe_inst_p1 << (64-46)); break; case 2: *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT); *kprobe_inst = bundle->quad1.slot2; break; } } /* Returns non-zero if the addr is in the Interrupt Vector Table */ static int __kprobes in_ivt_functions(unsigned long addr) { return (addr >= (unsigned long)__start_ivt_text && addr < (unsigned long)__end_ivt_text); } static int __kprobes valid_kprobe_addr(int template, int slot, unsigned long addr) { if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) { printk(KERN_WARNING "Attempting to insert unaligned kprobe " "at 0x%lx\n", addr); return -EINVAL; } if (in_ivt_functions(addr)) { printk(KERN_WARNING "Kprobes can't be inserted inside " "IVT functions at 0x%lx\n", addr); return -EINVAL; } return 0; } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) { unsigned int i; i = atomic_add_return(1, &kcb->prev_kprobe_index); kcb->prev_kprobe[i-1].kp = kprobe_running(); kcb->prev_kprobe[i-1].status = kcb->kprobe_status; } static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { unsigned int i; i = atomic_read(&kcb->prev_kprobe_index); __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i-1].kp; kcb->kprobe_status = kcb->prev_kprobe[i-1].status; atomic_sub(1, &kcb->prev_kprobe_index); } static void __kprobes set_current_kprobe(struct kprobe *p, struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = p; } static void kretprobe_trampoline(void) { } /* * At this point the target function has been tricked into * returning into our trampoline. 
Lookup the associated instance * and then: * - call the handler function * - cleanup by marking the instance as unused * - long jump back to the original return address */ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address = ((struct fnptr *)kretprobe_trampoline)->ip; INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with a given * task either because an multiple functions in the call path * have a return probe installed on them, and/or more than one return * return probe was registered for a target function. * * We can handle this because: * - instances are always inserted at the head of the list * - when multiple return probes are registered for the same * function, the first instance's ret_addr will point to the * real return address, and all the rest will point to * kretprobe_trampoline */ hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; orig_ret_address = (unsigned long)ri->ret_addr; if (orig_ret_address != trampoline_address) /* * This is the real return address. Any other * instances associated with this task are for * other calls deeper on the call stack */ break; } regs->cr_iip = orig_ret_address; hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; if (ri->rp && ri->rp->handler) ri->rp->handler(ri, regs); orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri, &empty_rp); if (orig_ret_address != trampoline_address) /* * This is the real return address. 
Any other * instances associated with this task are for * other calls deeper on the call stack */ break; } kretprobe_assert(ri, orig_ret_address, trampoline_address); reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler * to run (and have re-enabled preemption) */ return 1; } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->b0; /* Replace the return addr with trampoline addr */ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; } /* Check the instruction in the slot is break */ static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot) { unsigned int major_opcode; unsigned int template = bundle->quad0.template; unsigned long kprobe_inst; /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ if (slot == 1 && bundle_encoding[template][1] == L) slot++; /* Get Kprobe probe instruction at given slot*/ get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); /* For break instruction, * Bits 37:40 Major opcode to be zero * Bits 27:32 X6 to be zero * Bits 32:35 X3 to be zero */ if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) { /* Not a break instruction */ return 0; } /* Is a break instruction */ return 1; } /* * In this function, we check whether the target bundle modifies IP or * it triggers an exception. If so, it cannot be boostable. 
*/ static int __kprobes can_boost(bundle_t *bundle, uint slot, unsigned long bundle_addr) { unsigned int template = bundle->quad0.template; do { if (search_exception_tables(bundle_addr + slot) || __is_ia64_break_inst(bundle, slot)) return 0; /* exception may occur in this bundle*/ } while ((++slot) < 3); template &= 0x1e; if (template >= 0x10 /* including B unit */ || template == 0x04 /* including X unit */ || template == 0x06) /* undefined */ return 0; return 1; } /* Prepare long jump bundle and disables other boosters if need */ static void __kprobes prepare_booster(struct kprobe *p) { unsigned long addr = (unsigned long)p->addr & ~0xFULL; unsigned int slot = (unsigned long)p->addr & 0xf; struct kprobe *other_kp; if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) { set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1); p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE; } /* disables boosters in previous slots */ for (; addr < (unsigned long)p->addr; addr++) { other_kp = get_kprobe((void *)addr); if (other_kp) other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE; } } int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long addr = (unsigned long) p->addr; unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); unsigned long kprobe_inst=0; unsigned int slot = addr & 0xf, template, major_opcode = 0; bundle_t *bundle; int qp; bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle; template = bundle->quad0.template; if(valid_kprobe_addr(template, slot, addr)) return -EINVAL; /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ if (slot == 1 && bundle_encoding[template][1] == L) slot++; /* Get kprobe_inst and major_opcode from the bundle */ get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr); if (qp < 0) return -EINVAL; p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) return -ENOMEM; memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t)); memcpy(p->ainsn.insn, 
kprobe_addr, sizeof(kprobe_opcode_t)); prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp); prepare_booster(p); return 0; } void __kprobes arch_arm_kprobe(struct kprobe *p) { unsigned long arm_addr; bundle_t *src, *dest; arm_addr = ((unsigned long)p->addr) & ~0xFUL; dest = &((kprobe_opcode_t *)arm_addr)->bundle; src = &p->opcode.bundle; flush_icache_range((unsigned long)p->ainsn.insn, (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t) * MAX_INSN_SIZE); switch (p->ainsn.slot) { case 0: dest->quad0.slot0 = src->quad0.slot0; break; case 1: dest->quad1.slot1_p1 = src->quad1.slot1_p1; break; case 2: dest->quad1.slot2 = src->quad1.slot2; break; } flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_disarm_kprobe(struct kprobe *p) { unsigned long arm_addr; bundle_t *src, *dest; arm_addr = ((unsigned long)p->addr) & ~0xFUL; dest = &((kprobe_opcode_t *)arm_addr)->bundle; /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */ src = &p->ainsn.insn->bundle; switch (p->ainsn.slot) { case 0: dest->quad0.slot0 = src->quad0.slot0; break; case 1: dest->quad1.slot1_p1 = src->quad1.slot1_p1; break; case 2: dest->quad1.slot2 = src->quad1.slot2; break; } flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); p->ainsn.insn = NULL; } } /* * We are resuming execution after a single step fault, so the pt_regs * structure reflects the register state after we executed the instruction * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust * the ip to point back to the original stack address. To set the IP address * to original stack address, handle the case where we need to fixup the * relative IP address and/or fixup branch register. 
*/ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) { unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle); unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; unsigned long template; int slot = ((unsigned long)p->addr & 0xf); template = p->ainsn.insn->bundle.quad0.template; if (slot == 1 && bundle_encoding[template][1] == L) slot = 2; if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) { if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) { /* Fix relative IP address */ regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr; } if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) { /* * Fix target branch register, software convention is * to use either b0 or b6 or b7, so just checking * only those registers */ switch (p->ainsn.target_br_reg) { case 0: if ((regs->b0 == bundle_addr) || (regs->b0 == bundle_addr + 0x10)) { regs->b0 = (regs->b0 - bundle_addr) + resume_addr; } break; case 6: if ((regs->b6 == bundle_addr) || (regs->b6 == bundle_addr + 0x10)) { regs->b6 = (regs->b6 - bundle_addr) + resume_addr; } break; case 7: if ((regs->b7 == bundle_addr) || (regs->b7 == bundle_addr + 0x10)) { regs->b7 = (regs->b7 - bundle_addr) + resume_addr; } break; } /* end switch */ } goto turn_ss_off; } if (slot == 2) { if (regs->cr_iip == bundle_addr + 0x10) { regs->cr_iip = resume_addr + 0x10; } } else { if (regs->cr_iip == bundle_addr) { regs->cr_iip = resume_addr; } } turn_ss_off: /* Turn off Single Step bit */ ia64_psr(regs)->ss = 0; } static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) { unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle; unsigned long slot = (unsigned long)p->addr & 0xf; /* single step inline if break instruction */ if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST) regs->cr_iip = (unsigned long)p->addr & ~0xFULL; else regs->cr_iip = bundle_addr & ~0xFULL; if (slot > 2) slot = 0; ia64_psr(regs)->ri = slot; /* turn on single stepping */ ia64_psr(regs)->ss = 1; } static 
int __kprobes is_ia64_break_inst(struct pt_regs *regs) { unsigned int slot = ia64_psr(regs)->ri; unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; bundle_t bundle; memcpy(&bundle, kprobe_addr, sizeof(bundle_t)); return __is_ia64_break_inst(&bundle, slot); } static int __kprobes pre_kprobes_handler(struct die_args *args) { struct kprobe *p; int ret = 0; struct pt_regs *regs = args->regs; kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs); struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); kcb = get_kprobe_ctlblk(); /* Handle recursion cases */ if (kprobe_running()) { p = get_kprobe(addr); if (p) { if ((kcb->kprobe_status == KPROBE_HIT_SS) && (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) { ia64_psr(regs)->ss = 0; goto no_kprobe; } /* We have reentered the pre_kprobe_handler(), since * another probe was hit while within the handler. * We here save the original kprobes variables and * just single step on the instruction of the new probe * without calling any user handlers. */ save_previous_kprobe(kcb); set_current_kprobe(p, kcb); kprobes_inc_nmissed_count(p); prepare_ss(p, regs); kcb->kprobe_status = KPROBE_REENTER; return 1; } else if (args->err == __IA64_BREAK_JPROBE) { /* * jprobe instrumented function just completed */ p = __get_cpu_var(current_kprobe); if (p->break_handler && p->break_handler(p, regs)) { goto ss_probe; } } else if (!is_ia64_break_inst(regs)) { /* The breakpoint instruction was removed by * another cpu right after we hit, no further * handling of this interrupt is appropriate */ ret = 1; goto no_kprobe; } else { /* Not our break */ goto no_kprobe; } } p = get_kprobe(addr); if (!p) { if (!is_ia64_break_inst(regs)) { /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed * either a probepoint or a debugger breakpoint * at this address. In either case, no further * handling of this interrupt is appropriate. 
*/ ret = 1; } /* Not one of our break, let kernel handle it */ goto no_kprobe; } set_current_kprobe(p, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (p->pre_handler && p->pre_handler(p, regs)) /* * Our pre-handler is specifically requesting that we just * do a return. This is used for both the jprobe pre-handler * and the kretprobe trampoline */ return 1; ss_probe: #if !defined(CONFIG_PREEMPT) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ ia64_psr(regs)->ri = p->ainsn.slot; regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL; /* turn single stepping off */ ia64_psr(regs)->ss = 0; reset_current_kprobe(); preempt_enable_no_resched(); return 1; } #endif prepare_ss(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; return 1; no_kprobe: preempt_enable_no_resched(); return ret; } static int __kprobes post_kprobes_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (!cur) return 0; if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; cur->post_handler(cur, regs, 0); } resume_execution(cur, regs); /*Restore back the original saved kprobes variables and continue. */ if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); goto out; } reset_current_kprobe(); out: preempt_enable_no_resched(); return 1; } int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); switch(kcb->kprobe_status) { case KPROBE_HIT_SS: case KPROBE_REENTER: /* * We are here because the instruction being single * stepped caused a page fault. We reset the current * kprobe and the instruction pointer points back to * the probe address and allow the page fault handler * to continue as a normal page fault. 
*/ regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL; ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf; if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); else reset_current_kprobe(); preempt_enable_no_resched(); break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, * we can also use npre/npostfault count for accouting * these specific fault cases. */ kprobes_inc_nmissed_count(cur); /* * We come here because instructions in the pre/post * handler caused the page_fault, this could happen * if handler tries to access user space by * copy_from_user(), get_user() etc. Let the * user-specified handler try to fix it first. */ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) return 1; /* * In case the user-specified fault handler returned * zero, try to fix up. */ if (ia64_done_with_exception(regs)) return 1; /* * Let ia64_do_page_fault() fix it. */ break; default: break; } return 0; } int __kprobes kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; if (args->regs && user_mode(args->regs)) return ret; switch(val) { case DIE_BREAK: /* err is break number from ia64_bad_break() */ if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12) || args->err == __IA64_BREAK_JPROBE || args->err == 0) if (pre_kprobes_handler(args)) ret = NOTIFY_STOP; break; case DIE_FAULT: /* err is vector number from ia64_fault() */ if (args->err == 36) if (post_kprobes_handler(args->regs)) ret = NOTIFY_STOP; break; default: break; } return ret; } struct param_bsp_cfm { unsigned long ip; unsigned long *bsp; unsigned long cfm; }; static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg) { unsigned long ip; struct param_bsp_cfm *lp = arg; do { unw_get_ip(info, &ip); if (ip == 0) break; if (ip == lp->ip) { unw_get_bsp(info, (unsigned long*)&lp->bsp); unw_get_cfm(info, (unsigned long*)&lp->cfm); 
return; } } while (unw_unwind(info) >= 0); lp->bsp = NULL; lp->cfm = 0; return; } unsigned long arch_deref_entry_point(void *entry) { return ((struct fnptr *)entry)->ip; } int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); unsigned long addr = arch_deref_entry_point(jp->entry); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); struct param_bsp_cfm pa; int bytes; /* * Callee owns the argument space and could overwrite it, eg * tail call optimization. So to be absolutely safe * we save the argument space before transferring the control * to instrumented jprobe function which runs in * the process context */ pa.ip = regs->cr_iip; unw_init_running(ia64_get_bsp_cfm, &pa); bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f) - (char *)pa.bsp; memcpy( kcb->jprobes_saved_stacked_regs, pa.bsp, bytes ); kcb->bsp = pa.bsp; kcb->cfm = pa.cfm; /* save architectural state */ kcb->jprobe_saved_regs = *regs; /* after rfi, execute the jprobe instrumented function */ regs->cr_iip = addr & ~0xFULL; ia64_psr(regs)->ri = addr & 0xf; regs->r1 = ((struct fnptr *)(jp->entry))->gp; /* * fix the return address to our jprobe_inst_return() function * in the jprobes.S file */ regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip; return 1; } /* ia64 does not need this */ void __kprobes jprobe_return(void) { } int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); int bytes; /* restoring architectural state */ *regs = kcb->jprobe_saved_regs; /* restoring the original argument space */ flush_register_stack(); bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f) - (char *)kcb->bsp; memcpy( kcb->bsp, kcb->jprobes_saved_stacked_regs, bytes ); invalidate_stacked_regs(); preempt_enable_no_resched(); return 1; } static struct kprobe trampoline_p = { .pre_handler = trampoline_probe_handler }; int __init arch_init_kprobes(void) { 
trampoline_p.addr = (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip; return register_kprobe(&trampoline_p); } int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip) return 1; return 0; }
gpl-2.0
blazingwolf/HTC_Fireball_Kernel
net/ipv4/ip_input.c
2386
13305
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The Internet Protocol (IP) module. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Donald Becker, <becker@super.org> * Alan Cox, <alan@lxorguk.ukuu.org.uk> * Richard Underwood * Stefan Becker, <stefanb@yello.ping.de> * Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * * * Fixes: * Alan Cox : Commented a couple of minor bits of surplus code * Alan Cox : Undefining IP_FORWARD doesn't include the code * (just stops a compiler warning). * Alan Cox : Frames with >=MAX_ROUTE record routes, strict routes or loose routes * are junked rather than corrupting things. * Alan Cox : Frames to bad broadcast subnets are dumped * We used to process them non broadcast and * boy could that cause havoc. * Alan Cox : ip_forward sets the free flag on the * new frame it queues. Still crap because * it copies the frame but at least it * doesn't eat memory too. * Alan Cox : Generic queue code and memory fixes. * Fred Van Kempen : IP fragment support (borrowed from NET2E) * Gerhard Koerting: Forward fragmented frames correctly. * Gerhard Koerting: Fixes to my fix of the above 8-). * Gerhard Koerting: IP interface addressing fix. * Linus Torvalds : More robustness checks * Alan Cox : Even more checks: Still not as robust as it ought to be * Alan Cox : Save IP header pointer for later * Alan Cox : ip option setting * Alan Cox : Use ip_tos/ip_ttl settings * Alan Cox : Fragmentation bogosity removed * (Thanks to Mark.Bush@prg.ox.ac.uk) * Dmitry Gorodchanin : Send of a raw packet crash fix. * Alan Cox : Silly ip bug when an overlength * fragment turns up. Now frees the * queue. * Linus Torvalds/ : Memory leakage on fragmentation * Alan Cox : handling. * Gerhard Koerting: Forwarding uses IP priority hints * Teemu Rantanen : Fragment problems. 
* Alan Cox : General cleanup, comments and reformat * Alan Cox : SNMP statistics * Alan Cox : BSD address rule semantics. Also see * UDP as there is a nasty checksum issue * if you do things the wrong way. * Alan Cox : Always defrag, moved IP_FORWARD to the config.in file * Alan Cox : IP options adjust sk->priority. * Pedro Roque : Fix mtu/length error in ip_forward. * Alan Cox : Avoid ip_chk_addr when possible. * Richard Underwood : IP multicasting. * Alan Cox : Cleaned up multicast handlers. * Alan Cox : RAW sockets demultiplex in the BSD style. * Gunther Mayer : Fix the SNMP reporting typo * Alan Cox : Always in group 224.0.0.1 * Pauline Middelink : Fast ip_checksum update when forwarding * Masquerading support. * Alan Cox : Multicast loopback error for 224.0.0.1 * Alan Cox : IP_MULTICAST_LOOP option. * Alan Cox : Use notifiers. * Bjorn Ekwall : Removed ip_csum (from slhc.c too) * Bjorn Ekwall : Moved ip_fast_csum to ip.h (inline!) * Stefan Becker : Send out ICMP HOST REDIRECT * Arnt Gulbrandsen : ip_build_xmit * Alan Cox : Per socket routing cache * Alan Cox : Fixed routing cache, added header cache. * Alan Cox : Loopback didn't work right in original ip_build_xmit - fixed it. * Alan Cox : Only send ICMP_REDIRECT if src/dest are the same net. * Alan Cox : Incoming IP option handling. * Alan Cox : Set saddr on raw output frames as per BSD. * Alan Cox : Stopped broadcast source route explosions. * Alan Cox : Can disable source routing * Takeshi Sone : Masquerading didn't work. * Dave Bonn,Alan Cox : Faster IP forwarding whenever possible. * Alan Cox : Memory leaks, tramples, misc debugging. * Alan Cox : Fixed multicast (by popular demand 8)) * Alan Cox : Fixed forwarding (by even more popular demand 8)) * Alan Cox : Fixed SNMP statistics [I think] * Gerhard Koerting : IP fragmentation forwarding fix * Alan Cox : Device lock against page fault. * Alan Cox : IP_HDRINCL facility. 
* Werner Almesberger : Zero fragment bug * Alan Cox : RAW IP frame length bug * Alan Cox : Outgoing firewall on build_xmit * A.N.Kuznetsov : IP_OPTIONS support throughout the kernel * Alan Cox : Multicast routing hooks * Jos Vos : Do accounting *before* call_in_firewall * Willy Konynenberg : Transparent proxying support * * * * To Fix: * IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient * and could be made very efficient with the addition of some virtual memory hacks to permit * the allocation of a buffer that can then be 'grown' by twiddling page tables. * Output fragmentation wants updating along with the buffer management to use a single * interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet * output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause * fragmentation anyway. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #include <asm/system.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/net.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <net/snmp.h> #include <net/ip.h> #include <net/protocol.h> #include <net/route.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/arp.h> #include <net/icmp.h> #include <net/raw.h> #include <net/checksum.h> #include <linux/netfilter_ipv4.h> #include <net/xfrm.h> #include <linux/mroute.h> #include <linux/netlink.h> /* * Process Router Attention IP option (RFC 2113) */ int ip_call_ra_chain(struct sk_buff *skb) { struct ip_ra_chain *ra; u8 protocol = ip_hdr(skb)->protocol; struct sock *last = NULL; struct net_device *dev = skb->dev; for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) { struct sock *sk = ra->sk; /* If socket is bound to an interface, only report * the packet if it came from that interface. */ if (sk && inet_sk(sk)->inet_num == protocol && (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dev->ifindex) && net_eq(sock_net(sk), dev_net(dev))) { if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) return 1; } if (last) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) raw_rcv(last, skb2); } last = sk; } } if (last) { raw_rcv(last, skb); return 1; } return 0; } static int ip_local_deliver_finish(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); __skb_pull(skb, ip_hdrlen(skb)); /* Point into the IP datagram, just past the header. 
*/ skb_reset_transport_header(skb); rcu_read_lock(); { int protocol = ip_hdr(skb)->protocol; int hash, raw; const struct net_protocol *ipprot; resubmit: raw = raw_local_deliver(skb, protocol); hash = protocol & (MAX_INET_PROTOS - 1); ipprot = rcu_dereference(inet_protos[hash]); if (ipprot != NULL) { int ret; if (!net_eq(net, &init_net) && !ipprot->netns_ok) { if (net_ratelimit()) printk("%s: proto %d isn't netns-ready\n", __func__, protocol); kfree_skb(skb); goto out; } if (!ipprot->no_policy) { if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { kfree_skb(skb); goto out; } nf_reset(skb); } ret = ipprot->handler(skb); if (ret < 0) { protocol = -ret; goto resubmit; } IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); } else { if (!raw) { if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0); } } else IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); kfree_skb(skb); } } out: rcu_read_unlock(); return 0; } /* * Deliver IP Packets to the higher protocol layers. */ int ip_local_deliver(struct sk_buff *skb) { /* * Reassemble IP fragments. */ if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER)) return 0; } return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL, ip_local_deliver_finish); } static inline int ip_rcv_options(struct sk_buff *skb) { struct ip_options *opt; const struct iphdr *iph; struct net_device *dev = skb->dev; /* It looks as overkill, because not all IP options require packet mangling. But it is the easiest for now, especially taking into account that combination of IP options and running sniffer is extremely rare condition. 
--ANK (980813) */ if (skb_cow(skb, skb_headroom(skb))) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); goto drop; } iph = ip_hdr(skb); opt = &(IPCB(skb)->opt); opt->optlen = iph->ihl*4 - sizeof(struct iphdr); if (ip_options_compile(dev_net(dev), opt, skb)) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS); goto drop; } if (unlikely(opt->srr)) { struct in_device *in_dev = __in_dev_get_rcu(dev); if (in_dev) { if (!IN_DEV_SOURCE_ROUTE(in_dev)) { if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) printk(KERN_INFO "source route option %pI4 -> %pI4\n", &iph->saddr, &iph->daddr); goto drop; } } if (ip_options_rcv_srr(skb)) goto drop; } return 0; drop: return -1; } static int ip_rcv_finish(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; /* * Initialise the virtual path cache for the packet. It describes * how the packet travels inside Linux networking. */ if (skb_dst(skb) == NULL) { int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, iph->tos, skb->dev); if (unlikely(err)) { if (err == -EHOSTUNREACH) IP_INC_STATS_BH(dev_net(skb->dev), IPSTATS_MIB_INADDRERRORS); else if (err == -ENETUNREACH) IP_INC_STATS_BH(dev_net(skb->dev), IPSTATS_MIB_INNOROUTES); else if (err == -EXDEV) NET_INC_STATS_BH(dev_net(skb->dev), LINUX_MIB_IPRPFILTER); goto drop; } } #ifdef CONFIG_IP_ROUTE_CLASSID if (unlikely(skb_dst(skb)->tclassid)) { struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct); u32 idx = skb_dst(skb)->tclassid; st[idx&0xFF].o_packets++; st[idx&0xFF].o_bytes += skb->len; st[(idx>>16)&0xFF].i_packets++; st[(idx>>16)&0xFF].i_bytes += skb->len; } #endif if (iph->ihl > 5 && ip_rcv_options(skb)) goto drop; rt = skb_rtable(skb); if (rt->rt_type == RTN_MULTICAST) { IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST, skb->len); } else if (rt->rt_type == RTN_BROADCAST) IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST, skb->len); return dst_input(skb); drop: kfree_skb(skb); return NET_RX_DROP; } /* * Main IP Receive 
routine. */ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { const struct iphdr *iph; u32 len; /* When the interface is in promisc. mode, drop all the crap * that it receives, do not try to analyse it. */ if (skb->pkt_type == PACKET_OTHERHOST) goto drop; IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); goto out; } if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto inhdr_error; iph = ip_hdr(skb); /* * RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum. * * Is the datagram acceptable? * * 1. Length at least the size of an ip header * 2. Version of 4 * 3. Checksums correctly. [Speed optimisation for later, skip loopback checksums] * 4. Doesn't have a bogus length */ if (iph->ihl < 5 || iph->version != 4) goto inhdr_error; if (!pskb_may_pull(skb, iph->ihl*4)) goto inhdr_error; iph = ip_hdr(skb); if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) goto inhdr_error; len = ntohs(iph->tot_len); if (skb->len < len) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } else if (len < (iph->ihl*4)) goto inhdr_error; /* Our transport medium may have padded the buffer out. Now we know it * is IP we can trim to the true length of the frame. * Note this now means skb->len holds ntohs(iph->tot_len). */ if (pskb_trim_rcsum(skb, len)) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); goto drop; } /* Remove any debris in the socket control block */ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); /* Must drop socket now because of tproxy. */ skb_orphan(skb); return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL, ip_rcv_finish); inhdr_error: IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS); drop: kfree_skb(skb); out: return NET_RX_DROP; }
gpl-2.0
BlazeDevs/android_kernel_samsung_msm8660-common
drivers/media/video/sh_mobile_ceu_camera.c
2386
59370
/* * V4L2 Driver for SuperH Mobile CEU interface * * Copyright (C) 2008 Magnus Damm * * Based on V4L2 Driver for PXA camera host - "pxa_camera.c", * * Copyright (C) 2006, Sascha Hauer, Pengutronix * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/version.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/videodev2.h> #include <linux/pm_runtime.h> #include <linux/sched.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/soc_camera.h> #include <media/sh_mobile_ceu.h> #include <media/videobuf2-dma-contig.h> #include <media/v4l2-mediabus.h> #include <media/soc_mediabus.h> /* register offsets for sh7722 / sh7723 */ #define CAPSR 0x00 /* Capture start register */ #define CAPCR 0x04 /* Capture control register */ #define CAMCR 0x08 /* Capture interface control register */ #define CMCYR 0x0c /* Capture interface cycle register */ #define CAMOR 0x10 /* Capture interface offset register */ #define CAPWR 0x14 /* Capture interface width register */ #define CAIFR 0x18 /* Capture interface input format register */ #define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */ #define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */ #define CRCNTR 0x28 /* CEU register control register */ #define CRCMPR 0x2c /* CEU register forcible control register */ #define CFLCR 
0x30 /* Capture filter control register */ #define CFSZR 0x34 /* Capture filter size clip register */ #define CDWDR 0x38 /* Capture destination width register */ #define CDAYR 0x3c /* Capture data address Y register */ #define CDACR 0x40 /* Capture data address C register */ #define CDBYR 0x44 /* Capture data bottom-field address Y register */ #define CDBCR 0x48 /* Capture data bottom-field address C register */ #define CBDSR 0x4c /* Capture bundle destination size register */ #define CFWCR 0x5c /* Firewall operation control register */ #define CLFCR 0x60 /* Capture low-pass filter control register */ #define CDOCR 0x64 /* Capture data output control register */ #define CDDCR 0x68 /* Capture data complexity level register */ #define CDDAR 0x6c /* Capture data complexity level address register */ #define CEIER 0x70 /* Capture event interrupt enable register */ #define CETCR 0x74 /* Capture event flag clear register */ #define CSTSR 0x7c /* Capture status register */ #define CSRTR 0x80 /* Capture software reset register */ #define CDSSR 0x84 /* Capture data size register */ #define CDAYR2 0x90 /* Capture data address Y register 2 */ #define CDACR2 0x94 /* Capture data address C register 2 */ #define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */ #define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */ #undef DEBUG_GEOMETRY #ifdef DEBUG_GEOMETRY #define dev_geo dev_info #else #define dev_geo dev_dbg #endif /* per video frame buffer */ struct sh_mobile_ceu_buffer { struct vb2_buffer vb; /* v4l buffer must be first */ struct list_head queue; enum v4l2_mbus_pixelcode code; }; struct sh_mobile_ceu_dev { struct soc_camera_host ici; struct soc_camera_device *icd; unsigned int irq; void __iomem *base; unsigned long video_limit; spinlock_t lock; /* Protects video buffer lists */ struct list_head capture; struct vb2_buffer *active; struct vb2_alloc_ctx *alloc_ctx; struct sh_mobile_ceu_info *pdata; struct completion complete; u32 cflcr; enum 
v4l2_field field;		/* current field mode (progressive/interlaced) */
	int sequence;			/* frame sequence counter for v4l2_buf */

	unsigned int image_mode:1;	/* image capture (vs data fetch) mode */
	unsigned int is_16bit:1;	/* 16-bit parallel bus in use */
	unsigned int frozen:1;		/* suspend-style freeze: irq completes 'complete' */
};

/* Per-client geometry state, hung off icd->host_priv */
struct sh_mobile_ceu_cam {
	/* CEU offsets within scaled by the CEU camera output */
	unsigned int ceu_left;
	unsigned int ceu_top;
	/* Client output, as seen by the CEU */
	unsigned int width;
	unsigned int height;
	/*
	 * User window from S_CROP / G_CROP, produced by client cropping and
	 * scaling, CEU scaling and CEU cropping, mapped back onto the client
	 * input window
	 */
	struct v4l2_rect subrect;
	/* Camera cropping rectangle */
	struct v4l2_rect rect;
	const struct soc_mbus_pixelfmt *extra_fmt;
	enum v4l2_mbus_pixelcode code;
};

/* Convert a generic vb2 buffer to our wrapping buffer type */
static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
{
	return container_of(vb, struct sh_mobile_ceu_buffer, vb);
}

/*
 * Build the SOCAM_* bus capability mask of this CEU instance.  Data width
 * comes from platform flags; returns 0 if no width was configured.
 */
static unsigned long make_bus_param(struct sh_mobile_ceu_dev *pcdev)
{
	unsigned long flags;

	flags = SOCAM_MASTER |
		SOCAM_PCLK_SAMPLE_RISING |
		SOCAM_HSYNC_ACTIVE_HIGH |
		SOCAM_HSYNC_ACTIVE_LOW |
		SOCAM_VSYNC_ACTIVE_HIGH |
		SOCAM_VSYNC_ACTIVE_LOW |
		SOCAM_DATA_ACTIVE_HIGH;

	if (pcdev->pdata->flags & SH_CEU_FLAG_USE_8BIT_BUS)
		flags |= SOCAM_DATAWIDTH_8;

	if (pcdev->pdata->flags & SH_CEU_FLAG_USE_16BIT_BUS)
		flags |= SOCAM_DATAWIDTH_16;

	if (flags & SOCAM_DATAWIDTH_MASK)
		return flags;

	return 0;
}

/* MMIO register write accessor */
static void ceu_write(struct sh_mobile_ceu_dev *priv,
		      unsigned long reg_offs, u32 data)
{
	iowrite32(data, priv->base + reg_offs);
}

/* MMIO register read accessor */
static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
{
	return ioread32(priv->base + reg_offs);
}

/*
 * Software-reset the CEU and wait (bounded polling) for both the
 * capture-operating (CSTSR.CPTON) and reset (CAPSR.CPKIL) bits to clear.
 * Returns 0 on success, -EIO if either poll times out.
 */
static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
{
	int i, success = 0;
	struct soc_camera_device *icd = pcdev->icd;

	ceu_write(pcdev, CAPSR, 1 << 16); /* reset */

	/* wait CSTSR.CPTON bit */
	for (i = 0; i < 1000; i++) {
		if (!(ceu_read(pcdev, CSTSR) & 1)) {
			success++;
			break;
		}
		udelay(1);
	}

	/* wait CAPSR.CPKIL bit */
	for (i = 0; i < 1000; i++) {
		if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) {
			success++;
			break;
		}
		udelay(1);
	}

	/* both polls must have succeeded */
	if (2 != success) {
		dev_warn(&icd->dev, "soft reset time out\n");
		return -EIO;
	}

	return 0;
}

/*
 * Videobuf operations
 */

/*
 * vb2 .queue_setup: compute plane count/size from the current format and
 * clamp the buffer count to the optional video_limit memory budget.
 */
static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
			unsigned int *count, unsigned int *num_planes,
			unsigned long sizes[], void *alloc_ctxs[])
{
	struct soc_camera_device *icd = container_of(vq,
			struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
						icd->current_fmt->host_fmt);

	if (bytes_per_line < 0)
		return bytes_per_line;

	*num_planes = 1;

	pcdev->sequence = 0;
	sizes[0] = bytes_per_line * icd->user_height;
	alloc_ctxs[0] = pcdev->alloc_ctx;

	if (!*count)
		*count = 2;

	if (pcdev->video_limit) {
		if (PAGE_ALIGN(sizes[0]) * *count > pcdev->video_limit)
			*count = pcdev->video_limit / PAGE_ALIGN(sizes[0]);
	}

	dev_dbg(icd->dev.parent, "count=%d, size=%lu\n", *count, sizes[0]);

	return 0;
}

#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
#define CEU_CEIER_VBP (1 << 20) /* vbp error */
#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)

/*
 * return value doesn't reflex the success/failure to queue the new buffer,
 * but rather the status of the previous buffer.
 */
static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
{
	struct soc_camera_device *icd = pcdev->icd;
	dma_addr_t phys_addr_top, phys_addr_bottom;
	unsigned long top1, top2;
	unsigned long bottom1, bottom2;
	u32 status;
	int ret = 0;

	/*
	 * The hardware is _very_ picky about this sequence. Especially
	 * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
	 * several not-so-well documented interrupt sources in CETCR.
*/ ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK); status = ceu_read(pcdev, CETCR); ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC); if (!pcdev->frozen) ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK); ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP); ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW); /* * When a VBP interrupt occurs, a capture end interrupt does not occur * and the image of that frame is not captured correctly. So, soft reset * is needed here. */ if (status & CEU_CEIER_VBP) { sh_mobile_ceu_soft_reset(pcdev); ret = -EIO; } if (pcdev->frozen) { complete(&pcdev->complete); return ret; } if (!pcdev->active) return ret; if (V4L2_FIELD_INTERLACED_BT == pcdev->field) { top1 = CDBYR; top2 = CDBCR; bottom1 = CDAYR; bottom2 = CDACR; } else { top1 = CDAYR; top2 = CDACR; bottom1 = CDBYR; bottom2 = CDBCR; } phys_addr_top = vb2_dma_contig_plane_paddr(pcdev->active, 0); ceu_write(pcdev, top1, phys_addr_top); if (V4L2_FIELD_NONE != pcdev->field) { phys_addr_bottom = phys_addr_top + icd->user_width; ceu_write(pcdev, bottom1, phys_addr_bottom); } switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: phys_addr_top += icd->user_width * icd->user_height; ceu_write(pcdev, top2, phys_addr_top); if (V4L2_FIELD_NONE != pcdev->field) { phys_addr_bottom = phys_addr_top + icd->user_width; ceu_write(pcdev, bottom2, phys_addr_bottom); } } ceu_write(pcdev, CAPSR, 0x1); /* start capture */ return ret; } static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb) { struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); struct sh_mobile_ceu_buffer *buf; int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, icd->current_fmt->host_fmt); unsigned long size; if (bytes_per_line < 0) return bytes_per_line; buf = to_ceu_vb(vb); dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", 
__func__, vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0)); /* Added list head initialization on alloc */ WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb); #ifdef DEBUG /* * This can be useful if you want to see if we actually fill * the buffer with something */ if (vb2_plane_vaddr(vb, 0)) memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0)); #endif BUG_ON(NULL == icd->current_fmt); size = icd->user_height * bytes_per_line; if (vb2_plane_size(vb, 0) < size) { dev_err(icd->dev.parent, "Buffer too small (%lu < %lu)\n", vb2_plane_size(vb, 0), size); return -ENOBUFS; } vb2_set_plane_payload(vb, 0, size); return 0; } static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb) { struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", __func__, vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0)); spin_lock_irq(&pcdev->lock); list_add_tail(&buf->queue, &pcdev->capture); if (!pcdev->active) { /* * Because there were no active buffer at this moment, * we are not interested in the return value of * sh_mobile_ceu_capture here. 
*/ pcdev->active = vb; sh_mobile_ceu_capture(pcdev); } spin_unlock_irq(&pcdev->lock); } static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) { struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); struct sh_mobile_ceu_dev *pcdev = ici->priv; spin_lock_irq(&pcdev->lock); if (pcdev->active == vb) { /* disable capture (release DMA buffer), reset */ ceu_write(pcdev, CAPSR, 1 << 16); pcdev->active = NULL; } /* Doesn't hurt also if the list is empty */ list_del_init(&buf->queue); spin_unlock_irq(&pcdev->lock); } static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb) { /* This is for locking debugging only */ INIT_LIST_HEAD(&to_ceu_vb(vb)->queue); return 0; } static int sh_mobile_ceu_stop_streaming(struct vb2_queue *q) { struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct list_head *buf_head, *tmp; spin_lock_irq(&pcdev->lock); pcdev->active = NULL; list_for_each_safe(buf_head, tmp, &pcdev->capture) list_del_init(buf_head); spin_unlock_irq(&pcdev->lock); return sh_mobile_ceu_soft_reset(pcdev); } static struct vb2_ops sh_mobile_ceu_videobuf_ops = { .queue_setup = sh_mobile_ceu_videobuf_setup, .buf_prepare = sh_mobile_ceu_videobuf_prepare, .buf_queue = sh_mobile_ceu_videobuf_queue, .buf_cleanup = sh_mobile_ceu_videobuf_release, .buf_init = sh_mobile_ceu_videobuf_init, .wait_prepare = soc_camera_unlock, .wait_finish = soc_camera_lock, .stop_streaming = sh_mobile_ceu_stop_streaming, }; static irqreturn_t sh_mobile_ceu_irq(int irq, void *data) { struct sh_mobile_ceu_dev *pcdev = data; struct vb2_buffer *vb; int ret; spin_lock(&pcdev->lock); vb = pcdev->active; if (!vb) /* Stale interrupt from a released buffer */ goto out; 
list_del_init(&to_ceu_vb(vb)->queue); if (!list_empty(&pcdev->capture)) pcdev->active = &list_entry(pcdev->capture.next, struct sh_mobile_ceu_buffer, queue)->vb; else pcdev->active = NULL; ret = sh_mobile_ceu_capture(pcdev); do_gettimeofday(&vb->v4l2_buf.timestamp); if (!ret) { vb->v4l2_buf.field = pcdev->field; vb->v4l2_buf.sequence = pcdev->sequence++; } vb2_buffer_done(vb, ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); out: spin_unlock(&pcdev->lock); return IRQ_HANDLED; } /* Called with .video_lock held */ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; int ret; if (pcdev->icd) return -EBUSY; dev_info(icd->dev.parent, "SuperH Mobile CEU driver attached to camera %d\n", icd->devnum); pm_runtime_get_sync(ici->v4l2_dev.dev); ret = sh_mobile_ceu_soft_reset(pcdev); if (!ret) pcdev->icd = icd; return ret; } /* Called with .video_lock held */ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; BUG_ON(icd != pcdev->icd); /* disable capture, disable interrupts */ ceu_write(pcdev, CEIER, 0); sh_mobile_ceu_soft_reset(pcdev); /* make sure active buffer is canceled */ spin_lock_irq(&pcdev->lock); if (pcdev->active) { list_del_init(&to_ceu_vb(pcdev->active)->queue); vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR); pcdev->active = NULL; } spin_unlock_irq(&pcdev->lock); pm_runtime_put_sync(ici->v4l2_dev.dev); dev_info(icd->dev.parent, "SuperH Mobile CEU driver detached from camera %d\n", icd->devnum); pcdev->icd = NULL; } /* * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)" * in SH7722 Hardware Manual */ static unsigned int size_dst(unsigned int src, unsigned int scale) { unsigned int mant_pre = scale >> 12; if (!src || !scale) return src; return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 
1) * mant_pre * 4096 / scale + 1; } static u16 calc_scale(unsigned int src, unsigned int *dst) { u16 scale; if (src == *dst) return 0; scale = (src * 4096 / *dst) & ~7; while (scale > 4096 && size_dst(src, scale) < *dst) scale -= 8; *dst = size_dst(src, scale); return scale; } /* rect is guaranteed to not exceed the scaled camera rectangle */ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_cam *cam = icd->host_priv; struct sh_mobile_ceu_dev *pcdev = ici->priv; unsigned int height, width, cdwdr_width, in_width, in_height; unsigned int left_offset, top_offset; u32 camor; dev_geo(icd->dev.parent, "Crop %ux%u@%u:%u\n", icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); left_offset = cam->ceu_left; top_offset = cam->ceu_top; /* CEU cropping (CFSZR) is applied _after_ the scaling filter (CFLCR) */ if (pcdev->image_mode) { in_width = cam->width; if (!pcdev->is_16bit) { in_width *= 2; left_offset *= 2; } width = icd->user_width; cdwdr_width = icd->user_width; } else { int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, icd->current_fmt->host_fmt); unsigned int w_factor; width = icd->user_width; switch (icd->current_fmt->host_fmt->packing) { case SOC_MBUS_PACKING_2X8_PADHI: w_factor = 2; break; default: w_factor = 1; } in_width = cam->width * w_factor; left_offset = left_offset * w_factor; if (bytes_per_line < 0) cdwdr_width = icd->user_width; else cdwdr_width = bytes_per_line; } height = icd->user_height; in_height = cam->height; if (V4L2_FIELD_NONE != pcdev->field) { height /= 2; in_height /= 2; top_offset /= 2; cdwdr_width *= 2; } /* CSI2 special configuration */ if (pcdev->pdata->csi2_dev) { in_width = ((in_width - 2) * 2); left_offset *= 2; } /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */ camor = left_offset | (top_offset << 16); dev_geo(icd->dev.parent, "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor, (in_height << 16) | 
in_width, (height << 16) | width, cdwdr_width); ceu_write(pcdev, CAMOR, camor); ceu_write(pcdev, CAPWR, (in_height << 16) | in_width); ceu_write(pcdev, CFSZR, (height << 16) | width); ceu_write(pcdev, CDWDR, cdwdr_width); } static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev) { u32 capsr = ceu_read(pcdev, CAPSR); ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */ return capsr; } static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr) { unsigned long timeout = jiffies + 10 * HZ; /* * Wait until the end of the current frame. It can take a long time, * but if it has been aborted by a CAPSR reset, it shoule exit sooner. */ while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout)) msleep(1); if (time_after(jiffies, timeout)) { dev_err(pcdev->ici.v4l2_dev.dev, "Timeout waiting for frame end! Interface problem?\n"); return; } /* Wait until reset clears, this shall not hang... */ while (ceu_read(pcdev, CAPSR) & (1 << 16)) udelay(10); /* Anything to restore? 
 */
	if (capsr & ~(1 << 16))
		ceu_write(pcdev, CAPSR, capsr);
}

/* Capture is not running, no interrupts, no locking needed */
static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
				       __u32 pixfmt)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	int ret;
	unsigned long camera_flags, common_flags, value;
	int yuv_lineskip;
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	/* capture stopped here; restored by capture_restore() at the end */
	u32 capsr = capture_save_reset(pcdev);

	camera_flags = icd->ops->query_bus_param(icd);
	common_flags = soc_camera_bus_param_compatible(camera_flags,
						       make_bus_param(pcdev));
	/*
	 * NOTE(review): the early error returns below leave the capture
	 * stopped without calling capture_restore(pcdev, capsr) - verify
	 * whether that is intentional on these error paths.
	 */
	if (!common_flags)
		return -EINVAL;

	/* Make choises, based on platform preferences */
	if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
	    (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
		if (pcdev->pdata->flags & SH_CEU_FLAG_HSYNC_LOW)
			common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
		else
			common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
	}

	if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
	    (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
		if (pcdev->pdata->flags & SH_CEU_FLAG_VSYNC_LOW)
			common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
		else
			common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
	}

	ret = icd->ops->set_bus_param(icd, common_flags);
	if (ret < 0)
		return ret;

	switch (common_flags & SOCAM_DATAWIDTH_MASK) {
	case SOCAM_DATAWIDTH_8:
		pcdev->is_16bit = 0;
		break;
	case SOCAM_DATAWIDTH_16:
		pcdev->is_16bit = 1;
		break;
	default:
		return -EINVAL;
	}

	ceu_write(pcdev, CRCNTR, 0);
	ceu_write(pcdev, CRCMPR, 0);

	value = 0x00000010; /* data fetch by default */
	yuv_lineskip = 0;

	switch (icd->current_fmt->host_fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
		yuv_lineskip = 1; /* skip for NV12/21, no skip for NV16/61 */
		/* fall-through */
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		/* NV* output: CAMCR input-order field depends on bus code */
		switch (cam->code) {
		case V4L2_MBUS_FMT_UYVY8_2X8:
			value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
			break;
		case V4L2_MBUS_FMT_VYUY8_2X8:
			value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
			break;
		case V4L2_MBUS_FMT_YUYV8_2X8:
			value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
			break;
		case V4L2_MBUS_FMT_YVYU8_2X8:
			value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
			break;
		default:
			BUG();
		}
	}

	if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
	    icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
		value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */

	value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
	value |= common_flags & SOCAM_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
	value |= pcdev->is_16bit ? 1 << 12 : 0;

	/* CSI2 mode */
	if (pcdev->pdata->csi2_dev)
		value |= 3 << 12;

	ceu_write(pcdev, CAMCR, value);

	ceu_write(pcdev, CAPCR, 0x00300000);

	switch (pcdev->field) {
	case V4L2_FIELD_INTERLACED_TB:
		value = 0x101;
		break;
	case V4L2_FIELD_INTERLACED_BT:
		value = 0x102;
		break;
	default:
		value = 0;
		break;
	}
	ceu_write(pcdev, CAIFR, value);

	sh_mobile_ceu_set_rect(icd);
	mdelay(1);

	dev_geo(icd->dev.parent, "CFLCR 0x%x\n", pcdev->cflcr);
	ceu_write(pcdev, CFLCR, pcdev->cflcr);

	/*
	 * A few words about byte order (observed in Big Endian mode)
	 *
	 * In data fetch mode bytes are received in chunks of 8 bytes.
	 * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
	 *
	 * The data is however by default written to memory in reverse order:
	 * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
	 *
	 * The lowest three bits of CDOCR allows us to do swapping,
	 * using 7 we swap the data bytes to match the incoming order:
	 * D0, D1, D2, D3, D4, D5, D6, D7
	 */
	value = 0x00000017;
	if (yuv_lineskip)
		value &= ~0x00000010; /* convert 4:2:2 -> 4:2:0 */

	ceu_write(pcdev, CDOCR, value);
	ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */

	dev_dbg(icd->dev.parent, "S_FMT successful for %c%c%c%c %ux%u\n",
		pixfmt & 0xff, (pixfmt >> 8) & 0xff,
		(pixfmt >> 16) & 0xff, (pixfmt >> 24) & 0xff,
		icd->user_width, icd->user_height);

	capture_restore(pcdev, capsr);

	/* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
	return 0;
}

/*
 * Check whether the client's bus width can be supported by this CEU
 * instance without touching the hardware.
 */
static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
				       unsigned char buswidth)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	unsigned long camera_flags, common_flags;

	camera_flags = icd->ops->query_bus_param(icd);
	common_flags = soc_camera_bus_param_compatible(camera_flags,
						       make_bus_param(pcdev));
	if (!common_flags || buswidth > 16 ||
	    (buswidth > 8 && !(common_flags & SOCAM_DATAWIDTH_16)))
		return -EINVAL;

	return 0;
}

/* NV* formats the CEU can synthesize from YUYV-class bus formats */
static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
	{
		.fourcc			= V4L2_PIX_FMT_NV12,
		.name			= "NV12",
		.bits_per_sample	= 12,
		.packing		= SOC_MBUS_PACKING_NONE,
		.order			= SOC_MBUS_ORDER_LE,
	}, {
		.fourcc			= V4L2_PIX_FMT_NV21,
		.name			= "NV21",
		.bits_per_sample	= 12,
		.packing		= SOC_MBUS_PACKING_NONE,
		.order			= SOC_MBUS_ORDER_LE,
	}, {
		.fourcc			= V4L2_PIX_FMT_NV16,
		.name			= "NV16",
		.bits_per_sample	= 16,
		.packing		= SOC_MBUS_PACKING_NONE,
		.order			= SOC_MBUS_ORDER_LE,
	}, {
		.fourcc			= V4L2_PIX_FMT_NV61,
		.name			= "NV61",
		.bits_per_sample	= 16,
		.packing		= SOC_MBUS_PACKING_NONE,
		.order			= SOC_MBUS_ORDER_LE,
	},
};

/* This will be corrected as we get more formats */
static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt) { return fmt->packing == SOC_MBUS_PACKING_NONE || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || (fmt->bits_per_sample > 8 && fmt->packing == SOC_MBUS_PACKING_EXTEND16); } static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect); static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct sh_mobile_ceu_dev *pcdev = ici->priv; int ret, k, n; int formats = 0; struct sh_mobile_ceu_cam *cam; enum v4l2_mbus_pixelcode code; const struct soc_mbus_pixelfmt *fmt; ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); if (ret < 0) /* No more formats */ return 0; fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { dev_warn(dev, "unsupported format code #%u: %d\n", idx, code); return 0; } if (!pcdev->pdata->csi2_dev) { ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) return 0; } if (!icd->host_priv) { struct v4l2_mbus_framefmt mf; struct v4l2_rect rect; int shift = 0; /* FIXME: subwindow is lost between close / open */ /* Cache current client geometry */ ret = client_g_rect(sd, &rect); if (ret < 0) return ret; /* First time */ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; while ((mf.width > 2560 || mf.height > 1920) && shift < 4) { /* Try 2560x1920, 1280x960, 640x480, 320x240 */ mf.width = 2560 >> shift; mf.height = 1920 >> shift; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; shift++; } if (shift == 4) { dev_err(dev, "Failed to configure the client below %ux%x\n", mf.width, mf.height); return -EIO; } dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height); cam = kzalloc(sizeof(*cam), GFP_KERNEL); if (!cam) return 
-ENOMEM; /* We are called with current camera crop, initialise subrect with it */ cam->rect = rect; cam->subrect = rect; cam->width = mf.width; cam->height = mf.height; cam->width = mf.width; cam->height = mf.height; icd->host_priv = cam; } else { cam = icd->host_priv; } /* Beginning of a pass */ if (!idx) cam->extra_fmt = NULL; switch (code) { case V4L2_MBUS_FMT_UYVY8_2X8: case V4L2_MBUS_FMT_VYUY8_2X8: case V4L2_MBUS_FMT_YUYV8_2X8: case V4L2_MBUS_FMT_YVYU8_2X8: if (cam->extra_fmt) break; /* * Our case is simple so far: for any of the above four camera * formats we add all our four synthesized NV* formats, so, * just marking the device with a single flag suffices. If * the format generation rules are more complex, you would have * to actually hang your already added / counted formats onto * the host_priv pointer and check whether the format you're * going to add now is already there. */ cam->extra_fmt = sh_mobile_ceu_formats; n = ARRAY_SIZE(sh_mobile_ceu_formats); formats += n; for (k = 0; xlate && k < n; k++) { xlate->host_fmt = &sh_mobile_ceu_formats[k]; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s using code %d\n", sh_mobile_ceu_formats[k].name, code); } break; default: if (!sh_mobile_ceu_packing_supported(fmt)) return 0; } /* Generic pass-through */ formats++; if (xlate) { xlate->host_fmt = fmt; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s in pass-through mode\n", fmt->name); } return formats; } static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd) { kfree(icd->host_priv); icd->host_priv = NULL; } /* Check if any dimension of r1 is smaller than respective one of r2 */ static bool is_smaller(struct v4l2_rect *r1, struct v4l2_rect *r2) { return r1->width < r2->width || r1->height < r2->height; } /* Check if r1 fails to cover r2 */ static bool is_inside(struct v4l2_rect *r1, struct v4l2_rect *r2) { return r1->left > r2->left || r1->top > r2->top || r1->left + r1->width < r2->left + r2->width || r1->top + 
r1->height < r2->top + r2->height; } static unsigned int scale_down(unsigned int size, unsigned int scale) { return (size * 4096 + scale / 2) / scale; } static unsigned int calc_generic_scale(unsigned int input, unsigned int output) { return (input * 4096 + output / 2) / output; } /* Get and store current client crop */ static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect) { struct v4l2_crop crop; struct v4l2_cropcap cap; int ret; crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, g_crop, &crop); if (!ret) { *rect = crop.c; return ret; } /* Camera driver doesn't support .g_crop(), assume default rectangle */ cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, cropcap, &cap); if (!ret) *rect = cap.defrect; return ret; } /* Client crop has changed, update our sub-rectangle to remain within the area */ static void update_subrect(struct sh_mobile_ceu_cam *cam) { struct v4l2_rect *rect = &cam->rect, *subrect = &cam->subrect; if (rect->width < subrect->width) subrect->width = rect->width; if (rect->height < subrect->height) subrect->height = rect->height; if (rect->left > subrect->left) subrect->left = rect->left; else if (rect->left + rect->width > subrect->left + subrect->width) subrect->left = rect->left + rect->width - subrect->width; if (rect->top > subrect->top) subrect->top = rect->top; else if (rect->top + rect->height > subrect->top + subrect->height) subrect->top = rect->top + rect->height - subrect->height; } /* * The common for both scaling and cropping iterative approach is: * 1. try if the client can produce exactly what requested by the user * 2. if (1) failed, try to double the client image until we get one big enough * 3. 
 if (2) failed, try to request the maximum image
 */
static int client_s_crop(struct soc_camera_device *icd, struct v4l2_crop *crop,
			 struct v4l2_crop *cam_crop)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
	struct device *dev = sd->v4l2_dev->dev;
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct v4l2_cropcap cap;
	int ret;
	unsigned int width, height;

	v4l2_subdev_call(sd, video, s_crop, crop);
	ret = client_g_rect(sd, cam_rect);
	if (ret < 0)
		return ret;

	/*
	 * Now cam_crop contains the current camera input rectangle, and it must
	 * be within camera cropcap bounds
	 */
	if (!memcmp(rect, cam_rect, sizeof(*rect))) {
		/* Even if camera S_CROP failed, but camera rectangle matches */
		dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n",
			rect->width, rect->height, rect->left, rect->top);
		cam->rect = *cam_rect;
		return 0;
	}

	/* Try to fix cropping, that camera hasn't managed to set */
	dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n",
		cam_rect->width, cam_rect->height,
		cam_rect->left, cam_rect->top,
		rect->width, rect->height, rect->left, rect->top);

	/* We need sensor maximum rectangle */
	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
	if (ret < 0)
		return ret;

	/* Put user requested rectangle within sensor bounds */
	soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
			      cap.bounds.width);
	soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
			      cap.bounds.height);

	/*
	 * Popular special case - some cameras can only handle fixed sizes like
	 * QVGA, VGA,... Take care to avoid infinite loop.
	 */
	width = max(cam_rect->width, 2);
	height = max(cam_rect->height, 2);

	/*
	 * Loop as long as sensor is not covering the requested rectangle and
	 * is still within its bounds
	 */
	while (!ret && (is_smaller(cam_rect, rect) ||
			is_inside(cam_rect, rect)) &&
	       (cap.bounds.width > width || cap.bounds.height > height)) {

		/* grow the candidate window geometrically */
		width *= 2;
		height *= 2;

		cam_rect->width = width;
		cam_rect->height = height;

		/*
		 * We do not know what capabilities the camera has to set up
		 * left and top borders. We could try to be smarter in iterating
		 * them, e.g., if camera current left is to the right of the
		 * target left, set it to the middle point between the current
		 * left and minimum left. But that would add too much
		 * complexity: we would have to iterate each border separately.
		 * Instead we just drop to the left and top bounds.
		 */
		if (cam_rect->left > rect->left)
			cam_rect->left = cap.bounds.left;

		if (cam_rect->left + cam_rect->width < rect->left + rect->width)
			cam_rect->width = rect->left + rect->width -
				cam_rect->left;

		if (cam_rect->top > rect->top)
			cam_rect->top = cap.bounds.top;

		if (cam_rect->top + cam_rect->height < rect->top + rect->height)
			cam_rect->height = rect->top + rect->height -
				cam_rect->top;

		v4l2_subdev_call(sd, video, s_crop, cam_crop);
		ret = client_g_rect(sd, cam_rect);
		dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret,
			cam_rect->width, cam_rect->height,
			cam_rect->left, cam_rect->top);
	}

	/* S_CROP must not modify the rectangle */
	if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
		/*
		 * The camera failed to configure a suitable cropping,
		 * we cannot use the current rectangle, set to max
		 */
		*cam_rect = cap.bounds;
		v4l2_subdev_call(sd, video, s_crop, cam_crop);
		ret = client_g_rect(sd, cam_rect);
		dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret,
			cam_rect->width, cam_rect->height,
			cam_rect->left, cam_rect->top);
	}

	if (!ret) {
		/* cache the achieved crop and keep the subrect inside it */
		cam->rect = *cam_rect;
		update_subrect(cam);
	}

	return ret;
}

/* Iterative s_mbus_fmt, also updates cached client crop on success */
static
int client_s_fmt(struct soc_camera_device *icd, struct v4l2_mbus_framefmt *mf, bool ceu_can_scale) { struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h; unsigned int max_width, max_height; struct v4l2_cropcap cap; int ret; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, s_mbus_fmt, mf); if (ret < 0) return ret; dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height); if ((width == mf->width && height == mf->height) || !ceu_can_scale) goto update_cache; cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, cropcap, &cap); if (ret < 0) return ret; max_width = min(cap.bounds.width, 2560); max_height = min(cap.bounds.height, 1920); /* Camera set a format, but geometry is not precise, try to improve */ tmp_w = mf->width; tmp_h = mf->height; /* width <= max_width && height <= max_height - guaranteed by try_fmt */ while ((width > tmp_w || height > tmp_h) && tmp_w < max_width && tmp_h < max_height) { tmp_w = min(2 * tmp_w, max_width); tmp_h = min(2 * tmp_h, max_height); mf->width = tmp_w; mf->height = tmp_h; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, s_mbus_fmt, mf); dev_geo(dev, "Camera scaled to %ux%u\n", mf->width, mf->height); if (ret < 0) { /* This shouldn't happen */ dev_err(dev, "Client failed to set format: %d\n", ret); return ret; } } update_cache: /* Update cache */ ret = client_g_rect(sd, &cam->rect); if (ret < 0) return ret; update_subrect(cam); return 0; } /** * @width - on output: user width, mapped back to input * @height - on output: user height, mapped back to input * @mf - in- / output camera output window */ static int client_scale(struct soc_camera_device *icd, struct v4l2_mbus_framefmt *mf, unsigned int *width, unsigned int *height, bool ceu_can_scale) { struct sh_mobile_ceu_cam *cam = icd->host_priv; struct device *dev = 
icd->dev.parent; struct v4l2_mbus_framefmt mf_tmp = *mf; unsigned int scale_h, scale_v; int ret; /* * 5. Apply iterative camera S_FMT for camera user window (also updates * client crop cache and the imaginary sub-rectangle). */ ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale); if (ret < 0) return ret; dev_geo(dev, "5: camera scaled to %ux%u\n", mf_tmp.width, mf_tmp.height); /* 6. Retrieve camera output window (g_fmt) */ /* unneeded - it is already in "mf_tmp" */ /* 7. Calculate new client scales. */ scale_h = calc_generic_scale(cam->rect.width, mf_tmp.width); scale_v = calc_generic_scale(cam->rect.height, mf_tmp.height); mf->width = mf_tmp.width; mf->height = mf_tmp.height; mf->colorspace = mf_tmp.colorspace; /* * 8. Calculate new CEU crop - apply camera scales to previously * updated "effective" crop. */ *width = scale_down(cam->subrect.width, scale_h); *height = scale_down(cam->subrect.height, scale_v); dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height); return 0; } /* * CEU can scale and crop, but we don't want to waste bandwidth and kill the * framerate by always requesting the maximum image from the client. See * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of * scaling and cropping algorithms and for the meaning of referenced here steps. 
*/ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { struct v4l2_rect *rect = &a->c; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_crop cam_crop; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_rect *cam_rect = &cam_crop.c; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; struct v4l2_mbus_framefmt mf; unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v, out_width, out_height; int interm_width, interm_height; u32 capsr, cflcr; int ret; dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height, rect->left, rect->top); /* During camera cropping its output window can change too, stop CEU */ capsr = capture_save_reset(pcdev); dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr); /* 1. - 2. Apply iterative camera S_CROP for new input window. */ ret = client_s_crop(icd, a, &cam_crop); if (ret < 0) return ret; dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n", cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); /* On success cam_crop contains current camera crop */ /* 3. Retrieve camera output window */ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; if (mf.width > 2560 || mf.height > 1920) return -EINVAL; /* 4. 
Calculate camera scales */ scale_cam_h = calc_generic_scale(cam_rect->width, mf.width); scale_cam_v = calc_generic_scale(cam_rect->height, mf.height); /* Calculate intermediate window */ interm_width = scale_down(rect->width, scale_cam_h); interm_height = scale_down(rect->height, scale_cam_v); if (interm_width < icd->user_width) { u32 new_scale_h; new_scale_h = calc_generic_scale(rect->width, icd->user_width); mf.width = scale_down(cam_rect->width, new_scale_h); } if (interm_height < icd->user_height) { u32 new_scale_v; new_scale_v = calc_generic_scale(rect->height, icd->user_height); mf.height = scale_down(cam_rect->height, new_scale_v); } if (interm_width < icd->user_width || interm_height < icd->user_height) { ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; dev_geo(dev, "New camera output %ux%u\n", mf.width, mf.height); scale_cam_h = calc_generic_scale(cam_rect->width, mf.width); scale_cam_v = calc_generic_scale(cam_rect->height, mf.height); interm_width = scale_down(rect->width, scale_cam_h); interm_height = scale_down(rect->height, scale_cam_v); } /* Cache camera output window */ cam->width = mf.width; cam->height = mf.height; if (pcdev->image_mode) { out_width = min(interm_width, icd->user_width); out_height = min(interm_height, icd->user_height); } else { out_width = interm_width; out_height = interm_height; } /* * 5. Calculate CEU scales from camera scales from results of (5) and * the user window */ scale_ceu_h = calc_scale(interm_width, &out_width); scale_ceu_v = calc_scale(interm_height, &out_height); dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v); /* Apply CEU scales. 
*/ cflcr = scale_ceu_h | (scale_ceu_v << 16); if (cflcr != pcdev->cflcr) { pcdev->cflcr = cflcr; ceu_write(pcdev, CFLCR, cflcr); } icd->user_width = out_width; icd->user_height = out_height; cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1; cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1; /* 6. Use CEU cropping to crop to the new window. */ sh_mobile_ceu_set_rect(icd); cam->subrect = *rect; dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n", icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); /* Restore capture. The CE bit can be cleared by the hardware */ if (pcdev->active) capsr |= 1; capture_restore(pcdev, capsr); /* Even if only camera cropping succeeded */ return ret; } static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { struct sh_mobile_ceu_cam *cam = icd->host_priv; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->c = cam->subrect; return 0; } /* * Calculate real client output window by applying new scales to the current * client crop. New scales are calculated from the requested output format and * CEU crop, mapped backed onto the client input (subrect). */ static void calculate_client_output(struct soc_camera_device *icd, struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf) { struct sh_mobile_ceu_cam *cam = icd->host_priv; struct device *dev = icd->dev.parent; struct v4l2_rect *cam_subrect = &cam->subrect; unsigned int scale_v, scale_h; if (cam_subrect->width == cam->rect.width && cam_subrect->height == cam->rect.height) { /* No sub-cropping */ mf->width = pix->width; mf->height = pix->height; return; } /* 1.-2. Current camera scales and subwin - cached. */ dev_geo(dev, "2: subwin %ux%u@%u:%u\n", cam_subrect->width, cam_subrect->height, cam_subrect->left, cam_subrect->top); /* * 3. Calculate new combined scales from input sub-window to requested * user window. 
*/ /* * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF * (128x96) or larger than VGA */ scale_h = calc_generic_scale(cam_subrect->width, pix->width); scale_v = calc_generic_scale(cam_subrect->height, pix->height); dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v); /* * 4. Calculate client output window by applying combined scales to real * input window. */ mf->width = scale_down(cam->rect.width, scale_h); mf->height = scale_down(cam->rect.height, scale_v); } /* Similar to set_crop multistage iterative algorithm */ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; struct device *dev = icd->dev.parent; __u32 pixfmt = pix->pixelformat; const struct soc_camera_format_xlate *xlate; /* Keep Compiler Happy */ unsigned int ceu_sub_width = 0, ceu_sub_height = 0; u16 scale_v, scale_h; int ret; bool image_mode; enum v4l2_field field; dev_geo(dev, "S_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height); switch (pix->field) { default: pix->field = V4L2_FIELD_NONE; /* fall-through */ case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_NONE: field = pix->field; break; case V4L2_FIELD_INTERLACED: field = V4L2_FIELD_INTERLACED_TB; break; } xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(dev, "Format %x not found\n", pixfmt); return -EINVAL; } /* 1.-4. 
Calculate client output geometry */ calculate_client_output(icd, &f->fmt.pix, &mf); mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; switch (pixfmt) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: image_mode = true; break; default: image_mode = false; } dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height); /* 5. - 9. */ ret = client_scale(icd, &mf, &ceu_sub_width, &ceu_sub_height, image_mode && V4L2_FIELD_NONE == field); dev_geo(dev, "5-9: client scale return %d\n", ret); /* Done with the camera. Now see if we can improve the result */ dev_geo(dev, "fmt %ux%u, requested %ux%u\n", mf.width, mf.height, pix->width, pix->height); if (ret < 0) return ret; if (mf.code != xlate->code) return -EINVAL; /* 9. Prepare CEU crop */ cam->width = mf.width; cam->height = mf.height; /* 10. Use CEU scaling to scale to the requested user window. */ /* We cannot scale up */ if (pix->width > ceu_sub_width) ceu_sub_width = pix->width; if (pix->height > ceu_sub_height) ceu_sub_height = pix->height; pix->colorspace = mf.colorspace; if (image_mode) { /* Scale pix->{width x height} down to width x height */ scale_h = calc_scale(ceu_sub_width, &pix->width); scale_v = calc_scale(ceu_sub_height, &pix->height); } else { pix->width = ceu_sub_width; pix->height = ceu_sub_height; scale_h = 0; scale_v = 0; } pcdev->cflcr = scale_h | (scale_v << 16); /* * We have calculated CFLCR, the actual configuration will be performed * in sh_mobile_ceu_set_bus_param() */ dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n", ceu_sub_width, scale_h, pix->width, ceu_sub_height, scale_v, pix->height); cam->code = xlate->code; icd->current_fmt = xlate; pcdev->field = field; pcdev->image_mode = image_mode; return 0; } static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_subdev *sd = 
soc_camera_to_subdev(icd); struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; int width, height; int ret; dev_geo(icd->dev.parent, "TRY_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height); xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); return -EINVAL; } /* FIXME: calculate using depth and bus width */ v4l_bound_align_image(&pix->width, 2, 2560, 1, &pix->height, 4, 1920, 2, 0); width = pix->width; height = pix->height; pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt); if ((int)pix->bytesperline < 0) return pix->bytesperline; pix->sizeimage = height * pix->bytesperline; /* limit to sensor capabilities */ mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.code = xlate->code; mf.colorspace = pix->colorspace; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf); if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->field = mf.field; pix->colorspace = mf.colorspace; switch (pixfmt) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: /* FIXME: check against rect_max after converting soc-camera */ /* We can scale precisely, need a bigger image from camera */ if (pix->width < width || pix->height < height) { /* * We presume, the sensor behaves sanely, i.e., if * requested a bigger rectangle, it will not return a * smaller one. */ mf.width = 2560; mf.height = 1920; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf); if (ret < 0) { /* Shouldn't actually happen... 
*/ dev_err(icd->dev.parent, "FIXME: client try_fmt() = %d\n", ret); return ret; } } /* We will scale exactly */ if (mf.width > width) pix->width = width; if (mf.height > height) pix->height = height; } dev_geo(icd->dev.parent, "%s(): return %d, fmt 0x%x, %ux%u\n", __func__, ret, pix->pixelformat, pix->width, pix->height); return ret; } static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd, struct v4l2_crop *a) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; u32 out_width = icd->user_width, out_height = icd->user_height; int ret; /* Freeze queue */ pcdev->frozen = 1; /* Wait for frame */ ret = wait_for_completion_interruptible(&pcdev->complete); /* Stop the client */ ret = v4l2_subdev_call(sd, video, s_stream, 0); if (ret < 0) dev_warn(icd->dev.parent, "Client failed to stop the stream: %d\n", ret); else /* Do the crop, if it fails, there's nothing more we can do */ sh_mobile_ceu_set_crop(icd, a); dev_geo(icd->dev.parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height); if (icd->user_width != out_width || icd->user_height != out_height) { struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { .width = out_width, .height = out_height, .pixelformat = icd->current_fmt->host_fmt->fourcc, .field = pcdev->field, .colorspace = icd->colorspace, }, }; ret = sh_mobile_ceu_set_fmt(icd, &f); if (!ret && (out_width != f.fmt.pix.width || out_height != f.fmt.pix.height)) ret = -EINVAL; if (!ret) { icd->user_width = out_width; icd->user_height = out_height; ret = sh_mobile_ceu_set_bus_param(icd, icd->current_fmt->host_fmt->fourcc); } } /* Thaw the queue */ pcdev->frozen = 0; spin_lock_irq(&pcdev->lock); sh_mobile_ceu_capture(pcdev); spin_unlock_irq(&pcdev->lock); /* Start the client */ ret = v4l2_subdev_call(sd, video, s_stream, 1); return ret; } static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table 
*pt) { struct soc_camera_device *icd = file->private_data; return vb2_poll(&icd->vb2_vidq, file, pt); } static int sh_mobile_ceu_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); cap->version = KERNEL_VERSION(0, 0, 5); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; return 0; } static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q, struct soc_camera_device *icd) { q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR; q->drv_priv = icd; q->ops = &sh_mobile_ceu_videobuf_ops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer); return vb2_queue_init(q); } static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd, struct v4l2_control *ctrl) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; u32 val; switch (ctrl->id) { case V4L2_CID_SHARPNESS: val = ceu_read(pcdev, CLFCR); ctrl->value = val ^ 1; return 0; } return -ENOIOCTLCMD; } static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd, struct v4l2_control *ctrl) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; switch (ctrl->id) { case V4L2_CID_SHARPNESS: switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: ceu_write(pcdev, CLFCR, !ctrl->value); return 0; } return -EINVAL; } return -ENOIOCTLCMD; } static const struct v4l2_queryctrl sh_mobile_ceu_controls[] = { { .id = V4L2_CID_SHARPNESS, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Low-pass filter", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, }; static struct soc_camera_host_ops sh_mobile_ceu_host_ops = { .owner = THIS_MODULE, .add = sh_mobile_ceu_add_device, .remove = sh_mobile_ceu_remove_device, .get_formats = sh_mobile_ceu_get_formats, .put_formats = 
sh_mobile_ceu_put_formats, .get_crop = sh_mobile_ceu_get_crop, .set_crop = sh_mobile_ceu_set_crop, .set_livecrop = sh_mobile_ceu_set_livecrop, .set_fmt = sh_mobile_ceu_set_fmt, .try_fmt = sh_mobile_ceu_try_fmt, .set_ctrl = sh_mobile_ceu_set_ctrl, .get_ctrl = sh_mobile_ceu_get_ctrl, .poll = sh_mobile_ceu_poll, .querycap = sh_mobile_ceu_querycap, .set_bus_param = sh_mobile_ceu_set_bus_param, .init_videobuf2 = sh_mobile_ceu_init_videobuf, .controls = sh_mobile_ceu_controls, .num_controls = ARRAY_SIZE(sh_mobile_ceu_controls), }; struct bus_wait { struct notifier_block notifier; struct completion completion; struct device *dev; }; static int bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct bus_wait *wait = container_of(nb, struct bus_wait, notifier); if (wait->dev != dev) return NOTIFY_DONE; switch (action) { case BUS_NOTIFY_UNBOUND_DRIVER: /* Protect from module unloading */ wait_for_completion(&wait->completion); return NOTIFY_OK; } return NOTIFY_DONE; } static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev) { struct sh_mobile_ceu_dev *pcdev; struct resource *res; void __iomem *base; unsigned int irq; int err = 0; struct bus_wait wait = { .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion), .notifier.notifier_call = bus_notify, }; struct device *csi2; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || (int)irq <= 0) { dev_err(&pdev->dev, "Not enough CEU platform resources.\n"); err = -ENODEV; goto exit; } pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL); if (!pcdev) { dev_err(&pdev->dev, "Could not allocate pcdev\n"); err = -ENOMEM; goto exit; } INIT_LIST_HEAD(&pcdev->capture); spin_lock_init(&pcdev->lock); init_completion(&pcdev->complete); pcdev->pdata = pdev->dev.platform_data; if (!pcdev->pdata) { err = -EINVAL; dev_err(&pdev->dev, "CEU platform data not set.\n"); goto exit_kfree; } base = ioremap_nocache(res->start, resource_size(res)); 
if (!base) { err = -ENXIO; dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n"); goto exit_kfree; } pcdev->irq = irq; pcdev->base = base; pcdev->video_limit = 0; /* only enabled if second resource exists */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { err = dma_declare_coherent_memory(&pdev->dev, res->start, res->start, resource_size(res), DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); if (!err) { dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); err = -ENXIO; goto exit_iounmap; } pcdev->video_limit = resource_size(res); } /* request irq */ err = request_irq(pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED, dev_name(&pdev->dev), pcdev); if (err) { dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); goto exit_release_mem; } pm_suspend_ignore_children(&pdev->dev, true); pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); pcdev->ici.priv = pcdev; pcdev->ici.v4l2_dev.dev = &pdev->dev; pcdev->ici.nr = pdev->id; pcdev->ici.drv_name = dev_name(&pdev->dev); pcdev->ici.ops = &sh_mobile_ceu_host_ops; /* CSI2 interfacing */ csi2 = pcdev->pdata->csi2_dev; if (csi2) { wait.dev = csi2; err = bus_register_notifier(&platform_bus_type, &wait.notifier); if (err < 0) goto exit_free_clk; /* * From this point the driver module will not unload, until * we complete the completion. */ if (!csi2->driver) { complete(&wait.completion); /* Either too late, or probing failed */ bus_unregister_notifier(&platform_bus_type, &wait.notifier); err = -ENXIO; goto exit_free_clk; } /* * The module is still loaded, in the worst case it is hanging * in device release on our completion. So, _now_ dereferencing * the "owner" is safe! 
*/ err = try_module_get(csi2->driver->owner); /* Let notifier complete, if it has been locked */ complete(&wait.completion); bus_unregister_notifier(&platform_bus_type, &wait.notifier); if (!err) { err = -ENODEV; goto exit_free_clk; } } pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(pcdev->alloc_ctx)) { err = PTR_ERR(pcdev->alloc_ctx); goto exit_module_put; } err = soc_camera_host_register(&pcdev->ici); if (err) goto exit_free_ctx; return 0; exit_free_ctx: vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); exit_module_put: if (csi2 && csi2->driver) module_put(csi2->driver->owner); exit_free_clk: pm_runtime_disable(&pdev->dev); free_irq(pcdev->irq, pcdev); exit_release_mem: if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); exit_iounmap: iounmap(base); exit_kfree: kfree(pcdev); exit: return err; } static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct sh_mobile_ceu_dev *pcdev = container_of(soc_host, struct sh_mobile_ceu_dev, ici); struct device *csi2 = pcdev->pdata->csi2_dev; soc_camera_host_unregister(soc_host); pm_runtime_disable(&pdev->dev); free_irq(pcdev->irq, pcdev); if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); iounmap(pcdev->base); vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); if (csi2 && csi2->driver) module_put(csi2->driver->owner); kfree(pcdev); return 0; } static int sh_mobile_ceu_runtime_nop(struct device *dev) { /* Runtime PM callback shared between ->runtime_suspend() * and ->runtime_resume(). Simply returns success. * * This driver re-initializes all registers after * pm_runtime_get_sync() anyway so there is no need * to save and restore registers here. 
*/ return 0; } static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { .runtime_suspend = sh_mobile_ceu_runtime_nop, .runtime_resume = sh_mobile_ceu_runtime_nop, }; static struct platform_driver sh_mobile_ceu_driver = { .driver = { .name = "sh_mobile_ceu", .pm = &sh_mobile_ceu_dev_pm_ops, }, .probe = sh_mobile_ceu_probe, .remove = __devexit_p(sh_mobile_ceu_remove), }; static int __init sh_mobile_ceu_init(void) { /* Whatever return code */ request_module("sh_mobile_csi2"); return platform_driver_register(&sh_mobile_ceu_driver); } static void __exit sh_mobile_ceu_exit(void) { platform_driver_unregister(&sh_mobile_ceu_driver); } module_init(sh_mobile_ceu_init); module_exit(sh_mobile_ceu_exit); MODULE_DESCRIPTION("SuperH Mobile CEU driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sh_mobile_ceu");
gpl-2.0
jitendriya/linux-2.6
fs/afs/dir.c
2898
27723
/* dir.c: AFS filesystem directory handling * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/ctype.h> #include <linux/sched.h> #include "internal.h" static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd); static int afs_dir_open(struct inode *inode, struct file *file); static int afs_readdir(struct file *file, void *dirent, filldir_t filldir); static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd); static int afs_d_delete(const struct dentry *dentry); static void afs_d_release(struct dentry *dentry); static int afs_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos, u64 ino, unsigned dtype); static int afs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd); static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode); static int afs_rmdir(struct inode *dir, struct dentry *dentry); static int afs_unlink(struct inode *dir, struct dentry *dentry); static int afs_link(struct dentry *from, struct inode *dir, struct dentry *dentry); static int afs_symlink(struct inode *dir, struct dentry *dentry, const char *content); static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); const struct file_operations afs_dir_file_operations = { .open = afs_dir_open, .release = afs_release, .readdir = afs_readdir, .lock = afs_lock, .llseek = generic_file_llseek, }; const struct inode_operations afs_dir_inode_operations = { .create = 
afs_create, .lookup = afs_lookup, .link = afs_link, .unlink = afs_unlink, .symlink = afs_symlink, .mkdir = afs_mkdir, .rmdir = afs_rmdir, .rename = afs_rename, .permission = afs_permission, .getattr = afs_getattr, .setattr = afs_setattr, }; const struct dentry_operations afs_fs_dentry_operations = { .d_revalidate = afs_d_revalidate, .d_delete = afs_d_delete, .d_release = afs_d_release, .d_automount = afs_d_automount, }; #define AFS_DIR_HASHTBL_SIZE 128 #define AFS_DIR_DIRENT_SIZE 32 #define AFS_DIRENT_PER_BLOCK 64 union afs_dirent { struct { uint8_t valid; uint8_t unused[1]; __be16 hash_next; __be32 vnode; __be32 unique; uint8_t name[16]; uint8_t overflow[4]; /* if any char of the name (inc * NUL) reaches here, consume * the next dirent too */ } u; uint8_t extended_name[32]; }; /* AFS directory page header (one at the beginning of every 2048-byte chunk) */ struct afs_dir_pagehdr { __be16 npages; __be16 magic; #define AFS_DIR_MAGIC htons(1234) uint8_t nentries; uint8_t bitmap[8]; uint8_t pad[19]; }; /* directory block layout */ union afs_dir_block { struct afs_dir_pagehdr pagehdr; struct { struct afs_dir_pagehdr pagehdr; uint8_t alloc_ctrs[128]; /* dir hash table */ uint16_t hashtable[AFS_DIR_HASHTBL_SIZE]; } hdr; union afs_dirent dirents[AFS_DIRENT_PER_BLOCK]; }; /* layout on a linux VM page */ struct afs_dir_page { union afs_dir_block blocks[PAGE_SIZE / sizeof(union afs_dir_block)]; }; struct afs_lookup_cookie { struct afs_fid fid; const char *name; size_t nlen; int found; }; /* * check that a directory page is valid */ static inline void afs_dir_check_page(struct inode *dir, struct page *page) { struct afs_dir_page *dbuf; loff_t latter; int tmp, qty; #if 0 /* check the page count */ qty = desc.size / sizeof(dbuf->blocks[0]); if (qty == 0) goto error; if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) { printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n", __func__, dir->i_ino, qty, ntohs(dbuf->blocks[0].pagehdr.npages)); goto error; } 
#endif /* determine how many magic numbers there should be in this page */ latter = dir->i_size - page_offset(page); if (latter >= PAGE_SIZE) qty = PAGE_SIZE; else qty = latter; qty /= sizeof(union afs_dir_block); /* check them */ dbuf = page_address(page); for (tmp = 0; tmp < qty; tmp++) { if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) { printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n", __func__, dir->i_ino, tmp, qty, ntohs(dbuf->blocks[tmp].pagehdr.magic)); goto error; } } SetPageChecked(page); return; error: SetPageChecked(page); SetPageError(page); } /* * discard a page cached in the pagecache */ static inline void afs_dir_put_page(struct page *page) { kunmap(page); page_cache_release(page); } /* * get a page into the pagecache */ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index, struct key *key) { struct page *page; _enter("{%lu},%lu", dir->i_ino, index); page = read_cache_page(dir->i_mapping, index, afs_page_filler, key); if (!IS_ERR(page)) { kmap(page); if (!PageChecked(page)) afs_dir_check_page(dir, page); if (PageError(page)) goto fail; } return page; fail: afs_dir_put_page(page); _leave(" = -EIO"); return ERR_PTR(-EIO); } /* * open an AFS directory file */ static int afs_dir_open(struct inode *inode, struct file *file) { _enter("{%lu}", inode->i_ino); BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); BUILD_BUG_ON(sizeof(union afs_dirent) != 32); if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(inode)->flags)) return -ENOENT; return afs_open(inode, file); } /* * deal with one block in an AFS directory */ static int afs_dir_iterate_block(unsigned *fpos, union afs_dir_block *block, unsigned blkoff, void *cookie, filldir_t filldir) { union afs_dirent *dire; unsigned offset, next, curr; size_t nlen; int tmp, ret; _enter("%u,%x,%p,,",*fpos,blkoff,block); curr = (*fpos - blkoff) / sizeof(union afs_dirent); /* walk through the block, an entry at a time */ for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries; offset < 
AFS_DIRENT_PER_BLOCK; offset = next ) { next = offset + 1; /* skip entries marked unused in the bitmap */ if (!(block->pagehdr.bitmap[offset / 8] & (1 << (offset % 8)))) { _debug("ENT[%Zu.%u]: unused", blkoff / sizeof(union afs_dir_block), offset); if (offset >= curr) *fpos = blkoff + next * sizeof(union afs_dirent); continue; } /* got a valid entry */ dire = &block->dirents[offset]; nlen = strnlen(dire->u.name, sizeof(*block) - offset * sizeof(union afs_dirent)); _debug("ENT[%Zu.%u]: %s %Zu \"%s\"", blkoff / sizeof(union afs_dir_block), offset, (offset < curr ? "skip" : "fill"), nlen, dire->u.name); /* work out where the next possible entry is */ for (tmp = nlen; tmp > 15; tmp -= sizeof(union afs_dirent)) { if (next >= AFS_DIRENT_PER_BLOCK) { _debug("ENT[%Zu.%u]:" " %u travelled beyond end dir block" " (len %u/%Zu)", blkoff / sizeof(union afs_dir_block), offset, next, tmp, nlen); return -EIO; } if (!(block->pagehdr.bitmap[next / 8] & (1 << (next % 8)))) { _debug("ENT[%Zu.%u]:" " %u unmarked extension (len %u/%Zu)", blkoff / sizeof(union afs_dir_block), offset, next, tmp, nlen); return -EIO; } _debug("ENT[%Zu.%u]: ext %u/%Zu", blkoff / sizeof(union afs_dir_block), next, tmp, nlen); next++; } /* skip if starts before the current position */ if (offset < curr) continue; /* found the next entry */ ret = filldir(cookie, dire->u.name, nlen, blkoff + offset * sizeof(union afs_dirent), ntohl(dire->u.vnode), filldir == afs_lookup_filldir ? 
ntohl(dire->u.unique) : DT_UNKNOWN); if (ret < 0) { _leave(" = 0 [full]"); return 0; } *fpos = blkoff + next * sizeof(union afs_dirent); } _leave(" = 1 [more]"); return 1; } /* * iterate through the data blob that lists the contents of an AFS directory */ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, filldir_t filldir, struct key *key) { union afs_dir_block *dblock; struct afs_dir_page *dbuf; struct page *page; unsigned blkoff, limit; int ret; _enter("{%lu},%u,,", dir->i_ino, *fpos); if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) { _leave(" = -ESTALE"); return -ESTALE; } /* round the file position up to the next entry boundary */ *fpos += sizeof(union afs_dirent) - 1; *fpos &= ~(sizeof(union afs_dirent) - 1); /* walk through the blocks in sequence */ ret = 0; while (*fpos < dir->i_size) { blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1); /* fetch the appropriate page from the directory */ page = afs_dir_get_page(dir, blkoff / PAGE_SIZE, key); if (IS_ERR(page)) { ret = PTR_ERR(page); break; } limit = blkoff & ~(PAGE_SIZE - 1); dbuf = page_address(page); /* deal with the individual blocks stashed on this page */ do { dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) / sizeof(union afs_dir_block)]; ret = afs_dir_iterate_block(fpos, dblock, blkoff, cookie, filldir); if (ret != 1) { afs_dir_put_page(page); goto out; } blkoff += sizeof(union afs_dir_block); } while (*fpos < dir->i_size && blkoff < limit); afs_dir_put_page(page); ret = 0; } out: _leave(" = %d", ret); return ret; } /* * read an AFS directory */ static int afs_readdir(struct file *file, void *cookie, filldir_t filldir) { unsigned fpos; int ret; _enter("{%Ld,{%lu}}", file->f_pos, file->f_path.dentry->d_inode->i_ino); ASSERT(file->private_data != NULL); fpos = file->f_pos; ret = afs_dir_iterate(file->f_path.dentry->d_inode, &fpos, cookie, filldir, file->private_data); file->f_pos = fpos; _leave(" = %d", ret); return ret; } /* * search the directory for a name * - if 
afs_dir_iterate_block() spots this function, it'll pass the FID * uniquifier through dtype */ static int afs_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos, u64 ino, unsigned dtype) { struct afs_lookup_cookie *cookie = _cookie; _enter("{%s,%Zu},%s,%u,,%llu,%u", cookie->name, cookie->nlen, name, nlen, (unsigned long long) ino, dtype); /* insanity checks first */ BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); BUILD_BUG_ON(sizeof(union afs_dirent) != 32); if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) { _leave(" = 0 [no]"); return 0; } cookie->fid.vnode = ino; cookie->fid.unique = dtype; cookie->found = 1; _leave(" = -1 [found]"); return -1; } /* * do a lookup in a directory * - just returns the FID the dentry name maps to if found */ static int afs_do_lookup(struct inode *dir, struct dentry *dentry, struct afs_fid *fid, struct key *key) { struct afs_lookup_cookie cookie; struct afs_super_info *as; unsigned fpos; int ret; _enter("{%lu},%p{%s},", dir->i_ino, dentry, dentry->d_name.name); as = dir->i_sb->s_fs_info; /* search the directory */ cookie.name = dentry->d_name.name; cookie.nlen = dentry->d_name.len; cookie.fid.vid = as->volume->vid; cookie.found = 0; fpos = 0; ret = afs_dir_iterate(dir, &fpos, &cookie, afs_lookup_filldir, key); if (ret < 0) { _leave(" = %d [iter]", ret); return ret; } ret = -ENOENT; if (!cookie.found) { _leave(" = -ENOENT [not found]"); return -ENOENT; } *fid = cookie.fid; _leave(" = 0 { vn=%u u=%u }", fid->vnode, fid->unique); return 0; } /* * Try to auto mount the mountpoint with pseudo directory, if the autocell * operation is setted. 
*/ static struct inode *afs_try_auto_mntpt( int ret, struct dentry *dentry, struct inode *dir, struct key *key, struct afs_fid *fid) { const char *devname = dentry->d_name.name; struct afs_vnode *vnode = AFS_FS_I(dir); struct inode *inode; _enter("%d, %p{%s}, {%x:%u}, %p", ret, dentry, devname, vnode->fid.vid, vnode->fid.vnode, key); if (ret != -ENOENT || !test_bit(AFS_VNODE_AUTOCELL, &vnode->flags)) goto out; inode = afs_iget_autocell(dir, devname, strlen(devname), key); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto out; } *fid = AFS_FS_I(inode)->fid; _leave("= %p", inode); return inode; out: _leave("= %d", ret); return ERR_PTR(ret); } /* * look up an entry in a directory */ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct afs_vnode *vnode; struct afs_fid fid; struct inode *inode; struct key *key; int ret; vnode = AFS_FS_I(dir); _enter("{%x:%u},%p{%s},", vnode->fid.vid, vnode->fid.vnode, dentry, dentry->d_name.name); ASSERTCMP(dentry->d_inode, ==, NULL); if (dentry->d_name.len >= AFSNAMEMAX) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { _leave(" = -ESTALE"); return ERR_PTR(-ESTALE); } key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) { _leave(" = %ld [key]", PTR_ERR(key)); return ERR_CAST(key); } ret = afs_validate(vnode, key); if (ret < 0) { key_put(key); _leave(" = %d [val]", ret); return ERR_PTR(ret); } ret = afs_do_lookup(dir, dentry, &fid, key); if (ret < 0) { inode = afs_try_auto_mntpt(ret, dentry, dir, key, &fid); if (!IS_ERR(inode)) { key_put(key); goto success; } ret = PTR_ERR(inode); key_put(key); if (ret == -ENOENT) { d_add(dentry, NULL); _leave(" = NULL [negative]"); return NULL; } _leave(" = %d [do]", ret); return ERR_PTR(ret); } dentry->d_fsdata = (void *)(unsigned long) vnode->status.data_version; /* instantiate the dentry */ inode = afs_iget(dir->i_sb, key, &fid, NULL, NULL); key_put(key); if (IS_ERR(inode)) { 
_leave(" = %ld", PTR_ERR(inode)); return ERR_CAST(inode); } success: d_add(dentry, inode); _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }", fid.vnode, fid.unique, dentry->d_inode->i_ino, dentry->d_inode->i_generation); return NULL; } /* * check that a dentry lookup hit has found a valid entry * - NOTE! the hit can be a negative hit too, so we can't assume we have an * inode */ static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd) { struct afs_vnode *vnode, *dir; struct afs_fid uninitialized_var(fid); struct dentry *parent; struct key *key; void *dir_version; int ret; if (nd->flags & LOOKUP_RCU) return -ECHILD; vnode = AFS_FS_I(dentry->d_inode); if (dentry->d_inode) _enter("{v={%x:%u} n=%s fl=%lx},", vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name, vnode->flags); else _enter("{neg n=%s}", dentry->d_name.name); key = afs_request_key(AFS_FS_S(dentry->d_sb)->volume->cell); if (IS_ERR(key)) key = NULL; /* lock down the parent dentry so we can peer at it */ parent = dget_parent(dentry); if (!parent->d_inode) goto out_bad; dir = AFS_FS_I(parent->d_inode); /* validate the parent directory */ if (test_bit(AFS_VNODE_MODIFIED, &dir->flags)) afs_validate(dir, key); if (test_bit(AFS_VNODE_DELETED, &dir->flags)) { _debug("%s: parent dir deleted", dentry->d_name.name); goto out_bad; } dir_version = (void *) (unsigned long) dir->status.data_version; if (dentry->d_fsdata == dir_version) goto out_valid; /* the dir contents are unchanged */ _debug("dir modified"); /* search the directory for this vnode */ ret = afs_do_lookup(&dir->vfs_inode, dentry, &fid, key); switch (ret) { case 0: /* the filename maps to something */ if (!dentry->d_inode) goto out_bad; if (is_bad_inode(dentry->d_inode)) { printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n", parent->d_name.name, dentry->d_name.name); goto out_bad; } /* if the vnode ID has changed, then the dirent points to a * different file */ if (fid.vnode != vnode->fid.vnode) { _debug("%s: dirent changed [%u != %u]", 
dentry->d_name.name, fid.vnode, vnode->fid.vnode); goto not_found; } /* if the vnode ID uniqifier has changed, then the file has * been deleted and replaced, and the original vnode ID has * been reused */ if (fid.unique != vnode->fid.unique) { _debug("%s: file deleted (uq %u -> %u I:%u)", dentry->d_name.name, fid.unique, vnode->fid.unique, dentry->d_inode->i_generation); spin_lock(&vnode->lock); set_bit(AFS_VNODE_DELETED, &vnode->flags); spin_unlock(&vnode->lock); goto not_found; } goto out_valid; case -ENOENT: /* the filename is unknown */ _debug("%s: dirent not found", dentry->d_name.name); if (dentry->d_inode) goto not_found; goto out_valid; default: _debug("failed to iterate dir %s: %d", parent->d_name.name, ret); goto out_bad; } out_valid: dentry->d_fsdata = dir_version; out_skip: dput(parent); key_put(key); _leave(" = 1 [valid]"); return 1; /* the dirent, if it exists, now points to a different vnode */ not_found: spin_lock(&dentry->d_lock); dentry->d_flags |= DCACHE_NFSFS_RENAMED; spin_unlock(&dentry->d_lock); out_bad: if (dentry->d_inode) { /* don't unhash if we have submounts */ if (have_submounts(dentry)) goto out_skip; } _debug("dropping dentry %s/%s", parent->d_name.name, dentry->d_name.name); shrink_dcache_parent(dentry); d_drop(dentry); dput(parent); key_put(key); _leave(" = 0 [bad]"); return 0; } /* * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't * sleep) * - called from dput() when d_count is going to 0. 
* - return 1 to request dentry be unhashed, 0 otherwise */ static int afs_d_delete(const struct dentry *dentry) { _enter("%s", dentry->d_name.name); if (dentry->d_flags & DCACHE_NFSFS_RENAMED) goto zap; if (dentry->d_inode && (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags) || test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(dentry->d_inode)->flags))) goto zap; _leave(" = 0 [keep]"); return 0; zap: _leave(" = 1 [zap]"); return 1; } /* * handle dentry release */ static void afs_d_release(struct dentry *dentry) { _enter("%s", dentry->d_name.name); } /* * create a directory on an AFS filesystem */ static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode) { struct afs_file_status status; struct afs_callback cb; struct afs_server *server; struct afs_vnode *dvnode, *vnode; struct afs_fid fid; struct inode *inode; struct key *key; int ret; dvnode = AFS_FS_I(dir); _enter("{%x:%u},{%s},%o", dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode); ret = -ENAMETOOLONG; if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error; } mode |= S_IFDIR; ret = afs_vnode_create(dvnode, key, dentry->d_name.name, mode, &fid, &status, &cb, &server); if (ret < 0) goto mkdir_error; inode = afs_iget(dir->i_sb, key, &fid, &status, &cb); if (IS_ERR(inode)) { /* ENOMEM at a really inconvenient time - just abandon the new * directory on the server */ ret = PTR_ERR(inode); goto iget_error; } /* apply the status report we've got for the new vnode */ vnode = AFS_FS_I(inode); spin_lock(&vnode->lock); vnode->update_cnt++; spin_unlock(&vnode->lock); afs_vnode_finalise_status_update(vnode, server); afs_put_server(server); d_instantiate(dentry, inode); if (d_unhashed(dentry)) { _debug("not hashed"); d_rehash(dentry); } key_put(key); _leave(" = 0"); return 0; iget_error: afs_put_server(server); mkdir_error: key_put(key); error: d_drop(dentry); _leave(" = %d", ret); return ret; } /* * remove 
a directory from an AFS filesystem */ static int afs_rmdir(struct inode *dir, struct dentry *dentry) { struct afs_vnode *dvnode, *vnode; struct key *key; int ret; dvnode = AFS_FS_I(dir); _enter("{%x:%u},{%s}", dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); ret = -ENAMETOOLONG; if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error; } ret = afs_vnode_remove(dvnode, key, dentry->d_name.name, true); if (ret < 0) goto rmdir_error; if (dentry->d_inode) { vnode = AFS_FS_I(dentry->d_inode); clear_nlink(&vnode->vfs_inode); set_bit(AFS_VNODE_DELETED, &vnode->flags); afs_discard_callback_on_delete(vnode); } key_put(key); _leave(" = 0"); return 0; rmdir_error: key_put(key); error: _leave(" = %d", ret); return ret; } /* * remove a file from an AFS filesystem */ static int afs_unlink(struct inode *dir, struct dentry *dentry) { struct afs_vnode *dvnode, *vnode; struct key *key; int ret; dvnode = AFS_FS_I(dir); _enter("{%x:%u},{%s}", dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); ret = -ENAMETOOLONG; if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error; } if (dentry->d_inode) { vnode = AFS_FS_I(dentry->d_inode); /* make sure we have a callback promise on the victim */ ret = afs_validate(vnode, key); if (ret < 0) goto error; } ret = afs_vnode_remove(dvnode, key, dentry->d_name.name, false); if (ret < 0) goto remove_error; if (dentry->d_inode) { /* if the file wasn't deleted due to excess hard links, the * fileserver will break the callback promise on the file - if * it had one - before it returns to us, and if it was deleted, * it won't * * however, if we didn't have a callback promise outstanding, * or it was outstanding on a different server, then it won't * break it either... 
*/ vnode = AFS_FS_I(dentry->d_inode); if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) _debug("AFS_VNODE_DELETED"); if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) _debug("AFS_VNODE_CB_BROKEN"); set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); ret = afs_validate(vnode, key); _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret); } key_put(key); _leave(" = 0"); return 0; remove_error: key_put(key); error: _leave(" = %d", ret); return ret; } /* * create a regular file on an AFS filesystem */ static int afs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { struct afs_file_status status; struct afs_callback cb; struct afs_server *server; struct afs_vnode *dvnode, *vnode; struct afs_fid fid; struct inode *inode; struct key *key; int ret; dvnode = AFS_FS_I(dir); _enter("{%x:%u},{%s},%o,", dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode); ret = -ENAMETOOLONG; if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error; } mode |= S_IFREG; ret = afs_vnode_create(dvnode, key, dentry->d_name.name, mode, &fid, &status, &cb, &server); if (ret < 0) goto create_error; inode = afs_iget(dir->i_sb, key, &fid, &status, &cb); if (IS_ERR(inode)) { /* ENOMEM at a really inconvenient time - just abandon the new * directory on the server */ ret = PTR_ERR(inode); goto iget_error; } /* apply the status report we've got for the new vnode */ vnode = AFS_FS_I(inode); spin_lock(&vnode->lock); vnode->update_cnt++; spin_unlock(&vnode->lock); afs_vnode_finalise_status_update(vnode, server); afs_put_server(server); d_instantiate(dentry, inode); if (d_unhashed(dentry)) { _debug("not hashed"); d_rehash(dentry); } key_put(key); _leave(" = 0"); return 0; iget_error: afs_put_server(server); create_error: key_put(key); error: d_drop(dentry); _leave(" = %d", ret); return ret; } /* * create a hard link between files in an AFS filesystem */ static int afs_link(struct dentry 
*from, struct inode *dir, struct dentry *dentry) { struct afs_vnode *dvnode, *vnode; struct key *key; int ret; vnode = AFS_FS_I(from->d_inode); dvnode = AFS_FS_I(dir); _enter("{%x:%u},{%x:%u},{%s}", vnode->fid.vid, vnode->fid.vnode, dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); ret = -ENAMETOOLONG; if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error; } ret = afs_vnode_link(dvnode, vnode, key, dentry->d_name.name); if (ret < 0) goto link_error; ihold(&vnode->vfs_inode); d_instantiate(dentry, &vnode->vfs_inode); key_put(key); _leave(" = 0"); return 0; link_error: key_put(key); error: d_drop(dentry); _leave(" = %d", ret); return ret; } /* * create a symlink in an AFS filesystem */ static int afs_symlink(struct inode *dir, struct dentry *dentry, const char *content) { struct afs_file_status status; struct afs_server *server; struct afs_vnode *dvnode, *vnode; struct afs_fid fid; struct inode *inode; struct key *key; int ret; dvnode = AFS_FS_I(dir); _enter("{%x:%u},{%s},%s", dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, content); ret = -ENAMETOOLONG; if (dentry->d_name.len >= AFSNAMEMAX) goto error; ret = -EINVAL; if (strlen(content) >= AFSPATHMAX) goto error; key = afs_request_key(dvnode->volume->cell); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error; } ret = afs_vnode_symlink(dvnode, key, dentry->d_name.name, content, &fid, &status, &server); if (ret < 0) goto create_error; inode = afs_iget(dir->i_sb, key, &fid, &status, NULL); if (IS_ERR(inode)) { /* ENOMEM at a really inconvenient time - just abandon the new * directory on the server */ ret = PTR_ERR(inode); goto iget_error; } /* apply the status report we've got for the new vnode */ vnode = AFS_FS_I(inode); spin_lock(&vnode->lock); vnode->update_cnt++; spin_unlock(&vnode->lock); afs_vnode_finalise_status_update(vnode, server); afs_put_server(server); d_instantiate(dentry, inode); if 
(d_unhashed(dentry)) { _debug("not hashed"); d_rehash(dentry); } key_put(key); _leave(" = 0"); return 0; iget_error: afs_put_server(server); create_error: key_put(key); error: d_drop(dentry); _leave(" = %d", ret); return ret; } /* * rename a file in an AFS filesystem and/or move it between directories */ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct afs_vnode *orig_dvnode, *new_dvnode, *vnode; struct key *key; int ret; vnode = AFS_FS_I(old_dentry->d_inode); orig_dvnode = AFS_FS_I(old_dir); new_dvnode = AFS_FS_I(new_dir); _enter("{%x:%u},{%x:%u},{%x:%u},{%s}", orig_dvnode->fid.vid, orig_dvnode->fid.vnode, vnode->fid.vid, vnode->fid.vnode, new_dvnode->fid.vid, new_dvnode->fid.vnode, new_dentry->d_name.name); ret = -ENAMETOOLONG; if (new_dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(orig_dvnode->volume->cell); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error; } ret = afs_vnode_rename(orig_dvnode, new_dvnode, key, old_dentry->d_name.name, new_dentry->d_name.name); if (ret < 0) goto rename_error; key_put(key); _leave(" = 0"); return 0; rename_error: key_put(key); error: d_drop(new_dentry); _leave(" = %d", ret); return ret; }
gpl-2.0
eousphoros/android_kernel_samsung_noblelte
net/ax25/ax25_ds_timer.c
4178
5904
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/spinlock.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/tcp_states.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> static void ax25_ds_timeout(unsigned long); /* * Add DAMA slave timeout timer to timer list. * Unlike the connection based timers the timeout function gets * triggered every second. Please note that NET_AX25_DAMA_SLAVE_TIMEOUT * (aka /proc/sys/net/ax25/{dev}/dama_slave_timeout) is still in * 1/10th of a second. */ void ax25_ds_setup_timer(ax25_dev *ax25_dev) { setup_timer(&ax25_dev->dama.slave_timer, ax25_ds_timeout, (unsigned long)ax25_dev); } void ax25_ds_del_timer(ax25_dev *ax25_dev) { if (ax25_dev) del_timer(&ax25_dev->dama.slave_timer); } void ax25_ds_set_timer(ax25_dev *ax25_dev) { if (ax25_dev == NULL) /* paranoia */ return; ax25_dev->dama.slave_timeout = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_DS_TIMEOUT]) / 10; mod_timer(&ax25_dev->dama.slave_timer, jiffies + HZ); } /* * DAMA Slave Timeout * Silently discard all (slave) connections in case our master forgot us... */ static void ax25_ds_timeout(unsigned long arg) { ax25_dev *ax25_dev = (struct ax25_dev *) arg; ax25_cb *ax25; if (ax25_dev == NULL || !ax25_dev->dama.slave) return; /* Yikes! 
*/ if (!ax25_dev->dama.slave_timeout || --ax25_dev->dama.slave_timeout) { ax25_ds_set_timer(ax25_dev); return; } spin_lock(&ax25_list_lock); ax25_for_each(ax25, &ax25_list) { if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE)) continue; ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_disconnect(ax25, ETIMEDOUT); } spin_unlock(&ax25_list_lock); ax25_dev_dama_off(ax25_dev); } void ax25_ds_heartbeat_expiry(ax25_cb *ax25) { struct sock *sk=ax25->sk; if (sk) bh_lock_sock(sk); switch (ax25->state) { case AX25_STATE_0: /* Magic here: If we listen() and a new link dies before it is accepted() it isn't 'dead' so doesn't get removed. */ if (!sk || sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { if (sk) { sock_hold(sk); ax25_destroy_socket(ax25); bh_unlock_sock(sk); sock_put(sk); } else ax25_destroy_socket(ax25); return; } break; case AX25_STATE_3: /* * Check the state of the receive buffer. */ if (sk != NULL) { if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) && (ax25->condition & AX25_COND_OWN_RX_BUSY)) { ax25->condition &= ~AX25_COND_OWN_RX_BUSY; ax25->condition &= ~AX25_COND_ACK_PENDING; break; } } break; } if (sk) bh_unlock_sock(sk); ax25_start_heartbeat(ax25); } /* dl1bke 960114: T3 works much like the IDLE timeout, but * gets reloaded with every frame for this * connection. */ void ax25_ds_t3timer_expiry(ax25_cb *ax25) { ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_dama_off(ax25); ax25_disconnect(ax25, ETIMEDOUT); } /* dl1bke 960228: close the connection when IDLE expires. * unlike T3 this timer gets reloaded only on * I frames. 
*/ void ax25_ds_idletimer_expiry(ax25_cb *ax25) { ax25_clear_queues(ax25); ax25->n2count = 0; ax25->state = AX25_STATE_2; ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); ax25_stop_t3timer(ax25); if (ax25->sk != NULL) { bh_lock_sock(ax25->sk); ax25->sk->sk_state = TCP_CLOSE; ax25->sk->sk_err = 0; ax25->sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(ax25->sk, SOCK_DEAD)) { ax25->sk->sk_state_change(ax25->sk); sock_set_flag(ax25->sk, SOCK_DEAD); } bh_unlock_sock(ax25->sk); } } /* dl1bke 960114: The DAMA protocol requires to send data and SABM/DISC * within the poll of any connected channel. Remember * that we are not allowed to send anything unless we * get polled by the Master. * * Thus we'll have to do parts of our T1 handling in * ax25_enquiry_response(). */ void ax25_ds_t1_timeout(ax25_cb *ax25) { switch (ax25->state) { case AX25_STATE_1: if (ax25->n2count == ax25->n2) { if (ax25->modulus == AX25_MODULUS) { ax25_disconnect(ax25, ETIMEDOUT); return; } else { ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; ax25->n2count = 0; ax25_send_control(ax25, AX25_SABM, AX25_POLLOFF, AX25_COMMAND); } } else { ax25->n2count++; if (ax25->modulus == AX25_MODULUS) ax25_send_control(ax25, AX25_SABM, AX25_POLLOFF, AX25_COMMAND); else ax25_send_control(ax25, AX25_SABME, AX25_POLLOFF, AX25_COMMAND); } break; case AX25_STATE_2: if (ax25->n2count == ax25->n2) { ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_disconnect(ax25, ETIMEDOUT); return; } else { ax25->n2count++; } break; case AX25_STATE_3: if (ax25->n2count == ax25->n2) { ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE); ax25_disconnect(ax25, ETIMEDOUT); return; } else { ax25->n2count++; } break; } ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); }
gpl-2.0
Stane1983/amlogic-m1
drivers/input/mouse/gpio_mouse.c
4178
4888
/* * Driver for simulating a mouse on GPIO lines. * * Copyright (C) 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/input-polldev.h> #include <linux/gpio_mouse.h> #include <asm/gpio.h> /* * Timer function which is run every scan_ms ms when the device is opened. * The dev input variable is set to the the input_dev pointer. */ static void gpio_mouse_scan(struct input_polled_dev *dev) { struct gpio_mouse_platform_data *gpio = dev->private; struct input_dev *input = dev->input; int x, y; if (gpio->bleft >= 0) input_report_key(input, BTN_LEFT, gpio_get_value(gpio->bleft) ^ gpio->polarity); if (gpio->bmiddle >= 0) input_report_key(input, BTN_MIDDLE, gpio_get_value(gpio->bmiddle) ^ gpio->polarity); if (gpio->bright >= 0) input_report_key(input, BTN_RIGHT, gpio_get_value(gpio->bright) ^ gpio->polarity); x = (gpio_get_value(gpio->right) ^ gpio->polarity) - (gpio_get_value(gpio->left) ^ gpio->polarity); y = (gpio_get_value(gpio->down) ^ gpio->polarity) - (gpio_get_value(gpio->up) ^ gpio->polarity); input_report_rel(input, REL_X, x); input_report_rel(input, REL_Y, y); input_sync(input); } static int __devinit gpio_mouse_probe(struct platform_device *pdev) { struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data; struct input_polled_dev *input_poll; struct input_dev *input; int pin, i; int error; if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); error = -ENXIO; goto out; } if (pdata->scan_ms < 0) { dev_err(&pdev->dev, "invalid scan time\n"); error = -EINVAL; goto out; } for (i = 0; i < GPIO_MOUSE_PIN_MAX; i++) { pin = pdata->pins[i]; if (pin < 0) { if (i <= GPIO_MOUSE_PIN_RIGHT) { /* Mouse direction is required. 
*/ dev_err(&pdev->dev, "missing GPIO for directions\n"); error = -EINVAL; goto out_free_gpios; } if (i == GPIO_MOUSE_PIN_BLEFT) dev_dbg(&pdev->dev, "no left button defined\n"); } else { error = gpio_request(pin, "gpio_mouse"); if (error) { dev_err(&pdev->dev, "fail %d pin (%d idx)\n", pin, i); goto out_free_gpios; } gpio_direction_input(pin); } } input_poll = input_allocate_polled_device(); if (!input_poll) { dev_err(&pdev->dev, "not enough memory for input device\n"); error = -ENOMEM; goto out_free_gpios; } platform_set_drvdata(pdev, input_poll); /* set input-polldev handlers */ input_poll->private = pdata; input_poll->poll = gpio_mouse_scan; input_poll->poll_interval = pdata->scan_ms; input = input_poll->input; input->name = pdev->name; input->id.bustype = BUS_HOST; input->dev.parent = &pdev->dev; input_set_capability(input, EV_REL, REL_X); input_set_capability(input, EV_REL, REL_Y); if (pdata->bleft >= 0) input_set_capability(input, EV_KEY, BTN_LEFT); if (pdata->bmiddle >= 0) input_set_capability(input, EV_KEY, BTN_MIDDLE); if (pdata->bright >= 0) input_set_capability(input, EV_KEY, BTN_RIGHT); error = input_register_polled_device(input_poll); if (error) { dev_err(&pdev->dev, "could not register input device\n"); goto out_free_polldev; } dev_dbg(&pdev->dev, "%d ms scan time, buttons: %s%s%s\n", pdata->scan_ms, pdata->bleft < 0 ? "" : "left ", pdata->bmiddle < 0 ? "" : "middle ", pdata->bright < 0 ? 
"" : "right"); return 0; out_free_polldev: input_free_polled_device(input_poll); platform_set_drvdata(pdev, NULL); out_free_gpios: while (--i >= 0) { pin = pdata->pins[i]; if (pin) gpio_free(pin); } out: return error; } static int __devexit gpio_mouse_remove(struct platform_device *pdev) { struct input_polled_dev *input = platform_get_drvdata(pdev); struct gpio_mouse_platform_data *pdata = input->private; int pin, i; input_unregister_polled_device(input); input_free_polled_device(input); for (i = 0; i < GPIO_MOUSE_PIN_MAX; i++) { pin = pdata->pins[i]; if (pin >= 0) gpio_free(pin); } platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver gpio_mouse_device_driver = { .probe = gpio_mouse_probe, .remove = __devexit_p(gpio_mouse_remove), .driver = { .name = "gpio_mouse", .owner = THIS_MODULE, } }; static int __init gpio_mouse_init(void) { return platform_driver_register(&gpio_mouse_device_driver); } module_init(gpio_mouse_init); static void __exit gpio_mouse_exit(void) { platform_driver_unregister(&gpio_mouse_device_driver); } module_exit(gpio_mouse_exit); MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); MODULE_DESCRIPTION("GPIO mouse driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */
gpl-2.0
Hashcode/android_kernel_samsung_hlte
drivers/scsi/fcoe/fcoe_ctlr.c
4690
76093
/* * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/errno.h> #include <linux/bitops.h> #include <linux/slab.h> #include <net/rtnetlink.h> #include <scsi/fc/fc_els.h> #include <scsi/fc/fc_fs.h> #include <scsi/fc/fc_fip.h> #include <scsi/fc/fc_encaps.h> #include <scsi/fc/fc_fcoe.h> #include <scsi/fc/fc_fcp.h> #include <scsi/libfc.h> #include <scsi/libfcoe.h> #include "libfcoe.h" #define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */ #define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */ static void fcoe_ctlr_timeout(unsigned long); static void fcoe_ctlr_timer_work(struct work_struct *); static void fcoe_ctlr_recv_work(struct work_struct *); static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *); static void fcoe_ctlr_vn_start(struct fcoe_ctlr *); static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *); static int 
fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *); static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS; static const char * const fcoe_ctlr_states[] = { [FIP_ST_DISABLED] = "DISABLED", [FIP_ST_LINK_WAIT] = "LINK_WAIT", [FIP_ST_AUTO] = "AUTO", [FIP_ST_NON_FIP] = "NON_FIP", [FIP_ST_ENABLED] = "ENABLED", [FIP_ST_VNMP_START] = "VNMP_START", [FIP_ST_VNMP_PROBE1] = "VNMP_PROBE1", [FIP_ST_VNMP_PROBE2] = "VNMP_PROBE2", [FIP_ST_VNMP_CLAIM] = "VNMP_CLAIM", [FIP_ST_VNMP_UP] = "VNMP_UP", }; static const char *fcoe_ctlr_state(enum fip_state state) { const char *cp = "unknown"; if (state < ARRAY_SIZE(fcoe_ctlr_states)) cp = fcoe_ctlr_states[state]; if (!cp) cp = "unknown"; return cp; } /** * fcoe_ctlr_set_state() - Set and do debug printing for the new FIP state. * @fip: The FCoE controller * @state: The new state */ static void fcoe_ctlr_set_state(struct fcoe_ctlr *fip, enum fip_state state) { if (state == fip->state) return; if (fip->lp) LIBFCOE_FIP_DBG(fip, "state %s -> %s\n", fcoe_ctlr_state(fip->state), fcoe_ctlr_state(state)); fip->state = state; } /** * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid * @fcf: The FCF to check * * Return non-zero if FCF fcoe_size has been validated. */ static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) { return (fcf->flags & FIP_FL_SOL) != 0; } /** * fcoe_ctlr_fcf_usable() - Check if a FCF is usable * @fcf: The FCF to check * * Return non-zero if the FCF is usable. 
*/ static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) { u16 flags = FIP_FL_SOL | FIP_FL_AVAIL; return (fcf->flags & flags) == flags; } /** * fcoe_ctlr_map_dest() - Set flag and OUI for mapping destination addresses * @fip: The FCoE controller */ static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip) { if (fip->mode == FIP_MODE_VN2VN) hton24(fip->dest_addr, FIP_VN_FC_MAP); else hton24(fip->dest_addr, FIP_DEF_FC_MAP); hton24(fip->dest_addr + 3, 0); fip->map_dest = 1; } /** * fcoe_ctlr_init() - Initialize the FCoE Controller instance * @fip: The FCoE controller to initialize */ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) { fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); fip->mode = mode; INIT_LIST_HEAD(&fip->fcfs); mutex_init(&fip->ctlr_mutex); spin_lock_init(&fip->ctlr_lock); fip->flogi_oxid = FC_XID_UNKNOWN; setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip); INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work); INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work); skb_queue_head_init(&fip->fip_recv_list); } EXPORT_SYMBOL(fcoe_ctlr_init); /** * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller * @fip: The FCoE controller whose FCFs are to be reset * * Called with &fcoe_ctlr lock held. */ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; struct fcoe_fcf *next; fip->sel_fcf = NULL; list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { list_del(&fcf->list); kfree(fcf); } fip->fcf_count = 0; fip->sel_time = 0; } /** * fcoe_ctlr_destroy() - Disable and tear down a FCoE controller * @fip: The FCoE controller to tear down * * This is called by FCoE drivers before freeing the &fcoe_ctlr. * * The receive handler will have been deleted before this to guarantee * that no more recv_work will be scheduled. * * The timer routine will simply return once we set FIP_ST_DISABLED. * This guarantees that no further timeouts or work will be scheduled. 
*/ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) { cancel_work_sync(&fip->recv_work); skb_queue_purge(&fip->fip_recv_list); mutex_lock(&fip->ctlr_mutex); fcoe_ctlr_set_state(fip, FIP_ST_DISABLED); fcoe_ctlr_reset_fcfs(fip); mutex_unlock(&fip->ctlr_mutex); del_timer_sync(&fip->timer); cancel_work_sync(&fip->timer_work); } EXPORT_SYMBOL(fcoe_ctlr_destroy); /** * fcoe_ctlr_announce() - announce new FCF selection * @fip: The FCoE controller * * Also sets the destination MAC for FCoE and control packets * * Called with neither ctlr_mutex nor ctlr_lock held. */ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) { struct fcoe_fcf *sel; struct fcoe_fcf *fcf; mutex_lock(&fip->ctlr_mutex); spin_lock_bh(&fip->ctlr_lock); kfree_skb(fip->flogi_req); fip->flogi_req = NULL; list_for_each_entry(fcf, &fip->fcfs, list) fcf->flogi_sent = 0; spin_unlock_bh(&fip->ctlr_lock); sel = fip->sel_fcf; if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr)) goto unlock; if (!is_zero_ether_addr(fip->dest_addr)) { printk(KERN_NOTICE "libfcoe: host%d: " "FIP Fibre-Channel Forwarder MAC %pM deselected\n", fip->lp->host->host_no, fip->dest_addr); memset(fip->dest_addr, 0, ETH_ALEN); } if (sel) { printk(KERN_INFO "libfcoe: host%d: FIP selected " "Fibre-Channel Forwarder MAC %pM\n", fip->lp->host->host_no, sel->fcf_mac); memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN); fip->map_dest = 0; } unlock: mutex_unlock(&fip->ctlr_mutex); } /** * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port * @fip: The FCoE controller to get the maximum FCoE size from * * Returns the maximum packet size including the FCoE header and trailer, * but not including any Ethernet or VLAN headers. */ static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip) { /* * Determine the max FCoE frame size allowed, including * FCoE header and trailer. * Note: lp->mfs is currently the payload size, not the frame size. 
*/ return fip->lp->mfs + sizeof(struct fc_frame_header) + sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof); } /** * fcoe_ctlr_solicit() - Send a FIP solicitation * @fip: The FCoE controller to send the solicitation on * @fcf: The destination FCF (if NULL, a multicast solicitation is sent) */ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) { struct sk_buff *skb; struct fip_sol { struct ethhdr eth; struct fip_header fip; struct { struct fip_mac_desc mac; struct fip_wwn_desc wwnn; struct fip_size_desc size; } __packed desc; } __packed * sol; u32 fcoe_size; skb = dev_alloc_skb(sizeof(*sol)); if (!skb) return; sol = (struct fip_sol *)skb->data; memset(sol, 0, sizeof(*sol)); memcpy(sol->eth.h_dest, fcf ? fcf->fcf_mac : fcoe_all_fcfs, ETH_ALEN); memcpy(sol->eth.h_source, fip->ctl_src_addr, ETH_ALEN); sol->eth.h_proto = htons(ETH_P_FIP); sol->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); sol->fip.fip_op = htons(FIP_OP_DISC); sol->fip.fip_subcode = FIP_SC_SOL; sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW); sol->fip.fip_flags = htons(FIP_FL_FPMA); if (fip->spma) sol->fip.fip_flags |= htons(FIP_FL_SPMA); sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW; memcpy(sol->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); sol->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; sol->desc.wwnn.fd_desc.fip_dlen = sizeof(sol->desc.wwnn) / FIP_BPW; put_unaligned_be64(fip->lp->wwnn, &sol->desc.wwnn.fd_wwn); fcoe_size = fcoe_ctlr_fcoe_size(fip); sol->desc.size.fd_desc.fip_dtype = FIP_DT_FCOE_SIZE; sol->desc.size.fd_desc.fip_dlen = sizeof(sol->desc.size) / FIP_BPW; sol->desc.size.fd_size = htons(fcoe_size); skb_put(skb, sizeof(*sol)); skb->protocol = htons(ETH_P_FIP); skb->priority = fip->priority; skb_reset_mac_header(skb); skb_reset_network_header(skb); fip->send(fip, skb); if (!fcf) fip->sol_time = jiffies; } /** * fcoe_ctlr_link_up() - Start FCoE controller * @fip: The FCoE controller to start * * 
Called from the LLD when the network link is ready. */ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) { mutex_lock(&fip->ctlr_mutex); if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) { mutex_unlock(&fip->ctlr_mutex); fc_linkup(fip->lp); } else if (fip->state == FIP_ST_LINK_WAIT) { fcoe_ctlr_set_state(fip, fip->mode); switch (fip->mode) { default: LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode); /* fall-through */ case FIP_MODE_AUTO: LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); /* fall-through */ case FIP_MODE_FABRIC: case FIP_MODE_NON_FIP: mutex_unlock(&fip->ctlr_mutex); fc_linkup(fip->lp); fcoe_ctlr_solicit(fip, NULL); break; case FIP_MODE_VN2VN: fcoe_ctlr_vn_start(fip); mutex_unlock(&fip->ctlr_mutex); fc_linkup(fip->lp); break; } } else mutex_unlock(&fip->ctlr_mutex); } EXPORT_SYMBOL(fcoe_ctlr_link_up); /** * fcoe_ctlr_reset() - Reset a FCoE controller * @fip: The FCoE controller to reset */ static void fcoe_ctlr_reset(struct fcoe_ctlr *fip) { fcoe_ctlr_reset_fcfs(fip); del_timer(&fip->timer); fip->ctlr_ka_time = 0; fip->port_ka_time = 0; fip->sol_time = 0; fip->flogi_oxid = FC_XID_UNKNOWN; fcoe_ctlr_map_dest(fip); } /** * fcoe_ctlr_link_down() - Stop a FCoE controller * @fip: The FCoE controller to be stopped * * Returns non-zero if the link was up and now isn't. * * Called from the LLD when the network link is not ready. * There may be multiple calls while the link is down. 
*/ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) { int link_dropped; LIBFCOE_FIP_DBG(fip, "link down.\n"); mutex_lock(&fip->ctlr_mutex); fcoe_ctlr_reset(fip); link_dropped = fip->state != FIP_ST_LINK_WAIT; fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); mutex_unlock(&fip->ctlr_mutex); if (link_dropped) fc_linkdown(fip->lp); return link_dropped; } EXPORT_SYMBOL(fcoe_ctlr_link_down); /** * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF * @fip: The FCoE controller to send the FKA on * @lport: libfc fc_lport to send from * @ports: 0 for controller keep-alive, 1 for port keep-alive * @sa: The source MAC address * * A controller keep-alive is sent every fka_period (typically 8 seconds). * The source MAC is the native MAC address. * * A port keep-alive is sent every 90 seconds while logged in. * The source MAC is the assigned mapped source address. * The destination is the FCF's F-port. */ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, struct fc_lport *lport, int ports, u8 *sa) { struct sk_buff *skb; struct fip_kal { struct ethhdr eth; struct fip_header fip; struct fip_mac_desc mac; } __packed * kal; struct fip_vn_desc *vn; u32 len; struct fc_lport *lp; struct fcoe_fcf *fcf; fcf = fip->sel_fcf; lp = fip->lp; if (!fcf || (ports && !lp->port_id)) return; len = sizeof(*kal) + ports * sizeof(*vn); skb = dev_alloc_skb(len); if (!skb) return; kal = (struct fip_kal *)skb->data; memset(kal, 0, len); memcpy(kal->eth.h_dest, fcf->fcf_mac, ETH_ALEN); memcpy(kal->eth.h_source, sa, ETH_ALEN); kal->eth.h_proto = htons(ETH_P_FIP); kal->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); kal->fip.fip_op = htons(FIP_OP_CTRL); kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE; kal->fip.fip_dl_len = htons((sizeof(kal->mac) + ports * sizeof(*vn)) / FIP_BPW); kal->fip.fip_flags = htons(FIP_FL_FPMA); if (fip->spma) kal->fip.fip_flags |= htons(FIP_FL_SPMA); kal->mac.fd_desc.fip_dtype = FIP_DT_MAC; kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW; memcpy(kal->mac.fd_mac, 
fip->ctl_src_addr, ETH_ALEN); if (ports) { vn = (struct fip_vn_desc *)(kal + 1); vn->fd_desc.fip_dtype = FIP_DT_VN_ID; vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW; memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN); hton24(vn->fd_fc_id, lport->port_id); put_unaligned_be64(lport->wwpn, &vn->fd_wwpn); } skb_put(skb, len); skb->protocol = htons(ETH_P_FIP); skb->priority = fip->priority; skb_reset_mac_header(skb); skb_reset_network_header(skb); fip->send(fip, skb); } /** * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it * @fip: The FCoE controller for the ELS frame * @dtype: The FIP descriptor type for the frame * @skb: The FCoE ELS frame including FC header but no FCoE headers * @d_id: The destination port ID. * * Returns non-zero error code on failure. * * The caller must check that the length is a multiple of 4. * * The @skb must have enough headroom (28 bytes) and tailroom (8 bytes). * Headroom includes the FIP encapsulation description, FIP header, and * Ethernet header. The tailroom is for the FIP MAC descriptor. */ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, u8 dtype, struct sk_buff *skb, u32 d_id) { struct fip_encaps_head { struct ethhdr eth; struct fip_header fip; struct fip_encaps encaps; } __packed * cap; struct fc_frame_header *fh; struct fip_mac_desc *mac; struct fcoe_fcf *fcf; size_t dlen; u16 fip_flags; u8 op; fh = (struct fc_frame_header *)skb->data; op = *(u8 *)(fh + 1); dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */ cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap)); memset(cap, 0, sizeof(*cap)); if (lport->point_to_multipoint) { if (fcoe_ctlr_vn_lookup(fip, d_id, cap->eth.h_dest)) return -ENODEV; fip_flags = 0; } else { fcf = fip->sel_fcf; if (!fcf) return -ENODEV; fip_flags = fcf->flags; fip_flags &= fip->spma ? 
FIP_FL_SPMA | FIP_FL_FPMA : FIP_FL_FPMA; if (!fip_flags) return -ENODEV; memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN); } memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN); cap->eth.h_proto = htons(ETH_P_FIP); cap->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); cap->fip.fip_op = htons(FIP_OP_LS); if (op == ELS_LS_ACC || op == ELS_LS_RJT) cap->fip.fip_subcode = FIP_SC_REP; else cap->fip.fip_subcode = FIP_SC_REQ; cap->fip.fip_flags = htons(fip_flags); cap->encaps.fd_desc.fip_dtype = dtype; cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW; if (op != ELS_LS_RJT) { dlen += sizeof(*mac); mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac)); memset(mac, 0, sizeof(*mac)); mac->fd_desc.fip_dtype = FIP_DT_MAC; mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) { memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN); } else if (fip->mode == FIP_MODE_VN2VN) { hton24(mac->fd_mac, FIP_VN_FC_MAP); hton24(mac->fd_mac + 3, fip->port_id); } else if (fip_flags & FIP_FL_SPMA) { LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n"); memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); } else { LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n"); /* FPMA only FLOGI. Must leave the MAC desc zeroed. */ } } cap->fip.fip_dl_len = htons(dlen / FIP_BPW); skb->protocol = htons(ETH_P_FIP); skb->priority = fip->priority; skb_reset_mac_header(skb); skb_reset_network_header(skb); return 0; } /** * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate. * @fip: FCoE controller. * @lport: libfc fc_lport to send from * @skb: FCoE ELS frame including FC header but no FCoE headers. * * Returns a non-zero error code if the frame should not be sent. * Returns zero if the caller should send the frame with FCoE encapsulation. * * The caller must check that the length is a multiple of 4. * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). * The the skb must also be an fc_frame. 
* * This is called from the lower-level driver with spinlocks held, * so we must not take a mutex here. */ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, struct sk_buff *skb) { struct fc_frame *fp; struct fc_frame_header *fh; u16 old_xid; u8 op; u8 mac[ETH_ALEN]; fp = container_of(skb, struct fc_frame, skb); fh = (struct fc_frame_header *)skb->data; op = *(u8 *)(fh + 1); if (op == ELS_FLOGI && fip->mode != FIP_MODE_VN2VN) { old_xid = fip->flogi_oxid; fip->flogi_oxid = ntohs(fh->fh_ox_id); if (fip->state == FIP_ST_AUTO) { if (old_xid == FC_XID_UNKNOWN) fip->flogi_count = 0; fip->flogi_count++; if (fip->flogi_count < 3) goto drop; fcoe_ctlr_map_dest(fip); return 0; } if (fip->state == FIP_ST_NON_FIP) fcoe_ctlr_map_dest(fip); } if (fip->state == FIP_ST_NON_FIP) return 0; if (!fip->sel_fcf && fip->mode != FIP_MODE_VN2VN) goto drop; switch (op) { case ELS_FLOGI: op = FIP_DT_FLOGI; if (fip->mode == FIP_MODE_VN2VN) break; spin_lock_bh(&fip->ctlr_lock); kfree_skb(fip->flogi_req); fip->flogi_req = skb; fip->flogi_req_send = 1; spin_unlock_bh(&fip->ctlr_lock); schedule_work(&fip->timer_work); return -EINPROGRESS; case ELS_FDISC: if (ntoh24(fh->fh_s_id)) return 0; op = FIP_DT_FDISC; break; case ELS_LOGO: if (fip->mode == FIP_MODE_VN2VN) { if (fip->state != FIP_ST_VNMP_UP) return -EINVAL; if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) return -EINVAL; } else { if (fip->state != FIP_ST_ENABLED) return 0; if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) return 0; } op = FIP_DT_LOGO; break; case ELS_LS_ACC: /* * If non-FIP, we may have gotten an SID by accepting an FLOGI * from a point-to-point connection. Switch to using * the source mac based on the SID. The destination * MAC in this case would have been set by receiving the * FLOGI. 
*/ if (fip->state == FIP_ST_NON_FIP) { if (fip->flogi_oxid == FC_XID_UNKNOWN) return 0; fip->flogi_oxid = FC_XID_UNKNOWN; fc_fcoe_set_mac(mac, fh->fh_d_id); fip->update_mac(lport, mac); } /* fall through */ case ELS_LS_RJT: op = fr_encaps(fp); if (op) break; return 0; default: if (fip->state != FIP_ST_ENABLED && fip->state != FIP_ST_VNMP_UP) goto drop; return 0; } LIBFCOE_FIP_DBG(fip, "els_send op %u d_id %x\n", op, ntoh24(fh->fh_d_id)); if (fcoe_ctlr_encaps(fip, lport, op, skb, ntoh24(fh->fh_d_id))) goto drop; fip->send(fip, skb); return -EINPROGRESS; drop: kfree_skb(skb); return -EINVAL; } EXPORT_SYMBOL(fcoe_ctlr_els_send); /** * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller * @fip: The FCoE controller to free FCFs on * * Called with lock held and preemption disabled. * * An FCF is considered old if we have missed two advertisements. * That is, there have been no valid advertisement from it for 2.5 * times its keep-alive period. * * In addition, determine the time when an FCF selection can occur. * * Also, increment the MissDiscAdvCount when no advertisement is received * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB). * * Returns the time in jiffies for the next call. 
*/ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; struct fcoe_fcf *next; unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); unsigned long deadline; unsigned long sel_time = 0; struct fcoe_dev_stats *stats; stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu()); list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; if (fip->sel_fcf == fcf) { if (time_after(jiffies, deadline)) { stats->MissDiscAdvCount++; printk(KERN_INFO "libfcoe: host%d: " "Missing Discovery Advertisement " "for fab %16.16llx count %lld\n", fip->lp->host->host_no, fcf->fabric_name, stats->MissDiscAdvCount); } else if (time_after(next_timer, deadline)) next_timer = deadline; } deadline += fcf->fka_period; if (time_after_eq(jiffies, deadline)) { if (fip->sel_fcf == fcf) fip->sel_fcf = NULL; list_del(&fcf->list); WARN_ON(!fip->fcf_count); fip->fcf_count--; kfree(fcf); stats->VLinkFailureCount++; } else { if (time_after(next_timer, deadline)) next_timer = deadline; if (fcoe_ctlr_mtu_valid(fcf) && (!sel_time || time_before(sel_time, fcf->time))) sel_time = fcf->time; } } put_cpu(); if (sel_time && !fip->sel_fcf && !fip->sel_time) { sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); fip->sel_time = sel_time; } return next_timer; } /** * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry * @fip: The FCoE controller receiving the advertisement * @skb: The received FIP advertisement frame * @fcf: The resulting FCF entry * * Returns zero on a valid parsed advertisement, * otherwise returns non zero value. 
*/ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, struct sk_buff *skb, struct fcoe_fcf *fcf) { struct fip_header *fiph; struct fip_desc *desc = NULL; struct fip_wwn_desc *wwn; struct fip_fab_desc *fab; struct fip_fka_desc *fka; unsigned long t; size_t rlen; size_t dlen; u32 desc_mask; memset(fcf, 0, sizeof(*fcf)); fcf->fka_period = msecs_to_jiffies(FCOE_CTLR_DEF_FKA); fiph = (struct fip_header *)skb->data; fcf->flags = ntohs(fiph->fip_flags); /* * mask of required descriptors. validating each one clears its bit. */ desc_mask = BIT(FIP_DT_PRI) | BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_FAB) | BIT(FIP_DT_FKA); rlen = ntohs(fiph->fip_dl_len) * 4; if (rlen + sizeof(*fiph) > skb->len) return -EINVAL; desc = (struct fip_desc *)(fiph + 1); while (rlen > 0) { dlen = desc->fip_dlen * FIP_BPW; if (dlen < sizeof(*desc) || dlen > rlen) return -EINVAL; /* Drop Adv if there are duplicate critical descriptors */ if ((desc->fip_dtype < 32) && !(desc_mask & 1U << desc->fip_dtype)) { LIBFCOE_FIP_DBG(fip, "Duplicate Critical " "Descriptors in FIP adv\n"); return -EINVAL; } switch (desc->fip_dtype) { case FIP_DT_PRI: if (dlen != sizeof(struct fip_pri_desc)) goto len_err; fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri; desc_mask &= ~BIT(FIP_DT_PRI); break; case FIP_DT_MAC: if (dlen != sizeof(struct fip_mac_desc)) goto len_err; memcpy(fcf->fcf_mac, ((struct fip_mac_desc *)desc)->fd_mac, ETH_ALEN); memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN); if (!is_valid_ether_addr(fcf->fcf_mac)) { LIBFCOE_FIP_DBG(fip, "Invalid MAC addr %pM in FIP adv\n", fcf->fcf_mac); return -EINVAL; } desc_mask &= ~BIT(FIP_DT_MAC); break; case FIP_DT_NAME: if (dlen != sizeof(struct fip_wwn_desc)) goto len_err; wwn = (struct fip_wwn_desc *)desc; fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn); desc_mask &= ~BIT(FIP_DT_NAME); break; case FIP_DT_FAB: if (dlen != sizeof(struct fip_fab_desc)) goto len_err; fab = (struct fip_fab_desc *)desc; fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn); 
fcf->vfid = ntohs(fab->fd_vfid); fcf->fc_map = ntoh24(fab->fd_map); desc_mask &= ~BIT(FIP_DT_FAB); break; case FIP_DT_FKA: if (dlen != sizeof(struct fip_fka_desc)) goto len_err; fka = (struct fip_fka_desc *)desc; if (fka->fd_flags & FIP_FKA_ADV_D) fcf->fd_flags = 1; t = ntohl(fka->fd_fka_period); if (t >= FCOE_CTLR_MIN_FKA) fcf->fka_period = msecs_to_jiffies(t); desc_mask &= ~BIT(FIP_DT_FKA); break; case FIP_DT_MAP_OUI: case FIP_DT_FCOE_SIZE: case FIP_DT_FLOGI: case FIP_DT_FDISC: case FIP_DT_LOGO: case FIP_DT_ELP: default: LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ if (desc->fip_dtype < FIP_DT_VENDOR_BASE) return -EINVAL; break; } desc = (struct fip_desc *)((char *)desc + dlen); rlen -= dlen; } if (!fcf->fc_map || (fcf->fc_map & 0x10000)) return -EINVAL; if (!fcf->switch_name) return -EINVAL; if (desc_mask) { LIBFCOE_FIP_DBG(fip, "adv missing descriptors mask %x\n", desc_mask); return -EINVAL; } return 0; len_err: LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", desc->fip_dtype, dlen); return -EINVAL; } /** * fcoe_ctlr_recv_adv() - Handle an incoming advertisement * @fip: The FCoE controller receiving the advertisement * @skb: The received FIP packet */ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fcoe_fcf *fcf; struct fcoe_fcf new; struct fcoe_fcf *found; unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); int first = 0; int mtu_valid; if (fcoe_ctlr_parse_adv(fip, skb, &new)) return; mutex_lock(&fip->ctlr_mutex); first = list_empty(&fip->fcfs); found = NULL; list_for_each_entry(fcf, &fip->fcfs, list) { if (fcf->switch_name == new.switch_name && fcf->fabric_name == new.fabric_name && fcf->fc_map == new.fc_map && compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { found = fcf; break; } } if (!found) { if (fip->fcf_count >= FCOE_CTLR_FCF_LIMIT) goto out; fcf = kmalloc(sizeof(*fcf), GFP_ATOMIC); if 
(!fcf) goto out; fip->fcf_count++; memcpy(fcf, &new, sizeof(new)); list_add(&fcf->list, &fip->fcfs); } else { /* * Update the FCF's keep-alive descriptor flags. * Other flag changes from new advertisements are * ignored after a solicited advertisement is * received and the FCF is selectable (usable). */ fcf->fd_flags = new.fd_flags; if (!fcoe_ctlr_fcf_usable(fcf)) fcf->flags = new.flags; if (fcf == fip->sel_fcf && !fcf->fd_flags) { fip->ctlr_ka_time -= fcf->fka_period; fip->ctlr_ka_time += new.fka_period; if (time_before(fip->ctlr_ka_time, fip->timer.expires)) mod_timer(&fip->timer, fip->ctlr_ka_time); } fcf->fka_period = new.fka_period; memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN); } mtu_valid = fcoe_ctlr_mtu_valid(fcf); fcf->time = jiffies; if (!found) LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n", fcf->fabric_name, fcf->fcf_mac); /* * If this advertisement is not solicited and our max receive size * hasn't been verified, send a solicited advertisement. */ if (!mtu_valid) fcoe_ctlr_solicit(fip, fcf); /* * If its been a while since we did a solicit, and this is * the first advertisement we've received, do a multicast * solicitation to gather as many advertisements as we can * before selection occurs. */ if (first && time_after(jiffies, fip->sol_time + sol_tov)) fcoe_ctlr_solicit(fip, NULL); /* * Put this FCF at the head of the list for priority among equals. * This helps in the case of an NPV switch which insists we use * the FCF that answers multicast solicitations, not the others that * are sending periodic multicast advertisements. */ if (mtu_valid) list_move(&fcf->list, &fip->fcfs); /* * If this is the first validated FCF, note the time and * set a timer to trigger selection. 
*/ if (mtu_valid && !fip->sel_fcf && fcoe_ctlr_fcf_usable(fcf)) { fip->sel_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); if (!timer_pending(&fip->timer) || time_before(fip->sel_time, fip->timer.expires)) mod_timer(&fip->timer, fip->sel_time); } out: mutex_unlock(&fip->ctlr_mutex); } /** * fcoe_ctlr_recv_els() - Handle an incoming FIP encapsulated ELS frame * @fip: The FCoE controller which received the packet * @skb: The received FIP packet */ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fc_lport *lport = fip->lp; struct fip_header *fiph; struct fc_frame *fp = (struct fc_frame *)skb; struct fc_frame_header *fh = NULL; struct fip_desc *desc; struct fip_encaps *els; struct fcoe_dev_stats *stats; struct fcoe_fcf *sel; enum fip_desc_type els_dtype = 0; u8 els_op; u8 sub; u8 granted_mac[ETH_ALEN] = { 0 }; size_t els_len = 0; size_t rlen; size_t dlen; u32 desc_mask = 0; u32 desc_cnt = 0; fiph = (struct fip_header *)skb->data; sub = fiph->fip_subcode; if (sub != FIP_SC_REQ && sub != FIP_SC_REP) goto drop; rlen = ntohs(fiph->fip_dl_len) * 4; if (rlen + sizeof(*fiph) > skb->len) goto drop; desc = (struct fip_desc *)(fiph + 1); while (rlen > 0) { desc_cnt++; dlen = desc->fip_dlen * FIP_BPW; if (dlen < sizeof(*desc) || dlen > rlen) goto drop; /* Drop ELS if there are duplicate critical descriptors */ if (desc->fip_dtype < 32) { if ((desc->fip_dtype != FIP_DT_MAC) && (desc_mask & 1U << desc->fip_dtype)) { LIBFCOE_FIP_DBG(fip, "Duplicate Critical " "Descriptors in FIP ELS\n"); goto drop; } desc_mask |= (1 << desc->fip_dtype); } switch (desc->fip_dtype) { case FIP_DT_MAC: sel = fip->sel_fcf; if (desc_cnt == 1) { LIBFCOE_FIP_DBG(fip, "FIP descriptors " "received out of order\n"); goto drop; } /* * Some switch implementations send two MAC descriptors, * with first MAC(granted_mac) being the FPMA, and the * second one(fcoe_mac) is used as destination address * for sending/receiving FCoE packets. FIP traffic is * sent using fip_mac. 
For regular switches, both * fip_mac and fcoe_mac would be the same. */ if (desc_cnt == 2) memcpy(granted_mac, ((struct fip_mac_desc *)desc)->fd_mac, ETH_ALEN); if (dlen != sizeof(struct fip_mac_desc)) goto len_err; if ((desc_cnt == 3) && (sel)) memcpy(sel->fcoe_mac, ((struct fip_mac_desc *)desc)->fd_mac, ETH_ALEN); break; case FIP_DT_FLOGI: case FIP_DT_FDISC: case FIP_DT_LOGO: case FIP_DT_ELP: if (desc_cnt != 1) { LIBFCOE_FIP_DBG(fip, "FIP descriptors " "received out of order\n"); goto drop; } if (fh) goto drop; if (dlen < sizeof(*els) + sizeof(*fh) + 1) goto len_err; els_len = dlen - sizeof(*els); els = (struct fip_encaps *)desc; fh = (struct fc_frame_header *)(els + 1); els_dtype = desc->fip_dtype; break; default: LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ if (desc->fip_dtype < FIP_DT_VENDOR_BASE) goto drop; if (desc_cnt <= 2) { LIBFCOE_FIP_DBG(fip, "FIP descriptors " "received out of order\n"); goto drop; } break; } desc = (struct fip_desc *)((char *)desc + dlen); rlen -= dlen; } if (!fh) goto drop; els_op = *(u8 *)(fh + 1); if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) { if (els_op == ELS_LS_ACC) { if (!is_valid_ether_addr(granted_mac)) { LIBFCOE_FIP_DBG(fip, "Invalid MAC address %pM in FIP ELS\n", granted_mac); goto drop; } memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) { fip->flogi_oxid = FC_XID_UNKNOWN; if (els_dtype == FIP_DT_FLOGI) fcoe_ctlr_announce(fip); } } else if (els_dtype == FIP_DT_FLOGI && !fcoe_ctlr_flogi_retry(fip)) goto drop; /* retrying FLOGI so drop reject */ } if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) && (!(1U << FIP_DT_MAC & desc_mask)))) { LIBFCOE_FIP_DBG(fip, "Missing critical descriptors " "in FIP ELS\n"); goto drop; } /* * Convert skb into an fc_frame containing only the ELS. 
 */
	/* Strip everything before the FC frame header, trim to the ELS
	 * payload length, and hand the frame up as a libfc fc_frame. */
	skb_pull(skb, (u8 *)fh - skb->data);
	skb_trim(skb, els_len);
	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_dev(fp) = lport;
	fr_encaps(fp) = els_dtype;

	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
	stats->RxFrames++;
	stats->RxWords += skb->len / FIP_BPW;
	put_cpu();

	fc_exch_recv(lport, fp);
	return;

len_err:
	LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
			desc->fip_dtype, dlen);
drop:
	kfree_skb(skb);
}

/**
 * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame
 * @fip: The FCoE controller that received the frame
 * @fh:  The received FIP header
 *
 * There may be multiple VN_Port descriptors.
 * The overall length has already been checked.
 */
static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
				     struct fip_header *fh)
{
	struct fip_desc *desc;
	struct fip_mac_desc *mp;
	struct fip_wwn_desc *wp;
	struct fip_vn_desc *vp;
	size_t rlen;
	size_t dlen;
	struct fcoe_fcf *fcf = fip->sel_fcf;
	struct fc_lport *lport = fip->lp;
	struct fc_lport *vn_port = NULL;
	u32 desc_mask;
	int num_vlink_desc;
	int reset_phys_port = 0;
	struct fip_vn_desc **vlink_desc_arr = NULL;

	LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");

	/* Ignore CVLs unless we have a selected FCF and a logged-in port. */
	if (!fcf || !lport->port_id)
		return;

	/*
	 * mask of required descriptors. Validating each one clears its bit.
	 */
	desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);

	rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
	desc = (struct fip_desc *)(fh + 1);

	/*
	 * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
	 * before determining max Vx_Port descriptor but a buggy FCF could have
	 * omited either or both MAC Address and Name Identifier descriptors
	 */
	num_vlink_desc = rlen / sizeof(*vp);
	if (num_vlink_desc)
		vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
					 GFP_ATOMIC);
	/* Note: if num_vlink_desc was 0, the array is NULL and we bail out
	 * here without processing the CVL at all. */
	if (!vlink_desc_arr)
		return;
	num_vlink_desc = 0;

	/* First pass: validate every descriptor and collect VN_Port
	 * descriptor pointers for the second pass below. */
	while (rlen >= sizeof(*desc)) {
		dlen = desc->fip_dlen * FIP_BPW;
		if (dlen > rlen)
			goto err;
		/* Drop CVL if there are duplicate critical descriptors */
		if ((desc->fip_dtype < 32) &&
		    (desc->fip_dtype != FIP_DT_VN_ID) &&
		    !(desc_mask & 1U << desc->fip_dtype)) {
			LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
					"Descriptors in FIP CVL\n");
			goto err;
		}
		switch (desc->fip_dtype) {
		case FIP_DT_MAC:
			mp = (struct fip_mac_desc *)desc;
			if (dlen < sizeof(*mp))
				goto err;
			/* MAC must match the currently selected FCF. */
			if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
				goto err;
			desc_mask &= ~BIT(FIP_DT_MAC);
			break;
		case FIP_DT_NAME:
			wp = (struct fip_wwn_desc *)desc;
			if (dlen < sizeof(*wp))
				goto err;
			/* Switch name must match the selected FCF's. */
			if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
				goto err;
			desc_mask &= ~BIT(FIP_DT_NAME);
			break;
		case FIP_DT_VN_ID:
			vp = (struct fip_vn_desc *)desc;
			if (dlen < sizeof(*vp))
				goto err;
			vlink_desc_arr[num_vlink_desc++] = vp;
			vn_port = fc_vport_id_lookup(lport,
						     ntoh24(vp->fd_fc_id));
			/* A CVL naming the physical port resets the
			 * controller immediately. */
			if (vn_port && (vn_port == lport)) {
				mutex_lock(&fip->ctlr_mutex);
				per_cpu_ptr(lport->dev_stats,
					    get_cpu())->VLinkFailureCount++;
				put_cpu();
				fcoe_ctlr_reset(fip);
				mutex_unlock(&fip->ctlr_mutex);
			}
			break;
		default:
			/* standard says ignore unknown descriptors >= 128 */
			if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
				goto err;
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/*
	 * reset only if all required descriptors were present and valid.
	 */
	if (desc_mask)
		LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
				desc_mask);
	else if (!num_vlink_desc) {
		LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
		/*
		 * No Vx_Port description. Clear all NPIV ports,
		 * followed by physical port
		 */
		mutex_lock(&fip->ctlr_mutex);
		per_cpu_ptr(lport->dev_stats, get_cpu())->VLinkFailureCount++;
		put_cpu();
		fcoe_ctlr_reset(fip);
		mutex_unlock(&fip->ctlr_mutex);

		mutex_lock(&lport->lp_mutex);
		list_for_each_entry(vn_port, &lport->vports, list)
			fc_lport_reset(vn_port);
		mutex_unlock(&lport->lp_mutex);

		fc_lport_reset(fip->lp);
		fcoe_ctlr_solicit(fip, NULL);
	} else {
		int i;

		LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
		/* Second pass: reset every named Vx_Port whose MAC and
		 * WWPN both match a known local port. */
		for (i = 0; i < num_vlink_desc; i++) {
			vp = vlink_desc_arr[i];
			vn_port = fc_vport_id_lookup(lport,
						     ntoh24(vp->fd_fc_id));
			if (!vn_port)
				continue;
			/*
			 * 'port_id' is already validated, check MAC address and
			 * wwpn
			 */
			if (compare_ether_addr(fip->get_src_addr(vn_port),
					       vp->fd_mac) != 0 ||
			    get_unaligned_be64(&vp->fd_wwpn) != vn_port->wwpn)
				continue;

			if (vn_port == lport)
				/*
				 * Physical port, defer processing till all
				 * listed NPIV ports are cleared
				 */
				reset_phys_port = 1;
			else	/* NPIV port */
				fc_lport_reset(vn_port);
		}

		if (reset_phys_port) {
			fc_lport_reset(fip->lp);
			fcoe_ctlr_solicit(fip, NULL);
		}
	}

err:
	kfree(vlink_desc_arr);
}

/**
 * fcoe_ctlr_recv() - Receive a FIP packet
 * @fip: The FCoE controller that received the packet
 * @skb: The received FIP packet
 *
 * This may be called from either NET_RX_SOFTIRQ or IRQ.
 *
 * Just queues the packet and defers real processing to the
 * recv_work worker (fcoe_ctlr_recv_work).
 */
void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	skb_queue_tail(&fip->fip_recv_list, skb);
	schedule_work(&fip->recv_work);
}
EXPORT_SYMBOL(fcoe_ctlr_recv);

/**
 * fcoe_ctlr_recv_handler() - Receive a FIP frame
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is dropped.
*/ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; struct ethhdr *eh; enum fip_state state; u16 op; u8 sub; if (skb_linearize(skb)) goto drop; if (skb->len < sizeof(*fiph)) goto drop; eh = eth_hdr(skb); if (fip->mode == FIP_MODE_VN2VN) { if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) && compare_ether_addr(eh->h_dest, fcoe_all_p2p)) goto drop; } else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && compare_ether_addr(eh->h_dest, fcoe_all_enode)) goto drop; fiph = (struct fip_header *)skb->data; op = ntohs(fiph->fip_op); sub = fiph->fip_subcode; if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) goto drop; if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) goto drop; mutex_lock(&fip->ctlr_mutex); state = fip->state; if (state == FIP_ST_AUTO) { fip->map_dest = 0; fcoe_ctlr_set_state(fip, FIP_ST_ENABLED); state = FIP_ST_ENABLED; LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); } mutex_unlock(&fip->ctlr_mutex); if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN) return fcoe_ctlr_vn_recv(fip, skb); if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP && state != FIP_ST_VNMP_CLAIM) goto drop; if (op == FIP_OP_LS) { fcoe_ctlr_recv_els(fip, skb); /* consumes skb */ return 0; } if (state != FIP_ST_ENABLED) goto drop; if (op == FIP_OP_DISC && sub == FIP_SC_ADV) fcoe_ctlr_recv_adv(fip, skb); else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) fcoe_ctlr_recv_clr_vlink(fip, fiph); kfree_skb(skb); return 0; drop: kfree_skb(skb); return -1; } /** * fcoe_ctlr_select() - Select the best FCF (if possible) * @fip: The FCoE controller * * Returns the selected FCF, or NULL if none are usable. * * If there are conflicting advertisements, no FCF can be chosen. * * If there is already a selected FCF, this will choose a better one or * an equivalent one that hasn't already been sent a FLOGI. * * Called with lock held. 
*/ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; struct fcoe_fcf *best = fip->sel_fcf; struct fcoe_fcf *first; first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list); list_for_each_entry(fcf, &fip->fcfs, list) { LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " "VFID %d mac %pM map %x val %d " "sent %u pri %u\n", fcf->fabric_name, fcf->vfid, fcf->fcf_mac, fcf->fc_map, fcoe_ctlr_mtu_valid(fcf), fcf->flogi_sent, fcf->pri); if (fcf->fabric_name != first->fabric_name || fcf->vfid != first->vfid || fcf->fc_map != first->fc_map) { LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " "or FC-MAP\n"); return NULL; } if (fcf->flogi_sent) continue; if (!fcoe_ctlr_fcf_usable(fcf)) { LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " "map %x %svalid %savailable\n", fcf->fabric_name, fcf->fc_map, (fcf->flags & FIP_FL_SOL) ? "" : "in", (fcf->flags & FIP_FL_AVAIL) ? "" : "un"); continue; } if (!best || fcf->pri < best->pri || best->flogi_sent) best = fcf; } fip->sel_fcf = best; if (best) { LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac); fip->port_ka_time = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); fip->ctlr_ka_time = jiffies + best->fka_period; if (time_before(fip->ctlr_ka_time, fip->timer.expires)) mod_timer(&fip->timer, fip->ctlr_ka_time); } return best; } /** * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF * @fip: The FCoE controller * * Returns non-zero error if it could not be sent. * * Called with ctlr_mutex and ctlr_lock held. * Caller must verify that fip->sel_fcf is not NULL. */ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip) { struct sk_buff *skb; struct sk_buff *skb_orig; struct fc_frame_header *fh; int error; skb_orig = fip->flogi_req; if (!skb_orig) return -EINVAL; /* * Clone and send the FLOGI request. If clone fails, use original. 
*/ skb = skb_clone(skb_orig, GFP_ATOMIC); if (!skb) { skb = skb_orig; fip->flogi_req = NULL; } fh = (struct fc_frame_header *)skb->data; error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb, ntoh24(fh->fh_d_id)); if (error) { kfree_skb(skb); return error; } fip->send(fip, skb); fip->sel_fcf->flogi_sent = 1; return 0; } /** * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible * @fip: The FCoE controller * * Returns non-zero error code if there's no FLOGI request to retry or * no alternate FCF available. */ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; int error; mutex_lock(&fip->ctlr_mutex); spin_lock_bh(&fip->ctlr_lock); LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n"); fcf = fcoe_ctlr_select(fip); if (!fcf || fcf->flogi_sent) { kfree_skb(fip->flogi_req); fip->flogi_req = NULL; error = -ENOENT; } else { fcoe_ctlr_solicit(fip, NULL); error = fcoe_ctlr_flogi_send_locked(fip); } spin_unlock_bh(&fip->ctlr_lock); mutex_unlock(&fip->ctlr_mutex); return error; } /** * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI. * @fip: The FCoE controller that timed out * * Done here because fcoe_ctlr_els_send() can't get mutex. * * Called with ctlr_mutex held. The caller must not hold ctlr_lock. */ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; spin_lock_bh(&fip->ctlr_lock); fcf = fip->sel_fcf; if (!fcf || !fip->flogi_req_send) goto unlock; LIBFCOE_FIP_DBG(fip, "sending FLOGI\n"); /* * If this FLOGI is being sent due to a timeout retry * to the same FCF as before, select a different FCF if possible. 
	 */
	if (fcf->flogi_sent) {
		LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n");
		fcf = fcoe_ctlr_select(fip);
		if (!fcf || fcf->flogi_sent) {
			/* Every FCF has been tried: clear all flogi_sent
			 * flags and start the rotation over. */
			LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n");
			list_for_each_entry(fcf, &fip->fcfs, list)
				fcf->flogi_sent = 0;
			fcf = fcoe_ctlr_select(fip);
		}
	}
	if (fcf) {
		fcoe_ctlr_flogi_send_locked(fip);
		fip->flogi_req_send = 0;
	} else /* XXX */
		LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
	spin_unlock_bh(&fip->ctlr_lock);
}

/**
 * fcoe_ctlr_timeout() - FIP timeout handler
 * @arg: The FCoE controller that timed out
 *
 * Runs in timer context; defers all real work to timer_work.
 */
static void fcoe_ctlr_timeout(unsigned long arg)
{
	struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;

	schedule_work(&fip->timer_work);
}

/**
 * fcoe_ctlr_timer_work() - Worker thread function for timer work
 * @work: Handle to a FCoE controller
 *
 * Ages FCFs. Triggers FCF selection if possible.
 * Sends keep-alives and resets.
 */
static void fcoe_ctlr_timer_work(struct work_struct *work)
{
	struct fcoe_ctlr *fip;
	struct fc_lport *vport;
	u8 *mac;
	u8 reset = 0;		/* lport needs reset (lost our FCF) */
	u8 send_ctlr_ka = 0;	/* controller keep-alive due */
	u8 send_port_ka = 0;	/* per-port keep-alives due */
	struct fcoe_fcf *sel;
	struct fcoe_fcf *fcf;
	unsigned long next_timer;

	fip = container_of(work, struct fcoe_ctlr, timer_work);
	if (fip->mode == FIP_MODE_VN2VN)
		return fcoe_ctlr_vn_timeout(fip);
	mutex_lock(&fip->ctlr_mutex);
	if (fip->state == FIP_ST_DISABLED) {
		mutex_unlock(&fip->ctlr_mutex);
		return;
	}

	fcf = fip->sel_fcf;
	next_timer = fcoe_ctlr_age_fcfs(fip);

	/* sel may differ from fcf if aging dropped the selected FCF. */
	sel = fip->sel_fcf;
	if (!sel && fip->sel_time) {
		if (time_after_eq(jiffies, fip->sel_time)) {
			sel = fcoe_ctlr_select(fip);
			fip->sel_time = 0;
		} else if (time_after(next_timer, fip->sel_time))
			next_timer = fip->sel_time;
	}

	if (sel && fip->flogi_req_send)
		fcoe_ctlr_flogi_send(fip);
	else if (!sel && fcf)
		reset = 1;

	/* Keep-alives are only sent when the FCF doesn't disable them
	 * (fd_flags clear). */
	if (sel && !sel->fd_flags) {
		if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
			fip->ctlr_ka_time = jiffies + sel->fka_period;
			send_ctlr_ka = 1;
		}
		if (time_after(next_timer, fip->ctlr_ka_time))
			next_timer = fip->ctlr_ka_time;
		if (time_after_eq(jiffies, fip->port_ka_time)) {
			fip->port_ka_time = jiffies +
				msecs_to_jiffies(FIP_VN_KA_PERIOD);
			send_port_ka = 1;
		}
		if (time_after(next_timer, fip->port_ka_time))
			next_timer = fip->port_ka_time;
	}
	if (!list_empty(&fip->fcfs))
		mod_timer(&fip->timer, next_timer);
	mutex_unlock(&fip->ctlr_mutex);
	/* Do the heavyweight actions after dropping ctlr_mutex. */
	if (reset) {
		fc_lport_reset(fip->lp);
		/* restart things with a solicitation */
		fcoe_ctlr_solicit(fip, NULL);
	}

	if (send_ctlr_ka)
		fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr);

	if (send_port_ka) {
		mutex_lock(&fip->lp->lp_mutex);
		mac = fip->get_src_addr(fip->lp);
		fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac);
		list_for_each_entry(vport, &fip->lp->vports, list) {
			mac = fip->get_src_addr(vport);
			fcoe_ctlr_send_keep_alive(fip, vport, 1, mac);
		}
		mutex_unlock(&fip->lp->lp_mutex);
	}
}

/**
 * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames
 * @recv_work: Handle to a FCoE controller
 *
 * Drains the receive queue filled by fcoe_ctlr_recv().
 */
static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
{
	struct fcoe_ctlr *fip;
	struct sk_buff *skb;

	fip = container_of(recv_work, struct fcoe_ctlr, recv_work);
	while ((skb = skb_dequeue(&fip->fip_recv_list)))
		fcoe_ctlr_recv_handler(fip, skb);
}

/**
 * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response
 * @fip: The FCoE controller
 * @fp: The FC frame to snoop
 *
 * Snoop potential response to FLOGI or even incoming FLOGI.
 *
 * The caller has checked that we are waiting for login as indicated
 * by fip->flogi_oxid != FC_XID_UNKNOWN.
 *
 * The caller is responsible for freeing the frame.
 * Fill in the granted_mac address.
 *
 * Return non-zero if the frame should not be delivered to libfc.
 */
int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
			 struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	u8 op;
	u8 *sa;

	sa = eth_hdr(&fp->skb)->h_source;
	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_ELS)
		return 0;

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
	    fip->flogi_oxid == ntohs(fh->fh_ox_id)) {

		mutex_lock(&fip->ctlr_mutex);
		if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) {
			mutex_unlock(&fip->ctlr_mutex);
			return -EINVAL;
		}
		fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
		LIBFCOE_FIP_DBG(fip,
				"received FLOGI LS_ACC using non-FIP mode\n");

		/*
		 * FLOGI accepted.
		 * If the src mac addr is FC_OUI-based, then we mark the
		 * address_mode flag to use FC_OUI-based Ethernet DA.
		 * Otherwise we use the FCoE gateway addr
		 */
		if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
			fcoe_ctlr_map_dest(fip);
		} else {
			memcpy(fip->dest_addr, sa, ETH_ALEN);
			fip->map_dest = 0;
		}
		fip->flogi_oxid = FC_XID_UNKNOWN;
		mutex_unlock(&fip->ctlr_mutex);
		fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id);
	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
		/*
		 * Save source MAC for point-to-point responses.
		 */
		mutex_lock(&fip->ctlr_mutex);
		if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
			memcpy(fip->dest_addr, sa, ETH_ALEN);
			fip->map_dest = 0;
			if (fip->state == FIP_ST_AUTO)
				LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. "
						"Setting non-FIP mode\n");
			fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
		}
		mutex_unlock(&fip->ctlr_mutex);
	}
	return 0;
}
EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);

/**
 * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN
 * @mac: The MAC address to convert
 * @scheme: The scheme to use when converting
 * @port: The port indicator for converting
 *
 * Returns: u64 fc world wide name
 */
u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
		      unsigned int scheme, unsigned int port)
{
	u64 wwn;
	u64 host_mac;

	/* The MAC is in NO, so flip only the low 48 bits */
	host_mac = ((u64) mac[0] << 40) |
		((u64) mac[1] << 32) |
		((u64) mac[2] << 24) |
		((u64) mac[3] << 16) |
		((u64) mac[4] << 8) |
		(u64) mac[5];

	WARN_ON(host_mac >= (1ULL << 48));
	wwn = host_mac | ((u64) scheme << 60);
	switch (scheme) {
	case 1:
		/* Scheme 1: no port number encoded; must be 0. */
		WARN_ON(port != 0);
		break;
	case 2:
		/* Scheme 2: 12-bit port number in bits 48-59. */
		WARN_ON(port >= 0xfff);
		wwn |= (u64) port << 48;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return wwn;
}
EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);

/**
 * fcoe_ctlr_rport() - return the fcoe_rport for a given fc_rport_priv
 * @rdata: libfc remote port
 *
 * The fcoe_rport is allocated immediately after the fc_rport_priv
 * it augments, hence the pointer arithmetic.
 */
static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
{
	return (struct fcoe_rport *)(rdata + 1);
}

/**
 * fcoe_ctlr_vn_send() - Send a FIP VN2VN Probe Request or Reply.
 * @fip: The FCoE controller
 * @sub: sub-opcode for probe request, reply, or advertisement.
 * @dest: The destination Ethernet MAC address
 * @min_len: minimum size of the Ethernet payload to be sent
 */
static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
			      enum fip_vn2vn_subcode sub,
			      const u8 *dest, size_t min_len)
{
	struct sk_buff *skb;
	/* Fixed-layout header: Ethernet + FIP + the three descriptors
	 * (MAC, node name, VN_Port) common to all VN2VN sub-ops. */
	struct fip_frame {
		struct ethhdr eth;
		struct fip_header fip;
		struct fip_mac_desc mac;
		struct fip_wwn_desc wwnn;
		struct fip_vn_desc vn;
	} __packed * frame;
	struct fip_fc4_feat *ff;
	struct fip_size_desc *size;
	u32 fcp_feat;
	size_t len;
	size_t dlen;

	len = sizeof(*frame);
	dlen = 0;
	/* Claims carry two extra descriptors: FC-4 features and max
	 * FCoE frame size. */
	if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) {
		dlen = sizeof(struct fip_fc4_feat) +
		       sizeof(struct fip_size_desc);
		len += dlen;
	}
	dlen += sizeof(frame->mac) + sizeof(frame->wwnn) + sizeof(frame->vn);
	len = max(len, min_len + sizeof(struct ethhdr));

	skb = dev_alloc_skb(len);
	if (!skb)
		return;

	frame = (struct fip_frame *)skb->data;
	memset(frame, 0, len);
	memcpy(frame->eth.h_dest, dest, ETH_ALEN);
	memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	frame->eth.h_proto = htons(ETH_P_FIP);

	frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	frame->fip.fip_op = htons(FIP_OP_VN2VN);
	frame->fip.fip_subcode = sub;
	frame->fip.fip_dl_len = htons(dlen / FIP_BPW);

	frame->mac.fd_desc.fip_dtype = FIP_DT_MAC;
	frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW;
	memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	frame->wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	frame->wwnn.fd_desc.fip_dlen = sizeof(frame->wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &frame->wwnn.fd_wwn);

	frame->vn.fd_desc.fip_dtype = FIP_DT_VN_ID;
	frame->vn.fd_desc.fip_dlen = sizeof(frame->vn) / FIP_BPW;
	/* VN_Port MAC = FC-MAP prefix | proposed/assigned port_id. */
	hton24(frame->vn.fd_mac, FIP_VN_FC_MAP);
	hton24(frame->vn.fd_mac + 3, fip->port_id);
	hton24(frame->vn.fd_fc_id, fip->port_id);
	put_unaligned_be64(fip->lp->wwpn, &frame->vn.fd_wwpn);

	/*
	 * For claims, add FC-4 features.
	 * TBD: Add interface to get fc-4 types and features from libfc.
	 */
	if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) {
		ff = (struct fip_fc4_feat *)(frame + 1);
		ff->fd_desc.fip_dtype = FIP_DT_FC4F;
		ff->fd_desc.fip_dlen = sizeof(*ff) / FIP_BPW;
		ff->fd_fts = fip->lp->fcts;

		fcp_feat = 0;
		if (fip->lp->service_params & FCP_SPPF_INIT_FCN)
			fcp_feat |= FCP_FEAT_INIT;
		if (fip->lp->service_params & FCP_SPPF_TARG_FCN)
			fcp_feat |= FCP_FEAT_TARG;
		fcp_feat <<= (FC_TYPE_FCP * 4) % 32;
		ff->fd_ff.fd_feat[FC_TYPE_FCP * 4 / 32] = htonl(fcp_feat);

		size = (struct fip_size_desc *)(ff + 1);
		size->fd_desc.fip_dtype = FIP_DT_FCOE_SIZE;
		size->fd_desc.fip_dlen = sizeof(*size) / FIP_BPW;
		size->fd_size = htons(fcoe_ctlr_fcoe_size(fip));
	}

	skb_put(skb, len);
	skb->protocol = htons(ETH_P_FIP);
	skb->priority = fip->priority;
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	fip->send(fip, skb);
}

/**
 * fcoe_ctlr_vn_rport_callback - Event handler for rport events.
 * @lport: The lport which is receiving the event
 * @rdata: remote port private data
 * @event: The event that occurred
 *
 * Locking Note: The rport lock must not be held when calling this function.
*/ static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport, struct fc_rport_priv *rdata, enum fc_rport_event event) { struct fcoe_ctlr *fip = lport->disc.priv; struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); LIBFCOE_FIP_DBG(fip, "vn_rport_callback %x event %d\n", rdata->ids.port_id, event); mutex_lock(&fip->ctlr_mutex); switch (event) { case RPORT_EV_READY: frport->login_count = 0; break; case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: frport->login_count++; if (frport->login_count > FCOE_CTLR_VN2VN_LOGIN_LIMIT) { LIBFCOE_FIP_DBG(fip, "rport FLOGI limited port_id %6.6x\n", rdata->ids.port_id); lport->tt.rport_logoff(rdata); } break; default: break; } mutex_unlock(&fip->ctlr_mutex); } static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = { .event_callback = fcoe_ctlr_vn_rport_callback, }; /** * fcoe_ctlr_disc_stop_locked() - stop discovery in VN2VN mode * @fip: The FCoE controller * * Called with ctlr_mutex held. */ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport) { mutex_lock(&lport->disc.disc_mutex); lport->disc.disc_callback = NULL; mutex_unlock(&lport->disc.disc_mutex); } /** * fcoe_ctlr_disc_stop() - stop discovery in VN2VN mode * @fip: The FCoE controller * * Called through the local port template for discovery. * Called without the ctlr_mutex held. */ static void fcoe_ctlr_disc_stop(struct fc_lport *lport) { struct fcoe_ctlr *fip = lport->disc.priv; mutex_lock(&fip->ctlr_mutex); fcoe_ctlr_disc_stop_locked(lport); mutex_unlock(&fip->ctlr_mutex); } /** * fcoe_ctlr_disc_stop_final() - stop discovery for shutdown in VN2VN mode * @fip: The FCoE controller * * Called through the local port template for discovery. * Called without the ctlr_mutex held. 
*/ static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport) { fcoe_ctlr_disc_stop(lport); lport->tt.rport_flush_queue(); synchronize_rcu(); } /** * fcoe_ctlr_vn_restart() - VN2VN probe restart with new port_id * @fip: The FCoE controller * * Called with fcoe_ctlr lock held. */ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip) { unsigned long wait; u32 port_id; fcoe_ctlr_disc_stop_locked(fip->lp); /* * Get proposed port ID. * If this is the first try after link up, use any previous port_id. * If there was none, use the low bits of the port_name. * On subsequent tries, get the next random one. * Don't use reserved IDs, use another non-zero value, just as random. */ port_id = fip->port_id; if (fip->probe_tries) port_id = prandom32(&fip->rnd_state) & 0xffff; else if (!port_id) port_id = fip->lp->wwpn & 0xffff; if (!port_id || port_id == 0xffff) port_id = 1; fip->port_id = port_id; if (fip->probe_tries < FIP_VN_RLIM_COUNT) { fip->probe_tries++; wait = random32() % FIP_VN_PROBE_WAIT; } else wait = FIP_VN_RLIM_INT; mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait)); fcoe_ctlr_set_state(fip, FIP_ST_VNMP_START); } /** * fcoe_ctlr_vn_start() - Start in VN2VN mode * @fip: The FCoE controller * * Called with fcoe_ctlr lock held. */ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip) { fip->probe_tries = 0; prandom32_seed(&fip->rnd_state, fip->lp->wwpn); fcoe_ctlr_vn_restart(fip); } /** * fcoe_ctlr_vn_parse - parse probe request or response * @fip: The FCoE controller * @skb: incoming packet * @rdata: buffer for resulting parsed VN entry plus fcoe_rport * * Returns non-zero error number on error. * Does not consume the packet. 
*/ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, struct sk_buff *skb, struct fc_rport_priv *rdata) { struct fip_header *fiph; struct fip_desc *desc = NULL; struct fip_mac_desc *macd = NULL; struct fip_wwn_desc *wwn = NULL; struct fip_vn_desc *vn = NULL; struct fip_size_desc *size = NULL; struct fcoe_rport *frport; size_t rlen; size_t dlen; u32 desc_mask = 0; u32 dtype; u8 sub; memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); frport = fcoe_ctlr_rport(rdata); fiph = (struct fip_header *)skb->data; frport->flags = ntohs(fiph->fip_flags); sub = fiph->fip_subcode; switch (sub) { case FIP_SC_VN_PROBE_REQ: case FIP_SC_VN_PROBE_REP: case FIP_SC_VN_BEACON: desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID); break; case FIP_SC_VN_CLAIM_NOTIFY: case FIP_SC_VN_CLAIM_REP: desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID) | BIT(FIP_DT_FC4F) | BIT(FIP_DT_FCOE_SIZE); break; default: LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub); return -EINVAL; } rlen = ntohs(fiph->fip_dl_len) * 4; if (rlen + sizeof(*fiph) > skb->len) return -EINVAL; desc = (struct fip_desc *)(fiph + 1); while (rlen > 0) { dlen = desc->fip_dlen * FIP_BPW; if (dlen < sizeof(*desc) || dlen > rlen) return -EINVAL; dtype = desc->fip_dtype; if (dtype < 32) { if (!(desc_mask & BIT(dtype))) { LIBFCOE_FIP_DBG(fip, "unexpected or duplicated desc " "desc type %u in " "FIP VN2VN subtype %u\n", dtype, sub); return -EINVAL; } desc_mask &= ~BIT(dtype); } switch (dtype) { case FIP_DT_MAC: if (dlen != sizeof(struct fip_mac_desc)) goto len_err; macd = (struct fip_mac_desc *)desc; if (!is_valid_ether_addr(macd->fd_mac)) { LIBFCOE_FIP_DBG(fip, "Invalid MAC addr %pM in FIP VN2VN\n", macd->fd_mac); return -EINVAL; } memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN); break; case FIP_DT_NAME: if (dlen != sizeof(struct fip_wwn_desc)) goto len_err; wwn = (struct fip_wwn_desc *)desc; rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); break; case FIP_DT_VN_ID: if (dlen != 
sizeof(struct fip_vn_desc)) goto len_err; vn = (struct fip_vn_desc *)desc; memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); rdata->ids.port_id = ntoh24(vn->fd_fc_id); rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn); break; case FIP_DT_FC4F: if (dlen != sizeof(struct fip_fc4_feat)) goto len_err; break; case FIP_DT_FCOE_SIZE: if (dlen != sizeof(struct fip_size_desc)) goto len_err; size = (struct fip_size_desc *)desc; frport->fcoe_len = ntohs(size->fd_size); break; default: LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP probe\n", dtype); /* standard says ignore unknown descriptors >= 128 */ if (dtype < FIP_DT_VENDOR_BASE) return -EINVAL; break; } desc = (struct fip_desc *)((char *)desc + dlen); rlen -= dlen; } return 0; len_err: LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", dtype, dlen); return -EINVAL; } /** * fcoe_ctlr_vn_send_claim() - send multicast FIP VN2VN Claim Notification. * @fip: The FCoE controller * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip) { fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_NOTIFY, fcoe_all_vn2vn, 0); fip->sol_time = jiffies; } /** * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. * @fip: The FCoE controller * @rdata: parsed remote port with frport from the probe request * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, struct fc_rport_priv *rdata) { struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); if (rdata->ids.port_id != fip->port_id) return; switch (fip->state) { case FIP_ST_VNMP_CLAIM: case FIP_ST_VNMP_UP: fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, frport->enode_mac, 0); break; case FIP_ST_VNMP_PROBE1: case FIP_ST_VNMP_PROBE2: /* * Decide whether to reply to the Probe. * Our selected address is never a "recorded" one, so * only reply if our WWPN is greater and the * Probe's REC bit is not set. * If we don't reply, we will change our address. 
		 */
		if (fip->lp->wwpn > rdata->ids.port_name &&
		    !(frport->flags & FIP_FL_REC_OR_P2P)) {
			fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
					  frport->enode_mac, 0);
			break;
		}
		/* fall through */
	case FIP_ST_VNMP_START:
		/* We lost the ID contention: restart with a new one. */
		fcoe_ctlr_vn_restart(fip);
		break;
	default:
		break;
	}
}

/**
 * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply.
 * @fip: The FCoE controller
 * @rdata: parsed remote port with frport from the probe request
 *
 * Called with ctlr_mutex held.
 */
static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
				     struct fc_rport_priv *rdata)
{
	/* A reply means someone else already holds that port_id. */
	if (rdata->ids.port_id != fip->port_id)
		return;
	switch (fip->state) {
	case FIP_ST_VNMP_START:
	case FIP_ST_VNMP_PROBE1:
	case FIP_ST_VNMP_PROBE2:
	case FIP_ST_VNMP_CLAIM:
		/* Still acquiring: pick a new port_id and start over. */
		fcoe_ctlr_vn_restart(fip);
		break;
	case FIP_ST_VNMP_UP:
		/* Already up: re-assert our claim instead. */
		fcoe_ctlr_vn_send_claim(fip);
		break;
	default:
		break;
	}
}

/**
 * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply.
 * @fip: The FCoE controller
 * @new: newly-parsed remote port with frport as a template for new rdata
 *
 * Called with ctlr_mutex held.
 */
static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
{
	struct fc_lport *lport = fip->lp;
	struct fc_rport_priv *rdata;
	struct fc_rport_identifiers *ids;
	struct fcoe_rport *frport;
	u32 port_id;

	port_id = new->ids.port_id;
	/* Never add an entry for ourselves. */
	if (port_id == fip->port_id)
		return;

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, port_id);
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		return;
	}

	rdata->ops = &fcoe_ctlr_vn_rport_ops;
	rdata->disc_id = lport->disc.disc_id;

	/* If the existing entry's names differ from the claim, the
	 * peer changed identity: log off the stale session first. */
	ids = &rdata->ids;
	if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
	    (ids->node_name != -1 && ids->node_name != new->ids.node_name))
		lport->tt.rport_logoff(rdata);
	ids->port_name = new->ids.port_name;
	ids->node_name = new->ids.node_name;
	mutex_unlock(&lport->disc.disc_mutex);

	frport = fcoe_ctlr_rport(rdata);
	LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n",
			port_id, frport->fcoe_len ? "old" : "new");
	*frport = *fcoe_ctlr_rport(new);
	frport->time = 0;
}

/**
 * fcoe_ctlr_vn_lookup() - Find VN remote port's MAC address
 * @fip: The FCoE controller
 * @port_id:  The port_id of the remote VN_node
 * @mac: buffer which will hold the VN_NODE destination MAC address, if found.
 *
 * Returns non-zero error if no remote port found.
 */
static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
{
	struct fc_lport *lport = fip->lp;
	struct fc_rport_priv *rdata;
	struct fcoe_rport *frport;
	int ret = -1;

	rcu_read_lock();
	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata) {
		frport = fcoe_ctlr_rport(rdata);
		memcpy(mac, frport->enode_mac, ETH_ALEN);
		ret = 0;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification
 * @fip: The FCoE controller
 * @new: newly-parsed remote port with frport as a template for new rdata
 *
 * Called with ctlr_mutex held.
 */
static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
				      struct fc_rport_priv *new)
{
	struct fcoe_rport *frport = fcoe_ctlr_rport(new);

	/* REC/P2P set: the claim isn't authoritative; probe instead. */
	if (frport->flags & FIP_FL_REC_OR_P2P) {
		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
		return;
	}
	switch (fip->state) {
	case FIP_ST_VNMP_START:
	case FIP_ST_VNMP_PROBE1:
	case FIP_ST_VNMP_PROBE2:
		/* Someone claimed the ID we were probing: restart. */
		if (new->ids.port_id == fip->port_id)
			fcoe_ctlr_vn_restart(fip);
		break;
	case FIP_ST_VNMP_CLAIM:
	case FIP_ST_VNMP_UP:
		if (new->ids.port_id == fip->port_id) {
			/* Conflict on our own ID: higher WWPN wins. */
			if (new->ids.port_name > fip->lp->wwpn) {
				fcoe_ctlr_vn_restart(fip);
				break;
			}
			fcoe_ctlr_vn_send_claim(fip);
			break;
		}
		fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
				  min((u32)frport->fcoe_len,
				      fcoe_ctlr_fcoe_size(fip)));
		fcoe_ctlr_vn_add(fip, new);
		break;
	default:
		break;
	}
}

/**
 * fcoe_ctlr_vn_claim_resp() - handle received Claim Response
 * @fip: The FCoE controller that received the frame
 * @new: newly-parsed remote port with frport from the Claim Response
 *
 * Called with ctlr_mutex held.
*/ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, struct fc_rport_priv *new) { LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", new->ids.port_id, fcoe_ctlr_state(fip->state)); if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) fcoe_ctlr_vn_add(fip, new); } /** * fcoe_ctlr_vn_beacon() - handle received beacon. * @fip: The FCoE controller that received the frame * @new: newly-parsed remote port with frport from the Beacon * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, struct fc_rport_priv *new) { struct fc_lport *lport = fip->lp; struct fc_rport_priv *rdata; struct fcoe_rport *frport; frport = fcoe_ctlr_rport(new); if (frport->flags & FIP_FL_REC_OR_P2P) { fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; } mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, new->ids.port_id); if (rdata) kref_get(&rdata->kref); mutex_unlock(&lport->disc.disc_mutex); if (rdata) { if (rdata->ids.node_name == new->ids.node_name && rdata->ids.port_name == new->ids.port_name) { frport = fcoe_ctlr_rport(rdata); if (!frport->time && fip->state == FIP_ST_VNMP_UP) lport->tt.rport_login(rdata); frport->time = jiffies; } kref_put(&rdata->kref, lport->tt.rport_destroy); return; } if (fip->state != FIP_ST_VNMP_UP) return; /* * Beacon from a new neighbor. * Send a claim notify if one hasn't been sent recently. * Don't add the neighbor yet. */ LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", new->ids.port_id); if (time_after(jiffies, fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) fcoe_ctlr_vn_send_claim(fip); } /** * fcoe_ctlr_vn_age() - Check for VN_ports without recent beacons * @fip: The FCoE controller * * Called with ctlr_mutex held. * Called only in state FIP_ST_VNMP_UP. * Returns the soonest time for next age-out or a time far in the future. 
*/ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip) { struct fc_lport *lport = fip->lp; struct fc_rport_priv *rdata; struct fcoe_rport *frport; unsigned long next_time; unsigned long deadline; next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10); mutex_lock(&lport->disc.disc_mutex); list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { frport = fcoe_ctlr_rport(rdata); if (!frport->time) continue; deadline = frport->time + msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10); if (time_after_eq(jiffies, deadline)) { frport->time = 0; LIBFCOE_FIP_DBG(fip, "port %16.16llx fc_id %6.6x beacon expired\n", rdata->ids.port_name, rdata->ids.port_id); lport->tt.rport_logoff(rdata); } else if (time_before(deadline, next_time)) next_time = deadline; } mutex_unlock(&lport->disc.disc_mutex); return next_time; } /** * fcoe_ctlr_vn_recv() - Receive a FIP frame * @fip: The FCoE controller that received the frame * @skb: The received FIP frame * * Returns non-zero if the frame is dropped. * Always consumes the frame. 
*/ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; enum fip_vn2vn_subcode sub; struct { struct fc_rport_priv rdata; struct fcoe_rport frport; } buf; int rc; fiph = (struct fip_header *)skb->data; sub = fiph->fip_subcode; rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); if (rc) { LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); goto drop; } mutex_lock(&fip->ctlr_mutex); switch (sub) { case FIP_SC_VN_PROBE_REQ: fcoe_ctlr_vn_probe_req(fip, &buf.rdata); break; case FIP_SC_VN_PROBE_REP: fcoe_ctlr_vn_probe_reply(fip, &buf.rdata); break; case FIP_SC_VN_CLAIM_NOTIFY: fcoe_ctlr_vn_claim_notify(fip, &buf.rdata); break; case FIP_SC_VN_CLAIM_REP: fcoe_ctlr_vn_claim_resp(fip, &buf.rdata); break; case FIP_SC_VN_BEACON: fcoe_ctlr_vn_beacon(fip, &buf.rdata); break; default: LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); rc = -1; break; } mutex_unlock(&fip->ctlr_mutex); drop: kfree_skb(skb); return rc; } /** * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode. * @lport: The local port * @fp: The received frame * * This should never be called since we don't see RSCNs or other * fabric-generated ELSes. */ static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp) { struct fc_seq_els_data rjt_data; rjt_data.reason = ELS_RJT_UNSUP; rjt_data.explan = ELS_EXPL_NONE; lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } /** * fcoe_ctlr_disc_recv - start discovery for VN2VN mode. * @fip: The FCoE controller * * This sets a flag indicating that remote ports should be created * and started for the peers we discover. We use the disc_callback * pointer as that flag. Peers already discovered are created here. * * The lport lock is held during this call. The callback must be done * later, without holding either the lport or discovery locks. * The fcoe_ctlr lock may also be held during this call. 
*/ static void fcoe_ctlr_disc_start(void (*callback)(struct fc_lport *, enum fc_disc_event), struct fc_lport *lport) { struct fc_disc *disc = &lport->disc; struct fcoe_ctlr *fip = disc->priv; mutex_lock(&disc->disc_mutex); disc->disc_callback = callback; disc->disc_id = (disc->disc_id + 2) | 1; disc->pending = 1; schedule_work(&fip->timer_work); mutex_unlock(&disc->disc_mutex); } /** * fcoe_ctlr_vn_disc() - report FIP VN_port discovery results after claim state. * @fip: The FCoE controller * * Starts the FLOGI and PLOGI login process to each discovered rport for which * we've received at least one beacon. * Performs the discovery complete callback. */ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip) { struct fc_lport *lport = fip->lp; struct fc_disc *disc = &lport->disc; struct fc_rport_priv *rdata; struct fcoe_rport *frport; void (*callback)(struct fc_lport *, enum fc_disc_event); mutex_lock(&disc->disc_mutex); callback = disc->pending ? disc->disc_callback : NULL; disc->pending = 0; list_for_each_entry_rcu(rdata, &disc->rports, peers) { frport = fcoe_ctlr_rport(rdata); if (frport->time) lport->tt.rport_login(rdata); } mutex_unlock(&disc->disc_mutex); if (callback) callback(lport, DISC_EV_SUCCESS); } /** * fcoe_ctlr_vn_timeout - timer work function for VN2VN mode. 
* @fip: The FCoE controller */ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) { unsigned long next_time; u8 mac[ETH_ALEN]; u32 new_port_id = 0; mutex_lock(&fip->ctlr_mutex); switch (fip->state) { case FIP_ST_VNMP_START: fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT); break; case FIP_ST_VNMP_PROBE1: fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); break; case FIP_ST_VNMP_PROBE2: fcoe_ctlr_set_state(fip, FIP_ST_VNMP_CLAIM); new_port_id = fip->port_id; hton24(mac, FIP_VN_FC_MAP); hton24(mac + 3, new_port_id); fcoe_ctlr_map_dest(fip); fip->update_mac(fip->lp, mac); fcoe_ctlr_vn_send_claim(fip); next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); break; case FIP_ST_VNMP_CLAIM: /* * This may be invoked either by starting discovery so don't * go to the next state unless it's been long enough. 
*/ next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT); if (time_after_eq(jiffies, next_time)) { fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP); fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, fcoe_all_vn2vn, 0); next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); fip->port_ka_time = next_time; } fcoe_ctlr_vn_disc(fip); break; case FIP_ST_VNMP_UP: next_time = fcoe_ctlr_vn_age(fip); if (time_after_eq(jiffies, fip->port_ka_time)) { fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, fcoe_all_vn2vn, 0); fip->port_ka_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT + (random32() % FIP_VN_BEACON_FUZZ)); } if (time_before(fip->port_ka_time, next_time)) next_time = fip->port_ka_time; break; case FIP_ST_LINK_WAIT: goto unlock; default: WARN(1, "unexpected state %d\n", fip->state); goto unlock; } mod_timer(&fip->timer, next_time); unlock: mutex_unlock(&fip->ctlr_mutex); /* If port ID is new, notify local port after dropping ctlr_mutex */ if (new_port_id) fc_lport_set_local_id(fip->lp, new_port_id); } /** * fcoe_libfc_config() - Sets up libfc related properties for local port * @lp: The local port to configure libfc for * @fip: The FCoE controller in use by the local port * @tt: The libfc function template * @init_fcp: If non-zero, the FCP portion of libfc should be initialized * * Returns : 0 for success */ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, const struct libfc_function_template *tt, int init_fcp) { /* Set the function pointers set by the LLDD */ memcpy(&lport->tt, tt, sizeof(*tt)); if (init_fcp && fc_fcp_init(lport)) return -ENOMEM; fc_exch_init(lport); fc_elsct_init(lport); fc_lport_init(lport); if (fip->mode == FIP_MODE_VN2VN) lport->rport_priv_size = sizeof(struct fcoe_rport); fc_rport_init(lport); if (fip->mode == FIP_MODE_VN2VN) { lport->point_to_multipoint = 1; lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; lport->tt.disc_start = fcoe_ctlr_disc_start; lport->tt.disc_stop = fcoe_ctlr_disc_stop; lport->tt.disc_stop_final = 
fcoe_ctlr_disc_stop_final; mutex_init(&lport->disc.disc_mutex); INIT_LIST_HEAD(&lport->disc.rports); lport->disc.priv = fip; } else { fc_disc_init(lport); } return 0; } EXPORT_SYMBOL_GPL(fcoe_libfc_config);
gpl-2.0
Team-Hydra/android_kernel_htc_msm8x60
drivers/ata/ata_generic.c
5458
8628
/* * ata_generic.c - Generic PATA/SATA controller driver. * Copyright 2005 Red Hat Inc, all rights reserved. * * Elements from ide/pci/generic.c * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> * Portions (C) Copyright 2002 Red Hat Inc <alan@redhat.com> * * May be copied or modified under the terms of the GNU General Public License * * Driver for PCI IDE interfaces implementing the standard bus mastering * interface functionality. This assumes the BIOS did the drive set up and * tuning for us. By default we do not grab all IDE class devices as they * may have other drivers or need fixups to avoid problems. Instead we keep * a default list of stuff without documentation/driver that appears to * work. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "ata_generic" #define DRV_VERSION "0.2.15" /* * A generic parallel ATA driver using libata */ enum { ATA_GEN_CLASS_MATCH = (1 << 0), ATA_GEN_FORCE_DMA = (1 << 1), ATA_GEN_INTEL_IDER = (1 << 2), }; /** * generic_set_mode - mode setting * @link: link to set up * @unused: returned device on error * * Use a non standard set_mode function. We don't want to be tuned. * The BIOS configured everything. Our job is not to fiddle. We * read the dma enabled bits from the PCI configuration of the device * and respect them. 
*/ static int generic_set_mode(struct ata_link *link, struct ata_device **unused) { struct ata_port *ap = link->ap; const struct pci_device_id *id = ap->host->private_data; int dma_enabled = 0; struct ata_device *dev; if (id->driver_data & ATA_GEN_FORCE_DMA) { dma_enabled = 0xff; } else if (ap->ioaddr.bmdma_addr) { /* Bits 5 and 6 indicate if DMA is active on master/slave */ dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); } ata_for_each_dev(dev, link, ENABLED) { /* We don't really care */ dev->pio_mode = XFER_PIO_0; dev->dma_mode = XFER_MW_DMA_0; /* We do need the right mode information for DMA or PIO and this comes from the current configuration flags */ if (dma_enabled & (1 << (5 + dev->devno))) { unsigned int xfer_mask = ata_id_xfermask(dev->id); const char *name; if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) name = ata_mode_string(xfer_mask); else { /* SWDMA perhaps? */ name = "DMA"; xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0); } ata_dev_info(dev, "configured for %s\n", name); dev->xfer_mode = ata_xfer_mask2mode(xfer_mask); dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode); dev->flags &= ~ATA_DFLAG_PIO; } else { ata_dev_info(dev, "configured for PIO\n"); dev->xfer_mode = XFER_PIO_0; dev->xfer_shift = ATA_SHIFT_PIO; dev->flags |= ATA_DFLAG_PIO; } } return 0; } static struct scsi_host_template generic_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations generic_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_unknown, .set_mode = generic_set_mode, }; static int all_generic_ide; /* Set to claim all devices */ /** * is_intel_ider - identify intel IDE-R devices * @dev: PCI device * * Distinguish Intel IDE-R controller devices from other Intel IDE * devices. IDE-R devices have no timing registers and are in * most respects virtual. They should be driven by the ata_generic * driver. * * IDE-R devices have PCI offset 0xF8.L as zero, later Intel ATA has * it non zero. 
All Intel ATA has 0x40 writable (timing), but it is * not writable on IDE-R devices (this is guaranteed). */ static int is_intel_ider(struct pci_dev *dev) { /* For Intel IDE the value at 0xF8 is only zero on IDE-R interfaces */ u32 r; u16 t; /* Check the manufacturing ID, it will be zero for IDE-R */ pci_read_config_dword(dev, 0xF8, &r); /* Not IDE-R: punt so that ata_(old)piix gets it */ if (r != 0) return 0; /* 0xF8 will also be zero on some early Intel IDE devices but they will have a sane timing register */ pci_read_config_word(dev, 0x40, &t); if (t != 0) return 0; /* Finally check if the timing register is writable so that we eliminate any early devices hot-docked in a docking station */ pci_write_config_word(dev, 0x40, 1); pci_read_config_word(dev, 0x40, &t); if (t) { pci_write_config_word(dev, 0x40, 0); return 0; } return 1; } /** * ata_generic_init - attach generic IDE * @dev: PCI device found * @id: match entry * * Called each time a matching IDE interface is found. We check if the * interface is one we wish to claim and if so we perform any chip * specific hacks then let the ATA layer do the heavy lifting. 
*/ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id *id) { u16 command; static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &generic_port_ops }; const struct ata_port_info *ppi[] = { &info, NULL }; /* Don't use the generic entry unless instructed to do so */ if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0) return -ENODEV; if (id->driver_data & ATA_GEN_INTEL_IDER) if (!is_intel_ider(dev)) return -ENODEV; /* Devices that need care */ if (dev->vendor == PCI_VENDOR_ID_UMC && dev->device == PCI_DEVICE_ID_UMC_UM8886A && (!(PCI_FUNC(dev->devfn) & 1))) return -ENODEV; if (dev->vendor == PCI_VENDOR_ID_OPTI && dev->device == PCI_DEVICE_ID_OPTI_82C558 && (!(PCI_FUNC(dev->devfn) & 1))) return -ENODEV; /* Don't re-enable devices in generic mode or we will break some motherboards with disabled and unused IDE controllers */ pci_read_config_word(dev, PCI_COMMAND, &command); if (!(command & PCI_COMMAND_IO)) return -ENODEV; if (dev->vendor == PCI_VENDOR_ID_AL) ata_pci_bmdma_clear_simplex(dev); if (dev->vendor == PCI_VENDOR_ID_ATI) { int rc = pcim_enable_device(dev); if (rc < 0) return rc; pcim_pin_device(dev); } return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0); } static struct pci_device_id ata_generic[] = { { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), }, { PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), }, { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), }, { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886A), }, { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF), }, { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), .driver_data = 
ATA_GEN_FORCE_DMA }, /* * For some reason, MCP89 on MacBook 7,1 doesn't work with * ahci, use ata_generic instead. */ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA, PCI_VENDOR_ID_APPLE, 0xcb89, .driver_data = ATA_GEN_FORCE_DMA }, #if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, #endif /* Intel, IDE class device */ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, .driver_data = ATA_GEN_INTEL_IDER }, /* Must come last. If you add entries adjust this table appropriately */ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL), .driver_data = ATA_GEN_CLASS_MATCH }, { 0, }, }; static struct pci_driver ata_generic_pci_driver = { .name = DRV_NAME, .id_table = ata_generic, .probe = ata_generic_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init ata_generic_init(void) { return pci_register_driver(&ata_generic_pci_driver); } static void __exit ata_generic_exit(void) { pci_unregister_driver(&ata_generic_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for generic ATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ata_generic); MODULE_VERSION(DRV_VERSION); module_init(ata_generic_init); module_exit(ata_generic_exit); module_param(all_generic_ide, int, 0);
gpl-2.0
shinru2004/HTC-C525c
drivers/of/of_i2c.c
7250
2480
/* * OF helpers for the I2C API * * Copyright (c) 2008 Jochen Friedrich <jochen@scram.de> * * Based on a previous patch from Jon Smirl <jonsmirl@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/i2c.h> #include <linux/irq.h> #include <linux/of.h> #include <linux/of_i2c.h> #include <linux/of_irq.h> #include <linux/module.h> void of_i2c_register_devices(struct i2c_adapter *adap) { void *result; struct device_node *node; /* Only register child devices if the adapter has a node pointer set */ if (!adap->dev.of_node) return; dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); for_each_child_of_node(adap->dev.of_node, node) { struct i2c_board_info info = {}; struct dev_archdata dev_ad = {}; const __be32 *addr; int len; dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name); if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) { dev_err(&adap->dev, "of_i2c: modalias failure on %s\n", node->full_name); continue; } addr = of_get_property(node, "reg", &len); if (!addr || (len < sizeof(int))) { dev_err(&adap->dev, "of_i2c: invalid reg on %s\n", node->full_name); continue; } info.addr = be32_to_cpup(addr); if (info.addr > (1 << 10) - 1) { dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n", info.addr, node->full_name); continue; } info.irq = irq_of_parse_and_map(node, 0); info.of_node = of_node_get(node); info.archdata = &dev_ad; request_module("%s%s", I2C_MODULE_PREFIX, info.type); result = i2c_new_device(adap, &info); if (result == NULL) { dev_err(&adap->dev, "of_i2c: Failure registering %s\n", node->full_name); of_node_put(node); irq_dispose_mapping(info.irq); continue; } } } EXPORT_SYMBOL(of_i2c_register_devices); static int of_dev_node_match(struct device *dev, void *data) { return dev->of_node == data; } /* must call 
put_device() when done with returned i2c_client device */ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) { struct device *dev; dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); if (!dev) return NULL; return to_i2c_client(dev); } EXPORT_SYMBOL(of_find_i2c_device_by_node); MODULE_LICENSE("GPL");
gpl-2.0
lirokoa/htc_pico_kernel
arch/sh/kernel/cpu/sh4a/setup-sh7343.c
7506
13267
/* * SH7343 Setup * * Copyright (C) 2006 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/uio_driver.h> #include <linux/sh_timer.h> #include <asm/clock.h> /* Serial */ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe20000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 82, 82, 82 }, }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct plat_sci_port scif3_platform_data = { .mapbase = 0xffe30000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 83, 83, 83, 83 }, }; static struct platform_device scif3_device = { .name = "sh-sci", .id = 3, .dev = { .platform_data = &scif3_platform_data, }, }; static struct resource iic0_resources[] = { [0] = { .name = "IIC0", .start = 0x04470000, .end = 0x04470017, .flags = IORESOURCE_MEM, }, [1] = { 
.start = 96, .end = 99, .flags = IORESOURCE_IRQ, }, }; static struct platform_device iic0_device = { .name = "i2c-sh_mobile", .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic0_resources), .resource = iic0_resources, }; static struct resource iic1_resources[] = { [0] = { .name = "IIC1", .start = 0x04750000, .end = 0x04750017, .flags = IORESOURCE_MEM, }, [1] = { .start = 44, .end = 47, .flags = IORESOURCE_IRQ, }, }; static struct platform_device iic1_device = { .name = "i2c-sh_mobile", .id = 1, /* "i2c1" clock */ .num_resources = ARRAY_SIZE(iic1_resources), .resource = iic1_resources, }; static struct uio_info vpu_platform_data = { .name = "VPU4", .version = "0", .irq = 60, }; static struct resource vpu_resources[] = { [0] = { .name = "VPU", .start = 0xfe900000, .end = 0xfe9022eb, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device vpu_device = { .name = "uio_pdrv_genirq", .id = 0, .dev = { .platform_data = &vpu_platform_data, }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), }; static struct uio_info veu_platform_data = { .name = "VEU", .version = "0", .irq = 54, }; static struct resource veu_resources[] = { [0] = { .name = "VEU", .start = 0xfe920000, .end = 0xfe9200b7, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device veu_device = { .name = "uio_pdrv_genirq", .id = 1, .dev = { .platform_data = &veu_platform_data, }, .resource = veu_resources, .num_resources = ARRAY_SIZE(veu_resources), }; static struct uio_info jpu_platform_data = { .name = "JPU", .version = "0", .irq = 27, }; static struct resource jpu_resources[] = { [0] = { .name = "JPU", .start = 0xfea00000, .end = 0xfea102d3, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device jpu_device = { .name = "uio_pdrv_genirq", .id = 2, .dev = { .platform_data = &jpu_platform_data, }, .resource = 
jpu_resources, .num_resources = ARRAY_SIZE(jpu_resources), }; static struct sh_timer_config cmt_platform_data = { .channel_offset = 0x60, .timer_bit = 5, .clockevent_rating = 125, .clocksource_rating = 200, }; static struct resource cmt_resources[] = { [0] = { .start = 0x044a0060, .end = 0x044a006b, .flags = IORESOURCE_MEM, }, [1] = { .start = 104, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cmt_device = { .name = "sh_cmt", .id = 0, .dev = { .platform_data = &cmt_platform_data, }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), }; static struct sh_timer_config tmu0_platform_data = { .channel_offset = 0x04, .timer_bit = 0, .clockevent_rating = 200, }; static struct resource tmu0_resources[] = { [0] = { .start = 0xffd80008, .end = 0xffd80013, .flags = IORESOURCE_MEM, }, [1] = { .start = 16, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu0_device = { .name = "sh_tmu", .id = 0, .dev = { .platform_data = &tmu0_platform_data, }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), }; static struct sh_timer_config tmu1_platform_data = { .channel_offset = 0x10, .timer_bit = 1, .clocksource_rating = 200, }; static struct resource tmu1_resources[] = { [0] = { .start = 0xffd80014, .end = 0xffd8001f, .flags = IORESOURCE_MEM, }, [1] = { .start = 17, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu1_device = { .name = "sh_tmu", .id = 1, .dev = { .platform_data = &tmu1_platform_data, }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), }; static struct sh_timer_config tmu2_platform_data = { .channel_offset = 0x1c, .timer_bit = 2, }; static struct resource tmu2_resources[] = { [0] = { .start = 0xffd80020, .end = 0xffd8002b, .flags = IORESOURCE_MEM, }, [1] = { .start = 18, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu2_device = { .name = "sh_tmu", .id = 2, .dev = { .platform_data = &tmu2_platform_data, }, .resource = tmu2_resources, .num_resources = 
ARRAY_SIZE(tmu2_resources), }; static struct platform_device *sh7343_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &cmt_device, &tmu0_device, &tmu1_device, &tmu2_device, &iic0_device, &iic1_device, &vpu_device, &veu_device, &jpu_device, }; static int __init sh7343_devices_setup(void) { platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20); platform_resource_setup_memory(&veu_device, "veu", 2 << 20); platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); return platform_add_devices(sh7343_devices, ARRAY_SIZE(sh7343_devices)); } arch_initcall(sh7343_devices_setup); static struct platform_device *sh7343_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &cmt_device, &tmu0_device, &tmu1_device, &tmu2_device, }; void __init plat_early_device_setup(void) { early_platform_add_devices(sh7343_early_devices, ARRAY_SIZE(sh7343_early_devices)); } enum { UNUSED = 0, ENABLED, DISABLED, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, DMAC0, DMAC1, DMAC2, DMAC3, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU, MFI, VPU, TPU, Z3D4, USBI0, USBI1, MMC_ERR, MMC_TRAN, MMC_FSTAT, MMC_FRDY, DMAC4, DMAC5, DMAC_DADERR, KEYSC, SCIF, SCIF1, SCIF2, SCIF3, SIOF0, SIOF1, SIO, FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, IRDA, SDHI, CMT, TSIF, SIU, TMU0, TMU1, TMU2, JPU, LCDC, /* interrupt groups */ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB, }; static struct intc_vect vectors[] __initdata = { INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), INTC_VECT(I2C1_ALI, 0x780), INTC_VECT(I2C1_TACKI, 0x7a0), INTC_VECT(I2C1_WAITI, 0x7c0), INTC_VECT(I2C1_DTEI, 0x7e0), INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 
0x820), INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860), INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0), INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0), INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(TPU, 0x9a0), INTC_VECT(Z3D4, 0x9e0), INTC_VECT(USBI0, 0xa20), INTC_VECT(USBI1, 0xa40), INTC_VECT(MMC_ERR, 0xb00), INTC_VECT(MMC_TRAN, 0xb20), INTC_VECT(MMC_FSTAT, 0xb40), INTC_VECT(MMC_FRDY, 0xb60), INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0), INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(KEYSC, 0xbe0), INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIF1, 0xc20), INTC_VECT(SCIF2, 0xc40), INTC_VECT(SCIF3, 0xc60), INTC_VECT(SIOF0, 0xc80), INTC_VECT(SIOF1, 0xca0), INTC_VECT(SIO, 0xd00), INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0), INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20), INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60), INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0), INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0), INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), INTC_VECT(SIU, 0xf80), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), INTC_VECT(TMU2, 0x440), INTC_VECT(JPU, 0x560), INTC_VECT(LCDC, 0x580), }; static struct intc_group groups[] __initdata = { INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3), INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU), INTC_GROUP(MMC, MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR), INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR), INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI), INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI), INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI), INTC_GROUP(USB, USBI0, USBI1), }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } }, { 0xa4080088, 0xa40800c8, 8, /* IMR2 
/ IMCR2 */ { 0, 0, 0, VPU, 0, 0, 0, MFI } }, { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ { SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, 0, 0, 0, IRDA } }, { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ { 0, TMU2, TMU1, TMU0, JPU, 0, 0, LCDC } }, { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ { KEYSC, DMAC_DADERR, DMAC5, DMAC4, SCIF3, SCIF2, SCIF1, SCIF } }, { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ { 0, 0, 0, SIO, Z3D4, 0, SIOF1, SIOF0 } }, { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } }, { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ { 0, 0, 0, CMT, 0, USBI1, USBI0 } }, { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ { MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR } }, { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ { I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI, TPU, 0, 0, TSIF } }, { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } }, { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, SIM } }, { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } }, { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC45, USB, CMT } }, { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIF1, SCIF2, SCIF3 } }, { 0xa408001c, 0, 16, 4, /* IPRH */ { SIOF0, SIOF1, FLCTL, I2C0 } }, { 0xa4080020, 0, 16, 4, /* IPRI */ { SIO, 0, TSIF, I2C1 } }, { 0xa4080024, 0, 16, 4, /* IPRJ */ { Z3D4, 0, SIU } }, { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } }, { 0xa408002c, 0, 16, 4, /* IPRL */ { 0, 0, TPU } }, { 0xa4140010, 0, 32, 4, /* INTPRI00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_sense_reg sense_registers[] __initdata = { { 0xa414001c, 16, 2, /* ICR1 */ { IRQ0, 
IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_mask_reg ack_registers[] __initdata = { { 0xa4140024, 0, 8, /* INTREQ00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_desc intc_desc __initdata = { .name = "sh7343", .force_enable = ENABLED, .force_disable = DISABLED, .hw = INTC_HW_DESC(vectors, groups, mask_registers, prio_registers, sense_registers, ack_registers), }; void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); }
gpl-2.0
jshafer817/sailfish_kernel_hp_tenderloin30
arch/sh/kernel/cpu/sh2/setup-sh7619.c
7506
5479
/* * SH7619 Setup * * Copyright (C) 2006 Yoshinori Sato * Copyright (C) 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/sh_timer.h> #include <linux/io.h> enum { UNUSED = 0, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, WDT, EDMAC, CMT0, CMT1, SCIF0, SCIF1, SCIF2, HIF_HIFI, HIF_HIFBI, DMAC0, DMAC1, DMAC2, DMAC3, SIOF, }; static struct intc_vect vectors[] __initdata = { INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), INTC_IRQ(IRQ4, 80), INTC_IRQ(IRQ5, 81), INTC_IRQ(IRQ6, 82), INTC_IRQ(IRQ7, 83), INTC_IRQ(WDT, 84), INTC_IRQ(EDMAC, 85), INTC_IRQ(CMT0, 86), INTC_IRQ(CMT1, 87), INTC_IRQ(SCIF0, 88), INTC_IRQ(SCIF0, 89), INTC_IRQ(SCIF0, 90), INTC_IRQ(SCIF0, 91), INTC_IRQ(SCIF1, 92), INTC_IRQ(SCIF1, 93), INTC_IRQ(SCIF1, 94), INTC_IRQ(SCIF1, 95), INTC_IRQ(SCIF2, 96), INTC_IRQ(SCIF2, 97), INTC_IRQ(SCIF2, 98), INTC_IRQ(SCIF2, 99), INTC_IRQ(HIF_HIFI, 100), INTC_IRQ(HIF_HIFBI, 101), INTC_IRQ(DMAC0, 104), INTC_IRQ(DMAC1, 105), INTC_IRQ(DMAC2, 106), INTC_IRQ(DMAC3, 107), INTC_IRQ(SIOF, 108), }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xf8140006, 0, 16, 4, /* IPRA */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, { 0xf8140008, 0, 16, 4, /* IPRB */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, { 0xf8080000, 0, 16, 4, /* IPRC */ { WDT, EDMAC, CMT0, CMT1 } }, { 0xf8080002, 0, 16, 4, /* IPRD */ { SCIF0, SCIF1, SCIF2 } }, { 0xf8080004, 0, 16, 4, /* IPRE */ { HIF_HIFI, HIF_HIFBI } }, { 0xf8080006, 0, 16, 4, /* IPRF */ { DMAC0, DMAC1, DMAC2, DMAC3 } }, { 0xf8080008, 0, 16, 4, /* IPRG */ { SIOF } }, }; static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL, NULL, prio_registers, NULL); static struct plat_sci_port scif0_platform_data = { .mapbase = 0xf8400000, .flags = 
UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 88, 88, 88, 88 }, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xf8410000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 92, 92, 92, 92 }, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xf8420000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 96, 96, 96, 96 }, }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct resource eth_resources[] = { [0] = { .start = 0xfb000000, .end = 0xfb0001c8, .flags = IORESOURCE_MEM, }, [1] = { .start = 85, .end = 85, .flags = IORESOURCE_IRQ, }, }; static struct platform_device eth_device = { .name = "sh-eth", .id = -1, .dev = { .platform_data = (void *)1, }, .num_resources = ARRAY_SIZE(eth_resources), .resource = eth_resources, }; static struct sh_timer_config cmt0_platform_data = { .channel_offset = 0x02, .timer_bit = 0, .clockevent_rating = 125, .clocksource_rating = 0, /* disabled due to code generation issues */ }; static struct resource cmt0_resources[] = { [0] = { .start = 0xf84a0072, .end = 0xf84a0077, .flags = IORESOURCE_MEM, }, [1] = { .start = 86, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cmt0_device = { .name = "sh_cmt", .id = 0, .dev = { .platform_data = &cmt0_platform_data, }, .resource = cmt0_resources, .num_resources = ARRAY_SIZE(cmt0_resources), }; static struct sh_timer_config cmt1_platform_data = { .channel_offset = 0x08, 
.timer_bit = 1, .clockevent_rating = 125, .clocksource_rating = 0, /* disabled due to code generation issues */ }; static struct resource cmt1_resources[] = { [0] = { .start = 0xf84a0078, .end = 0xf84a007d, .flags = IORESOURCE_MEM, }, [1] = { .start = 87, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cmt1_device = { .name = "sh_cmt", .id = 1, .dev = { .platform_data = &cmt1_platform_data, }, .resource = cmt1_resources, .num_resources = ARRAY_SIZE(cmt1_resources), }; static struct platform_device *sh7619_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &eth_device, &cmt0_device, &cmt1_device, }; static int __init sh7619_devices_setup(void) { return platform_add_devices(sh7619_devices, ARRAY_SIZE(sh7619_devices)); } arch_initcall(sh7619_devices_setup); void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); } static struct platform_device *sh7619_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &cmt0_device, &cmt1_device, }; #define STBCR3 0xf80a0000 void __init plat_early_device_setup(void) { /* enable CMT clock */ __raw_writeb(__raw_readb(STBCR3) & ~0x10, STBCR3); early_platform_add_devices(sh7619_early_devices, ARRAY_SIZE(sh7619_early_devices)); }
gpl-2.0
skeevy420/android_kernel_lge_d850
arch/h8300/kernel/sys_h8300.c
7762
1728
/* * linux/arch/h8300/kernel/sys_h8300.c * * This file contains various random system calls that * have a non-standard calling sequence on the H8/300 * platform. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/ipc.h> #include <asm/setup.h> #include <asm/uaccess.h> #include <asm/cachectl.h> #include <asm/traps.h> #include <asm/unistd.h> /* sys_cacheflush -- no support. */ asmlinkage int sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) { return -EINVAL; } asmlinkage int sys_getpagesize(void) { return PAGE_SIZE; } #if defined(CONFIG_SYSCALL_PRINT) asmlinkage void syscall_print(void *dummy,...) { struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4); printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n", ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0); } #endif /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. */ int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { register long res __asm__("er0"); register const char *const *_c __asm__("er3") = envp; register const char *const *_b __asm__("er2") = argv; register const char * _a __asm__("er1") = filename; __asm__ __volatile__ ("mov.l %1,er0\n\t" "trapa #0\n\t" : "=r" (res) : "g" (__NR_execve), "g" (_a), "g" (_b), "g" (_c) : "cc", "memory"); return res; }
gpl-2.0
jrspruitt/FriendlyARM_210_Kernel
arch/h8300/kernel/sys_h8300.c
7762
1728
/* * linux/arch/h8300/kernel/sys_h8300.c * * This file contains various random system calls that * have a non-standard calling sequence on the H8/300 * platform. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/ipc.h> #include <asm/setup.h> #include <asm/uaccess.h> #include <asm/cachectl.h> #include <asm/traps.h> #include <asm/unistd.h> /* sys_cacheflush -- no support. */ asmlinkage int sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) { return -EINVAL; } asmlinkage int sys_getpagesize(void) { return PAGE_SIZE; } #if defined(CONFIG_SYSCALL_PRINT) asmlinkage void syscall_print(void *dummy,...) { struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4); printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n", ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0); } #endif /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. */ int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { register long res __asm__("er0"); register const char *const *_c __asm__("er3") = envp; register const char *const *_b __asm__("er2") = argv; register const char * _a __asm__("er1") = filename; __asm__ __volatile__ ("mov.l %1,er0\n\t" "trapa #0\n\t" : "=r" (res) : "g" (__NR_execve), "g" (_a), "g" (_b), "g" (_c) : "cc", "memory"); return res; }
gpl-2.0
vantinh1991/F240L-JB
drivers/media/dvb/frontends/mb86a16.c
8018
46822
/* Fujitsu MB86A16 DVB-S/DSS DC Receiver driver Copyright (C) Manu Abraham (abraham.manu@gmail.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "mb86a16.h" #include "mb86a16_priv.h" unsigned int verbose = 5; module_param(verbose, int, 0644); #define ABS(x) ((x) < 0 ? (-x) : (x)) struct mb86a16_state { struct i2c_adapter *i2c_adap; const struct mb86a16_config *config; struct dvb_frontend frontend; /* tuning parameters */ int frequency; int srate; /* Internal stuff */ int master_clk; int deci; int csel; int rsel; }; #define MB86A16_ERROR 0 #define MB86A16_NOTICE 1 #define MB86A16_INFO 2 #define MB86A16_DEBUG 3 #define dprintk(x, y, z, format, arg...) 
do { \ if (z) { \ if ((x > MB86A16_ERROR) && (x > y)) \ printk(KERN_ERR "%s: " format "\n", __func__, ##arg); \ else if ((x > MB86A16_NOTICE) && (x > y)) \ printk(KERN_NOTICE "%s: " format "\n", __func__, ##arg); \ else if ((x > MB86A16_INFO) && (x > y)) \ printk(KERN_INFO "%s: " format "\n", __func__, ##arg); \ else if ((x > MB86A16_DEBUG) && (x > y)) \ printk(KERN_DEBUG "%s: " format "\n", __func__, ##arg); \ } else { \ if (x > y) \ printk(format, ##arg); \ } \ } while (0) #define TRACE_IN dprintk(verbose, MB86A16_DEBUG, 1, "-->()") #define TRACE_OUT dprintk(verbose, MB86A16_DEBUG, 1, "()-->") static int mb86a16_write(struct mb86a16_state *state, u8 reg, u8 val) { int ret; u8 buf[] = { reg, val }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; dprintk(verbose, MB86A16_DEBUG, 1, "writing to [0x%02x],Reg[0x%02x],Data[0x%02x]", state->config->demod_address, buf[0], buf[1]); ret = i2c_transfer(state->i2c_adap, &msg, 1); return (ret != 1) ? 
-EREMOTEIO : 0; } static int mb86a16_read(struct mb86a16_state *state, u8 reg, u8 *val) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c_adap, msg, 2); if (ret != 2) { dprintk(verbose, MB86A16_ERROR, 1, "read error(reg=0x%02x, ret=0x%i)", reg, ret); return -EREMOTEIO; } *val = b1[0]; return ret; } static int CNTM_set(struct mb86a16_state *state, unsigned char timint1, unsigned char timint2, unsigned char cnext) { unsigned char val; val = (timint1 << 4) | (timint2 << 2) | cnext; if (mb86a16_write(state, MB86A16_CNTMR, val) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int smrt_set(struct mb86a16_state *state, int rate) { int tmp ; int m ; unsigned char STOFS0, STOFS1; m = 1 << state->deci; tmp = (8192 * state->master_clk - 2 * m * rate * 8192 + state->master_clk / 2) / state->master_clk; STOFS0 = tmp & 0x0ff; STOFS1 = (tmp & 0xf00) >> 8; if (mb86a16_write(state, MB86A16_SRATE1, (state->deci << 2) | (state->csel << 1) | state->rsel) < 0) goto err; if (mb86a16_write(state, MB86A16_SRATE2, STOFS0) < 0) goto err; if (mb86a16_write(state, MB86A16_SRATE3, STOFS1) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -1; } static int srst(struct mb86a16_state *state) { if (mb86a16_write(state, MB86A16_RESET, 0x04) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int afcex_data_set(struct mb86a16_state *state, unsigned char AFCEX_L, unsigned char AFCEX_H) { if (mb86a16_write(state, MB86A16_AFCEXL, AFCEX_L) < 0) goto err; if (mb86a16_write(state, MB86A16_AFCEXH, AFCEX_H) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -1; } static int 
afcofs_data_set(struct mb86a16_state *state, unsigned char AFCEX_L, unsigned char AFCEX_H) { if (mb86a16_write(state, 0x58, AFCEX_L) < 0) goto err; if (mb86a16_write(state, 0x59, AFCEX_H) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int stlp_set(struct mb86a16_state *state, unsigned char STRAS, unsigned char STRBS) { if (mb86a16_write(state, MB86A16_STRFILTCOEF1, (STRBS << 3) | (STRAS)) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int Vi_set(struct mb86a16_state *state, unsigned char ETH, unsigned char VIA) { if (mb86a16_write(state, MB86A16_VISET2, 0x04) < 0) goto err; if (mb86a16_write(state, MB86A16_VISET3, 0xf5) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int initial_set(struct mb86a16_state *state) { if (stlp_set(state, 5, 7)) goto err; udelay(100); if (afcex_data_set(state, 0, 0)) goto err; udelay(100); if (afcofs_data_set(state, 0, 0)) goto err; udelay(100); if (mb86a16_write(state, MB86A16_CRLFILTCOEF1, 0x16) < 0) goto err; if (mb86a16_write(state, 0x2f, 0x21) < 0) goto err; if (mb86a16_write(state, MB86A16_VIMAG, 0x38) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS1, 0x00) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS2, 0x1c) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS3, 0x20) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS4, 0x1e) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS5, 0x23) < 0) goto err; if (mb86a16_write(state, 0x54, 0xff) < 0) goto err; if (mb86a16_write(state, MB86A16_TSOUT, 0x00) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int S01T_set(struct mb86a16_state *state, unsigned char s1t, unsigned s0t) { if (mb86a16_write(state, 0x33, (s1t << 3) | s0t) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C 
transfer error"); return -EREMOTEIO; } static int EN_set(struct mb86a16_state *state, int cren, int afcen) { unsigned char val; val = 0x7a | (cren << 7) | (afcen << 2); if (mb86a16_write(state, 0x49, val) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int AFCEXEN_set(struct mb86a16_state *state, int afcexen, int smrt) { unsigned char AFCA ; if (smrt > 18875) AFCA = 4; else if (smrt > 9375) AFCA = 3; else if (smrt > 2250) AFCA = 2; else AFCA = 1; if (mb86a16_write(state, 0x2a, 0x02 | (afcexen << 5) | (AFCA << 2)) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int DAGC_data_set(struct mb86a16_state *state, unsigned char DAGCA, unsigned char DAGCW) { if (mb86a16_write(state, 0x2d, (DAGCA << 3) | DAGCW) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static void smrt_info_get(struct mb86a16_state *state, int rate) { if (rate >= 37501) { state->deci = 0; state->csel = 0; state->rsel = 0; } else if (rate >= 30001) { state->deci = 0; state->csel = 0; state->rsel = 1; } else if (rate >= 26251) { state->deci = 0; state->csel = 1; state->rsel = 0; } else if (rate >= 22501) { state->deci = 0; state->csel = 1; state->rsel = 1; } else if (rate >= 18751) { state->deci = 1; state->csel = 0; state->rsel = 0; } else if (rate >= 15001) { state->deci = 1; state->csel = 0; state->rsel = 1; } else if (rate >= 13126) { state->deci = 1; state->csel = 1; state->rsel = 0; } else if (rate >= 11251) { state->deci = 1; state->csel = 1; state->rsel = 1; } else if (rate >= 9376) { state->deci = 2; state->csel = 0; state->rsel = 0; } else if (rate >= 7501) { state->deci = 2; state->csel = 0; state->rsel = 1; } else if (rate >= 6563) { state->deci = 2; state->csel = 1; state->rsel = 0; } else if (rate >= 5626) { state->deci = 2; state->csel = 1; state->rsel = 1; } else if (rate >= 4688) { state->deci 
= 3; state->csel = 0; state->rsel = 0; } else if (rate >= 3751) { state->deci = 3; state->csel = 0; state->rsel = 1; } else if (rate >= 3282) { state->deci = 3; state->csel = 1; state->rsel = 0; } else if (rate >= 2814) { state->deci = 3; state->csel = 1; state->rsel = 1; } else if (rate >= 2344) { state->deci = 4; state->csel = 0; state->rsel = 0; } else if (rate >= 1876) { state->deci = 4; state->csel = 0; state->rsel = 1; } else if (rate >= 1641) { state->deci = 4; state->csel = 1; state->rsel = 0; } else if (rate >= 1407) { state->deci = 4; state->csel = 1; state->rsel = 1; } else if (rate >= 1172) { state->deci = 5; state->csel = 0; state->rsel = 0; } else if (rate >= 939) { state->deci = 5; state->csel = 0; state->rsel = 1; } else if (rate >= 821) { state->deci = 5; state->csel = 1; state->rsel = 0; } else { state->deci = 5; state->csel = 1; state->rsel = 1; } if (state->csel == 0) state->master_clk = 92000; else state->master_clk = 61333; } static int signal_det(struct mb86a16_state *state, int smrt, unsigned char *SIG) { int ret ; int smrtd ; int wait_sym ; u32 wait_t; unsigned char S[3] ; int i ; if (*SIG > 45) { if (CNTM_set(state, 2, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error"); return -1; } wait_sym = 40000; } else { if (CNTM_set(state, 3, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error"); return -1; } wait_sym = 80000; } for (i = 0; i < 3; i++) { if (i == 0) smrtd = smrt * 98 / 100; else if (i == 1) smrtd = smrt; else smrtd = smrt * 102 / 100; smrt_info_get(state, smrtd); smrt_set(state, smrtd); srst(state); wait_t = (wait_sym + 99 * smrtd / 100) / smrtd; if (wait_t == 0) wait_t = 1; msleep_interruptible(10); if (mb86a16_read(state, 0x37, &(S[i])) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } } if ((S[1] > S[0] * 112 / 100) && (S[1] > S[2] * 112 / 100)) { ret = 1; } else { ret = 0; } *SIG = S[1]; if (CNTM_set(state, 0, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set 
Error"); return -1; } return ret; } static int rf_val_set(struct mb86a16_state *state, int f, int smrt, unsigned char R) { unsigned char C, F, B; int M; unsigned char rf_val[5]; int ack = -1; if (smrt > 37750) C = 1; else if (smrt > 18875) C = 2; else if (smrt > 5500) C = 3; else C = 4; if (smrt > 30500) F = 3; else if (smrt > 9375) F = 1; else if (smrt > 4625) F = 0; else F = 2; if (f < 1060) B = 0; else if (f < 1175) B = 1; else if (f < 1305) B = 2; else if (f < 1435) B = 3; else if (f < 1570) B = 4; else if (f < 1715) B = 5; else if (f < 1845) B = 6; else if (f < 1980) B = 7; else if (f < 2080) B = 8; else B = 9; M = f * (1 << R) / 2; rf_val[0] = 0x01 | (C << 3) | (F << 1); rf_val[1] = (R << 5) | ((M & 0x1f000) >> 12); rf_val[2] = (M & 0x00ff0) >> 4; rf_val[3] = ((M & 0x0000f) << 4) | B; /* Frequency Set */ if (mb86a16_write(state, 0x21, rf_val[0]) < 0) ack = 0; if (mb86a16_write(state, 0x22, rf_val[1]) < 0) ack = 0; if (mb86a16_write(state, 0x23, rf_val[2]) < 0) ack = 0; if (mb86a16_write(state, 0x24, rf_val[3]) < 0) ack = 0; if (mb86a16_write(state, 0x25, 0x01) < 0) ack = 0; if (ack == 0) { dprintk(verbose, MB86A16_ERROR, 1, "RF Setup - I2C transfer error"); return -EREMOTEIO; } return 0; } static int afcerr_chk(struct mb86a16_state *state) { unsigned char AFCM_L, AFCM_H ; int AFCM ; int afcm, afcerr ; if (mb86a16_read(state, 0x0e, &AFCM_L) != 2) goto err; if (mb86a16_read(state, 0x0f, &AFCM_H) != 2) goto err; AFCM = (AFCM_H << 8) + AFCM_L; if (AFCM > 2048) afcm = AFCM - 4096; else afcm = AFCM; afcerr = afcm * state->master_clk / 8192; return afcerr; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int dagcm_val_get(struct mb86a16_state *state) { int DAGCM; unsigned char DAGCM_H, DAGCM_L; if (mb86a16_read(state, 0x45, &DAGCM_L) != 2) goto err; if (mb86a16_read(state, 0x46, &DAGCM_H) != 2) goto err; DAGCM = (DAGCM_H << 8) + DAGCM_L; return DAGCM; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return 
-EREMOTEIO; } static int mb86a16_read_status(struct dvb_frontend *fe, fe_status_t *status) { u8 stat, stat2; struct mb86a16_state *state = fe->demodulator_priv; *status = 0; if (mb86a16_read(state, MB86A16_SIG1, &stat) != 2) goto err; if (mb86a16_read(state, MB86A16_SIG2, &stat2) != 2) goto err; if ((stat > 25) && (stat2 > 25)) *status |= FE_HAS_SIGNAL; if ((stat > 45) && (stat2 > 45)) *status |= FE_HAS_CARRIER; if (mb86a16_read(state, MB86A16_STATUS, &stat) != 2) goto err; if (stat & 0x01) *status |= FE_HAS_SYNC; if (stat & 0x01) *status |= FE_HAS_VITERBI; if (mb86a16_read(state, MB86A16_FRAMESYNC, &stat) != 2) goto err; if ((stat & 0x0f) && (*status & FE_HAS_VITERBI)) *status |= FE_HAS_LOCK; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int sync_chk(struct mb86a16_state *state, unsigned char *VIRM) { unsigned char val; int sync; if (mb86a16_read(state, 0x0d, &val) != 2) goto err; dprintk(verbose, MB86A16_INFO, 1, "Status = %02x,", val); sync = val & 0x01; *VIRM = (val & 0x1c) >> 2; return sync; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int freqerr_chk(struct mb86a16_state *state, int fTP, int smrt, int unit) { unsigned char CRM, AFCML, AFCMH; unsigned char temp1, temp2, temp3; int crm, afcm, AFCM; int crrerr, afcerr; /* kHz */ int frqerr; /* MHz */ int afcen, afcexen = 0; int R, M, fOSC, fOSC_OFS; if (mb86a16_read(state, 0x43, &CRM) != 2) goto err; if (CRM > 127) crm = CRM - 256; else crm = CRM; crrerr = smrt * crm / 256; if (mb86a16_read(state, 0x49, &temp1) != 2) goto err; afcen = (temp1 & 0x04) >> 2; if (afcen == 0) { if (mb86a16_read(state, 0x2a, &temp1) != 2) goto err; afcexen = (temp1 & 0x20) >> 5; } if (afcen == 1) { if (mb86a16_read(state, 0x0e, &AFCML) != 2) goto err; if (mb86a16_read(state, 0x0f, &AFCMH) != 2) goto err; } else if (afcexen == 1) { if (mb86a16_read(state, 0x2b, &AFCML) != 2) goto err; if (mb86a16_read(state, 0x2c, &AFCMH) != 2) goto 
err; } if ((afcen == 1) || (afcexen == 1)) { smrt_info_get(state, smrt); AFCM = ((AFCMH & 0x01) << 8) + AFCML; if (AFCM > 255) afcm = AFCM - 512; else afcm = AFCM; afcerr = afcm * state->master_clk / 8192; } else afcerr = 0; if (mb86a16_read(state, 0x22, &temp1) != 2) goto err; if (mb86a16_read(state, 0x23, &temp2) != 2) goto err; if (mb86a16_read(state, 0x24, &temp3) != 2) goto err; R = (temp1 & 0xe0) >> 5; M = ((temp1 & 0x1f) << 12) + (temp2 << 4) + (temp3 >> 4); if (R == 0) fOSC = 2 * M; else fOSC = M; fOSC_OFS = fOSC - fTP; if (unit == 0) { /* MHz */ if (crrerr + afcerr + fOSC_OFS * 1000 >= 0) frqerr = (crrerr + afcerr + fOSC_OFS * 1000 + 500) / 1000; else frqerr = (crrerr + afcerr + fOSC_OFS * 1000 - 500) / 1000; } else { /* kHz */ frqerr = crrerr + afcerr + fOSC_OFS * 1000; } return frqerr; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static unsigned char vco_dev_get(struct mb86a16_state *state, int smrt) { unsigned char R; if (smrt > 9375) R = 0; else R = 1; return R; } static void swp_info_get(struct mb86a16_state *state, int fOSC_start, int smrt, int v, int R, int swp_ofs, int *fOSC, int *afcex_freq, unsigned char *AFCEX_L, unsigned char *AFCEX_H) { int AFCEX ; int crnt_swp_freq ; crnt_swp_freq = fOSC_start * 1000 + v * swp_ofs; if (R == 0) *fOSC = (crnt_swp_freq + 1000) / 2000 * 2; else *fOSC = (crnt_swp_freq + 500) / 1000; if (*fOSC >= crnt_swp_freq) *afcex_freq = *fOSC * 1000 - crnt_swp_freq; else *afcex_freq = crnt_swp_freq - *fOSC * 1000; AFCEX = *afcex_freq * 8192 / state->master_clk; *AFCEX_L = AFCEX & 0x00ff; *AFCEX_H = (AFCEX & 0x0f00) >> 8; } static int swp_freq_calcuation(struct mb86a16_state *state, int i, int v, int *V, int vmax, int vmin, int SIGMIN, int fOSC, int afcex_freq, int swp_ofs, unsigned char *SIG1) { int swp_freq ; if ((i % 2 == 1) && (v <= vmax)) { /* positive v (case 1) */ if ((v - 1 == vmin) && (*(V + 30 + v) >= 0) && (*(V + 30 + v - 1) >= 0) && (*(V + 30 + v - 1) > *(V + 30 + v)) && (*(V + 
30 + v - 1) > SIGMIN)) { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs; *SIG1 = *(V + 30 + v - 1); } else if ((v == vmax) && (*(V + 30 + v) >= 0) && (*(V + 30 + v - 1) >= 0) && (*(V + 30 + v) > *(V + 30 + v - 1)) && (*(V + 30 + v) > SIGMIN)) { /* (case 2) */ swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else if ((*(V + 30 + v) > 0) && (*(V + 30 + v - 1) > 0) && (*(V + 30 + v - 2) > 0) && (*(V + 30 + v - 3) > 0) && (*(V + 30 + v - 1) > *(V + 30 + v)) && (*(V + 30 + v - 2) > *(V + 30 + v - 3)) && ((*(V + 30 + v - 1) > SIGMIN) || (*(V + 30 + v - 2) > SIGMIN))) { /* (case 3) */ if (*(V + 30 + v - 1) >= *(V + 30 + v - 2)) { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs; *SIG1 = *(V + 30 + v - 1); } else { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs * 2; *SIG1 = *(V + 30 + v - 2); } } else if ((v == vmax) && (*(V + 30 + v) >= 0) && (*(V + 30 + v - 1) >= 0) && (*(V + 30 + v - 2) >= 0) && (*(V + 30 + v) > *(V + 30 + v - 2)) && (*(V + 30 + v - 1) > *(V + 30 + v - 2)) && ((*(V + 30 + v) > SIGMIN) || (*(V + 30 + v - 1) > SIGMIN))) { /* (case 4) */ if (*(V + 30 + v) >= *(V + 30 + v - 1)) { swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs; *SIG1 = *(V + 30 + v - 1); } } else { swp_freq = -1 ; } } else if ((i % 2 == 0) && (v >= vmin)) { /* Negative v (case 1) */ if ((*(V + 30 + v) > 0) && (*(V + 30 + v + 1) > 0) && (*(V + 30 + v + 2) > 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 1) > *(V + 30 + v + 2)) && (*(V + 30 + v + 1) > SIGMIN)) { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } else if ((v + 1 == vmax) && (*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 1) > SIGMIN)) { /* (case 2) */ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v); } else if ((v == vmin) && (*(V + 30 + v) > 0) && (*(V + 30 + v + 1) > 0) && (*(V + 30 + v + 2) > 0) && (*(V + 30 + v) > *(V + 30 + v + 1)) 
&& (*(V + 30 + v) > *(V + 30 + v + 2)) && (*(V + 30 + v) > SIGMIN)) { /* (case 3) */ swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else if ((*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 2) >= 0) && (*(V + 30 + v + 3) >= 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 2) > *(V + 30 + v + 3)) && ((*(V + 30 + v + 1) > SIGMIN) || (*(V + 30 + v + 2) > SIGMIN))) { /* (case 4) */ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } else { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2; *SIG1 = *(V + 30 + v + 2); } } else if ((*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 2) >= 0) && (*(V + 30 + v + 3) >= 0) && (*(V + 30 + v) > *(V + 30 + v + 2)) && (*(V + 30 + v + 1) > *(V + 30 + v + 2)) && (*(V + 30 + v) > *(V + 30 + v + 3)) && (*(V + 30 + v + 1) > *(V + 30 + v + 3)) && ((*(V + 30 + v) > SIGMIN) || (*(V + 30 + v + 1) > SIGMIN))) { /* (case 5) */ if (*(V + 30 + v) >= *(V + 30 + v + 1)) { swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } } else if ((v + 2 == vmin) && (*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 2) >= 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 2) > *(V + 30 + v)) && ((*(V + 30 + v + 1) > SIGMIN) || (*(V + 30 + v + 2) > SIGMIN))) { /* (case 6) */ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } else { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2; *SIG1 = *(V + 30 + v + 2); } } else if ((vmax == 0) && (vmin == 0) && (*(V + 30 + v) > SIGMIN)) { swp_freq = fOSC * 1000; *SIG1 = *(V + 30 + v); } else swp_freq = -1; } else swp_freq = -1; return swp_freq; } static void swp_info_get2(struct mb86a16_state *state, int smrt, int R, int swp_freq, int *afcex_freq, int *fOSC, unsigned char *AFCEX_L, unsigned char 
*AFCEX_H) { int AFCEX ; if (R == 0) *fOSC = (swp_freq + 1000) / 2000 * 2; else *fOSC = (swp_freq + 500) / 1000; if (*fOSC >= swp_freq) *afcex_freq = *fOSC * 1000 - swp_freq; else *afcex_freq = swp_freq - *fOSC * 1000; AFCEX = *afcex_freq * 8192 / state->master_clk; *AFCEX_L = AFCEX & 0x00ff; *AFCEX_H = (AFCEX & 0x0f00) >> 8; } static void afcex_info_get(struct mb86a16_state *state, int afcex_freq, unsigned char *AFCEX_L, unsigned char *AFCEX_H) { int AFCEX ; AFCEX = afcex_freq * 8192 / state->master_clk; *AFCEX_L = AFCEX & 0x00ff; *AFCEX_H = (AFCEX & 0x0f00) >> 8; } static int SEQ_set(struct mb86a16_state *state, unsigned char loop) { /* SLOCK0 = 0 */ if (mb86a16_write(state, 0x32, 0x02 | (loop << 2)) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int iq_vt_set(struct mb86a16_state *state, unsigned char IQINV) { /* Viterbi Rate, IQ Settings */ if (mb86a16_write(state, 0x06, 0xdf | (IQINV << 5)) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int FEC_srst(struct mb86a16_state *state) { if (mb86a16_write(state, MB86A16_RESET, 0x02) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int S2T_set(struct mb86a16_state *state, unsigned char S2T) { if (mb86a16_write(state, 0x34, 0x70 | S2T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int S45T_set(struct mb86a16_state *state, unsigned char S4T, unsigned char S5T) { if (mb86a16_write(state, 0x35, 0x00 | (S5T << 4) | S4T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int mb86a16_set_fe(struct mb86a16_state *state) { u8 agcval, cnmval; int i, j; int fOSC = 0; int fOSC_start = 0; int wait_t; int fcp; int swp_ofs; int V[60]; u8 SIG1MIN; unsigned char CREN, AFCEN, AFCEXEN; unsigned char SIG1; unsigned char TIMINT1, TIMINT2, TIMEXT; unsigned char 
S0T, S1T; unsigned char S2T; /* unsigned char S2T, S3T; */ unsigned char S4T, S5T; unsigned char AFCEX_L, AFCEX_H; unsigned char R; unsigned char VIRM; unsigned char ETH, VIA; unsigned char junk; int loop; int ftemp; int v, vmax, vmin; int vmax_his, vmin_his; int swp_freq, prev_swp_freq[20]; int prev_freq_num; int signal_dupl; int afcex_freq; int signal; int afcerr; int temp_freq, delta_freq; int dagcm[4]; int smrt_d; /* int freq_err; */ int n; int ret = -1; int sync; dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate); fcp = 3000; swp_ofs = state->srate / 4; for (i = 0; i < 60; i++) V[i] = -1; for (i = 0; i < 20; i++) prev_swp_freq[i] = 0; SIG1MIN = 25; for (n = 0; ((n < 3) && (ret == -1)); n++) { SEQ_set(state, 0); iq_vt_set(state, 0); CREN = 0; AFCEN = 0; AFCEXEN = 1; TIMINT1 = 0; TIMINT2 = 1; TIMEXT = 2; S1T = 0; S0T = 0; if (initial_set(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "initial set failed"); return -1; } if (DAGC_data_set(state, 3, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error"); return -1; } if (EN_set(state, CREN, AFCEN) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "EN set error"); return -1; /* (0, 0) */ } if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; /* (1, smrt) = (1, symbolrate) */ } if (CNTM_set(state, TIMINT1, TIMINT2, TIMEXT) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set error"); return -1; /* (0, 1, 2) */ } if (S01T_set(state, S1T, S0T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "S01T set error"); return -1; /* (0, 0) */ } smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt info get error"); return -1; } R = vco_dev_get(state, state->srate); if (R == 1) fOSC_start = state->frequency; else if (R == 0) { if (state->frequency % 2 == 0) { fOSC_start = state->frequency; } else { fOSC_start = state->frequency + 1; if (fOSC_start > 2150) 
fOSC_start = state->frequency - 1; } } loop = 1; ftemp = fOSC_start * 1000; vmax = 0 ; while (loop == 1) { ftemp = ftemp + swp_ofs; vmax++; /* Upper bound */ if (ftemp > 2150000) { loop = 0; vmax--; } else { if ((ftemp == 2150000) || (ftemp - state->frequency * 1000 >= fcp + state->srate / 4)) loop = 0; } } loop = 1; ftemp = fOSC_start * 1000; vmin = 0 ; while (loop == 1) { ftemp = ftemp - swp_ofs; vmin--; /* Lower bound */ if (ftemp < 950000) { loop = 0; vmin++; } else { if ((ftemp == 950000) || (state->frequency * 1000 - ftemp >= fcp + state->srate / 4)) loop = 0; } } wait_t = (8000 + state->srate / 2) / state->srate; if (wait_t == 0) wait_t = 1; i = 0; j = 0; prev_freq_num = 0; loop = 1; signal = 0; vmax_his = 0; vmin_his = 0; v = 0; while (loop == 1) { swp_info_get(state, fOSC_start, state->srate, v, R, swp_ofs, &fOSC, &afcex_freq, &AFCEX_L, &AFCEX_H); udelay(100); if (rf_val_set(state, fOSC, state->srate, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } udelay(100); if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } if (srst(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "srst error"); return -1; } msleep_interruptible(wait_t); if (mb86a16_read(state, 0x37, &SIG1) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -1; } V[30 + v] = SIG1 ; swp_freq = swp_freq_calcuation(state, i, v, V, vmax, vmin, SIG1MIN, fOSC, afcex_freq, swp_ofs, &SIG1); /* changed */ signal_dupl = 0; for (j = 0; j < prev_freq_num; j++) { if ((ABS(prev_swp_freq[j] - swp_freq)) < (swp_ofs * 3 / 2)) { signal_dupl = 1; dprintk(verbose, MB86A16_INFO, 1, "Probably Duplicate Signal, j = %d", j); } } if ((signal_dupl == 0) && (swp_freq > 0) && (ABS(swp_freq - state->frequency * 1000) < fcp + state->srate / 6)) { dprintk(verbose, MB86A16_DEBUG, 1, "------ Signal detect ------ [swp_freq=[%07d, srate=%05d]]", swp_freq, state->srate); prev_swp_freq[prev_freq_num] = swp_freq; 
prev_freq_num++; swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, state->srate, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } signal = signal_det(state, state->srate, &SIG1); if (signal == 1) { dprintk(verbose, MB86A16_ERROR, 1, "***** Signal Found *****"); loop = 0; } else { dprintk(verbose, MB86A16_ERROR, 1, "!!!!! No signal !!!!!, try again..."); smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } } } if (v > vmax) vmax_his = 1 ; if (v < vmin) vmin_his = 1 ; i++; if ((i % 2 == 1) && (vmax_his == 1)) i++; if ((i % 2 == 0) && (vmin_his == 1)) i++; if (i % 2 == 1) v = (i + 1) / 2; else v = -i / 2; if ((vmax_his == 1) && (vmin_his == 1)) loop = 0 ; } if (signal == 1) { dprintk(verbose, MB86A16_INFO, 1, " Start Freq Error Check"); S1T = 7 ; S0T = 1 ; CREN = 0 ; AFCEN = 1 ; AFCEXEN = 0 ; if (S01T_set(state, S1T, S0T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "S01T set error"); return -1; } smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } if (EN_set(state, CREN, AFCEN) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "EN set error"); return -1; } if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; } afcex_info_get(state, afcex_freq, &AFCEX_L, &AFCEX_H); if (afcofs_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCOFS data set error"); return -1; } if (srst(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "srst error"); return -1; } /* delay 4~200 */ wait_t = 200000 / state->master_clk + 200000 / state->srate; msleep(wait_t); afcerr = afcerr_chk(state); if (afcerr == -1) 
return -1; swp_freq = fOSC * 1000 + afcerr ; AFCEXEN = 1 ; if (state->srate >= 1500) smrt_d = state->srate / 3; else smrt_d = state->srate / 2; smrt_info_get(state, smrt_d); if (smrt_set(state, smrt_d) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } if (AFCEXEN_set(state, AFCEXEN, smrt_d) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; } R = vco_dev_get(state, smrt_d); if (DAGC_data_set(state, 2, 0) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error"); return -1; } for (i = 0; i < 3; i++) { temp_freq = swp_freq + (i - 1) * state->srate / 8; swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, smrt_d, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } wait_t = 200000 / state->master_clk + 40000 / smrt_d; msleep(wait_t); dagcm[i] = dagcm_val_get(state); } if ((dagcm[0] > dagcm[1]) && (dagcm[0] > dagcm[2]) && (dagcm[0] - dagcm[1] > 2 * (dagcm[2] - dagcm[1]))) { temp_freq = swp_freq - 2 * state->srate / 8; swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, smrt_d, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set"); return -1; } wait_t = 200000 / state->master_clk + 40000 / smrt_d; msleep(wait_t); dagcm[3] = dagcm_val_get(state); if (dagcm[3] > dagcm[1]) delta_freq = (dagcm[2] - dagcm[0] + dagcm[1] - dagcm[3]) * state->srate / 300; else delta_freq = 0; } else if ((dagcm[2] > dagcm[1]) && (dagcm[2] > dagcm[0]) && (dagcm[2] - dagcm[1] > 2 * (dagcm[0] - dagcm[1]))) { temp_freq = swp_freq + 2 * state->srate / 8; swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if 
(rf_val_set(state, fOSC, smrt_d, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set"); return -1; } wait_t = 200000 / state->master_clk + 40000 / smrt_d; msleep(wait_t); dagcm[3] = dagcm_val_get(state); if (dagcm[3] > dagcm[1]) delta_freq = (dagcm[2] - dagcm[0] + dagcm[3] - dagcm[1]) * state->srate / 300; else delta_freq = 0 ; } else { delta_freq = 0 ; } dprintk(verbose, MB86A16_INFO, 1, "SWEEP Frequency = %d", swp_freq); swp_freq += delta_freq; dprintk(verbose, MB86A16_INFO, 1, "Adjusting .., DELTA Freq = %d, SWEEP Freq=%d", delta_freq, swp_freq); if (ABS(state->frequency * 1000 - swp_freq) > 3800) { dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL !"); } else { S1T = 0; S0T = 3; CREN = 1; AFCEN = 0; AFCEXEN = 1; if (S01T_set(state, S1T, S0T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "S01T set error"); return -1; } if (DAGC_data_set(state, 0, 0) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error"); return -1; } R = vco_dev_get(state, state->srate); smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } if (EN_set(state, CREN, AFCEN) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "EN set error"); return -1; } if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; } swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, state->srate, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } if (srst(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "srst error"); return -1; } wait_t = 7 + (10000 + state->srate / 2) / state->srate; if (wait_t == 0) wait_t = 1; 
msleep_interruptible(wait_t); if (mb86a16_read(state, 0x37, &SIG1) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } if (SIG1 > 110) { S2T = 4; S4T = 1; S5T = 6; ETH = 4; VIA = 6; wait_t = 7 + (917504 + state->srate / 2) / state->srate; } else if (SIG1 > 105) { S2T = 4; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (1048576 + state->srate / 2) / state->srate; } else if (SIG1 > 85) { S2T = 5; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (1310720 + state->srate / 2) / state->srate; } else if (SIG1 > 65) { S2T = 6; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (1572864 + state->srate / 2) / state->srate; } else { S2T = 7; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (2097152 + state->srate / 2) / state->srate; } wait_t *= 2; /* FOS */ S2T_set(state, S2T); S45T_set(state, S4T, S5T); Vi_set(state, ETH, VIA); srst(state); msleep_interruptible(wait_t); sync = sync_chk(state, &VIRM); dprintk(verbose, MB86A16_INFO, 1, "-------- Viterbi=[%d] SYNC=[%d] ---------", VIRM, sync); if (VIRM) { if (VIRM == 4) { /* 5/6 */ if (SIG1 > 110) wait_t = (786432 + state->srate / 2) / state->srate; else wait_t = (1572864 + state->srate / 2) / state->srate; if (state->srate < 5000) /* FIXME ! , should be a long wait ! 
*/ msleep_interruptible(wait_t); else msleep_interruptible(wait_t); if (sync_chk(state, &junk) == 0) { iq_vt_set(state, 1); FEC_srst(state); } } /* 1/2, 2/3, 3/4, 7/8 */ if (SIG1 > 110) wait_t = (786432 + state->srate / 2) / state->srate; else wait_t = (1572864 + state->srate / 2) / state->srate; msleep_interruptible(wait_t); SEQ_set(state, 1); } else { dprintk(verbose, MB86A16_INFO, 1, "NO -- SYNC"); SEQ_set(state, 1); ret = -1; } } } else { dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL"); ret = -1; } sync = sync_chk(state, &junk); if (sync) { dprintk(verbose, MB86A16_INFO, 1, "******* SYNC *******"); freqerr_chk(state, state->frequency, state->srate, 1); ret = 0; break; } } mb86a16_read(state, 0x15, &agcval); mb86a16_read(state, 0x26, &cnmval); dprintk(verbose, MB86A16_INFO, 1, "AGC = %02x CNM = %02x", agcval, cnmval); return ret; } static int mb86a16_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd) { struct mb86a16_state *state = fe->demodulator_priv; int i; u8 regs; if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0) goto err; if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0) goto err; regs = 0x18; if (cmd->msg_len > 5 || cmd->msg_len < 4) return -EINVAL; for (i = 0; i < cmd->msg_len; i++) { if (mb86a16_write(state, regs, cmd->msg[i]) < 0) goto err; regs++; } i += 0x90; msleep_interruptible(10); if (mb86a16_write(state, MB86A16_DCC1, i) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int mb86a16_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst) { struct mb86a16_state *state = fe->demodulator_priv; switch (burst) { case SEC_MINI_A: if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA | MB86A16_DCC1_TBEN | MB86A16_DCC1_TBO) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, 
MB86A16_DCCOUT_DISEN) < 0) goto err; break; case SEC_MINI_B: if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA | MB86A16_DCC1_TBEN) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; break; } return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int mb86a16_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { struct mb86a16_state *state = fe->demodulator_priv; switch (tone) { case SEC_TONE_ON: if (mb86a16_write(state, MB86A16_TONEOUT2, 0x00) < 0) goto err; if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA | MB86A16_DCC1_CTOE) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; break; case SEC_TONE_OFF: if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0) goto err; if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0) goto err; break; default: return -EINVAL; } return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static enum dvbfe_search mb86a16_search(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct mb86a16_state *state = fe->demodulator_priv; state->frequency = p->frequency / 1000; state->srate = p->symbol_rate / 1000; if (!mb86a16_set_fe(state)) { dprintk(verbose, MB86A16_ERROR, 1, "Successfully acquired LOCK"); return DVBFE_ALGO_SEARCH_SUCCESS; } dprintk(verbose, MB86A16_ERROR, 1, "Lock acquisition failed!"); return DVBFE_ALGO_SEARCH_FAILED; } static void mb86a16_release(struct dvb_frontend *fe) { struct mb86a16_state *state = fe->demodulator_priv; kfree(state); } static int mb86a16_init(struct dvb_frontend *fe) { return 0; } static int mb86a16_sleep(struct dvb_frontend *fe) { return 0; } static int mb86a16_read_ber(struct dvb_frontend *fe, u32 *ber) { u8 ber_mon, ber_tab, ber_lsb, ber_mid, ber_msb, ber_tim, ber_rst; u32 timer; struct mb86a16_state 
*state = fe->demodulator_priv; *ber = 0; if (mb86a16_read(state, MB86A16_BERMON, &ber_mon) != 2) goto err; if (mb86a16_read(state, MB86A16_BERTAB, &ber_tab) != 2) goto err; if (mb86a16_read(state, MB86A16_BERLSB, &ber_lsb) != 2) goto err; if (mb86a16_read(state, MB86A16_BERMID, &ber_mid) != 2) goto err; if (mb86a16_read(state, MB86A16_BERMSB, &ber_msb) != 2) goto err; /* BER monitor invalid when BER_EN = 0 */ if (ber_mon & 0x04) { /* coarse, fast calculation */ *ber = ber_tab & 0x1f; dprintk(verbose, MB86A16_DEBUG, 1, "BER coarse=[0x%02x]", *ber); if (ber_mon & 0x01) { /* * BER_SEL = 1, The monitored BER is the estimated * value with a Reed-Solomon decoder error amount at * the deinterleaver output. * monitored BER is expressed as a 20 bit output in total */ ber_rst = ber_mon >> 3; *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb; if (ber_rst == 0) timer = 12500000; if (ber_rst == 1) timer = 25000000; if (ber_rst == 2) timer = 50000000; if (ber_rst == 3) timer = 100000000; *ber /= timer; dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber); } else { /* * BER_SEL = 0, The monitored BER is the estimated * value with a Viterbi decoder error amount at the * QPSK demodulator output. 
* monitored BER is expressed as a 24 bit output in total */ ber_tim = ber_mon >> 1; *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb; if (ber_tim == 0) timer = 16; if (ber_tim == 1) timer = 24; *ber /= 2 ^ timer; dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber); } } return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int mb86a16_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { u8 agcm = 0; struct mb86a16_state *state = fe->demodulator_priv; *strength = 0; if (mb86a16_read(state, MB86A16_AGCM, &agcm) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } *strength = ((0xff - agcm) * 100) / 256; dprintk(verbose, MB86A16_DEBUG, 1, "Signal strength=[%d %%]", (u8) *strength); *strength = (0xffff - 0xff) + agcm; return 0; } struct cnr { u8 cn_reg; u8 cn_val; }; static const struct cnr cnr_tab[] = { { 35, 2 }, { 40, 3 }, { 50, 4 }, { 60, 5 }, { 70, 6 }, { 80, 7 }, { 92, 8 }, { 103, 9 }, { 115, 10 }, { 138, 12 }, { 162, 15 }, { 180, 18 }, { 185, 19 }, { 189, 20 }, { 195, 22 }, { 199, 24 }, { 201, 25 }, { 202, 26 }, { 203, 27 }, { 205, 28 }, { 208, 30 } }; static int mb86a16_read_snr(struct dvb_frontend *fe, u16 *snr) { struct mb86a16_state *state = fe->demodulator_priv; int i = 0; int low_tide = 2, high_tide = 30, q_level; u8 cn; *snr = 0; if (mb86a16_read(state, 0x26, &cn) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } for (i = 0; i < ARRAY_SIZE(cnr_tab); i++) { if (cn < cnr_tab[i].cn_reg) { *snr = cnr_tab[i].cn_val; break; } } q_level = (*snr * 100) / (high_tide - low_tide); dprintk(verbose, MB86A16_ERROR, 1, "SNR (Quality) = [%d dB], Level=%d %%", *snr, q_level); *snr = (0xffff - 0xff) + *snr; return 0; } static int mb86a16_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { u8 dist; struct mb86a16_state *state = fe->demodulator_priv; if (mb86a16_read(state, MB86A16_DISTMON, &dist) != 2) { dprintk(verbose, 
MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } *ucblocks = dist; return 0; } static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_CUSTOM; } static struct dvb_frontend_ops mb86a16_ops = { .delsys = { SYS_DVBS }, .info = { .name = "Fujitsu MB86A16 DVB-S", .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 3000, .frequency_tolerance = 0, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .symbol_rate_tolerance = 500, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_QPSK | FE_CAN_FEC_AUTO }, .release = mb86a16_release, .get_frontend_algo = mb86a16_frontend_algo, .search = mb86a16_search, .init = mb86a16_init, .sleep = mb86a16_sleep, .read_status = mb86a16_read_status, .read_ber = mb86a16_read_ber, .read_signal_strength = mb86a16_read_signal_strength, .read_snr = mb86a16_read_snr, .read_ucblocks = mb86a16_read_ucblocks, .diseqc_send_master_cmd = mb86a16_send_diseqc_msg, .diseqc_send_burst = mb86a16_send_diseqc_burst, .set_tone = mb86a16_set_tone, }; struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config, struct i2c_adapter *i2c_adap) { u8 dev_id = 0; struct mb86a16_state *state = NULL; state = kmalloc(sizeof(struct mb86a16_state), GFP_KERNEL); if (state == NULL) goto error; state->config = config; state->i2c_adap = i2c_adap; mb86a16_read(state, 0x7f, &dev_id); if (dev_id != 0xfe) goto error; memcpy(&state->frontend.ops, &mb86a16_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; state->frontend.ops.set_voltage = state->config->set_voltage; return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(mb86a16_attach); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Manu Abraham");
gpl-2.0
xsynergy510x/android_kernel_google_msm
drivers/usb/host/whci/asl.c
13906
9704
/* * Wireless Host Controller (WHC) asynchronous schedule management. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> #include <linux/usb.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, struct whc_qset **next, struct whc_qset **prev) { struct list_head *n, *p; BUG_ON(list_empty(&whc->async_list)); n = qset->list_node.next; if (n == &whc->async_list) n = n->next; p = qset->list_node.prev; if (p == &whc->async_list) p = p->prev; *next = container_of(n, struct whc_qset, list_node); *prev = container_of(p, struct whc_qset, list_node); } static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset) { list_move(&qset->list_node, &whc->async_list); qset->in_sw_list = true; } static void asl_qset_insert(struct whc *whc, struct whc_qset *qset) { struct whc_qset *next, *prev; qset_clear(whc, qset); /* Link into ASL. 
*/ qset_get_next_prev(whc, qset, &next, &prev); whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma); whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma); qset->in_hw_list = true; } static void asl_qset_remove(struct whc *whc, struct whc_qset *qset) { struct whc_qset *prev, *next; qset_get_next_prev(whc, qset, &next, &prev); list_move(&qset->list_node, &whc->async_removed_list); qset->in_sw_list = false; /* * No more qsets in the ASL? The caller must stop the ASL as * it's no longer valid. */ if (list_empty(&whc->async_list)) return; /* Remove from ASL. */ whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma); qset->in_hw_list = false; } /** * process_qset - process any recently inactivated or halted qTDs in a * qset. * * After inactive qTDs are removed, new qTDs can be added if the * urb queue still contains URBs. * * Returns any additional WUSBCMD bits for the ASL sync command (i.e., * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed). */ static uint32_t process_qset(struct whc *whc, struct whc_qset *qset) { enum whc_update update = 0; uint32_t status = 0; while (qset->ntds) { struct whc_qtd *td; int t; t = qset->td_start; td = &qset->qtd[qset->td_start]; status = le32_to_cpu(td->status); /* * Nothing to do with a still active qTD. */ if (status & QTD_STS_ACTIVE) break; if (status & QTD_STS_HALTED) { /* Ug, an error. */ process_halted_qtd(whc, qset, td); /* A halted qTD always triggers an update because the qset was either removed or reactivated. */ update |= WHC_UPDATE_UPDATED; goto done; } /* Mmm, a completed qTD. */ process_inactive_qtd(whc, qset, td); } if (!qset->remove) update |= qset_add_qtds(whc, qset); done: /* * Remove this qset from the ASL if requested, but only if has * no qTDs. 
*/ if (qset->remove && qset->ntds == 0) { asl_qset_remove(whc, qset); update |= WHC_UPDATE_REMOVED; } return update; } void asl_start(struct whc *whc) { struct whc_qset *qset; qset = list_first_entry(&whc->async_list, struct whc_qset, list_node); le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR); whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN); whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED, 1000, "start ASL"); } void asl_stop(struct whc *whc) { whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0); whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, WUSBSTS_ASYNC_SCHED, 0, 1000, "stop ASL"); } /** * asl_update - request an ASL update and wait for the hardware to be synced * @whc: the WHCI HC * @wusbcmd: WUSBCMD value to start the update. * * If the WUSB HC is inactive (i.e., the ASL is stopped) then the * update must be skipped as the hardware may not respond to update * requests. */ void asl_update(struct whc *whc, uint32_t wusbcmd) { struct wusbhc *wusbhc = &whc->wusbhc; long t; mutex_lock(&wusbhc->mutex); if (wusbhc->active) { whc_write_wusbcmd(whc, wusbcmd, wusbcmd); t = wait_event_timeout( whc->async_list_wq, (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0, msecs_to_jiffies(1000)); if (t == 0) whc_hw_error(whc, "ASL update timeout"); } mutex_unlock(&wusbhc->mutex); } /** * scan_async_work - scan the ASL for qsets to process. * * Process each qset in the ASL in turn and then signal the WHC that * the ASL has been updated. * * Then start, stop or update the asynchronous schedule as required. */ void scan_async_work(struct work_struct *work) { struct whc *whc = container_of(work, struct whc, async_work); struct whc_qset *qset, *t; enum whc_update update = 0; spin_lock_irq(&whc->lock); /* * Transerve the software list backwards so new qsets can be * safely inserted into the ASL without making it non-circular. 
*/ list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) { if (!qset->in_hw_list) { asl_qset_insert(whc, qset); update |= WHC_UPDATE_ADDED; } update |= process_qset(whc, qset); } spin_unlock_irq(&whc->lock); if (update) { uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB; if (update & WHC_UPDATE_REMOVED) wusbcmd |= WUSBCMD_ASYNC_QSET_RM; asl_update(whc, wusbcmd); } /* * Now that the ASL is updated, complete the removal of any * removed qsets. * * If the qset was to be reset, do so and reinsert it into the * ASL if it has pending transfers. */ spin_lock_irq(&whc->lock); list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) { qset_remove_complete(whc, qset); if (qset->reset) { qset_reset(whc, qset); if (!list_empty(&qset->stds)) { asl_qset_insert_begin(whc, qset); queue_work(whc->workqueue, &whc->async_work); } } } spin_unlock_irq(&whc->lock); } /** * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL). * @whc: the WHCI host controller * @urb: the URB to enqueue * @mem_flags: flags for any memory allocations * * The qset for the endpoint is obtained and the urb queued on to it. * * Work is scheduled to update the hardware's view of the ASL. */ int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) { struct whc_qset *qset; int err; unsigned long flags; spin_lock_irqsave(&whc->lock, flags); err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); if (err < 0) { spin_unlock_irqrestore(&whc->lock, flags); return err; } qset = get_qset(whc, urb, GFP_ATOMIC); if (qset == NULL) err = -ENOMEM; else err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); if (!err) { if (!qset->in_sw_list && !qset->remove) asl_qset_insert_begin(whc, qset); } else usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); spin_unlock_irqrestore(&whc->lock, flags); if (!err) queue_work(whc->workqueue, &whc->async_work); return err; } /** * asl_urb_dequeue - remove an URB (qset) from the async list. 
* @whc: the WHCI host controller * @urb: the URB to dequeue * @status: the current status of the URB * * URBs that do yet have qTDs can simply be removed from the software * queue, otherwise the qset must be removed from the ASL so the qTDs * can be removed. */ int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) { struct whc_urb *wurb = urb->hcpriv; struct whc_qset *qset = wurb->qset; struct whc_std *std, *t; bool has_qtd = false; int ret; unsigned long flags; spin_lock_irqsave(&whc->lock, flags); ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status); if (ret < 0) goto out; list_for_each_entry_safe(std, t, &qset->stds, list_node) { if (std->urb == urb) { if (std->qtd) has_qtd = true; qset_free_std(whc, std); } else std->qtd = NULL; /* so this std is re-added when the qset is */ } if (has_qtd) { asl_qset_remove(whc, qset); wurb->status = status; wurb->is_async = true; queue_work(whc->workqueue, &wurb->dequeue_work); } else qset_remove_urb(whc, qset, urb, status); out: spin_unlock_irqrestore(&whc->lock, flags); return ret; } /** * asl_qset_delete - delete a qset from the ASL */ void asl_qset_delete(struct whc *whc, struct whc_qset *qset) { qset->remove = 1; queue_work(whc->workqueue, &whc->async_work); qset_delete(whc, qset); } /** * asl_init - initialize the asynchronous schedule list * * A dummy qset with no qTDs is added to the ASL to simplify removing * qsets (no need to stop the ASL when the last qset is removed). */ int asl_init(struct whc *whc) { struct whc_qset *qset; qset = qset_alloc(whc, GFP_KERNEL); if (qset == NULL) return -ENOMEM; asl_qset_insert_begin(whc, qset); asl_qset_insert(whc, qset); return 0; } /** * asl_clean_up - free ASL resources * * The ASL is stopped and empty except for the dummy qset. */ void asl_clean_up(struct whc *whc) { struct whc_qset *qset; if (!list_empty(&whc->async_list)) { qset = list_first_entry(&whc->async_list, struct whc_qset, list_node); list_del(&qset->list_node); qset_free(whc, qset); } }
gpl-2.0
deadlyindian/android_kernel_oneplus_msm8974
arch/ia64/kernel/audit.c
15698
1117
/*
 * ia64 syscall audit classification.
 *
 * Builds the per-class syscall number tables consumed by the audit
 * subsystem and registers them at boot.  Each table is populated by
 * textually including the shared asm-generic list of syscall numbers
 * for that class, terminated by the ~0U sentinel.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>

/* Syscalls that write directory entries (creat, mkdir, rename, ...). */
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};

/* Syscalls that read file contents. */
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};

/* Syscalls that write file contents. */
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};

/* Syscalls that change file attributes (chmod, chown, setxattr, ...). */
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};

/* Syscalls that send signals. */
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};

/*
 * audit_classify_arch - ia64 has a single native ABI, so every arch
 * value classifies as 0 (native).
 */
int audit_classify_arch(int arch)
{
	return 0;
}

/*
 * audit_classify_syscall - map a syscall number to the audit
 * subsystem's special-case class codes (2 = open-like, 3 = openat-like,
 * 5 = execve-like, 0 = no special handling).  @abi is unused on ia64.
 */
int audit_classify_syscall(int abi, unsigned syscall)
{
	switch(syscall) {
	case __NR_open:
		return 2;
	case __NR_openat:
		return 3;
	case __NR_execve:
		return 5;
	default:
		return 0;
	}
}

/*
 * audit_classes_init - register all syscall class tables with the
 * audit core at boot.  Always succeeds.
 */
static int __init audit_classes_init(void)
{
	audit_register_class(AUDIT_CLASS_WRITE, write_class);
	audit_register_class(AUDIT_CLASS_READ, read_class);
	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
	audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
	audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
	return 0;
}

__initcall(audit_classes_init);
gpl-2.0
dezelin/kvm
arch/tile/kernel/pci_gx.c
83
38806
/* * Copyright 2012 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/kernel.h> #include <linux/mmzone.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/irq.h> #include <linux/msi.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/ctype.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/byteorder.h> #include <gxio/iorpc_globals.h> #include <gxio/kiorpc.h> #include <gxio/trio.h> #include <gxio/iorpc_trio.h> #include <hv/drv_trio_intf.h> #include <arch/sim.h> /* * This file containes the routines to search for PCI buses, * enumerate the buses, and configure any attached devices. */ #define DEBUG_PCI_CFG 0 #if DEBUG_PCI_CFG #define TRACE_CFG_WR(size, val, bus, dev, func, offset) \ pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \ size, val, bus, dev, func, offset & 0xFFF); #define TRACE_CFG_RD(size, val, bus, dev, func, offset) \ pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \ size, val, bus, dev, func, offset & 0xFFF); #else #define TRACE_CFG_WR(...) #define TRACE_CFG_RD(...) #endif static int __devinitdata pci_probe = 1; /* Information on the PCIe RC ports configuration. 
*/ static int __devinitdata pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; /* * On some platforms with one or more Gx endpoint ports, we need to * delay the PCIe RC port probe for a few seconds to work around * a HW PCIe link-training bug. The exact delay is specified with * a kernel boot argument in the form of "pcie_rc_delay=T,P,S", * where T is the TRIO instance number, P is the port number and S is * the delay in seconds. If the delay is not provided, the value * will be DEFAULT_RC_DELAY. */ static int __devinitdata rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; /* Default number of seconds that the PCIe RC port probe can be delayed. */ #define DEFAULT_RC_DELAY 10 /* Max number of seconds that the PCIe RC port probe can be delayed. */ #define MAX_RC_DELAY 20 /* Array of the PCIe ports configuration info obtained from the BIB. */ struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; /* All drivers share the TRIO contexts defined here. */ gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; /* Pointer to an array of PCIe RC controllers. */ struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; int num_rc_controllers; static int num_ep_controllers; static struct pci_ops tile_cfg_ops; /* Mask of CPUs that should receive PCIe interrupts. */ static struct cpumask intr_cpus_map; /* * We don't need to worry about the alignment of resources. */ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { return res->start; } EXPORT_SYMBOL(pcibios_align_resource); /* * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #. * For now, we simply send interrupts to non-dataplane CPUs. * We may implement methods to allow user to specify the target CPUs, * e.g. via boot arguments. 
*/ static int tile_irq_cpu(int irq) { unsigned int count; int i = 0; int cpu; count = cpumask_weight(&intr_cpus_map); if (unlikely(count == 0)) { pr_warning("intr_cpus_map empty, interrupts will be" " delievered to dataplane tiles\n"); return irq % (smp_height * smp_width); } count = irq % count; for_each_cpu(cpu, &intr_cpus_map) { if (i++ == count) break; } return cpu; } /* * Open a file descriptor to the TRIO shim. */ static int __devinit tile_pcie_open(int trio_index) { gxio_trio_context_t *context = &trio_contexts[trio_index]; int ret; /* * This opens a file descriptor to the TRIO shim. */ ret = gxio_trio_init(context, trio_index); if (ret < 0) return ret; /* * Allocate an ASID for the kernel. */ ret = gxio_trio_alloc_asids(context, 1, 0, 0); if (ret < 0) { pr_err("PCI: ASID alloc failure on TRIO %d, give up\n", trio_index); goto asid_alloc_failure; } context->asid = ret; #ifdef USE_SHARED_PCIE_CONFIG_REGION /* * Alloc a PIO region for config access, shared by all MACs per TRIO. * This shouldn't fail since the kernel is supposed to the first * client of the TRIO's PIO regions. */ ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0); if (ret < 0) { pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n", trio_index); goto pio_alloc_failure; } context->pio_cfg_index = ret; /* * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR. 
*/ ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index, 0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); if (ret < 0) { pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n", trio_index); goto pio_alloc_failure; } #endif return ret; asid_alloc_failure: #ifdef USE_SHARED_PCIE_CONFIG_REGION pio_alloc_failure: #endif hv_dev_close(context->fd); return ret; } static void tilegx_legacy_irq_ack(struct irq_data *d) { __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq); } static void tilegx_legacy_irq_mask(struct irq_data *d) { __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); } static void tilegx_legacy_irq_unmask(struct irq_data *d) { __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); } static struct irq_chip tilegx_legacy_irq_chip = { .name = "tilegx_legacy_irq", .irq_ack = tilegx_legacy_irq_ack, .irq_mask = tilegx_legacy_irq_mask, .irq_unmask = tilegx_legacy_irq_unmask, /* TBD: support set_affinity. */ }; /* * This is a wrapper function of the kernel level-trigger interrupt * handler handle_level_irq() for PCI legacy interrupts. The TRIO * is configured such that only INTx Assert interrupts are proxied * to Linux which just calls handle_level_irq() after clearing the * MAC INTx Assert status bit associated with this interrupt. */ static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc) { struct pci_controller *controller = irq_desc_get_handler_data(desc); gxio_trio_context_t *trio_context = controller->trio; uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); int mac = controller->mac; unsigned int reg_offset; uint64_t level_mask; handle_level_irq(irq, desc); /* * Clear the INTx Level status, otherwise future interrupts are * not sent. 
*/ reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx; __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask); } /* * Create kernel irqs and set up the handlers for the legacy interrupts. * Also some minimum initialization for the MSI support. */ static int __devinit tile_init_irqs(struct pci_controller *controller) { int i; int j; int irq; int result; cpumask_copy(&intr_cpus_map, cpu_online_mask); for (i = 0; i < 4; i++) { gxio_trio_context_t *context = controller->trio; int cpu; /* Ask the kernel to allocate an IRQ. */ irq = create_irq(); if (irq < 0) { pr_err("PCI: no free irq vectors, failed for %d\n", i); goto free_irqs; } controller->irq_intx_table[i] = irq; /* Distribute the 4 IRQs to different tiles. */ cpu = tile_irq_cpu(irq); /* Configure the TRIO intr binding for this IRQ. */ result = gxio_trio_config_legacy_intr(context, cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq, controller->mac, i); if (result < 0) { pr_err("PCI: MAC intx config failed for %d\n", i); goto free_irqs; } /* * Register the IRQ handler with the kernel. */ irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip, trio_handle_level_irq); irq_set_chip_data(irq, (void *)(uint64_t)i); irq_set_handler_data(irq, controller); } return 0; free_irqs: for (j = 0; j < i; j++) destroy_irq(controller->irq_intx_table[j]); return -1; } /* * Find valid controllers and fill in pci_controller structs for each * of them. * * Returns the number of controllers discovered. */ int __init tile_pci_init(void) { int num_trio_shims = 0; int ctl_index = 0; int i, j; if (!pci_probe) { pr_info("PCI: disabled by boot argument\n"); return 0; } pr_info("PCI: Searching for controllers...\n"); /* * We loop over all the TRIO shims. 
*/ for (i = 0; i < TILEGX_NUM_TRIO; i++) { int ret; ret = tile_pcie_open(i); if (ret < 0) continue; num_trio_shims++; } if (num_trio_shims == 0 || sim_is_simulator()) return 0; /* * Now determine which PCIe ports are configured to operate in RC mode. * We look at the Board Information Block first and then see if there * are any overriding configuration by the HW strapping pin. */ for (i = 0; i < TILEGX_NUM_TRIO; i++) { gxio_trio_context_t *context = &trio_contexts[i]; int ret; if (context->fd < 0) continue; ret = hv_dev_pread(context->fd, 0, (HV_VirtAddr)&pcie_ports[i][0], sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES, GXIO_TRIO_OP_GET_PORT_PROPERTY); if (ret < 0) { pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d," " on TRIO %d\n", ret, i); continue; } for (j = 0; j < TILEGX_TRIO_PCIES; j++) { if (pcie_ports[i][j].allow_rc) { pcie_rc[i][j] = 1; num_rc_controllers++; } else if (pcie_ports[i][j].allow_ep) { num_ep_controllers++; } } } /* * Return if no PCIe ports are configured to operate in RC mode. */ if (num_rc_controllers == 0) return 0; /* * Set the TRIO pointer and MAC index for each PCIe RC port. */ for (i = 0; i < TILEGX_NUM_TRIO; i++) { for (j = 0; j < TILEGX_TRIO_PCIES; j++) { if (pcie_rc[i][j]) { pci_controllers[ctl_index].trio = &trio_contexts[i]; pci_controllers[ctl_index].mac = j; pci_controllers[ctl_index].trio_index = i; ctl_index++; if (ctl_index == num_rc_controllers) goto out; } } } out: /* * Configure each PCIe RC port. */ for (i = 0; i < num_rc_controllers; i++) { /* * Configure the PCIe MAC to run in RC mode. */ struct pci_controller *controller = &pci_controllers[i]; controller->index = i; controller->ops = &tile_cfg_ops; /* * The PCI memory resource is located above the PA space. * For every host bridge, the BAR window or the MMIO aperture * is in range [3GB, 4GB - 1] of a 4GB space beyond the * PA space. 
*/ controller->mem_offset = TILE_PCI_MEM_START + (i * TILE_PCI_BAR_WINDOW_TOP); controller->mem_space.start = controller->mem_offset + TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE; controller->mem_space.end = controller->mem_offset + TILE_PCI_BAR_WINDOW_TOP - 1; controller->mem_space.flags = IORESOURCE_MEM; snprintf(controller->mem_space_name, sizeof(controller->mem_space_name), "PCI mem domain %d", i); controller->mem_space.name = controller->mem_space_name; } return num_rc_controllers; } /* * (pin - 1) converts from the PCI standard's [1:4] convention to * a normal [0:3] range. */ static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin) { struct pci_controller *controller = (struct pci_controller *)dev->sysdata; return controller->irq_intx_table[pin - 1]; } static void __devinit fixup_read_and_payload_sizes(struct pci_controller * controller) { gxio_trio_context_t *trio_context = controller->trio; struct pci_bus *root_bus = controller->root_bus; TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control; TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap; unsigned int reg_offset; struct pci_bus *child; int mac; int err; mac = controller->mac; /* * Set our max read request size to be 4KB. */ reg_offset = (TRIO_PCIE_RC_DEVICE_CONTROL << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac + reg_offset); dev_control.max_read_req_sz = 5; __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, dev_control.word); /* * Set the max payload size supported by this Gx PCIe MAC. * Though Gx PCIe supports Max Payload Size of up to 1024 bytes, * experiments have shown that setting MPS to 256 yields the * best performance. 
*/ reg_offset = (TRIO_PCIE_RC_DEVICE_CAP << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac + reg_offset); rc_dev_cap.mps_sup = 1; __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, rc_dev_cap.word); /* Configure PCI Express MPS setting. */ list_for_each_entry(child, &root_bus->children, node) { struct pci_dev *self = child->self; if (!self) continue; pcie_bus_configure_settings(child, self->pcie_mpss); } /* * Set the mac_config register in trio based on the MPS/MRS of the link. */ reg_offset = (TRIO_PCIE_RC_DEVICE_CONTROL << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac + reg_offset); err = gxio_trio_set_mps_mrs(trio_context, dev_control.max_payload_size, dev_control.max_read_req_sz, mac); if (err < 0) { pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, " "MAC %d on TRIO %d\n", mac, controller->trio_index); } } static int __devinit setup_pcie_rc_delay(char *str) { unsigned long delay = 0; unsigned long trio_index; unsigned long mac; if (str == NULL || !isdigit(*str)) return -EINVAL; trio_index = simple_strtoul(str, (char **)&str, 10); if (trio_index >= TILEGX_NUM_TRIO) return -EINVAL; if (*str != ',') return -EINVAL; str++; if (!isdigit(*str)) return -EINVAL; mac = simple_strtoul(str, (char **)&str, 10); if (mac >= TILEGX_TRIO_PCIES) return -EINVAL; if (*str != '\0') { if (*str != ',') return -EINVAL; str++; if (!isdigit(*str)) return -EINVAL; delay = simple_strtoul(str, (char **)&str, 10); if (delay > MAX_RC_DELAY) return -EINVAL; } rc_delay[trio_index][mac] = delay ? 
: DEFAULT_RC_DELAY; pr_info("Delaying PCIe RC link training for %u sec" " on MAC %lu on TRIO %lu\n", rc_delay[trio_index][mac], mac, trio_index); return 0; } early_param("pcie_rc_delay", setup_pcie_rc_delay); /* * PCI initialization entry point, called by subsys_initcall. */ int __init pcibios_init(void) { resource_size_t offset; LIST_HEAD(resources); int next_busno; int i; tile_pci_init(); if (num_rc_controllers == 0 && num_ep_controllers == 0) return 0; /* * We loop over all the TRIO shims and set up the MMIO mappings. */ for (i = 0; i < TILEGX_NUM_TRIO; i++) { gxio_trio_context_t *context = &trio_contexts[i]; if (context->fd < 0) continue; /* * Map in the MMIO space for the MAC. */ offset = 0; context->mmio_base_mac = iorpc_ioremap(context->fd, offset, HV_TRIO_CONFIG_IOREMAP_SIZE); if (context->mmio_base_mac == NULL) { pr_err("PCI: MAC map failure on TRIO %d\n", i); hv_dev_close(context->fd); context->fd = -1; continue; } } /* * Delay a bit in case devices aren't ready. Some devices are * known to require at least 20ms here, but we use a more * conservative value. */ msleep(250); /* Scan all of the recorded PCI controllers. */ for (next_busno = 0, i = 0; i < num_rc_controllers; i++) { struct pci_controller *controller = &pci_controllers[i]; gxio_trio_context_t *trio_context = controller->trio; TRIO_PCIE_INTFC_PORT_CONFIG_t port_config; TRIO_PCIE_INTFC_PORT_STATUS_t port_status; TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl; struct pci_bus *bus; unsigned int reg_offset; unsigned int class_code_revision; int trio_index; int mac; int ret; if (trio_context->fd < 0) continue; trio_index = controller->trio_index; mac = controller->mac; /* * Check the port strap state which will override the BIB * setting. 
*/ reg_offset = (TRIO_PCIE_INTFC_PORT_CONFIG << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); port_config.word = __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset); if ((port_config.strap_state != TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC) && (port_config.strap_state != TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1)) { /* * If this is really intended to be an EP port, * record it so that the endpoint driver will know about it. */ if (port_config.strap_state == TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT || port_config.strap_state == TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1) pcie_ports[trio_index][mac].allow_ep = 1; continue; } /* * Delay the RC link training if needed. */ if (rc_delay[trio_index][mac]) msleep(rc_delay[trio_index][mac] * 1000); ret = gxio_trio_force_rc_link_up(trio_context, mac); if (ret < 0) pr_err("PCI: PCIE_FORCE_LINK_UP failure, " "MAC %d on TRIO %d\n", mac, trio_index); pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i, trio_index, controller->mac); /* * Wait a bit here because some EP devices take longer * to come up. */ msleep(1000); /* * Check for PCIe link-up status. */ reg_offset = (TRIO_PCIE_INTFC_PORT_STATUS << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); port_status.word = __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset); if (!port_status.dl_up) { pr_err("PCI: link is down, MAC %d on TRIO %d\n", mac, trio_index); continue; } /* * Ensure that the link can come out of L1 power down state. * Strictly speaking, this is needed only in the case of * heavy RC-initiated DMAs. 
*/ reg_offset = (TRIO_PCIE_INTFC_TX_FIFO_CTL << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); tx_fifo_ctl.word = __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset); tx_fifo_ctl.min_p_credits = 0; __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, tx_fifo_ctl.word); /* * Change the device ID so that Linux bus crawl doesn't confuse * the internal bridge with any Tilera endpoints. */ reg_offset = (TRIO_PCIE_RC_DEVICE_ID_VEN_ID << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, (TILERA_GX36_RC_DEV_ID << TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) | TILERA_VENDOR_ID); /* * Set the internal P2P bridge class code. */ reg_offset = (TRIO_PCIE_RC_REVISION_ID << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); class_code_revision = __gxio_mmio_read32(trio_context->mmio_base_mac + reg_offset); class_code_revision = (class_code_revision & 0xff ) | (PCI_CLASS_BRIDGE_PCI << 16); __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, class_code_revision); #ifdef USE_SHARED_PCIE_CONFIG_REGION /* * Map in the MMIO space for the PIO region. */ offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) | (((unsigned long long)mac) << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT); #else /* * Alloc a PIO region for PCI config access per MAC. */ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); if (ret < 0) { pr_err("PCI: PCI CFG PIO alloc failure for mac %d " "on TRIO %d, give up\n", mac, trio_index); continue; } trio_context->pio_cfg_index[mac] = ret; /* * For PIO CFG, the bus_address_hi parameter is 0. 
*/ ret = gxio_trio_init_pio_region_aux(trio_context, trio_context->pio_cfg_index[mac], mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); if (ret < 0) { pr_err("PCI: PCI CFG PIO init failure for mac %d " "on TRIO %d, give up\n", mac, trio_index); continue; } offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) | (((unsigned long long)mac) << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT); #endif trio_context->mmio_base_pio_cfg[mac] = iorpc_ioremap(trio_context->fd, offset, (1 << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT)); if (trio_context->mmio_base_pio_cfg[mac] == NULL) { pr_err("PCI: PIO map failure for mac %d on TRIO %d\n", mac, trio_index); continue; } /* * Initialize the PCIe interrupts. */ if (tile_init_irqs(controller)) { pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n", mac, trio_index); continue; } /* * The PCI memory resource is located above the PA space. * The memory range for the PCI root bus should not overlap * with the physical RAM */ pci_add_resource_offset(&resources, &controller->mem_space, controller->mem_offset); controller->first_busno = next_busno; bus = pci_scan_root_bus(NULL, next_busno, controller->ops, controller, &resources); controller->root_bus = bus; next_busno = bus->busn_res.end + 1; } /* Do machine dependent PCI interrupt routing */ pci_fixup_irqs(pci_common_swizzle, tile_map_irq); /* * This comes from the generic Linux PCI driver. * * It allocates all of the resources (I/O memory, etc) * associated with the devices read in above. */ pci_assign_unassigned_resources(); /* Record the I/O resources in the PCI controller structure. */ for (i = 0; i < num_rc_controllers; i++) { struct pci_controller *controller = &pci_controllers[i]; gxio_trio_context_t *trio_context = controller->trio; struct pci_bus *root_bus = pci_controllers[i].root_bus; struct pci_bus *next_bus; uint32_t bus_address_hi; struct pci_dev *dev; int ret; int j; /* * Skip controllers that are not properly initialized or * have down links. 
*/ if (root_bus == NULL) continue; /* Configure the max_payload_size values for this domain. */ fixup_read_and_payload_sizes(controller); list_for_each_entry(dev, &root_bus->devices, bus_list) { /* Find the PCI host controller, ie. the 1st bridge. */ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && (PCI_SLOT(dev->devfn) == 0)) { next_bus = dev->subordinate; pci_controllers[i].mem_resources[0] = *next_bus->resource[0]; pci_controllers[i].mem_resources[1] = *next_bus->resource[1]; pci_controllers[i].mem_resources[2] = *next_bus->resource[2]; break; } } if (pci_controllers[i].mem_resources[1].flags & IORESOURCE_MEM) bus_address_hi = pci_controllers[i].mem_resources[1].start >> 32; else if (pci_controllers[i].mem_resources[2].flags & IORESOURCE_PREFETCH) bus_address_hi = pci_controllers[i].mem_resources[2].start >> 32; else { /* This is unlikely. */ pr_err("PCI: no memory resources on TRIO %d mac %d\n", controller->trio_index, controller->mac); continue; } /* * Alloc a PIO region for PCI memory access for each RC port. */ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); if (ret < 0) { pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, " "give up\n", controller->trio_index, controller->mac); continue; } controller->pio_mem_index = ret; /* * For PIO MEM, the bus_address_hi parameter is hard-coded 0 * because we always assign 32-bit PCI bus BAR ranges. */ ret = gxio_trio_init_pio_region_aux(trio_context, controller->pio_mem_index, controller->mac, 0, 0); if (ret < 0) { pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, " "give up\n", controller->trio_index, controller->mac); continue; } /* * Configure a Mem-Map region for each memory controller so * that Linux can map all of its PA space to the PCI bus. * Use the IOMMU to handle hash-for-home memory. 
*/ for_each_online_node(j) { unsigned long start_pfn = node_start_pfn[j]; unsigned long end_pfn = node_end_pfn[j]; unsigned long nr_pages = end_pfn - start_pfn; ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); if (ret < 0) { pr_err("PCI: Mem-Map alloc failure on TRIO %d " "mac %d for MC %d, give up\n", controller->trio_index, controller->mac, j); goto alloc_mem_map_failed; } controller->mem_maps[j] = ret; /* * Initialize the Mem-Map and the I/O MMU so that all * the physical memory can be accessed by the endpoint * devices. The base bus address is set to the base CPA * of this memory controller plus an offset (see pci.h). * The region's base VA is set to the base CPA. The * I/O MMU table essentially translates the CPA to * the real PA. Implicitly, for node 0, we create * a separate Mem-Map region that serves as the inbound * window for legacy 32-bit devices. This is a direct * map of the low 4GB CPA space. */ ret = gxio_trio_init_memory_map_mmu_aux(trio_context, controller->mem_maps[j], start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT, trio_context->asid, controller->mac, (start_pfn << PAGE_SHIFT) + TILE_PCI_MEM_MAP_BASE_OFFSET, j, GXIO_TRIO_ORDER_MODE_UNORDERED); if (ret < 0) { pr_err("PCI: Mem-Map init failure on TRIO %d " "mac %d for MC %d, give up\n", controller->trio_index, controller->mac, j); goto alloc_mem_map_failed; } continue; alloc_mem_map_failed: break; } } return 0; } subsys_initcall(pcibios_init); /* Note: to be deleted after Linux 3.6 merge. */ void __devinit pcibios_fixup_bus(struct pci_bus *bus) { } /* * This can be called from the generic PCI layer, but doesn't need to * do anything. */ char __devinit *pcibios_setup(char *str) { if (!strcmp(str, "off")) { pci_probe = 0; return NULL; } return str; } /* * This is called from the generic Linux layer. 
*/ void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } /* * Enable memory address decoding, as appropriate, for the * device described by the 'dev' struct. The I/O decoding * is disabled, though the TILE-Gx supports I/O addressing. * * This is called from the generic PCI layer, and can be called * for bridges or endpoints. */ int pcibios_enable_device(struct pci_dev *dev, int mask) { return pci_enable_resources(dev, mask); } /* Called for each device after PCI setup is done. */ static void __init pcibios_fixup_final(struct pci_dev *pdev) { set_dma_ops(&pdev->dev, gx_pci_dma_map_ops); set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET); pdev->dev.archdata.max_direct_dma_addr = TILE_PCI_MAX_DIRECT_DMA_ADDRESS; } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final); /* Map a PCI MMIO bus address into VA space. */ void __iomem *ioremap(resource_size_t phys_addr, unsigned long size) { struct pci_controller *controller = NULL; resource_size_t bar_start; resource_size_t bar_end; resource_size_t offset; resource_size_t start; resource_size_t end; int trio_fd; int i, j; start = phys_addr; end = phys_addr + size - 1; /* * In the following, each PCI controller's mem_resources[1] * represents its (non-prefetchable) PCI memory resource and * mem_resources[2] refers to its prefetchable PCI memory resource. * By searching phys_addr in each controller's mem_resources[], we can * determine the controller that should accept the PCI memory access. */ for (i = 0; i < num_rc_controllers; i++) { /* * Skip controllers that are not properly initialized or * have down links. 
*/ if (pci_controllers[i].root_bus == NULL) continue; for (j = 1; j < 3; j++) { bar_start = pci_controllers[i].mem_resources[j].start; bar_end = pci_controllers[i].mem_resources[j].end; if ((start >= bar_start) && (end <= bar_end)) { controller = &pci_controllers[i]; goto got_it; } } } if (controller == NULL) return NULL; got_it: trio_fd = controller->trio->fd; /* Convert the resource start to the bus address offset. */ start = phys_addr - controller->mem_offset; offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start; /* * We need to keep the PCI bus address's in-page offset in the VA. */ return iorpc_ioremap(trio_fd, offset, size) + (phys_addr & (PAGE_SIZE - 1)); } EXPORT_SYMBOL(ioremap); void pci_iounmap(struct pci_dev *dev, void __iomem *addr) { iounmap(addr); } EXPORT_SYMBOL(pci_iounmap); /**************************************************************** * * Tile PCI config space read/write routines * ****************************************************************/ /* * These are the normal read and write ops * These are expanded with macros from pci_bus_read_config_byte() etc. * * devfn is the combined PCI device & function. * * offset is in bytes, from the start of config space for the * specified bus & device. */ static int __devinit tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 *val) { struct pci_controller *controller = bus->sysdata; gxio_trio_context_t *trio_context = controller->trio; int busnum = bus->number & 0xff; int device = PCI_SLOT(devfn); int function = PCI_FUNC(devfn); int config_type = 1; TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr; void *mmio_addr; /* * Map all accesses to the local device on root bus into the * MMIO space of the MAC. Accesses to the downstream devices * go to the PIO space. */ if (pci_is_root_bus(bus)) { if (device == 0) { /* * This is the internal downstream P2P bridge, * access directly. 
*/ unsigned int reg_offset; reg_offset = ((offset & 0xFFF) << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (controller->mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); mmio_addr = trio_context->mmio_base_mac + reg_offset; goto valid_device; } else { /* * We fake an empty device for (device > 0), * since there is only one device on bus 0. */ goto invalid_device; } } /* * Accesses to the directly attached device have to be * sent as type-0 configs. */ if (busnum == (controller->first_busno + 1)) { /* * There is only one device off of our built-in P2P bridge. */ if (device != 0) goto invalid_device; config_type = 0; } cfg_addr.word = 0; cfg_addr.reg_addr = (offset & 0xFFF); cfg_addr.fn = function; cfg_addr.dev = device; cfg_addr.bus = busnum; cfg_addr.type = config_type; /* * Note that we don't set the mac field in cfg_addr because the * mapping is per port. */ mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] + cfg_addr.word; valid_device: switch (size) { case 4: *val = __gxio_mmio_read32(mmio_addr); break; case 2: *val = __gxio_mmio_read16(mmio_addr); break; case 1: *val = __gxio_mmio_read8(mmio_addr); break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } TRACE_CFG_RD(size, *val, busnum, device, function, offset); return 0; invalid_device: switch (size) { case 4: *val = 0xFFFFFFFF; break; case 2: *val = 0xFFFF; break; case 1: *val = 0xFF; break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } return 0; } /* * See tile_cfg_read() for relevent comments. * Note that "val" is the value to write, not a pointer to that value. 
*/ static int __devinit tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 val) { struct pci_controller *controller = bus->sysdata; gxio_trio_context_t *trio_context = controller->trio; int busnum = bus->number & 0xff; int device = PCI_SLOT(devfn); int function = PCI_FUNC(devfn); int config_type = 1; TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr; void *mmio_addr; u32 val_32 = (u32)val; u16 val_16 = (u16)val; u8 val_8 = (u8)val; /* * Map all accesses to the local device on root bus into the * MMIO space of the MAC. Accesses to the downstream devices * go to the PIO space. */ if (pci_is_root_bus(bus)) { if (device == 0) { /* * This is the internal downstream P2P bridge, * access directly. */ unsigned int reg_offset; reg_offset = ((offset & 0xFFF) << TRIO_CFG_REGION_ADDR__REG_SHIFT) | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | (controller->mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); mmio_addr = trio_context->mmio_base_mac + reg_offset; goto valid_device; } else { /* * We fake an empty device for (device > 0), * since there is only one device on bus 0. */ goto invalid_device; } } /* * Accesses to the directly attached device have to be * sent as type-0 configs. */ if (busnum == (controller->first_busno + 1)) { /* * There is only one device off of our built-in P2P bridge. */ if (device != 0) goto invalid_device; config_type = 0; } cfg_addr.word = 0; cfg_addr.reg_addr = (offset & 0xFFF); cfg_addr.fn = function; cfg_addr.dev = device; cfg_addr.bus = busnum; cfg_addr.type = config_type; /* * Note that we don't set the mac field in cfg_addr because the * mapping is per port. 
*/ mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] + cfg_addr.word; valid_device: switch (size) { case 4: __gxio_mmio_write32(mmio_addr, val_32); TRACE_CFG_WR(size, val_32, busnum, device, function, offset); break; case 2: __gxio_mmio_write16(mmio_addr, val_16); TRACE_CFG_WR(size, val_16, busnum, device, function, offset); break; case 1: __gxio_mmio_write8(mmio_addr, val_8); TRACE_CFG_WR(size, val_8, busnum, device, function, offset); break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } invalid_device: return 0; } static struct pci_ops tile_cfg_ops = { .read = tile_cfg_read, .write = tile_cfg_write, }; /* * MSI support starts here. */ static unsigned int tilegx_msi_startup(struct irq_data *d) { if (d->msi_desc) unmask_msi_irq(d); return 0; } static void tilegx_msi_ack(struct irq_data *d) { __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq); } static void tilegx_msi_mask(struct irq_data *d) { mask_msi_irq(d); __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); } static void tilegx_msi_unmask(struct irq_data *d) { __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); unmask_msi_irq(d); } static struct irq_chip tilegx_msi_chip = { .name = "tilegx_msi", .irq_startup = tilegx_msi_startup, .irq_ack = tilegx_msi_ack, .irq_mask = tilegx_msi_mask, .irq_unmask = tilegx_msi_unmask, /* TBD: support set_affinity. */ }; int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) { struct pci_controller *controller; gxio_trio_context_t *trio_context; struct msi_msg msg; int default_irq; uint64_t mem_map_base; uint64_t mem_map_limit; u64 msi_addr; int mem_map; int cpu; int irq; int ret; irq = create_irq(); if (irq < 0) return irq; /* * Since we use a 64-bit Mem-Map to accept the MSI write, we fail * devices that are not capable of generating a 64-bit message address. * These devices will fall back to using the legacy interrupts. * Most PCIe endpoint devices do support 64-bit message addressing. 
*/ if (desc->msi_attrib.is_64 == 0) { dev_printk(KERN_INFO, &pdev->dev, "64-bit MSI message address not supported, " "falling back to legacy interrupts.\n"); ret = -ENOMEM; goto is_64_failure; } default_irq = desc->msi_attrib.default_irq; controller = irq_get_handler_data(default_irq); BUG_ON(!controller); trio_context = controller->trio; /* * Allocate the Mem-Map that will accept the MSI write and * trigger the TILE-side interrupts. */ mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); if (mem_map < 0) { dev_printk(KERN_INFO, &pdev->dev, "%s Mem-Map alloc failure. " "Failed to initialize MSI interrupts. " "Falling back to legacy interrupts.\n", desc->msi_attrib.is_msix ? "MSI-X" : "MSI"); ret = -ENOMEM; goto msi_mem_map_alloc_failure; } /* We try to distribute different IRQs to different tiles. */ cpu = tile_irq_cpu(irq); /* * Now call up to the HV to configure the Mem-Map interrupt and * set up the IPI binding. */ mem_map_base = MEM_MAP_INTR_REGIONS_BASE + mem_map * MEM_MAP_INTR_REGION_SIZE; mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1; ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq, controller->mac, mem_map, mem_map_base, mem_map_limit, trio_context->asid); if (ret < 0) { dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n"); goto hv_msi_config_failure; } irq_set_msi_desc(irq, desc); msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 - TRIO_MAP_MEM_REG_INT0; msg.address_hi = msi_addr >> 32; msg.address_lo = msi_addr & 0xffffffff; msg.data = mem_map; write_msi_msg(irq, &msg); irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq); irq_set_handler_data(irq, controller); return 0; hv_msi_config_failure: /* Free mem-map */ msi_mem_map_alloc_failure: is_64_failure: destroy_irq(irq); return ret; } void arch_teardown_msi_irq(unsigned int irq) { destroy_irq(irq); }
gpl-2.0
BtbN/dolphin
Externals/wxWidgets3/src/gtk/hyperlink.cpp
83
7160
///////////////////////////////////////////////////////////////////////////// // Name: src/gtk/hyperlink.cpp // Purpose: Hyperlink control // Author: Francesco Montorsi // Created: 14/2/2007 // Copyright: (c) 2007 Francesco Montorsi // Licence: wxWindows licence ///////////////////////////////////////////////////////////////////////////// // ============================================================================ // declarations // ============================================================================ // -------------------------------------------------------------------------- // headers // -------------------------------------------------------------------------- // For compilers that support precompilation, includes "wx.h". #include "wx/wxprec.h" #ifdef __BORLANDC__ #pragma hdrstop #endif #if wxUSE_HYPERLINKCTRL && defined(__WXGTK210__) && !defined(__WXUNIVERSAL__) #include "wx/hyperlink.h" #ifndef WX_PRECOMP #endif #include <gtk/gtk.h> #include "wx/gtk/private.h" // ---------------------------------------------------------------------------- // local functions // ---------------------------------------------------------------------------- static inline bool UseNative() { // native gtk_link_button widget is only available in GTK+ 2.10 and later #ifdef __WXGTK3__ return true; #else return !gtk_check_version(2, 10, 0); #endif } // ============================================================================ // implementation // ============================================================================ // ---------------------------------------------------------------------------- // "clicked" // ---------------------------------------------------------------------------- extern "C" { static void gtk_hyperlink_clicked_callback( GtkWidget *WXUNUSED(widget), wxHyperlinkCtrl *linkCtrl ) { // send the event linkCtrl->SendEvent(); } } // ---------------------------------------------------------------------------- // wxHyperlinkCtrl // 
---------------------------------------------------------------------------- bool wxHyperlinkCtrl::Create(wxWindow *parent, wxWindowID id, const wxString& label, const wxString& url, const wxPoint& pos, const wxSize& size, long style, const wxString& name) { if ( UseNative() ) { // do validation checks: CheckParams(label, url, style); if (!PreCreation( parent, pos, size ) || !CreateBase( parent, id, pos, size, style, wxDefaultValidator, name )) { wxFAIL_MSG( wxT("wxHyperlinkCtrl creation failed") ); return false; } m_widget = gtk_link_button_new("asdfsaf asdfdsaf asdfdsa"); g_object_ref(m_widget); // alignment float x_alignment = 0.5; if (HasFlag(wxHL_ALIGN_LEFT)) x_alignment = 0.0; else if (HasFlag(wxHL_ALIGN_RIGHT)) x_alignment = 1.0; gtk_button_set_alignment(GTK_BUTTON(m_widget), x_alignment, 0.5); // set to non empty strings both the url and the label SetURL(url.empty() ? label : url); SetLabel(label.empty() ? url : label); // our signal handlers: g_signal_connect_after (m_widget, "clicked", G_CALLBACK (gtk_hyperlink_clicked_callback), this); m_parent->DoAddChild( this ); PostCreation(size); // wxWindowGTK will connect to the enter_notify and leave_notify GTK+ signals // thus overriding GTK+'s internal signal handlers which set the cursor of // the widget - thus we need to manually set it here: SetCursor(wxCursor(wxCURSOR_HAND)); } else return wxGenericHyperlinkCtrl::Create(parent, id, label, url, pos, size, style, name); return true; } wxSize wxHyperlinkCtrl::DoGetBestSize() const { if ( UseNative() ) return wxControl::DoGetBestSize(); return wxGenericHyperlinkCtrl::DoGetBestSize(); } wxSize wxHyperlinkCtrl::DoGetBestClientSize() const { if ( UseNative() ) return wxControl::DoGetBestClientSize(); return wxGenericHyperlinkCtrl::DoGetBestClientSize(); } void wxHyperlinkCtrl::SetLabel(const wxString &label) { if ( UseNative() ) { wxControl::SetLabel(label); const wxString labelGTK = GTKConvertMnemonics(label); gtk_button_set_label(GTK_BUTTON(m_widget), 
wxGTK_CONV(labelGTK)); } else wxGenericHyperlinkCtrl::SetLabel(label); } void wxHyperlinkCtrl::SetURL(const wxString &uri) { if ( UseNative() ) gtk_link_button_set_uri(GTK_LINK_BUTTON(m_widget), wxGTK_CONV(uri)); else wxGenericHyperlinkCtrl::SetURL(uri); } wxString wxHyperlinkCtrl::GetURL() const { if ( UseNative() ) { const gchar *str = gtk_link_button_get_uri(GTK_LINK_BUTTON(m_widget)); return wxString::FromUTF8(str); } return wxGenericHyperlinkCtrl::GetURL(); } void wxHyperlinkCtrl::SetNormalColour(const wxColour &colour) { if ( UseNative() ) { // simply do nothing: GTK+ does not allow us to change it :( } else wxGenericHyperlinkCtrl::SetNormalColour(colour); } wxColour wxHyperlinkCtrl::GetNormalColour() const { wxColour ret; if ( UseNative() ) { GdkColor *link_color = NULL; // convert GdkColor in wxColour gtk_widget_style_get(m_widget, "link-color", &link_color, NULL); if (link_color) ret = wxColour(*link_color); gdk_color_free (link_color); } else ret = wxGenericHyperlinkCtrl::GetNormalColour(); return ret; } void wxHyperlinkCtrl::SetVisitedColour(const wxColour &colour) { if ( UseNative() ) { // simply do nothing: GTK+ does not allow us to change it :( } else wxGenericHyperlinkCtrl::SetVisitedColour(colour); } wxColour wxHyperlinkCtrl::GetVisitedColour() const { wxColour ret; if ( UseNative() ) { GdkColor *link_color = NULL; // convert GdkColor in wxColour gtk_widget_style_get(m_widget, "visited-link-color", &link_color, NULL); if (link_color) ret = wxColour(*link_color); gdk_color_free (link_color); } else return wxGenericHyperlinkCtrl::GetVisitedColour(); return ret; } void wxHyperlinkCtrl::SetHoverColour(const wxColour &colour) { if ( UseNative() ) { // simply do nothing: GTK+ does not allow us to change it :( } else wxGenericHyperlinkCtrl::SetHoverColour(colour); } wxColour wxHyperlinkCtrl::GetHoverColour() const { if ( UseNative() ) { // hover colour == normal colour for native GTK+ widget return GetNormalColour(); } return 
wxGenericHyperlinkCtrl::GetHoverColour(); } GdkWindow *wxHyperlinkCtrl::GTKGetWindow(wxArrayGdkWindows& windows) const { return UseNative() ? gtk_button_get_event_window(GTK_BUTTON(m_widget)) : wxGenericHyperlinkCtrl::GTKGetWindow(windows); } #endif // wxUSE_HYPERLINKCTRL && GTK+ 2.10+
gpl-2.0
Steven-Cai/pi-kernel
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
83
73393
/* bnx2x_dcb.c: Broadcom Everest network driver. * * Copyright 2009-2012 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a * license other than the GPL, without Broadcom's express prior written * consent. * * Maintained by: Eilon Greenstein <eilong@broadcom.com> * Written by: Dmitry Kravkov * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/netdevice.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/rtnetlink.h> #include <net/dcbnl.h> #include "bnx2x.h" #include "bnx2x_cmn.h" #include "bnx2x_dcb.h" /* forward declarations of dcbx related functions */ static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); static void bnx2x_pfc_set_pfc(struct bnx2x *bp); static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, u32 *set_configuration_ets_pg, u32 *pri_pg_tbl); static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, u32 *pg_pri_orginal_spread, struct pg_help_data *help_data); static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, struct pg_help_data *help_data, struct dcbx_ets_feature *ets, u32 *pg_pri_orginal_spread); static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, struct cos_help_data *cos_data, u32 *pg_pri_orginal_spread, struct dcbx_ets_feature *ets); static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, struct bnx2x_func_tx_start_params*); /* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */ static void bnx2x_read_data(struct bnx2x *bp, u32 *buff, u32 addr, u32 len) { int 
	    i;

	for (i = 0; i < len; i += 4, buff++)
		*buff = REG_RD(bp, addr + i);
}

/* Mirror of bnx2x_read_data: copy 'len' bytes from 'buff' to device
 * address 'addr' via REG_WR, one u32 at a time.
 */
static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
			     u32 addr, u32 len)
{
	int i;

	for (i = 0; i < len; i += 4, buff++)
		REG_WR(bp, addr + i, *buff);
}

/* Program PFC into NIG/BRB/MAC from the negotiated dcbx_port_params:
 * pauseable priorities map to COS1 (lossless), everything else to COS0
 * (lossy). Runs under the PHY lock because it touches link parameters.
 */
static void bnx2x_pfc_set(struct bnx2x *bp)
{
	struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
	u32 pri_bit, val = 0;
	int i;

	pfc_params.num_of_rx_cos_priority_mask =
					bp->dcbx_port_params.ets.num_of_cos;

	/* Tx COS configuration */
	for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
		/*
		 * We configure only the pauseable bits (non pauseable aren't
		 * configured at all) it's done to avoid false pauses from
		 * network
		 */
		pfc_params.rx_cos_priority_mask[i] =
			bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
				& DCBX_PFC_PRI_PAUSE_MASK(bp);

	/*
	 * Rx COS configuration
	 * Changing PFC RX configuration .
	 * In RX COS0 will always be configured to lossy and COS1 to lossless
	 */
	for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
		pri_bit = 1 << i;

		/* 4 bits per priority in pkt_priority_to_cos; COS1 = 1 */
		if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
			val |= 1 << (i * 4);
	}

	pfc_params.pkt_priority_to_cos = val;

	/* RX COS0 */
	pfc_params.llfc_low_priority_classes = 0;
	/* RX COS1 */
	pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);

	/* BRB configuration */
	pfc_params.cos0_pauseable = false;
	pfc_params.cos1_pauseable = true;

	bnx2x_acquire_phy_lock(bp);
	bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
	bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
	bnx2x_release_phy_lock(bp);
}

/* Disable PFC and fall back to plain link-level pause. */
static void bnx2x_pfc_clear(struct bnx2x *bp)
{
	struct bnx2x_nig_brb_pfc_port_params nig_params = {0};

	nig_params.pause_enable = 1;
	bnx2x_acquire_phy_lock(bp);
	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
	bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
	bnx2x_release_phy_lock(bp);
}

/* Dump the negotiated dcbx features (ETS/PFC/APP) to the debug log. */
static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
				      struct dcbx_features *features,
				      u32 error)
{
	u8 i = 0;

	DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);

	/* PG */
	/* NOTE(review): the ETS dumps below use NETIF_MSG_LINK while the rest
	 * of this function uses BNX2X_MSG_DCB — looks like an incomplete
	 * conversion; confirm before unifying.
	 */
	DP(NETIF_MSG_LINK,
	   "local_mib.features.ets.enabled %x\n", features->ets.enabled);
	for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
		DP(NETIF_MSG_LINK,
		   "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
		   DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
	for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
		DP(NETIF_MSG_LINK,
		   "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
		   DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));

	/* pfc */
	DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pri_en_bitmap %x\n",
	   features->pfc.pri_en_bitmap);
	DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pfc_caps %x\n",
	   features->pfc.pfc_caps);
	DP(BNX2X_MSG_DCB, "dcbx_features.pfc.enabled %x\n",
	   features->pfc.enabled);

	DP(BNX2X_MSG_DCB, "dcbx_features.app.default_pri %x\n",
	   features->app.default_pri);
	DP(BNX2X_MSG_DCB, "dcbx_features.app.tc_supported %x\n",
	   features->app.tc_supported);
	DP(BNX2X_MSG_DCB, "dcbx_features.app.enabled %x\n",
	   features->app.enabled);
	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
		DP(BNX2X_MSG_DCB,
		   "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
		   i, features->app.app_pri_tbl[i].app_id);
		DP(BNX2X_MSG_DCB,
		   "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
		   i, features->app.app_pri_tbl[i].pri_bitmap);
		DP(BNX2X_MSG_DCB,
		   "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
		   i, features->app.app_pri_tbl[i].appBitfield);
	}
}

/* Record the highest priority set in 'pri_bitmap' for traffic type
 * 'llfc_traf_type' (keeping the max against any previously stored value).
 * NOTE(review): the scan stops at index 0 without testing bit 0, so a
 * bitmap with only bit 0 set leaves ttp[] untouched — verify intended.
 */
static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
				       u8 pri_bitmap,
				       u8 llfc_traf_type)
{
	u32 pri = MAX_PFC_PRIORITIES;
	u32 index = MAX_PFC_PRIORITIES - 1;
	u32 pri_mask;
	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;

	/* Choose the highest priority */
	while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
		pri_mask = 1 << index;
		if (GET_FLAGS(pri_bitmap, pri_mask))
			pri = index;
		index--;
	}

	if (pri < MAX_PFC_PRIORITIES)
		ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
}

/* Translate the negotiated APP feature into per-traffic-type priorities
 * (network/FCoE/iSCSI) in bp->dcbx_port_params.app.
 */
static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
				      struct dcbx_app_priority_feature *app,
				      u32 error)
{
	u8 index;
	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;

	if (GET_FLAGS(error,
		      DCBX_LOCAL_APP_ERROR))
		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n");

	if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_MISMATCH\n");

	if (GET_FLAGS(error, DCBX_REMOTE_APP_TLV_NOT_FOUND))
		DP(BNX2X_MSG_DCB, "DCBX_REMOTE_APP_TLV_NOT_FOUND\n");

	if (app->enabled &&
	    !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH |
			      DCBX_REMOTE_APP_TLV_NOT_FOUND)) {

		bp->dcbx_port_params.app.enabled = true;

		/* start from priority 0 for every traffic type */
		for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
			ttp[index] = 0;

		/* network traffic follows the negotiated default priority */
		if (app->default_pri < MAX_PFC_PRIORITIES)
			ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;

		for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
			struct dcbx_app_priority_entry *entry =
							app->app_pri_tbl;

			/* FCoE is matched by ethertype, iSCSI by TCP port */
			if (GET_FLAGS(entry[index].appBitfield,
				      DCBX_APP_SF_ETH_TYPE) &&
			    ETH_TYPE_FCOE == entry[index].app_id)
				bnx2x_dcbx_get_ap_priority(bp,
						entry[index].pri_bitmap,
						LLFC_TRAFFIC_TYPE_FCOE);

			if (GET_FLAGS(entry[index].appBitfield,
				      DCBX_APP_SF_PORT) &&
			    TCP_PORT_ISCSI == entry[index].app_id)
				bnx2x_dcbx_get_ap_priority(bp,
						entry[index].pri_bitmap,
						LLFC_TRAFFIC_TYPE_ISCSI);
		}
	} else {
		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n");
		bp->dcbx_port_params.app.enabled = false;
		for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
			ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
	}
}

/* Translate the negotiated ETS feature into driver COS parameters,
 * or reset ETS to a single default PG when disabled/in error.
 */
static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
				       struct dcbx_ets_feature *ets,
				       u32 error)
{
	int i = 0;
	u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
	struct pg_help_data pg_help_data;
	struct bnx2x_dcbx_cos_params *cos_params =
			bp->dcbx_port_params.ets.cos_params;

	memset(&pg_help_data, 0, sizeof(struct pg_help_data));

	if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n");

	if (GET_FLAGS(error, DCBX_REMOTE_ETS_TLV_NOT_FOUND))
		DP(BNX2X_MSG_DCB, "DCBX_REMOTE_ETS_TLV_NOT_FOUND\n");

	/* Clean up old settings of ets on COS */
	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
		cos_params[i].pauseable = false;
		cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
		cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
		cos_params[i].pri_bitmask = 0;
	}

	if (bp->dcbx_port_params.app.enabled && ets->enabled &&
	    !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR |
			      DCBX_REMOTE_ETS_TLV_NOT_FOUND)) {
		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ENABLE\n");
		bp->dcbx_port_params.ets.enabled = true;

		bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
					      pg_pri_orginal_spread,
					      ets->pri_pg_tbl);

		bnx2x_dcbx_get_num_pg_traf_type(bp,
						pg_pri_orginal_spread,
						&pg_help_data);

		bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
					   ets, pg_pri_orginal_spread);

	} else {
		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_DISABLED\n");
		bp->dcbx_port_params.ets.enabled = false;
		/* all priorities in PG 0, equal (unit) bandwidth */
		ets->pri_pg_tbl[0] = 0;

		for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
			DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
	}
}

/* Translate the negotiated PFC feature: when valid, the complement of the
 * enabled-priority bitmap becomes the non-pauseable mask.
 */
static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
				       struct dcbx_pfc_feature *pfc, u32 error)
{
	if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n");

	if (GET_FLAGS(error, DCBX_REMOTE_PFC_TLV_NOT_FOUND))
		DP(BNX2X_MSG_DCB, "DCBX_REMOTE_PFC_TLV_NOT_FOUND\n");

	if (bp->dcbx_port_params.app.enabled && pfc->enabled &&
	    !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH |
			      DCBX_REMOTE_PFC_TLV_NOT_FOUND)) {
		bp->dcbx_port_params.pfc.enabled = true;
		bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
			~(pfc->pri_en_bitmap);
	} else {
		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_DISABLED\n");
		bp->dcbx_port_params.pfc.enabled = false;
		bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
	}
}

/* maps unmapped priorities to the same COS as L2 */
static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
{
	int i;
	u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
	u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
	struct bnx2x_dcbx_cos_params *cos_params =
			bp->dcbx_port_params.ets.cos_params;

	/* get unmapped priorities by clearing mapped bits */
	for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
		unmapped &= ~(1
			      << ttp[i]);

	/* find cos for nw prio and extend it with unmapped */
	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
		if (cos_params[i].pri_bitmask & nw_prio) {
			/* extend the bitmask with unmapped */
			DP(BNX2X_MSG_DCB,
			   "cos %d extended with 0x%08x\n", i, unmapped);
			cos_params[i].pri_bitmask |= unmapped;
			break;
		}
	}
}

/* Digest all negotiated features into bp->dcbx_port_params. */
static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
				     struct dcbx_features *features,
				     u32 error)
{
	bnx2x_dcbx_get_ap_feature(bp, &features->app, error);

	bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);

	bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);

	bnx2x_dcbx_map_nw(bp);
}

#define DCBX_LOCAL_MIB_MAX_TRY_READ		(100)

/* Read a local or remote LLDP MIB from shmem into 'base_mib_addr'.
 * The FW brackets the MIB with matching prefix/suffix sequence numbers;
 * re-read until they agree (the FW may be updating it concurrently) or
 * the retry budget is exhausted. Returns 0 on success, 1 on error.
 */
static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
			       u32 *base_mib_addr,
			       u32 offset,
			       int read_mib_type)
{
	int max_try_read = 0;
	u32 mib_size, prefix_seq_num, suffix_seq_num;
	struct lldp_remote_mib *remote_mib;
	struct lldp_local_mib *local_mib;

	switch (read_mib_type) {
	case DCBX_READ_LOCAL_MIB:
		mib_size = sizeof(struct lldp_local_mib);
		break;
	case DCBX_READ_REMOTE_MIB:
		mib_size = sizeof(struct lldp_remote_mib);
		break;
	default:
		return 1; /*error*/
	}

	/* each port has its own copy of the MIB */
	offset += BP_PORT(bp) * mib_size;

	do {
		bnx2x_read_data(bp, base_mib_addr, offset, mib_size);

		max_try_read++;

		switch (read_mib_type) {
		case DCBX_READ_LOCAL_MIB:
			local_mib = (struct lldp_local_mib *) base_mib_addr;
			prefix_seq_num = local_mib->prefix_seq_num;
			suffix_seq_num = local_mib->suffix_seq_num;
			break;
		case DCBX_READ_REMOTE_MIB:
			remote_mib = (struct lldp_remote_mib *) base_mib_addr;
			prefix_seq_num = remote_mib->prefix_seq_num;
			suffix_seq_num = remote_mib->suffix_seq_num;
			break;
		default:
			return 1; /*error*/
		}
	} while ((prefix_seq_num != suffix_seq_num) &&
		 (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));

	if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
		BNX2X_ERR("MIB could not be read\n");
		return 1;
	}

	return 0;
}

/* Apply (or clear) PFC in HW according to the negotiated parameters. */
static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
{
	if (bp->dcbx_port_params.pfc.enabled &&
	    !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
		/*
		 * 1.
		 * Fills up common PFC structures if required
		 * 2. Configure NIG, MAC and BRB via the elink
		 */
		bnx2x_pfc_set(bp);
	else
		bnx2x_pfc_clear(bp);
}

/* Ask the FW to stop Tx traffic (ramrod); paired with resume_hw_tx. */
static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_TX_STOP;

	DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
	return bnx2x_func_state_change(bp, &func_params);
}

/* Restart Tx traffic with a freshly built FW COS/priority structure. */
static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_tx_start_params *tx_params =
		&func_params.params.tx_start;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_TX_START;

	bnx2x_dcbx_fw_struct(bp, tx_params);

	DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
	return bnx2x_func_state_change(bp, &func_params);
}

/* Program ETS for chips limited to two COS (pre-E3B0): either a bandwidth
 * split between the two COS or strict priority for one of them.
 */
static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
{
	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
	int rc = 0;

	if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
		BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
		return;
	}

	/* valid COS entries */
	if (ets->num_of_cos == 1)   /* no ETS */
		return;

	/* sanity */
	/* NOTE(review): the concatenated string literals below lack
	 * separators, so the logged message runs the fields together —
	 * candidate for a follow-up message fix.
	 */
	if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
	     (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
	    ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
	     (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
		BNX2X_ERR("all COS should have at least bw_limit or strict"
			  "ets->cos_params[0].strict= %x"
			  "ets->cos_params[0].bw_tbl= %x"
			  "ets->cos_params[1].strict= %x"
			  "ets->cos_params[1].bw_tbl= %x",
			  ets->cos_params[0].strict,
			  ets->cos_params[0].bw_tbl,
			  ets->cos_params[1].strict,
			  ets->cos_params[1].bw_tbl);
		return;
	}

	/* If we join a group and there is bw_tbl and strict then bw rules */
	if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
	    (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
		u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
		u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
		/* Do not allow 0-100
		 * configuration
		 * since PBF does not support it
		 * force 1-99 instead */
		if (bw_tbl_0 == 0) {
			bw_tbl_0 = 1;
			bw_tbl_1 = 99;
		} else if (bw_tbl_1 == 0) {
			bw_tbl_1 = 1;
			bw_tbl_0 = 99;
		}

		bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
	} else {
		if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
			rc = bnx2x_ets_strict(&bp->link_params, 0);
		else if (ets->cos_params[1].strict ==
						BNX2X_DCBX_STRICT_COS_HIGHEST)
			rc = bnx2x_ets_strict(&bp->link_params, 1);
		if (rc)
			BNX2X_ERR("update_ets_params failed\n");
	}
}

/*
 * In E3B0 the configuration may have more than 2 COS.
 */
static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
{
	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
	struct bnx2x_ets_params ets_params = { 0 };
	u8 i;

	ets_params.num_of_cos = ets->num_of_cos;

	for (i = 0; i < ets->num_of_cos; i++) {
		/* COS is SP */
		if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
			/* a COS must be either strict or BW, never both */
			if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
				BNX2X_ERR("COS can't be not BW and not SP\n");
				return;
			}
			ets_params.cos[i].state = bnx2x_cos_state_strict;
			ets_params.cos[i].params.sp_params.pri =
						ets->cos_params[i].strict;
		} else { /* COS is BW */
			if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
				BNX2X_ERR("COS can't be not BW and not SP\n");
				return;
			}
			ets_params.cos[i].state = bnx2x_cos_state_bw;
			ets_params.cos[i].params.bw_params.bw =
						(u8)ets->cos_params[i].bw_tbl;
		}
	}

	/* Configure the ETS in HW */
	if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
				  &ets_params)) {
		BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
		bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
	}
}

/* (Re)program ETS: disable first, then apply the chip-appropriate config
 * if ETS was successfully negotiated.
 */
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
{
	bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);

	if (!bp->dcbx_port_params.ets.enabled ||
	    (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
		return;

	if (CHIP_IS_E3B0(bp))
		bnx2x_dcbx_update_ets_config(bp);
	else
		bnx2x_dcbx_2cos_limit_update_ets_config(bp);
}

#ifdef BCM_DCBNL
/* Fetch the remote peer's LLDP MIB from shmem and cache it in bp. */
static int bnx2x_dcbx_read_shmem_remote_mib(struct
bnx2x *bp) { struct lldp_remote_mib remote_mib = {0}; u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset); int rc; DP(BNX2X_MSG_DCB, "dcbx_remote_mib_offset 0x%x\n", dcbx_remote_mib_offset); if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) { BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n"); return -EINVAL; } rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset, DCBX_READ_REMOTE_MIB); if (rc) { BNX2X_ERR("Faild to read remote mib from FW\n"); return rc; } /* save features and flags */ bp->dcbx_remote_feat = remote_mib.features; bp->dcbx_remote_flags = remote_mib.flags; return 0; } #endif static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) { struct lldp_local_mib local_mib = {0}; u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset); int rc; DP(BNX2X_MSG_DCB, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset); if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) { BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n"); return -EINVAL; } rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset, DCBX_READ_LOCAL_MIB); if (rc) { BNX2X_ERR("Faild to read local mib from FW\n"); return rc; } /* save features and error */ bp->dcbx_local_feat = local_mib.features; bp->dcbx_error = local_mib.error; return 0; } #ifdef BCM_DCBNL static inline u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) { u8 pri; /* Choose the highest priority */ for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--) if (ent->pri_bitmap & (1 << pri)) break; return pri; } static inline u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent) { return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) == DCBX_APP_SF_PORT) ? 
		DCB_APP_IDTYPE_PORTNUM : DCB_APP_IDTYPE_ETHTYPE;
}

/* Publish (delall == false) or clear (delall == true) the negotiated app
 * table entries to the dcbnl core via dcb_setapp(). Returns the first
 * dcb_setapp() error, or 0.
 */
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
{
	int i, err = 0;

	for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
		struct dcbx_app_priority_entry *ent =
			&bp->dcbx_local_feat.app.app_pri_tbl[i];

		if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
			u8 up = bnx2x_dcbx_dcbnl_app_up(ent);

			/* avoid invalid user-priority */
			if (up) {
				struct dcb_app app;

				app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
				app.protocol = ent->app_id;
				app.priority = delall ? 0 : up;
				err = dcb_setapp(bp->dev, &app);
			}
		}
	}
	return err;
}
#endif

/* Rebuild prio_to_cos[] from the negotiated COS priority bitmasks and
 * schedule the rtnl-locked setup_tc work.
 */
static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
{
	u8 prio, cos;

	for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
		for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
			if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
			    & (1 << prio)) {
				bp->prio_to_cos[prio] = cos;
				DP(BNX2X_MSG_DCB,
				   "tx_mapping %d --> %d\n", prio, cos);
			}
		}
	}

	/* setup tc must be called under rtnl lock, but we can't take it here
	 * as we are handling an attention on a work queue which must be
	 * flushed at some rtnl-locked contexts (e.g.
	 * if down)
	 */
	if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

/* DCBX state machine entry, driven by FW attentions:
 * NEG_RECEIVED -> read results, program driver state, stop Tx;
 * TX_PAUSED    -> program PFC/ETS in HW, resume Tx;
 * TX_RELEASED  -> ack the MFW (and notify dcbnl if built in).
 */
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
{
	switch (state) {
	case BNX2X_DCBX_STATE_NEG_RECEIVED:
	{
		DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
#ifdef BCM_DCBNL
		/*
		 * Delete app tlvs from dcbnl before reading new
		 * negotiation results
		 */
		bnx2x_dcbnl_update_applist(bp, true);

		/* Read remote mib if dcbx is in the FW */
		if (bnx2x_dcbx_read_shmem_remote_mib(bp))
			return;
#endif
		/* Read neg results if dcbx is in the FW */
		if (bnx2x_dcbx_read_shmem_neg_results(bp))
			return;

		bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
					  bp->dcbx_error);

		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
					 bp->dcbx_error);

		/* mark DCBX result for PMF migration */
		bnx2x_update_drv_flags(bp,
				       1 << DRV_FLAGS_DCB_CONFIGURED,
				       1);
#ifdef BCM_DCBNL
		/*
		 * Add new app tlvs to dcbnl
		 */
		bnx2x_dcbnl_update_applist(bp, false);
#endif
		/*
		 * reconfigure the netdevice with the results of the new
		 * dcbx negotiation.
		 */
		bnx2x_dcbx_update_tc_mapping(bp);

		/*
		 * allow other functions to update their netdevices
		 * accordingly
		 */
		if (IS_MF(bp))
			bnx2x_link_sync_notify(bp);

		bnx2x_dcbx_stop_hw_tx(bp);

		return;
	}
	case BNX2X_DCBX_STATE_TX_PAUSED:
		DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_PAUSED\n");
		bnx2x_pfc_set_pfc(bp);

		bnx2x_dcbx_update_ets_params(bp);
		bnx2x_dcbx_resume_hw_tx(bp);

		return;
	case BNX2X_DCBX_STATE_TX_RELEASED:
		DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
#ifdef BCM_DCBNL
		/*
		 * Send a notification for the new negotiated parameters
		 */
		dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
#endif
		return;
	default:
		BNX2X_ERR("Unknown DCBX_STATE\n");
	}
}

/* per-port admin MIB lives after both ports' lldp_params in shmem */
#define LLDP_ADMIN_MIB_OFFSET(bp)	(PORT_MAX*sizeof(struct lldp_params) + \
				      BP_PORT(bp)*sizeof(struct lldp_admin_mib))

/* Read-modify-write the admin MIB in shmem: apply the driver's configured
 * DCBX admin parameters (version, willing bits, ETS/PFC/APP tables) before
 * the MFW starts negotiation.
 */
static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
				u32 dcbx_lldp_params_offset)
{
	struct lldp_admin_mib admin_mib;
	u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
	u32 offset = dcbx_lldp_params_offset +
			LLDP_ADMIN_MIB_OFFSET(bp);

	/*shortcuts*/
	struct dcbx_features *af = &admin_mib.features;
	struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;

	memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));

	/* Read the data first */
	bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
			sizeof(struct lldp_admin_mib));

	if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
		SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
	else
		RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);

	if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {

		RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
		admin_mib.ver_cfg_flags |=
			(dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
			 DCBX_CEE_VERSION_MASK;

		af->ets.enabled = (u8)dp->admin_ets_enable;

		af->pfc.enabled = (u8)dp->admin_pfc_enable;

		/* FOR IEEE dp->admin_tc_supported_tx_enable */
		if (dp->admin_ets_configuration_tx_enable)
			SET_FLAGS(admin_mib.ver_cfg_flags,
				  DCBX_ETS_CONFIG_TX_ENABLED);
		else
			RESET_FLAGS(admin_mib.ver_cfg_flags,
				    DCBX_ETS_CONFIG_TX_ENABLED);
		/* For IEEE admin_ets_recommendation_tx_enable */
		if (dp->admin_pfc_tx_enable)
			SET_FLAGS(admin_mib.ver_cfg_flags,
				  DCBX_PFC_CONFIG_TX_ENABLED);
		else
			RESET_FLAGS(admin_mib.ver_cfg_flags,
				    DCBX_PFC_CONFIG_TX_ENABLED);

		if (dp->admin_application_priority_tx_enable)
			SET_FLAGS(admin_mib.ver_cfg_flags,
				  DCBX_APP_CONFIG_TX_ENABLED);
		else
			RESET_FLAGS(admin_mib.ver_cfg_flags,
				    DCBX_APP_CONFIG_TX_ENABLED);

		if (dp->admin_ets_willing)
			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
		else
			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
		/* For IEEE admin_ets_reco_valid */
		if (dp->admin_pfc_willing)
			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
		else
			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);

		if (dp->admin_app_priority_willing)
			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
		else
			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);

		for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
			DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
				(u8)dp->admin_configuration_bw_precentage[i]);

			DP(BNX2X_MSG_DCB, "pg_bw_tbl[%d] = %02x\n",
			   i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
		}

		for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
			DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
					(u8)dp->admin_configuration_ets_pg[i]);

			DP(BNX2X_MSG_DCB, "pri_pg_tbl[%d] = %02x\n",
			   i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
		}

		/*For IEEE admin_recommendation_bw_precentage
		 *For IEEE admin_recommendation_ets_pg */
		af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;

		for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
			if (dp->admin_priority_app_table[i].valid) {
				struct bnx2x_admin_priority_app_table *table =
					dp->admin_priority_app_table;
				/* FCoE/iSCSI land in their predefined slots;
				 * everything else takes the next free index.
				 */
				if ((ETH_TYPE_FCOE == table[i].app_id) &&
				    (TRAFFIC_TYPE_ETH == table[i].traffic_type))
					traf_type = FCOE_APP_IDX;
				else if ((TCP_PORT_ISCSI == table[i].app_id) &&
					 (TRAFFIC_TYPE_PORT ==
						table[i].traffic_type))
					traf_type = ISCSI_APP_IDX;
				else
					traf_type = other_traf_type++;
				af->app.app_pri_tbl[traf_type].app_id =
					table[i].app_id;

				af->app.app_pri_tbl[traf_type].pri_bitmap =
					(u8)(1 << table[i].priority);

				af->app.app_pri_tbl[traf_type].appBitfield =
					(DCBX_APP_ENTRY_VALID);

				af->app.app_pri_tbl[traf_type].appBitfield |=
					(TRAFFIC_TYPE_ETH ==
						table[i].traffic_type) ?
					DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
			}
		}

		af->app.default_pri = (u8)dp->admin_default_priority;

	}

	/* Write the data. */
	bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
			 sizeof(struct lldp_admin_mib));

}

/* Record requested DCB/DCBX state; only E2+ chips support DCB at all. */
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
{
	if (!CHIP_IS_E1x(bp)) {
		bp->dcb_state = dcb_on;
		bp->dcbx_enabled = dcbx_enabled;
	} else {
		bp->dcb_state = false;
		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
	}
	DP(BNX2X_MSG_DCB, "DCB state [%s:%s]\n",
	   dcb_on ? "ON" : "OFF",
	   dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
	   dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
	   dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
	   "on-chip with negotiation" : "invalid");
}

/* Initialize the default admin DCBX configuration: CEE version, all
 * features enabled and willing, all bandwidth on PG 0.
 */
void bnx2x_dcbx_init_params(struct bnx2x *bp)
{
	bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
	bp->dcbx_config_params.admin_ets_willing = 1;
	bp->dcbx_config_params.admin_pfc_willing = 1;
	bp->dcbx_config_params.overwrite_settings = 1;
	bp->dcbx_config_params.admin_ets_enable = 1;
	bp->dcbx_config_params.admin_pfc_enable = 1;
	bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
	bp->dcbx_config_params.admin_pfc_tx_enable = 1;
	bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
	bp->dcbx_config_params.admin_ets_reco_valid = 1;
	bp->dcbx_config_params.admin_app_priority_willing = 1;
	bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100;
	bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0;
	bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0;
	bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
	bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
	bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
	bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
	bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
	/* every priority mapped to PG 0 */
	bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0;
	bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
	bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
	bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0;
	bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
	bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
	bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
	bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
	bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100;
	bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0;
	bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0;
	bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
	bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0;
	bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0;
	bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0;
	bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0;
	/* recommendation: identity priority -> PG mapping */
	bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
	bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
	bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
	bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
	bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
	bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
	bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
	bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
	bp->dcbx_config_params.admin_pfc_bitmap = 0x0;
	bp->dcbx_config_params.admin_priority_app_table[0].valid = 0;
	bp->dcbx_config_params.admin_priority_app_table[1].valid = 0;
	bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
	bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
	bp->dcbx_config_params.admin_default_priority = 0;
}

/* Kick off DCBX negotiation: optionally push the admin MIB into shmem
 * (under the DCBX HW lock) and tell the MFW to start negotiating.
 */
void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
{
	u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;

	/* only PMF can send ADMIN msg to MFW in old MFW versions */
	if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
		return;

	if (bp->dcbx_enabled <= 0)
		return;

	/* validate:
	 * chip of good for dcbx version,
	 * dcb is wanted
	 * shmem2 contains DCBX support fields
	 */
	DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
	   bp->dcb_state, bp->port.pmf);

	if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
	    SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
		dcbx_lldp_params_offset =
			SHMEM2_RD(bp, dcbx_lldp_params_offset);

		DP(BNX2X_MSG_DCB, "dcbx_lldp_params_offset 0x%x\n",
		   dcbx_lldp_params_offset);

		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);

		if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
			/* need HW lock to avoid scenario of two drivers
			 * writing in parallel to shmem
			 */
			bnx2x_acquire_hw_lock(bp,
					      HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
			if (update_shmem)
				bnx2x_dcbx_admin_mib_updated_params(bp,
					dcbx_lldp_params_offset);

			/* Let HW start negotiation */
			bnx2x_fw_command(bp,
					 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);

			/* release HW lock only after MFW acks that it finished
			 * reading values from shmem
			 */
			bnx2x_release_hw_lock(bp,
					      HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
		}
	}
}

/* Debug-dump the final per-COS and per-traffic-type configuration that
 * will be handed to the FW.
 */
static void
bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
			    struct bnx2x_func_tx_start_params *pfc_fw_cfg)
{
	u8 pri = 0;
	u8 cos = 0;

	DP(BNX2X_MSG_DCB,
	   "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
	DP(BNX2X_MSG_DCB,
	   "pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask %x\n",
	   bp->dcbx_port_params.pfc.priority_non_pauseable_mask);

	for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
		DP(BNX2X_MSG_DCB,
		   "pdev->params.dcbx_port_params.ets.cos_params[%d].pri_bitmask %x\n",
		   cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);

		DP(BNX2X_MSG_DCB,
"pdev->params.dcbx_port_params.ets.cos_params[%d].bw_tbl %x\n", cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); DP(BNX2X_MSG_DCB, "pdev->params.dcbx_port_params.ets.cos_params[%d].strict %x\n", cos, bp->dcbx_port_params.ets.cos_params[cos].strict); DP(BNX2X_MSG_DCB, "pdev->params.dcbx_port_params.ets.cos_params[%d].pauseable %x\n", cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable); } for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { DP(BNX2X_MSG_DCB, "pfc_fw_cfg->traffic_type_to_priority_cos[%d].priority %x\n", pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority); DP(BNX2X_MSG_DCB, "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n", pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos); } } /* fills help_data according to pg_info */ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, u32 *pg_pri_orginal_spread, struct pg_help_data *help_data) { bool pg_found = false; u32 i, traf_type, add_traf_type, add_pg; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; struct pg_entry_help_data *data = help_data->data; /*shotcut*/ /* Set to invalid */ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) data[i].pg = DCBX_ILLEGAL_PG; for (add_traf_type = 0; add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) { pg_found = false; if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) { add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]]; for (traf_type = 0; traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; traf_type++) { if (data[traf_type].pg == add_pg) { if (!(data[traf_type].pg_priority & (1 << ttp[add_traf_type]))) data[traf_type]. 
							num_of_dif_pri++;
					data[traf_type].pg_priority |=
						(1 << ttp[add_traf_type]);
					pg_found = true;
					break;
				}
			}
			if (false == pg_found) {
				data[help_data->num_of_pg].pg = add_pg;
				data[help_data->num_of_pg].pg_priority =
						(1 << ttp[add_traf_type]);
				data[help_data->num_of_pg].num_of_dif_pri = 1;
				help_data->num_of_pg++;
			}
		}
		DP(BNX2X_MSG_DCB,
		   "add_traf_type %d pg_found %s num_of_pg %d\n",
		   add_traf_type, (false == pg_found) ? "NO" : "YES",
		   help_data->num_of_pg);
	}
}

/* ETS disabled: everything collapses to a single COS with all bandwidth. */
static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
					       struct cos_help_data *cos_data,
					       u32 pri_join_mask)
{
	/* Only one priority than only one COS */
	cos_data->data[0].pausable =
		IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
	cos_data->data[0].pri_join_mask = pri_join_mask;
	cos_data->data[0].cos_bw = 100;
	cos_data->num_of_cos = 1;
}

/* Accumulate a PG's bandwidth into a COS entry (DCBX_INVALID_COS_BW means
 * "no bandwidth recorded yet").
 */
static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
					    struct cos_entry_help_data *data,
					    u8 pg_bw)
{
	if (data->cos_bw == DCBX_INVALID_COS_BW)
		data->cos_bw = pg_bw;
	else
		data->cos_bw += pg_bw;
}

/* Split priorities into two COS: pauseable ones into COS0 and
 * non-pauseable into COS1, summing each group's PG bandwidth (or marking
 * the COS strict when its PG is the unbounded one).
 */
static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
			struct cos_help_data *cos_data,
			u32 *pg_pri_orginal_spread,
			struct dcbx_ets_feature *ets)
{
	u32	pri_tested	= 0;
	u8	i		= 0;
	u8	entry		= 0;
	u8	pg_entry	= 0;
	u8	num_of_pri	= LLFC_DRIVER_TRAFFIC_TYPE_MAX;

	cos_data->data[0].pausable = true;
	cos_data->data[1].pausable = false;
	cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;

	for (i = 0 ; i < num_of_pri ; i++) {
		pri_tested = 1 << bp->dcbx_port_params.
					app.traffic_type_priority[i];

		if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
			cos_data->data[1].pri_join_mask |= pri_tested;
			entry = 1;
		} else {
			cos_data->data[0].pri_join_mask |= pri_tested;
			entry = 0;
		}
		pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
						app.traffic_type_priority[i]];
		/* There can be only one strict pg */
		if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
			bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
		else
			/* If we join a group and one is strict
			 * than the bw rulls */
			cos_data->data[entry].strict =
						BNX2X_DCBX_STRICT_COS_HIGHEST;
	}

	if ((0 == cos_data->data[0].pri_join_mask) &&
	    (0 == cos_data->data[1].pri_join_mask))
		BNX2X_ERR("dcbx error: Both groups must have priorities\n");
}

#ifndef POWER_OF_2
#define POWER_OF_2(x)	((0 != x) && (0 == (x & (x-1))))
#endif

/* Map a single priority group onto the two available COS (2-COS chips):
 * pauseable vs non-pauseable priorities are split across the queues and
 * bandwidth/strict settings are derived from the mix.
 */
static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
					      struct pg_help_data *pg_help_data,
					      struct cos_help_data *cos_data,
					      u32 pri_join_mask,
					      u8 num_of_dif_pri)
{
	u8 i = 0;
	u32 pri_tested = 0;
	u32 pri_mask_without_pri = 0;
	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
	/*debug*/
	if (num_of_dif_pri == 1) {
		bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
						   pri_join_mask);
		return;
	}
	/* single priority group */
	if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
		/* If there are both pauseable and non-pauseable priorities,
		 * the pauseable priorities go to the first queue and
		 * the non-pauseable priorities go to the second queue.
		 */
		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
			/* Pauseable */
			cos_data->data[0].pausable = true;
			/* Non pauseable.*/
			cos_data->data[1].pausable = false;

			if (2 == num_of_dif_pri) {
				cos_data->data[0].cos_bw = 50;
				cos_data->data[1].cos_bw = 50;
			}

			if (3 == num_of_dif_pri) {
				/* 2:1 split, larger share to the queue with
				 * two priorities
				 */
				if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
							pri_join_mask))) {
					cos_data->data[0].cos_bw = 33;
					cos_data->data[1].cos_bw = 67;
				} else {
					cos_data->data[0].cos_bw = 67;
					cos_data->data[1].cos_bw = 33;
				}
			}

		} else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
			/* If there are only pauseable priorities,
			 * then one/two priorities go to the first queue
			 * and one priority goes to the second queue.
*/ if (2 == num_of_dif_pri) { cos_data->data[0].cos_bw = 50; cos_data->data[1].cos_bw = 50; } else { cos_data->data[0].cos_bw = 67; cos_data->data[1].cos_bw = 33; } cos_data->data[1].pausable = true; cos_data->data[0].pausable = true; /* All priorities except FCOE */ cos_data->data[0].pri_join_mask = (pri_join_mask & ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]))); /* Only FCOE priority.*/ cos_data->data[1].pri_join_mask = (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]); } else /* If there are only non-pauseable priorities, * they will all go to the same queue. */ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); } else { /* priority group which is not BW limited (PG#15):*/ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { /* If there are both pauseable and non-pauseable * priorities, the pauseable priorities go to the first * queue and the non-pauseable priorities * go to the second queue. */ if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) > DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) { cos_data->data[0].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; cos_data->data[1].strict = BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( BNX2X_DCBX_STRICT_COS_HIGHEST); } else { cos_data->data[0].strict = BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( BNX2X_DCBX_STRICT_COS_HIGHEST); cos_data->data[1].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } /* Pauseable */ cos_data->data[0].pausable = true; /* Non pause-able.*/ cos_data->data[1].pausable = false; } else { /* If there are only pauseable priorities or * only non-pauseable,* the lower priorities go * to the first queue and the higherpriorities go * to the second queue. */ cos_data->data[0].pausable = cos_data->data[1].pausable = IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) { pri_tested = 1 << bp->dcbx_port_params. 
app.traffic_type_priority[i]; /* Remove priority tested */ pri_mask_without_pri = (pri_join_mask & ((u8)(~pri_tested))); if (pri_mask_without_pri < pri_tested) break; } if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX) BNX2X_ERR("Invalid value for pri_join_mask - could not find a priority\n"); cos_data->data[0].pri_join_mask = pri_mask_without_pri; cos_data->data[1].pri_join_mask = pri_tested; /* Both queues are strict priority, * and that with the highest priority * gets the highest strict priority in the arbiter. */ cos_data->data[0].strict = BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( BNX2X_DCBX_STRICT_COS_HIGHEST); cos_data->data[1].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } } } static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( struct bnx2x *bp, struct pg_help_data *pg_help_data, struct dcbx_ets_feature *ets, struct cos_help_data *cos_data, u32 *pg_pri_orginal_spread, u32 pri_join_mask, u8 num_of_dif_pri) { u8 i = 0; u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 }; /* If there are both pauseable and non-pauseable priorities, * the pauseable priorities go to the first queue and * the non-pauseable priorities go to the second queue. */ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_help_data->data[0].pg_priority) || IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_help_data->data[1].pg_priority)) { /* If one PG contains both pauseable and * non-pauseable priorities then ETS is disabled. */ bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data, pg_pri_orginal_spread, ets); bp->dcbx_port_params.ets.enabled = false; return; } /* Pauseable */ cos_data->data[0].pausable = true; /* Non pauseable. 
*/ cos_data->data[1].pausable = false; if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_help_data->data[0].pg_priority)) { /* 0 is pauseable */ cos_data->data[0].pri_join_mask = pg_help_data->data[0].pg_priority; pg[0] = pg_help_data->data[0].pg; cos_data->data[1].pri_join_mask = pg_help_data->data[1].pg_priority; pg[1] = pg_help_data->data[1].pg; } else {/* 1 is pauseable */ cos_data->data[0].pri_join_mask = pg_help_data->data[1].pg_priority; pg[0] = pg_help_data->data[1].pg; cos_data->data[1].pri_join_mask = pg_help_data->data[0].pg_priority; pg[1] = pg_help_data->data[0].pg; } } else { /* If there are only pauseable priorities or * only non-pauseable, each PG goes to a queue. */ cos_data->data[0].pausable = cos_data->data[1].pausable = IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); cos_data->data[0].pri_join_mask = pg_help_data->data[0].pg_priority; pg[0] = pg_help_data->data[0].pg; cos_data->data[1].pri_join_mask = pg_help_data->data[1].pg_priority; pg[1] = pg_help_data->data[1].pg; } /* There can be only one strict pg */ for (i = 0 ; i < ARRAY_SIZE(pg); i++) { if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES) cos_data->data[i].cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]); else cos_data->data[i].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } } static int bnx2x_dcbx_join_pgs( struct bnx2x *bp, struct dcbx_ets_feature *ets, struct pg_help_data *pg_help_data, u8 required_num_of_pg) { u8 entry_joined = pg_help_data->num_of_pg - 1; u8 entry_removed = entry_joined + 1; u8 pg_joined = 0; if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data) <= pg_help_data->num_of_pg) { BNX2X_ERR("required_num_of_pg can't be zero\n"); return -EINVAL; } while (required_num_of_pg < pg_help_data->num_of_pg) { entry_joined = pg_help_data->num_of_pg - 2; entry_removed = entry_joined + 1; /* protect index */ entry_removed %= ARRAY_SIZE(pg_help_data->data); pg_help_data->data[entry_joined].pg_priority |= pg_help_data->data[entry_removed].pg_priority; pg_help_data->data[entry_joined].num_of_dif_pri += 
pg_help_data->data[entry_removed].num_of_dif_pri; if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG || pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG) /* Entries joined strict priority rules */ pg_help_data->data[entry_joined].pg = DCBX_STRICT_PRI_PG; else { /* Entries can be joined join BW */ pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_help_data->data[entry_joined].pg) + DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_help_data->data[entry_removed].pg); DCBX_PG_BW_SET(ets->pg_bw_tbl, pg_help_data->data[entry_joined].pg, pg_joined); } /* Joined the entries */ pg_help_data->num_of_pg--; } return 0; } static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( struct bnx2x *bp, struct pg_help_data *pg_help_data, struct dcbx_ets_feature *ets, struct cos_help_data *cos_data, u32 *pg_pri_orginal_spread, u32 pri_join_mask, u8 num_of_dif_pri) { u8 i = 0; u32 pri_tested = 0; u8 entry = 0; u8 pg_entry = 0; bool b_found_strict = false; u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX; cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0; /* If there are both pauseable and non-pauseable priorities, * the pauseable priorities go to the first queue and the * non-pauseable priorities go to the second queue. */ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data, pg_pri_orginal_spread, ets); else { /* If two BW-limited PG-s were combined to one queue, * the BW is their sum. * * If there are only pauseable priorities or only non-pauseable, * and there are both BW-limited and non-BW-limited PG-s, * the BW-limited PG/s go to one queue and the non-BW-limited * PG/s go to the second queue. * * If there are only pauseable priorities or only non-pauseable * and all are BW limited, then two priorities go to the first * queue and one priority goes to the second queue. 
* * We will join this two cases: * if one is BW limited it will go to the secoend queue * otherwise the last priority will get it */ cos_data->data[0].pausable = cos_data->data[1].pausable = IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); for (i = 0 ; i < num_of_pri; i++) { pri_tested = 1 << bp->dcbx_port_params. app.traffic_type_priority[i]; pg_entry = (u8)pg_pri_orginal_spread[bp-> dcbx_port_params.app.traffic_type_priority[i]]; if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) { entry = 0; if (i == (num_of_pri-1) && false == b_found_strict) /* last entry will be handled separately * If no priority is strict than last * enty goes to last queue.*/ entry = 1; cos_data->data[entry].pri_join_mask |= pri_tested; bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry], DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); } else { b_found_strict = true; cos_data->data[1].pri_join_mask |= pri_tested; /* If we join a group and one is strict * than the bw rulls */ cos_data->data[1].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } } } } static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, struct pg_help_data *help_data, struct dcbx_ets_feature *ets, struct cos_help_data *cos_data, u32 *pg_pri_orginal_spread, u32 pri_join_mask, u8 num_of_dif_pri) { /* default E2 settings */ cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2; switch (help_data->num_of_pg) { case 1: bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params( bp, help_data, cos_data, pri_join_mask, num_of_dif_pri); break; case 2: bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( bp, help_data, ets, cos_data, pg_pri_orginal_spread, pri_join_mask, num_of_dif_pri); break; case 3: bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( bp, help_data, ets, cos_data, pg_pri_orginal_spread, pri_join_mask, num_of_dif_pri); break; default: BNX2X_ERR("Wrong pg_help_data.num_of_pg\n"); bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); } } static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp, struct cos_help_data *cos_data, u8 entry, u8 
num_spread_of_entries, u8 strict_app_pris) { u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST; u8 num_of_app_pri = MAX_PFC_PRIORITIES; u8 app_pri_bit = 0; while (num_spread_of_entries && num_of_app_pri > 0) { app_pri_bit = 1 << (num_of_app_pri - 1); if (app_pri_bit & strict_app_pris) { struct cos_entry_help_data *data = &cos_data-> data[entry]; num_spread_of_entries--; if (num_spread_of_entries == 0) { /* last entry needed put all the entries left */ data->cos_bw = DCBX_INVALID_COS_BW; data->strict = strict_pri; data->pri_join_mask = strict_app_pris; data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, data->pri_join_mask); } else { strict_app_pris &= ~app_pri_bit; data->cos_bw = DCBX_INVALID_COS_BW; data->strict = strict_pri; data->pri_join_mask = app_pri_bit; data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, data->pri_join_mask); } strict_pri = BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri); entry++; } num_of_app_pri--; } if (num_spread_of_entries) { BNX2X_ERR("Didn't succeed to spread strict priorities\n"); return -EINVAL; } return 0; } static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, struct cos_help_data *cos_data, u8 entry, u8 num_spread_of_entries, u8 strict_app_pris) { if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, num_spread_of_entries, strict_app_pris)) { struct cos_entry_help_data *data = &cos_data-> data[entry]; /* Fill BW entry */ data->cos_bw = DCBX_INVALID_COS_BW; data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST; data->pri_join_mask = strict_app_pris; data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, data->pri_join_mask); return 1; } return num_spread_of_entries; } static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, struct pg_help_data *help_data, struct dcbx_ets_feature *ets, struct cos_help_data *cos_data, u32 pri_join_mask) { u8 need_num_of_entries = 0; u8 i = 0; u8 entry = 0; /* * if the number of requested PG-s in CEE is greater than 3 * then the results are not determined since this is a violation * of the standard. 
*/ if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) { if (bnx2x_dcbx_join_pgs(bp, ets, help_data, DCBX_COS_MAX_NUM_E3B0)) { BNX2X_ERR("Unable to reduce the number of PGs - we will disables ETS\n"); bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); return; } } for (i = 0 ; i < help_data->num_of_pg; i++) { struct pg_entry_help_data *pg = &help_data->data[i]; if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { struct cos_entry_help_data *data = &cos_data-> data[entry]; /* Fill BW entry */ data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg); data->strict = BNX2X_DCBX_STRICT_INVALID; data->pri_join_mask = pg->pg_priority; data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, data->pri_join_mask); entry++; } else { need_num_of_entries = min_t(u8, (u8)pg->num_of_dif_pri, (u8)DCBX_COS_MAX_NUM_E3B0 - help_data->num_of_pg + 1); /* * If there are still VOQ-s which have no associated PG, * then associate these VOQ-s to PG15. These PG-s will * be used for SP between priorities on PG15. */ entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data, entry, need_num_of_entries, pg->pg_priority); } } /* the entry will represent the number of COSes used */ cos_data->num_of_cos = entry; } static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, struct pg_help_data *help_data, struct dcbx_ets_feature *ets, u32 *pg_pri_orginal_spread) { struct cos_help_data cos_data; u8 i = 0; u32 pri_join_mask = 0; u8 num_of_dif_pri = 0; memset(&cos_data, 0, sizeof(cos_data)); /* Validate the pg value */ for (i = 0; i < help_data->num_of_pg ; i++) { if (DCBX_STRICT_PRIORITY != help_data->data[i].pg && DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg) BNX2X_ERR("Invalid pg[%d] data %x\n", i, help_data->data[i].pg); pri_join_mask |= help_data->data[i].pg_priority; num_of_dif_pri += help_data->data[i].num_of_dif_pri; } /* defaults */ cos_data.num_of_cos = 1; for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) { cos_data.data[i].pri_join_mask = 0; cos_data.data[i].pausable = false; cos_data.data[i].strict = 
BNX2X_DCBX_STRICT_INVALID; cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW; } if (CHIP_IS_E3B0(bp)) bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets, &cos_data, pri_join_mask); else /* E2 + E3A0 */ bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp, help_data, ets, &cos_data, pg_pri_orginal_spread, pri_join_mask, num_of_dif_pri); for (i = 0; i < cos_data.num_of_cos ; i++) { struct bnx2x_dcbx_cos_params *p = &bp->dcbx_port_params.ets.cos_params[i]; p->strict = cos_data.data[i].strict; p->bw_tbl = cos_data.data[i].cos_bw; p->pri_bitmask = cos_data.data[i].pri_join_mask; p->pauseable = cos_data.data[i].pausable; /* sanity */ if (p->bw_tbl != DCBX_INVALID_COS_BW || p->strict != BNX2X_DCBX_STRICT_INVALID) { if (p->pri_bitmask == 0) BNX2X_ERR("Invalid pri_bitmask for %d\n", i); if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) { if (p->pauseable && DCBX_PFC_PRI_GET_NON_PAUSE(bp, p->pri_bitmask) != 0) BNX2X_ERR("Inconsistent config for pausable COS %d\n", i); if (!p->pauseable && DCBX_PFC_PRI_GET_PAUSE(bp, p->pri_bitmask) != 0) BNX2X_ERR("Inconsistent config for nonpausable COS %d\n", i); } } if (p->pauseable) DP(BNX2X_MSG_DCB, "COS %d PAUSABLE prijoinmask 0x%x\n", i, cos_data.data[i].pri_join_mask); else DP(BNX2X_MSG_DCB, "COS %d NONPAUSABLE prijoinmask 0x%x\n", i, cos_data.data[i].pri_join_mask); } bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; } static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, u32 *set_configuration_ets_pg, u32 *pri_pg_tbl) { int i; for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i); DP(BNX2X_MSG_DCB, "set_configuration_ets_pg[%d] = 0x%x\n", i, set_configuration_ets_pg[i]); } } static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, struct bnx2x_func_tx_start_params *pfc_fw_cfg) { u16 pri_bit = 0; u8 cos = 0, pri = 0; struct priority_cos *tt2cos; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg)); /* to disable DCB - the structure must be 
zeroed */ if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) return; /*shortcut*/ tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos; /* Fw version should be incremented each update */ pfc_fw_cfg->dcb_version = ++bp->dcb_version; pfc_fw_cfg->dcb_enabled = 1; /* Fill priority parameters */ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { tt2cos[pri].priority = ttp[pri]; pri_bit = 1 << tt2cos[pri].priority; /* Fill COS parameters based on COS calculated to * make it more general for future use */ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) if (bp->dcbx_port_params.ets.cos_params[cos]. pri_bitmask & pri_bit) tt2cos[pri].cos = cos; } /* we never want the FW to add a 0 vlan tag */ pfc_fw_cfg->dont_add_pri_0_en = 1; bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); } void bnx2x_dcbx_pmf_update(struct bnx2x *bp) { /* if we need to syncronize DCBX result from prev PMF * read it from shmem and update bp and netdev accordingly */ if (SHMEM2_HAS(bp, drv_flags) && GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_CONFIGURED)) { /* Read neg results if dcbx is in the FW */ if (bnx2x_dcbx_read_shmem_neg_results(bp)) return; bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, bp->dcbx_error); bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, bp->dcbx_error); #ifdef BCM_DCBNL /* * Add new app tlvs to dcbnl */ bnx2x_dcbnl_update_applist(bp, false); /* * Send a notification for the new negotiated parameters */ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); #endif /* * reconfigure the netdevice with the results of the new * dcbx negotiation. */ bnx2x_dcbx_update_tc_mapping(bp); } } /* DCB netlink */ #ifdef BCM_DCBNL #define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \ DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC) static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp) { /* validate dcbnl call that may change HW state: * DCB is on and DCBX mode was SUCCESSFULLY set by the user. 
*/ return bp->dcb_state && bp->dcbx_mode_uset; } static u8 bnx2x_dcbnl_get_state(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state); return bp->dcb_state; } static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off"); bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled); return 0; } static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "GET-PERM-ADDR\n"); /* first the HW mac address */ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); #ifdef BCM_CNIC /* second SAN address */ memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len); #endif } static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, u8 prio_type, u8 pgid, u8 bw_pct, u8 up_map) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, pgid); if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) return; /** * bw_pct ingnored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * * prio_type igonred - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict * prioirty traffic (on the port level). 
* * up_map ignored */ bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid; bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; } static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev, int pgid, u8 bw_pct) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "pgid[%d] = %d\n", pgid, bw_pct); if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) return; bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct; bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; } static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio, u8 prio_type, u8 pgid, u8 bw_pct, u8 up_map) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n"); } static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev, int pgid, u8 bw_pct) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n"); } static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "prio = %d\n", prio); /** * bw_pct ingnored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * * prio_type igonred - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict * prioirty traffic (on the port level). 
* * up_map ignored */ *up_map = *bw_pct = *prio_type = *pgid = 0; if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) return; *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio); } static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "pgid = %d\n", pgid); *bw_pct = 0; if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) return; *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid); } static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n"); *prio_type = *pgid = *bw_pct = *up_map = 0; } static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n"); *bw_pct = 0; } static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 setting) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, setting); if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES) return; bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio); if (setting) bp->dcbx_config_params.admin_pfc_tx_enable = 1; } static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "prio = %d\n", prio); *setting = 0; if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES) return; *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1; } static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); int rc = 0; DP(BNX2X_MSG_DCB, "SET-ALL\n"); if (!bnx2x_dcbnl_set_valid(bp)) return 1; if (bp->recovery_state != BNX2X_RECOVERY_DONE) { netdev_err(bp->dev, "Handling parity error recovery. 
Try again later\n"); return 1; } if (netif_running(bp->dev)) bnx2x_dcbx_init(bp, true); DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); if (rc) return 1; return 0; } static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) { struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; if (bp->dcb_state) { switch (capid) { case DCB_CAP_ATTR_PG: *cap = true; break; case DCB_CAP_ATTR_PFC: *cap = true; break; case DCB_CAP_ATTR_UP2TC: *cap = false; break; case DCB_CAP_ATTR_PG_TCS: *cap = 0x80; /* 8 priorities for PGs */ break; case DCB_CAP_ATTR_PFC_TCS: *cap = 0x80; /* 8 priorities for PFC */ break; case DCB_CAP_ATTR_GSP: *cap = true; break; case DCB_CAP_ATTR_BCN: *cap = false; break; case DCB_CAP_ATTR_DCBX: *cap = BNX2X_DCBX_CAPS; break; default: BNX2X_ERR("Non valid capability ID\n"); rval = -EINVAL; break; } } else { DP(BNX2X_MSG_DCB, "DCB disabled\n"); rval = -EINVAL; } DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap); return rval; } static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) { struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; DP(BNX2X_MSG_DCB, "tcid %d\n", tcid); if (bp->dcb_state) { switch (tcid) { case DCB_NUMTCS_ATTR_PG: *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : DCBX_COS_MAX_NUM_E2; break; case DCB_NUMTCS_ATTR_PFC: *num = CHIP_IS_E3B0(bp) ? 
DCBX_COS_MAX_NUM_E3B0 : DCBX_COS_MAX_NUM_E2; break; default: BNX2X_ERR("Non valid TC-ID\n"); rval = -EINVAL; break; } } else { DP(BNX2X_MSG_DCB, "DCB disabled\n"); rval = -EINVAL; } return rval; } static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "num tcs = %d; Not supported\n", num); return -EINVAL; } static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); if (!bp->dcb_state) return 0; return bp->dcbx_local_feat.pfc.enabled; } static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off"); if (!bnx2x_dcbnl_set_valid(bp)) return; bp->dcbx_config_params.admin_pfc_tx_enable = bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0); } static void bnx2x_admin_app_set_ent( struct bnx2x_admin_priority_app_table *app_ent, u8 idtype, u16 idval, u8 up) { app_ent->valid = 1; switch (idtype) { case DCB_APP_IDTYPE_ETHTYPE: app_ent->traffic_type = TRAFFIC_TYPE_ETH; break; case DCB_APP_IDTYPE_PORTNUM: app_ent->traffic_type = TRAFFIC_TYPE_PORT; break; default: break; /* never gets here */ } app_ent->app_id = idval; app_ent->priority = up; } static bool bnx2x_admin_app_is_equal( struct bnx2x_admin_priority_app_table *app_ent, u8 idtype, u16 idval) { if (!app_ent->valid) return false; switch (idtype) { case DCB_APP_IDTYPE_ETHTYPE: if (app_ent->traffic_type != TRAFFIC_TYPE_ETH) return false; break; case DCB_APP_IDTYPE_PORTNUM: if (app_ent->traffic_type != TRAFFIC_TYPE_PORT) return false; break; default: return false; } if (app_ent->app_id != idval) return false; return true; } static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) { int i, ff; /* iterate over the app entries looking for idtype and idval */ for (i = 0, ff = -1; i < 
DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { struct bnx2x_admin_priority_app_table *app_ent = &bp->dcbx_config_params.admin_priority_app_table[i]; if (bnx2x_admin_app_is_equal(app_ent, idtype, idval)) break; if (ff < 0 && !app_ent->valid) ff = i; } if (i < DCBX_CONFIG_MAX_APP_PROTOCOL) /* if found overwrite up */ bp->dcbx_config_params. admin_priority_app_table[i].priority = up; else if (ff >= 0) /* not found use first-free */ bnx2x_admin_app_set_ent( &bp->dcbx_config_params.admin_priority_app_table[ff], idtype, idval, up); else { /* app table is full */ BNX2X_ERR("Application table is too large\n"); return -EBUSY; } /* up configured, if not 0 make sure feature is enabled */ if (up) bp->dcbx_config_params.admin_application_priority_tx_enable = 1; return 0; } static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, u16 idval, u8 up) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "app_type %d, app_id %x, prio bitmap %d\n", idtype, idval, up); if (!bnx2x_dcbnl_set_valid(bp)) { DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); return -EINVAL; } /* verify idtype */ switch (idtype) { case DCB_APP_IDTYPE_ETHTYPE: case DCB_APP_IDTYPE_PORTNUM: break; default: DP(BNX2X_MSG_DCB, "Wrong ID type\n"); return -EINVAL; } return bnx2x_set_admin_app_up(bp, idtype, idval, up); } static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); u8 state; state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE; if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF) state |= DCB_CAP_DCBX_STATIC; return state; } static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "state = %02x\n", state); /* set dcbx mode */ if ((state & BNX2X_DCBX_CAPS) != state) { BNX2X_ERR("Requested DCBX mode %x is beyond advertised capabilities\n", state); return 1; } if (bp->dcb_state != BNX2X_DCB_STATE_ON) { BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n"); return 1; } if (state & 
DCB_CAP_DCBX_STATIC) bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF; else bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON; bp->dcbx_mode_uset = true; return 0; } static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, u8 *flags) { struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; DP(BNX2X_MSG_DCB, "featid %d\n", featid); if (bp->dcb_state) { *flags = 0; switch (featid) { case DCB_FEATCFG_ATTR_PG: if (bp->dcbx_local_feat.ets.enabled) *flags |= DCB_FEATCFG_ENABLE; if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) *flags |= DCB_FEATCFG_ERROR; break; case DCB_FEATCFG_ATTR_PFC: if (bp->dcbx_local_feat.pfc.enabled) *flags |= DCB_FEATCFG_ENABLE; if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH)) *flags |= DCB_FEATCFG_ERROR; break; case DCB_FEATCFG_ATTR_APP: if (bp->dcbx_local_feat.app.enabled) *flags |= DCB_FEATCFG_ENABLE; if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) *flags |= DCB_FEATCFG_ERROR; break; default: BNX2X_ERR("Non valid featrue-ID\n"); rval = -EINVAL; break; } } else { DP(BNX2X_MSG_DCB, "DCB disabled\n"); rval = -EINVAL; } return rval; } static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, u8 flags) { struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; DP(BNX2X_MSG_DCB, "featid = %d flags = %02x\n", featid, flags); /* ignore the 'advertise' flag */ if (bnx2x_dcbnl_set_valid(bp)) { switch (featid) { case DCB_FEATCFG_ATTR_PG: bp->dcbx_config_params.admin_ets_enable = flags & DCB_FEATCFG_ENABLE ? 1 : 0; bp->dcbx_config_params.admin_ets_willing = flags & DCB_FEATCFG_WILLING ? 1 : 0; break; case DCB_FEATCFG_ATTR_PFC: bp->dcbx_config_params.admin_pfc_enable = flags & DCB_FEATCFG_ENABLE ? 1 : 0; bp->dcbx_config_params.admin_pfc_willing = flags & DCB_FEATCFG_WILLING ? 1 : 0; break; case DCB_FEATCFG_ATTR_APP: /* ignore enable, always enabled */ bp->dcbx_config_params.admin_app_priority_willing = flags & DCB_FEATCFG_WILLING ? 
1 : 0; break; default: BNX2X_ERR("Non valid featrue-ID\n"); rval = -EINVAL; break; } } else { DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); rval = -EINVAL; } return rval; } static int bnx2x_peer_appinfo(struct net_device *netdev, struct dcb_peer_app_info *info, u16* app_count) { int i; struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "APP-INFO\n"); info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0; info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0; *app_count = 0; for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield & DCBX_APP_ENTRY_VALID) (*app_count)++; return 0; } static int bnx2x_peer_apptable(struct net_device *netdev, struct dcb_app *table) { int i, j; struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "APP-TABLE\n"); for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { struct dcbx_app_priority_entry *ent = &bp->dcbx_remote_feat.app.app_pri_tbl[i]; if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent); table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent); table[j++].protocol = ent->app_id; } } return 0; } static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg) { int i; struct bnx2x *bp = netdev_priv(netdev); pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0; for (i = 0; i < CEE_DCBX_MAX_PGS; i++) { pg->pg_bw[i] = DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i); pg->prio_pg[i] = DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i); } return 0; } static int bnx2x_cee_peer_getpfc(struct net_device *netdev, struct cee_pfc *pfc) { struct bnx2x *bp = netdev_priv(netdev); pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps; pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap; return 0; } const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = { .getstate = bnx2x_dcbnl_get_state, .setstate = bnx2x_dcbnl_set_state, .getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr, .setpgtccfgtx = 
bnx2x_dcbnl_set_pg_tccfg_tx, .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx, .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx, .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx, .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx, .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx, .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx, .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx, .setpfccfg = bnx2x_dcbnl_set_pfc_cfg, .getpfccfg = bnx2x_dcbnl_get_pfc_cfg, .setall = bnx2x_dcbnl_set_all, .getcap = bnx2x_dcbnl_get_cap, .getnumtcs = bnx2x_dcbnl_get_numtcs, .setnumtcs = bnx2x_dcbnl_set_numtcs, .getpfcstate = bnx2x_dcbnl_get_pfc_state, .setpfcstate = bnx2x_dcbnl_set_pfc_state, .setapp = bnx2x_dcbnl_set_app_up, .getdcbx = bnx2x_dcbnl_get_dcbx, .setdcbx = bnx2x_dcbnl_set_dcbx, .getfeatcfg = bnx2x_dcbnl_get_featcfg, .setfeatcfg = bnx2x_dcbnl_set_featcfg, .peer_getappinfo = bnx2x_peer_appinfo, .peer_getapptable = bnx2x_peer_apptable, .cee_peer_getpg = bnx2x_cee_peer_getpg, .cee_peer_getpfc = bnx2x_cee_peer_getpfc, }; #endif /* BCM_DCBNL */
gpl-2.0
MyAOSP/kernel_htc_m7
kernel/freezer.c
83
2675
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/* indicate whether PM freezing is in effect */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);

/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * Tests whether @p needs to enter and stay in frozen state.  PF_NOFREEZE
 * tasks are never frozen; nosig freezing and cgroup freezing apply to all
 * other tasks, while regular PM freezing applies only to userspace
 * (non-PF_KTHREAD) tasks.
 */
bool freezing_slow_path(struct task_struct *p)
{
	if (p->flags & PF_NOFREEZE)
		return false;

	if (pm_nosig_freezing || cgroup_freezing(p))
		return true;

	if (pm_freezing && !(p->flags & PF_KTHREAD))
		return true;

	return false;
}
EXPORT_SYMBOL(freezing_slow_path);

/* Refrigerator is place where frozen processes are stored :-). */
bool __refrigerator(bool check_kthr_stop)
{
	bool was_frozen = false;
	/* remember the task state so it can be restored after thawing */
	long save = current->state;

	pr_debug("%s entered refrigerator\n", current->comm);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * PF_FROZEN is set/cleared under freezer_lock so the
		 * freezing() test and the flag update are atomic with
		 * respect to freeze_task()/__thaw_task().
		 */
		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		spin_unlock_irq(&freezer_lock);

		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}

	pr_debug("%s left refrigerator\n", current->comm);

	/* restore the task state saved on entry */
	set_current_state(save);

	return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);

/*
 * Deliver a wakeup that looks like a signal so a userspace task drops out
 * of interruptible sleep and reaches try_to_freeze(); no signal is queued.
 */
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		signal_wake_up(p, 0);
		unlock_task_sighand(p, &flags);
	}
}

/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task.  It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD)) {
		fake_signal_wake_up(p);
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}

/*
 * __thaw_task - wake @p out of the refrigerator.
 *
 * __refrigerator() itself clears PF_FROZEN once freezing() turns false;
 * all this needs to do is wake the frozen task so it re-evaluates.
 */
void __thaw_task(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&freezer_lock, flags);
	if (frozen(p))
		wake_up_process(p);
	spin_unlock_irqrestore(&freezer_lock, flags);
}

/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 */
bool set_freezable(void)
{
	might_sleep();

	/*
	 * Modify flags while holding freezer_lock.  This ensures the
	 * freezer notices that we aren't frozen yet or the freezing
	 * condition is visible to try_to_freeze() below.
	 */
	spin_lock_irq(&freezer_lock);
	current->flags &= ~PF_NOFREEZE;
	spin_unlock_irq(&freezer_lock);

	return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);
gpl-2.0
EmmanuelU/wild_kernel_samsung_msm8660
arch/arm/mach-msm/smem_log.c
83
48990
/* Copyright (c) 2008-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * Shared memory logging implementation. */ #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/jiffies.h> #include <linux/remote_spinlock.h> #include <linux/debugfs.h> #include <linux/io.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <mach/msm_iomap.h> #include <mach/smem_log.h> #include "smd_private.h" #include "smd_rpc_sym.h" #include "modem_notifier.h" #define DEBUG #undef DEBUG #ifdef DEBUG #define D_DUMP_BUFFER(prestr, cnt, buf) \ do { \ int i; \ printk(KERN_ERR "%s", prestr); \ for (i = 0; i < cnt; i++) \ printk(KERN_ERR "%.2x", buf[i]); \ printk(KERN_ERR "\n"); \ } while (0) #else #define D_DUMP_BUFFER(prestr, cnt, buf) #endif #ifdef DEBUG #define D(x...) printk(x) #else #define D(x...) 
do {} while (0) #endif #if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) \ || defined(CONFIG_ARCH_FSM9XXX) #define TIMESTAMP_ADDR (MSM_TMR_BASE + 0x08) #else #define TIMESTAMP_ADDR (MSM_TMR_BASE + 0x04) #endif struct smem_log_item { uint32_t identifier; uint32_t timetick; uint32_t data1; uint32_t data2; uint32_t data3; }; #define SMEM_LOG_NUM_ENTRIES 2000 #define SMEM_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \ SMEM_LOG_NUM_ENTRIES) #define SMEM_LOG_NUM_STATIC_ENTRIES 150 #define SMEM_STATIC_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \ SMEM_LOG_NUM_STATIC_ENTRIES) #define SMEM_LOG_NUM_POWER_ENTRIES 2000 #define SMEM_POWER_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \ SMEM_LOG_NUM_POWER_ENTRIES) #define SMEM_SPINLOCK_SMEM_LOG "S:2" #define SMEM_SPINLOCK_STATIC_LOG "S:5" /* POWER shares with SMEM_SPINLOCK_SMEM_LOG */ static remote_spinlock_t remote_spinlock; static remote_spinlock_t remote_spinlock_static; static uint32_t smem_log_enable; static int smem_log_initialized; module_param_named(log_enable, smem_log_enable, int, S_IRUGO | S_IWUSR | S_IWGRP); struct smem_log_inst { int which_log; struct smem_log_item __iomem *events; uint32_t __iomem *idx; uint32_t num; uint32_t read_idx; uint32_t last_read_avail; wait_queue_head_t read_wait; remote_spinlock_t *remote_spinlock; }; enum smem_logs { GEN = 0, STA, POW, NUM }; static struct smem_log_inst inst[NUM]; #if defined(CONFIG_DEBUG_FS) #define HSIZE 13 struct sym { uint32_t val; char *str; struct hlist_node node; }; struct sym id_syms[] = { { SMEM_LOG_PROC_ID_MODEM, "MODM" }, { SMEM_LOG_PROC_ID_Q6, "QDSP" }, { SMEM_LOG_PROC_ID_APPS, "APPS" }, }; struct sym base_syms[] = { { SMEM_LOG_ONCRPC_EVENT_BASE, "ONCRPC" }, { SMEM_LOG_SMEM_EVENT_BASE, "SMEM" }, { SMEM_LOG_TMC_EVENT_BASE, "TMC" }, { SMEM_LOG_TIMETICK_EVENT_BASE, "TIMETICK" }, { SMEM_LOG_DEM_EVENT_BASE, "DEM" }, { SMEM_LOG_ERROR_EVENT_BASE, "ERROR" }, { SMEM_LOG_DCVS_EVENT_BASE, "DCVS" }, { SMEM_LOG_SLEEP_EVENT_BASE, "SLEEP" }, { 
SMEM_LOG_RPC_ROUTER_EVENT_BASE, "ROUTER" }, }; struct sym event_syms[] = { #if defined(CONFIG_MSM_N_WAY_SMSM) { DEM_SMSM_ISR, "SMSM_ISR" }, { DEM_STATE_CHANGE, "STATE_CHANGE" }, { DEM_STATE_MACHINE_ENTER, "STATE_MACHINE_ENTER" }, { DEM_ENTER_SLEEP, "ENTER_SLEEP" }, { DEM_END_SLEEP, "END_SLEEP" }, { DEM_SETUP_SLEEP, "SETUP_SLEEP" }, { DEM_SETUP_POWER_COLLAPSE, "SETUP_POWER_COLLAPSE" }, { DEM_SETUP_SUSPEND, "SETUP_SUSPEND" }, { DEM_EARLY_EXIT, "EARLY_EXIT" }, { DEM_WAKEUP_REASON, "WAKEUP_REASON" }, { DEM_DETECT_WAKEUP, "DETECT_WAKEUP" }, { DEM_DETECT_RESET, "DETECT_RESET" }, { DEM_DETECT_SLEEPEXIT, "DETECT_SLEEPEXIT" }, { DEM_DETECT_RUN, "DETECT_RUN" }, { DEM_APPS_SWFI, "APPS_SWFI" }, { DEM_SEND_WAKEUP, "SEND_WAKEUP" }, { DEM_ASSERT_OKTS, "ASSERT_OKTS" }, { DEM_NEGATE_OKTS, "NEGATE_OKTS" }, { DEM_PROC_COMM_CMD, "PROC_COMM_CMD" }, { DEM_REMOVE_PROC_PWR, "REMOVE_PROC_PWR" }, { DEM_RESTORE_PROC_PWR, "RESTORE_PROC_PWR" }, { DEM_SMI_CLK_DISABLED, "SMI_CLK_DISABLED" }, { DEM_SMI_CLK_ENABLED, "SMI_CLK_ENABLED" }, { DEM_MAO_INTS, "MAO_INTS" }, { DEM_APPS_WAKEUP_INT, "APPS_WAKEUP_INT" }, { DEM_PROC_WAKEUP, "PROC_WAKEUP" }, { DEM_PROC_POWERUP, "PROC_POWERUP" }, { DEM_TIMER_EXPIRED, "TIMER_EXPIRED" }, { DEM_SEND_BATTERY_INFO, "SEND_BATTERY_INFO" }, { DEM_REMOTE_PWR_CB, "REMOTE_PWR_CB" }, { DEM_TIME_SYNC_START, "TIME_SYNC_START" }, { DEM_TIME_SYNC_SEND_VALUE, "TIME_SYNC_SEND_VALUE" }, { DEM_TIME_SYNC_DONE, "TIME_SYNC_DONE" }, { DEM_TIME_SYNC_REQUEST, "TIME_SYNC_REQUEST" }, { DEM_TIME_SYNC_POLL, "TIME_SYNC_POLL" }, { DEM_TIME_SYNC_INIT, "TIME_SYNC_INIT" }, { DEM_INIT, "INIT" }, #else { DEM_NO_SLEEP, "NO_SLEEP" }, { DEM_INSUF_TIME, "INSUF_TIME" }, { DEMAPPS_ENTER_SLEEP, "APPS_ENTER_SLEEP" }, { DEMAPPS_DETECT_WAKEUP, "APPS_DETECT_WAKEUP" }, { DEMAPPS_END_APPS_TCXO, "APPS_END_APPS_TCXO" }, { DEMAPPS_ENTER_SLEEPEXIT, "APPS_ENTER_SLEEPEXIT" }, { DEMAPPS_END_APPS_SLEEP, "APPS_END_APPS_SLEEP" }, { DEMAPPS_SETUP_APPS_PWRCLPS, "APPS_SETUP_APPS_PWRCLPS" }, { DEMAPPS_PWRCLPS_EARLY_EXIT, 
"APPS_PWRCLPS_EARLY_EXIT" }, { DEMMOD_SEND_WAKEUP, "MOD_SEND_WAKEUP" }, { DEMMOD_NO_APPS_VOTE, "MOD_NO_APPS_VOTE" }, { DEMMOD_NO_TCXO_SLEEP, "MOD_NO_TCXO_SLEEP" }, { DEMMOD_BT_CLOCK, "MOD_BT_CLOCK" }, { DEMMOD_UART_CLOCK, "MOD_UART_CLOCK" }, { DEMMOD_OKTS, "MOD_OKTS" }, { DEM_SLEEP_INFO, "SLEEP_INFO" }, { DEMMOD_TCXO_END, "MOD_TCXO_END" }, { DEMMOD_END_SLEEP_SIG, "MOD_END_SLEEP_SIG" }, { DEMMOD_SETUP_APPSSLEEP, "MOD_SETUP_APPSSLEEP" }, { DEMMOD_ENTER_TCXO, "MOD_ENTER_TCXO" }, { DEMMOD_WAKE_APPS, "MOD_WAKE_APPS" }, { DEMMOD_POWER_COLLAPSE_APPS, "MOD_POWER_COLLAPSE_APPS" }, { DEMMOD_RESTORE_APPS_PWR, "MOD_RESTORE_APPS_PWR" }, { DEMAPPS_ASSERT_OKTS, "APPS_ASSERT_OKTS" }, { DEMAPPS_RESTART_START_TIMER, "APPS_RESTART_START_TIMER" }, { DEMAPPS_ENTER_RUN, "APPS_ENTER_RUN" }, { DEMMOD_MAO_INTS, "MOD_MAO_INTS" }, { DEMMOD_POWERUP_APPS_CALLED, "MOD_POWERUP_APPS_CALLED" }, { DEMMOD_PC_TIMER_EXPIRED, "MOD_PC_TIMER_EXPIRED" }, { DEM_DETECT_SLEEPEXIT, "_DETECT_SLEEPEXIT" }, { DEM_DETECT_RUN, "DETECT_RUN" }, { DEM_SET_APPS_TIMER, "SET_APPS_TIMER" }, { DEM_NEGATE_OKTS, "NEGATE_OKTS" }, { DEMMOD_APPS_WAKEUP_INT, "MOD_APPS_WAKEUP_INT" }, { DEMMOD_APPS_SWFI, "MOD_APPS_SWFI" }, { DEM_SEND_BATTERY_INFO, "SEND_BATTERY_INFO" }, { DEM_SMI_CLK_DISABLED, "SMI_CLK_DISABLED" }, { DEM_SMI_CLK_ENABLED, "SMI_CLK_ENABLED" }, { DEMAPPS_SETUP_APPS_SUSPEND, "APPS_SETUP_APPS_SUSPEND" }, { DEM_RPC_EARLY_EXIT, "RPC_EARLY_EXIT" }, { DEMAPPS_WAKEUP_REASON, "APPS_WAKEUP_REASON" }, { DEM_INIT, "INIT" }, #endif { DEMMOD_UMTS_BASE, "MOD_UMTS_BASE" }, { DEMMOD_GL1_GO_TO_SLEEP, "GL1_GO_TO_SLEEP" }, { DEMMOD_GL1_SLEEP_START, "GL1_SLEEP_START" }, { DEMMOD_GL1_AFTER_GSM_CLK_ON, "GL1_AFTER_GSM_CLK_ON" }, { DEMMOD_GL1_BEFORE_RF_ON, "GL1_BEFORE_RF_ON" }, { DEMMOD_GL1_AFTER_RF_ON, "GL1_AFTER_RF_ON" }, { DEMMOD_GL1_FRAME_TICK, "GL1_FRAME_TICK" }, { DEMMOD_GL1_WCDMA_START, "GL1_WCDMA_START" }, { DEMMOD_GL1_WCDMA_ENDING, "GL1_WCDMA_ENDING" }, { DEMMOD_UMTS_NOT_OKTS, "UMTS_NOT_OKTS" }, { DEMMOD_UMTS_START_TCXO_SHUTDOWN, 
"UMTS_START_TCXO_SHUTDOWN" }, { DEMMOD_UMTS_END_TCXO_SHUTDOWN, "UMTS_END_TCXO_SHUTDOWN" }, { DEMMOD_UMTS_START_ARM_HALT, "UMTS_START_ARM_HALT" }, { DEMMOD_UMTS_END_ARM_HALT, "UMTS_END_ARM_HALT" }, { DEMMOD_UMTS_NEXT_WAKEUP_SCLK, "UMTS_NEXT_WAKEUP_SCLK" }, { TIME_REMOTE_LOG_EVENT_START, "START" }, { TIME_REMOTE_LOG_EVENT_GOTO_WAIT, "GOTO_WAIT" }, { TIME_REMOTE_LOG_EVENT_GOTO_INIT, "GOTO_INIT" }, { ERR_ERROR_FATAL, "ERR_ERROR_FATAL" }, { ERR_ERROR_FATAL_TASK, "ERR_ERROR_FATAL_TASK" }, { DCVSAPPS_LOG_IDLE, "DCVSAPPS_LOG_IDLE" }, { DCVSAPPS_LOG_ERR, "DCVSAPPS_LOG_ERR" }, { DCVSAPPS_LOG_CHG, "DCVSAPPS_LOG_CHG" }, { DCVSAPPS_LOG_REG, "DCVSAPPS_LOG_REG" }, { DCVSAPPS_LOG_DEREG, "DCVSAPPS_LOG_DEREG" }, { SMEM_LOG_EVENT_CB, "CB" }, { SMEM_LOG_EVENT_START, "START" }, { SMEM_LOG_EVENT_INIT, "INIT" }, { SMEM_LOG_EVENT_RUNNING, "RUNNING" }, { SMEM_LOG_EVENT_STOP, "STOP" }, { SMEM_LOG_EVENT_RESTART, "RESTART" }, { SMEM_LOG_EVENT_SS, "SS" }, { SMEM_LOG_EVENT_READ, "READ" }, { SMEM_LOG_EVENT_WRITE, "WRITE" }, { SMEM_LOG_EVENT_SIGS1, "SIGS1" }, { SMEM_LOG_EVENT_SIGS2, "SIGS2" }, { SMEM_LOG_EVENT_WRITE_DM, "WRITE_DM" }, { SMEM_LOG_EVENT_READ_DM, "READ_DM" }, { SMEM_LOG_EVENT_SKIP_DM, "SKIP_DM" }, { SMEM_LOG_EVENT_STOP_DM, "STOP_DM" }, { SMEM_LOG_EVENT_ISR, "ISR" }, { SMEM_LOG_EVENT_TASK, "TASK" }, { SMEM_LOG_EVENT_RS, "RS" }, { ONCRPC_LOG_EVENT_SMD_WAIT, "SMD_WAIT" }, { ONCRPC_LOG_EVENT_RPC_WAIT, "RPC_WAIT" }, { ONCRPC_LOG_EVENT_RPC_BOTH_WAIT, "RPC_BOTH_WAIT" }, { ONCRPC_LOG_EVENT_RPC_INIT, "RPC_INIT" }, { ONCRPC_LOG_EVENT_RUNNING, "RUNNING" }, { ONCRPC_LOG_EVENT_APIS_INITED, "APIS_INITED" }, { ONCRPC_LOG_EVENT_AMSS_RESET, "AMSS_RESET" }, { ONCRPC_LOG_EVENT_SMD_RESET, "SMD_RESET" }, { ONCRPC_LOG_EVENT_ONCRPC_RESET, "ONCRPC_RESET" }, { ONCRPC_LOG_EVENT_CB, "CB" }, { ONCRPC_LOG_EVENT_STD_CALL, "STD_CALL" }, { ONCRPC_LOG_EVENT_STD_REPLY, "STD_REPLY" }, { ONCRPC_LOG_EVENT_STD_CALL_ASYNC, "STD_CALL_ASYNC" }, { NO_SLEEP_OLD, "NO_SLEEP_OLD" }, { INSUF_TIME, "INSUF_TIME" }, { 
MOD_UART_CLOCK, "MOD_UART_CLOCK" }, { SLEEP_INFO, "SLEEP_INFO" }, { MOD_TCXO_END, "MOD_TCXO_END" }, { MOD_ENTER_TCXO, "MOD_ENTER_TCXO" }, { NO_SLEEP_NEW, "NO_SLEEP_NEW" }, { RPC_ROUTER_LOG_EVENT_UNKNOWN, "UNKNOWN" }, { RPC_ROUTER_LOG_EVENT_MSG_READ, "MSG_READ" }, { RPC_ROUTER_LOG_EVENT_MSG_WRITTEN, "MSG_WRITTEN" }, { RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ, "MSG_CFM_REQ" }, { RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT, "MSG_CFM_SNT" }, { RPC_ROUTER_LOG_EVENT_MID_READ, "MID_READ" }, { RPC_ROUTER_LOG_EVENT_MID_WRITTEN, "MID_WRITTEN" }, { RPC_ROUTER_LOG_EVENT_MID_CFM_REQ, "MID_CFM_REQ" }, }; struct sym wakeup_syms[] = { { 0x00000040, "OTHER" }, { 0x00000020, "RESET" }, { 0x00000010, "ALARM" }, { 0x00000008, "TIMER" }, { 0x00000004, "GPIO" }, { 0x00000002, "INT" }, { 0x00000001, "RPC" }, { 0x00000000, "NONE" }, }; struct sym wakeup_int_syms[] = { { 0, "MDDI_EXT" }, { 1, "MDDI_PRI" }, { 2, "MDDI_CLIENT"}, { 3, "USB_OTG" }, { 4, "I2CC" }, { 5, "SDC1_0" }, { 6, "SDC1_1" }, { 7, "SDC2_0" }, { 8, "SDC2_1" }, { 9, "ADSP_A9A11" }, { 10, "UART1" }, { 11, "UART2" }, { 12, "UART3" }, { 13, "DP_RX_DATA" }, { 14, "DP_RX_DATA2" }, { 15, "DP_RX_DATA3" }, { 16, "DM_UART" }, { 17, "DM_DP_RX_DATA" }, { 18, "KEYSENSE" }, { 19, "HSSD" }, { 20, "NAND_WR_ER_DONE" }, { 21, "NAND_OP_DONE" }, { 22, "TCHSCRN1" }, { 23, "TCHSCRN2" }, { 24, "TCHSCRN_SSBI" }, { 25, "USB_HS" }, { 26, "UART2_DM_RX" }, { 27, "UART2_DM" }, { 28, "SDC4_1" }, { 29, "SDC4_0" }, { 30, "SDC3_1" }, { 31, "SDC3_0" }, }; struct sym smsm_syms[] = { { 0x80000000, "UN" }, { 0x7F000000, "ERR" }, { 0x00800000, "SMLP" }, { 0x00400000, "ADWN" }, { 0x00200000, "PWRS" }, { 0x00100000, "DWLD" }, { 0x00080000, "SRBT" }, { 0x00040000, "SDWN" }, { 0x00020000, "ARBT" }, { 0x00010000, "REL" }, { 0x00008000, "SLE" }, { 0x00004000, "SLP" }, { 0x00002000, "WFPI" }, { 0x00001000, "EEX" }, { 0x00000800, "TIN" }, { 0x00000400, "TWT" }, { 0x00000200, "PWRC" }, { 0x00000100, "RUN" }, { 0x00000080, "SA" }, { 0x00000040, "RES" }, { 0x00000020, "RIN" }, { 0x00000010, 
"RWT" }, { 0x00000008, "SIN" }, { 0x00000004, "SWT" }, { 0x00000002, "OE" }, { 0x00000001, "I" }, }; /* never reorder */ struct sym voter_d2_syms[] = { { 0x00000001, NULL }, { 0x00000002, NULL }, { 0x00000004, NULL }, { 0x00000008, NULL }, { 0x00000010, NULL }, { 0x00000020, NULL }, { 0x00000040, NULL }, { 0x00000080, NULL }, { 0x00000100, NULL }, { 0x00000200, NULL }, { 0x00000400, NULL }, { 0x00000800, NULL }, { 0x00001000, NULL }, { 0x00002000, NULL }, { 0x00004000, NULL }, { 0x00008000, NULL }, { 0x00010000, NULL }, { 0x00020000, NULL }, { 0x00040000, NULL }, { 0x00080000, NULL }, { 0x00100000, NULL }, { 0x00200000, NULL }, { 0x00400000, NULL }, { 0x00800000, NULL }, { 0x01000000, NULL }, { 0x02000000, NULL }, { 0x04000000, NULL }, { 0x08000000, NULL }, { 0x10000000, NULL }, { 0x20000000, NULL }, { 0x40000000, NULL }, { 0x80000000, NULL }, }; /* never reorder */ struct sym voter_d3_syms[] = { { 0x00000001, NULL }, { 0x00000002, NULL }, { 0x00000004, NULL }, { 0x00000008, NULL }, { 0x00000010, NULL }, { 0x00000020, NULL }, { 0x00000040, NULL }, { 0x00000080, NULL }, { 0x00000100, NULL }, { 0x00000200, NULL }, { 0x00000400, NULL }, { 0x00000800, NULL }, { 0x00001000, NULL }, { 0x00002000, NULL }, { 0x00004000, NULL }, { 0x00008000, NULL }, { 0x00010000, NULL }, { 0x00020000, NULL }, { 0x00040000, NULL }, { 0x00080000, NULL }, { 0x00100000, NULL }, { 0x00200000, NULL }, { 0x00400000, NULL }, { 0x00800000, NULL }, { 0x01000000, NULL }, { 0x02000000, NULL }, { 0x04000000, NULL }, { 0x08000000, NULL }, { 0x10000000, NULL }, { 0x20000000, NULL }, { 0x40000000, NULL }, { 0x80000000, NULL }, }; struct sym dem_state_master_syms[] = { { 0, "INIT" }, { 1, "RUN" }, { 2, "SLEEP_WAIT" }, { 3, "SLEEP_CONFIRMED" }, { 4, "SLEEP_EXIT" }, { 5, "RSA" }, { 6, "EARLY_EXIT" }, { 7, "RSA_DELAYED" }, { 8, "RSA_CHECK_INTS" }, { 9, "RSA_CONFIRMED" }, { 10, "RSA_WAKING" }, { 11, "RSA_RESTORE" }, { 12, "RESET" }, }; struct sym dem_state_slave_syms[] = { { 0, "INIT" }, { 1, "RUN" }, { 2, 
"SLEEP_WAIT" }, { 3, "SLEEP_EXIT" }, { 4, "SLEEP_RUN_PENDING" }, { 5, "POWER_COLLAPSE" }, { 6, "CHECK_INTERRUPTS" }, { 7, "SWFI" }, { 8, "WFPI" }, { 9, "EARLY_EXIT" }, { 10, "RESET_RECOVER" }, { 11, "RESET_ACKNOWLEDGE" }, { 12, "ERROR" }, }; struct sym smsm_entry_type_syms[] = { { 0, "SMSM_APPS_STATE" }, { 1, "SMSM_MODEM_STATE" }, { 2, "SMSM_Q6_STATE" }, { 3, "SMSM_APPS_DEM" }, { 4, "SMSM_MODEM_DEM" }, { 5, "SMSM_Q6_DEM" }, { 6, "SMSM_POWER_MASTER_DEM" }, { 7, "SMSM_TIME_MASTER_DEM" }, }; struct sym smsm_state_syms[] = { { 0x00000001, "INIT" }, { 0x00000002, "OSENTERED" }, { 0x00000004, "SMDWAIT" }, { 0x00000008, "SMDINIT" }, { 0x00000010, "RPCWAIT" }, { 0x00000020, "RPCINIT" }, { 0x00000040, "RESET" }, { 0x00000080, "RSA" }, { 0x00000100, "RUN" }, { 0x00000200, "PWRC" }, { 0x00000400, "TIMEWAIT" }, { 0x00000800, "TIMEINIT" }, { 0x00001000, "PWRC_EARLY_EXIT" }, { 0x00002000, "WFPI" }, { 0x00004000, "SLEEP" }, { 0x00008000, "SLEEPEXIT" }, { 0x00010000, "OEMSBL_RELEASE" }, { 0x00020000, "APPS_REBOOT" }, { 0x00040000, "SYSTEM_POWER_DOWN" }, { 0x00080000, "SYSTEM_REBOOT" }, { 0x00100000, "SYSTEM_DOWNLOAD" }, { 0x00200000, "PWRC_SUSPEND" }, { 0x00400000, "APPS_SHUTDOWN" }, { 0x00800000, "SMD_LOOPBACK" }, { 0x01000000, "RUN_QUIET" }, { 0x02000000, "MODEM_WAIT" }, { 0x04000000, "MODEM_BREAK" }, { 0x08000000, "MODEM_CONTINUE" }, { 0x80000000, "UNKNOWN" }, }; #define ID_SYM 0 #define BASE_SYM 1 #define EVENT_SYM 2 #define WAKEUP_SYM 3 #define WAKEUP_INT_SYM 4 #define SMSM_SYM 5 #define VOTER_D2_SYM 6 #define VOTER_D3_SYM 7 #define DEM_STATE_MASTER_SYM 8 #define DEM_STATE_SLAVE_SYM 9 #define SMSM_ENTRY_TYPE_SYM 10 #define SMSM_STATE_SYM 11 static struct sym_tbl { struct sym *data; int size; struct hlist_head hlist[HSIZE]; } tbl[] = { { id_syms, ARRAY_SIZE(id_syms) }, { base_syms, ARRAY_SIZE(base_syms) }, { event_syms, ARRAY_SIZE(event_syms) }, { wakeup_syms, ARRAY_SIZE(wakeup_syms) }, { wakeup_int_syms, ARRAY_SIZE(wakeup_int_syms) }, { smsm_syms, ARRAY_SIZE(smsm_syms) }, { 
voter_d2_syms, ARRAY_SIZE(voter_d2_syms) }, { voter_d3_syms, ARRAY_SIZE(voter_d3_syms) }, { dem_state_master_syms, ARRAY_SIZE(dem_state_master_syms) }, { dem_state_slave_syms, ARRAY_SIZE(dem_state_slave_syms) }, { smsm_entry_type_syms, ARRAY_SIZE(smsm_entry_type_syms) }, { smsm_state_syms, ARRAY_SIZE(smsm_state_syms) }, }; static void find_voters(void) { void *x, *next; unsigned size; int i = 0, j = 0; x = smem_get_entry(SMEM_SLEEP_STATIC, &size); next = x; while (next && (next < (x + size)) && ((i + j) < (ARRAY_SIZE(voter_d3_syms) + ARRAY_SIZE(voter_d2_syms)))) { if (i < ARRAY_SIZE(voter_d3_syms)) { voter_d3_syms[i].str = (char *) next; i++; } else if (i >= ARRAY_SIZE(voter_d3_syms) && j < ARRAY_SIZE(voter_d2_syms)) { voter_d2_syms[j].str = (char *) next; j++; } next += 9; } } #define hash(val) (val % HSIZE) static void init_syms(void) { int i; int j; for (i = 0; i < ARRAY_SIZE(tbl); ++i) for (j = 0; j < HSIZE; ++j) INIT_HLIST_HEAD(&tbl[i].hlist[j]); for (i = 0; i < ARRAY_SIZE(tbl); ++i) for (j = 0; j < tbl[i].size; ++j) { INIT_HLIST_NODE(&tbl[i].data[j].node); hlist_add_head(&tbl[i].data[j].node, &tbl[i].hlist[hash(tbl[i].data[j].val)]); } } static char *find_sym(uint32_t id, uint32_t val) { struct hlist_node *n; struct sym *s; hlist_for_each(n, &tbl[id].hlist[hash(val)]) { s = hlist_entry(n, struct sym, node); if (s->val == val) return s->str; } return 0; } #else static void init_syms(void) {} #endif static inline unsigned int read_timestamp(void) { unsigned int tick = 0; /* no barriers necessary as the read value is a dependency for the * comparison operation so the processor shouldn't be able to * reorder things */ do { tick = __raw_readl(TIMESTAMP_ADDR); } while (tick != __raw_readl(TIMESTAMP_ADDR)); return tick; } static void smem_log_event_from_user(struct smem_log_inst *inst, const char __user *buf, int size, int num) { uint32_t idx; uint32_t next_idx; unsigned long flags; uint32_t identifier = 0; uint32_t timetick = 0; int first = 1; int ret; 
remote_spin_lock_irqsave(inst->remote_spinlock, flags); while (num--) { idx = *inst->idx; if (idx < inst->num) { ret = copy_from_user(&inst->events[idx], buf, size); if (ret) { printk("ERROR %s:%i tried to write " "%i got ret %i", __func__, __LINE__, size, size - ret); goto out; } if (first) { identifier = inst->events[idx]. identifier; timetick = read_timestamp(); first = 0; } else { identifier |= SMEM_LOG_CONT; } inst->events[idx].identifier = identifier; inst->events[idx].timetick = timetick; } next_idx = idx + 1; if (next_idx >= inst->num) next_idx = 0; *inst->idx = next_idx; buf += sizeof(struct smem_log_item); } out: wmb(); remote_spin_unlock_irqrestore(inst->remote_spinlock, flags); } static void _smem_log_event( struct smem_log_item __iomem *events, uint32_t __iomem *_idx, remote_spinlock_t *lock, int num, uint32_t id, uint32_t data1, uint32_t data2, uint32_t data3) { struct smem_log_item item; uint32_t idx; uint32_t next_idx; unsigned long flags; item.timetick = read_timestamp(); item.identifier = id; item.data1 = data1; item.data2 = data2; item.data3 = data3; remote_spin_lock_irqsave(lock, flags); idx = *_idx; if (idx < num) { memcpy(&events[idx], &item, sizeof(item)); } next_idx = idx + 1; if (next_idx >= num) next_idx = 0; *_idx = next_idx; wmb(); remote_spin_unlock_irqrestore(lock, flags); } static void _smem_log_event6( struct smem_log_item __iomem *events, uint32_t __iomem *_idx, remote_spinlock_t *lock, int num, uint32_t id, uint32_t data1, uint32_t data2, uint32_t data3, uint32_t data4, uint32_t data5, uint32_t data6) { struct smem_log_item item[2]; uint32_t idx; uint32_t next_idx; unsigned long flags; item[0].timetick = read_timestamp(); item[0].identifier = id; item[0].data1 = data1; item[0].data2 = data2; item[0].data3 = data3; item[1].identifier = item[0].identifier; item[1].timetick = item[0].timetick; item[1].data1 = data4; item[1].data2 = data5; item[1].data3 = data6; remote_spin_lock_irqsave(lock, flags); idx = *_idx; /* FIXME: Wrap around 
*/ if (idx < (num-1)) { memcpy(&events[idx], &item, sizeof(item)); } next_idx = idx + 2; if (next_idx >= num) next_idx = 0; *_idx = next_idx; wmb(); remote_spin_unlock_irqrestore(lock, flags); } void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2, uint32_t data3) { if (smem_log_enable) _smem_log_event(inst[GEN].events, inst[GEN].idx, inst[GEN].remote_spinlock, SMEM_LOG_NUM_ENTRIES, id, data1, data2, data3); } void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2, uint32_t data3, uint32_t data4, uint32_t data5, uint32_t data6) { if (smem_log_enable) _smem_log_event6(inst[GEN].events, inst[GEN].idx, inst[GEN].remote_spinlock, SMEM_LOG_NUM_ENTRIES, id, data1, data2, data3, data4, data5, data6); } void smem_log_event_to_static(uint32_t id, uint32_t data1, uint32_t data2, uint32_t data3) { if (smem_log_enable) _smem_log_event(inst[STA].events, inst[STA].idx, inst[STA].remote_spinlock, SMEM_LOG_NUM_STATIC_ENTRIES, id, data1, data2, data3); } void smem_log_event6_to_static(uint32_t id, uint32_t data1, uint32_t data2, uint32_t data3, uint32_t data4, uint32_t data5, uint32_t data6) { if (smem_log_enable) _smem_log_event6(inst[STA].events, inst[STA].idx, inst[STA].remote_spinlock, SMEM_LOG_NUM_STATIC_ENTRIES, id, data1, data2, data3, data4, data5, data6); } static int _smem_log_init(void) { int ret; inst[GEN].which_log = GEN; inst[GEN].events = (struct smem_log_item *)smem_alloc(SMEM_SMEM_LOG_EVENTS, SMEM_LOG_EVENTS_SIZE); inst[GEN].idx = (uint32_t *)smem_alloc(SMEM_SMEM_LOG_IDX, sizeof(uint32_t)); if (!inst[GEN].events || !inst[GEN].idx) pr_info("%s: no log or log_idx allocated\n", __func__); inst[GEN].num = SMEM_LOG_NUM_ENTRIES; inst[GEN].read_idx = 0; inst[GEN].last_read_avail = SMEM_LOG_NUM_ENTRIES; init_waitqueue_head(&inst[GEN].read_wait); inst[GEN].remote_spinlock = &remote_spinlock; inst[STA].which_log = STA; inst[STA].events = (struct smem_log_item *) smem_alloc(SMEM_SMEM_STATIC_LOG_EVENTS, SMEM_STATIC_LOG_EVENTS_SIZE); inst[STA].idx = 
(uint32_t *)smem_alloc(SMEM_SMEM_STATIC_LOG_IDX, sizeof(uint32_t)); if (!inst[STA].events || !inst[STA].idx) pr_info("%s: no static log or log_idx allocated\n", __func__); inst[STA].num = SMEM_LOG_NUM_STATIC_ENTRIES; inst[STA].read_idx = 0; inst[STA].last_read_avail = SMEM_LOG_NUM_ENTRIES; init_waitqueue_head(&inst[STA].read_wait); inst[STA].remote_spinlock = &remote_spinlock_static; inst[POW].which_log = POW; inst[POW].events = (struct smem_log_item *) smem_alloc(SMEM_SMEM_LOG_POWER_EVENTS, SMEM_POWER_LOG_EVENTS_SIZE); inst[POW].idx = (uint32_t *)smem_alloc(SMEM_SMEM_LOG_POWER_IDX, sizeof(uint32_t)); if (!inst[POW].events || !inst[POW].idx) pr_info("%s: no power log or log_idx allocated\n", __func__); inst[POW].num = SMEM_LOG_NUM_POWER_ENTRIES; inst[POW].read_idx = 0; inst[POW].last_read_avail = SMEM_LOG_NUM_ENTRIES; init_waitqueue_head(&inst[POW].read_wait); inst[POW].remote_spinlock = &remote_spinlock; ret = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_LOG); if (ret) { mb(); return ret; } ret = remote_spin_lock_init(&remote_spinlock_static, SMEM_SPINLOCK_STATIC_LOG); if (ret) { mb(); return ret; } init_syms(); mb(); return 0; } static ssize_t smem_log_read_bin(struct file *fp, char __user *buf, size_t count, loff_t *pos) { int idx; int orig_idx; unsigned long flags; int ret; int tot_bytes = 0; struct smem_log_inst *local_inst; local_inst = fp->private_data; remote_spin_lock_irqsave(local_inst->remote_spinlock, flags); orig_idx = *local_inst->idx; idx = orig_idx; while (1) { idx--; if (idx < 0) idx = local_inst->num - 1; if (idx == orig_idx) { ret = tot_bytes; break; } if ((tot_bytes + sizeof(struct smem_log_item)) > count) { ret = tot_bytes; break; } ret = copy_to_user(buf, &local_inst->events[idx], sizeof(struct smem_log_item)); if (ret) { ret = -EIO; break; } tot_bytes += sizeof(struct smem_log_item); buf += sizeof(struct smem_log_item); } remote_spin_unlock_irqrestore(local_inst->remote_spinlock, flags); return ret; } static ssize_t 
smem_log_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { char loc_buf[128]; int i; int idx; int orig_idx; unsigned long flags; int ret; int tot_bytes = 0; struct smem_log_inst *inst; inst = fp->private_data; remote_spin_lock_irqsave(inst->remote_spinlock, flags); orig_idx = *inst->idx; idx = orig_idx; while (1) { idx--; if (idx < 0) idx = inst->num - 1; if (idx == orig_idx) { ret = tot_bytes; break; } i = scnprintf(loc_buf, 128, "0x%x 0x%x 0x%x 0x%x 0x%x\n", inst->events[idx].identifier, inst->events[idx].timetick, inst->events[idx].data1, inst->events[idx].data2, inst->events[idx].data3); if (i == 0) { ret = -EIO; break; } if ((tot_bytes + i) > count) { ret = tot_bytes; break; } tot_bytes += i; ret = copy_to_user(buf, loc_buf, i); if (ret) { ret = -EIO; break; } buf += i; } remote_spin_unlock_irqrestore(inst->remote_spinlock, flags); return ret; } static ssize_t smem_log_write_bin(struct file *fp, const char __user *buf, size_t count, loff_t *pos) { if (count < sizeof(struct smem_log_item)) return -EINVAL; if (smem_log_enable) smem_log_event_from_user(fp->private_data, buf, sizeof(struct smem_log_item), count / sizeof(struct smem_log_item)); return count; } static ssize_t smem_log_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos) { int ret; const char delimiters[] = " ,;"; char locbuf[256] = {0}; uint32_t val[10] = {0}; int vals = 0; char *token; char *running; struct smem_log_inst *inst; unsigned long res; inst = fp->private_data; count = count > 255 ? 
255 : count; if (!smem_log_enable) return count; locbuf[count] = '\0'; ret = copy_from_user(locbuf, buf, count); if (ret != 0) { printk(KERN_ERR "ERROR: %s could not copy %i bytes\n", __func__, ret); return -EINVAL; } D(KERN_ERR "%s: ", __func__); D_DUMP_BUFFER("We got", len, locbuf); running = locbuf; token = strsep(&running, delimiters); while (token && vals < ARRAY_SIZE(val)) { if (*token != '\0') { D(KERN_ERR "%s: ", __func__); D_DUMP_BUFFER("", strlen(token), token); ret = strict_strtoul(token, 0, &res); if (ret) { printk(KERN_ERR "ERROR: %s:%i got bad char " "at strict_strtoul\n", __func__, __LINE__-4); return -EINVAL; } val[vals++] = res; } token = strsep(&running, delimiters); } if (vals > 5) { if (inst->which_log == GEN) smem_log_event6(val[0], val[2], val[3], val[4], val[7], val[8], val[9]); else if (inst->which_log == STA) smem_log_event6_to_static(val[0], val[2], val[3], val[4], val[7], val[8], val[9]); else return -1; } else { if (inst->which_log == GEN) smem_log_event(val[0], val[2], val[3], val[4]); else if (inst->which_log == STA) smem_log_event_to_static(val[0], val[2], val[3], val[4]); else return -1; } return count; } static int smem_log_open(struct inode *ip, struct file *fp) { fp->private_data = &inst[GEN]; return 0; } static int smem_log_release(struct inode *ip, struct file *fp) { return 0; } static long smem_log_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); static const struct file_operations smem_log_fops = { .owner = THIS_MODULE, .read = smem_log_read, .write = smem_log_write, .open = smem_log_open, .release = smem_log_release, .unlocked_ioctl = smem_log_ioctl, }; static const struct file_operations smem_log_bin_fops = { .owner = THIS_MODULE, .read = smem_log_read_bin, .write = smem_log_write_bin, .open = smem_log_open, .release = smem_log_release, .unlocked_ioctl = smem_log_ioctl, }; static long smem_log_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { switch (cmd) { default: return -ENOTTY; case SMIOC_SETMODE: 
if (arg == SMIOC_TEXT) { D("%s set text mode\n", __func__); fp->f_op = &smem_log_fops; } else if (arg == SMIOC_BINARY) { D("%s set bin mode\n", __func__); fp->f_op = &smem_log_bin_fops; } else { return -EINVAL; } break; case SMIOC_SETLOG: if (arg == SMIOC_LOG) { if (inst[GEN].events) fp->private_data = &inst[GEN]; else return -ENODEV; } else if (arg == SMIOC_STATIC_LOG) { if (inst[STA].events) fp->private_data = &inst[STA]; else return -ENODEV; } else { return -EINVAL; } break; } return 0; } static struct miscdevice smem_log_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "smem_log", .fops = &smem_log_fops, }; #if defined(CONFIG_DEBUG_FS) #define SMEM_LOG_ITEM_PRINT_SIZE 160 #define EVENTS_PRINT_SIZE \ (SMEM_LOG_ITEM_PRINT_SIZE * SMEM_LOG_NUM_ENTRIES) static uint32_t smem_log_timeout_ms; module_param_named(timeout_ms, smem_log_timeout_ms, int, S_IRUGO | S_IWUSR | S_IWGRP); static int smem_log_debug_mask; module_param_named(debug_mask, smem_log_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define DBG(x...) 
do {\ if (smem_log_debug_mask) \ printk(KERN_DEBUG x);\ } while (0) static int update_read_avail(struct smem_log_inst *inst) { int curr_read_avail; unsigned long flags = 0; remote_spin_lock_irqsave(inst->remote_spinlock, flags); curr_read_avail = (*inst->idx - inst->read_idx); if (curr_read_avail < 0) curr_read_avail = inst->num - inst->read_idx + *inst->idx; DBG("%s: read = %d write = %d curr = %d last = %d\n", __func__, inst->read_idx, *inst->idx, curr_read_avail, inst->last_read_avail); if (curr_read_avail < inst->last_read_avail) { if (inst->last_read_avail != inst->num) pr_info("smem_log: skipping %d log entries\n", inst->last_read_avail); inst->read_idx = *inst->idx + 1; inst->last_read_avail = inst->num - 1; } else inst->last_read_avail = curr_read_avail; remote_spin_unlock_irqrestore(inst->remote_spinlock, flags); DBG("%s: read = %d write = %d curr = %d last = %d\n", __func__, inst->read_idx, *inst->idx, curr_read_avail, inst->last_read_avail); return inst->last_read_avail; } static int _debug_dump(int log, char *buf, int max, uint32_t cont) { unsigned int idx; int write_idx, read_avail = 0; unsigned long flags; int i = 0; if (!inst[log].events) return 0; if (cont && update_read_avail(&inst[log]) == 0) return 0; remote_spin_lock_irqsave(inst[log].remote_spinlock, flags); if (cont) { idx = inst[log].read_idx; write_idx = (inst[log].read_idx + inst[log].last_read_avail); if (write_idx >= inst[log].num) write_idx -= inst[log].num; } else { write_idx = *inst[log].idx; idx = (write_idx + 1); } DBG("%s: read %d write %d idx %d num %d\n", __func__, inst[log].read_idx, write_idx, idx, inst[log].num - 1); while ((max - i) > 50) { if ((inst[log].num - 1) < idx) idx = 0; if (idx == write_idx) break; if (inst[log].events[idx].identifier) { i += scnprintf(buf + i, max - i, "%08x %08x %08x %08x %08x\n", inst[log].events[idx].identifier, inst[log].events[idx].timetick, inst[log].events[idx].data1, inst[log].events[idx].data2, inst[log].events[idx].data3); } idx++; } if 
(cont) { inst[log].read_idx = idx; read_avail = (write_idx - inst[log].read_idx); if (read_avail < 0) read_avail = inst->num - inst->read_idx + write_idx; inst[log].last_read_avail = read_avail; } remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags); DBG("%s: read %d write %d idx %d num %d\n", __func__, inst[log].read_idx, write_idx, idx, inst[log].num); return i; } static int _debug_dump_voters(char *buf, int max) { int k, i = 0; find_voters(); i += scnprintf(buf + i, max - i, "Voters:\n"); for (k = 0; k < ARRAY_SIZE(voter_d3_syms); ++k) if (voter_d3_syms[k].str) i += scnprintf(buf + i, max - i, "%s ", voter_d3_syms[k].str); for (k = 0; k < ARRAY_SIZE(voter_d2_syms); ++k) if (voter_d2_syms[k].str) i += scnprintf(buf + i, max - i, "%s ", voter_d2_syms[k].str); i += scnprintf(buf + i, max - i, "\n"); return i; } static int _debug_dump_sym(int log, char *buf, int max, uint32_t cont) { unsigned int idx; int write_idx, read_avail = 0; unsigned long flags; int i = 0; char *proc; char *sub; char *id; const char *sym = NULL; uint32_t data[3]; uint32_t proc_val = 0; uint32_t sub_val = 0; uint32_t id_val = 0; uint32_t id_only_val = 0; uint32_t data1 = 0; uint32_t data2 = 0; uint32_t data3 = 0; if (!inst[log].events) return 0; find_voters(); if (cont && update_read_avail(&inst[log]) == 0) return 0; remote_spin_lock_irqsave(inst[log].remote_spinlock, flags); if (cont) { idx = inst[log].read_idx; write_idx = (inst[log].read_idx + inst[log].last_read_avail); if (write_idx >= inst[log].num) write_idx -= inst[log].num; } else { write_idx = *inst[log].idx; idx = (write_idx + 1); } DBG("%s: read %d write %d idx %d num %d\n", __func__, inst[log].read_idx, write_idx, idx, inst[log].num - 1); for (; (max - i) > SMEM_LOG_ITEM_PRINT_SIZE; idx++) { if (idx > (inst[log].num - 1)) idx = 0; if (idx == write_idx) break; if (idx < inst[log].num) { if (!inst[log].events[idx].identifier) continue; proc_val = PROC & inst[log].events[idx].identifier; sub_val = SUB & 
inst[log].events[idx].identifier; id_val = (SUB | ID) & inst[log].events[idx].identifier; id_only_val = ID & inst[log].events[idx].identifier; data1 = inst[log].events[idx].data1; data2 = inst[log].events[idx].data2; data3 = inst[log].events[idx].data3; if (!(proc_val & SMEM_LOG_CONT)) { i += scnprintf(buf + i, max - i, "\n"); proc = find_sym(ID_SYM, proc_val); if (proc) i += scnprintf(buf + i, max - i, "%4s: ", proc); else i += scnprintf(buf + i, max - i, "%04x: ", PROC & inst[log].events[idx]. identifier); i += scnprintf(buf + i, max - i, "%10u ", inst[log].events[idx].timetick); sub = find_sym(BASE_SYM, sub_val); if (sub) i += scnprintf(buf + i, max - i, "%9s: ", sub); else i += scnprintf(buf + i, max - i, "%08x: ", sub_val); id = find_sym(EVENT_SYM, id_val); if (id) i += scnprintf(buf + i, max - i, "%11s: ", id); else i += scnprintf(buf + i, max - i, "%08x: ", id_only_val); } if ((proc_val & SMEM_LOG_CONT) && (id_val == ONCRPC_LOG_EVENT_STD_CALL || id_val == ONCRPC_LOG_EVENT_STD_REPLY)) { data[0] = data1; data[1] = data2; data[2] = data3; i += scnprintf(buf + i, max - i, " %.16s", (char *) data); } else if (proc_val & SMEM_LOG_CONT) { i += scnprintf(buf + i, max - i, " %08x %08x %08x", data1, data2, data3); } else if (id_val == ONCRPC_LOG_EVENT_STD_CALL) { sym = smd_rpc_get_sym(data2); if (sym) i += scnprintf(buf + i, max - i, "xid:%4i %8s proc:%3i", data1, sym, data3); else i += scnprintf(buf + i, max - i, "xid:%4i %08x proc:%3i", data1, data2, data3); #if defined(CONFIG_MSM_N_WAY_SMSM) } else if (id_val == DEM_STATE_CHANGE) { if (data1 == 1) { i += scnprintf(buf + i, max - i, "MASTER: "); sym = find_sym(DEM_STATE_MASTER_SYM, data2); } else if (data1 == 0) { i += scnprintf(buf + i, max - i, " SLAVE: "); sym = find_sym(DEM_STATE_SLAVE_SYM, data2); } else { i += scnprintf(buf + i, max - i, "%x: ", data1); sym = NULL; } if (sym) i += scnprintf(buf + i, max - i, "from:%s ", sym); else i += scnprintf(buf + i, max - i, "from:0x%x ", data2); if (data1 == 1) sym = 
find_sym(DEM_STATE_MASTER_SYM, data3); else if (data1 == 0) sym = find_sym(DEM_STATE_SLAVE_SYM, data3); else sym = NULL; if (sym) i += scnprintf(buf + i, max - i, "to:%s ", sym); else i += scnprintf(buf + i, max - i, "to:0x%x ", data3); } else if (id_val == DEM_STATE_MACHINE_ENTER) { i += scnprintf(buf + i, max - i, "swfi:%i timer:%i manexit:%i", data1, data2, data3); } else if (id_val == DEM_TIME_SYNC_REQUEST || id_val == DEM_TIME_SYNC_POLL || id_val == DEM_TIME_SYNC_INIT) { sym = find_sym(SMSM_ENTRY_TYPE_SYM, data1); if (sym) i += scnprintf(buf + i, max - i, "hostid:%s", sym); else i += scnprintf(buf + i, max - i, "hostid:%x", data1); } else if (id_val == DEM_TIME_SYNC_START || id_val == DEM_TIME_SYNC_SEND_VALUE) { unsigned mask = 0x1; unsigned tmp = 0; if (id_val == DEM_TIME_SYNC_START) i += scnprintf(buf + i, max - i, "req:"); else i += scnprintf(buf + i, max - i, "pol:"); while (mask) { if (mask & data1) { sym = find_sym( SMSM_ENTRY_TYPE_SYM, tmp); if (sym) i += scnprintf(buf + i, max - i, "%s ", sym); else i += scnprintf(buf + i, max - i, "%i ", tmp); } mask <<= 1; tmp++; } if (id_val == DEM_TIME_SYNC_SEND_VALUE) i += scnprintf(buf + i, max - i, "tick:%x", data2); } else if (id_val == DEM_SMSM_ISR) { unsigned vals[] = {data2, data3}; unsigned j; unsigned mask; unsigned tmp; unsigned once; sym = find_sym(SMSM_ENTRY_TYPE_SYM, data1); if (sym) i += scnprintf(buf + i, max - i, "%s ", sym); else i += scnprintf(buf + i, max - i, "%x ", data1); for (j = 0; j < ARRAY_SIZE(vals); ++j) { i += scnprintf(buf + i, max - i, "["); mask = 0x80000000; once = 0; while (mask) { tmp = vals[j] & mask; mask >>= 1; if (!tmp) continue; sym = find_sym(SMSM_STATE_SYM, tmp); if (once) i += scnprintf(buf + i, max - i, " "); if (sym) i += scnprintf(buf + i, max - i, "%s", sym); else i += scnprintf(buf + i, max - i, "0x%x", tmp); once = 1; } i += scnprintf(buf + i, max - i, "] "); } #else } else if (id_val == DEMAPPS_WAKEUP_REASON) { unsigned mask = 0x80000000; unsigned tmp = 0; while 
(mask) { tmp = data1 & mask; mask >>= 1; if (!tmp) continue; sym = find_sym(WAKEUP_SYM, tmp); if (sym) i += scnprintf(buf + i, max - i, "%s ", sym); else i += scnprintf(buf + i, max - i, "%08x ", tmp); } i += scnprintf(buf + i, max - i, "%08x %08x", data2, data3); } else if (id_val == DEMMOD_APPS_WAKEUP_INT) { sym = find_sym(WAKEUP_INT_SYM, data1); if (sym) i += scnprintf(buf + i, max - i, "%s %08x %08x", sym, data2, data3); else i += scnprintf(buf + i, max - i, "%08x %08x %08x", data1, data2, data3); } else if (id_val == DEM_NO_SLEEP || id_val == NO_SLEEP_NEW) { unsigned vals[] = {data3, data2}; unsigned j; unsigned mask; unsigned tmp; unsigned once; i += scnprintf(buf + i, max - i, "%08x ", data1); i += scnprintf(buf + i, max - i, "["); once = 0; for (j = 0; j < ARRAY_SIZE(vals); ++j) { mask = 0x00000001; while (mask) { tmp = vals[j] & mask; mask <<= 1; if (!tmp) continue; if (j == 0) sym = find_sym( VOTER_D3_SYM, tmp); else sym = find_sym( VOTER_D2_SYM, tmp); if (once) i += scnprintf(buf + i, max - i, " "); if (sym) i += scnprintf(buf + i, max - i, "%s", sym); else i += scnprintf(buf + i, max - i, "%08x", tmp); once = 1; } } i += scnprintf(buf + i, max - i, "] "); #endif } else if (id_val == SMEM_LOG_EVENT_CB) { unsigned vals[] = {data2, data3}; unsigned j; unsigned mask; unsigned tmp; unsigned once; i += scnprintf(buf + i, max - i, "%08x ", data1); for (j = 0; j < ARRAY_SIZE(vals); ++j) { i += scnprintf(buf + i, max - i, "["); mask = 0x80000000; once = 0; while (mask) { tmp = vals[j] & mask; mask >>= 1; if (!tmp) continue; sym = find_sym(SMSM_SYM, tmp); if (once) i += scnprintf(buf + i, max - i, " "); if (sym) i += scnprintf(buf + i, max - i, "%s", sym); else i += scnprintf(buf + i, max - i, "%08x", tmp); once = 1; } i += scnprintf(buf + i, max - i, "] "); } } else { i += scnprintf(buf + i, max - i, "%08x %08x %08x", data1, data2, data3); } } } if (cont) { inst[log].read_idx = idx; read_avail = (write_idx - inst[log].read_idx); if (read_avail < 0) read_avail = 
inst->num - inst->read_idx + write_idx; inst[log].last_read_avail = read_avail; } remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags); DBG("%s: read %d write %d idx %d num %d\n", __func__, inst[log].read_idx, write_idx, idx, inst[log].num); return i; } static int debug_dump(char *buf, int max, uint32_t cont) { int r; while (cont) { update_read_avail(&inst[GEN]); r = wait_event_interruptible_timeout(inst[GEN].read_wait, inst[GEN].last_read_avail, smem_log_timeout_ms * HZ / 1000); DBG("%s: read available %d\n", __func__, inst[GEN].last_read_avail); if (r < 0) return 0; else if (inst[GEN].last_read_avail) break; } return _debug_dump(GEN, buf, max, cont); } static int debug_dump_sym(char *buf, int max, uint32_t cont) { int r; while (cont) { update_read_avail(&inst[GEN]); r = wait_event_interruptible_timeout(inst[GEN].read_wait, inst[GEN].last_read_avail, smem_log_timeout_ms * HZ / 1000); DBG("%s: readavailable %d\n", __func__, inst[GEN].last_read_avail); if (r < 0) return 0; else if (inst[GEN].last_read_avail) break; } return _debug_dump_sym(GEN, buf, max, cont); } static int debug_dump_static(char *buf, int max, uint32_t cont) { int r; while (cont) { update_read_avail(&inst[STA]); r = wait_event_interruptible_timeout(inst[STA].read_wait, inst[STA].last_read_avail, smem_log_timeout_ms * HZ / 1000); DBG("%s: readavailable %d\n", __func__, inst[STA].last_read_avail); if (r < 0) return 0; else if (inst[STA].last_read_avail) break; } return _debug_dump(STA, buf, max, cont); } static int debug_dump_static_sym(char *buf, int max, uint32_t cont) { int r; while (cont) { update_read_avail(&inst[STA]); r = wait_event_interruptible_timeout(inst[STA].read_wait, inst[STA].last_read_avail, smem_log_timeout_ms * HZ / 1000); DBG("%s: readavailable %d\n", __func__, inst[STA].last_read_avail); if (r < 0) return 0; else if (inst[STA].last_read_avail) break; } return _debug_dump_sym(STA, buf, max, cont); } static int debug_dump_power(char *buf, int max, uint32_t cont) { int r; 
while (cont) { update_read_avail(&inst[POW]); r = wait_event_interruptible_timeout(inst[POW].read_wait, inst[POW].last_read_avail, smem_log_timeout_ms * HZ / 1000); DBG("%s: readavailable %d\n", __func__, inst[POW].last_read_avail); if (r < 0) return 0; else if (inst[POW].last_read_avail) break; } return _debug_dump(POW, buf, max, cont); } static int debug_dump_power_sym(char *buf, int max, uint32_t cont) { int r; while (cont) { update_read_avail(&inst[POW]); r = wait_event_interruptible_timeout(inst[POW].read_wait, inst[POW].last_read_avail, smem_log_timeout_ms * HZ / 1000); DBG("%s: readavailable %d\n", __func__, inst[POW].last_read_avail); if (r < 0) return 0; else if (inst[POW].last_read_avail) break; } return _debug_dump_sym(POW, buf, max, cont); } static int debug_dump_voters(char *buf, int max, uint32_t cont) { return _debug_dump_voters(buf, max); } static char debug_buffer[EVENTS_PRINT_SIZE]; static ssize_t debug_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int r; static int bsize; int (*fill)(char *, int, uint32_t) = file->private_data; if (!(*ppos)) bsize = fill(debug_buffer, EVENTS_PRINT_SIZE, 0); DBG("%s: count %d ppos %d\n", __func__, count, (unsigned int)*ppos); r = simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize); return r; } static ssize_t debug_read_cont(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int (*fill)(char *, int, uint32_t) = file->private_data; char *buffer = kmalloc(count, GFP_KERNEL); int bsize; if (!buffer) return -ENOMEM; bsize = fill(buffer, count, 1); DBG("%s: count %d bsize %d\n", __func__, count, bsize); if (copy_to_user(buf, buffer, bsize)) { kfree(buffer); return -EFAULT; } kfree(buffer); return bsize; } static int debug_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static const struct file_operations debug_ops = { .read = debug_read, .open = debug_open, }; static const struct file_operations debug_ops_cont = { 
.read = debug_read_cont, .open = debug_open, }; static void debug_create(const char *name, mode_t mode, struct dentry *dent, int (*fill)(char *buf, int max, uint32_t cont), const struct file_operations *fops) { debugfs_create_file(name, mode, dent, fill, fops); } static void smem_log_debugfs_init(void) { struct dentry *dent; dent = debugfs_create_dir("smem_log", 0); if (IS_ERR(dent)) return; debug_create("dump", 0444, dent, debug_dump, &debug_ops); debug_create("dump_sym", 0444, dent, debug_dump_sym, &debug_ops); debug_create("dump_static", 0444, dent, debug_dump_static, &debug_ops); debug_create("dump_static_sym", 0444, dent, debug_dump_static_sym, &debug_ops); debug_create("dump_power", 0444, dent, debug_dump_power, &debug_ops); debug_create("dump_power_sym", 0444, dent, debug_dump_power_sym, &debug_ops); debug_create("dump_voters", 0444, dent, debug_dump_voters, &debug_ops); debug_create("dump_cont", 0444, dent, debug_dump, &debug_ops_cont); debug_create("dump_sym_cont", 0444, dent, debug_dump_sym, &debug_ops_cont); debug_create("dump_static_cont", 0444, dent, debug_dump_static, &debug_ops_cont); debug_create("dump_static_sym_cont", 0444, dent, debug_dump_static_sym, &debug_ops_cont); debug_create("dump_power_cont", 0444, dent, debug_dump_power, &debug_ops_cont); debug_create("dump_power_sym_cont", 0444, dent, debug_dump_power_sym, &debug_ops_cont); smem_log_timeout_ms = 500; smem_log_debug_mask = 0; } #else static void smem_log_debugfs_init(void) {} #endif static int smem_log_initialize(void) { int ret; ret = _smem_log_init(); if (ret < 0) { pr_err("%s: init failed %d\n", __func__, ret); return ret; } ret = misc_register(&smem_log_dev); if (ret < 0) { pr_err("%s: device register failed %d\n", __func__, ret); return ret; } smem_log_enable = 1; smem_log_initialized = 1; smem_log_debugfs_init(); return ret; } static int modem_notifier(struct notifier_block *this, unsigned long code, void *_cmd) { switch (code) { case MODEM_NOTIFIER_SMSM_INIT: if 
(!smem_log_initialized) smem_log_initialize(); break; default: break; } return NOTIFY_DONE; } static struct notifier_block nb = { .notifier_call = modem_notifier, }; static int __init smem_log_init(void) { return modem_register_notifier(&nb); } module_init(smem_log_init); MODULE_DESCRIPTION("smem log"); MODULE_LICENSE("GPL v2");
gpl-2.0
ExPeacer/Xperia-2011-Kernel
fs/btrfs/acl.c
339
6976
/* * Copyright (C) 2007 Red Hat. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/string.h> #include <linux/xattr.h> #include <linux/posix_acl_xattr.h> #include <linux/posix_acl.h> #include <linux/sched.h> #include "ctree.h" #include "btrfs_inode.h" #include "xattr.h" #ifdef CONFIG_BTRFS_FS_POSIX_ACL static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) { int size; const char *name; char *value = NULL; struct posix_acl *acl; acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; switch (type) { case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; break; case ACL_TYPE_DEFAULT: name = POSIX_ACL_XATTR_DEFAULT; break; default: BUG(); } size = __btrfs_getxattr(inode, name, "", 0); if (size > 0) { value = kzalloc(size, GFP_NOFS); if (!value) return ERR_PTR(-ENOMEM); size = __btrfs_getxattr(inode, name, value, size); if (size > 0) { acl = posix_acl_from_xattr(value, size); set_cached_acl(inode, type, acl); } kfree(value); } else if (size == -ENOENT || size == -ENODATA || size == 0) { /* FIXME, who returns -ENOENT? 
I think nobody */ acl = NULL; set_cached_acl(inode, type, acl); } else { acl = ERR_PTR(-EIO); } return acl; } static int btrfs_xattr_get_acl(struct inode *inode, int type, void *value, size_t size) { struct posix_acl *acl; int ret = 0; acl = btrfs_get_acl(inode, type); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl == NULL) return -ENODATA; ret = posix_acl_to_xattr(acl, value, size); posix_acl_release(acl); return ret; } /* * Needs to be called with fs_mutex held */ static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int ret, size = 0; const char *name; char *value = NULL; mode_t mode; if (acl) { ret = posix_acl_valid(acl); if (ret < 0) return ret; ret = 0; } switch (type) { case ACL_TYPE_ACCESS: mode = inode->i_mode; ret = posix_acl_equiv_mode(acl, &mode); if (ret < 0) return ret; ret = 0; inode->i_mode = mode; name = POSIX_ACL_XATTR_ACCESS; break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) return acl ? -EINVAL : 0; name = POSIX_ACL_XATTR_DEFAULT; break; default: return -EINVAL; } if (acl) { size = posix_acl_xattr_size(acl->a_count); value = kmalloc(size, GFP_NOFS); if (!value) { ret = -ENOMEM; goto out; } ret = posix_acl_to_xattr(acl, value, size); if (ret < 0) goto out; } ret = __btrfs_setxattr(inode, name, value, size, 0); out: kfree(value); if (!ret) set_cached_acl(inode, type, acl); return ret; } static int btrfs_xattr_set_acl(struct inode *inode, int type, const void *value, size_t size) { int ret = 0; struct posix_acl *acl = NULL; if (value) { acl = posix_acl_from_xattr(value, size); if (acl == NULL) { value = NULL; size = 0; } else if (IS_ERR(acl)) { return PTR_ERR(acl); } } ret = btrfs_set_acl(inode, acl, type); posix_acl_release(acl); return ret; } static int btrfs_xattr_acl_access_get(struct inode *inode, const char *name, void *value, size_t size) { return btrfs_xattr_get_acl(inode, ACL_TYPE_ACCESS, value, size); } static int btrfs_xattr_acl_access_set(struct inode *inode, const char *name, const void *value, size_t 
size, int flags) { return btrfs_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size); } static int btrfs_xattr_acl_default_get(struct inode *inode, const char *name, void *value, size_t size) { return btrfs_xattr_get_acl(inode, ACL_TYPE_DEFAULT, value, size); } static int btrfs_xattr_acl_default_set(struct inode *inode, const char *name, const void *value, size_t size, int flags) { return btrfs_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size); } int btrfs_check_acl(struct inode *inode, int mask) { struct posix_acl *acl; int error = -EAGAIN; acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { error = posix_acl_permission(inode, acl, mask); posix_acl_release(acl); } return error; } /* * btrfs_init_acl is already generally called under fs_mutex, so the locking * stuff has been fixed to work with that. If the locking stuff changes, we * need to re-evaluate the acl locking stuff. */ int btrfs_init_acl(struct inode *inode, struct inode *dir) { struct posix_acl *acl = NULL; int ret = 0; /* this happens with subvols */ if (!dir) return 0; if (!S_ISLNK(inode->i_mode)) { if (IS_POSIXACL(dir)) { acl = btrfs_get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(acl)) return PTR_ERR(acl); } if (!acl) inode->i_mode &= ~current_umask(); } if (IS_POSIXACL(dir) && acl) { struct posix_acl *clone; mode_t mode; if (S_ISDIR(inode->i_mode)) { ret = btrfs_set_acl(inode, acl, ACL_TYPE_DEFAULT); if (ret) goto failed; } clone = posix_acl_clone(acl, GFP_NOFS); ret = -ENOMEM; if (!clone) goto failed; mode = inode->i_mode; ret = posix_acl_create_masq(clone, &mode); if (ret >= 0) { inode->i_mode = mode; if (ret > 0) { /* we need an acl */ ret = btrfs_set_acl(inode, clone, ACL_TYPE_ACCESS); } } } failed: posix_acl_release(acl); return ret; } int btrfs_acl_chmod(struct inode *inode) { struct posix_acl *acl, *clone; int ret = 0; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; if (!IS_POSIXACL(inode)) return 0; acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); if 
(IS_ERR(acl) || !acl) return PTR_ERR(acl); clone = posix_acl_clone(acl, GFP_KERNEL); posix_acl_release(acl); if (!clone) return -ENOMEM; ret = posix_acl_chmod_masq(clone, inode->i_mode); if (!ret) ret = btrfs_set_acl(inode, clone, ACL_TYPE_ACCESS); posix_acl_release(clone); return ret; } struct xattr_handler btrfs_xattr_acl_default_handler = { .prefix = POSIX_ACL_XATTR_DEFAULT, .get = btrfs_xattr_acl_default_get, .set = btrfs_xattr_acl_default_set, }; struct xattr_handler btrfs_xattr_acl_access_handler = { .prefix = POSIX_ACL_XATTR_ACCESS, .get = btrfs_xattr_acl_access_get, .set = btrfs_xattr_acl_access_set, }; #else /* CONFIG_BTRFS_FS_POSIX_ACL */ int btrfs_acl_chmod(struct inode *inode) { return 0; } int btrfs_init_acl(struct inode *inode, struct inode *dir) { return 0; } #endif /* CONFIG_BTRFS_FS_POSIX_ACL */
gpl-2.0
Split-Screen/android_kernel_yu_msm8916
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
2131
53576
/* * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c * * Samsung MFC (Multi Function Codec - FIMV) driver * This file contains hw related functions. * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/delay.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/firmware.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/dma-mapping.h> #include <asm/cacheflush.h> #include "s5p_mfc_common.h" #include "s5p_mfc_cmd.h" #include "s5p_mfc_intr.h" #include "s5p_mfc_pm.h" #include "s5p_mfc_debug.h" #include "s5p_mfc_opr.h" #include "s5p_mfc_opr_v6.h" /* #define S5P_MFC_DEBUG_REGWRITE */ #ifdef S5P_MFC_DEBUG_REGWRITE #undef writel #define writel(v, r) \ do { \ pr_err("MFCWRITE(%p): %08x\n", r, (unsigned int)v); \ __raw_writel(v, r); \ } while (0) #endif /* S5P_MFC_DEBUG_REGWRITE */ #define READL(offset) readl(dev->regs_base + (offset)) #define WRITEL(data, offset) writel((data), dev->regs_base + (offset)) #define OFFSETA(x) (((x) - dev->port_a) >> S5P_FIMV_MEM_OFFSET) #define OFFSETB(x) (((x) - dev->port_b) >> S5P_FIMV_MEM_OFFSET) /* Allocate temporary buffers for decoding */ static int s5p_mfc_alloc_dec_temp_buffers_v6(struct s5p_mfc_ctx *ctx) { /* NOP */ return 0; } /* Release temproary buffers for decoding */ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx) { /* NOP */ } /* Allocate codec buffers */ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned int mb_width, mb_height; int ret; mb_width = MB_WIDTH(ctx->img_width); mb_height = MB_HEIGHT(ctx->img_height); if (ctx->type == MFCINST_DECODER) { mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n", ctx->luma_size, ctx->chroma_size, ctx->mv_size); 
mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count); } else if (ctx->type == MFCINST_ENCODER) { ctx->tmv_buffer_size = S5P_FIMV_NUM_TMV_BUFFERS_V6 * ALIGN(S5P_FIMV_TMV_BUFFER_SIZE_V6(mb_width, mb_height), S5P_FIMV_TMV_BUFFER_ALIGN_V6); ctx->luma_dpb_size = ALIGN((mb_width * mb_height) * S5P_FIMV_LUMA_MB_TO_PIXEL_V6, S5P_FIMV_LUMA_DPB_BUFFER_ALIGN_V6); ctx->chroma_dpb_size = ALIGN((mb_width * mb_height) * S5P_FIMV_CHROMA_MB_TO_PIXEL_V6, S5P_FIMV_CHROMA_DPB_BUFFER_ALIGN_V6); ctx->me_buffer_size = ALIGN(S5P_FIMV_ME_BUFFER_SIZE_V6( ctx->img_width, ctx->img_height, mb_width, mb_height), S5P_FIMV_ME_BUFFER_ALIGN_V6); mfc_debug(2, "recon luma size: %d chroma size: %d\n", ctx->luma_dpb_size, ctx->chroma_dpb_size); } else { return -EINVAL; } /* Codecs have different memory requirements */ switch (ctx->codec_mode) { case S5P_MFC_CODEC_H264_DEC: case S5P_MFC_CODEC_H264_MVC_DEC: ctx->scratch_buf_size = S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V6( mb_width, mb_height); ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size + (ctx->mv_count * ctx->mv_size); break; case S5P_MFC_CODEC_MPEG4_DEC: ctx->scratch_buf_size = S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6( mb_width, mb_height); ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size; break; case S5P_MFC_CODEC_VC1RCV_DEC: case S5P_MFC_CODEC_VC1_DEC: ctx->scratch_buf_size = S5P_FIMV_SCRATCH_BUF_SIZE_VC1_DEC_V6( mb_width, mb_height); ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size; break; case S5P_MFC_CODEC_MPEG2_DEC: ctx->bank1.size = 0; ctx->bank2.size = 0; break; case S5P_MFC_CODEC_H263_DEC: ctx->scratch_buf_size = S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6( mb_width, mb_height); ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size; break; 
case S5P_MFC_CODEC_VP8_DEC: ctx->scratch_buf_size = S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V6( mb_width, mb_height); ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size; break; case S5P_MFC_CODEC_H264_ENC: ctx->scratch_buf_size = S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V6( mb_width, mb_height); ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size + ctx->tmv_buffer_size + (ctx->pb_count * (ctx->luma_dpb_size + ctx->chroma_dpb_size + ctx->me_buffer_size)); ctx->bank2.size = 0; break; case S5P_MFC_CODEC_MPEG4_ENC: case S5P_MFC_CODEC_H263_ENC: ctx->scratch_buf_size = S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_ENC_V6( mb_width, mb_height); ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size + ctx->tmv_buffer_size + (ctx->pb_count * (ctx->luma_dpb_size + ctx->chroma_dpb_size + ctx->me_buffer_size)); ctx->bank2.size = 0; break; default: break; } /* Allocate only if memory from bank 1 is necessary */ if (ctx->bank1.size > 0) { ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1); if (ret) { mfc_err("Failed to allocate Bank1 memory\n"); return ret; } BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); } return 0; } /* Release buffers allocated for codec */ static void s5p_mfc_release_codec_buffers_v6(struct s5p_mfc_ctx *ctx) { s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1); } /* Allocate memory for instance data buffer */ static int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv; int ret; mfc_debug_enter(); switch (ctx->codec_mode) { case S5P_MFC_CODEC_H264_DEC: case S5P_MFC_CODEC_H264_MVC_DEC: ctx->ctx.size = buf_size->h264_dec_ctx; break; case S5P_MFC_CODEC_MPEG4_DEC: case S5P_MFC_CODEC_H263_DEC: case S5P_MFC_CODEC_VC1RCV_DEC: 
case S5P_MFC_CODEC_VC1_DEC: case S5P_MFC_CODEC_MPEG2_DEC: case S5P_MFC_CODEC_VP8_DEC: ctx->ctx.size = buf_size->other_dec_ctx; break; case S5P_MFC_CODEC_H264_ENC: ctx->ctx.size = buf_size->h264_enc_ctx; break; case S5P_MFC_CODEC_MPEG4_ENC: case S5P_MFC_CODEC_H263_ENC: ctx->ctx.size = buf_size->other_enc_ctx; break; default: ctx->ctx.size = 0; mfc_err("Codec type(%d) should be checked!\n", ctx->codec_mode); break; } ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx); if (ret) { mfc_err("Failed to allocate instance buffer\n"); return ret; } memset(ctx->ctx.virt, 0, ctx->ctx.size); wmb(); mfc_debug_leave(); return 0; } /* Release instance buffer */ static void s5p_mfc_release_instance_buffer_v6(struct s5p_mfc_ctx *ctx) { s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx); } /* Allocate context buffers for SYS_INIT */ static int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev) { struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv; int ret; mfc_debug_enter(); dev->ctx_buf.size = buf_size->dev_ctx; ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &dev->ctx_buf); if (ret) { mfc_err("Failed to allocate device context buffer\n"); return ret; } memset(dev->ctx_buf.virt, 0, buf_size->dev_ctx); wmb(); mfc_debug_leave(); return 0; } /* Release context buffers for SYS_INIT */ static void s5p_mfc_release_dev_context_buffer_v6(struct s5p_mfc_dev *dev) { s5p_mfc_release_priv_buf(dev->mem_dev_l, &dev->ctx_buf); } static int calc_plane(int width, int height) { int mbX, mbY; mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW_V6); mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL_V6); if (width * height < S5P_FIMV_MAX_FRAME_SIZE_V6) mbY = (mbY + 1) / 2 * 2; return (mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL_V6) * (mbY * S5P_FIMV_NUM_PIXELS_IN_MB_ROW_V6); } static void s5p_mfc_dec_calc_dpb_size_v6(struct s5p_mfc_ctx *ctx) { ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN_V6); ctx->buf_height = ALIGN(ctx->img_height, 
S5P_FIMV_NV12MT_VALIGN_V6); mfc_debug(2, "SEQ Done: Movie dimensions %dx%d,\n" "buffer dimensions: %dx%d\n", ctx->img_width, ctx->img_height, ctx->buf_width, ctx->buf_height); ctx->luma_size = calc_plane(ctx->img_width, ctx->img_height); ctx->chroma_size = calc_plane(ctx->img_width, (ctx->img_height >> 1)); if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC || ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) { ctx->mv_size = S5P_MFC_DEC_MV_SIZE_V6(ctx->img_width, ctx->img_height); ctx->mv_size = ALIGN(ctx->mv_size, 16); } else { ctx->mv_size = 0; } } static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx) { unsigned int mb_width, mb_height; mb_width = MB_WIDTH(ctx->img_width); mb_height = MB_HEIGHT(ctx->img_height); ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6); ctx->luma_size = ALIGN((mb_width * mb_height) * 256, 256); ctx->chroma_size = ALIGN((mb_width * mb_height) * 128, 256); } /* Set registers for decoding stream buffer */ static int s5p_mfc_set_dec_stream_buffer_v6(struct s5p_mfc_ctx *ctx, int buf_addr, unsigned int start_num_byte, unsigned int strm_size) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size; mfc_debug_enter(); mfc_debug(2, "inst_no: %d, buf_addr: 0x%08x,\n" "buf_size: 0x%08x (%d)\n", ctx->inst_no, buf_addr, strm_size, strm_size); WRITEL(strm_size, S5P_FIMV_D_STREAM_DATA_SIZE_V6); WRITEL(buf_addr, S5P_FIMV_D_CPB_BUFFER_ADDR_V6); WRITEL(buf_size->cpb, S5P_FIMV_D_CPB_BUFFER_SIZE_V6); WRITEL(start_num_byte, S5P_FIMV_D_CPB_BUFFER_OFFSET_V6); mfc_debug_leave(); return 0; } /* Set decoding frame buffer */ static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx) { unsigned int frame_size, i; unsigned int frame_size_ch, frame_size_mv; struct s5p_mfc_dev *dev = ctx->dev; size_t buf_addr1; int buf_size1; int align_gap; buf_addr1 = ctx->bank1.dma; buf_size1 = ctx->bank1.size; mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1); mfc_debug(2, "Total DPB COUNT: %d\n", 
ctx->total_dpb_count); mfc_debug(2, "Setting display delay to %d\n", ctx->display_delay); WRITEL(ctx->total_dpb_count, S5P_FIMV_D_NUM_DPB_V6); WRITEL(ctx->luma_size, S5P_FIMV_D_LUMA_DPB_SIZE_V6); WRITEL(ctx->chroma_size, S5P_FIMV_D_CHROMA_DPB_SIZE_V6); WRITEL(buf_addr1, S5P_FIMV_D_SCRATCH_BUFFER_ADDR_V6); WRITEL(ctx->scratch_buf_size, S5P_FIMV_D_SCRATCH_BUFFER_SIZE_V6); buf_addr1 += ctx->scratch_buf_size; buf_size1 -= ctx->scratch_buf_size; if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC || ctx->codec_mode == S5P_FIMV_CODEC_H264_MVC_DEC){ WRITEL(ctx->mv_size, S5P_FIMV_D_MV_BUFFER_SIZE_V6); WRITEL(ctx->mv_count, S5P_FIMV_D_NUM_MV_V6); } frame_size = ctx->luma_size; frame_size_ch = ctx->chroma_size; frame_size_mv = ctx->mv_size; mfc_debug(2, "Frame size: %d ch: %d mv: %d\n", frame_size, frame_size_ch, frame_size_mv); for (i = 0; i < ctx->total_dpb_count; i++) { /* Bank2 */ mfc_debug(2, "Luma %d: %x\n", i, ctx->dst_bufs[i].cookie.raw.luma); WRITEL(ctx->dst_bufs[i].cookie.raw.luma, S5P_FIMV_D_LUMA_DPB_V6 + i * 4); mfc_debug(2, "\tChroma %d: %x\n", i, ctx->dst_bufs[i].cookie.raw.chroma); WRITEL(ctx->dst_bufs[i].cookie.raw.chroma, S5P_FIMV_D_CHROMA_DPB_V6 + i * 4); } if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC || ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) { for (i = 0; i < ctx->mv_count; i++) { /* To test alignment */ align_gap = buf_addr1; buf_addr1 = ALIGN(buf_addr1, 16); align_gap = buf_addr1 - align_gap; buf_size1 -= align_gap; mfc_debug(2, "\tBuf1: %x, size: %d\n", buf_addr1, buf_size1); WRITEL(buf_addr1, S5P_FIMV_D_MV_BUFFER_V6 + i * 4); buf_addr1 += frame_size_mv; buf_size1 -= frame_size_mv; } } mfc_debug(2, "Buf1: %u, buf_size1: %d (frames %d)\n", buf_addr1, buf_size1, ctx->total_dpb_count); if (buf_size1 < 0) { mfc_debug(2, "Not enough memory has been allocated.\n"); return -ENOMEM; } WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6); s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_INIT_BUFS_V6, NULL); mfc_debug(2, "After setting buffers.\n"); 
return 0; } /* Set registers for encoding stream buffer */ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx, unsigned long addr, unsigned int size) { struct s5p_mfc_dev *dev = ctx->dev; WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */ WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6); mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n", addr, size); return 0; } static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, unsigned long y_addr, unsigned long c_addr) { struct s5p_mfc_dev *dev = ctx->dev; WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */ WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6); mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr); mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr); } static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, unsigned long *y_addr, unsigned long *c_addr) { struct s5p_mfc_dev *dev = ctx->dev; unsigned long enc_recon_y_addr, enc_recon_c_addr; *y_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_LUMA_ADDR_V6); *c_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_CHROMA_ADDR_V6); enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6); enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6); mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr); mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr); } /* Set encoding ref & codec buffer */ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; size_t buf_addr1; int i, buf_size1; mfc_debug_enter(); buf_addr1 = ctx->bank1.dma; buf_size1 = ctx->bank1.size; mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1); for (i = 0; i < ctx->pb_count; i++) { WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i)); buf_addr1 += ctx->luma_dpb_size; WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i)); buf_addr1 += ctx->chroma_dpb_size; WRITEL(buf_addr1, S5P_FIMV_E_ME_BUFFER_V6 + (4 * i)); buf_addr1 += ctx->me_buffer_size; buf_size1 -= (ctx->luma_dpb_size + 
ctx->chroma_dpb_size + ctx->me_buffer_size); } WRITEL(buf_addr1, S5P_FIMV_E_SCRATCH_BUFFER_ADDR_V6); WRITEL(ctx->scratch_buf_size, S5P_FIMV_E_SCRATCH_BUFFER_SIZE_V6); buf_addr1 += ctx->scratch_buf_size; buf_size1 -= ctx->scratch_buf_size; WRITEL(buf_addr1, S5P_FIMV_E_TMV_BUFFER0_V6); buf_addr1 += ctx->tmv_buffer_size >> 1; WRITEL(buf_addr1, S5P_FIMV_E_TMV_BUFFER1_V6); buf_addr1 += ctx->tmv_buffer_size >> 1; buf_size1 -= ctx->tmv_buffer_size; mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n", buf_addr1, buf_size1, ctx->pb_count); if (buf_size1 < 0) { mfc_debug(2, "Not enough memory has been allocated.\n"); return -ENOMEM; } WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6); s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_INIT_BUFS_V6, NULL); mfc_debug_leave(); return 0; } static int s5p_mfc_set_slice_mode(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; /* multi-slice control */ /* multi-slice MB number or bit size */ WRITEL(ctx->slice_mode, S5P_FIMV_E_MSLICE_MODE_V6); if (ctx->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) { WRITEL(ctx->slice_size.mb, S5P_FIMV_E_MSLICE_SIZE_MB_V6); } else if (ctx->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) { WRITEL(ctx->slice_size.bits, S5P_FIMV_E_MSLICE_SIZE_BITS_V6); } else { WRITEL(0x0, S5P_FIMV_E_MSLICE_SIZE_MB_V6); WRITEL(0x0, S5P_FIMV_E_MSLICE_SIZE_BITS_V6); } return 0; } static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_enc_params *p = &ctx->enc_params; unsigned int reg = 0; mfc_debug_enter(); /* width */ WRITEL(ctx->img_width, S5P_FIMV_E_FRAME_WIDTH_V6); /* 16 align */ /* height */ WRITEL(ctx->img_height, S5P_FIMV_E_FRAME_HEIGHT_V6); /* 16 align */ /* cropped width */ WRITEL(ctx->img_width, S5P_FIMV_E_CROPPED_FRAME_WIDTH_V6); /* cropped height */ WRITEL(ctx->img_height, S5P_FIMV_E_CROPPED_FRAME_HEIGHT_V6); /* cropped offset */ WRITEL(0x0, S5P_FIMV_E_FRAME_CROP_OFFSET_V6); /* pictype : IDR period */ reg = 0; reg 
|= p->gop_size & 0xFFFF; WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6); /* multi-slice control */ /* multi-slice MB number or bit size */ ctx->slice_mode = p->slice_mode; reg = 0; if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) { reg |= (0x1 << 3); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); ctx->slice_size.mb = p->slice_mb; } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) { reg |= (0x1 << 3); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); ctx->slice_size.bits = p->slice_bit; } else { reg &= ~(0x1 << 3); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); } s5p_mfc_set_slice_mode(ctx); /* cyclic intra refresh */ WRITEL(p->intra_refresh_mb, S5P_FIMV_E_IR_SIZE_V6); reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6); if (p->intra_refresh_mb == 0) reg &= ~(0x1 << 4); else reg |= (0x1 << 4); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); /* 'NON_REFERENCE_STORE_ENABLE' for debugging */ reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6); reg &= ~(0x1 << 9); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); /* memory structure cur. frame */ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) { /* 0: Linear, 1: 2D tiled*/ reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6); reg &= ~(0x1 << 7); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); /* 0: NV12(CbCr), 1: NV21(CrCb) */ WRITEL(0x0, S5P_FIMV_PIXEL_FORMAT_V6); } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV21M) { /* 0: Linear, 1: 2D tiled*/ reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6); reg &= ~(0x1 << 7); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); /* 0: NV12(CbCr), 1: NV21(CrCb) */ WRITEL(0x1, S5P_FIMV_PIXEL_FORMAT_V6); } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) { /* 0: Linear, 1: 2D tiled*/ reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6); reg |= (0x1 << 7); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); /* 0: NV12(CbCr), 1: NV21(CrCb) */ WRITEL(0x0, S5P_FIMV_PIXEL_FORMAT_V6); } /* memory structure recon. 
frame */ /* 0: Linear, 1: 2D tiled */ reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6); reg |= (0x1 << 8); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); /* padding control & value */ WRITEL(0x0, S5P_FIMV_E_PADDING_CTRL_V6); if (p->pad) { reg = 0; /** enable */ reg |= (1 << 31); /** cr value */ reg |= ((p->pad_cr & 0xFF) << 16); /** cb value */ reg |= ((p->pad_cb & 0xFF) << 8); /** y value */ reg |= p->pad_luma & 0xFF; WRITEL(reg, S5P_FIMV_E_PADDING_CTRL_V6); } /* rate control config. */ reg = 0; /* frame-level rate control */ reg |= ((p->rc_frame & 0x1) << 9); WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6); /* bit rate */ if (p->rc_frame) WRITEL(p->rc_bitrate, S5P_FIMV_E_RC_BIT_RATE_V6); else WRITEL(1, S5P_FIMV_E_RC_BIT_RATE_V6); /* reaction coefficient */ if (p->rc_frame) { if (p->rc_reaction_coeff < TIGHT_CBR_MAX) /* tight CBR */ WRITEL(1, S5P_FIMV_E_RC_RPARAM_V6); else /* loose CBR */ WRITEL(2, S5P_FIMV_E_RC_RPARAM_V6); } /* seq header ctrl */ reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6); reg &= ~(0x1 << 2); reg |= ((p->seq_hdr_mode & 0x1) << 2); /* frame skip mode */ reg &= ~(0x3); reg |= (p->frame_skip_mode & 0x3); WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6); /* 'DROP_CONTROL_ENABLE', disable */ reg = READL(S5P_FIMV_E_RC_CONFIG_V6); reg &= ~(0x1 << 10); WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6); /* setting for MV range [16, 256] */ reg = 0; reg &= ~(0x3FFF); reg = 256; WRITEL(reg, S5P_FIMV_E_MV_HOR_RANGE_V6); reg = 0; reg &= ~(0x3FFF); reg = 256; WRITEL(reg, S5P_FIMV_E_MV_VER_RANGE_V6); WRITEL(0x0, S5P_FIMV_E_FRAME_INSERTION_V6); WRITEL(0x0, S5P_FIMV_E_ROI_BUFFER_ADDR_V6); WRITEL(0x0, S5P_FIMV_E_PARAM_CHANGE_V6); WRITEL(0x0, S5P_FIMV_E_RC_ROI_CTRL_V6); WRITEL(0x0, S5P_FIMV_E_PICTURE_TAG_V6); WRITEL(0x0, S5P_FIMV_E_BIT_COUNT_ENABLE_V6); WRITEL(0x0, S5P_FIMV_E_MAX_BIT_COUNT_V6); WRITEL(0x0, S5P_FIMV_E_MIN_BIT_COUNT_V6); WRITEL(0x0, S5P_FIMV_E_METADATA_BUFFER_ADDR_V6); WRITEL(0x0, S5P_FIMV_E_METADATA_BUFFER_SIZE_V6); mfc_debug_leave(); return 0; } static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx 
*ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_enc_params *p = &ctx->enc_params; struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264; unsigned int reg = 0; int i; mfc_debug_enter(); s5p_mfc_set_enc_params(ctx); /* pictype : number of B */ reg = READL(S5P_FIMV_E_GOP_CONFIG_V6); reg &= ~(0x3 << 16); reg |= ((p->num_b_frame & 0x3) << 16); WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6); /* profile & level */ reg = 0; /** level */ reg |= ((p_h264->level & 0xFF) << 8); /** profile - 0 ~ 3 */ reg |= p_h264->profile & 0x3F; WRITEL(reg, S5P_FIMV_E_PICTURE_PROFILE_V6); /* rate control config. */ reg = READL(S5P_FIMV_E_RC_CONFIG_V6); /** macroblock level rate control */ reg &= ~(0x1 << 8); reg |= ((p->rc_mb & 0x1) << 8); WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6); /** frame QP */ reg &= ~(0x3F); reg |= p_h264->rc_frame_qp & 0x3F; WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6); /* max & min value of QP */ reg = 0; /** max QP */ reg |= ((p_h264->rc_max_qp & 0x3F) << 8); /** min QP */ reg |= p_h264->rc_min_qp & 0x3F; WRITEL(reg, S5P_FIMV_E_RC_QP_BOUND_V6); /* other QPs */ WRITEL(0x0, S5P_FIMV_E_FIXED_PICTURE_QP_V6); if (!p->rc_frame && !p->rc_mb) { reg = 0; reg |= ((p_h264->rc_b_frame_qp & 0x3F) << 16); reg |= ((p_h264->rc_p_frame_qp & 0x3F) << 8); reg |= p_h264->rc_frame_qp & 0x3F; WRITEL(reg, S5P_FIMV_E_FIXED_PICTURE_QP_V6); } /* frame rate */ if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) { reg = 0; reg |= ((p->rc_framerate_num & 0xFFFF) << 16); reg |= p->rc_framerate_denom & 0xFFFF; WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6); } /* vbv buffer size */ if (p->frame_skip_mode == V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) { WRITEL(p_h264->cpb_size & 0xFFFF, S5P_FIMV_E_VBV_BUFFER_SIZE_V6); if (p->rc_frame) WRITEL(p->vbv_delay, S5P_FIMV_E_VBV_INIT_DELAY_V6); } /* interlace */ reg = 0; reg |= ((p_h264->interlace & 0x1) << 3); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); /* height */ if (p_h264->interlace) { WRITEL(ctx->img_height >> 1, S5P_FIMV_E_FRAME_HEIGHT_V6); /* 32 
align */ /* cropped height */ WRITEL(ctx->img_height >> 1, S5P_FIMV_E_CROPPED_FRAME_HEIGHT_V6); } /* loop filter ctrl */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x3 << 1); reg |= ((p_h264->loop_filter_mode & 0x3) << 1); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); /* loopfilter alpha offset */ if (p_h264->loop_filter_alpha < 0) { reg = 0x10; reg |= (0xFF - p_h264->loop_filter_alpha) + 1; } else { reg = 0x00; reg |= (p_h264->loop_filter_alpha & 0xF); } WRITEL(reg, S5P_FIMV_E_H264_LF_ALPHA_OFFSET_V6); /* loopfilter beta offset */ if (p_h264->loop_filter_beta < 0) { reg = 0x10; reg |= (0xFF - p_h264->loop_filter_beta) + 1; } else { reg = 0x00; reg |= (p_h264->loop_filter_beta & 0xF); } WRITEL(reg, S5P_FIMV_E_H264_LF_BETA_OFFSET_V6); /* entropy coding mode */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x1); reg |= p_h264->entropy_mode & 0x1; WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); /* number of ref. picture */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x1 << 7); reg |= (((p_h264->num_ref_pic_4p - 1) & 0x1) << 7); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); /* 8x8 transform enable */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x3 << 12); reg |= ((p_h264->_8x8_transform & 0x3) << 12); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); /* macroblock adaptive scaling features */ WRITEL(0x0, S5P_FIMV_E_MB_RC_CONFIG_V6); if (p->rc_mb) { reg = 0; /** dark region */ reg |= ((p_h264->rc_mb_dark & 0x1) << 3); /** smooth region */ reg |= ((p_h264->rc_mb_smooth & 0x1) << 2); /** static region */ reg |= ((p_h264->rc_mb_static & 0x1) << 1); /** high activity region */ reg |= p_h264->rc_mb_activity & 0x1; WRITEL(reg, S5P_FIMV_E_MB_RC_CONFIG_V6); } /* aspect ratio VUI */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x1 << 5); reg |= ((p_h264->vui_sar & 0x1) << 5); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); WRITEL(0x0, S5P_FIMV_E_ASPECT_RATIO_V6); WRITEL(0x0, S5P_FIMV_E_EXTENDED_SAR_V6); if (p_h264->vui_sar) { /* aspect ration IDC */ reg = 0; reg |= 
p_h264->vui_sar_idc & 0xFF; WRITEL(reg, S5P_FIMV_E_ASPECT_RATIO_V6); if (p_h264->vui_sar_idc == 0xFF) { /* extended SAR */ reg = 0; reg |= (p_h264->vui_ext_sar_width & 0xFFFF) << 16; reg |= p_h264->vui_ext_sar_height & 0xFFFF; WRITEL(reg, S5P_FIMV_E_EXTENDED_SAR_V6); } } /* intra picture period for H.264 open GOP */ /* control */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x1 << 4); reg |= ((p_h264->open_gop & 0x1) << 4); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); /* value */ WRITEL(0x0, S5P_FIMV_E_H264_I_PERIOD_V6); if (p_h264->open_gop) { reg = 0; reg |= p_h264->open_gop_size & 0xFFFF; WRITEL(reg, S5P_FIMV_E_H264_I_PERIOD_V6); } /* 'WEIGHTED_BI_PREDICTION' for B is disable */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x3 << 9); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); /* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x1 << 14); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); /* ASO */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x1 << 6); reg |= ((p_h264->aso & 0x1) << 6); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); /* hier qp enable */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x1 << 8); reg |= ((p_h264->open_gop & 0x1) << 8); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); reg = 0; if (p_h264->hier_qp && p_h264->hier_qp_layer) { reg |= (p_h264->hier_qp_type & 0x1) << 0x3; reg |= p_h264->hier_qp_layer & 0x7; WRITEL(reg, S5P_FIMV_E_H264_NUM_T_LAYER_V6); /* QP value for each layer */ for (i = 0; i < (p_h264->hier_qp_layer & 0x7); i++) WRITEL(p_h264->hier_qp_layer_qp[i], S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER0_V6 + i * 4); } /* number of coding layer should be zero when hierarchical is disable */ WRITEL(reg, S5P_FIMV_E_H264_NUM_T_LAYER_V6); /* frame packing SEI generation */ reg = READL(S5P_FIMV_E_H264_OPTIONS_V6); reg &= ~(0x1 << 25); reg |= ((p_h264->sei_frame_packing & 0x1) << 25); WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6); if (p_h264->sei_frame_packing) { reg = 0; /** current frame0 flag */ reg |= 
((p_h264->sei_fp_curr_frame_0 & 0x1) << 2); /** arrangement type */ reg |= p_h264->sei_fp_arrangement_type & 0x3; WRITEL(reg, S5P_FIMV_E_H264_FRAME_PACKING_SEI_INFO_V6); } if (p_h264->fmo) { switch (p_h264->fmo_map_type) { case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES: if (p_h264->fmo_slice_grp > 4) p_h264->fmo_slice_grp = 4; for (i = 0; i < (p_h264->fmo_slice_grp & 0xF); i++) WRITEL(p_h264->fmo_run_len[i] - 1, S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_0_V6 + i * 4); break; case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES: if (p_h264->fmo_slice_grp > 4) p_h264->fmo_slice_grp = 4; break; case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN: case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN: if (p_h264->fmo_slice_grp > 2) p_h264->fmo_slice_grp = 2; WRITEL(p_h264->fmo_chg_dir & 0x1, S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_DIR_V6); /* the valid range is 0 ~ number of macroblocks -1 */ WRITEL(p_h264->fmo_chg_rate, S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_RATE_MINUS1_V6); break; default: mfc_err("Unsupported map type for FMO: %d\n", p_h264->fmo_map_type); p_h264->fmo_map_type = 0; p_h264->fmo_slice_grp = 1; break; } WRITEL(p_h264->fmo_map_type, S5P_FIMV_E_H264_FMO_SLICE_GRP_MAP_TYPE_V6); WRITEL(p_h264->fmo_slice_grp - 1, S5P_FIMV_E_H264_FMO_NUM_SLICE_GRP_MINUS1_V6); } else { WRITEL(0, S5P_FIMV_E_H264_FMO_NUM_SLICE_GRP_MINUS1_V6); } mfc_debug_leave(); return 0; } static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_enc_params *p = &ctx->enc_params; struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4; unsigned int reg = 0; mfc_debug_enter(); s5p_mfc_set_enc_params(ctx); /* pictype : number of B */ reg = READL(S5P_FIMV_E_GOP_CONFIG_V6); reg &= ~(0x3 << 16); reg |= ((p->num_b_frame & 0x3) << 16); WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6); /* profile & level */ reg = 0; /** level */ reg |= ((p_mpeg4->level & 0xFF) << 8); /** profile - 0 ~ 1 */ reg |= p_mpeg4->profile & 0x3F; WRITEL(reg, 
S5P_FIMV_E_PICTURE_PROFILE_V6); /* rate control config. */ reg = READL(S5P_FIMV_E_RC_CONFIG_V6); /** macroblock level rate control */ reg &= ~(0x1 << 8); reg |= ((p->rc_mb & 0x1) << 8); WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6); /** frame QP */ reg &= ~(0x3F); reg |= p_mpeg4->rc_frame_qp & 0x3F; WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6); /* max & min value of QP */ reg = 0; /** max QP */ reg |= ((p_mpeg4->rc_max_qp & 0x3F) << 8); /** min QP */ reg |= p_mpeg4->rc_min_qp & 0x3F; WRITEL(reg, S5P_FIMV_E_RC_QP_BOUND_V6); /* other QPs */ WRITEL(0x0, S5P_FIMV_E_FIXED_PICTURE_QP_V6); if (!p->rc_frame && !p->rc_mb) { reg = 0; reg |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 16); reg |= ((p_mpeg4->rc_p_frame_qp & 0x3F) << 8); reg |= p_mpeg4->rc_frame_qp & 0x3F; WRITEL(reg, S5P_FIMV_E_FIXED_PICTURE_QP_V6); } /* frame rate */ if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) { reg = 0; reg |= ((p->rc_framerate_num & 0xFFFF) << 16); reg |= p->rc_framerate_denom & 0xFFFF; WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6); } /* vbv buffer size */ if (p->frame_skip_mode == V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) { WRITEL(p->vbv_size & 0xFFFF, S5P_FIMV_E_VBV_BUFFER_SIZE_V6); if (p->rc_frame) WRITEL(p->vbv_delay, S5P_FIMV_E_VBV_INIT_DELAY_V6); } /* Disable HEC */ WRITEL(0x0, S5P_FIMV_E_MPEG4_OPTIONS_V6); WRITEL(0x0, S5P_FIMV_E_MPEG4_HEC_PERIOD_V6); mfc_debug_leave(); return 0; } static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_enc_params *p = &ctx->enc_params; struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4; unsigned int reg = 0; mfc_debug_enter(); s5p_mfc_set_enc_params(ctx); /* profile & level */ reg = 0; /** profile */ reg |= (0x1 << 4); WRITEL(reg, S5P_FIMV_E_PICTURE_PROFILE_V6); /* rate control config. 
*/ reg = READL(S5P_FIMV_E_RC_CONFIG_V6); /** macroblock level rate control */ reg &= ~(0x1 << 8); reg |= ((p->rc_mb & 0x1) << 8); WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6); /** frame QP */ reg &= ~(0x3F); reg |= p_h263->rc_frame_qp & 0x3F; WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6); /* max & min value of QP */ reg = 0; /** max QP */ reg |= ((p_h263->rc_max_qp & 0x3F) << 8); /** min QP */ reg |= p_h263->rc_min_qp & 0x3F; WRITEL(reg, S5P_FIMV_E_RC_QP_BOUND_V6); /* other QPs */ WRITEL(0x0, S5P_FIMV_E_FIXED_PICTURE_QP_V6); if (!p->rc_frame && !p->rc_mb) { reg = 0; reg |= ((p_h263->rc_b_frame_qp & 0x3F) << 16); reg |= ((p_h263->rc_p_frame_qp & 0x3F) << 8); reg |= p_h263->rc_frame_qp & 0x3F; WRITEL(reg, S5P_FIMV_E_FIXED_PICTURE_QP_V6); } /* frame rate */ if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) { reg = 0; reg |= ((p->rc_framerate_num & 0xFFFF) << 16); reg |= p->rc_framerate_denom & 0xFFFF; WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6); } /* vbv buffer size */ if (p->frame_skip_mode == V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) { WRITEL(p->vbv_size & 0xFFFF, S5P_FIMV_E_VBV_BUFFER_SIZE_V6); if (p->rc_frame) WRITEL(p->vbv_delay, S5P_FIMV_E_VBV_INIT_DELAY_V6); } mfc_debug_leave(); return 0; } /* Initialize decoding */ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned int reg = 0; int fmo_aso_ctrl = 0; mfc_debug_enter(); mfc_debug(2, "InstNo: %d/%d\n", ctx->inst_no, S5P_FIMV_CH_SEQ_HEADER_V6); mfc_debug(2, "BUFs: %08x %08x %08x\n", READL(S5P_FIMV_D_CPB_BUFFER_ADDR_V6), READL(S5P_FIMV_D_CPB_BUFFER_ADDR_V6), READL(S5P_FIMV_D_CPB_BUFFER_ADDR_V6)); /* FMO_ASO_CTRL - 0: Enable, 1: Disable */ reg |= (fmo_aso_ctrl << S5P_FIMV_D_OPT_FMO_ASO_CTRL_MASK_V6); /* When user sets desplay_delay to 0, * It works as "display_delay enable" and delay set to 0. * If user wants display_delay disable, It should be * set to negative value. 
*/ if (ctx->display_delay >= 0) { reg |= (0x1 << S5P_FIMV_D_OPT_DDELAY_EN_SHIFT_V6); WRITEL(ctx->display_delay, S5P_FIMV_D_DISPLAY_DELAY_V6); } /* Setup loop filter, for decoding this is only valid for MPEG4 */ if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC) { mfc_debug(2, "Set loop filter to: %d\n", ctx->loop_filter_mpeg4); reg |= (ctx->loop_filter_mpeg4 << S5P_FIMV_D_OPT_LF_CTRL_SHIFT_V6); } if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) reg |= (0x1 << S5P_FIMV_D_OPT_TILE_MODE_SHIFT_V6); WRITEL(reg, S5P_FIMV_D_DEC_OPTIONS_V6); /* 0: NV12(CbCr), 1: NV21(CrCb) */ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV21M) WRITEL(0x1, S5P_FIMV_PIXEL_FORMAT_V6); else WRITEL(0x0, S5P_FIMV_PIXEL_FORMAT_V6); /* sei parse */ WRITEL(ctx->sei_fp_parse & 0x1, S5P_FIMV_D_SEI_ENABLE_V6); WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6); s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_SEQ_HEADER_V6, NULL); mfc_debug_leave(); return 0; } static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush) { struct s5p_mfc_dev *dev = ctx->dev; if (flush) { dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6); s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_H2R_CMD_FLUSH_V6, NULL); } } /* Decode a single frame */ static int s5p_mfc_decode_one_frame_v6(struct s5p_mfc_ctx *ctx, enum s5p_mfc_decode_arg last_frame) { struct s5p_mfc_dev *dev = ctx->dev; WRITEL(ctx->dec_dst_flag, S5P_FIMV_D_AVAILABLE_DPB_FLAG_LOWER_V6); WRITEL(ctx->slice_interface & 0x1, S5P_FIMV_D_SLICE_IF_ENABLE_V6); WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6); /* Issue different commands to instance basing on whether it * is the last frame or not. 
*/
	switch (last_frame) {
	case 0:
		/* Ordinary frame: start decoding the queued bitstream. */
		s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
				S5P_FIMV_CH_FRAME_START_V6, NULL);
		break;
	case 1:
		/* End of stream: tell the firmware this is the last frame. */
		s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
				S5P_FIMV_CH_LAST_FRAME_V6, NULL);
		break;
	default:
		mfc_err("Unsupported last frame arg.\n");
		return -EINVAL;
	}

	mfc_debug(2, "Decoding a usual frame.\n");
	return 0;
}

/*
 * Program the per-codec encoding parameters for this context and ask the
 * firmware to generate the sequence header.  Returns -EINVAL if the
 * context's codec_mode is not one of the supported encoders.
 */
static int s5p_mfc_init_encode_v6(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
		s5p_mfc_set_enc_params_h264(ctx);
	else if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_ENC)
		s5p_mfc_set_enc_params_mpeg4(ctx);
	else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC)
		s5p_mfc_set_enc_params_h263(ctx);
	else {
		mfc_err("Unknown codec for encoding (%x).\n",
			ctx->codec_mode);
		return -EINVAL;
	}

	WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
	s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
			S5P_FIMV_CH_SEQ_HEADER_V6, NULL);

	return 0;
}

/*
 * Write the eight-entry arbitrary-slice-order (ASO) map to the hardware
 * when ASO is enabled in the H.264 encoder parameters; a no-op otherwise.
 * (dev is used by the WRITEL() macro.)
 */
static int s5p_mfc_h264_set_aso_slice_order_v6(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_enc_params *p = &ctx->enc_params;
	struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264;
	int i;

	if (p_h264->aso) {
		for (i = 0; i < 8; i++)
			WRITEL(p_h264->aso_slice_order[i],
				S5P_FIMV_E_H264_ASO_SLICE_ORDER_0_V6 + i * 4);
	}
	return 0;
}

/* Encode a single frame */
static int s5p_mfc_encode_one_frame_v6(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_debug(2, "++\n");

	/* memory structure cur.
frame */ if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) s5p_mfc_h264_set_aso_slice_order_v6(ctx); s5p_mfc_set_slice_mode(ctx); WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6); s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev, S5P_FIMV_CH_FRAME_START_V6, NULL); mfc_debug(2, "--\n"); return 0; } static inline int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev) { unsigned long flags; int new_ctx; int cnt; spin_lock_irqsave(&dev->condlock, flags); mfc_debug(2, "Previous context: %d (bits %08lx)\n", dev->curr_ctx, dev->ctx_work_bits); new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS; cnt = 0; while (!test_bit(new_ctx, &dev->ctx_work_bits)) { new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS; cnt++; if (cnt > MFC_NUM_CONTEXTS) { /* No contexts to run */ spin_unlock_irqrestore(&dev->condlock, flags); return -EAGAIN; } } spin_unlock_irqrestore(&dev->condlock, flags); return new_ctx; } static inline void s5p_mfc_run_dec_last_frames(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *temp_vb; unsigned long flags; spin_lock_irqsave(&dev->irqlock, flags); /* Frames are being decoded */ if (list_empty(&ctx->src_queue)) { mfc_debug(2, "No src buffers.\n"); spin_unlock_irqrestore(&dev->irqlock, flags); return; } /* Get the next source buffer */ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); temp_vb->flags |= MFC_BUF_FLAG_USED; s5p_mfc_set_dec_stream_buffer_v6(ctx, vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0, 0); spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_decode_one_frame_v6(ctx, 1); } static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *temp_vb; unsigned long flags; int last_frame = 0; if (ctx->state == MFCINST_FINISHING) { last_frame = MFC_DEC_LAST_FRAME; s5p_mfc_set_dec_stream_buffer_v6(ctx, 0, 0, 0); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_decode_one_frame_v6(ctx, 
last_frame); return 0; } spin_lock_irqsave(&dev->irqlock, flags); /* Frames are being decoded */ if (list_empty(&ctx->src_queue)) { mfc_debug(2, "No src buffers.\n"); spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } /* Get the next source buffer */ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); temp_vb->flags |= MFC_BUF_FLAG_USED; s5p_mfc_set_dec_stream_buffer_v6(ctx, vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused); spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); if (temp_vb->b->v4l2_planes[0].bytesused == 0) { last_frame = 1; mfc_debug(2, "Setting ctx->state to FINISHING\n"); ctx->state = MFCINST_FINISHING; } s5p_mfc_decode_one_frame_v6(ctx, last_frame); return 0; } static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; struct s5p_mfc_buf *dst_mb; struct s5p_mfc_buf *src_mb; unsigned long src_y_addr, src_c_addr, dst_addr; /* unsigned int src_y_size, src_c_size; */ unsigned int dst_size; spin_lock_irqsave(&dev->irqlock, flags); if (list_empty(&ctx->src_queue)) { mfc_debug(2, "no src buffers.\n"); spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } if (list_empty(&ctx->dst_queue)) { mfc_debug(2, "no dst buffers.\n"); spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); src_mb->flags |= MFC_BUF_FLAG_USED; src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0); src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1); mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr); mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr); s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr); dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); dst_mb->flags |= MFC_BUF_FLAG_USED; dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); dst_size = 
vb2_plane_size(dst_mb->b, 0); s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_encode_one_frame_v6(ctx); return 0; } static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; struct s5p_mfc_buf *temp_vb; /* Initializing decoding - parsing header */ spin_lock_irqsave(&dev->irqlock, flags); mfc_debug(2, "Preparing to init decoding.\n"); temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused); s5p_mfc_set_dec_stream_buffer_v6(ctx, vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0, temp_vb->b->v4l2_planes[0].bytesused); spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_init_decode_v6(ctx); } static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; struct s5p_mfc_buf *dst_mb; unsigned long dst_addr; unsigned int dst_size; spin_lock_irqsave(&dev->irqlock, flags); dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); dst_size = vb2_plane_size(dst_mb->b, 0); s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_init_encode_v6(ctx); } static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; int ret; /* Header was parsed now start processing * First set the output frame buffers * s5p_mfc_alloc_dec_buffers(ctx); */ if (ctx->capture_state != QUEUE_BUFS_MMAPED) { mfc_err("It seems that not all destionation buffers were\n" "mmaped.MFC requires that all destination are mmaped\n" "before starting processing.\n"); return -EAGAIN; } dev->curr_ctx = 
ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_set_dec_frame_buffer_v6(ctx); if (ret) { mfc_err("Failed to alloc frame mem.\n"); ctx->state = MFCINST_ERROR; } return ret; } static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; int ret; dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_set_enc_ref_buffer_v6(ctx); if (ret) { mfc_err("Failed to alloc frame mem.\n"); ctx->state = MFCINST_ERROR; } return ret; } /* Try running an operation on hardware */ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev) { struct s5p_mfc_ctx *ctx; int new_ctx; unsigned int ret = 0; mfc_debug(1, "Try run dev: %p\n", dev); /* Check whether hardware is not running */ if (test_and_set_bit(0, &dev->hw_lock) != 0) { /* This is perfectly ok, the scheduled ctx should wait */ mfc_debug(1, "Couldn't lock HW.\n"); return; } /* Choose the context to run */ new_ctx = s5p_mfc_get_new_ctx(dev); if (new_ctx < 0) { /* No contexts to run */ if (test_and_clear_bit(0, &dev->hw_lock) == 0) { mfc_err("Failed to unlock hardware.\n"); return; } mfc_debug(1, "No ctx is scheduled to be run.\n"); return; } mfc_debug(1, "New context: %d\n", new_ctx); ctx = dev->ctx[new_ctx]; mfc_debug(1, "Seting new context to %p\n", ctx); /* Got context to run in ctx */ mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n", ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt); mfc_debug(1, "ctx->state=%d\n", ctx->state); /* Last frame has already been sent to MFC * Now obtaining frames from MFC buffer */ s5p_mfc_clock_on(); if (ctx->type == MFCINST_DECODER) { switch (ctx->state) { case MFCINST_FINISHING: s5p_mfc_run_dec_last_frames(ctx); break; case MFCINST_RUNNING: ret = s5p_mfc_run_dec_frame(ctx); break; case MFCINST_INIT: s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd, ctx); break; case MFCINST_RETURN_INST: s5p_mfc_clean_ctx_int_flags(ctx); ret = 
s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd, ctx); break; case MFCINST_GOT_INST: s5p_mfc_run_init_dec(ctx); break; case MFCINST_HEAD_PARSED: ret = s5p_mfc_run_init_dec_buffers(ctx); break; case MFCINST_FLUSH: s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag); break; case MFCINST_RES_CHANGE_INIT: s5p_mfc_run_dec_last_frames(ctx); break; case MFCINST_RES_CHANGE_FLUSH: s5p_mfc_run_dec_last_frames(ctx); break; case MFCINST_RES_CHANGE_END: mfc_debug(2, "Finished remaining frames after resolution change.\n"); ctx->capture_state = QUEUE_FREE; mfc_debug(2, "Will re-init the codec`.\n"); s5p_mfc_run_init_dec(ctx); break; default: ret = -EAGAIN; } } else if (ctx->type == MFCINST_ENCODER) { switch (ctx->state) { case MFCINST_FINISHING: case MFCINST_RUNNING: ret = s5p_mfc_run_enc_frame(ctx); break; case MFCINST_INIT: ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd, ctx); break; case MFCINST_RETURN_INST: ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd, ctx); break; case MFCINST_GOT_INST: s5p_mfc_run_init_enc(ctx); break; case MFCINST_HEAD_PRODUCED: ret = s5p_mfc_run_init_enc_buffers(ctx); break; default: ret = -EAGAIN; } } else { mfc_err("invalid context type: %d\n", ctx->type); ret = -EAGAIN; } if (ret) { /* Free hardware lock */ if (test_and_clear_bit(0, &dev->hw_lock) == 0) mfc_err("Failed to unlock hardware.\n"); /* This is in deed imporant, as no operation has been * scheduled, reduce the clock count as no one will * ever do this, because no interrupt related to this try_run * will ever come from hardware. 
 */
		s5p_mfc_clock_off();
	}
}

/*
 * Return every buffer queued on @lh to vb2 in ERROR state with all plane
 * payloads zeroed, unlinking each from the list.  Used to flush a queue
 * when streaming stops or an instance is torn down.
 */
static void s5p_mfc_cleanup_queue_v6(struct list_head *lh, struct vb2_queue *vq)
{
	struct s5p_mfc_buf *b;
	int i;

	while (!list_empty(lh)) {
		b = list_entry(lh->next, struct s5p_mfc_buf, list);
		for (i = 0; i < b->b->num_planes; i++)
			vb2_set_plane_payload(b->b, i, 0);
		vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
		list_del(&b->list);
	}
}

/* Acknowledge/clear the firmware-to-host command and interrupt registers. */
static void s5p_mfc_clear_int_flags_v6(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD_V6);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_INT_V6);
}

/*
 * Write @data to MFC register @ofs with the hardware clock gated on for
 * the duration of the access (registers are unreachable with clocks off).
 */
static void s5p_mfc_write_info_v6(struct s5p_mfc_ctx *ctx, unsigned int data,
				  unsigned int ofs)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	s5p_mfc_clock_on();
	WRITEL(data, ofs);
	s5p_mfc_clock_off();
}

/* Read MFC register @ofs under the same clock-gating discipline as above. */
static unsigned int s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx,
					 unsigned int ofs)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	int ret;

	s5p_mfc_clock_on();
	ret = READL(ofs);
	s5p_mfc_clock_off();

	return ret;
}

/*
 * Thin register accessors below: each reads one v6 firmware result/status
 * register (and masks it where only a field is meaningful).  They exist so
 * the version-independent core can go through s5p_mfc_hw_ops.
 */
static int s5p_mfc_get_dspl_y_adr_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6);
}

static int s5p_mfc_get_dec_y_adr_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DECODED_LUMA_ADDR_V6);
}

static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6);
}

static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6);
}

static int s5p_mfc_get_dec_frame_type_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DECODED_FRAME_TYPE_V6) &
		S5P_FIMV_DECODE_FRAME_MASK_V6;
}

static int s5p_mfc_get_disp_frame_type_v6(struct s5p_mfc_ctx *ctx)
{
	return mfc_read(ctx->dev, S5P_FIMV_D_DISPLAY_FRAME_TYPE_V6) &
		S5P_FIMV_DECODE_FRAME_MASK_V6;
}

static int s5p_mfc_get_consumed_stream_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DECODED_NAL_SIZE_V6);
}

static int s5p_mfc_get_int_reason_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_RISC2HOST_CMD_V6) &
		S5P_FIMV_RISC2HOST_CMD_MASK;
}

static int s5p_mfc_get_int_err_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_ERROR_CODE_V6);
}

/* Extract the decode-error field from a combined error word. */
static int s5p_mfc_err_dec_v6(unsigned int err)
{
	return (err & S5P_FIMV_ERR_DEC_MASK_V6) >> S5P_FIMV_ERR_DEC_SHIFT_V6;
}

/* Extract the display-error field from a combined error word. */
static int s5p_mfc_err_dspl_v6(unsigned int err)
{
	return (err & S5P_FIMV_ERR_DSPL_MASK_V6) >> S5P_FIMV_ERR_DSPL_SHIFT_V6;
}

static int s5p_mfc_get_img_width_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DISPLAY_FRAME_WIDTH_V6);
}

static int s5p_mfc_get_img_height_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DISPLAY_FRAME_HEIGHT_V6);
}

static int s5p_mfc_get_dpb_count_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_MIN_NUM_DPB_V6);
}

static int s5p_mfc_get_mv_count_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_MIN_NUM_MV_V6);
}

static int s5p_mfc_get_inst_no_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_RET_INSTANCE_ID_V6);
}

static int s5p_mfc_get_enc_dpb_count_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_E_NUM_DPB_V6);
}

static int s5p_mfc_get_enc_strm_size_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_E_STREAM_SIZE_V6);
}

static int s5p_mfc_get_enc_slice_type_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_E_SLICE_TYPE_V6);
}

static int s5p_mfc_get_enc_pic_count_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_E_PICTURE_COUNT_V6);
}

static int s5p_mfc_get_sei_avail_status_v6(struct s5p_mfc_ctx *ctx)
{
	return mfc_read(ctx->dev, S5P_FIMV_D_FRAME_PACK_SEI_AVAIL_V6);
}

static int s5p_mfc_get_mvc_num_views_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_MVC_NUM_VIEWS_V6);
}

static int s5p_mfc_get_mvc_view_id_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_MVC_VIEW_ID_V6);
}

/*
 * The four accessors below go through s5p_mfc_read_info_v6 (clock-gated
 * access) rather than raw mfc_read.
 */
static unsigned int s5p_mfc_get_pic_type_top_v6(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v6(ctx, PIC_TIME_TOP_V6);
}

static unsigned int s5p_mfc_get_pic_type_bot_v6(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v6(ctx, PIC_TIME_BOT_V6);
}

static unsigned int s5p_mfc_get_crop_info_h_v6(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v6(ctx, CROP_INFO_H_V6);
}

static unsigned int s5p_mfc_get_crop_info_v_v6(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v6(ctx, CROP_INFO_V_V6);
}

/* Initialize opr function pointers for MFC v6 */
static struct s5p_mfc_hw_ops s5p_mfc_ops_v6 = {
	.alloc_dec_temp_buffers = s5p_mfc_alloc_dec_temp_buffers_v6,
	.release_dec_desc_buffer = s5p_mfc_release_dec_desc_buffer_v6,
	.alloc_codec_buffers = s5p_mfc_alloc_codec_buffers_v6,
	.release_codec_buffers = s5p_mfc_release_codec_buffers_v6,
	.alloc_instance_buffer = s5p_mfc_alloc_instance_buffer_v6,
	.release_instance_buffer = s5p_mfc_release_instance_buffer_v6,
	.alloc_dev_context_buffer = s5p_mfc_alloc_dev_context_buffer_v6,
	.release_dev_context_buffer = s5p_mfc_release_dev_context_buffer_v6,
	.dec_calc_dpb_size = s5p_mfc_dec_calc_dpb_size_v6,
	.enc_calc_src_size = s5p_mfc_enc_calc_src_size_v6,
	.set_dec_stream_buffer = s5p_mfc_set_dec_stream_buffer_v6,
	.set_dec_frame_buffer = s5p_mfc_set_dec_frame_buffer_v6,
	.set_enc_stream_buffer = s5p_mfc_set_enc_stream_buffer_v6,
	.set_enc_frame_buffer = s5p_mfc_set_enc_frame_buffer_v6,
	.get_enc_frame_buffer = s5p_mfc_get_enc_frame_buffer_v6,
	.set_enc_ref_buffer = s5p_mfc_set_enc_ref_buffer_v6,
	.init_decode = s5p_mfc_init_decode_v6,
	.init_encode = s5p_mfc_init_encode_v6,
	.encode_one_frame = s5p_mfc_encode_one_frame_v6,
	.try_run = s5p_mfc_try_run_v6,
	.cleanup_queue = s5p_mfc_cleanup_queue_v6,
	.clear_int_flags = s5p_mfc_clear_int_flags_v6,
	.write_info = s5p_mfc_write_info_v6,
	.read_info = s5p_mfc_read_info_v6,
	.get_dspl_y_adr = s5p_mfc_get_dspl_y_adr_v6,
	.get_dec_y_adr = s5p_mfc_get_dec_y_adr_v6,
	.get_dspl_status = s5p_mfc_get_dspl_status_v6,
	.get_dec_status = s5p_mfc_get_dec_status_v6,
	.get_dec_frame_type = s5p_mfc_get_dec_frame_type_v6,
	.get_disp_frame_type = s5p_mfc_get_disp_frame_type_v6,
	.get_consumed_stream = s5p_mfc_get_consumed_stream_v6,
	.get_int_reason = s5p_mfc_get_int_reason_v6,
	.get_int_err = s5p_mfc_get_int_err_v6,
	.err_dec = s5p_mfc_err_dec_v6,
	.err_dspl = s5p_mfc_err_dspl_v6,
	.get_img_width = s5p_mfc_get_img_width_v6,
	.get_img_height = s5p_mfc_get_img_height_v6,
	.get_dpb_count = s5p_mfc_get_dpb_count_v6,
	.get_mv_count = s5p_mfc_get_mv_count_v6,
	.get_inst_no = s5p_mfc_get_inst_no_v6,
	.get_enc_strm_size = s5p_mfc_get_enc_strm_size_v6,
	.get_enc_slice_type = s5p_mfc_get_enc_slice_type_v6,
	.get_enc_dpb_count = s5p_mfc_get_enc_dpb_count_v6,
	.get_enc_pic_count = s5p_mfc_get_enc_pic_count_v6,
	.get_sei_avail_status = s5p_mfc_get_sei_avail_status_v6,
	.get_mvc_num_views = s5p_mfc_get_mvc_num_views_v6,
	.get_mvc_view_id = s5p_mfc_get_mvc_view_id_v6,
	.get_pic_type_top = s5p_mfc_get_pic_type_top_v6,
	.get_pic_type_bot = s5p_mfc_get_pic_type_bot_v6,
	.get_crop_info_h = s5p_mfc_get_crop_info_h_v6,
	.get_crop_info_v = s5p_mfc_get_crop_info_v_v6,
};

/* Hand the v6 ops table to the version-independent core. */
struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void)
{
	return &s5p_mfc_ops_v6;
}
gpl-2.0
sakuramilk/sc06d_kernel_ics
drivers/video/pxa168fb.c
2387
21180
/*
 * linux/drivers/video/pxa168fb.c -- Marvell PXA168 LCD Controller
 *
 * Copyright (C) 2008 Marvell International Ltd.
 * All rights reserved.
 *
 * 2009-02-16 adapted from original version for PXA168/910
 * Jun Nie <njun@marvell.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <video/pxa168fb.h>

#include "pxa168fb.h"

#define DEFAULT_REFRESH		60	/* Hz */

/*
 * Map a user-supplied fb_var_screeninfo colour layout to one of the
 * controller's PIX_FMT_* codes, or -EINVAL if no hardware format matches.
 * Red/blue channel order is inferred from the relative channel offsets.
 */
static int determine_best_pix_fmt(struct fb_var_screeninfo *var)
{
	/*
	 * Pseudocolor mode?
	 */
	if (var->bits_per_pixel == 8)
		return PIX_FMT_PSEUDOCOLOR;

	/*
	 * Check for 565/1555.
	 */
	if (var->bits_per_pixel == 16 && var->red.length <= 5 &&
	    var->green.length <= 6 && var->blue.length <= 5) {
		if (var->transp.length == 0) {
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGB565;
			else
				return PIX_FMT_BGR565;
		}
		if (var->transp.length == 1 && var->green.length <= 5) {
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGB1555;
			else
				return PIX_FMT_BGR1555;
		}

		/* fall through */
	}

	/*
	 * Check for 888/A888.
	 */
	if (var->bits_per_pixel <= 32 && var->red.length <= 8 &&
	    var->green.length <= 8 && var->blue.length <= 8) {
		if (var->bits_per_pixel == 24 && var->transp.length == 0) {
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGB888PACK;
			else
				return PIX_FMT_BGR888PACK;
		}
		if (var->bits_per_pixel == 32 && var->transp.length == 8) {
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGBA888;
			else
				return PIX_FMT_BGRA888;
		} else {
			/* 32 bpp without alpha: unpacked 888. */
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGB888UNPACK;
			else
				return PIX_FMT_BGR888UNPACK;
		}
		/* fall through */
	}

	return -EINVAL;
}

/*
 * Fill var's per-channel offset/length fields for the given PIX_FMT_* code
 * (the inverse of determine_best_pix_fmt).
 */
static void set_pix_fmt(struct fb_var_screeninfo *var, int pix_fmt)
{
	switch (pix_fmt) {
	case PIX_FMT_RGB565:
		var->bits_per_pixel = 16;
		var->red.offset = 11;    var->red.length = 5;
		var->green.offset = 5;   var->green.length = 6;
		var->blue.offset = 0;    var->blue.length = 5;
		var->transp.offset = 0;  var->transp.length = 0;
		break;
	case PIX_FMT_BGR565:
		var->bits_per_pixel = 16;
		var->red.offset = 0;     var->red.length = 5;
		var->green.offset = 5;   var->green.length = 6;
		var->blue.offset = 11;   var->blue.length = 5;
		var->transp.offset = 0;  var->transp.length = 0;
		break;
	case PIX_FMT_RGB1555:
		var->bits_per_pixel = 16;
		var->red.offset = 10;    var->red.length = 5;
		var->green.offset = 5;   var->green.length = 5;
		var->blue.offset = 0;    var->blue.length = 5;
		var->transp.offset = 15; var->transp.length = 1;
		break;
	case PIX_FMT_BGR1555:
		var->bits_per_pixel = 16;
		var->red.offset = 0;     var->red.length = 5;
		var->green.offset = 5;   var->green.length = 5;
		var->blue.offset = 10;   var->blue.length = 5;
		var->transp.offset = 15; var->transp.length = 1;
		break;
	case PIX_FMT_RGB888PACK:
		var->bits_per_pixel = 24;
		var->red.offset = 16;    var->red.length = 8;
		var->green.offset = 8;   var->green.length = 8;
		var->blue.offset = 0;    var->blue.length = 8;
		var->transp.offset = 0;  var->transp.length = 0;
		break;
	case PIX_FMT_BGR888PACK:
		var->bits_per_pixel = 24;
		var->red.offset = 0;     var->red.length = 8;
		var->green.offset = 8;   var->green.length = 8;
		var->blue.offset = 16;   var->blue.length = 8;
		var->transp.offset = 0;  var->transp.length = 0;
		break;
	case PIX_FMT_RGBA888:
		var->bits_per_pixel = 32;
		var->red.offset = 16;    var->red.length = 8;
		var->green.offset = 8;   var->green.length = 8;
		var->blue.offset = 0;    var->blue.length = 8;
		var->transp.offset = 24; var->transp.length = 8;
		break;
	case PIX_FMT_BGRA888:
		var->bits_per_pixel = 32;
		var->red.offset = 0;     var->red.length = 8;
		var->green.offset = 8;   var->green.length = 8;
		var->blue.offset = 16;   var->blue.length = 8;
		var->transp.offset = 24; var->transp.length = 8;
		break;
	case PIX_FMT_PSEUDOCOLOR:
		var->bits_per_pixel = 8;
		var->red.offset = 0;     var->red.length = 8;
		var->green.offset = 0;   var->green.length = 8;
		var->blue.offset = 0;    var->blue.length = 8;
		var->transp.offset = 0;  var->transp.length = 0;
		break;
	}
}

/*
 * Populate @var from a platform-supplied videomode and pixel format.
 * When @ystretch is set, yres_virtual is stretched to cover all of the
 * framebuffer memory (for panning); otherwise it is clamped to yres.
 */
static void set_mode(struct pxa168fb_info *fbi, struct fb_var_screeninfo *var,
		     struct fb_videomode *mode, int pix_fmt, int ystretch)
{
	struct fb_info *info = fbi->info;

	set_pix_fmt(var, pix_fmt);

	var->xres = mode->xres;
	var->yres = mode->yres;
	var->xres_virtual = max(var->xres, var->xres_virtual);
	if (ystretch)
		var->yres_virtual = info->fix.smem_len /
			(var->xres_virtual * (var->bits_per_pixel >> 3));
	else
		var->yres_virtual = max(var->yres, var->yres_virtual);
	var->grayscale = 0;
	var->accel_flags = FB_ACCEL_NONE;
	var->pixclock = mode->pixclock;
	var->left_margin = mode->left_margin;
	var->right_margin = mode->right_margin;
	var->upper_margin = mode->upper_margin;
	var->lower_margin = mode->lower_margin;
	var->hsync_len = mode->hsync_len;
	var->vsync_len = mode->vsync_len;
	var->sync = mode->sync;
	var->vmode = FB_VMODE_NONINTERLACED;
	var->rotate = FB_ROTATE_UR;
}

/*
 * fbops .fb_check_var: validate/round a requested mode.  Also records the
 * matched pixel format in fbi->pix_fmt as a side effect.
 */
static int pxa168fb_check_var(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	int pix_fmt;

	/*
	 * Determine which pixel format we're going to use.
	 */
	pix_fmt = determine_best_pix_fmt(var);
	if (pix_fmt < 0)
		return pix_fmt;
	set_pix_fmt(var, pix_fmt);
	fbi->pix_fmt = pix_fmt;

	/*
	 * Basic geometry sanity checks.
	 */
	if (var->xoffset + var->xres > var->xres_virtual)
		return -EINVAL;
	if (var->yoffset + var->yres > var->yres_virtual)
		return -EINVAL;
	/* Hardware totals (active + porches + sync) are limited to 2048. */
	if (var->xres + var->right_margin +
	    var->hsync_len + var->left_margin > 2048)
		return -EINVAL;
	if (var->yres + var->lower_margin +
	    var->vsync_len + var->upper_margin > 2048)
		return -EINVAL;

	/*
	 * Check size of framebuffer.
	 */
	if (var->xres_virtual * var->yres_virtual *
	    (var->bits_per_pixel >> 3) > info->fix.smem_len)
		return -EINVAL;

	return 0;
}

/*
 * The hardware clock divider has an integer and a fractional
 * stage:
 *
 *	clk2 = clk_in / integer_divider
 *	clk_out = clk2 * (1 - (fractional_divider >> 12))
 *
 * Calculate integer and fractional divider for given clk_in
 * and clk_out.
 */
static void set_clock_divider(struct pxa168fb_info *fbi,
			      const struct fb_videomode *m)
{
	int divider_int;
	int needed_pixclk;
	u64 div_result;
	u32 x = 0;

	/*
	 * Notice: The field pixclock is used by linux fb
	 * is in pixel second. E.g. struct fb_videomode &
	 * struct fb_var_screeninfo
	 */

	/*
	 * Check input values.
	 */
	if (!m || !m->pixclock || !m->refresh) {
		dev_err(fbi->dev, "Input refresh or pixclock is wrong.\n");
		return;
	}

	/*
	 * Using PLL/AXI clock.
	 */
	x = 0x80000000;

	/*
	 * Calc divider according to refresh rate.
	 */
	div_result = 1000000000000ll;
	do_div(div_result, m->pixclock);
	needed_pixclk = (u32)div_result;

	divider_int = clk_get_rate(fbi->clk) / needed_pixclk;

	/* check whether divisor is too small. */
	if (divider_int < 2) {
		dev_warn(fbi->dev, "Warning: clock source is too slow."
			 "Try smaller resolution\n");
		divider_int = 2;
	}

	/*
	 * Set setting to reg.
	 */
	x |= divider_int;
	writel(x, fbi->reg_base + LCD_CFG_SCLK_DIV);
}

/* Program graphics-DMA enable, pixel format and RB-swap in DMA_CTRL0. */
static void set_dma_control0(struct pxa168fb_info *fbi)
{
	u32 x;

	/*
	 * Set bit to enable graphics DMA.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
	x &= ~CFG_GRA_ENA_MASK;
	x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);

	/*
	 * If we are in a pseudo-color mode, we need to enable
	 * palette lookup.
	 */
	if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR)
		x |= 0x10000000;

	/*
	 * Configure hardware pixel format.
	 */
	x &= ~(0xF << 16);
	x |= (fbi->pix_fmt >> 1) << 16;

	/*
	 * Check red and blue pixel swap.
	 * 1. source data swap
	 * 2. panel output data swap
	 */
	x &= ~(1 << 12);
	x |= ((fbi->pix_fmt & 1) ^ (fbi->panel_rbswap)) << 12;

	writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL0);
}

/* Program DMA trigger edge and power/alpha defaults in DMA_CTRL1. */
static void set_dma_control1(struct pxa168fb_info *fbi, int sync)
{
	u32 x;

	/*
	 * Configure default bits: vsync triggers DMA, gated clock
	 * enable, power save enable, configure alpha registers to
	 * display 100% graphics, and set pixel command.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL1);
	x |= 0x2032ff81;

	/*
	 * We trigger DMA on the falling edge of vsync if vsync is
	 * active low, or on the rising edge if vsync is active high.
	 */
	if (!(sync & FB_SYNC_VERT_HIGH_ACT))
		x |= 0x08000000;

	writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL1);
}

/* Point the graphics DMA engine at the (xoffset, yoffset) pan position. */
static void set_graphics_start(struct fb_info *info, int xoffset, int yoffset)
{
	struct pxa168fb_info *fbi = info->par;
	struct fb_var_screeninfo *var = &info->var;
	int pixel_offset;
	unsigned long addr;

	pixel_offset = (yoffset * var->xres_virtual) + xoffset;

	addr = fbi->fb_start_dma + (pixel_offset * (var->bits_per_pixel >> 3));
	writel(addr, fbi->reg_base + LCD_CFG_GRA_START_ADDR0);
}

/*
 * Program the dumb-panel control register from platform data plus the
 * current blanking state and sync polarities.  Only the enable bit of the
 * previous value is preserved.
 */
static void set_dumb_panel_control(struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	struct pxa168fb_mach_info *mi = fbi->dev->platform_data;
	u32 x;

	/*
	 * Preserve enable flag.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL) & 0x00000001;

	/* mode 0x7 disables output while blanked */
	x |= (fbi->is_blanked ? 0x7 : mi->dumb_mode) << 28;
	x |= mi->gpio_output_data << 20;
	x |= mi->gpio_output_mask << 12;
	x |= mi->panel_rgb_reverse_lanes ? 0x00000080 : 0;
	x |= mi->invert_composite_blank ? 0x00000040 : 0;
	x |= (info->var.sync & FB_SYNC_COMP_HIGH_ACT) ? 0x00000020 : 0;
	x |= mi->invert_pix_val_ena ? 0x00000010 : 0;
	x |= (info->var.sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 0x00000008;
	x |= (info->var.sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 0x00000004;
	x |= mi->invert_pixclock ? 0x00000002 : 0;

	writel(x, fbi->reg_base + LCD_SPU_DUMB_CTRL);
}

/* Write total (active + blanking) screen dimensions to the controller. */
static void set_dumb_screen_dimensions(struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	struct fb_var_screeninfo *v = &info->var;
	int x;
	int y;

	x = v->xres + v->right_margin + v->hsync_len + v->left_margin;
	y = v->yres + v->lower_margin + v->vsync_len + v->upper_margin;

	writel((y << 16) | x, fbi->reg_base + LCD_SPUT_V_H_TOTAL);
}

/*
 * fbops .fb_set_par: apply info->var to the hardware.  The panel output is
 * disabled around reprogramming and re-enabled at the end.
 */
static int pxa168fb_set_par(struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct fb_videomode mode;
	u32 x;
	struct pxa168fb_mach_info *mi;

	mi = fbi->dev->platform_data;

	/*
	 * Set additional mode info.
	 */
	if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR)
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
	else
		info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
	info->fix.ypanstep = var->yres;

	/*
	 * Disable panel output while we setup the display.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL);
	writel(x & ~1, fbi->reg_base + LCD_SPU_DUMB_CTRL);

	/*
	 * Configure global panel parameters.
	 */
	writel((var->yres << 16) | var->xres,
	       fbi->reg_base + LCD_SPU_V_H_ACTIVE);

	/*
	 * convet var to video mode
	 */
	fb_var_to_videomode(&mode, &info->var);

	/* Calculate clock divisor. */
	set_clock_divider(fbi, &mode);

	/* Configure dma ctrl regs. */
	set_dma_control0(fbi);
	set_dma_control1(fbi, info->var.sync);

	/*
	 * Configure graphics DMA parameters.
	 */
	x = readl(fbi->reg_base + LCD_CFG_GRA_PITCH);
	x = (x & ~0xFFFF) | ((var->xres_virtual * var->bits_per_pixel) >> 3);
	writel(x, fbi->reg_base + LCD_CFG_GRA_PITCH);
	writel((var->yres << 16) | var->xres,
	       fbi->reg_base + LCD_SPU_GRA_HPXL_VLN);
	writel((var->yres << 16) | var->xres,
	       fbi->reg_base + LCD_SPU_GZM_HPXL_VLN);

	/*
	 * Configure dumb panel ctrl regs & timings.
	 */
	set_dumb_panel_control(info);
	set_dumb_screen_dimensions(info);

	writel((var->left_margin << 16) | var->right_margin,
	       fbi->reg_base + LCD_SPU_H_PORCH);
	writel((var->upper_margin << 16) | var->lower_margin,
	       fbi->reg_base + LCD_SPU_V_PORCH);

	/*
	 * Re-enable panel output.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL);
	writel(x | 1, fbi->reg_base + LCD_SPU_DUMB_CTRL);

	return 0;
}

/* Scale a 16-bit colour channel into its bitfield position within a pixel. */
static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf)
{
	return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
}

/* Pack 16-bit-per-channel RGB into the 888 value the palette SRAM expects. */
static u32 to_rgb(u16 red, u16 green, u16 blue)
{
	red >>= 8;
	green >>= 8;
	blue >>= 8;

	return (red << 16) | (green << 8) | blue;
}

/*
 * fbops .fb_setcolreg: fill the 16-entry pseudo palette (truecolor) and/or
 * the 256-entry hardware palette SRAM (pseudocolor).
 */
static int
pxa168fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
		   unsigned int blue, unsigned int trans, struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	u32 val;

	if (info->var.grayscale)
		red = green = blue = (19595 * red + 38470 * green +
				      7471 * blue) >> 16;

	if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) {
		val =  chan_to_field(red,   &info->var.red);
		val |= chan_to_field(green, &info->var.green);
		val |= chan_to_field(blue , &info->var.blue);
		fbi->pseudo_palette[regno] = val;
	}

	if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) {
		val = to_rgb(red, green, blue);
		writel(val, fbi->reg_base + LCD_SPU_SRAM_WRDAT);
		/* 0x8300 | regno = palette-write command for this entry */
		writel(0x8300 | regno, fbi->reg_base + LCD_SPU_SRAM_CTRL);
	}

	return 0;
}

/* fbops .fb_blank: record blank state and reprogram the dumb panel ctrl. */
static int pxa168fb_blank(int blank, struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;

	fbi->is_blanked = (blank == FB_BLANK_UNBLANK) ? 0 : 1;
	set_dumb_panel_control(info);

	return 0;
}

/* fbops .fb_pan_display: retarget the DMA start address. */
static int pxa168fb_pan_display(struct fb_var_screeninfo *var,
				struct fb_info *info)
{
	set_graphics_start(info, var->xoffset, var->yoffset);

	return 0;
}

/* Frame IRQ handler: acknowledge the graphics frame-done interrupt. */
static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id)
{
	struct pxa168fb_info *fbi = dev_id;
	u32 isr = readl(fbi->reg_base + SPU_IRQ_ISR);

	if ((isr & GRA_FRAME_IRQ0_ENA_MASK)) {

		/* wait for vsync interrupt */
		writel(isr & (~GRA_FRAME_IRQ0_ENA_MASK),
		       fbi->reg_base + SPU_IRQ_ISR);

		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static struct fb_ops pxa168fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= pxa168fb_check_var,
	.fb_set_par	= pxa168fb_set_par,
	.fb_setcolreg	= pxa168fb_setcolreg,
	.fb_blank	= pxa168fb_blank,
	.fb_pan_display	= pxa168fb_pan_display,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};

/*
 * Pick the best matching video mode from the mode list, size the virtual
 * resolution to the available framebuffer memory, and recompute pixclock
 * for the default refresh rate.
 */
static int __devinit pxa168fb_init_mode(struct fb_info *info,
					struct pxa168fb_mach_info *mi)
{
	struct pxa168fb_info *fbi = info->par;
	struct fb_var_screeninfo *var = &info->var;
	int ret = 0;
	u32 total_w, total_h, refresh;
	u64 div_result;
	const struct fb_videomode *m;

	/*
	 * Set default value
	 */
	refresh = DEFAULT_REFRESH;

	/* try to find best video mode. */
	m = fb_find_best_mode(&info->var, &info->modelist);
	if (m)
		fb_videomode_to_var(&info->var, m);

	/* Init settings. */
	var->xres_virtual = var->xres;
	var->yres_virtual = info->fix.smem_len /
		(var->xres_virtual * (var->bits_per_pixel >> 3));
	dev_dbg(fbi->dev, "pxa168fb: find best mode: res = %dx%d\n",
		var->xres, var->yres);

	/* correct pixclock.
*/ total_w = var->xres + var->left_margin + var->right_margin + var->hsync_len; total_h = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; div_result = 1000000000000ll; do_div(div_result, total_w * total_h * refresh); var->pixclock = (u32)div_result; return ret; } static int __devinit pxa168fb_probe(struct platform_device *pdev) { struct pxa168fb_mach_info *mi; struct fb_info *info = 0; struct pxa168fb_info *fbi = 0; struct resource *res; struct clk *clk; int irq, ret; mi = pdev->dev.platform_data; if (mi == NULL) { dev_err(&pdev->dev, "no platform data defined\n"); return -EINVAL; } clk = clk_get(&pdev->dev, "LCDCLK"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "unable to get LCDCLK"); return PTR_ERR(clk); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no IO memory defined\n"); ret = -ENOENT; goto failed_put_clk; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no IRQ defined\n"); ret = -ENOENT; goto failed_put_clk; } info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev); if (info == NULL) { ret = -ENOMEM; goto failed_put_clk; } /* Initialize private data */ fbi = info->par; fbi->info = info; fbi->clk = clk; fbi->dev = info->dev = &pdev->dev; fbi->panel_rbswap = mi->panel_rbswap; fbi->is_blanked = 0; fbi->active = mi->active; /* * Initialise static fb parameters. */ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN; info->node = -1; strlcpy(info->fix.id, mi->id, 16); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.type_aux = 0; info->fix.xpanstep = 0; info->fix.ypanstep = 0; info->fix.ywrapstep = 0; info->fix.mmio_start = res->start; info->fix.mmio_len = res->end - res->start + 1; info->fix.accel = FB_ACCEL_NONE; info->fbops = &pxa168fb_ops; info->pseudo_palette = fbi->pseudo_palette; /* * Map LCD controller registers. 
*/ fbi->reg_base = ioremap_nocache(res->start, resource_size(res)); if (fbi->reg_base == NULL) { ret = -ENOMEM; goto failed_free_info; } /* * Allocate framebuffer memory. */ info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE); info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len, &fbi->fb_start_dma, GFP_KERNEL); if (info->screen_base == NULL) { ret = -ENOMEM; goto failed_free_info; } info->fix.smem_start = (unsigned long)fbi->fb_start_dma; set_graphics_start(info, 0, 0); /* * Set video mode according to platform data. */ set_mode(fbi, &info->var, mi->modes, mi->pix_fmt, 1); fb_videomode_to_modelist(mi->modes, mi->num_modes, &info->modelist); /* * init video mode data. */ pxa168fb_init_mode(info, mi); /* * Fill in sane defaults. */ ret = pxa168fb_check_var(&info->var, info); if (ret) goto failed_free_fbmem; /* * enable controller clock */ clk_enable(fbi->clk); pxa168fb_set_par(info); /* * Configure default register values. */ writel(0, fbi->reg_base + LCD_SPU_BLANKCOLOR); writel(mi->io_pin_allocation_mode, fbi->reg_base + SPU_IOPAD_CONTROL); writel(0, fbi->reg_base + LCD_CFG_GRA_START_ADDR1); writel(0, fbi->reg_base + LCD_SPU_GRA_OVSA_HPXL_VLN); writel(0, fbi->reg_base + LCD_SPU_SRAM_PARA0); writel(CFG_CSB_256x32(0x1)|CFG_CSB_256x24(0x1)|CFG_CSB_256x8(0x1), fbi->reg_base + LCD_SPU_SRAM_PARA1); /* * Allocate color map. */ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { ret = -ENOMEM; goto failed_free_clk; } /* * Register irq handler. */ ret = request_irq(irq, pxa168fb_handle_irq, IRQF_SHARED, info->fix.id, fbi); if (ret < 0) { dev_err(&pdev->dev, "unable to request IRQ\n"); ret = -ENXIO; goto failed_free_cmap; } /* * Enable GFX interrupt */ writel(GRA_FRAME_IRQ0_ENA(0x1), fbi->reg_base + SPU_IRQ_ENA); /* * Register framebuffer. 
*/ ret = register_framebuffer(info); if (ret < 0) { dev_err(&pdev->dev, "Failed to register pxa168-fb: %d\n", ret); ret = -ENXIO; goto failed_free_irq; } platform_set_drvdata(pdev, fbi); return 0; failed_free_irq: free_irq(irq, fbi); failed_free_cmap: fb_dealloc_cmap(&info->cmap); failed_free_clk: clk_disable(fbi->clk); failed_free_fbmem: dma_free_coherent(fbi->dev, info->fix.smem_len, info->screen_base, fbi->fb_start_dma); failed_free_info: kfree(info); failed_put_clk: clk_put(clk); dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret); return ret; } static int __devexit pxa168fb_remove(struct platform_device *pdev) { struct pxa168fb_info *fbi = platform_get_drvdata(pdev); struct fb_info *info; int irq; unsigned int data; if (!fbi) return 0; /* disable DMA transfer */ data = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); data &= ~CFG_GRA_ENA_MASK; writel(data, fbi->reg_base + LCD_SPU_DMA_CTRL0); info = fbi->info; unregister_framebuffer(info); writel(GRA_FRAME_IRQ0_ENA(0x0), fbi->reg_base + SPU_IRQ_ENA); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); irq = platform_get_irq(pdev, 0); free_irq(irq, fbi); dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len), info->screen_base, info->fix.smem_start); iounmap(fbi->reg_base); clk_disable(fbi->clk); clk_put(fbi->clk); framebuffer_release(info); return 0; } static struct platform_driver pxa168fb_driver = { .driver = { .name = "pxa168-fb", .owner = THIS_MODULE, }, .probe = pxa168fb_probe, .remove = __devexit_p(pxa168fb_remove), }; static int __init pxa168fb_init(void) { return platform_driver_register(&pxa168fb_driver); } module_init(pxa168fb_init); static void __exit pxa168fb_exit(void) { platform_driver_unregister(&pxa168fb_driver); } module_exit(pxa168fb_exit); MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> " "Green Wan <gwan@marvell.com>"); MODULE_DESCRIPTION("Framebuffer driver for PXA168/910"); MODULE_LICENSE("GPL");
gpl-2.0
whodunnit/AK-OnePlusOne-CM
arch/x86/mm/srat.c
4691
4594
/* * ACPI 3.0 based NUMA setup * Copyright 2004 Andi Kleen, SuSE Labs. * * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs. * * Called from acpi_numa_init while reading the SRAT and SLIT tables. * Assumes all memory regions belonging to a single proximity domain * are in one chunk. Holes between them will be included in the node. */ #include <linux/kernel.h> #include <linux/acpi.h> #include <linux/mmzone.h> #include <linux/bitmap.h> #include <linux/module.h> #include <linux/topology.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/mm.h> #include <asm/proto.h> #include <asm/numa.h> #include <asm/e820.h> #include <asm/apic.h> #include <asm/uv/uv.h> int acpi_numa __initdata; static __init int setup_node(int pxm) { return acpi_map_pxm_to_node(pxm); } static __init void bad_srat(void) { printk(KERN_ERR "SRAT: SRAT not used.\n"); acpi_numa = -1; } static __init inline int srat_disabled(void) { return acpi_numa < 0; } /* Callback for SLIT parsing */ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) { int i, j; for (i = 0; i < slit->locality_count; i++) for (j = 0; j < slit->locality_count; j++) numa_set_distance(pxm_to_node(i), pxm_to_node(j), slit->entry[slit->locality_count * i + j]); } /* Callback for Proximity Domain -> x2APIC mapping */ void __init acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) { int pxm, node; int apic_id; if (srat_disabled()) return; if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) { bad_srat(); return; } if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) return; pxm = pa->proximity_domain; apic_id = pa->apic_id; if (!apic->apic_id_valid(apic_id)) { printk(KERN_INFO "SRAT: PXM %u -> X2APIC 0x%04x ignored\n", pxm, apic_id); return; } node = setup_node(pxm); if (node < 0) { printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm); bad_srat(); return; } if (apic_id >= MAX_LOCAL_APIC) { printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u 
skipped apicid that is too big\n", pxm, apic_id, node); return; } set_apicid_to_node(apic_id, node); node_set(node, numa_nodes_parsed); acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", pxm, apic_id, node); } /* Callback for Proximity Domain -> LAPIC mapping */ void __init acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { int pxm, node; int apic_id; if (srat_disabled()) return; if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) { bad_srat(); return; } if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) return; pxm = pa->proximity_domain_lo; if (acpi_srat_revision >= 2) pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8; node = setup_node(pxm); if (node < 0) { printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm); bad_srat(); return; } if (get_uv_system_type() >= UV_X2APIC) apic_id = (pa->apic_id << 8) | pa->local_sapic_eid; else apic_id = pa->apic_id; if (apic_id >= MAX_LOCAL_APIC) { printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); return; } set_apicid_to_node(apic_id, node); node_set(node, numa_nodes_parsed); acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", pxm, apic_id, node); } #ifdef CONFIG_MEMORY_HOTPLUG static inline int save_add_info(void) {return 1;} #else static inline int save_add_info(void) {return 0;} #endif /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ void __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { u64 start, end; int node, pxm; if (srat_disabled()) return; if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) { bad_srat(); return; } if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) return; if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) return; start = ma->base_address; end = start + ma->length; pxm = ma->proximity_domain; if (acpi_srat_revision <= 1) pxm &= 0xff; node = setup_node(pxm); if (node < 0) { 
printk(KERN_ERR "SRAT: Too many proximity domains.\n"); bad_srat(); return; } if (numa_add_memblk(node, start, end) < 0) { bad_srat(); return; } printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm, start, end); } void __init acpi_numa_arch_fixup(void) {} int __init x86_acpi_numa_init(void) { int ret; ret = acpi_numa_init(); if (ret < 0) return ret; return srat_disabled() ? -EINVAL : 0; }
gpl-2.0
donkeykang/donkeyk
drivers/isdn/i4l/isdn_ppp.c
4691
79856
/* $Id: isdn_ppp.c,v 1.1.2.3 2004/02/10 01:07:13 keil Exp $ * * Linux ISDN subsystem, functions for synchronous PPP (linklevel). * * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/isdn.h> #include <linux/poll.h> #include <linux/ppp-comp.h> #include <linux/slab.h> #ifdef CONFIG_IPPP_FILTER #include <linux/filter.h> #endif #include "isdn_common.h" #include "isdn_ppp.h" #include "isdn_net.h" #ifndef PPP_IPX #define PPP_IPX 0x002b #endif /* Prototypes */ static int isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot); static int isdn_ppp_closewait(int slot); static void isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto); static int isdn_ppp_if_get_unit(char *namebuf); static int isdn_ppp_set_compressor(struct ippp_struct *is, struct isdn_ppp_comp_data *); static struct sk_buff *isdn_ppp_decompress(struct sk_buff *, struct ippp_struct *, struct ippp_struct *, int *proto); static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto); static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in, int *proto, struct ippp_struct *is, struct ippp_struct *master, int type); static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb); /* New CCP stuff */ static void isdn_ppp_ccp_kickup(struct ippp_struct *is); static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto, unsigned char code, unsigned char id, unsigned char *data, int len); static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is); static void isdn_ppp_ccp_reset_free(struct ippp_struct *is); static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is, unsigned char id); static void isdn_ppp_ccp_timer_callback(unsigned long closure); static struct 
ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is, unsigned char id); static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is, struct isdn_ppp_resetparams *rp); static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is, unsigned char id); #ifdef CONFIG_ISDN_MPP static ippp_bundle *isdn_ppp_bundle_arr = NULL; static int isdn_ppp_mp_bundle_array_init(void); static int isdn_ppp_mp_init(isdn_net_local *lp, ippp_bundle *add_to); static void isdn_ppp_mp_receive(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb); static void isdn_ppp_mp_cleanup(isdn_net_local *lp); static int isdn_ppp_bundle(struct ippp_struct *, int unit); #endif /* CONFIG_ISDN_MPP */ char *isdn_ppp_revision = "$Revision: 1.1.2.3 $"; static struct ippp_struct *ippp_table[ISDN_MAX_CHANNELS]; static struct isdn_ppp_compressor *ipc_head = NULL; /* * frame log (debug) */ static void isdn_ppp_frame_log(char *info, char *data, int len, int maxlen, int unit, int slot) { int cnt, j, i; char buf[80]; if (len < maxlen) maxlen = len; for (i = 0, cnt = 0; cnt < maxlen; i++) { for (j = 0; j < 16 && cnt < maxlen; j++, cnt++) sprintf(buf + j * 3, "%02x ", (unsigned char)data[cnt]); printk(KERN_DEBUG "[%d/%d].%s[%d]: %s\n", unit, slot, info, i, buf); } } /* * unbind isdn_net_local <=> ippp-device * note: it can happen, that we hangup/free the master before the slaves * in this case we bind another lp to the master device */ int isdn_ppp_free(isdn_net_local *lp) { struct ippp_struct *is; if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return 0; } #ifdef CONFIG_ISDN_MPP spin_lock(&lp->netdev->pb->lock); #endif isdn_net_rm_from_bundle(lp); #ifdef CONFIG_ISDN_MPP if (lp->netdev->pb->ref_ct == 1) /* last link in queue? 
*/
		isdn_ppp_mp_cleanup(lp);
	lp->netdev->pb->ref_ct--;
	spin_unlock(&lp->netdev->pb->lock);
#endif /* CONFIG_ISDN_MPP */
	/* re-validate ppp_slot before using it as an ippp_table index */
	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: ppp_slot(%d) now invalid\n",
		       __func__, lp->ppp_slot);
		return 0;
	}
	is = ippp_table[lp->ppp_slot];
	if ((is->state & IPPP_CONNECT))
		isdn_ppp_closewait(lp->ppp_slot);	/* force wakeup on ippp device */
	else if (is->state & IPPP_ASSIGNED)
		is->state = IPPP_OPEN;	/* fallback to 'OPEN but not ASSIGNED' state */
	if (is->debug & 0x1)
		printk(KERN_DEBUG "isdn_ppp_free %d %lx %lx\n",
		       lp->ppp_slot, (long) lp, (long) is->lp);
	/* sever the link <-> ippp-device association in both directions */
	is->lp = NULL;		/* link is down .. set lp to NULL */
	lp->ppp_slot = -1;	/* is this OK ?? */
	return 0;
}

/*
 * bind isdn_net_local <=> ippp-device
 *
 * Picks an ippp_table slot for @lp: either any slot in plain IPPP_OPEN
 * state (when lp->pppbind < 0), skipping minors that some other net
 * device has claimed exclusively, or exactly the slot whose minor
 * matches lp->pppbind.  On success the slot is moved to
 * IPPP_OPEN | IPPP_ASSIGNED (see the continuation of this function).
 *
 * This function is allways called with holding dev->lock so
 * no additional lock is needed
 */
int
isdn_ppp_bind(isdn_net_local *lp)
{
	int i;
	int unit = 0;
	struct ippp_struct *is;
	int retval;

	if (lp->pppbind < 0) {	/* device bounded to ippp device ? */
		/* 'dev' here is presumably the global ISDN device from
		 * isdn_common.h — TODO confirm */
		isdn_net_dev *net_dev = dev->netdev;
		char exclusive[ISDN_MAX_CHANNELS];	/* exclusive flags */
		memset(exclusive, 0, ISDN_MAX_CHANNELS);
		/* NOTE(review): the loop variable below shadows the 'lp'
		 * parameter; intentional but easy to misread. */
		while (net_dev) {	/* step through net devices to find exclusive minors */
			isdn_net_local *lp = net_dev->local;
			if (lp->pppbind >= 0)
				exclusive[lp->pppbind] = 1;
			net_dev = net_dev->next;
		}
		/*
		 * search a free device / slot
		 */
		for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
			if (ippp_table[i]->state == IPPP_OPEN && !exclusive[ippp_table[i]->minor]) {	/* OPEN, but not connected! */
				break;
			}
		}
	} else {
		/* explicit binding: find the slot with the requested minor */
		for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
			if (ippp_table[i]->minor == lp->pppbind &&
			    (ippp_table[i]->state & IPPP_OPEN) == IPPP_OPEN)
				break;
		}
	}

	if (i >= ISDN_MAX_CHANNELS) {
		printk(KERN_WARNING "isdn_ppp_bind: Can't find a (free) connection to the ipppd daemon.\n");
		retval = -1;
		goto out;
	}
	/* get unit number from interface name .. ugly!
*/ unit = isdn_ppp_if_get_unit(lp->netdev->dev->name); if (unit < 0) { printk(KERN_ERR "isdn_ppp_bind: illegal interface name %s.\n", lp->netdev->dev->name); retval = -1; goto out; } lp->ppp_slot = i; is = ippp_table[i]; is->lp = lp; is->unit = unit; is->state = IPPP_OPEN | IPPP_ASSIGNED; /* assigned to a netdevice but not connected */ #ifdef CONFIG_ISDN_MPP retval = isdn_ppp_mp_init(lp, NULL); if (retval < 0) goto out; #endif /* CONFIG_ISDN_MPP */ retval = lp->ppp_slot; out: return retval; } /* * kick the ipppd on the device * (wakes up daemon after B-channel connect) */ void isdn_ppp_wakeup_daemon(isdn_net_local *lp) { if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return; } ippp_table[lp->ppp_slot]->state = IPPP_OPEN | IPPP_CONNECT | IPPP_NOBLOCK; wake_up_interruptible(&ippp_table[lp->ppp_slot]->wq); } /* * there was a hangup on the netdevice * force wakeup of the ippp device * go into 'device waits for release' state */ static int isdn_ppp_closewait(int slot) { struct ippp_struct *is; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: slot(%d) out of range\n", __func__, slot); return 0; } is = ippp_table[slot]; if (is->state) wake_up_interruptible(&is->wq); is->state = IPPP_CLOSEWAIT; return 1; } /* * isdn_ppp_find_slot / isdn_ppp_free_slot */ static int isdn_ppp_get_slot(void) { int i; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (!ippp_table[i]->state) return i; } return -1; } /* * isdn_ppp_open */ int isdn_ppp_open(int min, struct file *file) { int slot; struct ippp_struct *is; if (min < 0 || min >= ISDN_MAX_CHANNELS) return -ENODEV; slot = isdn_ppp_get_slot(); if (slot < 0) { return -EBUSY; } is = file->private_data = ippp_table[slot]; printk(KERN_DEBUG "ippp, open, slot: %d, minor: %d, state: %04x\n", slot, min, is->state); /* compression stuff */ is->link_compressor = is->compressor = NULL; is->link_decompressor = is->decompressor = NULL; 
is->link_comp_stat = is->comp_stat = NULL; is->link_decomp_stat = is->decomp_stat = NULL; is->compflags = 0; is->reset = isdn_ppp_ccp_reset_alloc(is); is->lp = NULL; is->mp_seqno = 0; /* MP sequence number */ is->pppcfg = 0; /* ppp configuration */ is->mpppcfg = 0; /* mppp configuration */ is->last_link_seqno = -1; /* MP: maybe set to Bundle-MIN, when joining a bundle ?? */ is->unit = -1; /* set, when we have our interface */ is->mru = 1524; /* MRU, default 1524 */ is->maxcid = 16; /* VJ: maxcid */ is->tk = current; init_waitqueue_head(&is->wq); is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */ is->last = is->rq; is->minor = min; #ifdef CONFIG_ISDN_PPP_VJ /* * VJ header compression init */ is->slcomp = slhc_init(16, 16); /* not necessary for 2. link in bundle */ #endif #ifdef CONFIG_IPPP_FILTER is->pass_filter = NULL; is->active_filter = NULL; #endif is->state = IPPP_OPEN; return 0; } /* * release ippp device */ void isdn_ppp_release(int min, struct file *file) { int i; struct ippp_struct *is; if (min < 0 || min >= ISDN_MAX_CHANNELS) return; is = file->private_data; if (!is) { printk(KERN_ERR "%s: no file->private_data\n", __func__); return; } if (is->debug & 0x1) printk(KERN_DEBUG "ippp: release, minor: %d %lx\n", min, (long) is->lp); if (is->lp) { /* a lp address says: this link is still up */ isdn_net_dev *p = is->lp->netdev; if (!p) { printk(KERN_ERR "%s: no lp->netdev\n", __func__); return; } is->state &= ~IPPP_CONNECT; /* -> effect: no call of wakeup */ /* * isdn_net_hangup() calls isdn_ppp_free() * isdn_ppp_free() sets is->lp to NULL and lp->ppp_slot to -1 * removing the IPPP_CONNECT flag omits calling of isdn_ppp_wakeup_daemon() */ isdn_net_hangup(p->dev); } for (i = 0; i < NUM_RCV_BUFFS; i++) { kfree(is->rq[i].buf); is->rq[i].buf = NULL; } is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */ is->last = is->rq; #ifdef CONFIG_ISDN_PPP_VJ /* TODO: if this was the previous master: link the slcomp to the new master */ slhc_free(is->slcomp); 
is->slcomp = NULL; #endif #ifdef CONFIG_IPPP_FILTER kfree(is->pass_filter); is->pass_filter = NULL; kfree(is->active_filter); is->active_filter = NULL; #endif /* TODO: if this was the previous master: link the stuff to the new master */ if (is->comp_stat) is->compressor->free(is->comp_stat); if (is->link_comp_stat) is->link_compressor->free(is->link_comp_stat); if (is->link_decomp_stat) is->link_decompressor->free(is->link_decomp_stat); if (is->decomp_stat) is->decompressor->free(is->decomp_stat); is->compressor = is->link_compressor = NULL; is->decompressor = is->link_decompressor = NULL; is->comp_stat = is->link_comp_stat = NULL; is->decomp_stat = is->link_decomp_stat = NULL; /* Clean up if necessary */ if (is->reset) isdn_ppp_ccp_reset_free(is); /* this slot is ready for new connections */ is->state = 0; } /* * get_arg .. ioctl helper */ static int get_arg(void __user *b, void *val, int len) { if (len <= 0) len = sizeof(void *); if (copy_from_user(val, b, len)) return -EFAULT; return 0; } /* * set arg .. 
ioctl helper */ static int set_arg(void __user *b, void *val, int len) { if (len <= 0) len = sizeof(void *); if (copy_to_user(b, val, len)) return -EFAULT; return 0; } #ifdef CONFIG_IPPP_FILTER static int get_filter(void __user *arg, struct sock_filter **p) { struct sock_fprog uprog; struct sock_filter *code = NULL; int len, err; if (copy_from_user(&uprog, arg, sizeof(uprog))) return -EFAULT; if (!uprog.len) { *p = NULL; return 0; } /* uprog.len is unsigned short, so no overflow here */ len = uprog.len * sizeof(struct sock_filter); code = memdup_user(uprog.filter, len); if (IS_ERR(code)) return PTR_ERR(code); err = sk_chk_filter(code, uprog.len); if (err) { kfree(code); return err; } *p = code; return uprog.len; } #endif /* CONFIG_IPPP_FILTER */ /* * ippp device ioctl */ int isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) { unsigned long val; int r, i, j; struct ippp_struct *is; isdn_net_local *lp; struct isdn_ppp_comp_data data; void __user *argp = (void __user *)arg; is = file->private_data; lp = is->lp; if (is->debug & 0x1) printk(KERN_DEBUG "isdn_ppp_ioctl: minor: %d cmd: %x state: %x\n", min, cmd, is->state); if (!(is->state & IPPP_OPEN)) return -EINVAL; switch (cmd) { case PPPIOCBUNDLE: #ifdef CONFIG_ISDN_MPP if (!(is->state & IPPP_CONNECT)) return -EINVAL; if ((r = get_arg(argp, &val, sizeof(val)))) return r; printk(KERN_DEBUG "iPPP-bundle: minor: %d, slave unit: %d, master unit: %d\n", (int) min, (int) is->unit, (int) val); return isdn_ppp_bundle(is, val); #else return -1; #endif break; case PPPIOCGUNIT: /* get ppp/isdn unit number */ if ((r = set_arg(argp, &is->unit, sizeof(is->unit)))) return r; break; case PPPIOCGIFNAME: if (!lp) return -EINVAL; if ((r = set_arg(argp, lp->netdev->dev->name, strlen(lp->netdev->dev->name)))) return r; break; case PPPIOCGMPFLAGS: /* get configuration flags */ if ((r = set_arg(argp, &is->mpppcfg, sizeof(is->mpppcfg)))) return r; break; case PPPIOCSMPFLAGS: /* set configuration flags */ if ((r 
= get_arg(argp, &val, sizeof(val)))) return r; is->mpppcfg = val; break; case PPPIOCGFLAGS: /* get configuration flags */ if ((r = set_arg(argp, &is->pppcfg, sizeof(is->pppcfg)))) return r; break; case PPPIOCSFLAGS: /* set configuration flags */ if ((r = get_arg(argp, &val, sizeof(val)))) { return r; } if (val & SC_ENABLE_IP && !(is->pppcfg & SC_ENABLE_IP) && (is->state & IPPP_CONNECT)) { if (lp) { /* OK .. we are ready to send buffers */ is->pppcfg = val; /* isdn_ppp_xmit test for SC_ENABLE_IP !!! */ netif_wake_queue(lp->netdev->dev); break; } } is->pppcfg = val; break; case PPPIOCGIDLE: /* get idle time information */ if (lp) { struct ppp_idle pidle; pidle.xmit_idle = pidle.recv_idle = lp->huptimer; if ((r = set_arg(argp, &pidle, sizeof(struct ppp_idle)))) return r; } break; case PPPIOCSMRU: /* set receive unit size for PPP */ if ((r = get_arg(argp, &val, sizeof(val)))) return r; is->mru = val; break; case PPPIOCSMPMRU: break; case PPPIOCSMPMTU: break; case PPPIOCSMAXCID: /* set the maximum compression slot id */ if ((r = get_arg(argp, &val, sizeof(val)))) return r; val++; if (is->maxcid != val) { #ifdef CONFIG_ISDN_PPP_VJ struct slcompress *sltmp; #endif if (is->debug & 0x1) printk(KERN_DEBUG "ippp, ioctl: changed MAXCID to %ld\n", val); is->maxcid = val; #ifdef CONFIG_ISDN_PPP_VJ sltmp = slhc_init(16, val); if (!sltmp) { printk(KERN_ERR "ippp, can't realloc slhc struct\n"); return -ENOMEM; } if (is->slcomp) slhc_free(is->slcomp); is->slcomp = sltmp; #endif } break; case PPPIOCGDEBUG: if ((r = set_arg(argp, &is->debug, sizeof(is->debug)))) return r; break; case PPPIOCSDEBUG: if ((r = get_arg(argp, &val, sizeof(val)))) return r; is->debug = val; break; case PPPIOCGCOMPRESSORS: { unsigned long protos[8] = {0,}; struct isdn_ppp_compressor *ipc = ipc_head; while (ipc) { j = ipc->num / (sizeof(long) * 8); i = ipc->num % (sizeof(long) * 8); if (j < 8) protos[j] |= (0x1 << i); ipc = ipc->next; } if ((r = set_arg(argp, protos, 8 * sizeof(long)))) return r; } break; case 
PPPIOCSCOMPRESSOR: if ((r = get_arg(argp, &data, sizeof(struct isdn_ppp_comp_data)))) return r; return isdn_ppp_set_compressor(is, &data); case PPPIOCGCALLINFO: { struct pppcallinfo pci; memset((char *)&pci, 0, sizeof(struct pppcallinfo)); if (lp) { strncpy(pci.local_num, lp->msn, 63); if (lp->dial) { strncpy(pci.remote_num, lp->dial->num, 63); } pci.charge_units = lp->charge; if (lp->outgoing) pci.calltype = CALLTYPE_OUTGOING; else pci.calltype = CALLTYPE_INCOMING; if (lp->flags & ISDN_NET_CALLBACK) pci.calltype |= CALLTYPE_CALLBACK; } return set_arg(argp, &pci, sizeof(struct pppcallinfo)); } #ifdef CONFIG_IPPP_FILTER case PPPIOCSPASS: { struct sock_filter *code; int len = get_filter(argp, &code); if (len < 0) return len; kfree(is->pass_filter); is->pass_filter = code; is->pass_len = len; break; } case PPPIOCSACTIVE: { struct sock_filter *code; int len = get_filter(argp, &code); if (len < 0) return len; kfree(is->active_filter); is->active_filter = code; is->active_len = len; break; } #endif /* CONFIG_IPPP_FILTER */ default: break; } return 0; } unsigned int isdn_ppp_poll(struct file *file, poll_table *wait) { u_int mask; struct ippp_buf_queue *bf, *bl; u_long flags; struct ippp_struct *is; is = file->private_data; if (is->debug & 0x2) printk(KERN_DEBUG "isdn_ppp_poll: minor: %d\n", iminor(file->f_path.dentry->d_inode)); /* just registers wait_queue hook. This doesn't really wait. */ poll_wait(file, &is->wq, wait); if (!(is->state & IPPP_OPEN)) { if (is->state == IPPP_CLOSEWAIT) return POLLHUP; printk(KERN_DEBUG "isdn_ppp: device not open\n"); return POLLERR; } /* we're always ready to send .. 
*/
	mask = POLLOUT | POLLWRNORM;

	spin_lock_irqsave(&is->buflock, flags);
	bl = is->last;
	bf = is->first;
	/*
	 * if IPPP_NOBLOCK is set we return even if we have nothing to read
	 */
	if (bf->next != bl || (is->state & IPPP_NOBLOCK)) {
		is->state &= ~IPPP_NOBLOCK;
		mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_irqrestore(&is->buflock, flags);
	return mask;
}

/*
 *  fill up isdn_ppp_read() queue ..
 *
 * Wrap @buf in a fake PPP header (FF 03 + 16-bit @proto) and append it
 * to slot @slot's circular receive ring for ipppd to read(); wakes any
 * poll()/read() waiter.  Returns @len on success, 0 if the slot is not
 * connected or allocation fails (packet silently dropped).
 */
static int
isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot)
{
	struct ippp_buf_queue *bf, *bl;
	u_long flags;
	u_char *nbuf;
	struct ippp_struct *is;

	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_WARNING "ippp: illegal slot(%d).\n", slot);
		return 0;
	}
	is = ippp_table[slot];

	if (!(is->state & IPPP_CONNECT)) {
		printk(KERN_DEBUG "ippp: device not activated.\n");
		return 0;
	}
	/* 4 extra bytes for the synthetic address/control/protocol header */
	nbuf = kmalloc(len + 4, GFP_ATOMIC);
	if (!nbuf) {
		printk(KERN_WARNING "ippp: Can't alloc buf\n");
		return 0;
	}
	nbuf[0] = PPP_ALLSTATIONS;
	nbuf[1] = PPP_UI;
	nbuf[2] = proto >> 8;
	nbuf[3] = proto & 0xff;
	memcpy(nbuf + 4, buf, len);

	spin_lock_irqsave(&is->buflock, flags);
	bf = is->first;
	bl = is->last;

	if (bf == bl) {
		/* ring is full: advance 'first' and drop the oldest buffer.
		 * NOTE(review): the freed bf->buf pointer is not set to NULL
		 * here, unlike isdn_ppp_read()/release() which do clear it —
		 * verify a stale pointer cannot be seen again via the ring. */
		printk(KERN_WARNING "ippp: Queue is full; discarding first buffer\n");
		bf = bf->next;
		kfree(bf->buf);
		is->first = bf;
	}

	bl->buf = (char *) nbuf;
	bl->len = len + 4;

	is->last = bl->next;
	spin_unlock_irqrestore(&is->buflock, flags);
	wake_up_interruptible(&is->wq);
	return len;
}

/*
 * read() ..
non-blocking: ipppd calls it only after select() * reports, that there is data */ int isdn_ppp_read(int min, struct file *file, char __user *buf, int count) { struct ippp_struct *is; struct ippp_buf_queue *b; u_long flags; u_char *save_buf; is = file->private_data; if (!(is->state & IPPP_OPEN)) return 0; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; spin_lock_irqsave(&is->buflock, flags); b = is->first->next; save_buf = b->buf; if (!save_buf) { spin_unlock_irqrestore(&is->buflock, flags); return -EAGAIN; } if (b->len < count) count = b->len; b->buf = NULL; is->first = b; spin_unlock_irqrestore(&is->buflock, flags); if (copy_to_user(buf, save_buf, count)) count = -EFAULT; kfree(save_buf); return count; } /* * ipppd wanna write a packet to the card .. non-blocking */ int isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) { isdn_net_local *lp; struct ippp_struct *is; int proto; unsigned char protobuf[4]; is = file->private_data; if (!(is->state & IPPP_CONNECT)) return 0; lp = is->lp; /* -> push it directly to the lowlevel interface */ if (!lp) printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n"); else { /* * Don't reset huptimer for * LCP packets. (Echo requests). */ if (copy_from_user(protobuf, buf, 4)) return -EFAULT; proto = PPP_PROTOCOL(protobuf); if (proto != PPP_LCP) lp->huptimer = 0; if (lp->isdn_device < 0 || lp->isdn_channel < 0) return 0; if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) && lp->dialstate == 0 && (lp->flags & ISDN_NET_CONNECTED)) { unsigned short hl; struct sk_buff *skb; /* * we need to reserve enough space in front of * sk_buff. 
old call to dev_alloc_skb only reserved
		 * 16 bytes, now we are looking what the driver want
		 */
		hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
		skb = alloc_skb(hl + count, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "isdn_ppp_write: out of memory!\n");
			return count;
		}
		skb_reserve(skb, hl);	/* leave driver headroom in front */
		if (copy_from_user(skb_put(skb, count), buf, count)) {
			kfree_skb(skb);
			return -EFAULT;
		}
		if (is->debug & 0x40) {
			printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
			isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
		}

		isdn_ppp_send_ccp(lp->netdev, lp, skb);	/* keeps CCP/compression states in sync */

		isdn_net_write_super(lp, skb);
	}
	}
	return count;
}

/*
 * init memory, structures etc.
 *
 * Allocates one ippp_struct per channel and wires each slot's receive
 * buffers into a circular ring via the last/next pointers.  Returns 0
 * on success, negative on allocation failure (all prior slots freed).
 */
int
isdn_ppp_init(void)
{
	int i,
		j;

#ifdef CONFIG_ISDN_MPP
	if (isdn_ppp_mp_bundle_array_init() < 0)
		return -ENOMEM;
#endif /* CONFIG_ISDN_MPP */

	for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
		if (!(ippp_table[i] = kzalloc(sizeof(struct ippp_struct), GFP_KERNEL))) {
			printk(KERN_WARNING "isdn_ppp_init: Could not alloc ippp_table\n");
			/* unwind the slots allocated so far */
			for (j = 0; j < i; j++)
				kfree(ippp_table[j]);
			return -1;
		}
		spin_lock_init(&ippp_table[i]->buflock);
		ippp_table[i]->state = 0;
		ippp_table[i]->first = ippp_table[i]->rq + NUM_RCV_BUFFS - 1;
		ippp_table[i]->last = ippp_table[i]->rq;

		/* link rq[] into a circular doubly-chained ring */
		for (j = 0; j < NUM_RCV_BUFFS; j++) {
			ippp_table[i]->rq[j].buf = NULL;
			ippp_table[i]->rq[j].last = ippp_table[i]->rq +
				(NUM_RCV_BUFFS + j - 1) % NUM_RCV_BUFFS;
			ippp_table[i]->rq[j].next = ippp_table[i]->rq + (j + 1) % NUM_RCV_BUFFS;
		}
	}
	return 0;
}

/* free the channel table built by isdn_ppp_init() */
void
isdn_ppp_cleanup(void)
{
	int i;

	for (i = 0; i < ISDN_MAX_CHANNELS; i++)
		kfree(ippp_table[i]);

#ifdef CONFIG_ISDN_MPP
	kfree(isdn_ppp_bundle_arr);
#endif /* CONFIG_ISDN_MPP */

}

/*
 * check for address/control field and skip if allowed
 * retval != 0 -> discard packet silently
 */
static int isdn_ppp_skip_ac(struct ippp_struct *is, struct sk_buff *skb)
{
	if (skb->len < 1)
		return -1;

	if (skb->data[0] == 0xff) {
		if (skb->len < 2)
			return -1;

		if (skb->data[1] != 0x03)
			return
-1; // skip address/control (AC) field skb_pull(skb, 2); } else { if (is->pppcfg & SC_REJ_COMP_AC) // if AC compression was not negotiated, but used, discard packet return -1; } return 0; } /* * get the PPP protocol header and pull skb * retval < 0 -> discard packet silently */ static int isdn_ppp_strip_proto(struct sk_buff *skb) { int proto; if (skb->len < 1) return -1; if (skb->data[0] & 0x1) { // protocol field is compressed proto = skb->data[0]; skb_pull(skb, 1); } else { if (skb->len < 2) return -1; proto = ((int) skb->data[0] << 8) + skb->data[1]; skb_pull(skb, 2); } return proto; } /* * handler for incoming packets on a syncPPP interface */ void isdn_ppp_receive(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb) { struct ippp_struct *is; int slot; int proto; BUG_ON(net_dev->local->master); // we're called with the master device always slot = lp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "isdn_ppp_receive: lp->ppp_slot(%d)\n", lp->ppp_slot); kfree_skb(skb); return; } is = ippp_table[slot]; if (is->debug & 0x4) { printk(KERN_DEBUG "ippp_receive: is:%08lx lp:%08lx slot:%d unit:%d len:%d\n", (long)is, (long)lp, lp->ppp_slot, is->unit, (int)skb->len); isdn_ppp_frame_log("receive", skb->data, skb->len, 32, is->unit, lp->ppp_slot); } if (isdn_ppp_skip_ac(is, skb) < 0) { kfree_skb(skb); return; } proto = isdn_ppp_strip_proto(skb); if (proto < 0) { kfree_skb(skb); return; } #ifdef CONFIG_ISDN_MPP if (is->compflags & SC_LINK_DECOMP_ON) { skb = isdn_ppp_decompress(skb, is, NULL, &proto); if (!skb) // decompression error return; } if (!(is->mpppcfg & SC_REJ_MP_PROT)) { // we agreed to receive MPPP if (proto == PPP_MP) { isdn_ppp_mp_receive(net_dev, lp, skb); return; } } #endif isdn_ppp_push_higher(net_dev, lp, skb, proto); } /* * we receive a reassembled frame, MPPP has been taken care of before. 
* address/control and protocol have been stripped from the skb * note: net_dev has to be master net_dev */ static void isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto) { struct net_device *dev = net_dev->dev; struct ippp_struct *is, *mis; isdn_net_local *mlp = NULL; int slot; slot = lp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "isdn_ppp_push_higher: lp->ppp_slot(%d)\n", lp->ppp_slot); goto drop_packet; } is = ippp_table[slot]; if (lp->master) { // FIXME? mlp = ISDN_MASTER_PRIV(lp); slot = mlp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "isdn_ppp_push_higher: master->ppp_slot(%d)\n", lp->ppp_slot); goto drop_packet; } } mis = ippp_table[slot]; if (is->debug & 0x10) { printk(KERN_DEBUG "push, skb %d %04x\n", (int) skb->len, proto); isdn_ppp_frame_log("rpush", skb->data, skb->len, 32, is->unit, lp->ppp_slot); } if (mis->compflags & SC_DECOMP_ON) { skb = isdn_ppp_decompress(skb, is, mis, &proto); if (!skb) // decompression error return; } switch (proto) { case PPP_IPX: /* untested */ if (is->debug & 0x20) printk(KERN_DEBUG "isdn_ppp: IPX\n"); skb->protocol = htons(ETH_P_IPX); break; case PPP_IP: if (is->debug & 0x20) printk(KERN_DEBUG "isdn_ppp: IP\n"); skb->protocol = htons(ETH_P_IP); break; case PPP_COMP: case PPP_COMPFRAG: printk(KERN_INFO "isdn_ppp: unexpected compressed frame dropped\n"); goto drop_packet; #ifdef CONFIG_ISDN_PPP_VJ case PPP_VJC_UNCOMP: if (is->debug & 0x20) printk(KERN_DEBUG "isdn_ppp: VJC_UNCOMP\n"); if (net_dev->local->ppp_slot < 0) { printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", __func__, net_dev->local->ppp_slot); goto drop_packet; } if (slhc_remember(ippp_table[net_dev->local->ppp_slot]->slcomp, skb->data, skb->len) <= 0) { printk(KERN_WARNING "isdn_ppp: received illegal VJC_UNCOMP frame!\n"); goto drop_packet; } skb->protocol = htons(ETH_P_IP); break; case PPP_VJC_COMP: if (is->debug & 0x20) printk(KERN_DEBUG 
"isdn_ppp: VJC_COMP\n"); { struct sk_buff *skb_old = skb; int pkt_len; skb = dev_alloc_skb(skb_old->len + 128); if (!skb) { printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); skb = skb_old; goto drop_packet; } skb_put(skb, skb_old->len + 128); skb_copy_from_linear_data(skb_old, skb->data, skb_old->len); if (net_dev->local->ppp_slot < 0) { printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", __func__, net_dev->local->ppp_slot); goto drop_packet; } pkt_len = slhc_uncompress(ippp_table[net_dev->local->ppp_slot]->slcomp, skb->data, skb_old->len); kfree_skb(skb_old); if (pkt_len < 0) goto drop_packet; skb_trim(skb, pkt_len); skb->protocol = htons(ETH_P_IP); } break; #endif case PPP_CCP: case PPP_CCPFRAG: isdn_ppp_receive_ccp(net_dev, lp, skb, proto); /* Dont pop up ResetReq/Ack stuff to the daemon any longer - the job is done already */ if (skb->data[0] == CCP_RESETREQ || skb->data[0] == CCP_RESETACK) break; /* fall through */ default: isdn_ppp_fill_rq(skb->data, skb->len, proto, lp->ppp_slot); /* push data to pppd device */ kfree_skb(skb); return; } #ifdef CONFIG_IPPP_FILTER /* check if the packet passes the pass and active filters * the filter instructions are constructed assuming * a four-byte PPP header on each packet (which is still present) */ skb_push(skb, 4); { u_int16_t *p = (u_int16_t *) skb->data; *p = 0; /* indicate inbound */ } if (is->pass_filter && sk_run_filter(skb, is->pass_filter) == 0) { if (is->debug & 0x2) printk(KERN_DEBUG "IPPP: inbound frame filtered.\n"); kfree_skb(skb); return; } if (!(is->active_filter && sk_run_filter(skb, is->active_filter) == 0)) { if (is->debug & 0x2) printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); lp->huptimer = 0; if (mlp) mlp->huptimer = 0; } skb_pull(skb, 4); #else /* CONFIG_IPPP_FILTER */ lp->huptimer = 0; if (mlp) mlp->huptimer = 0; #endif /* CONFIG_IPPP_FILTER */ skb->dev = dev; skb_reset_mac_header(skb); netif_rx(skb); /* net_dev->local->stats.rx_packets++; 
done in isdn_net.c */
	return;

drop_packet:
	net_dev->local->stats.rx_dropped++;
	kfree_skb(skb);
}

/*
 * isdn_ppp_skb_push ..
 * checks whether we have enough space at the beginning of the skb
 * and allocs a new SKB if necessary
 *
 * May replace *skb_p with a reallocated skb (old one freed); returns a
 * pointer to the @len newly pushed header bytes, or NULL on allocation
 * failure (in which case the original skb has been freed too).
 */
static unsigned char *isdn_ppp_skb_push(struct sk_buff **skb_p, int len)
{
	struct sk_buff *skb = *skb_p;

	if (skb_headroom(skb) < len) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, len);

		if (!nskb) {
			printk(KERN_ERR "isdn_ppp_skb_push: can't realloc headroom!\n");
			dev_kfree_skb(skb);
			return NULL;
		}
		printk(KERN_DEBUG "isdn_ppp_skb_push:under %d %d\n",
		       skb_headroom(skb), len);
		dev_kfree_skb(skb);
		*skb_p = nskb;
		return skb_push(nskb, len);
	}
	return skb_push(skb, len);
}

/*
 * send ppp frame .. we expect a PIDCOMPressable proto --
 *  (here: currently always PPP_IP,PPP_VJC_COMP,PPP_VJC_UNCOMP)
 *
 * VJ compression may change skb pointer!!! .. requeue with old
 * skb isn't allowed!!
 */

int
isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	isdn_net_local *lp, *mlp;
	isdn_net_dev *nd;
	unsigned int proto = PPP_IP;	/* 0x21 */
	struct ippp_struct *ipt, *ipts;
	int slot, retval = NETDEV_TX_OK;

	mlp = netdev_priv(netdev);
	nd = mlp->netdev;	/* get master lp */

	slot = mlp->ppp_slot;
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
		       mlp->ppp_slot);
		kfree_skb(skb);
		goto out;
	}
	ipts = ippp_table[slot];

	if (!(ipts->pppcfg & SC_ENABLE_IP)) {	/* PPP connected ?
*/ if (ipts->debug & 0x1) printk(KERN_INFO "%s: IP frame delayed.\n", netdev->name); retval = NETDEV_TX_BUSY; goto out; } switch (ntohs(skb->protocol)) { case ETH_P_IP: proto = PPP_IP; break; case ETH_P_IPX: proto = PPP_IPX; /* untested */ break; default: printk(KERN_ERR "isdn_ppp: skipped unsupported protocol: %#x.\n", skb->protocol); dev_kfree_skb(skb); goto out; } lp = isdn_net_get_locked_lp(nd); if (!lp) { printk(KERN_WARNING "%s: all channels busy - requeuing!\n", netdev->name); retval = NETDEV_TX_BUSY; goto out; } /* we have our lp locked from now on */ slot = lp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n", lp->ppp_slot); kfree_skb(skb); goto unlock; } ipt = ippp_table[slot]; /* * after this line .. requeueing in the device queue is no longer allowed!!! */ /* Pull off the fake header we stuck on earlier to keep * the fragmentation code happy. */ skb_pull(skb, IPPP_MAX_HEADER); #ifdef CONFIG_IPPP_FILTER /* check if we should pass this packet * the filter instructions are constructed assuming * a four-byte PPP header on each packet */ *skb_push(skb, 4) = 1; /* indicate outbound */ { __be16 *p = (__be16 *)skb->data; p++; *p = htons(proto); } if (ipt->pass_filter && sk_run_filter(skb, ipt->pass_filter) == 0) { if (ipt->debug & 0x4) printk(KERN_DEBUG "IPPP: outbound frame filtered.\n"); kfree_skb(skb); goto unlock; } if (!(ipt->active_filter && sk_run_filter(skb, ipt->active_filter) == 0)) { if (ipt->debug & 0x4) printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); lp->huptimer = 0; } skb_pull(skb, 4); #else /* CONFIG_IPPP_FILTER */ lp->huptimer = 0; #endif /* CONFIG_IPPP_FILTER */ if (ipt->debug & 0x4) printk(KERN_DEBUG "xmit skb, len %d\n", (int) skb->len); if (ipts->debug & 0x40) isdn_ppp_frame_log("xmit0", skb->data, skb->len, 32, ipts->unit, lp->ppp_slot); #ifdef CONFIG_ISDN_PPP_VJ if (proto == PPP_IP && ipts->pppcfg & SC_COMP_TCP) { /* ipts here? 
probably yes, but check this again */ struct sk_buff *new_skb; unsigned short hl; /* * we need to reserve enough space in front of * sk_buff. old call to dev_alloc_skb only reserved * 16 bytes, now we are looking what the driver want. */ hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen + IPPP_MAX_HEADER; /* * Note: hl might still be insufficient because the method * above does not account for a possibible MPPP slave channel * which had larger HL header space requirements than the * master. */ new_skb = alloc_skb(hl + skb->len, GFP_ATOMIC); if (new_skb) { u_char *buf; int pktlen; skb_reserve(new_skb, hl); new_skb->dev = skb->dev; skb_put(new_skb, skb->len); buf = skb->data; pktlen = slhc_compress(ipts->slcomp, skb->data, skb->len, new_skb->data, &buf, !(ipts->pppcfg & SC_NO_TCP_CCID)); if (buf != skb->data) { if (new_skb->data != buf) printk(KERN_ERR "isdn_ppp: FATAL error after slhc_compress!!\n"); dev_kfree_skb(skb); skb = new_skb; } else { dev_kfree_skb(new_skb); } skb_trim(skb, pktlen); if (skb->data[0] & SL_TYPE_COMPRESSED_TCP) { /* cslip? 
style -> PPP */ proto = PPP_VJC_COMP; skb->data[0] ^= SL_TYPE_COMPRESSED_TCP; } else { if (skb->data[0] >= SL_TYPE_UNCOMPRESSED_TCP) proto = PPP_VJC_UNCOMP; skb->data[0] = (skb->data[0] & 0x0f) | 0x40; } } } #endif /* * normal (single link) or bundle compression */ if (ipts->compflags & SC_COMP_ON) { /* We send compressed only if both down- und upstream compression is negotiated, that means, CCP is up */ if (ipts->compflags & SC_DECOMP_ON) { skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 0); } else { printk(KERN_DEBUG "isdn_ppp: CCP not yet up - sending as-is\n"); } } if (ipt->debug & 0x24) printk(KERN_DEBUG "xmit2 skb, len %d, proto %04x\n", (int) skb->len, proto); #ifdef CONFIG_ISDN_MPP if (ipt->mpppcfg & SC_MP_PROT) { /* we get mp_seqno from static isdn_net_local */ long mp_seqno = ipts->mp_seqno; ipts->mp_seqno++; if (ipt->mpppcfg & SC_OUT_SHORT_SEQ) { unsigned char *data = isdn_ppp_skb_push(&skb, 3); if (!data) goto unlock; mp_seqno &= 0xfff; data[0] = MP_BEGIN_FRAG | MP_END_FRAG | ((mp_seqno >> 8) & 0xf); /* (B)egin & (E)ndbit .. */ data[1] = mp_seqno & 0xff; data[2] = proto; /* PID compression */ } else { unsigned char *data = isdn_ppp_skb_push(&skb, 5); if (!data) goto unlock; data[0] = MP_BEGIN_FRAG | MP_END_FRAG; /* (B)egin & (E)ndbit .. */ data[1] = (mp_seqno >> 16) & 0xff; /* sequence number: 24bit */ data[2] = (mp_seqno >> 8) & 0xff; data[3] = (mp_seqno >> 0) & 0xff; data[4] = proto; /* PID compression */ } proto = PPP_MP; /* MP Protocol, 0x003d */ } #endif /* * 'link in bundle' compression ... 
*/ if (ipt->compflags & SC_LINK_COMP_ON) skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 1); if ((ipt->pppcfg & SC_COMP_PROT) && (proto <= 0xff)) { unsigned char *data = isdn_ppp_skb_push(&skb, 1); if (!data) goto unlock; data[0] = proto & 0xff; } else { unsigned char *data = isdn_ppp_skb_push(&skb, 2); if (!data) goto unlock; data[0] = (proto >> 8) & 0xff; data[1] = proto & 0xff; } if (!(ipt->pppcfg & SC_COMP_AC)) { unsigned char *data = isdn_ppp_skb_push(&skb, 2); if (!data) goto unlock; data[0] = 0xff; /* All Stations */ data[1] = 0x03; /* Unnumbered information */ } /* tx-stats are now updated via BSENT-callback */ if (ipts->debug & 0x40) { printk(KERN_DEBUG "skb xmit: len: %d\n", (int) skb->len); isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, ipt->unit, lp->ppp_slot); } isdn_net_writebuf_skb(lp, skb); unlock: spin_unlock_bh(&lp->xmit_lock); out: return retval; } #ifdef CONFIG_IPPP_FILTER /* * check if this packet may trigger auto-dial. */ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp) { struct ippp_struct *is = ippp_table[lp->ppp_slot]; u_int16_t proto; int drop = 0; switch (ntohs(skb->protocol)) { case ETH_P_IP: proto = PPP_IP; break; case ETH_P_IPX: proto = PPP_IPX; break; default: printk(KERN_ERR "isdn_ppp_autodial_filter: unsupported protocol 0x%x.\n", skb->protocol); return 1; } /* the filter instructions are constructed assuming * a four-byte PPP header on each packet. we have to * temporarily remove part of the fake header stuck on * earlier. 
*/ *skb_pull(skb, IPPP_MAX_HEADER - 4) = 1; /* indicate outbound */ { __be16 *p = (__be16 *)skb->data; p++; *p = htons(proto); } drop |= is->pass_filter && sk_run_filter(skb, is->pass_filter) == 0; drop |= is->active_filter && sk_run_filter(skb, is->active_filter) == 0; skb_push(skb, IPPP_MAX_HEADER - 4); return drop; } #endif #ifdef CONFIG_ISDN_MPP /* this is _not_ rfc1990 header, but something we convert both short and long * headers to for convinience's sake: * byte 0 is flags as in rfc1990 * bytes 1...4 is 24-bit seqence number converted to host byte order */ #define MP_HEADER_LEN 5 #define MP_LONGSEQ_MASK 0x00ffffff #define MP_SHORTSEQ_MASK 0x00000fff #define MP_LONGSEQ_MAX MP_LONGSEQ_MASK #define MP_SHORTSEQ_MAX MP_SHORTSEQ_MASK #define MP_LONGSEQ_MAXBIT ((MP_LONGSEQ_MASK + 1) >> 1) #define MP_SHORTSEQ_MAXBIT ((MP_SHORTSEQ_MASK + 1) >> 1) /* sequence-wrap safe comparisons (for long sequence)*/ #define MP_LT(a, b) ((a - b) & MP_LONGSEQ_MAXBIT) #define MP_LE(a, b) !((b - a) & MP_LONGSEQ_MAXBIT) #define MP_GT(a, b) ((b - a) & MP_LONGSEQ_MAXBIT) #define MP_GE(a, b) !((a - b) & MP_LONGSEQ_MAXBIT) #define MP_SEQ(f) ((*(u32 *)(f->data + 1))) #define MP_FLAGS(f) (f->data[0]) static int isdn_ppp_mp_bundle_array_init(void) { int i; int sz = ISDN_MAX_CHANNELS * sizeof(ippp_bundle); if ((isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL) return -ENOMEM; for (i = 0; i < ISDN_MAX_CHANNELS; i++) spin_lock_init(&isdn_ppp_bundle_arr[i].lock); return 0; } static ippp_bundle *isdn_ppp_mp_bundle_alloc(void) { int i; for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (isdn_ppp_bundle_arr[i].ref_ct <= 0) return (isdn_ppp_bundle_arr + i); return NULL; } static int isdn_ppp_mp_init(isdn_net_local *lp, ippp_bundle *add_to) { struct ippp_struct *is; if (lp->ppp_slot < 0) { printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return (-EINVAL); } is = ippp_table[lp->ppp_slot]; if (add_to) { if (lp->netdev->pb) lp->netdev->pb->ref_ct--; lp->netdev->pb = add_to; } 
	else {		/* first link in a bundle */
		is->mp_seqno = 0;
		if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
			return -ENOMEM;
		lp->next = lp->last = lp;	/* nobody else in a queue */
		lp->netdev->pb->frags = NULL;
		lp->netdev->pb->frames = 0;
		/* UINT_MAX (> MP_LONGSEQ_MAX) marks "no packet processed
		 * yet"; isdn_ppp_mp_receive() checks for this to accept the
		 * very first sequence number unconditionally */
		lp->netdev->pb->seq = UINT_MAX;
	}
	lp->netdev->pb->ref_ct++;

	is->last_link_seqno = 0;
	return 0;
}

/* NOTE(review): isdn_ppp_mp_discard and isdn_ppp_mp_reassembly are
 * declared static here but defined without the static keyword below;
 * the earlier declaration still gives them internal linkage. */
static u32 isdn_ppp_mp_get_seq(int short_seq, struct sk_buff *skb,
			       u32 last_seq);
static struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
					   struct sk_buff *from, struct sk_buff *to);
static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
				   struct sk_buff *from, struct sk_buff *to);
static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
static void isdn_ppp_mp_print_recv_pkt(int slot, struct sk_buff *skb);

/*
 * Accept one MPPP fragment from link @lp, insert it into the bundle's
 * sequence-ordered fragment queue, and reassemble/dispatch any complete
 * packet sequences.  Runs entirely under the bundle spinlock.  The skb is
 * consumed in all cases (queued, reassembled, or dropped).
 */
static void isdn_ppp_mp_receive(isdn_net_dev *net_dev, isdn_net_local *lp,
				struct sk_buff *skb)
{
	struct ippp_struct *is;
	isdn_net_local *lpq;
	ippp_bundle *mp;
	isdn_mppp_stats *stats;
	struct sk_buff *newfrag, *frag, *start, *nextf;
	u32 newseq, minseq, thisseq;
	unsigned long flags;
	int slot;

	spin_lock_irqsave(&net_dev->pb->lock, flags);
	mp = net_dev->pb;
	stats = &mp->stats;
	slot = lp->ppp_slot;
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
		       __func__, lp->ppp_slot);
		stats->frame_drops++;
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&mp->lock, flags);
		return;
	}
	is = ippp_table[slot];
	if (++mp->frames > stats->max_queue_len)
		stats->max_queue_len = mp->frames;

	if (is->debug & 0x8)
		isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);

	/* converts the on-wire short/long header into the internal 5-byte
	 * form and returns the 24-bit sequence number in host order */
	newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
				     skb, is->last_link_seqno);

	/* if this packet seq # is less than last already processed one,
	 * toss it right away, but check for sequence start case first
	 */
	if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
		mp->seq = newseq;	/* the first packet: required for
					 * rfc1990 non-compliant clients --
					 * prevents constant packet toss */
	} else if (MP_LT(newseq, mp->seq)) {
stats->frame_drops++; isdn_ppp_mp_free_skb(mp, skb); spin_unlock_irqrestore(&mp->lock, flags); return; } /* find the minimum received sequence number over all links */ is->last_link_seqno = minseq = newseq; for (lpq = net_dev->queue;;) { slot = lpq->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lpq->ppp_slot(%d)\n", __func__, lpq->ppp_slot); } else { u32 lls = ippp_table[slot]->last_link_seqno; if (MP_LT(lls, minseq)) minseq = lls; } if ((lpq = lpq->next) == net_dev->queue) break; } if (MP_LT(minseq, mp->seq)) minseq = mp->seq; /* can't go beyond already processed * packets */ newfrag = skb; /* if this new fragment is before the first one, then enqueue it now. */ if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) { newfrag->next = frag; mp->frags = frag = newfrag; newfrag = NULL; } start = MP_FLAGS(frag) & MP_BEGIN_FRAG && MP_SEQ(frag) == mp->seq ? frag : NULL; /* * main fragment traversing loop * * try to accomplish several tasks: * - insert new fragment into the proper sequence slot (once that's done * newfrag will be set to NULL) * - reassemble any complete fragment sequence (non-null 'start' * indicates there is a contiguous sequence present) * - discard any incomplete sequences that are below minseq -- due * to the fact that sender always increment sequence number, if there * is an incomplete sequence below minseq, no new fragments would * come to complete such sequence and it should be discarded * * loop completes when we accomplished the following tasks: * - new fragment is inserted in the proper sequence ('newfrag' is * set to NULL) * - we hit a gap in the sequence, so no reassembly/processing is * possible ('start' would be set to NULL) * * algorithm for this code is derived from code in the book * 'PPP Design And Debugging' by James Carlson (Addison-Wesley) */ while (start != NULL || newfrag != NULL) { thisseq = MP_SEQ(frag); nextf = frag->next; /* drop any duplicate fragments */ if (newfrag != NULL && thisseq == 
newseq) { isdn_ppp_mp_free_skb(mp, newfrag); newfrag = NULL; } /* insert new fragment before next element if possible. */ if (newfrag != NULL && (nextf == NULL || MP_LT(newseq, MP_SEQ(nextf)))) { newfrag->next = nextf; frag->next = nextf = newfrag; newfrag = NULL; } if (start != NULL) { /* check for misplaced start */ if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) { printk(KERN_WARNING"isdn_mppp(seq %d): new " "BEGIN flag with no prior END", thisseq); stats->seqerrs++; stats->frame_drops++; start = isdn_ppp_mp_discard(mp, start, frag); nextf = frag->next; } } else if (MP_LE(thisseq, minseq)) { if (MP_FLAGS(frag) & MP_BEGIN_FRAG) start = frag; else { if (MP_FLAGS(frag) & MP_END_FRAG) stats->frame_drops++; if (mp->frags == frag) mp->frags = nextf; isdn_ppp_mp_free_skb(mp, frag); frag = nextf; continue; } } /* if start is non-null and we have end fragment, then * we have full reassembly sequence -- reassemble * and process packet now */ if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) { minseq = mp->seq = (thisseq + 1) & MP_LONGSEQ_MASK; /* Reassemble the packet then dispatch it */ isdn_ppp_mp_reassembly(net_dev, lp, start, nextf); start = NULL; frag = NULL; mp->frags = nextf; } /* check if need to update start pointer: if we just * reassembled the packet and sequence is contiguous * then next fragment should be the start of new reassembly * if sequence is contiguous, but we haven't reassembled yet, * keep going. * if sequence is not contiguous, either clear everything * below low watermark and set start to the next frag or * clear start ptr. */ if (nextf != NULL && ((thisseq + 1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) { /* if we just reassembled and the next one is here, * then start another reassembly. 
*/ if (frag == NULL) { if (MP_FLAGS(nextf) & MP_BEGIN_FRAG) start = nextf; else { printk(KERN_WARNING"isdn_mppp(seq %d):" " END flag with no following " "BEGIN", thisseq); stats->seqerrs++; } } } else { if (nextf != NULL && frag != NULL && MP_LT(thisseq, minseq)) { /* we've got a break in the sequence * and we not at the end yet * and we did not just reassembled *(if we did, there wouldn't be anything before) * and we below the low watermark * discard all the frames below low watermark * and start over */ stats->frame_drops++; mp->frags = isdn_ppp_mp_discard(mp, start, nextf); } /* break in the sequence, no reassembly */ start = NULL; } frag = nextf; } /* while -- main loop */ if (mp->frags == NULL) mp->frags = frag; /* rather straighforward way to deal with (not very) possible * queue overflow */ if (mp->frames > MP_MAX_QUEUE_LEN) { stats->overflows++; while (mp->frames > MP_MAX_QUEUE_LEN) { frag = mp->frags->next; isdn_ppp_mp_free_skb(mp, mp->frags); mp->frags = frag; } } spin_unlock_irqrestore(&mp->lock, flags); } static void isdn_ppp_mp_cleanup(isdn_net_local *lp) { struct sk_buff *frag = lp->netdev->pb->frags; struct sk_buff *nextfrag; while (frag) { nextfrag = frag->next; isdn_ppp_mp_free_skb(lp->netdev->pb, frag); frag = nextfrag; } lp->netdev->pb->frags = NULL; } static u32 isdn_ppp_mp_get_seq(int short_seq, struct sk_buff *skb, u32 last_seq) { u32 seq; int flags = skb->data[0] & (MP_BEGIN_FRAG | MP_END_FRAG); if (!short_seq) { seq = ntohl(*(__be32 *)skb->data) & MP_LONGSEQ_MASK; skb_push(skb, 1); } else { /* convert 12-bit short seq number to 24-bit long one */ seq = ntohs(*(__be16 *)skb->data) & MP_SHORTSEQ_MASK; /* check for seqence wrap */ if (!(seq & MP_SHORTSEQ_MAXBIT) && (last_seq & MP_SHORTSEQ_MAXBIT) && (unsigned long)last_seq <= MP_LONGSEQ_MAX) seq |= (last_seq + MP_SHORTSEQ_MAX + 1) & (~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK); else seq |= last_seq & (~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK); skb_push(skb, 3); /* put converted seqence back in skb */ } 
	*(u32 *)(skb->data + 1) = seq;	/* put sequence back in _host_ byte
					 * order */
	skb->data[0] = flags;		/* restore flags */
	return seq;
}

/*
 * Free the fragments in [from, to) and return @to (or NULL if @from was
 * NULL).  Used to drop an incomplete or unreassemblable run of fragments.
 */
struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
				    struct sk_buff *from, struct sk_buff *to)
{
	if (from)
		while (from != to) {
			struct sk_buff *next = from->next;
			isdn_ppp_mp_free_skb(mp, from);
			from = next;
		}
	return from;
}

/*
 * Reassemble the fragment run [from, to) into a single packet and hand it
 * up via isdn_ppp_push_higher().  A lone BEGIN|END fragment is reused
 * in place; otherwise a fresh skb is allocated and the fragments' payloads
 * (MP header stripped) are copied in and freed.  On allocation failure the
 * whole run is discarded.
 */
void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
			    struct sk_buff *from, struct sk_buff *to)
{
	ippp_bundle *mp = net_dev->pb;
	int proto;
	struct sk_buff *skb;
	unsigned int tot_len;

	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
		       __func__, lp->ppp_slot);
		return;
	}
	if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
		/* single-fragment packet: strip header, no copy needed */
		if (ippp_table[lp->ppp_slot]->debug & 0x40)
			printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
			       "len %d\n", MP_SEQ(from), from->len);
		skb = from;
		skb_pull(skb, MP_HEADER_LEN);
		mp->frames--;
	} else {
		struct sk_buff *frag;
		int n;

		/* first pass: total payload size of the run */
		for (tot_len = n = 0, frag = from; frag != to;
		     frag = frag->next, n++)
			tot_len += frag->len - MP_HEADER_LEN;

		if (ippp_table[lp->ppp_slot]->debug & 0x40)
			printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
			       "to %d, len %d\n", MP_SEQ(from),
			       (MP_SEQ(from) + n - 1) & MP_LONGSEQ_MASK,
			       tot_len);
		if ((skb = dev_alloc_skb(tot_len)) == NULL) {
			printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
			       "of size %d\n", tot_len);
			isdn_ppp_mp_discard(mp, from, to);
			return;
		}

		/* second pass: copy payloads and free the fragments */
		while (from != to) {
			unsigned int len = from->len - MP_HEADER_LEN;

			skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
							 skb_put(skb, len),
							 len);
			frag = from->next;
			isdn_ppp_mp_free_skb(mp, from);
			from = frag;
		}
	}
	proto = isdn_ppp_strip_proto(skb);
	isdn_ppp_push_higher(net_dev, lp, skb, proto);
}

/* Free one queued fragment and drop the bundle's queued-frame count. */
static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
	mp->frames--;
}

/* Debug helper: dump the first six bytes of a received MP fragment. */
static void isdn_ppp_mp_print_recv_pkt(int slot, struct sk_buff *skb)
{
	printk(KERN_DEBUG "mp_recv: %d/%d -> %02x %02x %02x %02x %02x %02x\n",
	       slot, (int) skb->len,
	       (int) skb->data[0], (int) skb->data[1], (int) skb->data[2],
	       (int) skb->data[3], (int) skb->data[4], (int) skb->data[5]);
}

/*
 * Bind the link behind @is into the bundle of interface ippp<unit>:
 * look the interface up by name, validate both ppp slots, add the link to
 * the interface's queue, copy over the unit number and the relevant
 * ppp/mppp config bits from the master link, then join the bundle via
 * isdn_ppp_mp_init().  Returns 0 or a negative errno.
 */
static int isdn_ppp_bundle(struct ippp_struct *is, int unit)
{
	char ifn[IFNAMSIZ + 1];
	isdn_net_dev *p;
	isdn_net_local *lp, *nlp;
	int rc;
	unsigned long flags;

	sprintf(ifn, "ippp%d", unit);
	p = isdn_net_findif(ifn);
	if (!p) {
		printk(KERN_ERR "ippp_bundle: cannot find %s\n", ifn);
		return -EINVAL;
	}

	spin_lock_irqsave(&p->pb->lock, flags);

	nlp = is->lp;
	lp = p->queue;
	if (nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ||
	    lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "ippp_bundle: binding to invalid slot %d\n",
		       nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ?
		       nlp->ppp_slot : lp->ppp_slot);
		rc = -EINVAL;
		goto out;
	}

	isdn_net_add_to_bundle(p, nlp);

	ippp_table[nlp->ppp_slot]->unit = ippp_table[lp->ppp_slot]->unit;

	/* maybe also SC_CCP stuff */
	ippp_table[nlp->ppp_slot]->pppcfg |= ippp_table[lp->ppp_slot]->pppcfg &
		(SC_ENABLE_IP | SC_NO_TCP_CCID | SC_REJ_COMP_TCP);
	ippp_table[nlp->ppp_slot]->mpppcfg |= ippp_table[lp->ppp_slot]->mpppcfg &
		(SC_MP_PROT | SC_REJ_MP_PROT | SC_OUT_SHORT_SEQ |
		 SC_IN_SHORT_SEQ);
	rc = isdn_ppp_mp_init(nlp, p->pb);
out:
	spin_unlock_irqrestore(&p->pb->lock, flags);
	return rc;
}

#endif /* CONFIG_ISDN_MPP */

/*
 * network device ioctl handlers
 */

/*
 * SIOCGPPPSTATS: build a struct ppp_stats snapshot (device counters plus,
 * when CONFIG_ISDN_PPP_VJ, VJ compressor counters) and copy it to the
 * user buffer in ifr->ifr_data.  Returns 0 or -EFAULT.
 */
static int isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr,
				    struct net_device *dev)
{
	struct ppp_stats __user *res = ifr->ifr_data;
	struct ppp_stats t;
	isdn_net_local *lp = netdev_priv(dev);

	if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
		return -EFAULT;

	/* build a temporary stat struct and copy it to user space */

	memset(&t, 0, sizeof(struct ppp_stats));
	if (dev->flags & IFF_UP) {
		t.p.ppp_ipackets = lp->stats.rx_packets;
		t.p.ppp_ibytes = lp->stats.rx_bytes;
		t.p.ppp_ierrors = lp->stats.rx_errors;
		t.p.ppp_opackets = lp->stats.tx_packets;
		t.p.ppp_obytes = lp->stats.tx_bytes;
		t.p.ppp_oerrors =
lp->stats.tx_errors; #ifdef CONFIG_ISDN_PPP_VJ if (slot >= 0 && ippp_table[slot]->slcomp) { struct slcompress *slcomp = ippp_table[slot]->slcomp; t.vj.vjs_packets = slcomp->sls_o_compressed + slcomp->sls_o_uncompressed; t.vj.vjs_compressed = slcomp->sls_o_compressed; t.vj.vjs_searches = slcomp->sls_o_searches; t.vj.vjs_misses = slcomp->sls_o_misses; t.vj.vjs_errorin = slcomp->sls_i_error; t.vj.vjs_tossed = slcomp->sls_i_tossed; t.vj.vjs_uncompressedin = slcomp->sls_i_uncompressed; t.vj.vjs_compressedin = slcomp->sls_i_compressed; } #endif } if (copy_to_user(res, &t, sizeof(struct ppp_stats))) return -EFAULT; return 0; } int isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int error = 0; int len; isdn_net_local *lp = netdev_priv(dev); if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP) return -EINVAL; switch (cmd) { #define PPP_VERSION "2.3.7" case SIOCGPPPVER: len = strlen(PPP_VERSION) + 1; if (copy_to_user(ifr->ifr_data, PPP_VERSION, len)) error = -EFAULT; break; case SIOCGPPPSTATS: error = isdn_ppp_dev_ioctl_stats(lp->ppp_slot, ifr, dev); break; default: error = -EINVAL; break; } return error; } static int isdn_ppp_if_get_unit(char *name) { int len, i, unit = 0, deci; len = strlen(name); if (strncmp("ippp", name, 4) || len > 8) return -1; for (i = 0, deci = 1; i < len; i++, deci *= 10) { char a = name[len - i - 1]; if (a >= '0' && a <= '9') unit += (a - '0') * deci; else break; } if (!i || len - i != 4) unit = -1; return unit; } int isdn_ppp_dial_slave(char *name) { #ifdef CONFIG_ISDN_MPP isdn_net_dev *ndev; isdn_net_local *lp; struct net_device *sdev; if (!(ndev = isdn_net_findif(name))) return 1; lp = ndev->local; if (!(lp->flags & ISDN_NET_CONNECTED)) return 5; sdev = lp->slave; while (sdev) { isdn_net_local *mlp = netdev_priv(sdev); if (!(mlp->flags & ISDN_NET_CONNECTED)) break; sdev = mlp->slave; } if (!sdev) return 2; isdn_net_dial_req(netdev_priv(sdev)); return 0; #else return -1; #endif } int isdn_ppp_hangup_slave(char *name) { #ifdef 
CONFIG_ISDN_MPP isdn_net_dev *ndev; isdn_net_local *lp; struct net_device *sdev; if (!(ndev = isdn_net_findif(name))) return 1; lp = ndev->local; if (!(lp->flags & ISDN_NET_CONNECTED)) return 5; sdev = lp->slave; while (sdev) { isdn_net_local *mlp = netdev_priv(sdev); if (mlp->slave) { /* find last connected link in chain */ isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp); if (!(nlp->flags & ISDN_NET_CONNECTED)) break; } else if (mlp->flags & ISDN_NET_CONNECTED) break; sdev = mlp->slave; } if (!sdev) return 2; isdn_net_hangup(sdev); return 0; #else return -1; #endif } /* * PPP compression stuff */ /* Push an empty CCP Data Frame up to the daemon to wake it up and let it generate a CCP Reset-Request or tear down CCP altogether */ static void isdn_ppp_ccp_kickup(struct ippp_struct *is) { isdn_ppp_fill_rq(NULL, 0, PPP_COMP, is->lp->ppp_slot); } /* In-kernel handling of CCP Reset-Request and Reset-Ack is necessary, but absolutely nontrivial. The most abstruse problem we are facing is that the generation, reception and all the handling of timeouts and resends including proper request id management should be entirely left to the (de)compressor, but indeed is not covered by the current API to the (de)compressor. The API is a prototype version from PPP where only some (de)compressors have yet been implemented and all of them are rather simple in their reset handling. Especially, their is only one outstanding ResetAck at a time with all of them and ResetReq/-Acks do not have parameters. For this very special case it was sufficient to just return an error code from the decompressor and have a single reset() entry to communicate all the necessary information between the framework and the (de)compressor. Bad enough, LZS is different (and any other compressor may be different, too). It has multiple histories (eventually) and needs to Reset each of them independently and thus uses multiple outstanding Acks and history numbers as an additional parameter to Reqs/Acks. 
All that makes it harder to port the reset state engine into the kernel because it is not just the same simple one as in (i)pppd but it must be able to pass additional parameters and have multiple out- standing Acks. We are trying to achieve the impossible by handling reset transactions independent by their id. The id MUST change when the data portion changes, thus any (de)compressor who uses more than one resettable state must provide and recognize individual ids for each individual reset transaction. The framework itself does _only_ differentiate them by id, because it has no other semantics like the (de)compressor might. This looks like a major redesign of the interface would be nice, but I don't have an idea how to do it better. */ /* Send a CCP Reset-Request or Reset-Ack directly from the kernel. This is getting that lengthy because there is no simple "send-this-frame-out" function above but every wrapper does a bit different. Hope I guess correct in this hack... */ static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto, unsigned char code, unsigned char id, unsigned char *data, int len) { struct sk_buff *skb; unsigned char *p; int hl; int cnt = 0; isdn_net_local *lp = is->lp; /* Alloc large enough skb */ hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen; skb = alloc_skb(len + hl + 16, GFP_ATOMIC); if (!skb) { printk(KERN_WARNING "ippp: CCP cannot send reset - out of memory\n"); return; } skb_reserve(skb, hl); /* We may need to stuff an address and control field first */ if (!(is->pppcfg & SC_COMP_AC)) { p = skb_put(skb, 2); *p++ = 0xff; *p++ = 0x03; } /* Stuff proto, code, id and length */ p = skb_put(skb, 6); *p++ = (proto >> 8); *p++ = (proto & 0xff); *p++ = code; *p++ = id; cnt = 4 + len; *p++ = (cnt >> 8); *p++ = (cnt & 0xff); /* Now stuff remaining bytes */ if (len) { p = skb_put(skb, len); memcpy(p, data, len); } /* skb is now ready for xmit */ printk(KERN_DEBUG "Sending CCP Frame:\n"); isdn_ppp_frame_log("ccp-xmit", skb->data, 
			    skb->len, 32, is->unit, lp->ppp_slot);
	isdn_net_write_super(lp, skb);
}

/* Allocate the reset state vector (256 per-id transaction slots) and
 * attach it to @is.  Returns the vector or NULL on allocation failure. */
static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is)
{
	struct ippp_ccp_reset *r;

	r = kzalloc(sizeof(struct ippp_ccp_reset), GFP_KERNEL);
	if (!r) {
		printk(KERN_ERR "ippp_ccp: failed to allocate reset data"
		       " structure - no mem\n");
		return NULL;
	}
	printk(KERN_DEBUG "ippp_ccp: allocated reset data structure %p\n", r);
	is->reset = r;
	return r;
}

/* Destroy the reset state vector.  Kill all pending timers first. */
static void isdn_ppp_ccp_reset_free(struct ippp_struct *is)
{
	unsigned int id;

	printk(KERN_DEBUG "ippp_ccp: freeing reset data structure %p\n",
	       is->reset);
	for (id = 0; id < 256; id++) {
		if (is->reset->rs[id]) {
			isdn_ppp_ccp_reset_free_state(is, (unsigned char)id);
		}
	}
	kfree(is->reset);
	is->reset = NULL;
}

/* Free a given state and clear everything up for later reallocation.
 * Cancels the retransmit timer (rs->ta set means it is armed) before
 * freeing so the callback cannot fire on freed memory. */
static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
					  unsigned char id)
{
	struct ippp_ccp_reset_state *rs;

	if (is->reset->rs[id]) {
		printk(KERN_DEBUG "ippp_ccp: freeing state for id %d\n", id);
		rs = is->reset->rs[id];
		/* Make sure the kernel will not call back later */
		if (rs->ta)
			del_timer(&rs->timer);
		is->reset->rs[id] = NULL;
		kfree(rs);
	} else {
		printk(KERN_WARNING "ippp_ccp: id %d is not allocated\n", id);
	}
}

/* The timer callback function which is called when a ResetReq has timed
 * out, aka has never been answered by a ResetAck.  Either retransmits the
 * ResetReq and re-arms the timer, or (when no Ack was expected) tears the
 * transaction down. */
static void isdn_ppp_ccp_timer_callback(unsigned long closure)
{
	struct ippp_ccp_reset_state *rs =
		(struct ippp_ccp_reset_state *)closure;

	if (!rs) {
		printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
		return;
	}
	if (rs->ta && rs->state == CCPResetSentReq) {
		/* We are correct here */
		if (!rs->expra) {
			/* Hmm, there is no Ack really expected.
We can clean up the state now, it will be reallocated if the decompressor insists on another reset */ rs->ta = 0; isdn_ppp_ccp_reset_free_state(rs->is, rs->id); return; } printk(KERN_DEBUG "ippp_ccp: CCP Reset timed out for id %d\n", rs->id); /* Push it again */ isdn_ppp_ccp_xmit_reset(rs->is, PPP_CCP, CCP_RESETREQ, rs->id, rs->data, rs->dlen); /* Restart timer */ rs->timer.expires = jiffies + HZ * 5; add_timer(&rs->timer); } else { printk(KERN_WARNING "ippp_ccp: timer cb in wrong state %d\n", rs->state); } } /* Allocate a new reset transaction state */ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is, unsigned char id) { struct ippp_ccp_reset_state *rs; if (is->reset->rs[id]) { printk(KERN_WARNING "ippp_ccp: old state exists for id %d\n", id); return NULL; } else { rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); if (!rs) return NULL; rs->state = CCPResetIdle; rs->is = is; rs->id = id; init_timer(&rs->timer); rs->timer.data = (unsigned long)rs; rs->timer.function = isdn_ppp_ccp_timer_callback; is->reset->rs[id] = rs; } return rs; } /* A decompressor wants a reset with a set of parameters - do what is necessary to fulfill it */ static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is, struct isdn_ppp_resetparams *rp) { struct ippp_ccp_reset_state *rs; if (rp->valid) { /* The decompressor defines parameters by itself */ if (rp->rsend) { /* And he wants us to send a request */ if (!(rp->idval)) { printk(KERN_ERR "ippp_ccp: decompressor must" " specify reset id\n"); return; } if (is->reset->rs[rp->id]) { /* There is already a transaction in existence for this id. May be still waiting for a Ack or may be wrong. 
*/ rs = is->reset->rs[rp->id]; if (rs->state == CCPResetSentReq && rs->ta) { printk(KERN_DEBUG "ippp_ccp: reset" " trans still in progress" " for id %d\n", rp->id); } else { printk(KERN_WARNING "ippp_ccp: reset" " trans in wrong state %d for" " id %d\n", rs->state, rp->id); } } else { /* Ok, this is a new transaction */ printk(KERN_DEBUG "ippp_ccp: new trans for id" " %d to be started\n", rp->id); rs = isdn_ppp_ccp_reset_alloc_state(is, rp->id); if (!rs) { printk(KERN_ERR "ippp_ccp: out of mem" " allocing ccp trans\n"); return; } rs->state = CCPResetSentReq; rs->expra = rp->expra; if (rp->dtval) { rs->dlen = rp->dlen; memcpy(rs->data, rp->data, rp->dlen); } /* HACK TODO - add link comp here */ isdn_ppp_ccp_xmit_reset(is, PPP_CCP, CCP_RESETREQ, rs->id, rs->data, rs->dlen); /* Start the timer */ rs->timer.expires = jiffies + 5 * HZ; add_timer(&rs->timer); rs->ta = 1; } } else { printk(KERN_DEBUG "ippp_ccp: no reset sent\n"); } } else { /* The reset params are invalid. The decompressor does not care about them, so we just send the minimal requests and increase ids only when an Ack is received for a given id */ if (is->reset->rs[is->reset->lastid]) { /* There is already a transaction in existence for this id. May be still waiting for a Ack or may be wrong. 
*/ rs = is->reset->rs[is->reset->lastid]; if (rs->state == CCPResetSentReq && rs->ta) { printk(KERN_DEBUG "ippp_ccp: reset" " trans still in progress" " for id %d\n", rp->id); } else { printk(KERN_WARNING "ippp_ccp: reset" " trans in wrong state %d for" " id %d\n", rs->state, rp->id); } } else { printk(KERN_DEBUG "ippp_ccp: new trans for id" " %d to be started\n", is->reset->lastid); rs = isdn_ppp_ccp_reset_alloc_state(is, is->reset->lastid); if (!rs) { printk(KERN_ERR "ippp_ccp: out of mem" " allocing ccp trans\n"); return; } rs->state = CCPResetSentReq; /* We always expect an Ack if the decompressor doesn't know better */ rs->expra = 1; rs->dlen = 0; /* HACK TODO - add link comp here */ isdn_ppp_ccp_xmit_reset(is, PPP_CCP, CCP_RESETREQ, rs->id, NULL, 0); /* Start the timer */ rs->timer.expires = jiffies + 5 * HZ; add_timer(&rs->timer); rs->ta = 1; } } } /* An Ack was received for this id. This means we stop the timer and clean up the state prior to calling the decompressors reset routine. */ static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is, unsigned char id) { struct ippp_ccp_reset_state *rs = is->reset->rs[id]; if (rs) { if (rs->ta && rs->state == CCPResetSentReq) { /* Great, we are correct */ if (!rs->expra) printk(KERN_DEBUG "ippp_ccp: ResetAck received" " for id %d but not expected\n", id); } else { printk(KERN_INFO "ippp_ccp: ResetAck received out of" "sync for id %d\n", id); } if (rs->ta) { rs->ta = 0; del_timer(&rs->timer); } isdn_ppp_ccp_reset_free_state(is, id); } else { printk(KERN_INFO "ippp_ccp: ResetAck received for unknown id" " %d\n", id); } /* Make sure the simple reset stuff uses a new id next time */ is->reset->lastid++; } /* * decompress packet * * if master = 0, we're trying to uncompress an per-link compressed packet, * as opposed to an compressed reconstructed-from-MPPP packet. * proto is updated to protocol field of uncompressed packet. 
* * retval: decompressed packet, * same packet if uncompressed, * NULL if decompression error */ static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb, struct ippp_struct *is, struct ippp_struct *master, int *proto) { void *stat = NULL; struct isdn_ppp_compressor *ipc = NULL; struct sk_buff *skb_out; int len; struct ippp_struct *ri; struct isdn_ppp_resetparams rsparm; unsigned char rsdata[IPPP_RESET_MAXDATABYTES]; if (!master) { // per-link decompression stat = is->link_decomp_stat; ipc = is->link_decompressor; ri = is; } else { stat = master->decomp_stat; ipc = master->decompressor; ri = master; } if (!ipc) { // no decompressor -> we can't decompress. printk(KERN_DEBUG "ippp: no decompressor defined!\n"); return skb; } BUG_ON(!stat); // if we have a compressor, stat has been set as well if ((master && *proto == PPP_COMP) || (!master && *proto == PPP_COMPFRAG)) { // compressed packets are compressed by their protocol type // Set up reset params for the decompressor memset(&rsparm, 0, sizeof(rsparm)); rsparm.data = rsdata; rsparm.maxdlen = IPPP_RESET_MAXDATABYTES; skb_out = dev_alloc_skb(is->mru + PPP_HDRLEN); if (!skb_out) { kfree_skb(skb); printk(KERN_ERR "ippp: decomp memory allocation failure\n"); return NULL; } len = ipc->decompress(stat, skb, skb_out, &rsparm); kfree_skb(skb); if (len <= 0) { switch (len) { case DECOMP_ERROR: printk(KERN_INFO "ippp: decomp wants reset %s params\n", rsparm.valid ? 
"with" : "without"); isdn_ppp_ccp_reset_trans(ri, &rsparm); break; case DECOMP_FATALERROR: ri->pppcfg |= SC_DC_FERROR; /* Kick ipppd to recognize the error */ isdn_ppp_ccp_kickup(ri); break; } kfree_skb(skb_out); return NULL; } *proto = isdn_ppp_strip_proto(skb_out); if (*proto < 0) { kfree_skb(skb_out); return NULL; } return skb_out; } else { // uncompressed packets are fed through the decompressor to // update the decompressor state ipc->incomp(stat, skb, *proto); return skb; } } /* * compress a frame * type=0: normal/bundle compression * =1: link compression * returns original skb if we haven't compressed the frame * and a new skb pointer if we've done it */ static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in, int *proto, struct ippp_struct *is, struct ippp_struct *master, int type) { int ret; int new_proto; struct isdn_ppp_compressor *compressor; void *stat; struct sk_buff *skb_out; /* we do not compress control protocols */ if (*proto < 0 || *proto > 0x3fff) { return skb_in; } if (type) { /* type=1 => Link compression */ return skb_in; } else { if (!master) { compressor = is->compressor; stat = is->comp_stat; } else { compressor = master->compressor; stat = master->comp_stat; } new_proto = PPP_COMP; } if (!compressor) { printk(KERN_ERR "isdn_ppp: No compressor set!\n"); return skb_in; } if (!stat) { printk(KERN_ERR "isdn_ppp: Compressor not initialized?\n"); return skb_in; } /* Allow for at least 150 % expansion (for now) */ skb_out = alloc_skb(skb_in->len + skb_in->len / 2 + 32 + skb_headroom(skb_in), GFP_ATOMIC); if (!skb_out) return skb_in; skb_reserve(skb_out, skb_headroom(skb_in)); ret = (compressor->compress)(stat, skb_in, skb_out, *proto); if (!ret) { dev_kfree_skb(skb_out); return skb_in; } dev_kfree_skb(skb_in); *proto = new_proto; return skb_out; } /* * we received a CCP frame .. 
* not a clean solution, but we MUST handle a few cases in the kernel */ static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto) { struct ippp_struct *is; struct ippp_struct *mis; int len; struct isdn_ppp_resetparams rsparm; unsigned char rsdata[IPPP_RESET_MAXDATABYTES]; printk(KERN_DEBUG "Received CCP frame from peer slot(%d)\n", lp->ppp_slot); if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return; } is = ippp_table[lp->ppp_slot]; isdn_ppp_frame_log("ccp-rcv", skb->data, skb->len, 32, is->unit, lp->ppp_slot); if (lp->master) { int slot = ISDN_MASTER_PRIV(lp)->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: slot(%d) out of range\n", __func__, slot); return; } mis = ippp_table[slot]; } else mis = is; switch (skb->data[0]) { case CCP_CONFREQ: if (is->debug & 0x10) printk(KERN_DEBUG "Disable compression here!\n"); if (proto == PPP_CCP) mis->compflags &= ~SC_COMP_ON; else is->compflags &= ~SC_LINK_COMP_ON; break; case CCP_TERMREQ: case CCP_TERMACK: if (is->debug & 0x10) printk(KERN_DEBUG "Disable (de)compression here!\n"); if (proto == PPP_CCP) mis->compflags &= ~(SC_DECOMP_ON | SC_COMP_ON); else is->compflags &= ~(SC_LINK_DECOMP_ON | SC_LINK_COMP_ON); break; case CCP_CONFACK: /* if we RECEIVE an ackowledge we enable the decompressor */ if (is->debug & 0x10) printk(KERN_DEBUG "Enable decompression here!\n"); if (proto == PPP_CCP) { if (!mis->decompressor) break; mis->compflags |= SC_DECOMP_ON; } else { if (!is->decompressor) break; is->compflags |= SC_LINK_DECOMP_ON; } break; case CCP_RESETACK: printk(KERN_DEBUG "Received ResetAck from peer\n"); len = (skb->data[2] << 8) | skb->data[3]; len -= 4; if (proto == PPP_CCP) { /* If a reset Ack was outstanding for this id, then clean up the state engine */ isdn_ppp_ccp_reset_ack_rcvd(mis, skb->data[1]); if (mis->decompressor && mis->decomp_stat) 
mis->decompressor-> reset(mis->decomp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, NULL); /* TODO: This is not easy to decide here */ mis->compflags &= ~SC_DECOMP_DISCARD; } else { isdn_ppp_ccp_reset_ack_rcvd(is, skb->data[1]); if (is->link_decompressor && is->link_decomp_stat) is->link_decompressor-> reset(is->link_decomp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, NULL); /* TODO: neither here */ is->compflags &= ~SC_LINK_DECOMP_DISCARD; } break; case CCP_RESETREQ: printk(KERN_DEBUG "Received ResetReq from peer\n"); /* Receiving a ResetReq means we must reset our compressor */ /* Set up reset params for the reset entry */ memset(&rsparm, 0, sizeof(rsparm)); rsparm.data = rsdata; rsparm.maxdlen = IPPP_RESET_MAXDATABYTES; /* Isolate data length */ len = (skb->data[2] << 8) | skb->data[3]; len -= 4; if (proto == PPP_CCP) { if (mis->compressor && mis->comp_stat) mis->compressor-> reset(mis->comp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, &rsparm); } else { if (is->link_compressor && is->link_comp_stat) is->link_compressor-> reset(is->link_comp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, &rsparm); } /* Ack the Req as specified by rsparm */ if (rsparm.valid) { /* Compressor reset handler decided how to answer */ if (rsparm.rsend) { /* We should send a Frame */ isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK, rsparm.idval ? rsparm.id : skb->data[1], rsparm.dtval ? rsparm.data : NULL, rsparm.dtval ? rsparm.dlen : 0); } else { printk(KERN_DEBUG "ResetAck suppressed\n"); } } else { /* We answer with a straight reflected Ack */ isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK, skb->data[1], len ? &skb->data[4] : NULL, len); } break; } } /* * Daemon sends a CCP frame ... */ /* TODO: Clean this up with new Reset semantics */ /* I believe the CCP handling as-is is done wrong. 
Compressed frames * should only be sent/received after CCP reaches UP state, which means * both sides have sent CONF_ACK. Currently, we handle both directions * independently, which means we may accept compressed frames too early * (supposedly not a problem), but may also mean we send compressed frames * too early, which may turn out to be a problem. * This part of state machine should actually be handled by (i)pppd, but * that's too big of a change now. --kai */ /* Actually, we might turn this into an advantage: deal with the RFC in * the old tradition of beeing generous on what we accept, but beeing * strict on what we send. Thus we should just * - accept compressed frames as soon as decompression is negotiated * - send compressed frames only when decomp *and* comp are negotiated * - drop rx compressed frames if we cannot decomp (instead of pushing them * up to ipppd) * and I tried to modify this file according to that. --abp */ static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb) { struct ippp_struct *mis, *is; int proto, slot = lp->ppp_slot; unsigned char *data; if (!skb || skb->len < 3) return; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", __func__, slot); return; } is = ippp_table[slot]; /* Daemon may send with or without address and control field comp */ data = skb->data; if (!(is->pppcfg & SC_COMP_AC) && data[0] == 0xff && data[1] == 0x03) { data += 2; if (skb->len < 5) return; } proto = ((int)data[0]<<8) + data[1]; if (proto != PPP_CCP && proto != PPP_CCPFRAG) return; printk(KERN_DEBUG "Received CCP frame from daemon:\n"); isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot); if (lp->master) { slot = ISDN_MASTER_PRIV(lp)->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: slot(%d) out of range\n", __func__, slot); return; } mis = ippp_table[slot]; } else mis = is; if (mis != is) printk(KERN_DEBUG "isdn_ppp: 
Ouch! Master CCP sends on slave slot!\n"); switch (data[2]) { case CCP_CONFREQ: if (is->debug & 0x10) printk(KERN_DEBUG "Disable decompression here!\n"); if (proto == PPP_CCP) is->compflags &= ~SC_DECOMP_ON; else is->compflags &= ~SC_LINK_DECOMP_ON; break; case CCP_TERMREQ: case CCP_TERMACK: if (is->debug & 0x10) printk(KERN_DEBUG "Disable (de)compression here!\n"); if (proto == PPP_CCP) is->compflags &= ~(SC_DECOMP_ON | SC_COMP_ON); else is->compflags &= ~(SC_LINK_DECOMP_ON | SC_LINK_COMP_ON); break; case CCP_CONFACK: /* if we SEND an ackowledge we can/must enable the compressor */ if (is->debug & 0x10) printk(KERN_DEBUG "Enable compression here!\n"); if (proto == PPP_CCP) { if (!is->compressor) break; is->compflags |= SC_COMP_ON; } else { if (!is->compressor) break; is->compflags |= SC_LINK_COMP_ON; } break; case CCP_RESETACK: /* If we send a ACK we should reset our compressor */ if (is->debug & 0x10) printk(KERN_DEBUG "Reset decompression state here!\n"); printk(KERN_DEBUG "ResetAck from daemon passed by\n"); if (proto == PPP_CCP) { /* link to master? 
*/ if (is->compressor && is->comp_stat) is->compressor->reset(is->comp_stat, 0, 0, NULL, 0, NULL); is->compflags &= ~SC_COMP_DISCARD; } else { if (is->link_compressor && is->link_comp_stat) is->link_compressor->reset(is->link_comp_stat, 0, 0, NULL, 0, NULL); is->compflags &= ~SC_LINK_COMP_DISCARD; } break; case CCP_RESETREQ: /* Just let it pass by */ printk(KERN_DEBUG "ResetReq from daemon passed by\n"); break; } } int isdn_ppp_register_compressor(struct isdn_ppp_compressor *ipc) { ipc->next = ipc_head; ipc->prev = NULL; if (ipc_head) { ipc_head->prev = ipc; } ipc_head = ipc; return 0; } int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *ipc) { if (ipc->prev) ipc->prev->next = ipc->next; else ipc_head = ipc->next; if (ipc->next) ipc->next->prev = ipc->prev; ipc->prev = ipc->next = NULL; return 0; } static int isdn_ppp_set_compressor(struct ippp_struct *is, struct isdn_ppp_comp_data *data) { struct isdn_ppp_compressor *ipc = ipc_head; int ret; void *stat; int num = data->num; if (is->debug & 0x10) printk(KERN_DEBUG "[%d] Set %s type %d\n", is->unit, (data->flags & IPPP_COMP_FLAG_XMIT) ? "compressor" : "decompressor", num); /* If is has no valid reset state vector, we cannot allocate a decompressor. The decompressor would cause reset transactions sooner or later, and they need that vector. 
*/ if (!(data->flags & IPPP_COMP_FLAG_XMIT) && !is->reset) { printk(KERN_ERR "ippp_ccp: no reset data structure - can't" " allow decompression.\n"); return -ENOMEM; } while (ipc) { if (ipc->num == num) { stat = ipc->alloc(data); if (stat) { ret = ipc->init(stat, data, is->unit, 0); if (!ret) { printk(KERN_ERR "Can't init (de)compression!\n"); ipc->free(stat); stat = NULL; break; } } else { printk(KERN_ERR "Can't alloc (de)compression!\n"); break; } if (data->flags & IPPP_COMP_FLAG_XMIT) { if (data->flags & IPPP_COMP_FLAG_LINK) { if (is->link_comp_stat) is->link_compressor->free(is->link_comp_stat); is->link_comp_stat = stat; is->link_compressor = ipc; } else { if (is->comp_stat) is->compressor->free(is->comp_stat); is->comp_stat = stat; is->compressor = ipc; } } else { if (data->flags & IPPP_COMP_FLAG_LINK) { if (is->link_decomp_stat) is->link_decompressor->free(is->link_decomp_stat); is->link_decomp_stat = stat; is->link_decompressor = ipc; } else { if (is->decomp_stat) is->decompressor->free(is->decomp_stat); is->decomp_stat = stat; is->decompressor = ipc; } } return 0; } ipc = ipc->next; } return -EINVAL; }
gpl-2.0
MassStash/htc_m8whl_kernel_sense
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
4947
15040
/*
 * Copyright (C) 1999 - 2010 Intel Corporation.
 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
 *
 * This code was derived from the Intel e1000e Linux driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#include "pch_gbe.h"
#include <linux/module.h>	/* for __MODULE_STRING */

/* Sentinel / boolean values for the module parameters below */
#define OPTION_UNSET   -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED  1

/**
 * TxDescriptors - Transmit Descriptor Count
 * @Valid Range:   PCH_GBE_MIN_TXD - PCH_GBE_MAX_TXD
 * @Default Value: PCH_GBE_DEFAULT_TXD
 */
static int TxDescriptors = OPTION_UNSET;
module_param(TxDescriptors, int, 0);
MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");

/**
 * RxDescriptors - Receive Descriptor Count
 * @Valid Range:   PCH_GBE_MIN_RXD - PCH_GBE_MAX_RXD
 * @Default Value: PCH_GBE_DEFAULT_RXD
 */
static int RxDescriptors = OPTION_UNSET;
module_param(RxDescriptors, int, 0);
MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");

/**
 * Speed - User Specified Speed Override
 * @Valid Range: 0, 10, 100, 1000
 *   - 0:    auto-negotiate at all supported speeds
 *   - 10:   only link at 10 Mbps
 *   - 100:  only link at 100 Mbps
 *   - 1000: only link at 1000 Mbps
 * @Default Value: 0
 */
static int Speed = OPTION_UNSET;
module_param(Speed, int, 0);
MODULE_PARM_DESC(Speed, "Speed setting");

/**
 * Duplex - User Specified Duplex Override
 * @Valid Range: 0-2
 *   - 0: auto-negotiate for duplex
 *   - 1: only link at half duplex
 *   - 2: only link at full duplex
 * @Default Value: 0
 */
static int Duplex = OPTION_UNSET;
module_param(Duplex, int, 0);
MODULE_PARM_DESC(Duplex, "Duplex setting");

#define HALF_DUPLEX 1
#define FULL_DUPLEX 2

/**
 * AutoNeg - Auto-negotiation Advertisement Override
 * @Valid Range: 0x01-0x0F, 0x20-0x2F
 *
 * The AutoNeg value is a bit mask describing which speed and duplex
 * combinations should be advertised during auto-negotiation.
 * The supported speed and duplex modes are listed below
 *
 * Bit          7     6     5      4      3     2     1      0
 * Speed (Mbps) N/A   N/A   1000   N/A    100   100   10     10
 * Duplex                   Full          Full  Half  Full   Half
 *
 * @Default Value: 0x2F (copper)
 */
static int AutoNeg = OPTION_UNSET;
module_param(AutoNeg, int, 0);
MODULE_PARM_DESC(AutoNeg, "Advertised auto-negotiation setting");

#define PHY_ADVERTISE_10_HALF      0x0001
#define PHY_ADVERTISE_10_FULL      0x0002
#define PHY_ADVERTISE_100_HALF     0x0004
#define PHY_ADVERTISE_100_FULL     0x0008
#define PHY_ADVERTISE_1000_HALF    0x0010 /* Not used, just FYI */
#define PHY_ADVERTISE_1000_FULL    0x0020
#define PCH_AUTONEG_ADVERTISE_DEFAULT   0x2F

/**
 * FlowControl - User Specified Flow Control Override
 * @Valid Range: 0-3
 *    - 0: No Flow Control
 *    - 1: Rx only, respond to PAUSE frames but do not generate them
 *    - 2: Tx only, generate PAUSE frames but ignore them on receive
 *    - 3: Full Flow Control Support
 * @Default Value: Read flow control settings from the EEPROM
 */
static int FlowControl = OPTION_UNSET;
module_param(FlowControl, int, 0);
MODULE_PARM_DESC(FlowControl, "Flow Control setting");

/*
 * XsumRX - Receive Checksum Offload Enable/Disable
 * @Valid Range: 0, 1
 *    - 0: disables all checksum offload
 *    - 1: enables receive IP/TCP/UDP checksum offload
 * @Default Value: PCH_GBE_DEFAULT_RX_CSUM
 */
static int XsumRX = OPTION_UNSET;
module_param(XsumRX, int, 0);
MODULE_PARM_DESC(XsumRX, "Disable or enable Receive Checksum offload");
#define PCH_GBE_DEFAULT_RX_CSUM             true	/* true or false */

/*
 * XsumTX - Transmit Checksum Offload Enable/Disable
 * @Valid Range: 0, 1
 *    - 0: disables all checksum offload
 *    - 1: enables transmit IP/TCP/UDP checksum offload
 * @Default Value: PCH_GBE_DEFAULT_TX_CSUM
 */
static int XsumTX = OPTION_UNSET;
module_param(XsumTX, int, 0);
MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
#define PCH_GBE_DEFAULT_TX_CSUM             true	/* true or false */

/*
 * struct pch_gbe_option - describes one module parameter and how to
 * validate it.
 * @type: validation strategy: boolean enable, numeric range, or list
 *        of allowed values
 * @name: human-readable parameter name used in log messages
 * @err:  message appended to the log when validation fails
 * @def:  default applied when the parameter is unset or invalid
 * @arg:  validation data: range bounds (.r) or allowed-entry list (.l)
 *
 * NOTE(review): the previous kernel-doc here ("Force the MAC's flow
 * control settings", @hw, Returns) was a copy-paste error and described
 * a function, not this struct.
 */
struct pch_gbe_option {
	enum { enable_option, range_option, list_option } type;
	char *name;
	char *err;
	int  def;
	union {
		struct { /* range_option info */
			int min;
			int max;
		} r;
		struct { /* list_option info */
			int nr;
			const struct pch_gbe_opt_list {
				int i;
				char *str;
			} *p;
		} l;
	} arg;
};

/* Allowed values for Speed (empty strings: no extra message logged) */
static const struct pch_gbe_opt_list speed_list[] = {
	{ 0, "" },
	{ SPEED_10, "" },
	{ SPEED_100, "" },
	{ SPEED_1000, "" }
};

/* Allowed values for Duplex */
static const struct pch_gbe_opt_list dplx_list[] = {
	{ 0, "" },
	{ HALF_DUPLEX, "" },
	{ FULL_DUPLEX, "" }
};

/* Allowed AutoNeg bit masks and the advertisement each one implies */
static const struct pch_gbe_opt_list an_list[] =
#define AA "AutoNeg advertising "
	{{ 0x01, AA "10/HD" },
	 { 0x02, AA "10/FD" },
	 { 0x03, AA "10/FD, 10/HD" },
	 { 0x04, AA "100/HD" },
	 { 0x05, AA "100/HD, 10/HD" },
	 { 0x06, AA "100/HD, 10/FD" },
	 { 0x07, AA "100/HD, 10/FD, 10/HD" },
	 { 0x08, AA "100/FD" },
	 { 0x09, AA "100/FD, 10/HD" },
	 { 0x0a, AA "100/FD, 10/FD" },
	 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
	 { 0x0c, AA "100/FD, 100/HD" },
	 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
	 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
	 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
	 { 0x20, AA "1000/FD" },
	 { 0x21, AA "1000/FD, 10/HD" },
	 { 0x22, AA "1000/FD, 10/FD" },
	 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
	 { 0x24, AA "1000/FD, 100/HD" },
	 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
	 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
	 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
	 { 0x28, AA "1000/FD, 100/FD" },
	 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
	 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
	 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
	 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
	 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
	 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
	 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }
};

/* Allowed FlowControl values */
static const struct pch_gbe_opt_list fc_list[] = {
	{ PCH_GBE_FC_NONE, "Flow Control Disabled" },
	{ PCH_GBE_FC_RX_PAUSE, "Flow Control Receive Only" },
	{ PCH_GBE_FC_TX_PAUSE, "Flow Control Transmit Only" },
	{ PCH_GBE_FC_FULL, "Flow Control Enabled" }
};

/**
 * pch_gbe_validate_option - Validate option
 * @value:   value; replaced by opt->def when unset or invalid
 * @opt:     option descriptor controlling the validation
 * @adapter: Board private structure
 * Returns
 *	0:		Successful.
 *	Negative value:	Failed (value was out of range; default applied).
 */
static int pch_gbe_validate_option(int *value,
				   const struct pch_gbe_option *opt,
				   struct pch_gbe_adapter *adapter)
{
	if (*value == OPTION_UNSET) {
		*value = opt->def;
		return 0;
	}

	switch (opt->type) {
	case enable_option:
		switch (*value) {
		case OPTION_ENABLED:
			pr_debug("%s Enabled\n", opt->name);
			return 0;
		case OPTION_DISABLED:
			pr_debug("%s Disabled\n", opt->name);
			return 0;
		}
		break;
	case range_option:
		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
			pr_debug("%s set to %i\n", opt->name, *value);
			return 0;
		}
		break;
	case list_option: {
		int i;
		const struct pch_gbe_opt_list *ent;

		for (i = 0; i < opt->arg.l.nr; i++) {
			ent = &opt->arg.l.p[i];
			if (*value == ent->i) {
				/* empty strings mean "valid, nothing to log" */
				if (ent->str[0] != '\0')
					pr_debug("%s\n", ent->str);
				return 0;
			}
		}
	}
		break;
	default:
		BUG();
	}

	pr_debug("Invalid %s value specified (%i) %s\n",
		 opt->name, *value, opt->err);
	*value = opt->def;
	return -1;
}

/**
 * pch_gbe_check_copper_options - Range Checking for Link Options, Copper Version
 * @adapter:  Board private structure
 *
 * Validates Speed/Duplex/AutoNeg and programs hw->mac / hw->phy with
 * either forced link settings or the advertisement mask; combinations
 * are dispatched on the sum speed + dplx (dplx is 1 or 2, so sums are
 * unambiguous).
 */
static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	int speed, dplx;

	{ /* Speed */
		static const struct pch_gbe_option opt = {
			.type = list_option,
			.name = "Speed",
			.err  = "parameter ignored",
			.def  = 0,
			.arg  = { .l = { .nr = (int)ARRAY_SIZE(speed_list),
					 .p = speed_list } }
		};
		speed = Speed;
		pch_gbe_validate_option(&speed, &opt, adapter);
	}
	{ /* Duplex */
		static const struct pch_gbe_option opt = {
			.type = list_option,
			.name = "Duplex",
			.err  = "parameter ignored",
			.def  = 0,
			.arg  = { .l = { .nr = (int)ARRAY_SIZE(dplx_list),
					 .p = dplx_list } }
		};
		dplx = Duplex;
		pch_gbe_validate_option(&dplx, &opt, adapter);
	}

	{ /* Autoneg */
		static const struct pch_gbe_option opt = {
			.type = list_option,
			.name = "AutoNeg",
			.err  = "parameter ignored",
			.def  = PCH_AUTONEG_ADVERTISE_DEFAULT,
			.arg  = { .l = { .nr = (int)ARRAY_SIZE(an_list),
					 .p = an_list} }
		};
		/* Forced Speed/Duplex takes precedence over AutoNeg */
		if (speed || dplx) {
			pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
			hw->phy.autoneg_advertised = opt.def;
		} else {
			int tmp = AutoNeg;

			pch_gbe_validate_option(&tmp, &opt, adapter);
			hw->phy.autoneg_advertised = tmp;
		}
	}

	switch (speed + dplx) {
	case 0:
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		/* NOTE(review): this condition is always false inside
		 * case 0 (speed + dplx == 0 implies both are 0), so the
		 * message below is dead code — likely inherited from
		 * e1000e where the guard differed; confirm intent. */
		if ((speed || dplx))
			pr_debug("Speed and duplex autonegotiation enabled\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case HALF_DUPLEX:
		pr_debug("Half Duplex specified without Speed\n");
		pr_debug("Using Autonegotiation at Half Duplex only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
						PHY_ADVERTISE_100_HALF;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case FULL_DUPLEX:
		pr_debug("Full Duplex specified without Speed\n");
		pr_debug("Using Autonegotiation at Full Duplex only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL |
						PHY_ADVERTISE_100_FULL |
						PHY_ADVERTISE_1000_FULL;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_FULL;
		break;
	case SPEED_10:
		pr_debug("10 Mbps Speed specified without Duplex\n");
		pr_debug("Using Autonegotiation at 10 Mbps only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
						PHY_ADVERTISE_10_FULL;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case SPEED_10 + HALF_DUPLEX:
		pr_debug("Forcing to 10 Mbps Half Duplex\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
		hw->phy.autoneg_advertised = 0;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case SPEED_10 + FULL_DUPLEX:
		pr_debug("Forcing to 10 Mbps Full Duplex\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
		hw->phy.autoneg_advertised = 0;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_FULL;
		break;
	case SPEED_100:
		pr_debug("100 Mbps Speed specified without Duplex\n");
		pr_debug("Using Autonegotiation at 100 Mbps only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF |
						PHY_ADVERTISE_100_FULL;
		hw->mac.link_speed = SPEED_100;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case SPEED_100 + HALF_DUPLEX:
		pr_debug("Forcing to 100 Mbps Half Duplex\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
		hw->phy.autoneg_advertised = 0;
		hw->mac.link_speed = SPEED_100;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case SPEED_100 + FULL_DUPLEX:
		pr_debug("Forcing to 100 Mbps Full Duplex\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
		hw->phy.autoneg_advertised = 0;
		hw->mac.link_speed = SPEED_100;
		hw->mac.link_duplex = DUPLEX_FULL;
		break;
	case SPEED_1000:
		pr_debug("1000 Mbps Speed specified without Duplex\n");
		goto full_duplex_only;
	case SPEED_1000 + HALF_DUPLEX:
		pr_debug("Half Duplex is not supported at 1000 Mbps\n");
		/* fall through */
	case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
		pr_debug("Using Autonegotiation at 1000 Mbps Full Duplex only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL;
		hw->mac.link_speed = SPEED_1000;
		hw->mac.link_duplex = DUPLEX_FULL;
		break;
	default:
		BUG();
	}
}

/**
 * pch_gbe_check_options - Range Checking for Command Line Parameters
 * @adapter:  Board private structure
 *
 * Validates every module parameter, applying defaults for anything
 * unset or out of range, and stores the results in the adapter's
 * rings, netdev features and hw state.
 */
void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *dev = adapter->netdev;
	int val;

	{ /* Transmit Descriptor Count */
		static const struct pch_gbe_option opt = {
			.type = range_option,
			.name = "Transmit Descriptors",
			.err  = "using default of "
				__MODULE_STRING(PCH_GBE_DEFAULT_TXD),
			.def  = PCH_GBE_DEFAULT_TXD,
			.arg  = { .r = { .min = PCH_GBE_MIN_TXD,
					 .max = PCH_GBE_MAX_TXD } }
		};
		struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;

		tx_ring->count = TxDescriptors;
		pch_gbe_validate_option(&tx_ring->count, &opt, adapter);
		/* hardware requires a multiple of the descriptor group size */
		tx_ring->count = roundup(tx_ring->count,
					 PCH_GBE_TX_DESC_MULTIPLE);
	}
	{ /* Receive Descriptor Count */
		static const struct pch_gbe_option opt = {
			.type = range_option,
			.name = "Receive Descriptors",
			.err  = "using default of "
				__MODULE_STRING(PCH_GBE_DEFAULT_RXD),
			.def  = PCH_GBE_DEFAULT_RXD,
			.arg  = { .r = { .min = PCH_GBE_MIN_RXD,
					 .max = PCH_GBE_MAX_RXD } }
		};
		struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

		rx_ring->count = RxDescriptors;
		pch_gbe_validate_option(&rx_ring->count, &opt, adapter);
		rx_ring->count = roundup(rx_ring->count,
					 PCH_GBE_RX_DESC_MULTIPLE);
	}
	{ /* Checksum Offload Enable/Disable */
		static const struct pch_gbe_option opt = {
			.type = enable_option,
			.name = "Checksum Offload",
			.err  = "defaulting to Enabled",
			.def  = PCH_GBE_DEFAULT_RX_CSUM
		};
		val = XsumRX;
		pch_gbe_validate_option(&val, &opt, adapter);
		if (!val)
			dev->features &= ~NETIF_F_RXCSUM;
	}
	{ /* Checksum Offload Enable/Disable */
		static const struct pch_gbe_option opt = {
			.type = enable_option,
			.name = "Checksum Offload",
			.err  = "defaulting to Enabled",
			.def  = PCH_GBE_DEFAULT_TX_CSUM
		};
		val = XsumTX;
		pch_gbe_validate_option(&val, &opt, adapter);
		if (!val)
			dev->features &= ~NETIF_F_ALL_CSUM;
	}
	{ /* Flow Control */
		static const struct pch_gbe_option opt = {
			.type = list_option,
			.name = "Flow Control",
			.err  = "reading default settings from EEPROM",
			.def  = PCH_GBE_FC_DEFAULT,
			.arg  = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
					 .p = fc_list } }
		};
		int tmp = FlowControl;

		pch_gbe_validate_option(&tmp, &opt, adapter);
		hw->mac.fc = tmp;
	}

	pch_gbe_check_copper_options(adapter);
}
gpl-2.0
qdk0901/kernel-exynos5410
drivers/platform/x86/intel_mid_powerbtn.c
4947
4004
/* * Power button driver for Medfield. * * Copyright (C) 2010 Intel Corp * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/mfd/intel_msic.h> #define DRIVER_NAME "msic_power_btn" #define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */ /* * MSIC document ti_datasheet defines the 1st bit reg 0x21 is used to mask * power button interrupt */ #define MSIC_PWRBTNM (1 << 0) static irqreturn_t mfld_pb_isr(int irq, void *dev_id) { struct input_dev *input = dev_id; int ret; u8 pbstat; ret = intel_msic_reg_read(INTEL_MSIC_PBSTATUS, &pbstat); dev_dbg(input->dev.parent, "PB_INT status= %d\n", pbstat); if (ret < 0) { dev_err(input->dev.parent, "Read error %d while reading" " MSIC_PB_STATUS\n", ret); } else { input_event(input, EV_KEY, KEY_POWER, !(pbstat & MSIC_PB_LEVEL)); input_sync(input); } return IRQ_HANDLED; } static int __devinit mfld_pb_probe(struct platform_device *pdev) { struct input_dev *input; int irq = platform_get_irq(pdev, 0); int error; if (irq < 0) return -EINVAL; input = input_allocate_device(); if (!input) { dev_err(&pdev->dev, "Input device allocation error\n"); return -ENOMEM; } input->name = pdev->name; input->phys = "power-button/input0"; input->id.bustype = BUS_HOST; 
input->dev.parent = &pdev->dev; input_set_capability(input, EV_KEY, KEY_POWER); error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND, DRIVER_NAME, input); if (error) { dev_err(&pdev->dev, "Unable to request irq %d for mfld power" "button\n", irq); goto err_free_input; } error = input_register_device(input); if (error) { dev_err(&pdev->dev, "Unable to register input dev, error " "%d\n", error); goto err_free_irq; } platform_set_drvdata(pdev, input); /* * SCU firmware might send power button interrupts to IA core before * kernel boots and doesn't get EOI from IA core. The first bit of * MSIC reg 0x21 is kept masked, and SCU firmware doesn't send new * power interrupt to Android kernel. Unmask the bit when probing * power button in kernel. * There is a very narrow race between irq handler and power button * initialization. The race happens rarely. So we needn't worry * about it. */ error = intel_msic_reg_update(INTEL_MSIC_IRQLVL1MSK, 0, MSIC_PWRBTNM); if (error) { dev_err(&pdev->dev, "Unable to clear power button interrupt, " "error: %d\n", error); goto err_free_irq; } return 0; err_free_irq: free_irq(irq, input); err_free_input: input_free_device(input); return error; } static int __devexit mfld_pb_remove(struct platform_device *pdev) { struct input_dev *input = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); free_irq(irq, input); input_unregister_device(input); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver mfld_pb_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, .probe = mfld_pb_probe, .remove = __devexit_p(mfld_pb_remove), }; module_platform_driver(mfld_pb_driver); MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>"); MODULE_DESCRIPTION("Intel Medfield Power Button Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
scoty755/Sense7_Kernel_b2wlj
drivers/video/cyber2000fb.c
4947
48424
/* * linux/drivers/video/cyber2000fb.c * * Copyright (C) 1998-2002 Russell King * * MIPS and 50xx clock support * Copyright (C) 2001 Bradley D. LaRonde <brad@ltc.com> * * 32 bit support, text color and panning fixes for modes != 8 bit * Copyright (C) 2002 Denis Oliver Kropp <dok@directfb.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Integraphics CyberPro 2000, 2010 and 5000 frame buffer device * * Based on cyberfb.c. * * Note that we now use the new fbcon fix, var and cmap scheme. We do * still have to check which console is the currently displayed one * however, especially for the colourmap stuff. * * We also use the new hotplug PCI subsystem. I'm not sure if there * are any such cards, but I'm erring on the side of caution. We don't * want to go pop just because someone does have one. * * Note that this doesn't work fully in the case of multiple CyberPro * cards with grabbers. We currently can only attach to the first * CyberPro card found. * * When we're in truecolour mode, we power down the LUT RAM as a power * saving feature. Also, when we enter any of the powersaving modes * (except soft blanking) we power down the RAMDACs. This saves about * 1W, which is roughly 8% of the power consumption of a NetWinder * (which, incidentally, is about the same saving as a 2.5in hard disk * entering standby mode.) 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <asm/pgtable.h> #ifdef __arm__ #include <asm/mach-types.h> #endif #include "cyber2000fb.h" struct cfb_info { struct fb_info fb; struct display_switch *dispsw; struct display *display; unsigned char __iomem *region; unsigned char __iomem *regs; u_int id; u_int irq; int func_use_count; u_long ref_ps; /* * Clock divisors */ u_int divisors[4]; struct { u8 red, green, blue; } palette[NR_PALETTE]; u_char mem_ctl1; u_char mem_ctl2; u_char mclk_mult; u_char mclk_div; /* * RAMDAC control register is both of these or'ed together */ u_char ramdac_ctrl; u_char ramdac_powerdown; u32 pseudo_palette[16]; spinlock_t reg_b0_lock; #ifdef CONFIG_FB_CYBER2000_DDC bool ddc_registered; struct i2c_adapter ddc_adapter; struct i2c_algo_bit_data ddc_algo; #endif #ifdef CONFIG_FB_CYBER2000_I2C struct i2c_adapter i2c_adapter; struct i2c_algo_bit_data i2c_algo; #endif }; static char *default_font = "Acorn8x8"; module_param(default_font, charp, 0); MODULE_PARM_DESC(default_font, "Default font name"); /* * Our access methods. 
*/ #define cyber2000fb_writel(val, reg, cfb) writel(val, (cfb)->regs + (reg)) #define cyber2000fb_writew(val, reg, cfb) writew(val, (cfb)->regs + (reg)) #define cyber2000fb_writeb(val, reg, cfb) writeb(val, (cfb)->regs + (reg)) #define cyber2000fb_readb(reg, cfb) readb((cfb)->regs + (reg)) static inline void cyber2000_crtcw(unsigned int reg, unsigned int val, struct cfb_info *cfb) { cyber2000fb_writew((reg & 255) | val << 8, 0x3d4, cfb); } static inline void cyber2000_grphw(unsigned int reg, unsigned int val, struct cfb_info *cfb) { cyber2000fb_writew((reg & 255) | val << 8, 0x3ce, cfb); } static inline unsigned int cyber2000_grphr(unsigned int reg, struct cfb_info *cfb) { cyber2000fb_writeb(reg, 0x3ce, cfb); return cyber2000fb_readb(0x3cf, cfb); } static inline void cyber2000_attrw(unsigned int reg, unsigned int val, struct cfb_info *cfb) { cyber2000fb_readb(0x3da, cfb); cyber2000fb_writeb(reg, 0x3c0, cfb); cyber2000fb_readb(0x3c1, cfb); cyber2000fb_writeb(val, 0x3c0, cfb); } static inline void cyber2000_seqw(unsigned int reg, unsigned int val, struct cfb_info *cfb) { cyber2000fb_writew((reg & 255) | val << 8, 0x3c4, cfb); } /* -------------------- Hardware specific routines ------------------------- */ /* * Hardware Cyber2000 Acceleration */ static void cyber2000fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct cfb_info *cfb = (struct cfb_info *)info; unsigned long dst, col; if (!(cfb->fb.var.accel_flags & FB_ACCELF_TEXT)) { cfb_fillrect(info, rect); return; } cyber2000fb_writeb(0, CO_REG_CONTROL, cfb); cyber2000fb_writew(rect->width - 1, CO_REG_PIXWIDTH, cfb); cyber2000fb_writew(rect->height - 1, CO_REG_PIXHEIGHT, cfb); col = rect->color; if (cfb->fb.var.bits_per_pixel > 8) col = ((u32 *)cfb->fb.pseudo_palette)[col]; cyber2000fb_writel(col, CO_REG_FGCOLOUR, cfb); dst = rect->dx + rect->dy * cfb->fb.var.xres_virtual; if (cfb->fb.var.bits_per_pixel == 24) { cyber2000fb_writeb(dst, CO_REG_X_PHASE, cfb); dst *= 3; } cyber2000fb_writel(dst, 
CO_REG_DEST_PTR, cfb); cyber2000fb_writeb(CO_FG_MIX_SRC, CO_REG_FGMIX, cfb); cyber2000fb_writew(CO_CMD_L_PATTERN_FGCOL, CO_REG_CMD_L, cfb); cyber2000fb_writew(CO_CMD_H_BLITTER, CO_REG_CMD_H, cfb); } static void cyber2000fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct cfb_info *cfb = (struct cfb_info *)info; unsigned int cmd = CO_CMD_L_PATTERN_FGCOL; unsigned long src, dst; if (!(cfb->fb.var.accel_flags & FB_ACCELF_TEXT)) { cfb_copyarea(info, region); return; } cyber2000fb_writeb(0, CO_REG_CONTROL, cfb); cyber2000fb_writew(region->width - 1, CO_REG_PIXWIDTH, cfb); cyber2000fb_writew(region->height - 1, CO_REG_PIXHEIGHT, cfb); src = region->sx + region->sy * cfb->fb.var.xres_virtual; dst = region->dx + region->dy * cfb->fb.var.xres_virtual; if (region->sx < region->dx) { src += region->width - 1; dst += region->width - 1; cmd |= CO_CMD_L_INC_LEFT; } if (region->sy < region->dy) { src += (region->height - 1) * cfb->fb.var.xres_virtual; dst += (region->height - 1) * cfb->fb.var.xres_virtual; cmd |= CO_CMD_L_INC_UP; } if (cfb->fb.var.bits_per_pixel == 24) { cyber2000fb_writeb(dst, CO_REG_X_PHASE, cfb); src *= 3; dst *= 3; } cyber2000fb_writel(src, CO_REG_SRC1_PTR, cfb); cyber2000fb_writel(dst, CO_REG_DEST_PTR, cfb); cyber2000fb_writew(CO_FG_MIX_SRC, CO_REG_FGMIX, cfb); cyber2000fb_writew(cmd, CO_REG_CMD_L, cfb); cyber2000fb_writew(CO_CMD_H_FGSRCMAP | CO_CMD_H_BLITTER, CO_REG_CMD_H, cfb); } static void cyber2000fb_imageblit(struct fb_info *info, const struct fb_image *image) { cfb_imageblit(info, image); return; } static int cyber2000fb_sync(struct fb_info *info) { struct cfb_info *cfb = (struct cfb_info *)info; int count = 100000; if (!(cfb->fb.var.accel_flags & FB_ACCELF_TEXT)) return 0; while (cyber2000fb_readb(CO_REG_CONTROL, cfb) & CO_CTRL_BUSY) { if (!count--) { debug_printf("accel_wait timed out\n"); cyber2000fb_writeb(0, CO_REG_CONTROL, cfb); break; } udelay(1); } return 0; } /* * 
=========================================================================== */ static inline u32 convert_bitfield(u_int val, struct fb_bitfield *bf) { u_int mask = (1 << bf->length) - 1; return (val >> (16 - bf->length) & mask) << bf->offset; } /* * Set a single color register. Return != 0 for invalid regno. */ static int cyber2000fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct cfb_info *cfb = (struct cfb_info *)info; struct fb_var_screeninfo *var = &cfb->fb.var; u32 pseudo_val; int ret = 1; switch (cfb->fb.fix.visual) { default: return 1; /* * Pseudocolour: * 8 8 * pixel --/--+--/--> red lut --> red dac * | 8 * +--/--> green lut --> green dac * | 8 * +--/--> blue lut --> blue dac */ case FB_VISUAL_PSEUDOCOLOR: if (regno >= NR_PALETTE) return 1; red >>= 8; green >>= 8; blue >>= 8; cfb->palette[regno].red = red; cfb->palette[regno].green = green; cfb->palette[regno].blue = blue; cyber2000fb_writeb(regno, 0x3c8, cfb); cyber2000fb_writeb(red, 0x3c9, cfb); cyber2000fb_writeb(green, 0x3c9, cfb); cyber2000fb_writeb(blue, 0x3c9, cfb); return 0; /* * Direct colour: * n rl * pixel --/--+--/--> red lut --> red dac * | gl * +--/--> green lut --> green dac * | bl * +--/--> blue lut --> blue dac * n = bpp, rl = red length, gl = green length, bl = blue length */ case FB_VISUAL_DIRECTCOLOR: red >>= 8; green >>= 8; blue >>= 8; if (var->green.length == 6 && regno < 64) { cfb->palette[regno << 2].green = green; /* * The 6 bits of the green component are applied * to the high 6 bits of the LUT. 
*/ cyber2000fb_writeb(regno << 2, 0x3c8, cfb); cyber2000fb_writeb(cfb->palette[regno >> 1].red, 0x3c9, cfb); cyber2000fb_writeb(green, 0x3c9, cfb); cyber2000fb_writeb(cfb->palette[regno >> 1].blue, 0x3c9, cfb); green = cfb->palette[regno << 3].green; ret = 0; } if (var->green.length >= 5 && regno < 32) { cfb->palette[regno << 3].red = red; cfb->palette[regno << 3].green = green; cfb->palette[regno << 3].blue = blue; /* * The 5 bits of each colour component are * applied to the high 5 bits of the LUT. */ cyber2000fb_writeb(regno << 3, 0x3c8, cfb); cyber2000fb_writeb(red, 0x3c9, cfb); cyber2000fb_writeb(green, 0x3c9, cfb); cyber2000fb_writeb(blue, 0x3c9, cfb); ret = 0; } if (var->green.length == 4 && regno < 16) { cfb->palette[regno << 4].red = red; cfb->palette[regno << 4].green = green; cfb->palette[regno << 4].blue = blue; /* * The 5 bits of each colour component are * applied to the high 5 bits of the LUT. */ cyber2000fb_writeb(regno << 4, 0x3c8, cfb); cyber2000fb_writeb(red, 0x3c9, cfb); cyber2000fb_writeb(green, 0x3c9, cfb); cyber2000fb_writeb(blue, 0x3c9, cfb); ret = 0; } /* * Since this is only used for the first 16 colours, we * don't have to care about overflowing for regno >= 32 */ pseudo_val = regno << var->red.offset | regno << var->green.offset | regno << var->blue.offset; break; /* * True colour: * n rl * pixel --/--+--/--> red dac * | gl * +--/--> green dac * | bl * +--/--> blue dac * n = bpp, rl = red length, gl = green length, bl = blue length */ case FB_VISUAL_TRUECOLOR: pseudo_val = convert_bitfield(transp ^ 0xffff, &var->transp); pseudo_val |= convert_bitfield(red, &var->red); pseudo_val |= convert_bitfield(green, &var->green); pseudo_val |= convert_bitfield(blue, &var->blue); ret = 0; break; } /* * Now set our pseudo palette for the CFB16/24/32 drivers. 
*/ if (regno < 16) ((u32 *)cfb->fb.pseudo_palette)[regno] = pseudo_val; return ret; } struct par_info { /* * Hardware */ u_char clock_mult; u_char clock_div; u_char extseqmisc; u_char co_pixfmt; u_char crtc_ofl; u_char crtc[19]; u_int width; u_int pitch; u_int fetch; /* * Other */ u_char ramdac; }; static const u_char crtc_idx[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18 }; static void cyber2000fb_write_ramdac_ctrl(struct cfb_info *cfb) { unsigned int i; unsigned int val = cfb->ramdac_ctrl | cfb->ramdac_powerdown; cyber2000fb_writeb(0x56, 0x3ce, cfb); i = cyber2000fb_readb(0x3cf, cfb); cyber2000fb_writeb(i | 4, 0x3cf, cfb); cyber2000fb_writeb(val, 0x3c6, cfb); cyber2000fb_writeb(i, 0x3cf, cfb); /* prevent card lock-up observed on x86 with CyberPro 2000 */ cyber2000fb_readb(0x3cf, cfb); } static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw) { u_int i; /* * Blank palette */ for (i = 0; i < NR_PALETTE; i++) { cyber2000fb_writeb(i, 0x3c8, cfb); cyber2000fb_writeb(0, 0x3c9, cfb); cyber2000fb_writeb(0, 0x3c9, cfb); cyber2000fb_writeb(0, 0x3c9, cfb); } cyber2000fb_writeb(0xef, 0x3c2, cfb); cyber2000_crtcw(0x11, 0x0b, cfb); cyber2000_attrw(0x11, 0x00, cfb); cyber2000_seqw(0x00, 0x01, cfb); cyber2000_seqw(0x01, 0x01, cfb); cyber2000_seqw(0x02, 0x0f, cfb); cyber2000_seqw(0x03, 0x00, cfb); cyber2000_seqw(0x04, 0x0e, cfb); cyber2000_seqw(0x00, 0x03, cfb); for (i = 0; i < sizeof(crtc_idx); i++) cyber2000_crtcw(crtc_idx[i], hw->crtc[i], cfb); for (i = 0x0a; i < 0x10; i++) cyber2000_crtcw(i, 0, cfb); cyber2000_grphw(EXT_CRT_VRTOFL, hw->crtc_ofl, cfb); cyber2000_grphw(0x00, 0x00, cfb); cyber2000_grphw(0x01, 0x00, cfb); cyber2000_grphw(0x02, 0x00, cfb); cyber2000_grphw(0x03, 0x00, cfb); cyber2000_grphw(0x04, 0x00, cfb); cyber2000_grphw(0x05, 0x60, cfb); cyber2000_grphw(0x06, 0x05, cfb); cyber2000_grphw(0x07, 0x0f, cfb); cyber2000_grphw(0x08, 0xff, cfb); /* Attribute controller 
registers */ for (i = 0; i < 16; i++) cyber2000_attrw(i, i, cfb); cyber2000_attrw(0x10, 0x01, cfb); cyber2000_attrw(0x11, 0x00, cfb); cyber2000_attrw(0x12, 0x0f, cfb); cyber2000_attrw(0x13, 0x00, cfb); cyber2000_attrw(0x14, 0x00, cfb); /* PLL registers */ spin_lock(&cfb->reg_b0_lock); cyber2000_grphw(EXT_DCLK_MULT, hw->clock_mult, cfb); cyber2000_grphw(EXT_DCLK_DIV, hw->clock_div, cfb); cyber2000_grphw(EXT_MCLK_MULT, cfb->mclk_mult, cfb); cyber2000_grphw(EXT_MCLK_DIV, cfb->mclk_div, cfb); cyber2000_grphw(0x90, 0x01, cfb); cyber2000_grphw(0xb9, 0x80, cfb); cyber2000_grphw(0xb9, 0x00, cfb); spin_unlock(&cfb->reg_b0_lock); cfb->ramdac_ctrl = hw->ramdac; cyber2000fb_write_ramdac_ctrl(cfb); cyber2000fb_writeb(0x20, 0x3c0, cfb); cyber2000fb_writeb(0xff, 0x3c6, cfb); cyber2000_grphw(0x14, hw->fetch, cfb); cyber2000_grphw(0x15, ((hw->fetch >> 8) & 0x03) | ((hw->pitch >> 4) & 0x30), cfb); cyber2000_grphw(EXT_SEQ_MISC, hw->extseqmisc, cfb); /* * Set up accelerator registers */ cyber2000fb_writew(hw->width, CO_REG_SRC_WIDTH, cfb); cyber2000fb_writew(hw->width, CO_REG_DEST_WIDTH, cfb); cyber2000fb_writeb(hw->co_pixfmt, CO_REG_PIXFMT, cfb); } static inline int cyber2000fb_update_start(struct cfb_info *cfb, struct fb_var_screeninfo *var) { u_int base = var->yoffset * var->xres_virtual + var->xoffset; base *= var->bits_per_pixel; /* * Convert to bytes and shift two extra bits because DAC * can only start on 4 byte aligned data. 
*/ base >>= 5; if (base >= 1 << 20) return -EINVAL; cyber2000_grphw(0x10, base >> 16 | 0x10, cfb); cyber2000_crtcw(0x0c, base >> 8, cfb); cyber2000_crtcw(0x0d, base, cfb); return 0; } static int cyber2000fb_decode_crtc(struct par_info *hw, struct cfb_info *cfb, struct fb_var_screeninfo *var) { u_int Htotal, Hblankend, Hsyncend; u_int Vtotal, Vdispend, Vblankstart, Vblankend, Vsyncstart, Vsyncend; #define ENCODE_BIT(v, b1, m, b2) ((((v) >> (b1)) & (m)) << (b2)) hw->crtc[13] = hw->pitch; hw->crtc[17] = 0xe3; hw->crtc[14] = 0; hw->crtc[8] = 0; Htotal = var->xres + var->right_margin + var->hsync_len + var->left_margin; if (Htotal > 2080) return -EINVAL; hw->crtc[0] = (Htotal >> 3) - 5; hw->crtc[1] = (var->xres >> 3) - 1; hw->crtc[2] = var->xres >> 3; hw->crtc[4] = (var->xres + var->right_margin) >> 3; Hblankend = (Htotal - 4 * 8) >> 3; hw->crtc[3] = ENCODE_BIT(Hblankend, 0, 0x1f, 0) | ENCODE_BIT(1, 0, 0x01, 7); Hsyncend = (var->xres + var->right_margin + var->hsync_len) >> 3; hw->crtc[5] = ENCODE_BIT(Hsyncend, 0, 0x1f, 0) | ENCODE_BIT(Hblankend, 5, 0x01, 7); Vdispend = var->yres - 1; Vsyncstart = var->yres + var->lower_margin; Vsyncend = var->yres + var->lower_margin + var->vsync_len; Vtotal = var->yres + var->lower_margin + var->vsync_len + var->upper_margin - 2; if (Vtotal > 2047) return -EINVAL; Vblankstart = var->yres + 6; Vblankend = Vtotal - 10; hw->crtc[6] = Vtotal; hw->crtc[7] = ENCODE_BIT(Vtotal, 8, 0x01, 0) | ENCODE_BIT(Vdispend, 8, 0x01, 1) | ENCODE_BIT(Vsyncstart, 8, 0x01, 2) | ENCODE_BIT(Vblankstart, 8, 0x01, 3) | ENCODE_BIT(1, 0, 0x01, 4) | ENCODE_BIT(Vtotal, 9, 0x01, 5) | ENCODE_BIT(Vdispend, 9, 0x01, 6) | ENCODE_BIT(Vsyncstart, 9, 0x01, 7); hw->crtc[9] = ENCODE_BIT(0, 0, 0x1f, 0) | ENCODE_BIT(Vblankstart, 9, 0x01, 5) | ENCODE_BIT(1, 0, 0x01, 6); hw->crtc[10] = Vsyncstart; hw->crtc[11] = ENCODE_BIT(Vsyncend, 0, 0x0f, 0) | ENCODE_BIT(1, 0, 0x01, 7); hw->crtc[12] = Vdispend; hw->crtc[15] = Vblankstart; hw->crtc[16] = Vblankend; hw->crtc[18] = 0xff; /* * 
overflow - graphics reg 0x11 * 0=VTOTAL:10 1=VDEND:10 2=VRSTART:10 3=VBSTART:10 * 4=LINECOMP:10 5-IVIDEO 6=FIXCNT */ hw->crtc_ofl = ENCODE_BIT(Vtotal, 10, 0x01, 0) | ENCODE_BIT(Vdispend, 10, 0x01, 1) | ENCODE_BIT(Vsyncstart, 10, 0x01, 2) | ENCODE_BIT(Vblankstart, 10, 0x01, 3) | EXT_CRT_VRTOFL_LINECOMP10; /* woody: set the interlaced bit... */ /* FIXME: what about doublescan? */ if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) hw->crtc_ofl |= EXT_CRT_VRTOFL_INTERLACE; return 0; } /* * The following was discovered by a good monitor, bit twiddling, theorising * and but mostly luck. Strangely, it looks like everyone elses' PLL! * * Clock registers: * fclock = fpll / div2 * fpll = fref * mult / div1 * where: * fref = 14.318MHz (69842ps) * mult = reg0xb0.7:0 * div1 = (reg0xb1.5:0 + 1) * div2 = 2^(reg0xb1.7:6) * fpll should be between 115 and 260 MHz * (8696ps and 3846ps) */ static int cyber2000fb_decode_clock(struct par_info *hw, struct cfb_info *cfb, struct fb_var_screeninfo *var) { u_long pll_ps = var->pixclock; const u_long ref_ps = cfb->ref_ps; u_int div2, t_div1, best_div1, best_mult; int best_diff; int vco; /* * Step 1: * find div2 such that 115MHz < fpll < 260MHz * and 0 <= div2 < 4 */ for (div2 = 0; div2 < 4; div2++) { u_long new_pll; new_pll = pll_ps / cfb->divisors[div2]; if (8696 > new_pll && new_pll > 3846) { pll_ps = new_pll; break; } } if (div2 == 4) return -EINVAL; /* * Step 2: * Given pll_ps and ref_ps, find: * pll_ps * 0.995 < pll_ps_calc < pll_ps * 1.005 * where { 1 < best_div1 < 32, 1 < best_mult < 256 } * pll_ps_calc = best_div1 / (ref_ps * best_mult) */ best_diff = 0x7fffffff; best_mult = 2; best_div1 = 32; for (t_div1 = 2; t_div1 < 32; t_div1 += 1) { u_int rr, t_mult, t_pll_ps; int diff; /* * Find the multiplier for this divisor */ rr = ref_ps * t_div1; t_mult = (rr + pll_ps / 2) / pll_ps; /* * Is the multiplier within the correct range? 
*/ if (t_mult > 256 || t_mult < 2) continue; /* * Calculate the actual clock period from this multiplier * and divisor, and estimate the error. */ t_pll_ps = (rr + t_mult / 2) / t_mult; diff = pll_ps - t_pll_ps; if (diff < 0) diff = -diff; if (diff < best_diff) { best_diff = diff; best_mult = t_mult; best_div1 = t_div1; } /* * If we hit an exact value, there is no point in continuing. */ if (diff == 0) break; } /* * Step 3: * combine values */ hw->clock_mult = best_mult - 1; hw->clock_div = div2 << 6 | (best_div1 - 1); vco = ref_ps * best_div1 / best_mult; if ((ref_ps == 40690) && (vco < 5556)) /* Set VFSEL when VCO > 180MHz (5.556 ps). */ hw->clock_div |= EXT_DCLK_DIV_VFSEL; return 0; } /* * Set the User Defined Part of the Display */ static int cyber2000fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct cfb_info *cfb = (struct cfb_info *)info; struct par_info hw; unsigned int mem; int err; var->transp.msb_right = 0; var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.offset = 0; var->transp.length = 0; switch (var->bits_per_pixel) { case 8: /* PSEUDOCOLOUR, 256 */ var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; case 16:/* DIRECTCOLOUR, 64k or 32k */ switch (var->green.length) { case 6: /* RGB565, 64k */ var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; break; default: case 5: /* RGB555, 32k */ var->red.offset = 10; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 0; var->blue.length = 5; break; case 4: /* RGB444, 4k + transparency? 
*/ var->transp.offset = 12; var->transp.length = 4; var->red.offset = 8; var->red.length = 4; var->green.offset = 4; var->green.length = 4; var->blue.offset = 0; var->blue.length = 4; break; } break; case 24:/* TRUECOLOUR, 16m */ var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; case 32:/* TRUECOLOUR, 16m */ var->transp.offset = 24; var->transp.length = 8; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; default: return -EINVAL; } mem = var->xres_virtual * var->yres_virtual * (var->bits_per_pixel / 8); if (mem > cfb->fb.fix.smem_len) var->yres_virtual = cfb->fb.fix.smem_len * 8 / (var->bits_per_pixel * var->xres_virtual); if (var->yres > var->yres_virtual) var->yres = var->yres_virtual; if (var->xres > var->xres_virtual) var->xres = var->xres_virtual; err = cyber2000fb_decode_clock(&hw, cfb, var); if (err) return err; err = cyber2000fb_decode_crtc(&hw, cfb, var); if (err) return err; return 0; } static int cyber2000fb_set_par(struct fb_info *info) { struct cfb_info *cfb = (struct cfb_info *)info; struct fb_var_screeninfo *var = &cfb->fb.var; struct par_info hw; unsigned int mem; hw.width = var->xres_virtual; hw.ramdac = RAMDAC_VREFEN | RAMDAC_DAC8BIT; switch (var->bits_per_pixel) { case 8: hw.co_pixfmt = CO_PIXFMT_8BPP; hw.pitch = hw.width >> 3; hw.extseqmisc = EXT_SEQ_MISC_8; break; case 16: hw.co_pixfmt = CO_PIXFMT_16BPP; hw.pitch = hw.width >> 2; switch (var->green.length) { case 6: /* RGB565, 64k */ hw.extseqmisc = EXT_SEQ_MISC_16_RGB565; break; case 5: /* RGB555, 32k */ hw.extseqmisc = EXT_SEQ_MISC_16_RGB555; break; case 4: /* RGB444, 4k + transparency? 
*/ hw.extseqmisc = EXT_SEQ_MISC_16_RGB444; break; default: BUG(); } break; case 24:/* TRUECOLOUR, 16m */ hw.co_pixfmt = CO_PIXFMT_24BPP; hw.width *= 3; hw.pitch = hw.width >> 3; hw.ramdac |= (RAMDAC_BYPASS | RAMDAC_RAMPWRDN); hw.extseqmisc = EXT_SEQ_MISC_24_RGB888; break; case 32:/* TRUECOLOUR, 16m */ hw.co_pixfmt = CO_PIXFMT_32BPP; hw.pitch = hw.width >> 1; hw.ramdac |= (RAMDAC_BYPASS | RAMDAC_RAMPWRDN); hw.extseqmisc = EXT_SEQ_MISC_32; break; default: BUG(); } /* * Sigh, this is absolutely disgusting, but caused by * the way the fbcon developers want to separate out * the "checking" and the "setting" of the video mode. * * If the mode is not suitable for the hardware here, * we can't prevent it being set by returning an error. * * In theory, since NetWinders contain just one VGA card, * we should never end up hitting this problem. */ BUG_ON(cyber2000fb_decode_clock(&hw, cfb, var) != 0); BUG_ON(cyber2000fb_decode_crtc(&hw, cfb, var) != 0); hw.width -= 1; hw.fetch = hw.pitch; if (!(cfb->mem_ctl2 & MEM_CTL2_64BIT)) hw.fetch <<= 1; hw.fetch += 1; cfb->fb.fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; /* * Same here - if the size of the video mode exceeds the * available RAM, we can't prevent this mode being set. * * In theory, since NetWinders contain just one VGA card, * we should never end up hitting this problem. */ mem = cfb->fb.fix.line_length * var->yres_virtual; BUG_ON(mem > cfb->fb.fix.smem_len); /* * 8bpp displays are always pseudo colour. 16bpp and above * are direct colour or true colour, depending on whether * the RAMDAC palettes are bypassed. (Direct colour has * palettes, true colour does not.) 
*/ if (var->bits_per_pixel == 8) cfb->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR; else if (hw.ramdac & RAMDAC_BYPASS) cfb->fb.fix.visual = FB_VISUAL_TRUECOLOR; else cfb->fb.fix.visual = FB_VISUAL_DIRECTCOLOR; cyber2000fb_set_timing(cfb, &hw); cyber2000fb_update_start(cfb, var); return 0; } /* * Pan or Wrap the Display */ static int cyber2000fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct cfb_info *cfb = (struct cfb_info *)info; if (cyber2000fb_update_start(cfb, var)) return -EINVAL; cfb->fb.var.xoffset = var->xoffset; cfb->fb.var.yoffset = var->yoffset; if (var->vmode & FB_VMODE_YWRAP) { cfb->fb.var.vmode |= FB_VMODE_YWRAP; } else { cfb->fb.var.vmode &= ~FB_VMODE_YWRAP; } return 0; } /* * (Un)Blank the display. * * Blank the screen if blank_mode != 0, else unblank. If * blank == NULL then the caller blanks by setting the CLUT * (Color Look Up Table) to all black. Return 0 if blanking * succeeded, != 0 if un-/blanking failed due to e.g. a * video mode which doesn't support it. 
Implements VESA * suspend and powerdown modes on hardware that supports * disabling hsync/vsync: * blank_mode == 2: suspend vsync * blank_mode == 3: suspend hsync * blank_mode == 4: powerdown * * wms...Enable VESA DMPS compatible powerdown mode * run "setterm -powersave powerdown" to take advantage */ static int cyber2000fb_blank(int blank, struct fb_info *info) { struct cfb_info *cfb = (struct cfb_info *)info; unsigned int sync = 0; int i; switch (blank) { case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */ sync = EXT_SYNC_CTL_VS_0 | EXT_SYNC_CTL_HS_0; break; case FB_BLANK_HSYNC_SUSPEND: /* hsync off */ sync = EXT_SYNC_CTL_VS_NORMAL | EXT_SYNC_CTL_HS_0; break; case FB_BLANK_VSYNC_SUSPEND: /* vsync off */ sync = EXT_SYNC_CTL_VS_0 | EXT_SYNC_CTL_HS_NORMAL; break; case FB_BLANK_NORMAL: /* soft blank */ default: /* unblank */ break; } cyber2000_grphw(EXT_SYNC_CTL, sync, cfb); if (blank <= 1) { /* turn on ramdacs */ cfb->ramdac_powerdown &= ~(RAMDAC_DACPWRDN | RAMDAC_BYPASS | RAMDAC_RAMPWRDN); cyber2000fb_write_ramdac_ctrl(cfb); } /* * Soft blank/unblank the display. 
 */
	if (blank) {
		/* soft blank: program every LUT entry to black */
		for (i = 0; i < NR_PALETTE; i++) {
			cyber2000fb_writeb(i, 0x3c8, cfb);
			cyber2000fb_writeb(0, 0x3c9, cfb);
			cyber2000fb_writeb(0, 0x3c9, cfb);
			cyber2000fb_writeb(0, 0x3c9, cfb);
		}
	} else {
		/* unblank: restore the saved palette from cfb->palette */
		for (i = 0; i < NR_PALETTE; i++) {
			cyber2000fb_writeb(i, 0x3c8, cfb);
			cyber2000fb_writeb(cfb->palette[i].red, 0x3c9, cfb);
			cyber2000fb_writeb(cfb->palette[i].green, 0x3c9, cfb);
			cyber2000fb_writeb(cfb->palette[i].blue, 0x3c9, cfb);
		}
	}

	/*
	 * For the deeper blanking levels (hsync/vsync suspend, powerdown)
	 * also power down the RAMDACs; the matching power-up for
	 * blank <= 1 was done above, before the palette was rewritten.
	 */
	if (blank >= 2) {
		/* turn off ramdacs */
		cfb->ramdac_powerdown |= RAMDAC_DACPWRDN | RAMDAC_BYPASS |
					 RAMDAC_RAMPWRDN;
		cyber2000fb_write_ramdac_ctrl(cfb);
	}
	return 0;
}

/* frame buffer operations exported to the fbdev core */
static struct fb_ops cyber2000fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= cyber2000fb_check_var,
	.fb_set_par	= cyber2000fb_set_par,
	.fb_setcolreg	= cyber2000fb_setcolreg,
	.fb_blank	= cyber2000fb_blank,
	.fb_pan_display	= cyber2000fb_pan_display,
	.fb_fillrect	= cyber2000fb_fillrect,
	.fb_copyarea	= cyber2000fb_copyarea,
	.fb_imageblit	= cyber2000fb_imageblit,
	.fb_sync	= cyber2000fb_sync,
};

/*
 * This is the only "static" reference to the internal data structures
 * of this driver.  It is here solely at the moment to support the other
 * CyberPro modules external to this driver.
 */
static struct cfb_info *int_cfb_info;

/*
 * Enable access to the extended registers
 *
 * Reference-counted: only the 0 -> 1 transition actually sets the
 * EXT_FUNC_CTL_EXTREGENBL bit.  NOTE(review): func_use_count is not
 * protected by any lock here — presumably callers serialise; confirm.
 */
void cyber2000fb_enable_extregs(struct cfb_info *cfb)
{
	cfb->func_use_count += 1;

	if (cfb->func_use_count == 1) {
		int old;

		old = cyber2000_grphr(EXT_FUNC_CTL, cfb);
		old |= EXT_FUNC_CTL_EXTREGENBL;
		cyber2000_grphw(EXT_FUNC_CTL, old, cfb);
	}
}
EXPORT_SYMBOL(cyber2000fb_enable_extregs);

/*
 * Disable access to the extended registers
 *
 * Counterpart of cyber2000fb_enable_extregs(): the extended-register
 * enable bit is cleared only on the 1 -> 0 transition.  An unbalanced
 * call (count already 0) is reported rather than wrapping the counter.
 */
void cyber2000fb_disable_extregs(struct cfb_info *cfb)
{
	if (cfb->func_use_count == 1) {
		int old;

		old = cyber2000_grphr(EXT_FUNC_CTL, cfb);
		old &= ~EXT_FUNC_CTL_EXTREGENBL;
		cyber2000_grphw(EXT_FUNC_CTL, old, cfb);
	}

	if (cfb->func_use_count == 0)
		printk(KERN_ERR "disable_extregs: count = 0\n");
	else
		cfb->func_use_count -= 1;
}
EXPORT_SYMBOL(cyber2000fb_disable_extregs);

/*
 * Attach a capture/tv driver to the core CyberX0X0 driver.
 *
 * Fills @info with pointers into the single registered instance
 * (int_cfb_info) — registers, framebuffer memory, IRQ and, when
 * CONFIG_FB_CYBER2000_I2C is set, the I2C adapter.  Returns non-zero
 * on success, 0 when no CyberPro device has been probed yet.
 */
int cyber2000fb_attach(struct cyberpro_info *info, int idx)
{
	if (int_cfb_info != NULL) {
		info->dev = int_cfb_info->fb.device;
#ifdef CONFIG_FB_CYBER2000_I2C
		info->i2c = &int_cfb_info->i2c_adapter;
#else
		info->i2c = NULL;
#endif
		info->regs = int_cfb_info->regs;
		info->irq = int_cfb_info->irq;
		info->fb = int_cfb_info->fb.screen_base;
		info->fb_size = int_cfb_info->fb.fix.smem_len;
		info->info = int_cfb_info;

		strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
			sizeof(info->dev_name));
	}

	return int_cfb_info != NULL;
}
EXPORT_SYMBOL(cyber2000fb_attach);

/*
 * Detach a capture/tv driver from the core CyberX0X0 driver.
 */
void cyber2000fb_detach(int idx)
{
	/*
	 * Intentionally empty: cyber2000fb_attach() only hands out
	 * pointers and takes no reference, so there is nothing to undo.
	 */
}
EXPORT_SYMBOL(cyber2000fb_detach);

#ifdef CONFIG_FB_CYBER2000_DDC

/*
 * DDC (monitor EDID) bus, bit-banged through graphics register 0xb0.
 * Output bits are inverted (open-drain emulation); input bits read
 * the actual line state.
 */
#define DDC_REG		0xb0
#define DDC_SCL_OUT	(1 << 0)
#define DDC_SDA_OUT	(1 << 4)
#define DDC_SCL_IN	(1 << 2)
#define DDC_SDA_IN	(1 << 6)

/*
 * Gate access to the DDC register.  Takes reg_b0_lock, which is
 * released again by cyber2000fb_disable_ddc() — every accessor below
 * brackets its register access with this enable/disable pair.
 */
static void cyber2000fb_enable_ddc(struct cfb_info *cfb)
{
	spin_lock(&cfb->reg_b0_lock);
	cyber2000fb_writew(0x1bf, 0x3ce, cfb);
}

static void cyber2000fb_disable_ddc(struct cfb_info *cfb)
{
	cyber2000fb_writew(0x0bf, 0x3ce, cfb);
	spin_unlock(&cfb->reg_b0_lock);
}

/* i2c-algo-bit callback: drive the DDC clock line */
static void cyber2000fb_ddc_setscl(void *data, int val)
{
	struct cfb_info *cfb = data;
	unsigned char reg;

	cyber2000fb_enable_ddc(cfb);
	reg = cyber2000_grphr(DDC_REG, cfb);
	if (!val)	/* bit is inverted */
		reg |= DDC_SCL_OUT;
	else
		reg &= ~DDC_SCL_OUT;
	cyber2000_grphw(DDC_REG, reg, cfb);
	cyber2000fb_disable_ddc(cfb);
}

/* i2c-algo-bit callback: drive the DDC data line */
static void cyber2000fb_ddc_setsda(void *data, int val)
{
	struct cfb_info *cfb = data;
	unsigned char reg;

	cyber2000fb_enable_ddc(cfb);
	reg = cyber2000_grphr(DDC_REG, cfb);
	if (!val)	/* bit is inverted */
		reg |= DDC_SDA_OUT;
	else
		reg &= ~DDC_SDA_OUT;
	cyber2000_grphw(DDC_REG, reg, cfb);
	cyber2000fb_disable_ddc(cfb);
}

/* i2c-algo-bit callback: sample the DDC clock line */
static int cyber2000fb_ddc_getscl(void *data)
{
	struct cfb_info *cfb = data;
	int retval;

	cyber2000fb_enable_ddc(cfb);
	retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SCL_IN);
	cyber2000fb_disable_ddc(cfb);

	return retval;
}

/* i2c-algo-bit callback: sample the DDC data line */
static int cyber2000fb_ddc_getsda(void *data)
{
	struct cfb_info *cfb = data;
	int retval;

	cyber2000fb_enable_ddc(cfb);
	retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SDA_IN);
	cyber2000fb_disable_ddc(cfb);

	return retval;
}

/*
 * Register the DDC bus with the i2c-bit-algo layer so the fbdev core
 * can probe monitor EDID over it.
 */
static int __devinit cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
{
	strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
		sizeof(cfb->ddc_adapter.name));
	cfb->ddc_adapter.owner		= THIS_MODULE;
	cfb->ddc_adapter.class		= I2C_CLASS_DDC;
	cfb->ddc_adapter.algo_data	= &cfb->ddc_algo;
	cfb->ddc_adapter.dev.parent	= cfb->fb.device;
	cfb->ddc_algo.setsda		= cyber2000fb_ddc_setsda;
	cfb->ddc_algo.setscl		= cyber2000fb_ddc_setscl;
cfb->ddc_algo.getsda = cyber2000fb_ddc_getsda; cfb->ddc_algo.getscl = cyber2000fb_ddc_getscl; cfb->ddc_algo.udelay = 10; cfb->ddc_algo.timeout = 20; cfb->ddc_algo.data = cfb; i2c_set_adapdata(&cfb->ddc_adapter, cfb); return i2c_bit_add_bus(&cfb->ddc_adapter); } #endif /* CONFIG_FB_CYBER2000_DDC */ #ifdef CONFIG_FB_CYBER2000_I2C static void cyber2000fb_i2c_setsda(void *data, int state) { struct cfb_info *cfb = data; unsigned int latch2; spin_lock(&cfb->reg_b0_lock); latch2 = cyber2000_grphr(EXT_LATCH2, cfb); latch2 &= EXT_LATCH2_I2C_CLKEN; if (state) latch2 |= EXT_LATCH2_I2C_DATEN; cyber2000_grphw(EXT_LATCH2, latch2, cfb); spin_unlock(&cfb->reg_b0_lock); } static void cyber2000fb_i2c_setscl(void *data, int state) { struct cfb_info *cfb = data; unsigned int latch2; spin_lock(&cfb->reg_b0_lock); latch2 = cyber2000_grphr(EXT_LATCH2, cfb); latch2 &= EXT_LATCH2_I2C_DATEN; if (state) latch2 |= EXT_LATCH2_I2C_CLKEN; cyber2000_grphw(EXT_LATCH2, latch2, cfb); spin_unlock(&cfb->reg_b0_lock); } static int cyber2000fb_i2c_getsda(void *data) { struct cfb_info *cfb = data; int ret; spin_lock(&cfb->reg_b0_lock); ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_DAT); spin_unlock(&cfb->reg_b0_lock); return ret; } static int cyber2000fb_i2c_getscl(void *data) { struct cfb_info *cfb = data; int ret; spin_lock(&cfb->reg_b0_lock); ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_CLK); spin_unlock(&cfb->reg_b0_lock); return ret; } static int __devinit cyber2000fb_i2c_register(struct cfb_info *cfb) { strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id, sizeof(cfb->i2c_adapter.name)); cfb->i2c_adapter.owner = THIS_MODULE; cfb->i2c_adapter.algo_data = &cfb->i2c_algo; cfb->i2c_adapter.dev.parent = cfb->fb.device; cfb->i2c_algo.setsda = cyber2000fb_i2c_setsda; cfb->i2c_algo.setscl = cyber2000fb_i2c_setscl; cfb->i2c_algo.getsda = cyber2000fb_i2c_getsda; cfb->i2c_algo.getscl = cyber2000fb_i2c_getscl; cfb->i2c_algo.udelay = 5; cfb->i2c_algo.timeout = msecs_to_jiffies(100); 
cfb->i2c_algo.data = cfb; return i2c_bit_add_bus(&cfb->i2c_adapter); } static void cyber2000fb_i2c_unregister(struct cfb_info *cfb) { i2c_del_adapter(&cfb->i2c_adapter); } #else #define cyber2000fb_i2c_register(cfb) (0) #define cyber2000fb_i2c_unregister(cfb) do { } while (0) #endif /* * These parameters give * 640x480, hsync 31.5kHz, vsync 60Hz */ static struct fb_videomode __devinitdata cyber2000fb_default_mode = { .refresh = 60, .xres = 640, .yres = 480, .pixclock = 39722, .left_margin = 56, .right_margin = 16, .upper_margin = 34, .lower_margin = 9, .hsync_len = 88, .vsync_len = 2, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }; static char igs_regs[] = { EXT_CRT_IRQ, 0, EXT_CRT_TEST, 0, EXT_SYNC_CTL, 0, EXT_SEG_WRITE_PTR, 0, EXT_SEG_READ_PTR, 0, EXT_BIU_MISC, EXT_BIU_MISC_LIN_ENABLE | EXT_BIU_MISC_COP_ENABLE | EXT_BIU_MISC_COP_BFC, EXT_FUNC_CTL, 0, CURS_H_START, 0, CURS_H_START + 1, 0, CURS_H_PRESET, 0, CURS_V_START, 0, CURS_V_START + 1, 0, CURS_V_PRESET, 0, CURS_CTL, 0, EXT_ATTRIB_CTL, EXT_ATTRIB_CTL_EXT, EXT_OVERSCAN_RED, 0, EXT_OVERSCAN_GREEN, 0, EXT_OVERSCAN_BLUE, 0, /* some of these are questionable when we have a BIOS */ EXT_MEM_CTL0, EXT_MEM_CTL0_7CLK | EXT_MEM_CTL0_RAS_1 | EXT_MEM_CTL0_MULTCAS, EXT_HIDDEN_CTL1, 0x30, EXT_FIFO_CTL, 0x0b, EXT_FIFO_CTL + 1, 0x17, 0x76, 0x00, EXT_HIDDEN_CTL4, 0xc8 }; /* * Initialise the CyberPro hardware. On the CyberPro5XXXX, * ensure that we're using the correct PLL (5XXX's may be * programmed to use an additional set of PLLs.) 
 */
static void cyberpro_init_hw(struct cfb_info *cfb)
{
	int i;

	/* igs_regs is a flat (register, value) pair table */
	for (i = 0; i < sizeof(igs_regs); i += 2)
		cyber2000_grphw(igs_regs[i], igs_regs[i + 1], cfb);

	if (cfb->id == ID_CYBERPRO_5000) {
		unsigned char val;

		/*
		 * 5000-only: clear the low bits of graphics register 0xba,
		 * keeping only bit 7 — selects the PLL set (see the comment
		 * preceding this function).
		 */
		cyber2000fb_writeb(0xba, 0x3ce, cfb);
		val = cyber2000fb_readb(0x3cf, cfb) & 0x80;
		cyber2000fb_writeb(val, 0x3cf, cfb);
	}
}

/*
 * Allocate and pre-initialise a cfb_info for chip @id, including the
 * chip-specific PLL reference period (picoseconds), post-divisor table
 * and the fixed fbdev parameters.  Returns NULL on allocation failure.
 */
static struct cfb_info __devinit *cyberpro_alloc_fb_info(unsigned int id,
							 char *name)
{
	struct cfb_info *cfb;

	cfb = kzalloc(sizeof(struct cfb_info), GFP_KERNEL);
	if (!cfb)
		return NULL;

	cfb->id = id;

	if (id == ID_CYBERPRO_5000)
		cfb->ref_ps = 40690;	/* 24.576 MHz */
	else
		cfb->ref_ps = 69842;	/* 14.31818 MHz (69841?) */

	cfb->divisors[0] = 1;
	cfb->divisors[1] = 2;
	cfb->divisors[2] = 4;
	if (id == ID_CYBERPRO_2000)
		cfb->divisors[3] = 8;
	else
		cfb->divisors[3] = 6;

	strcpy(cfb->fb.fix.id, name);

	cfb->fb.fix.type	= FB_TYPE_PACKED_PIXELS;
	cfb->fb.fix.type_aux	= 0;
	cfb->fb.fix.xpanstep	= 0;
	cfb->fb.fix.ypanstep	= 1;
	cfb->fb.fix.ywrapstep	= 0;

	switch (id) {
	case ID_IGA_1682:
		cfb->fb.fix.accel = 0;
		break;

	case ID_CYBERPRO_2000:
		cfb->fb.fix.accel = FB_ACCEL_IGS_CYBER2000;
		break;

	case ID_CYBERPRO_2010:
		cfb->fb.fix.accel = FB_ACCEL_IGS_CYBER2010;
		break;

	case ID_CYBERPRO_5000:
		cfb->fb.fix.accel = FB_ACCEL_IGS_CYBER5000;
		break;
	}

	cfb->fb.var.nonstd	= 0;
	cfb->fb.var.activate	= FB_ACTIVATE_NOW;
	cfb->fb.var.height	= -1;
	cfb->fb.var.width	= -1;
	cfb->fb.var.accel_flags	= FB_ACCELF_TEXT;

	cfb->fb.fbops		= &cyber2000fb_ops;
	cfb->fb.flags		= FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
	cfb->fb.pseudo_palette	= cfb->pseudo_palette;

	spin_lock_init(&cfb->reg_b0_lock);

	/* NOTE(review): fb_alloc_cmap() return value is not checked here */
	fb_alloc_cmap(&cfb->fb.cmap, NR_PALETTE, 0);

	return cfb;
}

/* Release a cfb_info allocated by cyberpro_alloc_fb_info(); NULL is ok */
static void cyberpro_free_fb_info(struct cfb_info *cfb)
{
	if (cfb) {
		/*
		 * Free the colourmap (allocating a zero-length cmap
		 * releases the old one)
		 */
		fb_alloc_cmap(&cfb->fb.cmap, 0, 0);

		kfree(cfb);
	}
}

/*
 * Parse Cyber2000fb options.
Usage: * video=cyber2000:font:fontname */ #ifndef MODULE static int cyber2000fb_setup(char *options) { char *opt; if (!options || !*options) return 0; while ((opt = strsep(&options, ",")) != NULL) { if (!*opt) continue; if (strncmp(opt, "font:", 5) == 0) { static char default_font_storage[40]; strlcpy(default_font_storage, opt + 5, sizeof(default_font_storage)); default_font = default_font_storage; continue; } printk(KERN_ERR "CyberPro20x0: unknown parameter: %s\n", opt); } return 0; } #endif /* MODULE */ /* * The CyberPro chips can be placed on many different bus types. * This probe function is common to all bus types. The bus-specific * probe function is expected to have: * - enabled access to the linear memory region * - memory mapped access to the registers * - initialised mem_ctl1 and mem_ctl2 appropriately. */ static int __devinit cyberpro_common_probe(struct cfb_info *cfb) { u_long smem_size; u_int h_sync, v_sync; int err; cyberpro_init_hw(cfb); /* * Get the video RAM size and width from the VGA register. * This should have been already initialised by the BIOS, * but if it's garbage, claim default 1MB VRAM (woody) */ cfb->mem_ctl1 = cyber2000_grphr(EXT_MEM_CTL1, cfb); cfb->mem_ctl2 = cyber2000_grphr(EXT_MEM_CTL2, cfb); /* * Determine the size of the memory. 
*/ switch (cfb->mem_ctl2 & MEM_CTL2_SIZE_MASK) { case MEM_CTL2_SIZE_4MB: smem_size = 0x00400000; break; case MEM_CTL2_SIZE_2MB: smem_size = 0x00200000; break; case MEM_CTL2_SIZE_1MB: smem_size = 0x00100000; break; default: smem_size = 0x00100000; break; } cfb->fb.fix.smem_len = smem_size; cfb->fb.fix.mmio_len = MMIO_SIZE; cfb->fb.screen_base = cfb->region; #ifdef CONFIG_FB_CYBER2000_DDC if (cyber2000fb_setup_ddc_bus(cfb) == 0) cfb->ddc_registered = true; #endif err = -EINVAL; if (!fb_find_mode(&cfb->fb.var, &cfb->fb, NULL, NULL, 0, &cyber2000fb_default_mode, 8)) { printk(KERN_ERR "%s: no valid mode found\n", cfb->fb.fix.id); goto failed; } cfb->fb.var.yres_virtual = cfb->fb.fix.smem_len * 8 / (cfb->fb.var.bits_per_pixel * cfb->fb.var.xres_virtual); if (cfb->fb.var.yres_virtual < cfb->fb.var.yres) cfb->fb.var.yres_virtual = cfb->fb.var.yres; /* fb_set_var(&cfb->fb.var, -1, &cfb->fb); */ /* * Calculate the hsync and vsync frequencies. Note that * we split the 1e12 constant up so that we can preserve * the precision and fit the results into 32-bit registers. 
* (1953125000 * 512 = 1e12) */ h_sync = 1953125000 / cfb->fb.var.pixclock; h_sync = h_sync * 512 / (cfb->fb.var.xres + cfb->fb.var.left_margin + cfb->fb.var.right_margin + cfb->fb.var.hsync_len); v_sync = h_sync / (cfb->fb.var.yres + cfb->fb.var.upper_margin + cfb->fb.var.lower_margin + cfb->fb.var.vsync_len); printk(KERN_INFO "%s: %dKiB VRAM, using %dx%d, %d.%03dkHz, %dHz\n", cfb->fb.fix.id, cfb->fb.fix.smem_len >> 10, cfb->fb.var.xres, cfb->fb.var.yres, h_sync / 1000, h_sync % 1000, v_sync); err = cyber2000fb_i2c_register(cfb); if (err) goto failed; err = register_framebuffer(&cfb->fb); if (err) cyber2000fb_i2c_unregister(cfb); failed: #ifdef CONFIG_FB_CYBER2000_DDC if (err && cfb->ddc_registered) i2c_del_adapter(&cfb->ddc_adapter); #endif return err; } static void __devexit cyberpro_common_remove(struct cfb_info *cfb) { unregister_framebuffer(&cfb->fb); #ifdef CONFIG_FB_CYBER2000_DDC if (cfb->ddc_registered) i2c_del_adapter(&cfb->ddc_adapter); #endif cyber2000fb_i2c_unregister(cfb); } static void cyberpro_common_resume(struct cfb_info *cfb) { cyberpro_init_hw(cfb); /* * Reprogram the MEM_CTL1 and MEM_CTL2 registers */ cyber2000_grphw(EXT_MEM_CTL1, cfb->mem_ctl1, cfb); cyber2000_grphw(EXT_MEM_CTL2, cfb->mem_ctl2, cfb); /* * Restore the old video mode and the palette. * We also need to tell fbcon to redraw the console. */ cyber2000fb_set_par(&cfb->fb); } #ifdef CONFIG_ARCH_SHARK #include <mach/framebuffer.h> static int __devinit cyberpro_vl_probe(void) { struct cfb_info *cfb; int err = -ENOMEM; if (!request_mem_region(FB_START, FB_SIZE, "CyberPro2010")) return err; cfb = cyberpro_alloc_fb_info(ID_CYBERPRO_2010, "CyberPro2010"); if (!cfb) goto failed_release; cfb->irq = -1; cfb->region = ioremap(FB_START, FB_SIZE); if (!cfb->region) goto failed_ioremap; cfb->regs = cfb->region + MMIO_OFFSET; cfb->fb.device = NULL; cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET; cfb->fb.fix.smem_start = FB_START; /* * Bring up the hardware. 
This is expected to enable access * to the linear memory region, and allow access to the memory * mapped registers. Also, mem_ctl1 and mem_ctl2 must be * initialised. */ cyber2000fb_writeb(0x18, 0x46e8, cfb); cyber2000fb_writeb(0x01, 0x102, cfb); cyber2000fb_writeb(0x08, 0x46e8, cfb); cyber2000fb_writeb(EXT_BIU_MISC, 0x3ce, cfb); cyber2000fb_writeb(EXT_BIU_MISC_LIN_ENABLE, 0x3cf, cfb); cfb->mclk_mult = 0xdb; cfb->mclk_div = 0x54; err = cyberpro_common_probe(cfb); if (err) goto failed; if (int_cfb_info == NULL) int_cfb_info = cfb; return 0; failed: iounmap(cfb->region); failed_ioremap: cyberpro_free_fb_info(cfb); failed_release: release_mem_region(FB_START, FB_SIZE); return err; } #endif /* CONFIG_ARCH_SHARK */ /* * PCI specific support. */ #ifdef CONFIG_PCI /* * We need to wake up the CyberPro, and make sure its in linear memory * mode. Unfortunately, this is specific to the platform and card that * we are running on. * * On x86 and ARM, should we be initialising the CyberPro first via the * IO registers, and then the MMIO registers to catch all cases? Can we * end up in the situation where the chip is in MMIO mode, but not awake * on an x86 system? */ static int cyberpro_pci_enable_mmio(struct cfb_info *cfb) { unsigned char val; #if defined(__sparc_v9__) #error "You lose, consult DaveM." #elif defined(__sparc__) /* * SPARC does not have an "outb" instruction, so we generate * I/O cycles storing into a reserved memory space at * physical address 0x3000000 */ unsigned char __iomem *iop; iop = ioremap(0x3000000, 0x5000); if (iop == NULL) { printk(KERN_ERR "iga5000: cannot map I/O\n"); return -ENOMEM; } writeb(0x18, iop + 0x46e8); writeb(0x01, iop + 0x102); writeb(0x08, iop + 0x46e8); writeb(EXT_BIU_MISC, iop + 0x3ce); writeb(EXT_BIU_MISC_LIN_ENABLE, iop + 0x3cf); iounmap(iop); #else /* * Most other machine types are "normal", so * we use the standard IO-based wakeup. 
*/ outb(0x18, 0x46e8); outb(0x01, 0x102); outb(0x08, 0x46e8); outb(EXT_BIU_MISC, 0x3ce); outb(EXT_BIU_MISC_LIN_ENABLE, 0x3cf); #endif /* * Allow the CyberPro to accept PCI burst accesses */ if (cfb->id == ID_CYBERPRO_2010) { printk(KERN_INFO "%s: NOT enabling PCI bursts\n", cfb->fb.fix.id); } else { val = cyber2000_grphr(EXT_BUS_CTL, cfb); if (!(val & EXT_BUS_CTL_PCIBURST_WRITE)) { printk(KERN_INFO "%s: enabling PCI bursts\n", cfb->fb.fix.id); val |= EXT_BUS_CTL_PCIBURST_WRITE; if (cfb->id == ID_CYBERPRO_5000) val |= EXT_BUS_CTL_PCIBURST_READ; cyber2000_grphw(EXT_BUS_CTL, val, cfb); } } return 0; } static int __devinit cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct cfb_info *cfb; char name[16]; int err; sprintf(name, "CyberPro%4X", id->device); err = pci_enable_device(dev); if (err) return err; err = -ENOMEM; cfb = cyberpro_alloc_fb_info(id->driver_data, name); if (!cfb) goto failed_release; err = pci_request_regions(dev, cfb->fb.fix.id); if (err) goto failed_regions; cfb->irq = dev->irq; cfb->region = pci_ioremap_bar(dev, 0); if (!cfb->region) goto failed_ioremap; cfb->regs = cfb->region + MMIO_OFFSET; cfb->fb.device = &dev->dev; cfb->fb.fix.mmio_start = pci_resource_start(dev, 0) + MMIO_OFFSET; cfb->fb.fix.smem_start = pci_resource_start(dev, 0); /* * Bring up the hardware. This is expected to enable access * to the linear memory region, and allow access to the memory * mapped registers. Also, mem_ctl1 and mem_ctl2 must be * initialised. */ err = cyberpro_pci_enable_mmio(cfb); if (err) goto failed; /* * Use MCLK from BIOS. FIXME: what about hotplug? 
*/ cfb->mclk_mult = cyber2000_grphr(EXT_MCLK_MULT, cfb); cfb->mclk_div = cyber2000_grphr(EXT_MCLK_DIV, cfb); #ifdef __arm__ /* * MCLK on the NetWinder and the Shark is fixed at 75MHz */ if (machine_is_netwinder()) { cfb->mclk_mult = 0xdb; cfb->mclk_div = 0x54; } #endif err = cyberpro_common_probe(cfb); if (err) goto failed; /* * Our driver data */ pci_set_drvdata(dev, cfb); if (int_cfb_info == NULL) int_cfb_info = cfb; return 0; failed: iounmap(cfb->region); failed_ioremap: pci_release_regions(dev); failed_regions: cyberpro_free_fb_info(cfb); failed_release: return err; } static void __devexit cyberpro_pci_remove(struct pci_dev *dev) { struct cfb_info *cfb = pci_get_drvdata(dev); if (cfb) { cyberpro_common_remove(cfb); iounmap(cfb->region); cyberpro_free_fb_info(cfb); /* * Ensure that the driver data is no longer * valid. */ pci_set_drvdata(dev, NULL); if (cfb == int_cfb_info) int_cfb_info = NULL; pci_release_regions(dev); } } static int cyberpro_pci_suspend(struct pci_dev *dev, pm_message_t state) { return 0; } /* * Re-initialise the CyberPro hardware */ static int cyberpro_pci_resume(struct pci_dev *dev) { struct cfb_info *cfb = pci_get_drvdata(dev); if (cfb) { cyberpro_pci_enable_mmio(cfb); cyberpro_common_resume(cfb); } return 0; } static struct pci_device_id cyberpro_pci_table[] = { /* Not yet * { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_1682, * PCI_ANY_ID, PCI_ANY_ID, 0, 0, ID_IGA_1682 }, */ { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_2000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ID_CYBERPRO_2000 }, { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_2010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ID_CYBERPRO_2010 }, { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_5000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ID_CYBERPRO_5000 }, { 0, } }; MODULE_DEVICE_TABLE(pci, cyberpro_pci_table); static struct pci_driver cyberpro_driver = { .name = "CyberPro", .probe = cyberpro_pci_probe, .remove = __devexit_p(cyberpro_pci_remove), .suspend = cyberpro_pci_suspend, .resume = cyberpro_pci_resume, .id_table = 
cyberpro_pci_table
};
#endif

/*
 * I don't think we can use the "module_init" stuff here because
 * the fbcon stuff may not be initialised yet. Hence the #ifdef
 * around module_init.
 *
 * Tony: "module_init" is now required
 */
static int __init cyber2000fb_init(void)
{
	int ret = -1, err;

#ifndef MODULE
	char *option = NULL;

	if (fb_get_options("cyber2000fb", &option))
		return -ENODEV;
	cyber2000fb_setup(option);
#endif

#ifdef CONFIG_ARCH_SHARK
	/* VL-bus attached device (Shark). */
	err = cyberpro_vl_probe();
	if (!err)
		ret = 0;
#endif
#ifdef CONFIG_PCI
	err = pci_register_driver(&cyberpro_driver);
	if (!err)
		ret = 0;
#endif

	/*
	 * Succeed if at least one of the bus probes registered;
	 * otherwise propagate the last error seen.
	 */
	return ret ? err : 0;
}
module_init(cyber2000fb_init);

#ifndef CONFIG_ARCH_SHARK
static void __exit cyberpro_exit(void)
{
	pci_unregister_driver(&cyberpro_driver);
}
module_exit(cyberpro_exit);
#endif

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver");
MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_sony_tianchi
sound/soc/codecs/adau1373.c
4947
47387
/* * Analog Devices ADAU1373 Audio Codec drive * * Copyright 2011 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/gcd.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include <sound/soc.h> #include <sound/adau1373.h> #include "adau1373.h" struct adau1373_dai { unsigned int clk_src; unsigned int sysclk; bool enable_src; bool master; }; struct adau1373 { struct adau1373_dai dais[3]; }; #define ADAU1373_INPUT_MODE 0x00 #define ADAU1373_AINL_CTRL(x) (0x01 + (x) * 2) #define ADAU1373_AINR_CTRL(x) (0x02 + (x) * 2) #define ADAU1373_LLINE_OUT(x) (0x9 + (x) * 2) #define ADAU1373_RLINE_OUT(x) (0xa + (x) * 2) #define ADAU1373_LSPK_OUT 0x0d #define ADAU1373_RSPK_OUT 0x0e #define ADAU1373_LHP_OUT 0x0f #define ADAU1373_RHP_OUT 0x10 #define ADAU1373_ADC_GAIN 0x11 #define ADAU1373_LADC_MIXER 0x12 #define ADAU1373_RADC_MIXER 0x13 #define ADAU1373_LLINE1_MIX 0x14 #define ADAU1373_RLINE1_MIX 0x15 #define ADAU1373_LLINE2_MIX 0x16 #define ADAU1373_RLINE2_MIX 0x17 #define ADAU1373_LSPK_MIX 0x18 #define ADAU1373_RSPK_MIX 0x19 #define ADAU1373_LHP_MIX 0x1a #define ADAU1373_RHP_MIX 0x1b #define ADAU1373_EP_MIX 0x1c #define ADAU1373_HP_CTRL 0x1d #define ADAU1373_HP_CTRL2 0x1e #define ADAU1373_LS_CTRL 0x1f #define ADAU1373_EP_CTRL 0x21 #define ADAU1373_MICBIAS_CTRL1 0x22 #define ADAU1373_MICBIAS_CTRL2 0x23 #define ADAU1373_OUTPUT_CTRL 0x24 #define ADAU1373_PWDN_CTRL1 0x25 #define ADAU1373_PWDN_CTRL2 0x26 #define ADAU1373_PWDN_CTRL3 0x27 #define ADAU1373_DPLL_CTRL(x) (0x28 + (x) * 7) #define ADAU1373_PLL_CTRL1(x) (0x29 + (x) * 7) #define ADAU1373_PLL_CTRL2(x) (0x2a + (x) * 7) #define ADAU1373_PLL_CTRL3(x) (0x2b + (x) * 7) #define ADAU1373_PLL_CTRL4(x) (0x2c + (x) * 7) #define ADAU1373_PLL_CTRL5(x) (0x2d + (x) * 7) #define 
ADAU1373_PLL_CTRL6(x) (0x2e + (x) * 7) #define ADAU1373_PLL_CTRL7(x) (0x2f + (x) * 7) #define ADAU1373_HEADDECT 0x36 #define ADAU1373_ADC_DAC_STATUS 0x37 #define ADAU1373_ADC_CTRL 0x3c #define ADAU1373_DAI(x) (0x44 + (x)) #define ADAU1373_CLK_SRC_DIV(x) (0x40 + (x) * 2) #define ADAU1373_BCLKDIV(x) (0x47 + (x)) #define ADAU1373_SRC_RATIOA(x) (0x4a + (x) * 2) #define ADAU1373_SRC_RATIOB(x) (0x4b + (x) * 2) #define ADAU1373_DEEMP_CTRL 0x50 #define ADAU1373_SRC_DAI_CTRL(x) (0x51 + (x)) #define ADAU1373_DIN_MIX_CTRL(x) (0x56 + (x)) #define ADAU1373_DOUT_MIX_CTRL(x) (0x5b + (x)) #define ADAU1373_DAI_PBL_VOL(x) (0x62 + (x) * 2) #define ADAU1373_DAI_PBR_VOL(x) (0x63 + (x) * 2) #define ADAU1373_DAI_RECL_VOL(x) (0x68 + (x) * 2) #define ADAU1373_DAI_RECR_VOL(x) (0x69 + (x) * 2) #define ADAU1373_DAC1_PBL_VOL 0x6e #define ADAU1373_DAC1_PBR_VOL 0x6f #define ADAU1373_DAC2_PBL_VOL 0x70 #define ADAU1373_DAC2_PBR_VOL 0x71 #define ADAU1373_ADC_RECL_VOL 0x72 #define ADAU1373_ADC_RECR_VOL 0x73 #define ADAU1373_DMIC_RECL_VOL 0x74 #define ADAU1373_DMIC_RECR_VOL 0x75 #define ADAU1373_VOL_GAIN1 0x76 #define ADAU1373_VOL_GAIN2 0x77 #define ADAU1373_VOL_GAIN3 0x78 #define ADAU1373_HPF_CTRL 0x7d #define ADAU1373_BASS1 0x7e #define ADAU1373_BASS2 0x7f #define ADAU1373_DRC(x) (0x80 + (x) * 0x10) #define ADAU1373_3D_CTRL1 0xc0 #define ADAU1373_3D_CTRL2 0xc1 #define ADAU1373_FDSP_SEL1 0xdc #define ADAU1373_FDSP_SEL2 0xdd #define ADAU1373_FDSP_SEL3 0xde #define ADAU1373_FDSP_SEL4 0xdf #define ADAU1373_DIGMICCTRL 0xe2 #define ADAU1373_DIGEN 0xeb #define ADAU1373_SOFT_RESET 0xff #define ADAU1373_PLL_CTRL6_DPLL_BYPASS BIT(1) #define ADAU1373_PLL_CTRL6_PLL_EN BIT(0) #define ADAU1373_DAI_INVERT_BCLK BIT(7) #define ADAU1373_DAI_MASTER BIT(6) #define ADAU1373_DAI_INVERT_LRCLK BIT(4) #define ADAU1373_DAI_WLEN_16 0x0 #define ADAU1373_DAI_WLEN_20 0x4 #define ADAU1373_DAI_WLEN_24 0x8 #define ADAU1373_DAI_WLEN_32 0xc #define ADAU1373_DAI_WLEN_MASK 0xc #define ADAU1373_DAI_FORMAT_RIGHT_J 0x0 #define 
ADAU1373_DAI_FORMAT_LEFT_J 0x1 #define ADAU1373_DAI_FORMAT_I2S 0x2 #define ADAU1373_DAI_FORMAT_DSP 0x3 #define ADAU1373_BCLKDIV_SOURCE BIT(5) #define ADAU1373_BCLKDIV_32 0x03 #define ADAU1373_BCLKDIV_64 0x02 #define ADAU1373_BCLKDIV_128 0x01 #define ADAU1373_BCLKDIV_256 0x00 #define ADAU1373_ADC_CTRL_PEAK_DETECT BIT(0) #define ADAU1373_ADC_CTRL_RESET BIT(1) #define ADAU1373_ADC_CTRL_RESET_FORCE BIT(2) #define ADAU1373_OUTPUT_CTRL_LDIFF BIT(3) #define ADAU1373_OUTPUT_CTRL_LNFBEN BIT(2) #define ADAU1373_PWDN_CTRL3_PWR_EN BIT(0) #define ADAU1373_EP_CTRL_MICBIAS1_OFFSET 4 #define ADAU1373_EP_CTRL_MICBIAS2_OFFSET 2 static const uint8_t adau1373_default_regs[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* 0x30 */ 0x00, 0x00, 0x00, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x00, /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0x80 */ 0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00, 0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0x90 */ 0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00, 0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0xa0 */ 0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
/* 0xc0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, /* 0xe0 */ 0x00, 0x1f, 0x0f, 0x00, 0x00, }; static const unsigned int adau1373_out_tlv[] = { TLV_DB_RANGE_HEAD(4), 0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1), 8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0), 16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0), 24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0), }; static const DECLARE_TLV_DB_MINMAX(adau1373_digital_tlv, -9563, 0); static const DECLARE_TLV_DB_SCALE(adau1373_in_pga_tlv, -1300, 100, 1); static const DECLARE_TLV_DB_SCALE(adau1373_ep_tlv, -600, 600, 1); static const DECLARE_TLV_DB_SCALE(adau1373_input_boost_tlv, 0, 2000, 0); static const DECLARE_TLV_DB_SCALE(adau1373_gain_boost_tlv, 0, 600, 0); static const DECLARE_TLV_DB_SCALE(adau1373_speaker_boost_tlv, 1200, 600, 0); static const char *adau1373_fdsp_sel_text[] = { "None", "Channel 1", "Channel 2", "Channel 3", "Channel 4", "Channel 5", }; static const SOC_ENUM_SINGLE_DECL(adau1373_drc1_channel_enum, ADAU1373_FDSP_SEL1, 4, adau1373_fdsp_sel_text); static const SOC_ENUM_SINGLE_DECL(adau1373_drc2_channel_enum, ADAU1373_FDSP_SEL1, 0, adau1373_fdsp_sel_text); static const SOC_ENUM_SINGLE_DECL(adau1373_drc3_channel_enum, ADAU1373_FDSP_SEL2, 0, adau1373_fdsp_sel_text); static const SOC_ENUM_SINGLE_DECL(adau1373_hpf_channel_enum, ADAU1373_FDSP_SEL3, 0, adau1373_fdsp_sel_text); static const SOC_ENUM_SINGLE_DECL(adau1373_bass_channel_enum, ADAU1373_FDSP_SEL4, 4, adau1373_fdsp_sel_text); static const char *adau1373_hpf_cutoff_text[] = { "3.7Hz", "50Hz", "100Hz", "150Hz", "200Hz", "250Hz", "300Hz", "350Hz", "400Hz", "450Hz", "500Hz", "550Hz", "600Hz", "650Hz", "700Hz", "750Hz", "800Hz", }; static const SOC_ENUM_SINGLE_DECL(adau1373_hpf_cutoff_enum, ADAU1373_HPF_CTRL, 3, adau1373_hpf_cutoff_text); static const char *adau1373_bass_lpf_cutoff_text[] = { "801Hz", "1001Hz", }; static const 
char *adau1373_bass_clip_level_text[] = { "0.125", "0.250", "0.370", "0.500", "0.625", "0.750", "0.875", }; static const unsigned int adau1373_bass_clip_level_values[] = { 1, 2, 3, 4, 5, 6, 7, }; static const char *adau1373_bass_hpf_cutoff_text[] = { "158Hz", "232Hz", "347Hz", "520Hz", }; static const unsigned int adau1373_bass_tlv[] = { TLV_DB_RANGE_HEAD(3), 0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1), 3, 4, TLV_DB_SCALE_ITEM(950, 250, 0), 5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0), }; static const SOC_ENUM_SINGLE_DECL(adau1373_bass_lpf_cutoff_enum, ADAU1373_BASS1, 5, adau1373_bass_lpf_cutoff_text); static const SOC_VALUE_ENUM_SINGLE_DECL(adau1373_bass_clip_level_enum, ADAU1373_BASS1, 2, 7, adau1373_bass_clip_level_text, adau1373_bass_clip_level_values); static const SOC_ENUM_SINGLE_DECL(adau1373_bass_hpf_cutoff_enum, ADAU1373_BASS1, 0, adau1373_bass_hpf_cutoff_text); static const char *adau1373_3d_level_text[] = { "0%", "6.67%", "13.33%", "20%", "26.67%", "33.33%", "40%", "46.67%", "53.33%", "60%", "66.67%", "73.33%", "80%", "86.67", "99.33%", "100%" }; static const char *adau1373_3d_cutoff_text[] = { "No 3D", "0.03125 fs", "0.04583 fs", "0.075 fs", "0.11458 fs", "0.16875 fs", "0.27083 fs" }; static const SOC_ENUM_SINGLE_DECL(adau1373_3d_level_enum, ADAU1373_3D_CTRL1, 4, adau1373_3d_level_text); static const SOC_ENUM_SINGLE_DECL(adau1373_3d_cutoff_enum, ADAU1373_3D_CTRL1, 0, adau1373_3d_cutoff_text); static const unsigned int adau1373_3d_tlv[] = { TLV_DB_RANGE_HEAD(2), 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), 1, 7, TLV_DB_LINEAR_ITEM(-1800, -120), }; static const char *adau1373_lr_mux_text[] = { "Mute", "Right Channel (L+R)", "Left Channel (L+R)", "Stereo", }; static const SOC_ENUM_SINGLE_DECL(adau1373_lineout1_lr_mux_enum, ADAU1373_OUTPUT_CTRL, 4, adau1373_lr_mux_text); static const SOC_ENUM_SINGLE_DECL(adau1373_lineout2_lr_mux_enum, ADAU1373_OUTPUT_CTRL, 6, adau1373_lr_mux_text); static const SOC_ENUM_SINGLE_DECL(adau1373_speaker_lr_mux_enum, ADAU1373_LS_CTRL, 4, 
adau1373_lr_mux_text); static const struct snd_kcontrol_new adau1373_controls[] = { SOC_DOUBLE_R_TLV("AIF1 Capture Volume", ADAU1373_DAI_RECL_VOL(0), ADAU1373_DAI_RECR_VOL(0), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF2 Capture Volume", ADAU1373_DAI_RECL_VOL(1), ADAU1373_DAI_RECR_VOL(1), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF3 Capture Volume", ADAU1373_DAI_RECL_VOL(2), ADAU1373_DAI_RECR_VOL(2), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("ADC Capture Volume", ADAU1373_ADC_RECL_VOL, ADAU1373_ADC_RECR_VOL, 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("DMIC Capture Volume", ADAU1373_DMIC_RECL_VOL, ADAU1373_DMIC_RECR_VOL, 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF1 Playback Volume", ADAU1373_DAI_PBL_VOL(0), ADAU1373_DAI_PBR_VOL(0), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF2 Playback Volume", ADAU1373_DAI_PBL_VOL(1), ADAU1373_DAI_PBR_VOL(1), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("AIF3 Playback Volume", ADAU1373_DAI_PBL_VOL(2), ADAU1373_DAI_PBR_VOL(2), 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("DAC1 Playback Volume", ADAU1373_DAC1_PBL_VOL, ADAU1373_DAC1_PBR_VOL, 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("DAC2 Playback Volume", ADAU1373_DAC2_PBL_VOL, ADAU1373_DAC2_PBR_VOL, 0, 0xff, 1, adau1373_digital_tlv), SOC_DOUBLE_R_TLV("Lineout1 Playback Volume", ADAU1373_LLINE_OUT(0), ADAU1373_RLINE_OUT(0), 0, 0x1f, 0, adau1373_out_tlv), SOC_DOUBLE_R_TLV("Speaker Playback Volume", ADAU1373_LSPK_OUT, ADAU1373_RSPK_OUT, 0, 0x1f, 0, adau1373_out_tlv), SOC_DOUBLE_R_TLV("Headphone Playback Volume", ADAU1373_LHP_OUT, ADAU1373_RHP_OUT, 0, 0x1f, 0, adau1373_out_tlv), SOC_DOUBLE_R_TLV("Input 1 Capture Volume", ADAU1373_AINL_CTRL(0), ADAU1373_AINR_CTRL(0), 0, 0x1f, 0, adau1373_in_pga_tlv), SOC_DOUBLE_R_TLV("Input 2 Capture Volume", ADAU1373_AINL_CTRL(1), ADAU1373_AINR_CTRL(1), 0, 0x1f, 0, adau1373_in_pga_tlv), SOC_DOUBLE_R_TLV("Input 3 Capture Volume", ADAU1373_AINL_CTRL(2), 
ADAU1373_AINR_CTRL(2), 0, 0x1f, 0, adau1373_in_pga_tlv), SOC_DOUBLE_R_TLV("Input 4 Capture Volume", ADAU1373_AINL_CTRL(3), ADAU1373_AINR_CTRL(3), 0, 0x1f, 0, adau1373_in_pga_tlv), SOC_SINGLE_TLV("Earpiece Playback Volume", ADAU1373_EP_CTRL, 0, 3, 0, adau1373_ep_tlv), SOC_DOUBLE_TLV("AIF3 Boost Playback Volume", ADAU1373_VOL_GAIN1, 4, 5, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF2 Boost Playback Volume", ADAU1373_VOL_GAIN1, 2, 3, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF1 Boost Playback Volume", ADAU1373_VOL_GAIN1, 0, 1, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF3 Boost Capture Volume", ADAU1373_VOL_GAIN2, 4, 5, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF2 Boost Capture Volume", ADAU1373_VOL_GAIN2, 2, 3, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("AIF1 Boost Capture Volume", ADAU1373_VOL_GAIN2, 0, 1, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("DMIC Boost Capture Volume", ADAU1373_VOL_GAIN3, 6, 7, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("ADC Boost Capture Volume", ADAU1373_VOL_GAIN3, 4, 5, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("DAC2 Boost Playback Volume", ADAU1373_VOL_GAIN3, 2, 3, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("DAC1 Boost Playback Volume", ADAU1373_VOL_GAIN3, 0, 1, 1, 0, adau1373_gain_boost_tlv), SOC_DOUBLE_TLV("Input 1 Boost Capture Volume", ADAU1373_ADC_GAIN, 0, 4, 1, 0, adau1373_input_boost_tlv), SOC_DOUBLE_TLV("Input 2 Boost Capture Volume", ADAU1373_ADC_GAIN, 1, 5, 1, 0, adau1373_input_boost_tlv), SOC_DOUBLE_TLV("Input 3 Boost Capture Volume", ADAU1373_ADC_GAIN, 2, 6, 1, 0, adau1373_input_boost_tlv), SOC_DOUBLE_TLV("Input 4 Boost Capture Volume", ADAU1373_ADC_GAIN, 3, 7, 1, 0, adau1373_input_boost_tlv), SOC_DOUBLE_TLV("Speaker Boost Playback Volume", ADAU1373_LS_CTRL, 2, 3, 1, 0, adau1373_speaker_boost_tlv), SOC_ENUM("Lineout1 LR Mux", adau1373_lineout1_lr_mux_enum), SOC_ENUM("Speaker LR Mux", adau1373_speaker_lr_mux_enum), SOC_ENUM("HPF Cutoff", adau1373_hpf_cutoff_enum), 
SOC_DOUBLE("HPF Switch", ADAU1373_HPF_CTRL, 1, 0, 1, 0),
	SOC_ENUM("HPF Channel", adau1373_hpf_channel_enum),

	SOC_ENUM("Bass HPF Cutoff", adau1373_bass_hpf_cutoff_enum),
	SOC_VALUE_ENUM("Bass Clip Level Threshold",
		adau1373_bass_clip_level_enum),
	SOC_ENUM("Bass LPF Cutoff", adau1373_bass_lpf_cutoff_enum),
	SOC_DOUBLE("Bass Playback Switch", ADAU1373_BASS2, 0, 1, 1, 0),
	SOC_SINGLE_TLV("Bass Playback Volume", ADAU1373_BASS2, 2, 7, 0,
		adau1373_bass_tlv),
	SOC_ENUM("Bass Channel", adau1373_bass_channel_enum),

	SOC_ENUM("3D Freq", adau1373_3d_cutoff_enum),
	SOC_ENUM("3D Level", adau1373_3d_level_enum),
	SOC_SINGLE("3D Playback Switch", ADAU1373_3D_CTRL2, 0, 1, 0),
	SOC_SINGLE_TLV("3D Playback Volume", ADAU1373_3D_CTRL2, 2, 7, 0,
		adau1373_3d_tlv),
	/*
	 * NOTE(review): reuses the bass channel enum (ADAU1373_FDSP_SEL4,
	 * shift 4) — verify against the datasheet whether the 3D effect
	 * channel select lives in a different bitfield.
	 */
	SOC_ENUM("3D Channel", adau1373_bass_channel_enum),

	SOC_SINGLE("Zero Cross Switch", ADAU1373_PWDN_CTRL3, 7, 1, 0),
};

/* Controls only exposed when the LOUT2 pins are used as line outputs. */
static const struct snd_kcontrol_new adau1373_lineout2_controls[] = {
	SOC_DOUBLE_R_TLV("Lineout2 Playback Volume", ADAU1373_LLINE_OUT(1),
		ADAU1373_RLINE_OUT(1), 0, 0x1f, 0, adau1373_out_tlv),
	SOC_ENUM("Lineout2 LR Mux", adau1373_lineout2_lr_mux_enum),
};

/* Per-DRC input channel routing (FDSP channel select). */
static const struct snd_kcontrol_new adau1373_drc_controls[] = {
	SOC_ENUM("DRC1 Channel", adau1373_drc1_channel_enum),
	SOC_ENUM("DRC2 Channel", adau1373_drc2_channel_enum),
	SOC_ENUM("DRC3 Channel", adau1373_drc3_channel_enum),
};

/*
 * DAPM event callback for the "PLL1"/"PLL2" supply widgets: sets or
 * clears the PLL enable bit in the matching PLL_CTRL6 register.  The
 * PLL index is decoded from the widget name ("PLL1" -> 0, "PLL2" -> 1).
 * After powering the PLL up, wait 5 ms for it to lock before any
 * dependent widgets are enabled.
 */
static int adau1373_pll_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	/* w->name[3] is the digit in "PLLx". */
	unsigned int pll_id = w->name[3] - '1';
	unsigned int val;

	if (SND_SOC_DAPM_EVENT_ON(event))
		val = ADAU1373_PLL_CTRL6_PLL_EN;
	else
		val = 0;

	snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id),
		ADAU1373_PLL_CTRL6_PLL_EN, val);

	if (SND_SOC_DAPM_EVENT_ON(event))
		mdelay(5);

	return 0;
}

/* Capture source feeding the decimator: analog ADC or digital mic 1. */
static const char *adau1373_decimator_text[] = {
	"ADC",
	"DMIC1",
};

/* Virtual enum (register 0 unused) backing the "Decimator Mux" widget. */
static const struct soc_enum adau1373_decimator_enum =
	SOC_ENUM_SINGLE(0, 0, 2, adau1373_decimator_text);

static const struct snd_kcontrol_new
adau1373_decimator_mux = SOC_DAPM_ENUM_VIRT("Decimator Mux", adau1373_decimator_enum); static const struct snd_kcontrol_new adau1373_left_adc_mixer_controls[] = { SOC_DAPM_SINGLE("DAC1 Switch", ADAU1373_LADC_MIXER, 4, 1, 0), SOC_DAPM_SINGLE("Input 4 Switch", ADAU1373_LADC_MIXER, 3, 1, 0), SOC_DAPM_SINGLE("Input 3 Switch", ADAU1373_LADC_MIXER, 2, 1, 0), SOC_DAPM_SINGLE("Input 2 Switch", ADAU1373_LADC_MIXER, 1, 1, 0), SOC_DAPM_SINGLE("Input 1 Switch", ADAU1373_LADC_MIXER, 0, 1, 0), }; static const struct snd_kcontrol_new adau1373_right_adc_mixer_controls[] = { SOC_DAPM_SINGLE("DAC1 Switch", ADAU1373_RADC_MIXER, 4, 1, 0), SOC_DAPM_SINGLE("Input 4 Switch", ADAU1373_RADC_MIXER, 3, 1, 0), SOC_DAPM_SINGLE("Input 3 Switch", ADAU1373_RADC_MIXER, 2, 1, 0), SOC_DAPM_SINGLE("Input 2 Switch", ADAU1373_RADC_MIXER, 1, 1, 0), SOC_DAPM_SINGLE("Input 1 Switch", ADAU1373_RADC_MIXER, 0, 1, 0), }; #define DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(_name, _reg) \ const struct snd_kcontrol_new _name[] = { \ SOC_DAPM_SINGLE("Left DAC2 Switch", _reg, 7, 1, 0), \ SOC_DAPM_SINGLE("Right DAC2 Switch", _reg, 6, 1, 0), \ SOC_DAPM_SINGLE("Left DAC1 Switch", _reg, 5, 1, 0), \ SOC_DAPM_SINGLE("Right DAC1 Switch", _reg, 4, 1, 0), \ SOC_DAPM_SINGLE("Input 4 Bypass Switch", _reg, 3, 1, 0), \ SOC_DAPM_SINGLE("Input 3 Bypass Switch", _reg, 2, 1, 0), \ SOC_DAPM_SINGLE("Input 2 Bypass Switch", _reg, 1, 1, 0), \ SOC_DAPM_SINGLE("Input 1 Bypass Switch", _reg, 0, 1, 0), \ } static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_left_line1_mixer_controls, ADAU1373_LLINE1_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_right_line1_mixer_controls, ADAU1373_RLINE1_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_left_line2_mixer_controls, ADAU1373_LLINE2_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_right_line2_mixer_controls, ADAU1373_RLINE2_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_left_spk_mixer_controls, ADAU1373_LSPK_MIX); static 
DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_right_spk_mixer_controls, ADAU1373_RSPK_MIX); static DECLARE_ADAU1373_OUTPUT_MIXER_CTRLS(adau1373_ep_mixer_controls, ADAU1373_EP_MIX); static const struct snd_kcontrol_new adau1373_left_hp_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", ADAU1373_LHP_MIX, 5, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", ADAU1373_LHP_MIX, 4, 1, 0), SOC_DAPM_SINGLE("Input 4 Bypass Switch", ADAU1373_LHP_MIX, 3, 1, 0), SOC_DAPM_SINGLE("Input 3 Bypass Switch", ADAU1373_LHP_MIX, 2, 1, 0), SOC_DAPM_SINGLE("Input 2 Bypass Switch", ADAU1373_LHP_MIX, 1, 1, 0), SOC_DAPM_SINGLE("Input 1 Bypass Switch", ADAU1373_LHP_MIX, 0, 1, 0), }; static const struct snd_kcontrol_new adau1373_right_hp_mixer_controls[] = { SOC_DAPM_SINGLE("Right DAC1 Switch", ADAU1373_RHP_MIX, 5, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", ADAU1373_RHP_MIX, 4, 1, 0), SOC_DAPM_SINGLE("Input 4 Bypass Switch", ADAU1373_RHP_MIX, 3, 1, 0), SOC_DAPM_SINGLE("Input 3 Bypass Switch", ADAU1373_RHP_MIX, 2, 1, 0), SOC_DAPM_SINGLE("Input 2 Bypass Switch", ADAU1373_RHP_MIX, 1, 1, 0), SOC_DAPM_SINGLE("Input 1 Bypass Switch", ADAU1373_RHP_MIX, 0, 1, 0), }; #define DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(_name, _reg) \ const struct snd_kcontrol_new _name[] = { \ SOC_DAPM_SINGLE("DMIC2 Swapped Switch", _reg, 6, 1, 0), \ SOC_DAPM_SINGLE("DMIC2 Switch", _reg, 5, 1, 0), \ SOC_DAPM_SINGLE("ADC/DMIC1 Swapped Switch", _reg, 4, 1, 0), \ SOC_DAPM_SINGLE("ADC/DMIC1 Switch", _reg, 3, 1, 0), \ SOC_DAPM_SINGLE("AIF3 Switch", _reg, 2, 1, 0), \ SOC_DAPM_SINGLE("AIF2 Switch", _reg, 1, 1, 0), \ SOC_DAPM_SINGLE("AIF1 Switch", _reg, 0, 1, 0), \ } static DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel1_mixer_controls, ADAU1373_DIN_MIX_CTRL(0)); static DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel2_mixer_controls, ADAU1373_DIN_MIX_CTRL(1)); static DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel3_mixer_controls, ADAU1373_DIN_MIX_CTRL(2)); static 
DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel4_mixer_controls, ADAU1373_DIN_MIX_CTRL(3)); static DECLARE_ADAU1373_DSP_CHANNEL_MIXER_CTRLS(adau1373_dsp_channel5_mixer_controls, ADAU1373_DIN_MIX_CTRL(4)); #define DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(_name, _reg) \ const struct snd_kcontrol_new _name[] = { \ SOC_DAPM_SINGLE("DSP Channel5 Switch", _reg, 4, 1, 0), \ SOC_DAPM_SINGLE("DSP Channel4 Switch", _reg, 3, 1, 0), \ SOC_DAPM_SINGLE("DSP Channel3 Switch", _reg, 2, 1, 0), \ SOC_DAPM_SINGLE("DSP Channel2 Switch", _reg, 1, 1, 0), \ SOC_DAPM_SINGLE("DSP Channel1 Switch", _reg, 0, 1, 0), \ } static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_aif1_mixer_controls, ADAU1373_DOUT_MIX_CTRL(0)); static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_aif2_mixer_controls, ADAU1373_DOUT_MIX_CTRL(1)); static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_aif3_mixer_controls, ADAU1373_DOUT_MIX_CTRL(2)); static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_dac1_mixer_controls, ADAU1373_DOUT_MIX_CTRL(3)); static DECLARE_ADAU1373_DSP_OUTPUT_MIXER_CTRLS(adau1373_dac2_mixer_controls, ADAU1373_DOUT_MIX_CTRL(4)); static const struct snd_soc_dapm_widget adau1373_dapm_widgets[] = { /* Datasheet claims Left ADC is bit 6 and Right ADC is bit 7, but that * doesn't seem to be the case. 
*/ SND_SOC_DAPM_ADC("Left ADC", NULL, ADAU1373_PWDN_CTRL1, 7, 0), SND_SOC_DAPM_ADC("Right ADC", NULL, ADAU1373_PWDN_CTRL1, 6, 0), SND_SOC_DAPM_ADC("DMIC1", NULL, ADAU1373_DIGMICCTRL, 0, 0), SND_SOC_DAPM_ADC("DMIC2", NULL, ADAU1373_DIGMICCTRL, 2, 0), SND_SOC_DAPM_VIRT_MUX("Decimator Mux", SND_SOC_NOPM, 0, 0, &adau1373_decimator_mux), SND_SOC_DAPM_SUPPLY("MICBIAS2", ADAU1373_PWDN_CTRL1, 5, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("MICBIAS1", ADAU1373_PWDN_CTRL1, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("IN4PGA", ADAU1373_PWDN_CTRL1, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("IN3PGA", ADAU1373_PWDN_CTRL1, 2, 0, NULL, 0), SND_SOC_DAPM_PGA("IN2PGA", ADAU1373_PWDN_CTRL1, 1, 0, NULL, 0), SND_SOC_DAPM_PGA("IN1PGA", ADAU1373_PWDN_CTRL1, 0, 0, NULL, 0), SND_SOC_DAPM_DAC("Left DAC2", NULL, ADAU1373_PWDN_CTRL2, 7, 0), SND_SOC_DAPM_DAC("Right DAC2", NULL, ADAU1373_PWDN_CTRL2, 6, 0), SND_SOC_DAPM_DAC("Left DAC1", NULL, ADAU1373_PWDN_CTRL2, 5, 0), SND_SOC_DAPM_DAC("Right DAC1", NULL, ADAU1373_PWDN_CTRL2, 4, 0), SOC_MIXER_ARRAY("Left ADC Mixer", SND_SOC_NOPM, 0, 0, adau1373_left_adc_mixer_controls), SOC_MIXER_ARRAY("Right ADC Mixer", SND_SOC_NOPM, 0, 0, adau1373_right_adc_mixer_controls), SOC_MIXER_ARRAY("Left Lineout2 Mixer", ADAU1373_PWDN_CTRL2, 3, 0, adau1373_left_line2_mixer_controls), SOC_MIXER_ARRAY("Right Lineout2 Mixer", ADAU1373_PWDN_CTRL2, 2, 0, adau1373_right_line2_mixer_controls), SOC_MIXER_ARRAY("Left Lineout1 Mixer", ADAU1373_PWDN_CTRL2, 1, 0, adau1373_left_line1_mixer_controls), SOC_MIXER_ARRAY("Right Lineout1 Mixer", ADAU1373_PWDN_CTRL2, 0, 0, adau1373_right_line1_mixer_controls), SOC_MIXER_ARRAY("Earpiece Mixer", ADAU1373_PWDN_CTRL3, 4, 0, adau1373_ep_mixer_controls), SOC_MIXER_ARRAY("Left Speaker Mixer", ADAU1373_PWDN_CTRL3, 3, 0, adau1373_left_spk_mixer_controls), SOC_MIXER_ARRAY("Right Speaker Mixer", ADAU1373_PWDN_CTRL3, 2, 0, adau1373_right_spk_mixer_controls), SOC_MIXER_ARRAY("Left Headphone Mixer", SND_SOC_NOPM, 0, 0, adau1373_left_hp_mixer_controls), SOC_MIXER_ARRAY("Right 
Headphone Mixer", SND_SOC_NOPM, 0, 0, adau1373_right_hp_mixer_controls), SND_SOC_DAPM_SUPPLY("Headphone Enable", ADAU1373_PWDN_CTRL3, 1, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF1 CLK", ADAU1373_SRC_DAI_CTRL(0), 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF2 CLK", ADAU1373_SRC_DAI_CTRL(1), 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF3 CLK", ADAU1373_SRC_DAI_CTRL(2), 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF1 IN SRC", ADAU1373_SRC_DAI_CTRL(0), 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF1 OUT SRC", ADAU1373_SRC_DAI_CTRL(0), 1, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF2 IN SRC", ADAU1373_SRC_DAI_CTRL(1), 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF2 OUT SRC", ADAU1373_SRC_DAI_CTRL(1), 1, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF3 IN SRC", ADAU1373_SRC_DAI_CTRL(2), 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("AIF3 OUT SRC", ADAU1373_SRC_DAI_CTRL(2), 1, 0, NULL, 0), SND_SOC_DAPM_AIF_IN("AIF1 IN", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("AIF1 OUT", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("AIF2 IN", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("AIF2 OUT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("AIF3 IN", "AIF3 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("AIF3 OUT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0), SOC_MIXER_ARRAY("DSP Channel1 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel1_mixer_controls), SOC_MIXER_ARRAY("DSP Channel2 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel2_mixer_controls), SOC_MIXER_ARRAY("DSP Channel3 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel3_mixer_controls), SOC_MIXER_ARRAY("DSP Channel4 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel4_mixer_controls), SOC_MIXER_ARRAY("DSP Channel5 Mixer", SND_SOC_NOPM, 0, 0, adau1373_dsp_channel5_mixer_controls), SOC_MIXER_ARRAY("AIF1 Mixer", SND_SOC_NOPM, 0, 0, adau1373_aif1_mixer_controls), SOC_MIXER_ARRAY("AIF2 Mixer", SND_SOC_NOPM, 0, 0, adau1373_aif2_mixer_controls), SOC_MIXER_ARRAY("AIF3 Mixer", SND_SOC_NOPM, 0, 0, adau1373_aif3_mixer_controls), 
SOC_MIXER_ARRAY("DAC1 Mixer", SND_SOC_NOPM, 0, 0,
		adau1373_dac1_mixer_controls),
	SOC_MIXER_ARRAY("DAC2 Mixer", SND_SOC_NOPM, 0, 0,
		adau1373_dac2_mixer_controls),

	/* Digital engine power bits in the DIGEN register. */
	SND_SOC_DAPM_SUPPLY("DSP", ADAU1373_DIGEN, 4, 0, NULL, 0),
	SND_SOC_DAPM_SUPPLY("Recording Engine B", ADAU1373_DIGEN, 3, 0,
		NULL, 0),
	SND_SOC_DAPM_SUPPLY("Recording Engine A", ADAU1373_DIGEN, 2, 0,
		NULL, 0),
	SND_SOC_DAPM_SUPPLY("Playback Engine B", ADAU1373_DIGEN, 1, 0,
		NULL, 0),
	SND_SOC_DAPM_SUPPLY("Playback Engine A", ADAU1373_DIGEN, 0, 0,
		NULL, 0),

	/* PLL enable is handled in adau1373_pll_event, not a register bit. */
	SND_SOC_DAPM_SUPPLY("PLL1", SND_SOC_NOPM, 0, 0, adau1373_pll_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_SUPPLY("PLL2", SND_SOC_NOPM, 0, 0, adau1373_pll_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_SUPPLY("SYSCLK1", ADAU1373_CLK_SRC_DIV(0), 7, 0, NULL, 0),
	SND_SOC_DAPM_SUPPLY("SYSCLK2", ADAU1373_CLK_SRC_DIV(1), 7, 0, NULL, 0),

	SND_SOC_DAPM_INPUT("AIN1L"),
	SND_SOC_DAPM_INPUT("AIN1R"),
	SND_SOC_DAPM_INPUT("AIN2L"),
	SND_SOC_DAPM_INPUT("AIN2R"),
	SND_SOC_DAPM_INPUT("AIN3L"),
	SND_SOC_DAPM_INPUT("AIN3R"),
	SND_SOC_DAPM_INPUT("AIN4L"),
	SND_SOC_DAPM_INPUT("AIN4R"),

	SND_SOC_DAPM_INPUT("DMIC1DAT"),
	SND_SOC_DAPM_INPUT("DMIC2DAT"),

	SND_SOC_DAPM_OUTPUT("LOUT1L"),
	SND_SOC_DAPM_OUTPUT("LOUT1R"),
	SND_SOC_DAPM_OUTPUT("LOUT2L"),
	SND_SOC_DAPM_OUTPUT("LOUT2R"),
	SND_SOC_DAPM_OUTPUT("HPL"),
	SND_SOC_DAPM_OUTPUT("HPR"),
	SND_SOC_DAPM_OUTPUT("SPKL"),
	SND_SOC_DAPM_OUTPUT("SPKR"),
	SND_SOC_DAPM_OUTPUT("EP"),
};

/*
 * DAPM route connectedness check: an "AIFx CLK" supply depends on a
 * SYSCLK widget only when that DAI is clock master, and then only on
 * the SYSCLK fed by the PLL the DAI was configured for.  The DAI
 * index is decoded from the sink name ("AIF1 CLK" -> 0, ...).
 */
static int adau1373_check_aif_clk(struct snd_soc_dapm_widget *source,
	struct snd_soc_dapm_widget *sink)
{
	struct snd_soc_codec *codec = source->codec;
	struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
	unsigned int dai;
	const char *clk;

	/* sink->name[3] is the digit in "AIFx CLK". */
	dai = sink->name[3] - '1';

	if (!adau1373->dais[dai].master)
		return 0;

	if (adau1373->dais[dai].clk_src == ADAU1373_CLK_SRC_PLL1)
		clk = "SYSCLK1";
	else
		clk = "SYSCLK2";

	return strcmp(source->name, clk) == 0;
}

static int adau1373_check_src(struct snd_soc_dapm_widget *source,
	struct
snd_soc_dapm_widget *sink) { struct snd_soc_codec *codec = source->codec; struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec); unsigned int dai; dai = sink->name[3] - '1'; return adau1373->dais[dai].enable_src; } #define DSP_CHANNEL_MIXER_ROUTES(_sink) \ { _sink, "DMIC2 Swapped Switch", "DMIC2" }, \ { _sink, "DMIC2 Switch", "DMIC2" }, \ { _sink, "ADC/DMIC1 Swapped Switch", "Decimator Mux" }, \ { _sink, "ADC/DMIC1 Switch", "Decimator Mux" }, \ { _sink, "AIF1 Switch", "AIF1 IN" }, \ { _sink, "AIF2 Switch", "AIF2 IN" }, \ { _sink, "AIF3 Switch", "AIF3 IN" } #define DSP_OUTPUT_MIXER_ROUTES(_sink) \ { _sink, "DSP Channel1 Switch", "DSP Channel1 Mixer" }, \ { _sink, "DSP Channel2 Switch", "DSP Channel2 Mixer" }, \ { _sink, "DSP Channel3 Switch", "DSP Channel3 Mixer" }, \ { _sink, "DSP Channel4 Switch", "DSP Channel4 Mixer" }, \ { _sink, "DSP Channel5 Switch", "DSP Channel5 Mixer" } #define LEFT_OUTPUT_MIXER_ROUTES(_sink) \ { _sink, "Right DAC2 Switch", "Right DAC2" }, \ { _sink, "Left DAC2 Switch", "Left DAC2" }, \ { _sink, "Right DAC1 Switch", "Right DAC1" }, \ { _sink, "Left DAC1 Switch", "Left DAC1" }, \ { _sink, "Input 1 Bypass Switch", "IN1PGA" }, \ { _sink, "Input 2 Bypass Switch", "IN2PGA" }, \ { _sink, "Input 3 Bypass Switch", "IN3PGA" }, \ { _sink, "Input 4 Bypass Switch", "IN4PGA" } #define RIGHT_OUTPUT_MIXER_ROUTES(_sink) \ { _sink, "Right DAC2 Switch", "Right DAC2" }, \ { _sink, "Left DAC2 Switch", "Left DAC2" }, \ { _sink, "Right DAC1 Switch", "Right DAC1" }, \ { _sink, "Left DAC1 Switch", "Left DAC1" }, \ { _sink, "Input 1 Bypass Switch", "IN1PGA" }, \ { _sink, "Input 2 Bypass Switch", "IN2PGA" }, \ { _sink, "Input 3 Bypass Switch", "IN3PGA" }, \ { _sink, "Input 4 Bypass Switch", "IN4PGA" } static const struct snd_soc_dapm_route adau1373_dapm_routes[] = { { "Left ADC Mixer", "DAC1 Switch", "Left DAC1" }, { "Left ADC Mixer", "Input 1 Switch", "IN1PGA" }, { "Left ADC Mixer", "Input 2 Switch", "IN2PGA" }, { "Left ADC Mixer", "Input 3 Switch", "IN3PGA" 
}, { "Left ADC Mixer", "Input 4 Switch", "IN4PGA" }, { "Right ADC Mixer", "DAC1 Switch", "Right DAC1" }, { "Right ADC Mixer", "Input 1 Switch", "IN1PGA" }, { "Right ADC Mixer", "Input 2 Switch", "IN2PGA" }, { "Right ADC Mixer", "Input 3 Switch", "IN3PGA" }, { "Right ADC Mixer", "Input 4 Switch", "IN4PGA" }, { "Left ADC", NULL, "Left ADC Mixer" }, { "Right ADC", NULL, "Right ADC Mixer" }, { "Decimator Mux", "ADC", "Left ADC" }, { "Decimator Mux", "ADC", "Right ADC" }, { "Decimator Mux", "DMIC1", "DMIC1" }, DSP_CHANNEL_MIXER_ROUTES("DSP Channel1 Mixer"), DSP_CHANNEL_MIXER_ROUTES("DSP Channel2 Mixer"), DSP_CHANNEL_MIXER_ROUTES("DSP Channel3 Mixer"), DSP_CHANNEL_MIXER_ROUTES("DSP Channel4 Mixer"), DSP_CHANNEL_MIXER_ROUTES("DSP Channel5 Mixer"), DSP_OUTPUT_MIXER_ROUTES("AIF1 Mixer"), DSP_OUTPUT_MIXER_ROUTES("AIF2 Mixer"), DSP_OUTPUT_MIXER_ROUTES("AIF3 Mixer"), DSP_OUTPUT_MIXER_ROUTES("DAC1 Mixer"), DSP_OUTPUT_MIXER_ROUTES("DAC2 Mixer"), { "AIF1 OUT", NULL, "AIF1 Mixer" }, { "AIF2 OUT", NULL, "AIF2 Mixer" }, { "AIF3 OUT", NULL, "AIF3 Mixer" }, { "Left DAC1", NULL, "DAC1 Mixer" }, { "Right DAC1", NULL, "DAC1 Mixer" }, { "Left DAC2", NULL, "DAC2 Mixer" }, { "Right DAC2", NULL, "DAC2 Mixer" }, LEFT_OUTPUT_MIXER_ROUTES("Left Lineout1 Mixer"), RIGHT_OUTPUT_MIXER_ROUTES("Right Lineout1 Mixer"), LEFT_OUTPUT_MIXER_ROUTES("Left Lineout2 Mixer"), RIGHT_OUTPUT_MIXER_ROUTES("Right Lineout2 Mixer"), LEFT_OUTPUT_MIXER_ROUTES("Left Speaker Mixer"), RIGHT_OUTPUT_MIXER_ROUTES("Right Speaker Mixer"), { "Left Headphone Mixer", "Left DAC2 Switch", "Left DAC2" }, { "Left Headphone Mixer", "Left DAC1 Switch", "Left DAC1" }, { "Left Headphone Mixer", "Input 1 Bypass Switch", "IN1PGA" }, { "Left Headphone Mixer", "Input 2 Bypass Switch", "IN2PGA" }, { "Left Headphone Mixer", "Input 3 Bypass Switch", "IN3PGA" }, { "Left Headphone Mixer", "Input 4 Bypass Switch", "IN4PGA" }, { "Right Headphone Mixer", "Right DAC2 Switch", "Right DAC2" }, { "Right Headphone Mixer", "Right DAC1 Switch", "Right 
DAC1" }, { "Right Headphone Mixer", "Input 1 Bypass Switch", "IN1PGA" }, { "Right Headphone Mixer", "Input 2 Bypass Switch", "IN2PGA" }, { "Right Headphone Mixer", "Input 3 Bypass Switch", "IN3PGA" }, { "Right Headphone Mixer", "Input 4 Bypass Switch", "IN4PGA" }, { "Left Headphone Mixer", NULL, "Headphone Enable" }, { "Right Headphone Mixer", NULL, "Headphone Enable" }, { "Earpiece Mixer", "Right DAC2 Switch", "Right DAC2" }, { "Earpiece Mixer", "Left DAC2 Switch", "Left DAC2" }, { "Earpiece Mixer", "Right DAC1 Switch", "Right DAC1" }, { "Earpiece Mixer", "Left DAC1 Switch", "Left DAC1" }, { "Earpiece Mixer", "Input 1 Bypass Switch", "IN1PGA" }, { "Earpiece Mixer", "Input 2 Bypass Switch", "IN2PGA" }, { "Earpiece Mixer", "Input 3 Bypass Switch", "IN3PGA" }, { "Earpiece Mixer", "Input 4 Bypass Switch", "IN4PGA" }, { "LOUT1L", NULL, "Left Lineout1 Mixer" }, { "LOUT1R", NULL, "Right Lineout1 Mixer" }, { "LOUT2L", NULL, "Left Lineout2 Mixer" }, { "LOUT2R", NULL, "Right Lineout2 Mixer" }, { "SPKL", NULL, "Left Speaker Mixer" }, { "SPKR", NULL, "Right Speaker Mixer" }, { "HPL", NULL, "Left Headphone Mixer" }, { "HPR", NULL, "Right Headphone Mixer" }, { "EP", NULL, "Earpiece Mixer" }, { "IN1PGA", NULL, "AIN1L" }, { "IN2PGA", NULL, "AIN2L" }, { "IN3PGA", NULL, "AIN3L" }, { "IN4PGA", NULL, "AIN4L" }, { "IN1PGA", NULL, "AIN1R" }, { "IN2PGA", NULL, "AIN2R" }, { "IN3PGA", NULL, "AIN3R" }, { "IN4PGA", NULL, "AIN4R" }, { "SYSCLK1", NULL, "PLL1" }, { "SYSCLK2", NULL, "PLL2" }, { "Left DAC1", NULL, "SYSCLK1" }, { "Right DAC1", NULL, "SYSCLK1" }, { "Left DAC2", NULL, "SYSCLK1" }, { "Right DAC2", NULL, "SYSCLK1" }, { "Left ADC", NULL, "SYSCLK1" }, { "Right ADC", NULL, "SYSCLK1" }, { "DSP", NULL, "SYSCLK1" }, { "AIF1 Mixer", NULL, "DSP" }, { "AIF2 Mixer", NULL, "DSP" }, { "AIF3 Mixer", NULL, "DSP" }, { "DAC1 Mixer", NULL, "DSP" }, { "DAC2 Mixer", NULL, "DSP" }, { "DAC1 Mixer", NULL, "Playback Engine A" }, { "DAC2 Mixer", NULL, "Playback Engine B" }, { "Left ADC Mixer", NULL, 
"Recording Engine A" }, { "Right ADC Mixer", NULL, "Recording Engine A" }, { "AIF1 CLK", NULL, "SYSCLK1", adau1373_check_aif_clk }, { "AIF2 CLK", NULL, "SYSCLK1", adau1373_check_aif_clk }, { "AIF3 CLK", NULL, "SYSCLK1", adau1373_check_aif_clk }, { "AIF1 CLK", NULL, "SYSCLK2", adau1373_check_aif_clk }, { "AIF2 CLK", NULL, "SYSCLK2", adau1373_check_aif_clk }, { "AIF3 CLK", NULL, "SYSCLK2", adau1373_check_aif_clk }, { "AIF1 IN", NULL, "AIF1 CLK" }, { "AIF1 OUT", NULL, "AIF1 CLK" }, { "AIF2 IN", NULL, "AIF2 CLK" }, { "AIF2 OUT", NULL, "AIF2 CLK" }, { "AIF3 IN", NULL, "AIF3 CLK" }, { "AIF3 OUT", NULL, "AIF3 CLK" }, { "AIF1 IN", NULL, "AIF1 IN SRC", adau1373_check_src }, { "AIF1 OUT", NULL, "AIF1 OUT SRC", adau1373_check_src }, { "AIF2 IN", NULL, "AIF2 IN SRC", adau1373_check_src }, { "AIF2 OUT", NULL, "AIF2 OUT SRC", adau1373_check_src }, { "AIF3 IN", NULL, "AIF3 IN SRC", adau1373_check_src }, { "AIF3 OUT", NULL, "AIF3 OUT SRC", adau1373_check_src }, { "DMIC1", NULL, "DMIC1DAT" }, { "DMIC1", NULL, "SYSCLK1" }, { "DMIC1", NULL, "Recording Engine A" }, { "DMIC2", NULL, "DMIC2DAT" }, { "DMIC2", NULL, "SYSCLK1" }, { "DMIC2", NULL, "Recording Engine B" }, }; static int adau1373_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec); struct adau1373_dai *adau1373_dai = &adau1373->dais[dai->id]; unsigned int div; unsigned int freq; unsigned int ctrl; freq = adau1373_dai->sysclk; if (freq % params_rate(params) != 0) return -EINVAL; switch (freq / params_rate(params)) { case 1024: /* sysclk / 256 */ div = 0; break; case 1536: /* 2/3 sysclk / 256 */ div = 1; break; case 2048: /* 1/2 sysclk / 256 */ div = 2; break; case 3072: /* 1/3 sysclk / 256 */ div = 3; break; case 4096: /* 1/4 sysclk / 256 */ div = 4; break; case 6144: /* 1/6 sysclk / 256 */ div = 5; break; case 5632: /* 2/11 sysclk / 256 */ div = 6; break; default: return 
-EINVAL; } adau1373_dai->enable_src = (div != 0); snd_soc_update_bits(codec, ADAU1373_BCLKDIV(dai->id), ~ADAU1373_BCLKDIV_SOURCE, (div << 2) | ADAU1373_BCLKDIV_64); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: ctrl = ADAU1373_DAI_WLEN_16; break; case SNDRV_PCM_FORMAT_S20_3LE: ctrl = ADAU1373_DAI_WLEN_20; break; case SNDRV_PCM_FORMAT_S24_LE: ctrl = ADAU1373_DAI_WLEN_24; break; case SNDRV_PCM_FORMAT_S32_LE: ctrl = ADAU1373_DAI_WLEN_32; break; default: return -EINVAL; } return snd_soc_update_bits(codec, ADAU1373_DAI(dai->id), ADAU1373_DAI_WLEN_MASK, ctrl); } static int adau1373_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_codec *codec = dai->codec; struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec); struct adau1373_dai *adau1373_dai = &adau1373->dais[dai->id]; unsigned int ctrl; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: ctrl = ADAU1373_DAI_MASTER; adau1373_dai->master = true; break; case SND_SOC_DAIFMT_CBS_CFS: ctrl = 0; adau1373_dai->master = false; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: ctrl |= ADAU1373_DAI_FORMAT_I2S; break; case SND_SOC_DAIFMT_LEFT_J: ctrl |= ADAU1373_DAI_FORMAT_LEFT_J; break; case SND_SOC_DAIFMT_RIGHT_J: ctrl |= ADAU1373_DAI_FORMAT_RIGHT_J; break; case SND_SOC_DAIFMT_DSP_B: ctrl |= ADAU1373_DAI_FORMAT_DSP; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_NF: ctrl |= ADAU1373_DAI_INVERT_BCLK; break; case SND_SOC_DAIFMT_NB_IF: ctrl |= ADAU1373_DAI_INVERT_LRCLK; break; case SND_SOC_DAIFMT_IB_IF: ctrl |= ADAU1373_DAI_INVERT_LRCLK | ADAU1373_DAI_INVERT_BCLK; break; default: return -EINVAL; } snd_soc_update_bits(codec, ADAU1373_DAI(dai->id), ~ADAU1373_DAI_WLEN_MASK, ctrl); return 0; } static int adau1373_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct adau1373 *adau1373 = 
snd_soc_codec_get_drvdata(dai->codec); struct adau1373_dai *adau1373_dai = &adau1373->dais[dai->id]; switch (clk_id) { case ADAU1373_CLK_SRC_PLL1: case ADAU1373_CLK_SRC_PLL2: break; default: return -EINVAL; } adau1373_dai->sysclk = freq; adau1373_dai->clk_src = clk_id; snd_soc_update_bits(dai->codec, ADAU1373_BCLKDIV(dai->id), ADAU1373_BCLKDIV_SOURCE, clk_id << 5); return 0; } static const struct snd_soc_dai_ops adau1373_dai_ops = { .hw_params = adau1373_hw_params, .set_sysclk = adau1373_set_dai_sysclk, .set_fmt = adau1373_set_dai_fmt, }; #define ADAU1373_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static struct snd_soc_dai_driver adau1373_dai_driver[] = { { .id = 0, .name = "adau1373-aif1", .playback = { .stream_name = "AIF1 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .capture = { .stream_name = "AIF1 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .ops = &adau1373_dai_ops, .symmetric_rates = 1, }, { .id = 1, .name = "adau1373-aif2", .playback = { .stream_name = "AIF2 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .capture = { .stream_name = "AIF2 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .ops = &adau1373_dai_ops, .symmetric_rates = 1, }, { .id = 2, .name = "adau1373-aif3", .playback = { .stream_name = "AIF3 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .capture = { .stream_name = "AIF3 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = ADAU1373_FORMATS, }, .ops = &adau1373_dai_ops, .symmetric_rates = 1, }, }; static int adau1373_set_pll(struct snd_soc_codec *codec, int pll_id, int source, unsigned int 
freq_in, unsigned int freq_out) { unsigned int dpll_div = 0; unsigned int x, r, n, m, i, j, mode; switch (pll_id) { case ADAU1373_PLL1: case ADAU1373_PLL2: break; default: return -EINVAL; } switch (source) { case ADAU1373_PLL_SRC_BCLK1: case ADAU1373_PLL_SRC_BCLK2: case ADAU1373_PLL_SRC_BCLK3: case ADAU1373_PLL_SRC_LRCLK1: case ADAU1373_PLL_SRC_LRCLK2: case ADAU1373_PLL_SRC_LRCLK3: case ADAU1373_PLL_SRC_MCLK1: case ADAU1373_PLL_SRC_MCLK2: case ADAU1373_PLL_SRC_GPIO1: case ADAU1373_PLL_SRC_GPIO2: case ADAU1373_PLL_SRC_GPIO3: case ADAU1373_PLL_SRC_GPIO4: break; default: return -EINVAL; } if (freq_in < 7813 || freq_in > 27000000) return -EINVAL; if (freq_out < 45158000 || freq_out > 49152000) return -EINVAL; /* APLL input needs to be >= 8Mhz, so in case freq_in is less we use the * DPLL to get it there. DPLL_out = (DPLL_in / div) * 1024 */ while (freq_in < 8000000) { freq_in *= 2; dpll_div++; } if (freq_out % freq_in != 0) { /* fout = fin * (r + (n/m)) / x */ x = DIV_ROUND_UP(freq_in, 13500000); freq_in /= x; r = freq_out / freq_in; i = freq_out % freq_in; j = gcd(i, freq_in); n = i / j; m = freq_in / j; x--; mode = 1; } else { /* fout = fin / r */ r = freq_out / freq_in; n = 0; m = 0; x = 0; mode = 0; } if (r < 2 || r > 8 || x > 3 || m > 0xffff || n > 0xffff) return -EINVAL; if (dpll_div) { dpll_div = 11 - dpll_div; snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id), ADAU1373_PLL_CTRL6_DPLL_BYPASS, 0); } else { snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id), ADAU1373_PLL_CTRL6_DPLL_BYPASS, ADAU1373_PLL_CTRL6_DPLL_BYPASS); } snd_soc_write(codec, ADAU1373_DPLL_CTRL(pll_id), (source << 4) | dpll_div); snd_soc_write(codec, ADAU1373_PLL_CTRL1(pll_id), (m >> 8) & 0xff); snd_soc_write(codec, ADAU1373_PLL_CTRL2(pll_id), m & 0xff); snd_soc_write(codec, ADAU1373_PLL_CTRL3(pll_id), (n >> 8) & 0xff); snd_soc_write(codec, ADAU1373_PLL_CTRL4(pll_id), n & 0xff); snd_soc_write(codec, ADAU1373_PLL_CTRL5(pll_id), (r << 3) | (x << 1) | mode); /* Set sysclk to pll_rate / 4 */ 
snd_soc_update_bits(codec, ADAU1373_CLK_SRC_DIV(pll_id), 0x3f, 0x09); return 0; } static void adau1373_load_drc_settings(struct snd_soc_codec *codec, unsigned int nr, uint8_t *drc) { unsigned int i; for (i = 0; i < ADAU1373_DRC_SIZE; ++i) snd_soc_write(codec, ADAU1373_DRC(nr) + i, drc[i]); } static bool adau1373_valid_micbias(enum adau1373_micbias_voltage micbias) { switch (micbias) { case ADAU1373_MICBIAS_2_9V: case ADAU1373_MICBIAS_2_2V: case ADAU1373_MICBIAS_2_6V: case ADAU1373_MICBIAS_1_8V: return true; default: break; } return false; } static int adau1373_probe(struct snd_soc_codec *codec) { struct adau1373_platform_data *pdata = codec->dev->platform_data; bool lineout_differential = false; unsigned int val; int ret; int i; ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C); if (ret) { dev_err(codec->dev, "failed to set cache I/O: %d\n", ret); return ret; } if (pdata) { if (pdata->num_drc > ARRAY_SIZE(pdata->drc_setting)) return -EINVAL; if (!adau1373_valid_micbias(pdata->micbias1) || !adau1373_valid_micbias(pdata->micbias2)) return -EINVAL; for (i = 0; i < pdata->num_drc; ++i) { adau1373_load_drc_settings(codec, i, pdata->drc_setting[i]); } snd_soc_add_codec_controls(codec, adau1373_drc_controls, pdata->num_drc); val = 0; for (i = 0; i < 4; ++i) { if (pdata->input_differential[i]) val |= BIT(i); } snd_soc_write(codec, ADAU1373_INPUT_MODE, val); val = 0; if (pdata->lineout_differential) val |= ADAU1373_OUTPUT_CTRL_LDIFF; if (pdata->lineout_ground_sense) val |= ADAU1373_OUTPUT_CTRL_LNFBEN; snd_soc_write(codec, ADAU1373_OUTPUT_CTRL, val); lineout_differential = pdata->lineout_differential; snd_soc_write(codec, ADAU1373_EP_CTRL, (pdata->micbias1 << ADAU1373_EP_CTRL_MICBIAS1_OFFSET) | (pdata->micbias2 << ADAU1373_EP_CTRL_MICBIAS2_OFFSET)); } if (!lineout_differential) { snd_soc_add_codec_controls(codec, adau1373_lineout2_controls, ARRAY_SIZE(adau1373_lineout2_controls)); } snd_soc_write(codec, ADAU1373_ADC_CTRL, ADAU1373_ADC_CTRL_RESET_FORCE | 
ADAU1373_ADC_CTRL_PEAK_DETECT); return 0; } static int adau1373_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: snd_soc_update_bits(codec, ADAU1373_PWDN_CTRL3, ADAU1373_PWDN_CTRL3_PWR_EN, ADAU1373_PWDN_CTRL3_PWR_EN); break; case SND_SOC_BIAS_OFF: snd_soc_update_bits(codec, ADAU1373_PWDN_CTRL3, ADAU1373_PWDN_CTRL3_PWR_EN, 0); break; } codec->dapm.bias_level = level; return 0; } static int adau1373_remove(struct snd_soc_codec *codec) { adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int adau1373_suspend(struct snd_soc_codec *codec) { return adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF); } static int adau1373_resume(struct snd_soc_codec *codec) { adau1373_set_bias_level(codec, SND_SOC_BIAS_STANDBY); snd_soc_cache_sync(codec); return 0; } static struct snd_soc_codec_driver adau1373_codec_driver = { .probe = adau1373_probe, .remove = adau1373_remove, .suspend = adau1373_suspend, .resume = adau1373_resume, .set_bias_level = adau1373_set_bias_level, .idle_bias_off = true, .reg_cache_size = ARRAY_SIZE(adau1373_default_regs), .reg_cache_default = adau1373_default_regs, .reg_word_size = sizeof(uint8_t), .set_pll = adau1373_set_pll, .controls = adau1373_controls, .num_controls = ARRAY_SIZE(adau1373_controls), .dapm_widgets = adau1373_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(adau1373_dapm_widgets), .dapm_routes = adau1373_dapm_routes, .num_dapm_routes = ARRAY_SIZE(adau1373_dapm_routes), }; static int __devinit adau1373_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adau1373 *adau1373; int ret; adau1373 = devm_kzalloc(&client->dev, sizeof(*adau1373), GFP_KERNEL); if (!adau1373) return -ENOMEM; dev_set_drvdata(&client->dev, adau1373); ret = snd_soc_register_codec(&client->dev, &adau1373_codec_driver, adau1373_dai_driver, ARRAY_SIZE(adau1373_dai_driver)); return ret; } static int 
__devexit adau1373_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id adau1373_i2c_id[] = { { "adau1373", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adau1373_i2c_id); static struct i2c_driver adau1373_i2c_driver = { .driver = { .name = "adau1373", .owner = THIS_MODULE, }, .probe = adau1373_i2c_probe, .remove = __devexit_p(adau1373_i2c_remove), .id_table = adau1373_i2c_id, }; static int __init adau1373_init(void) { return i2c_add_driver(&adau1373_i2c_driver); } module_init(adau1373_init); static void __exit adau1373_exit(void) { i2c_del_driver(&adau1373_i2c_driver); } module_exit(adau1373_exit); MODULE_DESCRIPTION("ASoC ADAU1373 driver"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_LICENSE("GPL");
gpl-2.0
felixsch/linux
scripts/dtc/fdtput.c
6995
8546
/* * Copyright (c) 2011 The Chromium OS Authors. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <assert.h> #include <ctype.h> #include <getopt.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <libfdt.h> #include "util.h" /* These are the operations we support */ enum oper_type { OPER_WRITE_PROP, /* Write a property in a node */ OPER_CREATE_NODE, /* Create a new node */ }; struct display_info { enum oper_type oper; /* operation to perform */ int type; /* data type (s/i/u/x or 0 for default) */ int size; /* data size (1/2/4) */ int verbose; /* verbose output */ int auto_path; /* automatically create all path components */ }; /** * Report an error with a particular node. * * @param name Node name to report error on * @param namelen Length of node name, or -1 to use entire string * @param err Error number to report (-FDT_ERR_...) */ static void report_error(const char *name, int namelen, int err) { if (namelen == -1) namelen = strlen(name); fprintf(stderr, "Error at '%1.*s': %s\n", namelen, name, fdt_strerror(err)); } /** * Encode a series of arguments in a property value. 
* * @param disp Display information / options * @param arg List of arguments from command line * @param arg_count Number of arguments (may be 0) * @param valuep Returns buffer containing value * @param *value_len Returns length of value encoded */ static int encode_value(struct display_info *disp, char **arg, int arg_count, char **valuep, int *value_len) { char *value = NULL; /* holding area for value */ int value_size = 0; /* size of holding area */ char *ptr; /* pointer to current value position */ int len; /* length of this cell/string/byte */ int ival; int upto; /* the number of bytes we have written to buf */ char fmt[3]; upto = 0; if (disp->verbose) fprintf(stderr, "Decoding value:\n"); fmt[0] = '%'; fmt[1] = disp->type ? disp->type : 'd'; fmt[2] = '\0'; for (; arg_count > 0; arg++, arg_count--, upto += len) { /* assume integer unless told otherwise */ if (disp->type == 's') len = strlen(*arg) + 1; else len = disp->size == -1 ? 4 : disp->size; /* enlarge our value buffer by a suitable margin if needed */ if (upto + len > value_size) { value_size = (upto + len) + 500; value = realloc(value, value_size); if (!value) { fprintf(stderr, "Out of mmory: cannot alloc " "%d bytes\n", value_size); return -1; } } ptr = value + upto; if (disp->type == 's') { memcpy(ptr, *arg, len); if (disp->verbose) fprintf(stderr, "\tstring: '%s'\n", ptr); } else { int *iptr = (int *)ptr; sscanf(*arg, fmt, &ival); if (len == 4) *iptr = cpu_to_fdt32(ival); else *ptr = (uint8_t)ival; if (disp->verbose) { fprintf(stderr, "\t%s: %d\n", disp->size == 1 ? "byte" : disp->size == 2 ? 
"short" : "int", ival); } } } *value_len = upto; *valuep = value; if (disp->verbose) fprintf(stderr, "Value size %d\n", upto); return 0; } static int store_key_value(void *blob, const char *node_name, const char *property, const char *buf, int len) { int node; int err; node = fdt_path_offset(blob, node_name); if (node < 0) { report_error(node_name, -1, node); return -1; } err = fdt_setprop(blob, node, property, buf, len); if (err) { report_error(property, -1, err); return -1; } return 0; } /** * Create paths as needed for all components of a path * * Any components of the path that do not exist are created. Errors are * reported. * * @param blob FDT blob to write into * @param in_path Path to process * @return 0 if ok, -1 on error */ static int create_paths(void *blob, const char *in_path) { const char *path = in_path; const char *sep; int node, offset = 0; /* skip leading '/' */ while (*path == '/') path++; for (sep = path; *sep; path = sep + 1, offset = node) { /* equivalent to strchrnul(), but it requires _GNU_SOURCE */ sep = strchr(path, '/'); if (!sep) sep = path + strlen(path); node = fdt_subnode_offset_namelen(blob, offset, path, sep - path); if (node == -FDT_ERR_NOTFOUND) { node = fdt_add_subnode_namelen(blob, offset, path, sep - path); } if (node < 0) { report_error(path, sep - path, node); return -1; } } return 0; } /** * Create a new node in the fdt. * * This will overwrite the node_name string. Any error is reported. * * TODO: Perhaps create fdt_path_offset_namelen() so we don't need to do this. 
* * @param blob FDT blob to write into * @param node_name Name of node to create * @return new node offset if found, or -1 on failure */ static int create_node(void *blob, const char *node_name) { int node = 0; char *p; p = strrchr(node_name, '/'); if (!p) { report_error(node_name, -1, -FDT_ERR_BADPATH); return -1; } *p = '\0'; if (p > node_name) { node = fdt_path_offset(blob, node_name); if (node < 0) { report_error(node_name, -1, node); return -1; } } node = fdt_add_subnode(blob, node, p + 1); if (node < 0) { report_error(p + 1, -1, node); return -1; } return 0; } static int do_fdtput(struct display_info *disp, const char *filename, char **arg, int arg_count) { char *value; char *blob; int len, ret = 0; blob = utilfdt_read(filename); if (!blob) return -1; switch (disp->oper) { case OPER_WRITE_PROP: /* * Convert the arguments into a single binary value, then * store them into the property. */ assert(arg_count >= 2); if (disp->auto_path && create_paths(blob, *arg)) return -1; if (encode_value(disp, arg + 2, arg_count - 2, &value, &len) || store_key_value(blob, *arg, arg[1], value, len)) ret = -1; break; case OPER_CREATE_NODE: for (; ret >= 0 && arg_count--; arg++) { if (disp->auto_path) ret = create_paths(blob, *arg); else ret = create_node(blob, *arg); } break; } if (ret >= 0) ret = utilfdt_write(filename, blob); free(blob); return ret; } static const char *usage_msg = "fdtput - write a property value to a device tree\n" "\n" "The command line arguments are joined together into a single value.\n" "\n" "Usage:\n" " fdtput <options> <dt file> <node> <property> [<value>...]\n" " fdtput -c <options> <dt file> [<node>...]\n" "Options:\n" "\t-c\t\tCreate nodes if they don't already exist\n" "\t-p\t\tAutomatically create nodes as needed for the node path\n" "\t-t <type>\tType of data\n" "\t-v\t\tVerbose: display each value decoded from command line\n" "\t-h\t\tPrint this help\n\n" USAGE_TYPE_MSG; static void usage(const char *msg) { if (msg) fprintf(stderr, "Error: 
%s\n\n", msg); fprintf(stderr, "%s", usage_msg); exit(2); } int main(int argc, char *argv[]) { struct display_info disp; char *filename = NULL; memset(&disp, '\0', sizeof(disp)); disp.size = -1; disp.oper = OPER_WRITE_PROP; for (;;) { int c = getopt(argc, argv, "chpt:v"); if (c == -1) break; /* * TODO: add options to: * - delete property * - delete node (optionally recursively) * - rename node * - pack fdt before writing * - set amount of free space when writing * - expand fdt if value doesn't fit */ switch (c) { case 'c': disp.oper = OPER_CREATE_NODE; break; case 'h': case '?': usage(NULL); case 'p': disp.auto_path = 1; break; case 't': if (utilfdt_decode_type(optarg, &disp.type, &disp.size)) usage("Invalid type string"); break; case 'v': disp.verbose = 1; break; } } if (optind < argc) filename = argv[optind++]; if (!filename) usage("Missing filename"); argv += optind; argc -= optind; if (disp.oper == OPER_WRITE_PROP) { if (argc < 1) usage("Missing node"); if (argc < 2) usage("Missing property"); } if (do_fdtput(&disp, filename, argv, argc)) return 1; return 0; }
gpl-2.0
Zenfone2-Dev/kernel-FlareM
scripts/dtc/fdtget.c
6995
8781
/* * Copyright (c) 2011 The Chromium OS Authors. All rights reserved. * * Portions from U-Boot cmd_fdt.c (C) Copyright 2007 * Gerald Van Baren, Custom IDEAS, vanbaren@cideas.com * Based on code written by: * Pantelis Antoniou <pantelis.antoniou@gmail.com> and * Matthew McClintock <msm@freescale.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <assert.h> #include <ctype.h> #include <getopt.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <libfdt.h> #include "util.h" enum display_mode { MODE_SHOW_VALUE, /* show values for node properties */ MODE_LIST_PROPS, /* list the properties for a node */ MODE_LIST_SUBNODES, /* list the subnodes of a node */ }; /* Holds information which controls our output and options */ struct display_info { int type; /* data type (s/i/u/x or 0 for default) */ int size; /* data size (1/2/4) */ enum display_mode mode; /* display mode that we are using */ const char *default_val; /* default value if node/property not found */ }; static void report_error(const char *where, int err) { fprintf(stderr, "Error at '%s': %s\n", where, fdt_strerror(err)); } /** * Displays data of a given length according to selected options * * If a specific data type is provided in disp, then this is used. Otherwise * we try to guess the data type / size from the contents. 
* * @param disp Display information / options * @param data Data to display * @param len Maximum length of buffer * @return 0 if ok, -1 if data does not match format */ static int show_data(struct display_info *disp, const char *data, int len) { int i, size; const uint8_t *p = (const uint8_t *)data; const char *s; int value; int is_string; char fmt[3]; /* no data, don't print */ if (len == 0) return 0; is_string = (disp->type) == 's' || (!disp->type && util_is_printable_string(data, len)); if (is_string) { if (data[len - 1] != '\0') { fprintf(stderr, "Unterminated string\n"); return -1; } for (s = data; s - data < len; s += strlen(s) + 1) { if (s != data) printf(" "); printf("%s", (const char *)s); } return 0; } size = disp->size; if (size == -1) { size = (len % 4) == 0 ? 4 : 1; } else if (len % size) { fprintf(stderr, "Property length must be a multiple of " "selected data size\n"); return -1; } fmt[0] = '%'; fmt[1] = disp->type ? disp->type : 'd'; fmt[2] = '\0'; for (i = 0; i < len; i += size, p += size) { if (i) printf(" "); value = size == 4 ? fdt32_to_cpu(*(const uint32_t *)p) : size == 2 ? (*p << 8) | p[1] : *p; printf(fmt, value); } return 0; } /** * List all properties in a node, one per line. * * @param blob FDT blob * @param node Node to display * @return 0 if ok, or FDT_ERR... if not. */ static int list_properties(const void *blob, int node) { const struct fdt_property *data; const char *name; int prop; prop = fdt_first_property_offset(blob, node); do { /* Stop silently when there are no more properties */ if (prop < 0) return prop == -FDT_ERR_NOTFOUND ? 0 : prop; data = fdt_get_property_by_offset(blob, prop, NULL); name = fdt_string(blob, fdt32_to_cpu(data->nameoff)); if (name) puts(name); prop = fdt_next_property_offset(blob, prop); } while (1); } #define MAX_LEVEL 32 /* how deeply nested we will go */ /** * List all subnodes in a node, one per line * * @param blob FDT blob * @param node Node to display * @return 0 if ok, or FDT_ERR... if not. 
*/ static int list_subnodes(const void *blob, int node) { int nextoffset; /* next node offset from libfdt */ uint32_t tag; /* current tag */ int level = 0; /* keep track of nesting level */ const char *pathp; int depth = 1; /* the assumed depth of this node */ while (level >= 0) { tag = fdt_next_tag(blob, node, &nextoffset); switch (tag) { case FDT_BEGIN_NODE: pathp = fdt_get_name(blob, node, NULL); if (level <= depth) { if (pathp == NULL) pathp = "/* NULL pointer error */"; if (*pathp == '\0') pathp = "/"; /* root is nameless */ if (level == 1) puts(pathp); } level++; if (level >= MAX_LEVEL) { printf("Nested too deep, aborting.\n"); return 1; } break; case FDT_END_NODE: level--; if (level == 0) level = -1; /* exit the loop */ break; case FDT_END: return 1; case FDT_PROP: break; default: if (level <= depth) printf("Unknown tag 0x%08X\n", tag); return 1; } node = nextoffset; } return 0; } /** * Show the data for a given node (and perhaps property) according to the * display option provided. 
* * @param blob FDT blob * @param disp Display information / options * @param node Node to display * @param property Name of property to display, or NULL if none * @return 0 if ok, -ve on error */ static int show_data_for_item(const void *blob, struct display_info *disp, int node, const char *property) { const void *value = NULL; int len, err = 0; switch (disp->mode) { case MODE_LIST_PROPS: err = list_properties(blob, node); break; case MODE_LIST_SUBNODES: err = list_subnodes(blob, node); break; default: assert(property); value = fdt_getprop(blob, node, property, &len); if (value) { if (show_data(disp, value, len)) err = -1; else printf("\n"); } else if (disp->default_val) { puts(disp->default_val); } else { report_error(property, len); err = -1; } break; } return err; } /** * Run the main fdtget operation, given a filename and valid arguments * * @param disp Display information / options * @param filename Filename of blob file * @param arg List of arguments to process * @param arg_count Number of arguments * @param return 0 if ok, -ve on error */ static int do_fdtget(struct display_info *disp, const char *filename, char **arg, int arg_count, int args_per_step) { char *blob; const char *prop; int i, node; blob = utilfdt_read(filename); if (!blob) return -1; for (i = 0; i + args_per_step <= arg_count; i += args_per_step) { node = fdt_path_offset(blob, arg[i]); if (node < 0) { if (disp->default_val) { puts(disp->default_val); continue; } else { report_error(arg[i], node); return -1; } } prop = args_per_step == 1 ? 
NULL : arg[i + 1]; if (show_data_for_item(blob, disp, node, prop)) return -1; } return 0; } static const char *usage_msg = "fdtget - read values from device tree\n" "\n" "Each value is printed on a new line.\n\n" "Usage:\n" " fdtget <options> <dt file> [<node> <property>]...\n" " fdtget -p <options> <dt file> [<node> ]...\n" "Options:\n" "\t-t <type>\tType of data\n" "\t-p\t\tList properties for each node\n" "\t-l\t\tList subnodes for each node\n" "\t-d\t\tDefault value to display when the property is " "missing\n" "\t-h\t\tPrint this help\n\n" USAGE_TYPE_MSG; static void usage(const char *msg) { if (msg) fprintf(stderr, "Error: %s\n\n", msg); fprintf(stderr, "%s", usage_msg); exit(2); } int main(int argc, char *argv[]) { char *filename = NULL; struct display_info disp; int args_per_step = 2; /* set defaults */ memset(&disp, '\0', sizeof(disp)); disp.size = -1; disp.mode = MODE_SHOW_VALUE; for (;;) { int c = getopt(argc, argv, "d:hlpt:"); if (c == -1) break; switch (c) { case 'h': case '?': usage(NULL); case 't': if (utilfdt_decode_type(optarg, &disp.type, &disp.size)) usage("Invalid type string"); break; case 'p': disp.mode = MODE_LIST_PROPS; args_per_step = 1; break; case 'l': disp.mode = MODE_LIST_SUBNODES; args_per_step = 1; break; case 'd': disp.default_val = optarg; break; } } if (optind < argc) filename = argv[optind++]; if (!filename) usage("Missing filename"); argv += optind; argc -= optind; /* Allow no arguments, and silently succeed */ if (!argc) return 0; /* Check for node, property arguments */ if (args_per_step == 2 && (argc % 2)) usage("Must have an even number of arguments"); if (do_fdtget(&disp, filename, argv, argc, args_per_step)) return 1; return 0; }
gpl-2.0
Team-Hydra/android_kernel_htc_msm8660-caf
arch/sh/mm/alignment.c
7507
4763
/*
 * Alignment access counters and corresponding user-space interfaces.
 *
 * Copyright (C) 2009 ST Microelectronics
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>
#include <asm/alignment.h>
#include <asm/processor.h>

/* Per-category unaligned-access counters, exposed via /proc/cpu/alignment. */
static unsigned long se_user;	/* faults taken in user mode */
static unsigned long se_sys;	/* faults taken in kernel mode */
static unsigned long se_half;	/* byte/halfword accesses */
static unsigned long se_word;	/* word accesses */
static unsigned long se_dword;	/* double-word accesses */
static unsigned long se_multi;	/* multi-word accesses */

/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not valid! */
static int se_usermode = UM_WARN | UM_FIXUP;

/* 0: no warning 1: print a warning message, disabled by default */
static int se_kernmode_warn;

/*
 * Exposes se_usermode as the "alignment" kernel command-line/module
 * parameter.  NOTE(review): this accepts any int, not just the valid
 * 0..5 combinations -- see the indexing concern in alignment_proc_show().
 */
core_param(alignment, se_usermode, int, 0600);

/* Counter bump helpers, called from the unaligned-access fixup paths. */
void inc_unaligned_byte_access(void)
{
	se_half++;
}

void inc_unaligned_word_access(void)
{
	se_word++;
}

void inc_unaligned_dword_access(void)
{
	se_dword++;
}

void inc_unaligned_multi_access(void)
{
	se_multi++;
}

void inc_unaligned_user_access(void)
{
	se_user++;
}

void inc_unaligned_kernel_access(void)
{
	se_sys++;
}

/*
 * This defaults to the global policy which can be set from the command
 * line, while processes can overload their preferences via prctl().
 */
unsigned int unaligned_user_action(void)
{
	unsigned int action = se_usermode;

	/* Per-task SIGBUS preference overrides the fixup policy. */
	if (current->thread.flags & SH_THREAD_UAC_SIGBUS) {
		action &= ~UM_FIXUP;
		action |= UM_SIGNAL;
	}

	/* Per-task request to suppress the warning message. */
	if (current->thread.flags & SH_THREAD_UAC_NOPRINT)
		action &= ~UM_WARN;

	return action;
}

/*
 * prctl(PR_GET_UNALIGN) backend: copy the task's UAC flag bits to the
 * user-supplied address.  Returns the put_user() result (0 or -EFAULT).
 */
int get_unalign_ctl(struct task_struct *tsk, unsigned long addr)
{
	return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK,
			(unsigned int __user *)addr);
}

/*
 * prctl(PR_SET_UNALIGN) backend: replace only the UAC bits of the task's
 * thread flags with the masked value.  Always succeeds.
 */
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) |
			    (val & SH_THREAD_UAC_MASK);
	return 0;
}

/*
 * Rate-limited diagnostic printed when an unaligned access is fixed up,
 * gated by the user-mode UM_WARN policy or the kernel-mode warn knob.
 */
void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
			     struct pt_regs *regs)
{
	if (user_mode(regs) && (se_usermode & UM_WARN))
		pr_notice_ratelimited("Fixing up unaligned userspace access "
			  "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
			  tsk->comm, task_pid_nr(tsk),
			  (void *)instruction_pointer(regs), insn);
	else if (se_kernmode_warn)
		pr_notice_ratelimited("Fixing up unaligned kernel access "
			  "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
			  tsk->comm, task_pid_nr(tsk),
			  (void *)instruction_pointer(regs), insn);
}

/* Human-readable names for the 6 valid se_usermode bit combinations (0..5). */
static const char *se_usermode_action[] = {
	"ignored",
	"warn",
	"fixup",
	"fixup+warn",
	"signal",
	"signal+warn"
};

/*
 * seq_file show callback for /proc/cpu/alignment: dump the counters and
 * the current policies.
 *
 * NOTE(review): se_usermode_action[se_usermode] assumes se_usermode is in
 * 0..5.  The proc write handler below enforces that range, but core_param
 * ("alignment=") does not -- an out-of-range value would read past the
 * array.  Worth confirming/fixing upstream.
 */
static int alignment_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "User:\t\t%lu\n", se_user);
	seq_printf(m, "System:\t\t%lu\n", se_sys);
	seq_printf(m, "Half:\t\t%lu\n", se_half);
	seq_printf(m, "Word:\t\t%lu\n", se_word);
	seq_printf(m, "DWord:\t\t%lu\n", se_dword);
	seq_printf(m, "Multi:\t\t%lu\n", se_multi);
	seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
			se_usermode_action[se_usermode]);
	seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
			se_kernmode_warn ? "+warn" : "");
	return 0;
}

static int alignment_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, alignment_proc_show, NULL);
}

/*
 * Write handler shared by both proc entries; ->data points at either
 * se_usermode or se_kernmode_warn (set up in alignment_init()).  Only the
 * first character is examined; it must be '0'..'5', anything else is
 * silently ignored.  Always reports the full count as consumed.
 */
static ssize_t alignment_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	int *data = PDE(file->f_path.dentry->d_inode)->data;
	char mode;

	if (count > 0) {
		if (get_user(mode, buffer))
			return -EFAULT;
		if (mode >= '0' && mode <= '5')
			*data = mode - '0';
	}
	return count;
}

static const struct file_operations alignment_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= alignment_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= alignment_proc_write,
};

/*
 * This needs to be done after sysctl_init, otherwise sys/ will be
 * overwritten. Actually, this shouldn't be in sys/ at all since
 * it isn't a sysctl, and it doesn't contain sysctl information.
 * We now locate it in /proc/cpu/alignment instead.
 */
static int __init alignment_init(void)
{
	struct proc_dir_entry *dir, *res;

	dir = proc_mkdir("cpu", NULL);
	if (!dir)
		return -ENOMEM;

	/* user-mode policy knob, backed by se_usermode */
	res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
			       &alignment_proc_fops, &se_usermode);
	if (!res)
		return -ENOMEM;

	/* kernel-mode warning knob, backed by se_kernmode_warn */
	res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
			       &alignment_proc_fops, &se_kernmode_warn);
	if (!res)
		return -ENOMEM;

	return 0;
}
fs_initcall(alignment_init);
gpl-2.0
ywzjackal/dmmu_linux
drivers/usb/host/fhci-mem.c
13907
2682
/* * Freescale QUICC Engine USB Host Controller Driver * * Copyright (c) Freescale Semicondutor, Inc. 2006. * Shlomi Gridish <gridish@freescale.com> * Jerry Huang <Chang-Ming.Huang@freescale.com> * Copyright (c) Logic Product Development, Inc. 2007 * Peter Barada <peterb@logicpd.com> * Copyright (c) MontaVista Software, Inc. 2008. * Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include "fhci.h" static void init_td(struct td *td) { memset(td, 0, sizeof(*td)); INIT_LIST_HEAD(&td->node); INIT_LIST_HEAD(&td->frame_lh); } static void init_ed(struct ed *ed) { memset(ed, 0, sizeof(*ed)); INIT_LIST_HEAD(&ed->td_list); INIT_LIST_HEAD(&ed->node); } static struct td *get_empty_td(struct fhci_hcd *fhci) { struct td *td; if (!list_empty(&fhci->empty_tds)) { td = list_entry(fhci->empty_tds.next, struct td, node); list_del(fhci->empty_tds.next); } else { td = kmalloc(sizeof(*td), GFP_ATOMIC); if (!td) fhci_err(fhci, "No memory to allocate to TD\n"); else init_td(td); } return td; } void fhci_recycle_empty_td(struct fhci_hcd *fhci, struct td *td) { init_td(td); list_add(&td->node, &fhci->empty_tds); } struct ed *fhci_get_empty_ed(struct fhci_hcd *fhci) { struct ed *ed; if (!list_empty(&fhci->empty_eds)) { ed = list_entry(fhci->empty_eds.next, struct ed, node); list_del(fhci->empty_eds.next); } else { ed = kmalloc(sizeof(*ed), GFP_ATOMIC); if (!ed) fhci_err(fhci, "No memory to allocate to ED\n"); else init_ed(ed); } return ed; } void fhci_recycle_empty_ed(struct fhci_hcd *fhci, struct ed *ed) { init_ed(ed); list_add(&ed->node, &fhci->empty_eds); } struct td 
*fhci_td_fill(struct fhci_hcd *fhci, struct urb *urb, struct urb_priv *urb_priv, struct ed *ed, u16 index, enum fhci_ta_type type, int toggle, u8 *data, u32 len, u16 interval, u16 start_frame, bool ioc) { struct td *td = get_empty_td(fhci); if (!td) return NULL; td->urb = urb; td->ed = ed; td->type = type; td->toggle = toggle; td->data = data; td->len = len; td->iso_index = index; td->interval = interval; td->start_frame = start_frame; td->ioc = ioc; td->status = USB_TD_OK; urb_priv->tds[index] = td; return td; }
gpl-2.0
jabez1314/linux
drivers/mfd/omap-usb-tll.c
84
13508
/**
 * omap-usb-tll.c - The USB TLL driver for OMAP EHCI & OHCI
 *
 * Copyright (C) 2012-2013 Texas Instruments Incorporated - http://www.ti.com
 * Author: Keshava Munegowda <keshava_mgowda@ti.com>
 * Author: Roger Quadros <rogerq@ti.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/usb-omap.h>
#include <linux/of.h>

#define USBTLL_DRIVER_NAME	"usbhs_tll"

/* TLL Register Set */
#define	OMAP_USBTLL_REVISION				(0x00)
#define	OMAP_USBTLL_SYSCONFIG				(0x10)
#define	OMAP_USBTLL_SYSCONFIG_CACTIVITY			(1 << 8)
#define	OMAP_USBTLL_SYSCONFIG_SIDLEMODE			(1 << 3)
#define	OMAP_USBTLL_SYSCONFIG_ENAWAKEUP			(1 << 2)
#define	OMAP_USBTLL_SYSCONFIG_SOFTRESET			(1 << 1)
#define	OMAP_USBTLL_SYSCONFIG_AUTOIDLE			(1 << 0)

#define	OMAP_USBTLL_SYSSTATUS				(0x14)
#define	OMAP_USBTLL_SYSSTATUS_RESETDONE			(1 << 0)

#define	OMAP_USBTLL_IRQSTATUS				(0x18)
#define	OMAP_USBTLL_IRQENABLE				(0x1C)

#define	OMAP_TLL_SHARED_CONF				(0x30)
#define	OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN		(1 << 6)
#define	OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN		(1 << 5)
#define	OMAP_TLL_SHARED_CONF_USB_DIVRATION		(1 << 2)
#define	OMAP_TLL_SHARED_CONF_FCLK_REQ			(1 << 1)
#define	OMAP_TLL_SHARED_CONF_FCLK_IS_ON			(1 << 0)

/* Per-channel config register; channels are 4 bytes apart. */
#define	OMAP_TLL_CHANNEL_CONF(num)			(0x040 + 0x004 * num)
#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT		24
#define	OMAP_TLL_CHANNEL_CONF_DRVVBUS			(1 << 16)
#define	OMAP_TLL_CHANNEL_CONF_CHRGVBUS			(1 << 15)
#define	OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF		(1 << 11)
#define	OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE		(1 << 10)
#define	OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE		(1 << 9)
#define	OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE		(1 << 8)
#define OMAP_TLL_CHANNEL_CONF_MODE_TRANSPARENT_UTMI	(2 << 1)
#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS		(1 << 1)
#define	OMAP_TLL_CHANNEL_CONF_CHANEN			(1 << 0)

/* FSLSMODE field values for the serial (OHCI) interface pin options. */
#define OMAP_TLL_FSLSMODE_6PIN_PHY_DAT_SE0		0x0
#define OMAP_TLL_FSLSMODE_6PIN_PHY_DP_DM		0x1
#define OMAP_TLL_FSLSMODE_3PIN_PHY			0x2
#define OMAP_TLL_FSLSMODE_4PIN_PHY			0x3
#define OMAP_TLL_FSLSMODE_6PIN_TLL_DAT_SE0		0x4
#define OMAP_TLL_FSLSMODE_6PIN_TLL_DP_DM		0x5
#define OMAP_TLL_FSLSMODE_3PIN_TLL			0x6
#define OMAP_TLL_FSLSMODE_4PIN_TLL			0x7
#define OMAP_TLL_FSLSMODE_2PIN_TLL_DAT_SE0		0xA
#define OMAP_TLL_FSLSMODE_2PIN_DAT_DP_DM		0xB

/* Per-channel ULPI register file; channels are 0x100 bytes apart. */
#define OMAP_TLL_ULPI_FUNCTION_CTRL(num)		(0x804 + 0x100 * num)
#define OMAP_TLL_ULPI_INTERFACE_CTRL(num)		(0x807 + 0x100 * num)
#define OMAP_TLL_ULPI_OTG_CTRL(num)			(0x80A + 0x100 * num)
#define OMAP_TLL_ULPI_INT_EN_RISE(num)			(0x80D + 0x100 * num)
#define OMAP_TLL_ULPI_INT_EN_FALL(num)			(0x810 + 0x100 * num)
#define OMAP_TLL_ULPI_INT_STATUS(num)			(0x813 + 0x100 * num)
#define OMAP_TLL_ULPI_INT_LATCH(num)			(0x814 + 0x100 * num)
#define OMAP_TLL_ULPI_DEBUG(num)			(0x815 + 0x100 * num)
#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num)		(0x816 + 0x100 * num)

#define OMAP_REV2_TLL_CHANNEL_COUNT			2
#define OMAP_TLL_CHANNEL_COUNT				3
#define OMAP_TLL_CHANNEL_1_EN_MASK			(1 << 0)
#define OMAP_TLL_CHANNEL_2_EN_MASK			(1 << 1)
#define OMAP_TLL_CHANNEL_3_EN_MASK			(1 << 2)

/* Values of USBTLL_REVISION - Note: these are not given in the TRM */
#define OMAP_USBTLL_REV1		0x00000015	/* OMAP3 */
#define OMAP_USBTLL_REV2		0x00000018	/* OMAP 3630 */
#define OMAP_USBTLL_REV3		0x00000004	/* OMAP4 */
#define OMAP_USBTLL_REV4		0x00000006	/* OMAP5 */

#define is_ehci_tll_mode(x)	(x == OMAP_EHCI_PORT_MODE_TLL)

/* only PHY and UNUSED modes don't need TLL */
#define omap_usb_mode_needs_tll(x)	((x) != OMAP_USBHS_PORT_MODE_UNUSED &&\
					 (x) != OMAP_EHCI_PORT_MODE_PHY)

struct usbtll_omap {
	int					nch;	/* num. of channels */
	struct clk				**ch_clk;
	void __iomem				*base;
};

/*-------------------------------------------------------------------------*/

static const char usbtll_driver_name[] = USBTLL_DRIVER_NAME;
static struct device	*tll_dev;
static DEFINE_SPINLOCK(tll_lock);	/* serialize access to tll_dev */

/*-------------------------------------------------------------------------*/

/* 32-bit register accessors (relaxed: no barriers, same CPU ordering only). */
static inline void usbtll_write(void __iomem *base, u32 reg, u32 val)
{
	writel_relaxed(val, base + reg);
}

static inline u32 usbtll_read(void __iomem *base, u32 reg)
{
	return readl_relaxed(base + reg);
}

/* 8-bit accessors, used for the byte-wide ULPI register file. */
static inline void usbtll_writeb(void __iomem *base, u8 reg, u8 val)
{
	writeb_relaxed(val, base + reg);
}

static inline u8 usbtll_readb(void __iomem *base, u8 reg)
{
	return readb_relaxed(base + reg);
}

/*-------------------------------------------------------------------------*/

/* True for every OHCI (full/low-speed serial) port mode variant. */
static bool is_ohci_port(enum usbhs_omap_port_mode pmode)
{
	switch (pmode) {
	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
	case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
	case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
		return true;

	default:
		return false;
	}
}

/*
 * convert the port-mode enum to a value we can use in the FSLSMODE
 * field of USBTLL_CHANNEL_CONF
 */
static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
{
	switch (mode) {
	case OMAP_USBHS_PORT_MODE_UNUSED:
	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_6PIN_PHY_DAT_SE0;

	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
		return OMAP_TLL_FSLSMODE_6PIN_PHY_DP_DM;

	case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_3PIN_PHY;

	case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
		return OMAP_TLL_FSLSMODE_4PIN_PHY;

	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_6PIN_TLL_DAT_SE0;

	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
		return OMAP_TLL_FSLSMODE_6PIN_TLL_DP_DM;

	case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_3PIN_TLL;

	case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
		return OMAP_TLL_FSLSMODE_4PIN_TLL;

	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_2PIN_TLL_DAT_SE0;

	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
		return OMAP_TLL_FSLSMODE_2PIN_DAT_DP_DM;

	default:
		pr_warn("Invalid port mode, using default\n");
		return OMAP_TLL_FSLSMODE_6PIN_PHY_DAT_SE0;
	}
}

/**
 * usbtll_omap_probe - initialize TI-based HCDs
 *
 * Allocates basic resources for this USB host controller.
 */
static int usbtll_omap_probe(struct platform_device *pdev)
{
	struct device				*dev =  &pdev->dev;
	struct resource				*res;
	struct usbtll_omap			*tll;
	int					ret = 0;
	int					i, ver;

	dev_dbg(dev, "starting TI HSUSB TLL Controller\n");

	tll = devm_kzalloc(dev, sizeof(struct usbtll_omap), GFP_KERNEL);
	if (!tll) {
		dev_err(dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tll->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(tll->base))
		return PTR_ERR(tll->base);

	platform_set_drvdata(pdev, tll);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* Derive the channel count from the IP revision register. */
	ver =  usbtll_read(tll->base, OMAP_USBTLL_REVISION);
	switch (ver) {
	case OMAP_USBTLL_REV1:
	case OMAP_USBTLL_REV4:
		tll->nch = OMAP_TLL_CHANNEL_COUNT;
		break;
	case OMAP_USBTLL_REV2:
	case OMAP_USBTLL_REV3:
		tll->nch = OMAP_REV2_TLL_CHANNEL_COUNT;
		break;
	default:
		tll->nch = OMAP_TLL_CHANNEL_COUNT;
		dev_dbg(dev,
		 "USB TLL Rev : 0x%x not recognized, assuming %d channels\n",
			ver, tll->nch);
		break;
	}

	tll->ch_clk = devm_kzalloc(dev, sizeof(struct clk *) * tll->nch,
						GFP_KERNEL);
	if (!tll->ch_clk) {
		ret = -ENOMEM;
		dev_err(dev, "Couldn't allocate memory for channel clocks\n");
		goto err_clk_alloc;
	}

	/*
	 * Look up and prepare one functional clock per channel.  A missing
	 * clock is tolerated (stored as ERR_PTR and skipped at enable time).
	 */
	for (i = 0; i < tll->nch; i++) {
		char clkname[] = "usb_tll_hs_usb_chx_clk";

		snprintf(clkname, sizeof(clkname),
					"usb_tll_hs_usb_ch%d_clk", i);
		tll->ch_clk[i] = clk_get(dev, clkname);

		if (IS_ERR(tll->ch_clk[i]))
			dev_dbg(dev, "can't get clock : %s\n", clkname);
		else
			clk_prepare(tll->ch_clk[i]);
	}

	pm_runtime_put_sync(dev);
	/* only after this can omap_tll_enable/disable work */
	spin_lock(&tll_lock);
	tll_dev = dev;
	spin_unlock(&tll_lock);

	return 0;

err_clk_alloc:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return ret;
}

/**
 * usbtll_omap_remove - shutdown processing for UHH & TLL HCDs
 * @pdev: USB Host Controller being removed
 *
 * Reverses the effect of usbtll_omap_probe().
 */
static int usbtll_omap_remove(struct platform_device *pdev)
{
	struct usbtll_omap *tll = platform_get_drvdata(pdev);
	int i;

	/* Unpublish first so omap_tll_* stop finding the device. */
	spin_lock(&tll_lock);
	tll_dev = NULL;
	spin_unlock(&tll_lock);

	for (i = 0; i < tll->nch; i++) {
		if (!IS_ERR(tll->ch_clk[i])) {
			clk_unprepare(tll->ch_clk[i]);
			clk_put(tll->ch_clk[i]);
		}
	}

	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id usbtll_omap_dt_ids[] = {
	{ .compatible = "ti,usbhs-tll" },
	{ }
};

MODULE_DEVICE_TABLE(of, usbtll_omap_dt_ids);

static struct platform_driver usbtll_omap_driver = {
	.driver = {
		.name		= (char *)usbtll_driver_name,
		.of_match_table = usbtll_omap_dt_ids,
	},
	.probe		= usbtll_omap_probe,
	.remove		= usbtll_omap_remove,
};

/*
 * One-time TLL setup called by the usbhs core: programs the shared config
 * register and enables/configures each channel whose port mode needs the
 * TLL.  Returns -ENODEV if the TLL device has not been probed yet.
 *
 * NOTE(review): tll_dev is tested before taking tll_lock here (and in
 * omap_tll_enable/disable), so it can race with usbtll_omap_remove()
 * clearing it -- confirm whether callers guarantee ordering.
 */
int omap_tll_init(struct usbhs_omap_platform_data *pdata)
{
	int i;
	bool needs_tll;
	unsigned reg;
	struct usbtll_omap *tll;

	if (!tll_dev)
		return -ENODEV;

	pm_runtime_get_sync(tll_dev);

	spin_lock(&tll_lock);
	tll = dev_get_drvdata(tll_dev);
	needs_tll = false;
	for (i = 0; i < tll->nch; i++)
		needs_tll |= omap_usb_mode_needs_tll(pdata->port_mode[i]);

	if (needs_tll) {
		void __iomem *base = tll->base;

		/* Program Common TLL register */
		reg = usbtll_read(base, OMAP_TLL_SHARED_CONF);
		reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON
			| OMAP_TLL_SHARED_CONF_USB_DIVRATION);
		reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
		reg &= ~OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN;

		usbtll_write(base, OMAP_TLL_SHARED_CONF, reg);

		/* Enable channels now */
		for (i = 0; i < tll->nch; i++) {
			reg = usbtll_read(base,	OMAP_TLL_CHANNEL_CONF(i));

			if (is_ohci_port(pdata->port_mode[i])) {
				reg |= ohci_omap3_fslsmode(pdata->port_mode[i])
				<< OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT;
				reg |= OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS;
			} else if (pdata->port_mode[i] ==
					OMAP_EHCI_PORT_MODE_TLL) {
				/*
				 * Disable AutoIdle, BitStuffing
				 * and use SDR Mode
				 */
				reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
					| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
					| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
			} else if (pdata->port_mode[i] ==
					OMAP_EHCI_PORT_MODE_HSIC) {
				/*
				 * HSIC Mode requires UTMI port configurations
				 */
				reg |= OMAP_TLL_CHANNEL_CONF_DRVVBUS
				 | OMAP_TLL_CHANNEL_CONF_CHRGVBUS
				 | OMAP_TLL_CHANNEL_CONF_MODE_TRANSPARENT_UTMI
				 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
			} else {
				continue;
			}
			reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
			usbtll_write(base, OMAP_TLL_CHANNEL_CONF(i), reg);

			usbtll_writeb(base,
				      OMAP_TLL_ULPI_SCRATCH_REGISTER(i),
				      0xbe);
		}
	}

	spin_unlock(&tll_lock);
	pm_runtime_put_sync(tll_dev);

	return 0;
}
EXPORT_SYMBOL_GPL(omap_tll_init);

/*
 * Enable the per-channel functional clocks for every port that needs the
 * TLL.  Leaves the TLL device runtime-PM-active (balanced by
 * omap_tll_disable()).  Missing clocks (ERR_PTR) are skipped.
 */
int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
{
	int i;
	struct usbtll_omap *tll;

	if (!tll_dev)
		return -ENODEV;

	pm_runtime_get_sync(tll_dev);

	spin_lock(&tll_lock);
	tll = dev_get_drvdata(tll_dev);

	for (i = 0; i < tll->nch; i++) {
		if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
			int r;

			if (IS_ERR(tll->ch_clk[i]))
				continue;

			/* clk_enable (not clk_prepare) is safe under spinlock */
			r = clk_enable(tll->ch_clk[i]);
			if (r) {
				dev_err(tll_dev,
				 "Error enabling ch %d clock: %d\n", i, r);
			}
		}
	}

	spin_unlock(&tll_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(omap_tll_enable);

/*
 * Counterpart of omap_tll_enable(): disable the per-channel clocks and
 * drop the runtime-PM reference taken there.
 */
int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
{
	int i;
	struct usbtll_omap *tll;

	if (!tll_dev)
		return -ENODEV;

	spin_lock(&tll_lock);
	tll = dev_get_drvdata(tll_dev);

	for (i = 0; i < tll->nch; i++) {
		if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
			if (!IS_ERR(tll->ch_clk[i]))
				clk_disable(tll->ch_clk[i]);
		}
	}

	spin_unlock(&tll_lock);
	pm_runtime_put_sync(tll_dev);

	return 0;
}
EXPORT_SYMBOL_GPL(omap_tll_disable);

MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb tll driver for TI OMAP EHCI and OHCI controllers");

static int __init omap_usbtll_drvinit(void)
{
	return platform_driver_register(&usbtll_omap_driver);
}

/*
 * init before usbhs core driver;
 * The usbtll driver should be initialized before
 * the usbhs core driver probe function is called.
 */
fs_initcall(omap_usbtll_drvinit);

static void __exit omap_usbtll_drvexit(void)
{
	platform_driver_unregister(&usbtll_omap_driver);
}
module_exit(omap_usbtll_drvexit);
gpl-2.0
Quallenauge/kernel-archosg9
arch/arm/mach-omap2/omap4-mpuss-lowpower.c
84
24367
/* * OMAP4 MPUSS low power code * * Copyright (C) 2011 Texas Instruments, Inc. * Written by Santosh Shilimkar <santosh.shilimkar@ti.com> * * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller, * CPU0 and CPU1 LPRM modules. * CPU0, CPU1 and MPUSS each have there own power domain and * hence multiple low power combinations of MPUSS are possible. * * The CPU0 and CPU1 can't support Closed switch Retention (CSWR) * because the mode is not supported by hw constraints of dormant * mode. While waking up from the dormant mode, a reset signal * to the Cortex-A9 processor must be asserted by the external * power controller. * * With architectural inputs and hardware recommendations, only * below modes are supported from power gain vs latency point of view. * * CPU0 CPU1 MPUSS * ---------------------------------------------- * ON ON ON * ON(Inactive) OFF ON(Inactive) * OFF OFF CSWR * OFF OFF OSWR * OFF OFF OFF * ---------------------------------------------- * * Note: CPU0 is the master core and it is the last CPU to go down * and first to wake-up when MPUSS low power states are excercised * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ #include <linux/kernel.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/linkage.h> #include <linux/smp.h> #include <asm/cacheflush.h> #include <linux/dma-mapping.h> #include <asm/tlbflush.h> #include <asm/smp_scu.h> #include <asm/system.h> #include <asm/irq.h> #include <asm/hardware/gic.h> #include <asm/hardware/cache-l2x0.h> #include <plat/omap44xx.h> #include <mach/omap4-common.h> #include <mach/omap-wakeupgen.h> #include <linux/clk.h> #include "omap4-sar-layout.h" #include "pm.h" #include "prcm_mpu44xx.h" #include "prminst44xx.h" #include "prcm44xx.h" #include "prm44xx.h" #include "prm-regbits-44xx.h" #include "cm.h" #include "prm.h" #include "cm44xx.h" #include "prcm-common.h" #include "clockdomain.h" #ifdef CONFIG_SMP #define GIC_MASK_ALL 0x0 #define GIC_ISR_NON_SECURE 0xffffffff #define SPI_ENABLE_SET_OFFSET 0x04 #define PPI_PRI_OFFSET 0x1c #define SPI_PRI_OFFSET 0x20 #define SPI_TARGET_OFFSET 0x20 #define SPI_CONFIG_OFFSET 0x20 /* GIC save SAR bank base */ static struct powerdomain *mpuss_pd; /* * Maximum Secure memory storage size. */ #define OMAP4_SECURE_RAM_STORAGE (88 * SZ_1K) /* * Physical address of secure memory storage */ dma_addr_t omap4_secure_ram_phys; static void *secure_ram; struct clk *l3_main_3_ick; /* Variables to store maximum spi(Shared Peripheral Interrupts) registers. 
*/ static u32 max_spi_irq, max_spi_reg; struct omap4_cpu_pm_info { struct powerdomain *pwrdm; void __iomem *scu_sar_addr; }; static void __iomem *gic_dist_base; static void __iomem *gic_cpu_base; static void __iomem *sar_base; static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); #define PPI_CONTEXT_SIZE 11 static DEFINE_PER_CPU(u32[PPI_CONTEXT_SIZE], gic_ppi_context); static DEFINE_PER_CPU(u32, gic_ppi_enable_mask); /* Helper functions */ static inline void sar_writel(u32 val, u32 offset, u8 idx) { __raw_writel(val, sar_base + offset + 4 * idx); } static inline u32 gic_readl(u32 offset, u8 idx) { return __raw_readl(gic_dist_base + offset + 4 * idx); } u32 gic_cpu_read(u32 reg) { return __raw_readl(gic_cpu_base + reg); } /* * Set the CPUx powerdomain's previous power state */ static inline void set_cpu_next_pwrst(unsigned int cpu_id, unsigned int power_state) { struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); } /* * Read CPU's previous power state */ static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id) { struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); return pwrdm_read_prev_pwrst(pm_info->pwrdm); } /* * Clear the CPUx powerdomain's previous power state */ static inline void clear_cpu_prev_pwrst(unsigned int cpu_id) { struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); } struct reg_tuple { void __iomem *addr; u32 val; }; static struct reg_tuple tesla_reg[] = { {.addr = OMAP4430_CM_TESLA_CLKSTCTRL}, {.addr = OMAP4430_CM_TESLA_TESLA_CLKCTRL}, {.addr = OMAP4430_PM_TESLA_PWRSTCTRL}, }; static struct reg_tuple ivahd_reg[] = { {.addr = OMAP4430_CM_IVAHD_CLKSTCTRL}, {.addr = OMAP4430_CM_IVAHD_IVAHD_CLKCTRL}, {.addr = OMAP4430_CM_IVAHD_SL2_CLKCTRL}, {.addr = OMAP4430_PM_IVAHD_PWRSTCTRL} }; static struct reg_tuple l3instr_reg[] = { {.addr = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL}, {.addr = 
OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL}, {.addr = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL}, }; /* * Store the SCU power status value to scratchpad memory */ static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state) { struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); u32 scu_pwr_st; switch (cpu_state) { case PWRDM_POWER_RET: scu_pwr_st = SCU_PM_DORMANT; break; case PWRDM_POWER_OFF: scu_pwr_st = SCU_PM_POWEROFF; break; case PWRDM_POWER_ON: case PWRDM_POWER_INACTIVE: default: scu_pwr_st = SCU_PM_NORMAL; break; } __raw_writel(scu_pwr_st, pm_info->scu_sar_addr); } static void gic_save_ppi(void) { void __iomem *gic_dist_base = omap4_get_gic_dist_base(); u32 *context = __get_cpu_var(gic_ppi_context); int i = 0; context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x4); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x8); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0xc); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x10); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x14); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x18); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x1c); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_CONFIG); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_CONFIG + 0x4); context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_ENABLE_SET); BUG_ON(i != PPI_CONTEXT_SIZE); } static void gic_restore_ppi(void) { void __iomem *gic_dist_base = omap4_get_gic_dist_base(); u32 *context = __get_cpu_var(gic_ppi_context); int i = 0; writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI); writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x4); writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x8); writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0xc); writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x10); 
writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x14); writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x18); writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x1c); writel_relaxed(context[i++], gic_dist_base + GIC_DIST_CONFIG); writel_relaxed(context[i++], gic_dist_base + GIC_DIST_CONFIG + 0x4); writel_relaxed(context[i++], gic_dist_base + GIC_DIST_ENABLE_SET); BUG_ON(i != PPI_CONTEXT_SIZE); } /* * Mask all the PPIs. This should only be called after they have been saved * through secure trap or through save_ppi(). This is primarily needed to * mask the local timer irq that could be pending since timekeeping gets * suspended after the local irqs are disabled. The pending interrupt would * kick the CPU out of WFI immediately, and prevent it from going to the lower * power states. The correct value will be restored when the CPU is brought * back up by restore. */ static void gic_mask_ppi(void) { void __iomem *gic_dist_base = omap4_get_gic_dist_base(); __get_cpu_var(gic_ppi_enable_mask) = readl_relaxed(gic_dist_base + GIC_DIST_ENABLE_SET); writel_relaxed(0xffffffff, gic_dist_base + GIC_DIST_ENABLE_CLEAR); } static void gic_unmask_ppi(void) { void __iomem *gic_dist_base = omap4_get_gic_dist_base(); writel_relaxed(__get_cpu_var(gic_ppi_enable_mask), gic_dist_base + GIC_DIST_ENABLE_SET); } /* * Save GIC context in SAR RAM. Restore is done by ROM code * GIC is lost only when MPU hits OSWR or OFF. It consists * of a distributor and a per-CPU interface module. The GIC * save restore is optimised to save only necessary registers. */ static void gic_save_context(void) { u8 i; u32 val; /* * Interrupt Clear Enable registers are inverse of set enable * and hence not needed to be saved. ROM code programs it * based on Set Enable register values. 
*/ /* Save CPU 0 Interrupt Set Enable register */ val = gic_readl(GIC_DIST_ENABLE_SET, 0); sar_writel(val, ICDISER_CPU0_OFFSET, 0); /* Disable interrupts on CPU1 */ sar_writel(GIC_MASK_ALL, ICDISER_CPU1_OFFSET, 0); /* Save all SPI Set Enable register */ for (i = 0; i < max_spi_reg; i++) { val = gic_readl(GIC_DIST_ENABLE_SET + SPI_ENABLE_SET_OFFSET, i); sar_writel(val, ICDISER_SPI_OFFSET, i); } /* * Interrupt Priority Registers * Secure sw accesses, last 5 bits of the 8 bits (bit[7:3] are used) * Non-Secure sw accesses, last 4 bits (i.e. bits[7:4] are used) * But the Secure Bits[7:3] are shifted by 1 in Non-Secure access. * Secure (bits[7:3] << 1)== Non Secure bits[7:4] * Hence right shift the value by 1 while saving the priority */ /* Save SGI priority registers (Software Generated Interrupt) */ for (i = 0; i < 4; i++) { val = gic_readl(GIC_DIST_PRI, i); /* Save the priority bits of the Interrupts */ sar_writel(val >> 0x1, ICDIPR_SFI_CPU0_OFFSET, i); /* Disable the interrupts on CPU1 */ sar_writel(GIC_MASK_ALL, ICDIPR_SFI_CPU1_OFFSET, i); } /* Save PPI priority registers (Private Peripheral Intterupts) */ val = gic_readl(GIC_DIST_PRI + PPI_PRI_OFFSET, 0); sar_writel(val >> 0x1, ICDIPR_PPI_CPU0_OFFSET, 0); sar_writel(GIC_MASK_ALL, ICDIPR_PPI_CPU1_OFFSET, 0); /* SPI priority registers - 4 interrupts/register */ for (i = 0; i < (max_spi_irq / 4); i++) { val = gic_readl((GIC_DIST_PRI + SPI_PRI_OFFSET), i); sar_writel(val >> 0x1, ICDIPR_SPI_OFFSET, i); } /* SPI Interrupt Target registers - 4 interrupts/register */ for (i = 0; i < (max_spi_irq / 4); i++) { val = gic_readl((GIC_DIST_TARGET + SPI_TARGET_OFFSET), i); sar_writel(val, ICDIPTR_SPI_OFFSET, i); } /* SPI Interrupt Congigeration eegisters- 16 interrupts/register */ for (i = 0; i < (max_spi_irq / 16); i++) { val = gic_readl((GIC_DIST_CONFIG + SPI_CONFIG_OFFSET), i); sar_writel(val, ICDICFR_OFFSET, i); } /* Set the Backup Bit Mask status for GIC */ val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET); val |= 
(SAR_BACKUP_STATUS_GIC_CPU0 | SAR_BACKUP_STATUS_GIC_CPU1); __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET); } /* * API to save GIC and Wakeupgen using secure API * for HS/EMU device */ static void save_gic_wakeupgen_secure(void) { u32 ret; ret = omap4_secure_dispatcher(HAL_SAVEGIC_INDEX, FLAG_START_CRITICAL, 0, 0, 0, 0, 0); if (!ret) pr_debug("GIC and Wakeupgen context save failed\n"); } /* * API to save Secure RAM, GIC, WakeupGen Registers using secure API * for HS/EMU device */ static void save_secure_all(void) { u32 ret; ret = omap4_secure_dispatcher(HAL_SAVEALL_INDEX, FLAG_START_CRITICAL, 1, omap4_secure_ram_phys, 0, 0, 0); if (ret) pr_debug("Secure all context save failed\n"); } /* * API to save Secure RAM using secure API * for HS/EMU device */ static void save_secure_ram(void) { u32 ret; ret = omap4_secure_dispatcher(HAL_SAVESECURERAM_INDEX, FLAG_START_CRITICAL, 1, omap4_secure_ram_phys, 0, 0, 0); if (!ret) pr_debug("Secure ram context save failed\n"); } /* Helper functions for MPUSS OSWR */ static inline u32 mpuss_read_prev_logic_pwrst(void) { u32 reg; reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); reg &= OMAP4430_LOSTCONTEXT_DFF_MASK; return reg; } static inline void mpuss_clear_prev_logic_pwrst(void) { u32 reg; reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION, OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); } static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id) { u32 reg; if (cpu_id) { reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST, OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET); omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST, OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET); } else { reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST, OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET); omap4_prcm_mpu_write_inst_reg(reg, 
OMAP4430_PRCM_MPU_CPU0_INST, OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET); } } static inline void save_ivahd_tesla_regs(void) { int i; for (i = 0; i < ARRAY_SIZE(tesla_reg); i++) tesla_reg[i].val = __raw_readl(tesla_reg[i].addr); for (i = 0; i < ARRAY_SIZE(ivahd_reg); i++) ivahd_reg[i].val = __raw_readl(ivahd_reg[i].addr); } static inline void restore_ivahd_tesla_regs(void) { int i; for (i = 0; i < ARRAY_SIZE(tesla_reg); i++) __raw_writel(tesla_reg[i].val, tesla_reg[i].addr); for (i = 0; i < ARRAY_SIZE(ivahd_reg); i++) __raw_writel(ivahd_reg[i].val, ivahd_reg[i].addr); } static inline void save_l3instr_regs(void) { int i; for (i = 0; i < ARRAY_SIZE(l3instr_reg); i++) l3instr_reg[i].val = __raw_readl(l3instr_reg[i].addr); } static inline void restore_l3instr_regs(void) { int i; for (i = 0; i < ARRAY_SIZE(l3instr_reg); i++) __raw_writel(l3instr_reg[i].val, l3instr_reg[i].addr); } /* * OMAP4 MPUSS Low Power Entry Function * * The purpose of this function is to manage low power programming * of OMAP4 MPUSS subsystem * Paramenters: * cpu : CPU ID * power_state: Targetted Low power state. * * MPUSS Low power states * The basic rule is that the MPUSS power domain must be at the higher or * equal power state (state that consume more power) than the higher of the * two CPUs. For example, it is illegal for system power to be OFF, while * the power of one or both of the CPU is DORMANT. When an illegal state is * entered, then the hardware behavior is unpredictable. 
 *
 * MPUSS state for the context save
 * save_state =
 *	0 - Nothing lost and no need to save: MPUSS INACTIVE
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 */
int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
{
	unsigned int save_state = 0;
	unsigned int wakeup_cpu;
	unsigned int inst_clk_enab = 0;

	if ((cpu >= NR_CPUS) || (omap_rev() == OMAP4430_REV_ES1_0))
		goto ret;

	/* Map the requested CPU power state onto a context-save level */
	switch (power_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 1;
		break;
	case PWRDM_POWER_RET:
	default:
		/*
		 * CPUx CSWR is invalid hardware state. Also CPUx OSWR
		 * doesn't make much scense, since logic is lost and $L1
		 * needs to be cleaned because of coherency. This makes
		 * CPUx OSWR equivalent to CPUX OFF and hence not supported
		 */
		WARN_ON(1);
		goto ret;
	}

	/*
	 * MPUSS book keeping should be executed by master
	 * CPU only which is also the last CPU to go down.
	 */
	if (cpu)
		goto cpu_prepare;

	pwrdm_pre_transition();

	/*
	 * Check MPUSS next state and save GIC if needed
	 * GIC lost during MPU OFF and OSWR
	 */
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();
	if (omap4_device_next_state_off()) {
		if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
			omap_wakeupgen_save();
			gic_save_context();
		} else {
			/* FIXME: Check if this can be optimised */
			/* l3_main inst clock must be enabled for
			 * a save ram operation
			 */
			if (!l3_main_3_ick->usecount) {
				inst_clk_enab = 1;
				clk_enable(l3_main_3_ick);
			}
			save_secure_all();
			if (inst_clk_enab == 1)
				clk_disable(l3_main_3_ick);
			save_ivahd_tesla_regs();
			save_l3instr_regs();
		}
		save_state = 3;
		goto cpu_prepare;
	}

	switch (pwrdm_read_next_pwrst(mpuss_pd)) {
	case PWRDM_POWER_RET:
		/*
		 * MPUSS OSWR - Complete logic lost + L2$ retained.
		 * MPUSS CSWR - Complete logic retained + L2$ retained.
		 */
		if (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF) {
			if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
				omap_wakeupgen_save();
				gic_save_context();
			} else {
				save_gic_wakeupgen_secure();
				save_ivahd_tesla_regs();
				save_l3instr_regs();
			}
			save_state = 2;
		}
		break;
	case PWRDM_POWER_OFF:
		/* MPUSS OFF - logic lost + L2$ lost */
		if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
			omap_wakeupgen_save();
			gic_save_context();
		} else {
			/* l3_main inst clock must be enabled for
			 * a save ram operation
			 */
			if (!l3_main_3_ick->usecount) {
				inst_clk_enab = 1;
				clk_enable(l3_main_3_ick);
			}
			save_gic_wakeupgen_secure();
			save_ivahd_tesla_regs();
			save_l3instr_regs();
			save_secure_ram();
			if (inst_clk_enab == 1)
				clk_disable(l3_main_3_ick);
		}
		save_state = 3;
		break;
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		/* No need to save MPUSS context */
	default:
		;
	}

cpu_prepare:
	if (cpu)
		gic_save_ppi();

	/*
	 * mask all PPIs to prevent them from kicking us out of wfi.
	 */
	gic_mask_ppi();

	clear_cpu_prev_pwrst(cpu);
	cpu_clear_prev_logic_pwrst(cpu);
	set_cpu_next_pwrst(cpu, power_state);
	scu_pwrst_prepare(cpu, power_state);

	/*
	 * Call low level function with targeted CPU id
	 * and its low power state.
	 */
	stop_critical_timings();
	omap4_cpu_suspend(cpu, save_state);
	start_critical_timings();

	/*
	 * Restore the CPUx power state to ON otherwise CPUx
	 * power domain can transitions to programmed low power
	 * state while doing WFI outside the low powe code. On
	 * secure devices, CPUx does WFI which can result in
	 * domain transition
	 */
	wakeup_cpu = hard_smp_processor_id();
	set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON);

	/*
	 * If we didn't actually get into the low power state (e.g. immediately
	 * exited wfi due to a pending interrupt), the secure side
	 * would not have restored CPU0's GIC PPI enable mask.
	 * For other CPUs, gic_restore_ppi will do that for us.
	 */
	if (cpu)
		gic_restore_ppi();
	else
		gic_unmask_ppi();

	/*
	 * If !master cpu return to hotplug-path.
	 *
	 * GIC distributor control register has changed between
	 * CortexA9 r1pX and r2pX. The Control Register secure
	 * banked version is now composed of 2 bits:
	 *	bit 0 == Secure Enable
	 *	bit 1 == Non-Secure Enable
	 * The Non-Secure banked register has not changed
	 * Because the ROM Code is based on the r1pX GIC, the CPU1
	 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
	 * The workaround must be:
	 * 1) Before doing the CPU1 wakeup, CPU0 must disable
	 * the GIC distributor
	 * 2) CPU1 must re-enable the GIC distributor on
	 * it's wakeup path.
	 */
	if (wakeup_cpu) {
		if (!cpu_is_omap443x())
			gic_dist_enable();
		goto ret;
	}

	/* Check if MPUSS lost it's logic */
	if (mpuss_read_prev_logic_pwrst()) {
		/* Clear SAR BACKUP status on GP devices */
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			__raw_writel(0x0, sar_base + SAR_BACKUP_STATUS_OFFSET);
		/* Enable GIC distributor and interface on CPU0*/
		gic_cpu_enable();
		gic_dist_enable();

		if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
			/*
			 * Dummy dispatcher call after OSWR and OFF
			 * Restore the right return Kernel address (with MMU on) for
			 * subsequent calls to secure ROM. Otherwise the return address
			 * will be to a PA return address and the system will hang.
			 */
			omap4_secure_dispatcher(PPA_SERVICE_0,
						FLAG_START_CRITICAL,
						0, 0, 0, 0, 0);

			/* Due to ROM BUG at wake up from MPU OSWR/OFF
			 * on HS/EMU device only (not GP device),
			 * the ROM Code reconfigures some of
			 * IVAHD/TESLA/L3INSTR registers.
			 * So these IVAHD/TESLA and L3INSTR registers
			 * need to be restored.
			 */
			restore_ivahd_tesla_regs();
			restore_l3instr_regs();
		}
	}

	pwrdm_post_transition();

ret:
	return 0;
}

static void save_l2x0_auxctrl(void)
{
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Save the L2X0 AUXCTRL value to SAR memory. Its used to
	 * in every restore patch MPUSS OFF path.
	 */
	void __iomem *l2x0_base = omap4_get_l2cache_base();
	u32 val;

	val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
	__raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);

	/*
	 * Save the L2X0 PREFETCH_CTRL value to SAR memory.
	 * Its used in every restore path MPUSS OFF path.
	 */
	val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
	__raw_writel(val, sar_base + L2X0_PREFETCHCTRL_OFFSET);

	/* Save L2X0 LOCKDOWN_OFFSET0 during SAR */
	val = readl_relaxed(l2x0_base + 0x900);
	writel_relaxed(val, sar_base + L2X0_LOCKDOWN_OFFSET0);
#endif
}

/*
 * Initialise OMAP4 MPUSS
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;
	u8 i;

	/* Get GIC and SAR RAM base addresses */
	sar_base = omap4_get_sar_ram_base();
	gic_dist_base = omap4_get_gic_dist_base();
	gic_cpu_base = omap4_get_gic_cpu_base();

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	/* Initialise per CPU PM information */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pm_info = &per_cpu(omap4_pm_info, 0x1);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/*
	 * Check the OMAP type and store it to scratchpad
	 */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/* Memory not released */
		secure_ram = dma_alloc_coherent(NULL, OMAP4_SECURE_RAM_STORAGE,
			(dma_addr_t *)&omap4_secure_ram_phys, GFP_ATOMIC);
		if (!secure_ram)
			pr_err("Unable to allocate secure ram storage\n");
		writel(0x1, sar_base + OMAP_TYPE_OFFSET);
	} else {
		writel(0x0, sar_base + OMAP_TYPE_OFFSET);
	}

	/*
	 * Store OMAP revision to scratchpad.
	 * ATM required for POR restore on GP devices.
	 */
	writel(omap_rev(), sar_base + OMAP_REV_OFFSET);

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	/*
	 * Program the wakeup routine address for the CPU0 and CPU1
	 * used for OFF or DORMANT wakeup. Wakeup routine address
	 * is fixed so program it in init itself.
	 */
	__raw_writel(virt_to_phys(omap4_cpu_resume),
			sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
	__raw_writel(virt_to_phys(omap4_cpu_resume),
			sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to get lookup for MPUSS pwrdm\n");
		return -ENODEV;
	}

	/*
	 * NOTE(review): clk_get() result is not checked here; callers in
	 * omap4_enter_lowpower() dereference l3_main_3_ick->usecount —
	 * confirm the clock always exists on this platform.
	 */
	l3_main_3_ick = clk_get(NULL, "l3_main_3_ick");

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	/*
	 * Find out how many interrupts are supported.
	 * OMAP4 supports max of 128 SPIs where as GIC can support
	 * up to 1020 interrupt sources. On OMAP4, maximum SPIs are
	 * fused in DIST_CTR bit-fields as 128. Hence the code is safe
	 * from reserved register writes since its well within 1020.
	 */
	max_spi_reg = __raw_readl(gic_dist_base + GIC_DIST_CTR) & 0x1f;
	max_spi_irq = max_spi_reg * 32;

	/*
	 * Mark the PPI and SPI interrupts as non-secure.
	 * program the SAR locations for interrupt security registers to
	 * reflect the same.
	 */
	if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
		sar_writel(GIC_ISR_NON_SECURE, ICDISR_CPU0_OFFSET, 0);
		sar_writel(GIC_ISR_NON_SECURE, ICDISR_CPU1_OFFSET, 0);
		for (i = 0; i < max_spi_reg; i++)
			sar_writel(GIC_ISR_NON_SECURE, ICDISR_SPI_OFFSET, i);
	}

	save_l2x0_auxctrl();

	return 0;
}
#endif
gpl-2.0
Philippe12/linux-3.4-a20
fs/ubifs/dir.c
340
33224
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 * Copyright (C) 2006, 2007 University of Szeged, Hungary
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 *          Zoltan Sogor
 */

/*
 * This file implements directory operations.
 *
 * All FS operations in this file allocate budget before writing anything to the
 * media. If they fail to allocate it, the error is returned. The only
 * exceptions are 'ubifs_unlink()' and 'ubifs_rmdir()' which keep working even
 * if they are unable to allocate the budget, because a deletion %-ENOSPC
 * failure is not what users are usually ready to get. UBIFS budgeting
 * subsystem has some space reserved for these purposes.
 *
 * All operations in this file write all inodes which they change straight
 * away, instead of marking them dirty. For example, 'ubifs_link()' changes
 * @i_size of the parent inode and writes the parent inode together with the
 * target inode. This was done to simplify file-system recovery which would
 * otherwise be very difficult to do. The only exception is rename, which marks
 * the re-named inode dirty (because its @i_ctime is updated) but does not
 * write it — it is just marked as dirty.
 */

#include "ubifs.h"

/**
 * inherit_flags - inherit flags of the parent inode.
* @dir: parent inode * @mode: new inode mode flags * * This is a helper function for 'ubifs_new_inode()' which inherits flag of the * parent directory inode @dir. UBIFS inodes inherit the following flags: * o %UBIFS_COMPR_FL, which is useful to switch compression on/of on * sub-directory basis; * o %UBIFS_SYNC_FL - useful for the same reasons; * o %UBIFS_DIRSYNC_FL - similar, but relevant only to directories. * * This function returns the inherited flags. */ static int inherit_flags(const struct inode *dir, umode_t mode) { int flags; const struct ubifs_inode *ui = ubifs_inode(dir); if (!S_ISDIR(dir->i_mode)) /* * The parent is not a directory, which means that an extended * attribute inode is being created. No flags. */ return 0; flags = ui->flags & (UBIFS_COMPR_FL | UBIFS_SYNC_FL | UBIFS_DIRSYNC_FL); if (!S_ISDIR(mode)) /* The "DIRSYNC" flag only applies to directories */ flags &= ~UBIFS_DIRSYNC_FL; return flags; } /** * ubifs_new_inode - allocate new UBIFS inode object. * @c: UBIFS file-system description object * @dir: parent directory inode * @mode: inode mode flags * * This function finds an unused inode number, allocates new inode and * initializes it. Returns new inode in case of success and an error code in * case of failure. */ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir, umode_t mode) { struct inode *inode; struct ubifs_inode *ui; inode = new_inode(c->vfs_sb); ui = ubifs_inode(inode); if (!inode) return ERR_PTR(-ENOMEM); /* * Set 'S_NOCMTIME' to prevent VFS form updating [mc]time of inodes and * marking them dirty in file write path (see 'file_update_time()'). * UBIFS has to fully control "clean <-> dirty" transitions of inodes * to make budgeting work. 
*/ inode->i_flags |= S_NOCMTIME; inode_init_owner(inode, dir, mode); inode->i_mtime = inode->i_atime = inode->i_ctime = ubifs_current_time(inode); inode->i_mapping->nrpages = 0; /* Disable readahead */ inode->i_mapping->backing_dev_info = &c->bdi; switch (mode & S_IFMT) { case S_IFREG: inode->i_mapping->a_ops = &ubifs_file_address_operations; inode->i_op = &ubifs_file_inode_operations; inode->i_fop = &ubifs_file_operations; break; case S_IFDIR: inode->i_op = &ubifs_dir_inode_operations; inode->i_fop = &ubifs_dir_operations; inode->i_size = ui->ui_size = UBIFS_INO_NODE_SZ; break; case S_IFLNK: inode->i_op = &ubifs_symlink_inode_operations; break; case S_IFSOCK: case S_IFIFO: case S_IFBLK: case S_IFCHR: inode->i_op = &ubifs_file_inode_operations; break; default: BUG(); } ui->flags = inherit_flags(dir, mode); ubifs_set_inode_flags(inode); if (S_ISREG(mode)) ui->compr_type = c->default_compr; else ui->compr_type = UBIFS_COMPR_NONE; ui->synced_i_size = 0; spin_lock(&c->cnt_lock); /* Inode number overflow is currently not supported */ if (c->highest_inum >= INUM_WARN_WATERMARK) { if (c->highest_inum >= INUM_WATERMARK) { spin_unlock(&c->cnt_lock); ubifs_err("out of inode numbers"); make_bad_inode(inode); iput(inode); return ERR_PTR(-EINVAL); } ubifs_warn("running out of inode numbers (current %lu, max %d)", (unsigned long)c->highest_inum, INUM_WATERMARK); } inode->i_ino = ++c->highest_inum; /* * The creation sequence number remains with this inode for its * lifetime. All nodes for this inode have a greater sequence number, * and so it is possible to distinguish obsolete nodes belonging to a * previous incarnation of the same inode number - for example, for the * purpose of rebuilding the index. 
*/ ui->creat_sqnum = ++c->max_sqnum; spin_unlock(&c->cnt_lock); return inode; } #ifdef CONFIG_UBIFS_FS_DEBUG static int dbg_check_name(const struct ubifs_info *c, const struct ubifs_dent_node *dent, const struct qstr *nm) { if (!dbg_is_chk_gen(c)) return 0; if (le16_to_cpu(dent->nlen) != nm->len) return -EINVAL; if (memcmp(dent->name, nm->name, nm->len)) return -EINVAL; return 0; } #else #define dbg_check_name(c, dent, nm) 0 #endif static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { int err; union ubifs_key key; struct inode *inode = NULL; struct ubifs_dent_node *dent; struct ubifs_info *c = dir->i_sb->s_fs_info; dbg_gen("'%.*s' in dir ino %lu", dentry->d_name.len, dentry->d_name.name, dir->i_ino); if (dentry->d_name.len > UBIFS_MAX_NLEN) return ERR_PTR(-ENAMETOOLONG); dent = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS); if (!dent) return ERR_PTR(-ENOMEM); dent_key_init(c, &key, dir->i_ino, &dentry->d_name); err = ubifs_tnc_lookup_nm(c, &key, dent, &dentry->d_name); if (err) { if (err == -ENOENT) { dbg_gen("not found"); goto done; } goto out; } if (dbg_check_name(c, dent, &dentry->d_name)) { err = -EINVAL; goto out; } inode = ubifs_iget(dir->i_sb, le64_to_cpu(dent->inum)); if (IS_ERR(inode)) { /* * This should not happen. Probably the file-system needs * checking. */ err = PTR_ERR(inode); ubifs_err("dead directory entry '%.*s', error %d", dentry->d_name.len, dentry->d_name.name, err); ubifs_ro_mode(c, err); goto out; } done: kfree(dent); /* * Note, d_splice_alias() would be required instead if we supported * NFS. 
*/ d_add(dentry, inode); return NULL; out: kfree(dent); return ERR_PTR(err); } static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { struct inode *inode; struct ubifs_info *c = dir->i_sb->s_fs_info; int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len); struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, .dirtied_ino = 1 }; struct ubifs_inode *dir_ui = ubifs_inode(dir); /* * Budget request settings: new inode, new direntry, changing the * parent directory inode. */ dbg_gen("dent '%.*s', mode %#hx in dir ino %lu", dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino); err = ubifs_budget_space(c, &req); if (err) return err; inode = ubifs_new_inode(c, dir, mode); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_budg; } mutex_lock(&dir_ui->ui_mutex); dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; dir->i_mtime = dir->i_ctime = inode->i_ctime; err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0); if (err) goto out_cancel; mutex_unlock(&dir_ui->ui_mutex); ubifs_release_budget(c, &req); insert_inode_hash(inode); d_instantiate(dentry, inode); return 0; out_cancel: dir->i_size -= sz_change; dir_ui->ui_size = dir->i_size; mutex_unlock(&dir_ui->ui_mutex); make_bad_inode(inode); iput(inode); out_budg: ubifs_release_budget(c, &req); ubifs_err("cannot create regular file, error %d", err); return err; } /** * vfs_dent_type - get VFS directory entry type. * @type: UBIFS directory entry type * * This function converts UBIFS directory entry type into VFS directory entry * type. 
 */
static unsigned int vfs_dent_type(uint8_t type)
{
	switch (type) {
	case UBIFS_ITYPE_REG:
		return DT_REG;
	case UBIFS_ITYPE_DIR:
		return DT_DIR;
	case UBIFS_ITYPE_LNK:
		return DT_LNK;
	case UBIFS_ITYPE_BLK:
		return DT_BLK;
	case UBIFS_ITYPE_CHR:
		return DT_CHR;
	case UBIFS_ITYPE_FIFO:
		return DT_FIFO;
	case UBIFS_ITYPE_SOCK:
		return DT_SOCK;
	default:
		BUG();
	}
	/* Not reached - BUG() above does not return */
	return 0;
}

/*
 * The classical Unix view for directory is that it is a linear array of
 * (name, inode number) entries. Linux/VFS assumes this model as well.
 * Particularly, 'readdir()' call wants us to return a directory entry offset
 * which later may be used to continue 'readdir()'ing the directory or to
 * 'seek()' to that specific direntry. Obviously UBIFS does not really fit this
 * model because directory entries are identified by keys, which may collide.
 *
 * UBIFS uses directory entry hash value for directory offsets, so
 * 'seekdir()'/'telldir()' may not always work because of possible key
 * collisions. But UBIFS guarantees that consecutive 'readdir()' calls work
 * properly by means of saving full directory entry name in the private field
 * of the file description object.
 *
 * This means that UBIFS cannot support NFS which requires full
 * 'seekdir()'/'telldir()' support.
 */
static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	int err, over = 0;
	struct qstr nm;
	union ubifs_key key;
	struct ubifs_dent_node *dent;
	struct inode *dir = file->f_path.dentry->d_inode;
	struct ubifs_info *c = dir->i_sb->s_fs_info;

	dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);

	if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
		/*
		 * The directory was seek'ed to a senseless position or there
		 * are no more entries.
		 */
		return 0;

	/* File positions 0 and 1 correspond to "." and ".."
*/ if (file->f_pos == 0) { ubifs_assert(!file->private_data); over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR); if (over) return 0; file->f_pos = 1; } if (file->f_pos == 1) { ubifs_assert(!file->private_data); over = filldir(dirent, "..", 2, 1, parent_ino(file->f_path.dentry), DT_DIR); if (over) return 0; /* Find the first entry in TNC and save it */ lowest_dent_key(c, &key, dir->i_ino); nm.name = NULL; dent = ubifs_tnc_next_ent(c, &key, &nm); if (IS_ERR(dent)) { err = PTR_ERR(dent); goto out; } file->f_pos = key_hash_flash(c, &dent->key); file->private_data = dent; } dent = file->private_data; if (!dent) { /* * The directory was seek'ed to and is now readdir'ed. * Find the entry corresponding to @file->f_pos or the * closest one. */ dent_key_init_hash(c, &key, dir->i_ino, file->f_pos); nm.name = NULL; dent = ubifs_tnc_next_ent(c, &key, &nm); if (IS_ERR(dent)) { err = PTR_ERR(dent); goto out; } file->f_pos = key_hash_flash(c, &dent->key); file->private_data = dent; } while (1) { dbg_gen("feed '%s', ino %llu, new f_pos %#x", dent->name, (unsigned long long)le64_to_cpu(dent->inum), key_hash_flash(c, &dent->key)); ubifs_assert(le64_to_cpu(dent->ch.sqnum) > ubifs_inode(dir)->creat_sqnum); nm.len = le16_to_cpu(dent->nlen); over = filldir(dirent, dent->name, nm.len, file->f_pos, le64_to_cpu(dent->inum), vfs_dent_type(dent->type)); if (over) return 0; /* Switch to the next entry */ key_read(c, &dent->key, &key); nm.name = dent->name; dent = ubifs_tnc_next_ent(c, &key, &nm); if (IS_ERR(dent)) { err = PTR_ERR(dent); goto out; } kfree(file->private_data); file->f_pos = key_hash_flash(c, &dent->key); file->private_data = dent; cond_resched(); } out: if (err != -ENOENT) { ubifs_err("cannot find next direntry, error %d", err); return err; } kfree(file->private_data); file->private_data = NULL; file->f_pos = 2; return 0; } /* If a directory is seeked, we have to free saved readdir() state */ static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin) { 
	kfree(file->private_data);
	file->private_data = NULL;
	return generic_file_llseek(file, offset, origin);
}

/* Free saved readdir() state when the directory is closed */
static int ubifs_dir_release(struct inode *dir, struct file *file)
{
	kfree(file->private_data);
	file->private_data = NULL;
	return 0;
}

/**
 * lock_2_inodes - a wrapper for locking two UBIFS inodes.
 * @inode1: first inode
 * @inode2: second inode
 *
 * We do not implement any tricks to guarantee strict lock ordering, because
 * VFS has already done it for us on the @i_mutex. So this is just a simple
 * wrapper function.
 */
static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
{
	mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
	mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
}

/**
 * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
 * @inode1: first inode
 * @inode2: second inode
 */
static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
{
	/* Release in reverse of the order taken by lock_2_inodes() */
	mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
	mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
}

static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct ubifs_info *c = dir->i_sb->s_fs_info;
	struct inode *inode = old_dentry->d_inode;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *dir_ui = ubifs_inode(dir);
	int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len);
	struct ubifs_budget_req req = { .new_dent = 1, .dirtied_ino = 2,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	/*
	 * Budget request settings: new direntry, changing the target inode,
	 * changing the parent inode.
*/ dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu", dentry->d_name.len, dentry->d_name.name, inode->i_ino, inode->i_nlink, dir->i_ino); ubifs_assert(mutex_is_locked(&dir->i_mutex)); ubifs_assert(mutex_is_locked(&inode->i_mutex)); err = dbg_check_synced_i_size(c, inode); if (err) return err; err = ubifs_budget_space(c, &req); if (err) return err; lock_2_inodes(dir, inode); inc_nlink(inode); ihold(inode); inode->i_ctime = ubifs_current_time(inode); dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; dir->i_mtime = dir->i_ctime = inode->i_ctime; err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0); if (err) goto out_cancel; unlock_2_inodes(dir, inode); ubifs_release_budget(c, &req); d_instantiate(dentry, inode); return 0; out_cancel: dir->i_size -= sz_change; dir_ui->ui_size = dir->i_size; drop_nlink(inode); unlock_2_inodes(dir, inode); ubifs_release_budget(c, &req); iput(inode); return err; } static int ubifs_unlink(struct inode *dir, struct dentry *dentry) { struct ubifs_info *c = dir->i_sb->s_fs_info; struct inode *inode = dentry->d_inode; struct ubifs_inode *dir_ui = ubifs_inode(dir); int sz_change = CALC_DENT_SIZE(dentry->d_name.len); int err, budgeted = 1; struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 }; unsigned int saved_nlink = inode->i_nlink; /* * Budget request settings: deletion direntry, deletion inode (+1 for * @dirtied_ino), changing the parent directory inode. If budgeting * fails, go ahead anyway because we have extra space reserved for * deletions. 
*/ dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu", dentry->d_name.len, dentry->d_name.name, inode->i_ino, inode->i_nlink, dir->i_ino); ubifs_assert(mutex_is_locked(&dir->i_mutex)); ubifs_assert(mutex_is_locked(&inode->i_mutex)); err = dbg_check_synced_i_size(c, inode); if (err) return err; err = ubifs_budget_space(c, &req); if (err) { if (err != -ENOSPC) return err; budgeted = 0; } lock_2_inodes(dir, inode); inode->i_ctime = ubifs_current_time(dir); drop_nlink(inode); dir->i_size -= sz_change; dir_ui->ui_size = dir->i_size; dir->i_mtime = dir->i_ctime = inode->i_ctime; err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 1, 0); if (err) goto out_cancel; unlock_2_inodes(dir, inode); if (budgeted) ubifs_release_budget(c, &req); else { /* We've deleted something - clean the "no space" flags */ c->bi.nospace = c->bi.nospace_rp = 0; smp_wmb(); } return 0; out_cancel: dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; set_nlink(inode, saved_nlink); unlock_2_inodes(dir, inode); if (budgeted) ubifs_release_budget(c, &req); return err; } /** * check_dir_empty - check if a directory is empty or not. * @c: UBIFS file-system description object * @dir: VFS inode object of the directory to check * * This function checks if directory @dir is empty. Returns zero if the * directory is empty, %-ENOTEMPTY if it is not, and other negative error codes * in case of of errors. 
*/ static int check_dir_empty(struct ubifs_info *c, struct inode *dir) { struct qstr nm = { .name = NULL }; struct ubifs_dent_node *dent; union ubifs_key key; int err; lowest_dent_key(c, &key, dir->i_ino); dent = ubifs_tnc_next_ent(c, &key, &nm); if (IS_ERR(dent)) { err = PTR_ERR(dent); if (err == -ENOENT) err = 0; } else { kfree(dent); err = -ENOTEMPTY; } return err; } static int ubifs_rmdir(struct inode *dir, struct dentry *dentry) { struct ubifs_info *c = dir->i_sb->s_fs_info; struct inode *inode = dentry->d_inode; int sz_change = CALC_DENT_SIZE(dentry->d_name.len); int err, budgeted = 1; struct ubifs_inode *dir_ui = ubifs_inode(dir); struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 }; /* * Budget request settings: deletion direntry, deletion inode and * changing the parent inode. If budgeting fails, go ahead anyway * because we have extra space reserved for deletions. */ dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len, dentry->d_name.name, inode->i_ino, dir->i_ino); ubifs_assert(mutex_is_locked(&dir->i_mutex)); ubifs_assert(mutex_is_locked(&inode->i_mutex)); err = check_dir_empty(c, dentry->d_inode); if (err) return err; err = ubifs_budget_space(c, &req); if (err) { if (err != -ENOSPC) return err; budgeted = 0; } lock_2_inodes(dir, inode); inode->i_ctime = ubifs_current_time(dir); clear_nlink(inode); drop_nlink(dir); dir->i_size -= sz_change; dir_ui->ui_size = dir->i_size; dir->i_mtime = dir->i_ctime = inode->i_ctime; err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 1, 0); if (err) goto out_cancel; unlock_2_inodes(dir, inode); if (budgeted) ubifs_release_budget(c, &req); else { /* We've deleted something - clean the "no space" flags */ c->bi.nospace = c->bi.nospace_rp = 0; smp_wmb(); } return 0; out_cancel: dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; inc_nlink(dir); set_nlink(inode, 2); unlock_2_inodes(dir, inode); if (budgeted) ubifs_release_budget(c, &req); return err; } static int ubifs_mkdir(struct 
inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; struct ubifs_inode *dir_ui = ubifs_inode(dir); struct ubifs_info *c = dir->i_sb->s_fs_info; int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len); struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1 }; /* * Budget request settings: new inode, new direntry and changing parent * directory inode. */ dbg_gen("dent '%.*s', mode %#hx in dir ino %lu", dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino); err = ubifs_budget_space(c, &req); if (err) return err; inode = ubifs_new_inode(c, dir, S_IFDIR | mode); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_budg; } mutex_lock(&dir_ui->ui_mutex); insert_inode_hash(inode); inc_nlink(inode); inc_nlink(dir); dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; dir->i_mtime = dir->i_ctime = inode->i_ctime; err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0); if (err) { ubifs_err("cannot create directory, error %d", err); goto out_cancel; } mutex_unlock(&dir_ui->ui_mutex); ubifs_release_budget(c, &req); d_instantiate(dentry, inode); return 0; out_cancel: dir->i_size -= sz_change; dir_ui->ui_size = dir->i_size; drop_nlink(dir); mutex_unlock(&dir_ui->ui_mutex); make_bad_inode(inode); iput(inode); out_budg: ubifs_release_budget(c, &req); return err; } static int ubifs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode *inode; struct ubifs_inode *ui; struct ubifs_inode *dir_ui = ubifs_inode(dir); struct ubifs_info *c = dir->i_sb->s_fs_info; union ubifs_dev_desc *dev = NULL; int sz_change = CALC_DENT_SIZE(dentry->d_name.len); int err, devlen = 0; struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, .new_ino_d = ALIGN(devlen, 8), .dirtied_ino = 1 }; /* * Budget request settings: new inode, new direntry and changing parent * directory inode. 
 */
	dbg_gen("dent '%.*s' in dir ino %lu", dentry->d_name.len,
		dentry->d_name.name, dir->i_ino);

	/* Reject device numbers the VFS cannot encode */
	if (!new_valid_dev(rdev))
		return -EINVAL;

	/* Only block/char devices carry an encoded dev_t payload */
	if (S_ISBLK(mode) || S_ISCHR(mode)) {
		dev = kmalloc(sizeof(union ubifs_dev_desc), GFP_NOFS);
		if (!dev)
			return -ENOMEM;
		devlen = ubifs_encode_dev(dev, rdev);
	}

	err = ubifs_budget_space(c, &req);
	if (err) {
		kfree(dev);
		return err;
	}

	inode = ubifs_new_inode(c, dir, mode);
	if (IS_ERR(inode)) {
		kfree(dev);
		err = PTR_ERR(inode);
		goto out_budg;
	}

	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_size = ubifs_inode(inode)->ui_size = devlen;
	ui = ubifs_inode(inode);
	/* Ownership of @dev transfers to the inode; freed with it */
	ui->data = dev;
	ui->data_len = devlen;

	/* Update the parent under its mutex, then commit via the journal */
	mutex_lock(&dir_ui->ui_mutex);
	dir->i_size += sz_change;
	dir_ui->ui_size = dir->i_size;
	dir->i_mtime = dir->i_ctime = inode->i_ctime;
	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0);
	if (err)
		goto out_cancel;
	mutex_unlock(&dir_ui->ui_mutex);

	ubifs_release_budget(c, &req);
	insert_inode_hash(inode);
	d_instantiate(dentry, inode);
	return 0;

out_cancel:
	/* Roll back the in-memory parent-directory changes */
	dir->i_size -= sz_change;
	dir_ui->ui_size = dir->i_size;
	mutex_unlock(&dir_ui->ui_mutex);
	make_bad_inode(inode);
	iput(inode);
out_budg:
	ubifs_release_budget(c, &req);
	return err;
}

static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct inode *inode;
	struct ubifs_inode *ui;
	struct ubifs_inode *dir_ui = ubifs_inode(dir);
	struct ubifs_info *c = dir->i_sb->s_fs_info;
	int err, len = strlen(symname);
	int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
					.new_ino_d = ALIGN(len, 8),
					.dirtied_ino = 1 };

	/*
	 * Budget request settings: new inode, new direntry and changing parent
	 * directory inode.
	 */

	dbg_gen("dent '%.*s', target '%s' in dir ino %lu", dentry->d_name.len,
		dentry->d_name.name, symname, dir->i_ino);

	/* Symlink target is stored inline in the inode; bound its length */
	if (len > UBIFS_MAX_INO_DATA)
		return -ENAMETOOLONG;

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_budg;
	}

	ui = ubifs_inode(inode);
	ui->data = kmalloc(len + 1, GFP_NOFS);
	if (!ui->data) {
		err = -ENOMEM;
		goto out_inode;
	}

	memcpy(ui->data, symname, len);
	((char *)ui->data)[len] = '\0';
	/*
	 * The terminating zero byte is not written to the flash media and it
	 * is put just to make later in-memory string processing simpler. Thus,
	 * data length is @len, not @len + %1.
	 */
	ui->data_len = len;
	inode->i_size = ubifs_inode(inode)->ui_size = len;

	/* Update the parent under its mutex, then commit via the journal */
	mutex_lock(&dir_ui->ui_mutex);
	dir->i_size += sz_change;
	dir_ui->ui_size = dir->i_size;
	dir->i_mtime = dir->i_ctime = inode->i_ctime;
	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0);
	if (err)
		goto out_cancel;
	mutex_unlock(&dir_ui->ui_mutex);

	ubifs_release_budget(c, &req);
	insert_inode_hash(inode);
	d_instantiate(dentry, inode);
	return 0;

out_cancel:
	dir->i_size -= sz_change;
	dir_ui->ui_size = dir->i_size;
	mutex_unlock(&dir_ui->ui_mutex);
out_inode:
	make_bad_inode(inode);
	iput(inode);
out_budg:
	ubifs_release_budget(c, &req);
	return err;
}

/**
 * lock_3_inodes - a wrapper for locking three UBIFS inodes.
 * @inode1: first inode
 * @inode2: second inode
 * @inode3: third inode
 *
 * This function is used for 'ubifs_rename()' and @inode1 may be the same as
 * @inode2 whereas @inode3 may be %NULL.
 *
 * We do not implement any tricks to guarantee strict lock ordering, because
 * VFS has already done it for us on the @i_mutex. So this is just a simple
 * wrapper function.
 */
static void lock_3_inodes(struct inode *inode1, struct inode *inode2,
			  struct inode *inode3)
{
	mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
	if (inode2 != inode1)
		mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
	if (inode3)
		mutex_lock_nested(&ubifs_inode(inode3)->ui_mutex, WB_MUTEX_3);
}

/**
 * unlock_3_inodes - a wrapper for unlocking three UBIFS inodes for rename.
 * @inode1: first inode
 * @inode2: second inode
 * @inode3: third inode
 */
static void unlock_3_inodes(struct inode *inode1, struct inode *inode2,
			    struct inode *inode3)
{
	/* Release in reverse order of acquisition */
	if (inode3)
		mutex_unlock(&ubifs_inode(inode3)->ui_mutex);
	if (inode1 != inode2)
		mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
	mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
}

static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct ubifs_info *c = old_dir->i_sb->s_fs_info;
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct ubifs_inode *old_inode_ui = ubifs_inode(old_inode);
	int err, release, sync = 0, move = (new_dir != old_dir);
	int is_dir = S_ISDIR(old_inode->i_mode);
	int unlink = !!new_inode;
	int new_sz = CALC_DENT_SIZE(new_dentry->d_name.len);
	int old_sz = CALC_DENT_SIZE(old_dentry->d_name.len);
	struct ubifs_budget_req req = { .new_dent = 1, .mod_dent = 1,
					.dirtied_ino = 3 };
	struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
			.dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
	struct timespec time;
	unsigned int uninitialized_var(saved_nlink);

	/*
	 * Budget request settings: deletion direntry, new direntry, removing
	 * the old inode, and changing old and new parent directory inodes.
	 *
	 * However, this operation also marks the target inode as dirty and
	 * does not write it, so we allocate budget for the target inode
	 * separately.
	 */

	dbg_gen("dent '%.*s' ino %lu in dir ino %lu to dent '%.*s' in "
		"dir ino %lu", old_dentry->d_name.len, old_dentry->d_name.name,
		old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len,
		new_dentry->d_name.name, new_dir->i_ino);
	ubifs_assert(mutex_is_locked(&old_dir->i_mutex));
	ubifs_assert(mutex_is_locked(&new_dir->i_mutex));
	if (unlink)
		ubifs_assert(mutex_is_locked(&new_inode->i_mutex));


	/* Overwriting a directory is only allowed if it is empty */
	if (unlink && is_dir) {
		err = check_dir_empty(c, new_inode);
		if (err)
			return err;
	}

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;
	err = ubifs_budget_space(c, &ino_req);
	if (err) {
		ubifs_release_budget(c, &req);
		return err;
	}

	lock_3_inodes(old_dir, new_dir, new_inode);

	/*
	 * Like most other Unix systems, set the @i_ctime for inodes on a
	 * rename.
	 */
	time = ubifs_current_time(old_dir);
	old_inode->i_ctime = time;

	/* We must adjust parent link count when renaming directories */
	if (is_dir) {
		if (move) {
			/*
			 * @old_dir loses a link because we are moving
			 * @old_inode to a different directory.
			 */
			drop_nlink(old_dir);
			/*
			 * @new_dir only gains a link if we are not also
			 * overwriting an existing directory.
			 */
			if (!unlink)
				inc_nlink(new_dir);
		} else {
			/*
			 * @old_inode is not moving to a different directory,
			 * but @old_dir still loses a link if we are
			 * overwriting an existing directory.
			 */
			if (unlink)
				drop_nlink(old_dir);
		}
	}

	old_dir->i_size -= old_sz;
	ubifs_inode(old_dir)->ui_size = old_dir->i_size;
	old_dir->i_mtime = old_dir->i_ctime = time;
	new_dir->i_mtime = new_dir->i_ctime = time;

	/*
	 * And finally, if we unlinked a direntry which happened to have the
	 * same name as the moved direntry, we have to decrement @i_nlink of
	 * the unlinked inode and change its ctime.
	 */
	if (unlink) {
		/*
		 * Directories cannot have hard-links, so if this is a
		 * directory, just clear @i_nlink.
		 */
		saved_nlink = new_inode->i_nlink;
		if (is_dir)
			clear_nlink(new_inode);
		else
			drop_nlink(new_inode);
		new_inode->i_ctime = time;
	} else {
		new_dir->i_size += new_sz;
		ubifs_inode(new_dir)->ui_size = new_dir->i_size;
	}

	/*
	 * Do not ask 'ubifs_jnl_rename()' to flush write-buffer if @old_inode
	 * is dirty, because this will be done later on at the end of
	 * 'ubifs_rename()'.
	 */
	if (IS_SYNC(old_inode)) {
		sync = IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir);
		if (unlink && IS_SYNC(new_inode))
			sync = 1;
	}
	err = ubifs_jnl_rename(c, old_dir, old_dentry, new_dir, new_dentry,
			       sync);
	if (err)
		goto out_cancel;

	unlock_3_inodes(old_dir, new_dir, new_inode);
	ubifs_release_budget(c, &req);

	/* Dirty @old_inode; release its budget only if it was already dirty */
	mutex_lock(&old_inode_ui->ui_mutex);
	release = old_inode_ui->dirty;
	mark_inode_dirty_sync(old_inode);
	mutex_unlock(&old_inode_ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &ino_req);
	if (IS_SYNC(old_inode))
		err = old_inode->i_sb->s_op->write_inode(old_inode, NULL);
	return err;

out_cancel:
	/* Undo every in-memory change made above, in reverse order */
	if (unlink) {
		set_nlink(new_inode, saved_nlink);
	} else {
		new_dir->i_size -= new_sz;
		ubifs_inode(new_dir)->ui_size = new_dir->i_size;
	}
	old_dir->i_size += old_sz;
	ubifs_inode(old_dir)->ui_size = old_dir->i_size;
	if (is_dir) {
		if (move) {
			inc_nlink(old_dir);
			if (!unlink)
				drop_nlink(new_dir);
		} else {
			if (unlink)
				inc_nlink(old_dir);
		}
	}
	unlock_3_inodes(old_dir, new_dir, new_inode);
	ubifs_release_budget(c, &ino_req);
	ubifs_release_budget(c, &req);
	return err;
}

int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
		  struct kstat *stat)
{
	loff_t size;
	struct inode *inode = dentry->d_inode;
	struct ubifs_inode *ui = ubifs_inode(inode);

	mutex_lock(&ui->ui_mutex);
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = UBIFS_BLOCK_SIZE;
	stat->size = ui->ui_size;

	/*
	 * Unfortunately, the 'stat()' system call was designed for block
	 * device based file systems, and it is not appropriate for UBIFS,
	 * because UBIFS does not have notion of "block". For example, it is
	 * difficult to tell how many block a directory takes - it actually
	 * takes less than 300 bytes, but we have to round it to block size,
	 * which introduces large mistake. This makes utilities like 'du' to
	 * report completely senseless numbers. This is the reason why UBIFS
	 * goes the same way as JFFS2 - it reports zero blocks for everything
	 * but regular files, which makes more sense than reporting completely
	 * wrong sizes.
	 */
	if (S_ISREG(inode->i_mode)) {
		size = ui->xattr_size;
		size += stat->size;
		size = ALIGN(size, UBIFS_BLOCK_SIZE);
		/*
		 * Note, user-space expects 512-byte blocks count irrespectively
		 * of what was reported in @stat->size.
		 */
		stat->blocks = size >> 9;
	} else
		stat->blocks = 0;
	mutex_unlock(&ui->ui_mutex);
	return 0;
}

const struct inode_operations ubifs_dir_inode_operations = {
	.lookup      = ubifs_lookup,
	.create      = ubifs_create,
	.link        = ubifs_link,
	.symlink     = ubifs_symlink,
	.unlink      = ubifs_unlink,
	.mkdir       = ubifs_mkdir,
	.rmdir       = ubifs_rmdir,
	.mknod       = ubifs_mknod,
	.rename      = ubifs_rename,
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
	.setxattr    = ubifs_setxattr,
	.getxattr    = ubifs_getxattr,
	.listxattr   = ubifs_listxattr,
	.removexattr = ubifs_removexattr,
#endif
};

const struct file_operations ubifs_dir_operations = {
	.llseek         = ubifs_dir_llseek,
	.release        = ubifs_dir_release,
	.read           = generic_read_dir,
	.readdir        = ubifs_readdir,
	.fsync          = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ubifs_compat_ioctl,
#endif
};
gpl-2.0
sakuramilk/linux-2.6.32.y
net/netfilter/nf_conntrack_irc.c
596
8073
/* IRC extension for IP connection tracking, Version 1.21 * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org> * based on RR's ip_conntrack_ftp.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <linux/netfilter/nf_conntrack_irc.h> #define MAX_PORTS 8 static unsigned short ports[MAX_PORTS]; static unsigned int ports_c; static unsigned int max_dcc_channels = 8; static unsigned int dcc_timeout __read_mostly = 300; /* This is slow, but it's simple. --RR */ static char *irc_buffer; static DEFINE_SPINLOCK(irc_buffer_lock); unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int matchoff, unsigned int matchlen, struct nf_conntrack_expect *exp) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_irc_hook); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("IRC (DCC) connection tracking helper"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ip_conntrack_irc"); MODULE_ALIAS_NFCT_HELPER("irc"); module_param_array(ports, ushort, &ports_c, 0400); MODULE_PARM_DESC(ports, "port numbers of IRC servers"); module_param(max_dcc_channels, uint, 0400); MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per " "IRC session"); module_param(dcc_timeout, uint, 0400); MODULE_PARM_DESC(dcc_timeout, "timeout on for unestablished DCC channels"); static const char *const dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " }; #define MINMATCHLEN 5 /* tries to get the ip_addr and port out of a dcc 
command * return value: -1 on failure, 0 on success * data pointer to first byte of DCC command data * data_end pointer to last byte of dcc command data * ip returns parsed ip of dcc command * port returns parsed port of dcc command * ad_beg_p returns pointer to first byte of addr data * ad_end_p returns pointer to last byte of addr data */ static int parse_dcc(char *data, const char *data_end, __be32 *ip, u_int16_t *port, char **ad_beg_p, char **ad_end_p) { char *tmp; /* at least 12: "AAAAAAAA P\1\n" */ while (*data++ != ' ') if (data > data_end - 12) return -1; /* Make sure we have a newline character within the packet boundaries * because simple_strtoul parses until the first invalid character. */ for (tmp = data; tmp <= data_end; tmp++) if (*tmp == '\n') break; if (tmp > data_end || *tmp != '\n') return -1; *ad_beg_p = data; *ip = cpu_to_be32(simple_strtoul(data, &data, 10)); /* skip blanks between ip and port */ while (*data == ' ') { if (data >= data_end) return -1; data++; } *port = simple_strtoul(data, &data, 10); *ad_end_p = data; return 0; } static int help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { unsigned int dataoff; const struct iphdr *iph; const struct tcphdr *th; struct tcphdr _tcph; const char *data_limit; char *data, *ib_ptr; int dir = CTINFO2DIR(ctinfo); struct nf_conntrack_expect *exp; struct nf_conntrack_tuple *tuple; __be32 dcc_ip; u_int16_t dcc_port; __be16 port; int i, ret = NF_ACCEPT; char *addr_beg_p, *addr_end_p; typeof(nf_nat_irc_hook) nf_nat_irc; /* If packet is coming from IRC server */ if (dir == IP_CT_DIR_REPLY) return NF_ACCEPT; /* Until there's been traffic both ways, don't look in packets. */ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) return NF_ACCEPT; /* Not a full tcp header? */ th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph); if (th == NULL) return NF_ACCEPT; /* No data? 
*/ dataoff = protoff + th->doff*4; if (dataoff >= skb->len) return NF_ACCEPT; spin_lock_bh(&irc_buffer_lock); ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff, irc_buffer); BUG_ON(ib_ptr == NULL); data = ib_ptr; data_limit = ib_ptr + skb->len - dataoff; /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */ while (data < data_limit - (19 + MINMATCHLEN)) { if (memcmp(data, "\1DCC ", 5)) { data++; continue; } data += 5; /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */ iph = ip_hdr(skb); pr_debug("DCC found in master %pI4:%u %pI4:%u\n", &iph->saddr, ntohs(th->source), &iph->daddr, ntohs(th->dest)); for (i = 0; i < ARRAY_SIZE(dccprotos); i++) { if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) { /* no match */ continue; } data += strlen(dccprotos[i]); pr_debug("DCC %s detected\n", dccprotos[i]); /* we have at least * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid * data left (== 14/13 bytes) */ if (parse_dcc(data, data_limit, &dcc_ip, &dcc_port, &addr_beg_p, &addr_end_p)) { pr_debug("unable to parse dcc command\n"); continue; } pr_debug("DCC bound ip/port: %pI4:%u\n", &dcc_ip, dcc_port); /* dcc_ip can be the internal OR external (NAT'ed) IP */ tuple = &ct->tuplehash[dir].tuple; if (tuple->src.u3.ip != dcc_ip && tuple->dst.u3.ip != dcc_ip) { if (net_ratelimit()) printk(KERN_WARNING "Forged DCC command from %pI4: %pI4:%u\n", &tuple->src.u3.ip, &dcc_ip, dcc_port); continue; } exp = nf_ct_expect_alloc(ct); if (exp == NULL) { ret = NF_DROP; goto out; } tuple = &ct->tuplehash[!dir].tuple; port = htons(dcc_port); nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, tuple->src.l3num, NULL, &tuple->dst.u3, IPPROTO_TCP, NULL, &port); nf_nat_irc = rcu_dereference(nf_nat_irc_hook); if (nf_nat_irc && ct->status & IPS_NAT_MASK) ret = nf_nat_irc(skb, ctinfo, addr_beg_p - ib_ptr, addr_end_p - addr_beg_p, exp); else if (nf_ct_expect_related(exp) != 0) ret = NF_DROP; nf_ct_expect_put(exp); goto out; } } out: 
spin_unlock_bh(&irc_buffer_lock); return ret; } static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly; static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly; static struct nf_conntrack_expect_policy irc_exp_policy; static void nf_conntrack_irc_fini(void); static int __init nf_conntrack_irc_init(void) { int i, ret; char *tmpname; if (max_dcc_channels < 1) { printk("nf_ct_irc: max_dcc_channels must not be zero\n"); return -EINVAL; } irc_exp_policy.max_expected = max_dcc_channels; irc_exp_policy.timeout = dcc_timeout; irc_buffer = kmalloc(65536, GFP_KERNEL); if (!irc_buffer) return -ENOMEM; /* If no port given, default to standard irc port */ if (ports_c == 0) ports[ports_c++] = IRC_PORT; for (i = 0; i < ports_c; i++) { irc[i].tuple.src.l3num = AF_INET; irc[i].tuple.src.u.tcp.port = htons(ports[i]); irc[i].tuple.dst.protonum = IPPROTO_TCP; irc[i].expect_policy = &irc_exp_policy; irc[i].me = THIS_MODULE; irc[i].help = help; tmpname = &irc_names[i][0]; if (ports[i] == IRC_PORT) sprintf(tmpname, "irc"); else sprintf(tmpname, "irc-%u", i); irc[i].name = tmpname; ret = nf_conntrack_helper_register(&irc[i]); if (ret) { printk("nf_ct_irc: failed to register helper " "for pf: %u port: %u\n", irc[i].tuple.src.l3num, ports[i]); nf_conntrack_irc_fini(); return ret; } } return 0; } /* This function is intentionally _NOT_ defined as __exit, because * it is needed by the init function */ static void nf_conntrack_irc_fini(void) { int i; for (i = 0; i < ports_c; i++) nf_conntrack_helper_unregister(&irc[i]); kfree(irc_buffer); } module_init(nf_conntrack_irc_init); module_exit(nf_conntrack_irc_fini);
gpl-2.0
Elite-Kernels/HTC-10
drivers/regulator/pcap-regulator.c
852
7576
/* * PCAP2 Regulator Driver * * Copyright (c) 2009 Daniel Ribeiro <drwyrm@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/mfd/ezx-pcap.h> static const unsigned int V1_table[] = { 2775000, 1275000, 1600000, 1725000, 1825000, 1925000, 2075000, 2275000, }; static const unsigned int V2_table[] = { 2500000, 2775000, }; static const unsigned int V3_table[] = { 1075000, 1275000, 1550000, 1725000, 1876000, 1950000, 2075000, 2275000, }; static const unsigned int V4_table[] = { 1275000, 1550000, 1725000, 1875000, 1950000, 2075000, 2275000, 2775000, }; static const unsigned int V5_table[] = { 1875000, 2275000, 2475000, 2775000, }; static const unsigned int V6_table[] = { 2475000, 2775000, }; static const unsigned int V7_table[] = { 1875000, 2775000, }; #define V8_table V4_table static const unsigned int V9_table[] = { 1575000, 1875000, 2475000, 2775000, }; static const unsigned int V10_table[] = { 5000000, }; static const unsigned int VAUX1_table[] = { 1875000, 2475000, 2775000, 3000000, }; #define VAUX2_table VAUX1_table static const unsigned int VAUX3_table[] = { 1200000, 1200000, 1200000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000, }; static const unsigned int VAUX4_table[] = { 1800000, 1800000, 3000000, 5000000, }; static const unsigned int VSIM_table[] = { 1875000, 3000000, }; static const unsigned int VSIM2_table[] = { 1875000, }; static const unsigned int VVIB_table[] = { 1300000, 1800000, 2000000, 3000000, }; static const unsigned int SW1_table[] = { 900000, 950000, 
1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1600000, 1875000, 2250000, }; #define SW2_table SW1_table static const unsigned int SW3_table[] = { 4000000, 4500000, 5000000, 5500000, }; struct pcap_regulator { const u8 reg; const u8 en; const u8 index; const u8 stby; const u8 lowpwr; }; #define NA 0xff #define VREG_INFO(_vreg, _reg, _en, _index, _stby, _lowpwr) \ [_vreg] = { \ .reg = _reg, \ .en = _en, \ .index = _index, \ .stby = _stby, \ .lowpwr = _lowpwr, \ } static struct pcap_regulator vreg_table[] = { VREG_INFO(V1, PCAP_REG_VREG1, 1, 2, 18, 0), VREG_INFO(V2, PCAP_REG_VREG1, 5, 6, 19, 22), VREG_INFO(V3, PCAP_REG_VREG1, 7, 8, 20, 23), VREG_INFO(V4, PCAP_REG_VREG1, 11, 12, 21, 24), /* V5 STBY and LOWPWR are on PCAP_REG_VREG2 */ VREG_INFO(V5, PCAP_REG_VREG1, 15, 16, 12, 19), VREG_INFO(V6, PCAP_REG_VREG2, 1, 2, 14, 20), VREG_INFO(V7, PCAP_REG_VREG2, 3, 4, 15, 21), VREG_INFO(V8, PCAP_REG_VREG2, 5, 6, 16, 22), VREG_INFO(V9, PCAP_REG_VREG2, 9, 10, 17, 23), VREG_INFO(V10, PCAP_REG_VREG2, 10, NA, 18, 24), VREG_INFO(VAUX1, PCAP_REG_AUXVREG, 1, 2, 22, 23), /* VAUX2 ... 
VSIM2 STBY and LOWPWR are on PCAP_REG_LOWPWR */ VREG_INFO(VAUX2, PCAP_REG_AUXVREG, 4, 5, 0, 1), VREG_INFO(VAUX3, PCAP_REG_AUXVREG, 7, 8, 2, 3), VREG_INFO(VAUX4, PCAP_REG_AUXVREG, 12, 13, 4, 5), VREG_INFO(VSIM, PCAP_REG_AUXVREG, 17, 18, NA, 6), VREG_INFO(VSIM2, PCAP_REG_AUXVREG, 16, NA, NA, 7), VREG_INFO(VVIB, PCAP_REG_AUXVREG, 19, 20, NA, NA), VREG_INFO(SW1, PCAP_REG_SWCTRL, 1, 2, NA, NA), VREG_INFO(SW2, PCAP_REG_SWCTRL, 6, 7, NA, NA), /* SW3 STBY is on PCAP_REG_AUXVREG */ VREG_INFO(SW3, PCAP_REG_SWCTRL, 11, 12, 24, NA), /* SWxS used to control SWx voltage on standby */ /* VREG_INFO(SW1S, PCAP_REG_LOWPWR, NA, 12, NA, NA), VREG_INFO(SW2S, PCAP_REG_LOWPWR, NA, 20, NA, NA), */ }; static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); /* the regulator doesn't support voltage switching */ if (rdev->desc->n_voltages == 1) return -EINVAL; return ezx_pcap_set_bits(pcap, vreg->reg, (rdev->desc->n_voltages - 1) << vreg->index, selector << vreg->index); } static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev) { struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); u32 tmp; if (rdev->desc->n_voltages == 1) return 0; ezx_pcap_read(pcap, vreg->reg, &tmp); tmp = ((tmp >> vreg->index) & (rdev->desc->n_voltages - 1)); return tmp; } static int pcap_regulator_enable(struct regulator_dev *rdev) { struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); if (vreg->en == NA) return -EINVAL; return ezx_pcap_set_bits(pcap, vreg->reg, 1 << vreg->en, 1 << vreg->en); } static int pcap_regulator_disable(struct regulator_dev *rdev) { struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); if (vreg->en == NA) return -EINVAL; return ezx_pcap_set_bits(pcap, vreg->reg, 1 << vreg->en, 0); } static int 
pcap_regulator_is_enabled(struct regulator_dev *rdev) { struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); u32 tmp; if (vreg->en == NA) return -EINVAL; ezx_pcap_read(pcap, vreg->reg, &tmp); return (tmp >> vreg->en) & 1; } static struct regulator_ops pcap_regulator_ops = { .list_voltage = regulator_list_voltage_table, .set_voltage_sel = pcap_regulator_set_voltage_sel, .get_voltage_sel = pcap_regulator_get_voltage_sel, .enable = pcap_regulator_enable, .disable = pcap_regulator_disable, .is_enabled = pcap_regulator_is_enabled, }; #define VREG(_vreg) \ [_vreg] = { \ .name = #_vreg, \ .id = _vreg, \ .n_voltages = ARRAY_SIZE(_vreg##_table), \ .volt_table = _vreg##_table, \ .ops = &pcap_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ } static const struct regulator_desc pcap_regulators[] = { VREG(V1), VREG(V2), VREG(V3), VREG(V4), VREG(V5), VREG(V6), VREG(V7), VREG(V8), VREG(V9), VREG(V10), VREG(VAUX1), VREG(VAUX2), VREG(VAUX3), VREG(VAUX4), VREG(VSIM), VREG(VSIM2), VREG(VVIB), VREG(SW1), VREG(SW2), }; static int pcap_regulator_probe(struct platform_device *pdev) { struct regulator_dev *rdev; void *pcap = dev_get_drvdata(pdev->dev.parent); struct regulator_config config = { }; config.dev = &pdev->dev; config.init_data = dev_get_platdata(&pdev->dev); config.driver_data = pcap; rdev = devm_regulator_register(&pdev->dev, &pcap_regulators[pdev->id], &config); if (IS_ERR(rdev)) return PTR_ERR(rdev); platform_set_drvdata(pdev, rdev); return 0; } static struct platform_driver pcap_regulator_driver = { .driver = { .name = "pcap-regulator", .owner = THIS_MODULE, }, .probe = pcap_regulator_probe, }; static int __init pcap_regulator_init(void) { return platform_driver_register(&pcap_regulator_driver); } static void __exit pcap_regulator_exit(void) { platform_driver_unregister(&pcap_regulator_driver); } subsys_initcall(pcap_regulator_init); module_exit(pcap_regulator_exit); MODULE_AUTHOR("Daniel Ribeiro 
<drwyrm@gmail.com>"); MODULE_DESCRIPTION("PCAP2 Regulator Driver"); MODULE_LICENSE("GPL");
gpl-2.0
markyzq/kernel-drm-rockchip
drivers/power/max8925_power.c
852
15336
/*
 * Battery driver for Maxim MAX8925
 *
 * Copyright (c) 2009-2010 Marvell International Ltd.
 *	Haojian Zhuang <haojian.zhuang@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/mfd/max8925.h>

/* registers in GPM */
#define MAX8925_OUT5VEN			0x54
#define MAX8925_OUT3VEN			0x58
#define MAX8925_CHG_CNTL1		0x7c

/* bits definition */
#define MAX8925_CHG_STAT_VSYSLOW	(1 << 0)
#define MAX8925_CHG_STAT_MODE_MASK	(3 << 2)
#define MAX8925_CHG_STAT_EN_MASK	(1 << 4)
#define MAX8925_CHG_MBDET		(1 << 1)
#define MAX8925_CHG_AC_RANGE_MASK	(3 << 6)

/* registers in ADC */
#define MAX8925_ADC_RES_CNFG1		0x06
#define MAX8925_ADC_AVG_CNFG1		0x07
#define MAX8925_ADC_ACQ_CNFG1		0x08
#define MAX8925_ADC_ACQ_CNFG2		0x09
/* 2 bytes registers in below. MSB is 1st, LSB is 2nd. */
#define MAX8925_ADC_AUX2		0x62
#define MAX8925_ADC_VCHG		0x64
#define MAX8925_ADC_VBBATT		0x66
#define MAX8925_ADC_VMBATT		0x68
#define MAX8925_ADC_ISNS		0x6a
#define MAX8925_ADC_THM			0x6c
#define MAX8925_ADC_TDIE		0x6e
#define MAX8925_CMD_AUX2		0xc8
#define MAX8925_CMD_VCHG		0xd0
#define MAX8925_CMD_VBBATT		0xd8
#define MAX8925_CMD_VMBATT		0xe0
#define MAX8925_CMD_ISNS		0xe8
#define MAX8925_CMD_THM			0xf0
#define MAX8925_CMD_TDIE		0xf8

/* ADC measurement channels */
enum {
	MEASURE_AUX2,
	MEASURE_VCHG,
	MEASURE_VBBATT,
	MEASURE_VMBATT,
	MEASURE_ISNS,
	MEASURE_THM,
	MEASURE_TDIE,
	MEASURE_MAX,
};

/* Driver state: I2C handles, supply objects and charger configuration */
struct max8925_power_info {
	struct max8925_chip	*chip;
	struct i2c_client	*gpm;	/* global power management block */
	struct i2c_client	*adc;	/* ADC block */

	struct power_supply	*ac;
	struct power_supply	*usb;
	struct power_supply	*battery;
	int			irq_base;
	unsigned		ac_online:1;
	unsigned		usb_online:1;
	unsigned		bat_online:1;
	unsigned		chg_mode:2;
	unsigned		batt_detect:1;	/* detecing MB by ID pin */
	unsigned		topoff_threshold:2;
	unsigned		fast_charge:3;
	unsigned		no_temp_support:1;
	unsigned		no_insert_detect:1;

	int (*set_charger) (int);
};

/* Enable or disable the charger: bit 7 of CHG_CNTL1 disables charging */
static int __set_charger(struct max8925_power_info *info, int enable)
{
	struct max8925_chip *chip = info->chip;
	if (enable) {
		/* enable charger in platform */
		if (info->set_charger)
			info->set_charger(1);
		/* enable charger */
		max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 1 << 7, 0);
	} else {
		/* disable charge */
		max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 1 << 7, 1 << 7);
		if (info->set_charger)
			info->set_charger(0);
	}
	dev_dbg(chip->dev, "%s\n", (enable) ?
		"Enable charger" : "Disable charger");
	return 0;
}

/* Threaded IRQ handler shared by every charger-related interrupt */
static irqreturn_t max8925_charger_handler(int irq, void *data)
{
	struct max8925_power_info *info = (struct max8925_power_info *)data;
	struct max8925_chip *chip = info->chip;

	switch (irq - chip->irq_base) {
	case MAX8925_IRQ_VCHG_DC_R:
		info->ac_online = 1;
		__set_charger(info, 1);
		dev_dbg(chip->dev, "Adapter inserted\n");
		break;
	case MAX8925_IRQ_VCHG_DC_F:
		info->ac_online = 0;
		__set_charger(info, 0);
		dev_dbg(chip->dev, "Adapter removed\n");
		break;
	case MAX8925_IRQ_VCHG_THM_OK_F:
		/* Battery is not ready yet */
		dev_dbg(chip->dev, "Battery temperature is out of range\n");
		/* fall through: treated as an error, stop charging */
	case MAX8925_IRQ_VCHG_DC_OVP:
		dev_dbg(chip->dev, "Error detection\n");
		__set_charger(info, 0);
		break;
	case MAX8925_IRQ_VCHG_THM_OK_R:
		/* Battery is ready now */
		dev_dbg(chip->dev, "Battery temperature is in range\n");
		break;
	case MAX8925_IRQ_VCHG_SYSLOW_R:
		/* VSYS is low */
		dev_info(chip->dev, "Sys power is too low\n");
		break;
	case MAX8925_IRQ_VCHG_SYSLOW_F:
		dev_dbg(chip->dev, "Sys power is above low threshold\n");
		break;
	case MAX8925_IRQ_VCHG_DONE:
		__set_charger(info, 0);
		dev_dbg(chip->dev, "Charging is done\n");
		break;
	case MAX8925_IRQ_VCHG_TOPOFF:
		dev_dbg(chip->dev, "Charging in top-off mode\n");
		break;
	case MAX8925_IRQ_VCHG_TMR_FAULT:
		__set_charger(info, 0);
		dev_dbg(chip->dev, "Safe timer is expired\n");
		break;
	case MAX8925_IRQ_VCHG_RST:
		__set_charger(info, 0);
		dev_dbg(chip->dev, "Charger is reset\n");
		break;
	}
	return IRQ_HANDLED;
}

/*
 * Trigger one ADC conversion and return the 12-bit raw result
 * (two bytes, MSB first, right-aligned by the >> 4), or -EINVAL
 * for an unsupported channel.
 */
static int start_measure(struct max8925_power_info *info, int type)
{
	unsigned char buf[2] = {0, 0};
	int meas_cmd;
	int meas_reg = 0, ret;

	switch (type) {
	case MEASURE_VCHG:
		meas_cmd = MAX8925_CMD_VCHG;
		meas_reg = MAX8925_ADC_VCHG;
		break;
	case MEASURE_VBBATT:
		meas_cmd = MAX8925_CMD_VBBATT;
		meas_reg = MAX8925_ADC_VBBATT;
		break;
	case MEASURE_VMBATT:
		meas_cmd = MAX8925_CMD_VMBATT;
		meas_reg = MAX8925_ADC_VMBATT;
		break;
	case MEASURE_ISNS:
		meas_cmd = MAX8925_CMD_ISNS;
		meas_reg = MAX8925_ADC_ISNS;
		break;
	default:
		return -EINVAL;
	}

	max8925_reg_write(info->adc, meas_cmd, 0);
	max8925_bulk_read(info->adc, meas_reg, 2, buf);
	ret = ((buf[0]<<8) | buf[1]) >> 4;

	return ret;
}

/* power_supply get_property callback for the AC adapter supply */
static int max8925_ac_get_prop(struct power_supply *psy,
			       enum power_supply_property psp,
			       union power_supply_propval *val)
{
	struct max8925_power_info *info = dev_get_drvdata(psy->dev.parent);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = info->ac_online;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (info->ac_online) {
			ret = start_measure(info, MEASURE_VCHG);
			if (ret >= 0) {
				val->intval = ret * 2000;	/* unit is uV */
				goto out;
			}
		}
		ret = -ENODATA;
		break;
	default:
		ret = -ENODEV;
		break;
	}
out:
	return ret;
}

static enum power_supply_property max8925_ac_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
};

/* power_supply get_property callback for the USB supply */
static int max8925_usb_get_prop(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	struct max8925_power_info *info = dev_get_drvdata(psy->dev.parent);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = info->usb_online;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (info->usb_online) {
			ret = start_measure(info, MEASURE_VCHG);
			if (ret >= 0) {
				val->intval = ret * 2000;	/* unit is uV */
				goto out;
			}
		}
		ret = -ENODATA;
		break;
	default:
		ret = -ENODEV;
		break;
	}
out:
	return ret;
}

static enum power_supply_property max8925_usb_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
};

/* power_supply get_property callback for the battery supply */
static int max8925_bat_get_prop(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	struct max8925_power_info *info = dev_get_drvdata(psy->dev.parent);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = info->bat_online;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (info->bat_online) {
			ret = start_measure(info, MEASURE_VMBATT);
			if (ret >= 0) {
				val->intval = ret * 2000;	/* unit is uV */
				ret = 0;
				break;
			}
		}
		ret = -ENODATA;
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		if (info->bat_online) {
			ret = start_measure(info, MEASURE_ISNS);
			if (ret >= 0) {
				/* assume r_sns is 0.02 */
				ret = ((ret * 6250) - 3125) /* uA */;
				val->intval = 0;
				if (ret > 0)
					val->intval = ret; /* unit is mA */
				ret = 0;
				break;
			}
		}
		ret = -ENODATA;
		break;
	case POWER_SUPPLY_PROP_CHARGE_TYPE:
		if (!info->bat_online) {
			ret = -ENODATA;
			break;
		}
		/* Decode the 2-bit charge-mode field of CHG_STATUS */
		ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS);
		ret = (ret & MAX8925_CHG_STAT_MODE_MASK) >> 2;
		switch (ret) {
		case 1:
			val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
			break;
		case 0:
		case 2:
			val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
			break;
		case 3:
			val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
			break;
		}
		ret = 0;
		break;
	case POWER_SUPPLY_PROP_STATUS:
		if (!info->bat_online) {
			ret = -ENODATA;
			break;
		}
		ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS);
		if (info->usb_online || info->ac_online) {
			val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
			if (ret & MAX8925_CHG_STAT_EN_MASK)
				val->intval = POWER_SUPPLY_STATUS_CHARGING;
		} else
			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		ret = 0;
		break;
	default:
		ret = -ENODEV;
		break;
	}
	return ret;
}

static enum power_supply_property max8925_battery_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_CHARGE_TYPE,
	POWER_SUPPLY_PROP_STATUS,
};

static const struct power_supply_desc ac_desc = {
	.name		= "max8925-ac",
	.type		= POWER_SUPPLY_TYPE_MAINS,
	.properties	= max8925_ac_props,
	.num_properties	= ARRAY_SIZE(max8925_ac_props),
	.get_property	= max8925_ac_get_prop,
};

static const struct power_supply_desc usb_desc = {
	.name		= "max8925-usb",
	.type		= POWER_SUPPLY_TYPE_USB,
	.properties	= max8925_usb_props,
	.num_properties	= ARRAY_SIZE(max8925_usb_props),
	.get_property	= max8925_usb_get_prop,
};

static const struct power_supply_desc battery_desc = {
	.name		= "max8925-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= max8925_battery_props,
	.num_properties	= ARRAY_SIZE(max8925_battery_props),
	.get_property	= max8925_bat_get_prop,
};

/* Request one charger IRQ; failures are logged but not fatal */
#define REQUEST_IRQ(_irq, _name)					\
do {									\
	ret = request_threaded_irq(chip->irq_base + _irq, NULL,		\
				    max8925_charger_handler,		\
				    IRQF_ONESHOT, _name, info);		\
	if (ret)							\
		dev_err(chip->dev, "Failed to request IRQ #%d: %d\n",	\
			_irq, ret);					\
} while (0)

/* Hook up charger IRQs and program the initial charger configuration */
static int max8925_init_charger(struct max8925_chip *chip,
					  struct max8925_power_info *info)
{
	int ret;

	REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_OVP, "ac-ovp");
	if (!info->no_insert_detect) {
		REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_F, "ac-remove");
		REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_R, "ac-insert");
	}
	if (!info->no_temp_support) {
		REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_R, "batt-temp-in-range");
		REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_F, "batt-temp-out-range");
	}
	REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_F, "vsys-high");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_R, "vsys-low");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_RST, "charger-reset");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_DONE, "charger-done");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_TOPOFF, "charger-topoff");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_TMR_FAULT, "charger-timer-expire");

	info->usb_online = 0;
	info->bat_online = 0;

	/* check for power - can miss interrupt at boot time */
	if (start_measure(info, MEASURE_VCHG) * 2000 > 500000)
		info->ac_online = 1;
	else
		info->ac_online = 0;

	ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS);
	if (ret >= 0) {
		/*
		 * If battery detection is enabled, ID pin of battery is
		 * connected to MBDET pin of MAX8925. It could be used to
		 * detect battery presence.
		 * Otherwise, we have to assume that battery is always on.
		 */
		if (info->batt_detect)
			info->bat_online = (ret & MAX8925_CHG_MBDET) ? 0 : 1;
		else
			info->bat_online = 1;
		if (ret & MAX8925_CHG_AC_RANGE_MASK)
			info->ac_online = 1;
		else
			info->ac_online = 0;
	}
	/* disable charge */
	max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 1 << 7, 1 << 7);
	/* set charging current in charge topoff mode */
	max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 3 << 5,
			 info->topoff_threshold << 5);
	/* set charging current in fast charge mode */
	max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 7, info->fast_charge);

	return 0;
}

/* Free every IRQ requested by max8925_init_charger() */
static int max8925_deinit_charger(struct max8925_power_info *info)
{
	struct max8925_chip *chip = info->chip;
	int irq;

	irq = chip->irq_base + MAX8925_IRQ_VCHG_DC_OVP;
	for (; irq <= chip->irq_base + MAX8925_IRQ_VCHG_TMR_FAULT; irq++)
		free_irq(irq, info);

	return 0;
}

#ifdef CONFIG_OF
/* Build platform data from the "charger" device-tree child node */
static struct max8925_power_pdata *
max8925_power_dt_init(struct platform_device *pdev)
{
	struct device_node *nproot = pdev->dev.parent->of_node;
	struct device_node *np;
	int batt_detect;
	int topoff_threshold;
	int fast_charge;
	int no_temp_support;
	int no_insert_detect;
	struct max8925_power_pdata *pdata;

	if (!nproot)
		return pdev->dev.platform_data;

	np = of_get_child_by_name(nproot, "charger");
	if (!np) {
		dev_err(&pdev->dev, "failed to find charger node\n");
		return NULL;
	}

	pdata = devm_kzalloc(&pdev->dev,
			sizeof(struct max8925_power_pdata),
			GFP_KERNEL);
	if (!pdata)
		goto ret;

	/*
	 * NOTE(review): the locals above are not initialized and
	 * of_property_read_u32() leaves its output untouched when the
	 * property is absent, so a missing DT property copies stack
	 * garbage into pdata - confirm all properties are mandatory.
	 */
	of_property_read_u32(np, "topoff-threshold", &topoff_threshold);
	of_property_read_u32(np, "batt-detect", &batt_detect);
	of_property_read_u32(np, "fast-charge", &fast_charge);
	of_property_read_u32(np, "no-insert-detect", &no_insert_detect);
	of_property_read_u32(np, "no-temp-support", &no_temp_support);

	pdata->batt_detect = batt_detect;
	pdata->fast_charge = fast_charge;
	pdata->topoff_threshold = topoff_threshold;
	pdata->no_insert_detect = no_insert_detect;
	pdata->no_temp_support = no_temp_support;

ret:
	of_node_put(np);
	return pdata;
}
#else
/* Without CONFIG_OF, fall back to board-supplied platform data */
static struct max8925_power_pdata *
max8925_power_dt_init(struct platform_device *pdev)
{
	return pdev->dev.platform_data;
}
#endif static int max8925_power_probe(struct platform_device *pdev) { struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); struct power_supply_config psy_cfg = {}; /* Only for ac and usb */ struct max8925_power_pdata *pdata = NULL; struct max8925_power_info *info; int ret; pdata = max8925_power_dt_init(pdev); if (!pdata) { dev_err(&pdev->dev, "platform data isn't assigned to " "power supply\n"); return -EINVAL; } info = devm_kzalloc(&pdev->dev, sizeof(struct max8925_power_info), GFP_KERNEL); if (!info) return -ENOMEM; info->chip = chip; info->gpm = chip->i2c; info->adc = chip->adc; platform_set_drvdata(pdev, info); psy_cfg.supplied_to = pdata->supplied_to; psy_cfg.num_supplicants = pdata->num_supplicants; info->ac = power_supply_register(&pdev->dev, &ac_desc, &psy_cfg); if (IS_ERR(info->ac)) { ret = PTR_ERR(info->ac); goto out; } info->ac->dev.parent = &pdev->dev; info->usb = power_supply_register(&pdev->dev, &usb_desc, &psy_cfg); if (IS_ERR(info->usb)) { ret = PTR_ERR(info->usb); goto out_usb; } info->usb->dev.parent = &pdev->dev; info->battery = power_supply_register(&pdev->dev, &battery_desc, NULL); if (IS_ERR(info->battery)) { ret = PTR_ERR(info->battery); goto out_battery; } info->battery->dev.parent = &pdev->dev; info->batt_detect = pdata->batt_detect; info->topoff_threshold = pdata->topoff_threshold; info->fast_charge = pdata->fast_charge; info->set_charger = pdata->set_charger; info->no_temp_support = pdata->no_temp_support; info->no_insert_detect = pdata->no_insert_detect; max8925_init_charger(chip, info); return 0; out_battery: power_supply_unregister(info->battery); out_usb: power_supply_unregister(info->ac); out: return ret; } static int max8925_power_remove(struct platform_device *pdev) { struct max8925_power_info *info = platform_get_drvdata(pdev); if (info) { power_supply_unregister(info->ac); power_supply_unregister(info->usb); power_supply_unregister(info->battery); max8925_deinit_charger(info); } return 0; } static struct platform_driver 
max8925_power_driver = { .probe = max8925_power_probe, .remove = max8925_power_remove, .driver = { .name = "max8925-power", }, }; module_platform_driver(max8925_power_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Power supply driver for MAX8925"); MODULE_ALIAS("platform:max8925-power");
gpl-2.0
vakkov/android-n900-nitdroid_kernel
arch/cris/boot/compressed/misc.c
1364
9049
/* * misc.c * * This is a collection of several routines from gzip-1.0.3 * adapted for Linux. * * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 * puts by Nick Holloway 1993, better puts by Martin Mares 1995 * adaptation for Linux/CRIS Axis Communications AB, 1999 * */ /* where the piggybacked kernel image expects itself to live. * it is the same address we use when we network load an uncompressed * image into DRAM, and it is the address the kernel is linked to live * at by vmlinux.lds.S */ #define KERNEL_LOAD_ADR 0x40004000 #include <linux/types.h> #ifdef CONFIG_ETRAX_ARCH_V32 #include <hwregs/reg_rdwr.h> #include <hwregs/reg_map.h> #include <hwregs/ser_defs.h> #include <hwregs/pinmux_defs.h> #ifdef CONFIG_CRIS_MACH_ARTPEC3 #include <hwregs/clkgen_defs.h> #endif #else #include <arch/svinto.h> #endif /* * gzip declarations */ #define OF(args) args #define STATIC static void *memset(void *s, int c, size_t n); void *memcpy(void *__dest, __const void *__src, size_t __n); #define memzero(s, n) memset((s), 0, (n)) typedef unsigned char uch; typedef unsigned short ush; typedef unsigned long ulg; #define WSIZE 0x8000 /* Window size must be at least 32k, */ /* and a power of two */ static uch *inbuf; /* input buffer */ static uch window[WSIZE]; /* Sliding window buffer */ unsigned inptr = 0; /* index of next byte to be processed in inbuf * After decompression it will contain the * compressed size, and head.S will read it. 
*/ static unsigned outcnt = 0; /* bytes in output buffer */ /* gzip flag byte */ #define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ #define COMMENT 0x10 /* bit 4 set: file comment present */ #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ #define RESERVED 0xC0 /* bit 6,7: reserved */ #define get_byte() (inbuf[inptr++]) /* Diagnostic functions */ #ifdef DEBUG # define Assert(cond, msg) do { \ if (!(cond)) \ error(msg); \ } while (0) # define Trace(x) fprintf x # define Tracev(x) do { \ if (verbose) \ fprintf x; \ } while (0) # define Tracevv(x) do { \ if (verbose > 1) \ fprintf x; \ } while (0) # define Tracec(c, x) do { \ if (verbose && (c)) \ fprintf x; \ } while (0) # define Tracecv(c, x) do { \ if (verbose > 1 && (c)) \ fprintf x; \ } while (0) #else # define Assert(cond, msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) # define Tracec(c, x) # define Tracecv(c, x) #endif static void flush_window(void); static void error(char *m); static void puts(const char *); extern char *input_data; /* lives in head.S */ static long bytes_out; static uch *output_data; static unsigned long output_ptr; /* the "heap" is put directly after the BSS ends, at end */ extern int _end; static long free_mem_ptr = (long)&_end; static long free_mem_end_ptr; #include "../../../../../lib/inflate.c" /* decompressor info and error messages to serial console */ #ifdef CONFIG_ETRAX_ARCH_V32 static inline void serout(const char *s, reg_scope_instances regi_ser) { reg_ser_rs_stat_din rs; reg_ser_rw_dout dout = {.data = *s}; do { rs = REG_RD(ser, regi_ser, rs_stat_din); } while (!rs.tr_rdy);/* Wait for transceiver. 
*/ REG_WR(ser, regi_ser, rw_dout, dout); } #endif static void puts(const char *s) { #ifndef CONFIG_ETRAX_DEBUG_PORT_NULL while (*s) { #ifdef CONFIG_ETRAX_DEBUG_PORT0 #ifdef CONFIG_ETRAX_ARCH_V32 serout(s, regi_ser0); #else while (!(*R_SERIAL0_STATUS & (1 << 5))) ; *R_SERIAL0_TR_DATA = *s++; #endif #endif #ifdef CONFIG_ETRAX_DEBUG_PORT1 #ifdef CONFIG_ETRAX_ARCH_V32 serout(s, regi_ser1); #else while (!(*R_SERIAL1_STATUS & (1 << 5))) ; *R_SERIAL1_TR_DATA = *s++; #endif #endif #ifdef CONFIG_ETRAX_DEBUG_PORT2 #ifdef CONFIG_ETRAX_ARCH_V32 serout(s, regi_ser2); #else while (!(*R_SERIAL2_STATUS & (1 << 5))) ; *R_SERIAL2_TR_DATA = *s++; #endif #endif #ifdef CONFIG_ETRAX_DEBUG_PORT3 #ifdef CONFIG_ETRAX_ARCH_V32 serout(s, regi_ser3); #else while (!(*R_SERIAL3_STATUS & (1 << 5))) ; *R_SERIAL3_TR_DATA = *s++; #endif #endif *s++; } /* CONFIG_ETRAX_DEBUG_PORT_NULL */ #endif } void *memset(void *s, int c, size_t n) { int i; char *ss = (char*)s; for (i=0;i<n;i++) ss[i] = c; return s; } void *memcpy(void *__dest, __const void *__src, size_t __n) { int i; char *d = (char *)__dest, *s = (char *)__src; for (i = 0; i < __n; i++) d[i] = s[i]; return __dest; } /* =========================================================================== * Write the output window window[0..outcnt-1] and update crc and bytes_out. * (Used for the decompressed data only.) 
*/ static void flush_window(void) { ulg c = crc; /* temporary variable */ unsigned n; uch *in, *out, ch; in = window; out = &output_data[output_ptr]; for (n = 0; n < outcnt; n++) { ch = *out = *in; out++; in++; c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); } crc = c; bytes_out += (ulg)outcnt; output_ptr += (ulg)outcnt; outcnt = 0; } static void error(char *x) { puts("\n\n"); puts(x); puts("\n\n -- System halted\n"); while(1); /* Halt */ } void setup_normal_output_buffer(void) { output_data = (char *)KERNEL_LOAD_ADR; } #ifdef CONFIG_ETRAX_ARCH_V32 static inline void serial_setup(reg_scope_instances regi_ser) { reg_ser_rw_xoff xoff; reg_ser_rw_tr_ctrl tr_ctrl; reg_ser_rw_rec_ctrl rec_ctrl; reg_ser_rw_tr_baud_div tr_baud; reg_ser_rw_rec_baud_div rec_baud; /* Turn off XOFF. */ xoff = REG_RD(ser, regi_ser, rw_xoff); xoff.chr = 0; xoff.automatic = regk_ser_no; REG_WR(ser, regi_ser, rw_xoff, xoff); /* Set baudrate and stopbits. */ tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl); rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl); tr_baud = REG_RD(ser, regi_ser, rw_tr_baud_div); rec_baud = REG_RD(ser, regi_ser, rw_rec_baud_div); tr_ctrl.stop_bits = 1; /* 2 stop bits. */ tr_ctrl.en = 1; /* enable transmitter */ rec_ctrl.en = 1; /* enabler receiver */ /* * The baudrate setup used to be a bit fishy, but now transmitter and * receiver are both set to the intended baud rate, 115200. * The magic value is 29.493 MHz. */ tr_ctrl.base_freq = regk_ser_f29_493; rec_ctrl.base_freq = regk_ser_f29_493; tr_baud.div = (29493000 / 8) / 115200; rec_baud.div = (29493000 / 8) / 115200; REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl); REG_WR(ser, regi_ser, rw_tr_baud_div, tr_baud); REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl); REG_WR(ser, regi_ser, rw_rec_baud_div, rec_baud); } #endif void decompress_kernel(void) { char revision; char compile_rev; #ifdef CONFIG_ETRAX_ARCH_V32 /* Need at least a CRISv32 to run. 
*/ compile_rev = 32; #if defined(CONFIG_ETRAX_DEBUG_PORT1) || \ defined(CONFIG_ETRAX_DEBUG_PORT2) || \ defined(CONFIG_ETRAX_DEBUG_PORT3) reg_pinmux_rw_hwprot hwprot; #ifdef CONFIG_CRIS_MACH_ARTPEC3 reg_clkgen_rw_clk_ctrl clk_ctrl; /* Enable corresponding clock region when serial 1..3 selected */ clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes; REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl); #endif /* pinmux setup for ports 1..3 */ hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot); #endif #ifdef CONFIG_ETRAX_DEBUG_PORT0 serial_setup(regi_ser0); #endif #ifdef CONFIG_ETRAX_DEBUG_PORT1 hwprot.ser1 = regk_pinmux_yes; serial_setup(regi_ser1); #endif #ifdef CONFIG_ETRAX_DEBUG_PORT2 hwprot.ser2 = regk_pinmux_yes; serial_setup(regi_ser2); #endif #ifdef CONFIG_ETRAX_DEBUG_PORT3 hwprot.ser3 = regk_pinmux_yes; serial_setup(regi_ser3); #endif #if defined(CONFIG_ETRAX_DEBUG_PORT1) || \ defined(CONFIG_ETRAX_DEBUG_PORT2) || \ defined(CONFIG_ETRAX_DEBUG_PORT3) REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot); #endif /* input_data is set in head.S */ inbuf = input_data; #else /* CRISv10 */ /* Need at least a crisv10 to run. 
*/ compile_rev = 10; /* input_data is set in head.S */ inbuf = input_data; #ifdef CONFIG_ETRAX_DEBUG_PORT0 *R_SERIAL0_XOFF = 0; *R_SERIAL0_BAUD = 0x99; *R_SERIAL0_TR_CTRL = 0x40; #endif #ifdef CONFIG_ETRAX_DEBUG_PORT1 *R_SERIAL1_XOFF = 0; *R_SERIAL1_BAUD = 0x99; *R_SERIAL1_TR_CTRL = 0x40; #endif #ifdef CONFIG_ETRAX_DEBUG_PORT2 *R_GEN_CONFIG = 0x08; *R_SERIAL2_XOFF = 0; *R_SERIAL2_BAUD = 0x99; *R_SERIAL2_TR_CTRL = 0x40; #endif #ifdef CONFIG_ETRAX_DEBUG_PORT3 *R_GEN_CONFIG = 0x100; *R_SERIAL3_XOFF = 0; *R_SERIAL3_BAUD = 0x99; *R_SERIAL3_TR_CTRL = 0x40; #endif #endif setup_normal_output_buffer(); makecrc(); __asm__ volatile ("move $vr,%0" : "=rm" (revision)); if (revision < compile_rev) { #ifdef CONFIG_ETRAX_ARCH_V32 puts("You need an ETRAX FS to run Linux 2.6/crisv32\n"); #else puts("You need an ETRAX 100LX to run linux 2.6\n"); #endif while(1); } puts("Uncompressing Linux...\n"); gunzip(); puts("Done. Now booting the kernel\n"); }
gpl-2.0
christiantroy/linux-allwinner
drivers/net/sfc/filter.c
1876
20035
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/in.h> #include <net/ip.h> #include "efx.h" #include "filter.h" #include "io.h" #include "nic.h" #include "regs.h" /* "Fudge factors" - difference between programmed value and actual depth. * Due to pipelined implementation we need to program H/W with a value that * is larger than the hop limit we want. */ #define FILTER_CTL_SRCH_FUDGE_WILD 3 #define FILTER_CTL_SRCH_FUDGE_FULL 1 /* Hard maximum hop limit. Hardware will time-out beyond 200-something. * We also need to avoid infinite loops in efx_filter_search() when the * table is full. */ #define FILTER_CTL_SRCH_MAX 200 /* Don't try very hard to find space for performance hints, as this is * counter-productive. */ #define FILTER_CTL_SRCH_HINT_MAX 5 enum efx_filter_table_id { EFX_FILTER_TABLE_RX_IP = 0, EFX_FILTER_TABLE_RX_MAC, EFX_FILTER_TABLE_COUNT, }; struct efx_filter_table { enum efx_filter_table_id id; u32 offset; /* address of table relative to BAR */ unsigned size; /* number of entries */ unsigned step; /* step between entries */ unsigned used; /* number currently used */ unsigned long *used_bitmap; struct efx_filter_spec *spec; unsigned search_depth[EFX_FILTER_TYPE_COUNT]; }; struct efx_filter_state { spinlock_t lock; struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; #ifdef CONFIG_RFS_ACCEL u32 *rps_flow_id; unsigned rps_expire_index; #endif }; /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit * key derived from the n-tuple. The initial LFSR state is 0xffff. 
*/ static u16 efx_filter_hash(u32 key) { u16 tmp; /* First 16 rounds */ tmp = 0x1fff ^ key >> 16; tmp = tmp ^ tmp >> 3 ^ tmp >> 6; tmp = tmp ^ tmp >> 9; /* Last 16 rounds */ tmp = tmp ^ tmp << 13 ^ key; tmp = tmp ^ tmp >> 3 ^ tmp >> 6; return tmp ^ tmp >> 9; } /* To allow for hash collisions, filter search continues at these * increments from the first possible entry selected by the hash. */ static u16 efx_filter_increment(u32 key) { return key * 2 - 1; } static enum efx_filter_table_id efx_filter_spec_table_id(const struct efx_filter_spec *spec) { BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2)); BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2)); BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2)); BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2)); BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2)); BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2)); EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC); return spec->type >> 2; } static struct efx_filter_table * efx_filter_spec_table(struct efx_filter_state *state, const struct efx_filter_spec *spec) { if (spec->type == EFX_FILTER_UNSPEC) return NULL; else return &state->table[efx_filter_spec_table_id(spec)]; } static void efx_filter_table_reset_search_depth(struct efx_filter_table *table) { memset(table->search_depth, 0, sizeof(table->search_depth)); } static void efx_filter_push_rx_limits(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table; efx_oword_t filter_ctl; efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); table = &state->table[EFX_FILTER_TABLE_RX_IP]; EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, table->search_depth[EFX_FILTER_TCP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, table->search_depth[EFX_FILTER_TCP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(filter_ctl, 
FRF_BZ_UDP_FULL_SRCH_LIMIT, table->search_depth[EFX_FILTER_UDP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, table->search_depth[EFX_FILTER_UDP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); table = &state->table[EFX_FILTER_TABLE_RX_MAC]; if (table->size) { EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, table->search_depth[EFX_FILTER_MAC_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, table->search_depth[EFX_FILTER_MAC_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); } efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); } static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec, __be32 host1, __be16 port1, __be32 host2, __be16 port2) { spec->data[0] = ntohl(host1) << 16 | ntohs(port1); spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; spec->data[2] = ntohl(host2); } /** * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port * @spec: Specification to initialise * @proto: Transport layer protocol number * @host: Local host address (network byte order) * @port: Local port (network byte order) */ int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, __be32 host, __be16 port) { __be32 host1; __be16 port1; EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); /* This cannot currently be combined with other filtering */ if (spec->type != EFX_FILTER_UNSPEC) return -EPROTONOSUPPORT; if (port == 0) return -EINVAL; switch (proto) { case IPPROTO_TCP: spec->type = EFX_FILTER_TCP_WILD; break; case IPPROTO_UDP: spec->type = EFX_FILTER_UDP_WILD; break; default: return -EPROTONOSUPPORT; } /* Filter is constructed in terms of source and destination, * with the odd wrinkle that the ports are swapped in a UDP * wildcard filter. We need to convert from local and remote * (= zero for wildcard) addresses. 
*/ host1 = 0; if (proto != IPPROTO_UDP) { port1 = 0; } else { port1 = port; port = 0; } __efx_filter_set_ipv4(spec, host1, port1, host, port); return 0; } /** * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports * @spec: Specification to initialise * @proto: Transport layer protocol number * @host: Local host address (network byte order) * @port: Local port (network byte order) * @rhost: Remote host address (network byte order) * @rport: Remote port (network byte order) */ int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, __be32 host, __be16 port, __be32 rhost, __be16 rport) { EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); /* This cannot currently be combined with other filtering */ if (spec->type != EFX_FILTER_UNSPEC) return -EPROTONOSUPPORT; if (port == 0 || rport == 0) return -EINVAL; switch (proto) { case IPPROTO_TCP: spec->type = EFX_FILTER_TCP_FULL; break; case IPPROTO_UDP: spec->type = EFX_FILTER_UDP_FULL; break; default: return -EPROTONOSUPPORT; } __efx_filter_set_ipv4(spec, rhost, rport, host, port); return 0; } /** * efx_filter_set_eth_local - specify local Ethernet address and optional VID * @spec: Specification to initialise * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC * @addr: Local Ethernet MAC address */ int efx_filter_set_eth_local(struct efx_filter_spec *spec, u16 vid, const u8 *addr) { EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); /* This cannot currently be combined with other filtering */ if (spec->type != EFX_FILTER_UNSPEC) return -EPROTONOSUPPORT; if (vid == EFX_FILTER_VID_UNSPEC) { spec->type = EFX_FILTER_MAC_WILD; spec->data[0] = 0; } else { spec->type = EFX_FILTER_MAC_FULL; spec->data[0] = vid; } spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5]; spec->data[2] = addr[0] << 8 | addr[1]; return 0; } /* Build a filter entry and return its n-tuple key. 
*/ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) { u32 data3; switch (efx_filter_spec_table_id(spec)) { case EFX_FILTER_TABLE_RX_IP: { bool is_udp = (spec->type == EFX_FILTER_UDP_FULL || spec->type == EFX_FILTER_UDP_WILD); EFX_POPULATE_OWORD_7( *filter, FRF_BZ_RSS_EN, !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), FRF_BZ_SCATTER_EN, !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), FRF_BZ_TCP_UDP, is_udp, FRF_BZ_RXQ_ID, spec->dmaq_id, EFX_DWORD_2, spec->data[2], EFX_DWORD_1, spec->data[1], EFX_DWORD_0, spec->data[0]); data3 = is_udp; break; } case EFX_FILTER_TABLE_RX_MAC: { bool is_wild = spec->type == EFX_FILTER_MAC_WILD; EFX_POPULATE_OWORD_8( *filter, FRF_CZ_RMFT_RSS_EN, !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), FRF_CZ_RMFT_SCATTER_EN, !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), FRF_CZ_RMFT_IP_OVERRIDE, !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP), FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], FRF_CZ_RMFT_VLAN_ID, spec->data[0]); data3 = is_wild; break; } default: BUG(); } return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; } static bool efx_filter_equal(const struct efx_filter_spec *left, const struct efx_filter_spec *right) { if (left->type != right->type || memcmp(left->data, right->data, sizeof(left->data))) return false; return true; } static int efx_filter_search(struct efx_filter_table *table, struct efx_filter_spec *spec, u32 key, bool for_insert, int *depth_required) { unsigned hash, incr, filter_idx, depth, depth_max; struct efx_filter_spec *cmp; hash = efx_filter_hash(key); incr = efx_filter_increment(key); depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ? 
FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX); for (depth = 1, filter_idx = hash & (table->size - 1); depth <= depth_max && test_bit(filter_idx, table->used_bitmap); ++depth) { cmp = &table->spec[filter_idx]; if (efx_filter_equal(spec, cmp)) goto found; filter_idx = (filter_idx + incr) & (table->size - 1); } if (!for_insert) return -ENOENT; if (depth > depth_max) return -EBUSY; found: *depth_required = depth; return filter_idx; } /* Construct/deconstruct external filter IDs */ static inline int efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index) { return table_id << 16 | index; } /** * efx_filter_insert_filter - add or replace a filter * @efx: NIC in which to insert the filter * @spec: Specification for the filter * @replace: Flag for whether the specified filter may replace a filter * with an identical match expression and equal or lower priority * * On success, return the filter ID. * On failure, return a negative error code. */ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, bool replace) { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = efx_filter_spec_table(state, spec); struct efx_filter_spec *saved_spec; efx_oword_t filter; int filter_idx, depth; u32 key; int rc; if (!table || table->size == 0) return -EINVAL; key = efx_filter_build(&filter, spec); netif_vdbg(efx, hw, efx->net_dev, "%s: type %d search_depth=%d", __func__, spec->type, table->search_depth[spec->type]); spin_lock_bh(&state->lock); rc = efx_filter_search(table, spec, key, true, &depth); if (rc < 0) goto out; filter_idx = rc; BUG_ON(filter_idx >= table->size); saved_spec = &table->spec[filter_idx]; if (test_bit(filter_idx, table->used_bitmap)) { /* Should we replace the existing filter? 
*/ if (!replace) { rc = -EEXIST; goto out; } if (spec->priority < saved_spec->priority) { rc = -EPERM; goto out; } } else { __set_bit(filter_idx, table->used_bitmap); ++table->used; } *saved_spec = *spec; if (table->search_depth[spec->type] < depth) { table->search_depth[spec->type] = depth; efx_filter_push_rx_limits(efx); } efx_writeo(efx, &filter, table->offset + table->step * filter_idx); netif_vdbg(efx, hw, efx->net_dev, "%s: filter type %d index %d rxq %u set", __func__, spec->type, filter_idx, spec->dmaq_id); rc = efx_filter_make_id(table->id, filter_idx); out: spin_unlock_bh(&state->lock); return rc; } static void efx_filter_table_clear_entry(struct efx_nic *efx, struct efx_filter_table *table, int filter_idx) { static efx_oword_t filter; if (test_bit(filter_idx, table->used_bitmap)) { __clear_bit(filter_idx, table->used_bitmap); --table->used; memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); efx_writeo(efx, &filter, table->offset + table->step * filter_idx); } } /** * efx_filter_remove_filter - remove a filter by specification * @efx: NIC from which to remove the filter * @spec: Specification for the filter * * On success, return zero. * On failure, return a negative error code. 
*/ int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec) { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = efx_filter_spec_table(state, spec); struct efx_filter_spec *saved_spec; efx_oword_t filter; int filter_idx, depth; u32 key; int rc; if (!table) return -EINVAL; key = efx_filter_build(&filter, spec); spin_lock_bh(&state->lock); rc = efx_filter_search(table, spec, key, false, &depth); if (rc < 0) goto out; filter_idx = rc; saved_spec = &table->spec[filter_idx]; if (spec->priority < saved_spec->priority) { rc = -EPERM; goto out; } efx_filter_table_clear_entry(efx, table, filter_idx); if (table->used == 0) efx_filter_table_reset_search_depth(table); rc = 0; out: spin_unlock_bh(&state->lock); return rc; } static void efx_filter_table_clear(struct efx_nic *efx, enum efx_filter_table_id table_id, enum efx_filter_priority priority) { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = &state->table[table_id]; int filter_idx; spin_lock_bh(&state->lock); for (filter_idx = 0; filter_idx < table->size; ++filter_idx) if (table->spec[filter_idx].priority <= priority) efx_filter_table_clear_entry(efx, table, filter_idx); if (table->used == 0) efx_filter_table_reset_search_depth(table); spin_unlock_bh(&state->lock); } /** * efx_filter_clear_rx - remove RX filters by priority * @efx: NIC from which to remove the filters * @priority: Maximum priority to remove */ void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) { efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority); efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority); } /* Restore filter stater after reset */ void efx_restore_filters(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; enum efx_filter_table_id table_id; struct efx_filter_table *table; efx_oword_t filter; int filter_idx; spin_lock_bh(&state->lock); for (table_id = 0; table_id < 
EFX_FILTER_TABLE_COUNT; table_id++) { table = &state->table[table_id]; for (filter_idx = 0; filter_idx < table->size; filter_idx++) { if (!test_bit(filter_idx, table->used_bitmap)) continue; efx_filter_build(&filter, &table->spec[filter_idx]); efx_writeo(efx, &filter, table->offset + table->step * filter_idx); } } efx_filter_push_rx_limits(efx); spin_unlock_bh(&state->lock); } int efx_probe_filters(struct efx_nic *efx) { struct efx_filter_state *state; struct efx_filter_table *table; unsigned table_id; state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL); if (!state) return -ENOMEM; efx->filter_state = state; spin_lock_init(&state->lock); if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { #ifdef CONFIG_RFS_ACCEL state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS, sizeof(*state->rps_flow_id), GFP_KERNEL); if (!state->rps_flow_id) goto fail; #endif table = &state->table[EFX_FILTER_TABLE_RX_IP]; table->id = EFX_FILTER_TABLE_RX_IP; table->offset = FR_BZ_RX_FILTER_TBL0; table->size = FR_BZ_RX_FILTER_TBL0_ROWS; table->step = FR_BZ_RX_FILTER_TBL0_STEP; } if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { table = &state->table[EFX_FILTER_TABLE_RX_MAC]; table->id = EFX_FILTER_TABLE_RX_MAC; table->offset = FR_CZ_RX_MAC_FILTER_TBL0; table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; } for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { table = &state->table[table_id]; if (table->size == 0) continue; table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), sizeof(unsigned long), GFP_KERNEL); if (!table->used_bitmap) goto fail; table->spec = vzalloc(table->size * sizeof(*table->spec)); if (!table->spec) goto fail; } return 0; fail: efx_remove_filters(efx); return -ENOMEM; } void efx_remove_filters(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; enum efx_filter_table_id table_id; for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { kfree(state->table[table_id].used_bitmap); 
vfree(state->table[table_id].spec); } #ifdef CONFIG_RFS_ACCEL kfree(state->rps_flow_id); #endif kfree(state); } #ifdef CONFIG_RFS_ACCEL int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_filter_state *state = efx->filter_state; struct efx_filter_spec spec; const struct iphdr *ip; const __be16 *ports; int nhoff; int rc; nhoff = skb_network_offset(skb); if (skb->protocol != htons(ETH_P_IP)) return -EPROTONOSUPPORT; /* RFS must validate the IP header length before calling us */ EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip))); ip = (const struct iphdr *)(skb->data + nhoff); if (ip->frag_off & htons(IP_MF | IP_OFFSET)) return -EPROTONOSUPPORT; EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4)); ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index); rc = efx_filter_set_ipv4_full(&spec, ip->protocol, ip->daddr, ports[1], ip->saddr, ports[0]); if (rc) return rc; rc = efx_filter_insert_filter(efx, &spec, true); if (rc < 0) return rc; /* Remember this so we can check whether to expire the filter later */ state->rps_flow_id[rc] = flow_id; channel = efx_get_channel(efx, skb_get_rx_queue(skb)); ++channel->rfs_filters_added; netif_info(efx, rx_status, efx->net_dev, "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", (ip->protocol == IPPROTO_TCP) ? 
"TCP" : "UDP", &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]), rxq_index, flow_id, rc); return rc; } bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota) { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP]; unsigned mask = table->size - 1; unsigned index; unsigned stop; if (!spin_trylock_bh(&state->lock)) return false; index = state->rps_expire_index; stop = (index + quota) & mask; while (index != stop) { if (test_bit(index, table->used_bitmap) && table->spec[index].priority == EFX_FILTER_PRI_HINT && rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, state->rps_flow_id[index], index)) { netif_info(efx, rx_status, efx->net_dev, "expiring filter %d [flow %u]\n", index, state->rps_flow_id[index]); efx_filter_table_clear_entry(efx, table, index); } index = (index + 1) & mask; } state->rps_expire_index = stop; if (table->used == 0) efx_filter_table_reset_search_depth(table); spin_unlock_bh(&state->lock); return true; } #endif /* CONFIG_RFS_ACCEL */
gpl-2.0
qtekfun/htcDesire820Kernel
kernel/drivers/crypto/sahara.c
2132
28439
/* * Cryptographic API. * * Support for SAHARA cryptographic accelerator. * * Copyright (c) 2013 Vista Silicon S.L. * Author: Javier Martin <javier.martin@vista-silicon.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Based on omap-aes.c and tegra-aes.c */ #include <crypto/algapi.h> #include <crypto/aes.h> #include <linux/clk.h> #include <linux/crypto.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #define SAHARA_NAME "sahara" #define SAHARA_VERSION_3 3 #define SAHARA_TIMEOUT_MS 1000 #define SAHARA_MAX_HW_DESC 2 #define SAHARA_MAX_HW_LINK 20 #define FLAGS_MODE_MASK 0x000f #define FLAGS_ENCRYPT BIT(0) #define FLAGS_CBC BIT(1) #define FLAGS_NEW_KEY BIT(3) #define FLAGS_BUSY 4 #define SAHARA_HDR_BASE 0x00800000 #define SAHARA_HDR_SKHA_ALG_AES 0 #define SAHARA_HDR_SKHA_OP_ENC (1 << 2) #define SAHARA_HDR_SKHA_MODE_ECB (0 << 3) #define SAHARA_HDR_SKHA_MODE_CBC (1 << 3) #define SAHARA_HDR_FORM_DATA (5 << 16) #define SAHARA_HDR_FORM_KEY (8 << 16) #define SAHARA_HDR_LLO (1 << 24) #define SAHARA_HDR_CHA_SKHA (1 << 28) #define SAHARA_HDR_CHA_MDHA (2 << 28) #define SAHARA_HDR_PARITY_BIT (1 << 31) /* SAHARA can only process one request at a time */ #define SAHARA_QUEUE_LENGTH 1 #define SAHARA_REG_VERSION 0x00 #define SAHARA_REG_DAR 0x04 #define SAHARA_REG_CONTROL 0x08 #define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24) #define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16) #define SAHARA_CONTROL_RNG_AUTORSD (1 << 7) #define SAHARA_CONTROL_ENABLE_INT (1 << 4) #define SAHARA_REG_CMD 0x0C #define SAHARA_CMD_RESET (1 << 0) #define SAHARA_CMD_CLEAR_INT (1 << 8) #define SAHARA_CMD_CLEAR_ERR (1 << 9) #define SAHARA_CMD_SINGLE_STEP (1 << 10) #define SAHARA_CMD_MODE_BATCH (1 << 16) #define 
SAHARA_CMD_MODE_DEBUG (1 << 18) #define SAHARA_REG_STATUS 0x10 #define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7) #define SAHARA_STATE_IDLE 0 #define SAHARA_STATE_BUSY 1 #define SAHARA_STATE_ERR 2 #define SAHARA_STATE_FAULT 3 #define SAHARA_STATE_COMPLETE 4 #define SAHARA_STATE_COMP_FLAG (1 << 2) #define SAHARA_STATUS_DAR_FULL (1 << 3) #define SAHARA_STATUS_ERROR (1 << 4) #define SAHARA_STATUS_SECURE (1 << 5) #define SAHARA_STATUS_FAIL (1 << 6) #define SAHARA_STATUS_INIT (1 << 7) #define SAHARA_STATUS_RNG_RESEED (1 << 8) #define SAHARA_STATUS_ACTIVE_RNG (1 << 9) #define SAHARA_STATUS_ACTIVE_MDHA (1 << 10) #define SAHARA_STATUS_ACTIVE_SKHA (1 << 11) #define SAHARA_STATUS_MODE_BATCH (1 << 16) #define SAHARA_STATUS_MODE_DEDICATED (1 << 17) #define SAHARA_STATUS_MODE_DEBUG (1 << 18) #define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff) #define SAHARA_REG_ERRSTATUS 0x14 #define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf) #define SAHARA_ERRSOURCE_CHA 14 #define SAHARA_ERRSOURCE_DMA 15 #define SAHARA_ERRSTATUS_DMA_DIR (1 << 8) #define SAHARA_ERRSTATUS_GET_DMASZ(x)(((x) >> 9) & 0x3) #define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7) #define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff) #define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3) #define SAHARA_REG_FADDR 0x18 #define SAHARA_REG_CDAR 0x1C #define SAHARA_REG_IDAR 0x20 struct sahara_hw_desc { u32 hdr; u32 len1; dma_addr_t p1; u32 len2; dma_addr_t p2; dma_addr_t next; }; struct sahara_hw_link { u32 len; dma_addr_t p; dma_addr_t next; }; struct sahara_ctx { struct sahara_dev *dev; unsigned long flags; int keylen; u8 key[AES_KEYSIZE_128]; struct crypto_ablkcipher *fallback; }; struct sahara_aes_reqctx { unsigned long mode; }; struct sahara_dev { struct device *device; void __iomem *regs_base; struct clk *clk_ipg; struct clk *clk_ahb; struct sahara_ctx *ctx; spinlock_t lock; struct crypto_queue queue; unsigned long flags; struct tasklet_struct done_task; struct tasklet_struct queue_task; struct 
sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC]; dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC]; u8 *key_base; dma_addr_t key_phys_base; u8 *iv_base; dma_addr_t iv_phys_base; struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK]; dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK]; struct ablkcipher_request *req; size_t total; struct scatterlist *in_sg; unsigned int nb_in_sg; struct scatterlist *out_sg; unsigned int nb_out_sg; u32 error; struct timer_list watchdog; }; static struct sahara_dev *dev_ptr; static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg) { writel(data, dev->regs_base + reg); } static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg) { return readl(dev->regs_base + reg); } static u32 sahara_aes_key_hdr(struct sahara_dev *dev) { u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES | SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO | SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT; if (dev->flags & FLAGS_CBC) { hdr |= SAHARA_HDR_SKHA_MODE_CBC; hdr ^= SAHARA_HDR_PARITY_BIT; } if (dev->flags & FLAGS_ENCRYPT) { hdr |= SAHARA_HDR_SKHA_OP_ENC; hdr ^= SAHARA_HDR_PARITY_BIT; } return hdr; } static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev) { return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA | SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT; } static int sahara_sg_length(struct scatterlist *sg, unsigned int total) { int sg_nb; unsigned int len; struct scatterlist *sg_list; sg_nb = 0; sg_list = sg; while (total) { len = min(sg_list->length, total); sg_nb++; total -= len; sg_list = sg_next(sg_list); if (!sg_list) total = 0; } return sg_nb; } static char *sahara_err_src[16] = { "No error", "Header error", "Descriptor length error", "Descriptor length or pointer error", "Link length error", "Link pointer error", "Input buffer error", "Output buffer error", "Output buffer starvation", "Internal state fault", "General descriptor problem", "Reserved", "Descriptor address error", "Link address error", "CHA error", "DMA error" }; static char *sahara_err_dmasize[4] 
= { "Byte transfer", "Half-word transfer", "Word transfer", "Reserved" }; static char *sahara_err_dmasrc[8] = { "No error", "AHB bus error", "Internal IP bus error", "Parity error", "DMA crosses 256 byte boundary", "DMA is busy", "Reserved", "DMA HW error" }; static char *sahara_cha_errsrc[12] = { "Input buffer non-empty", "Illegal address", "Illegal mode", "Illegal data size", "Illegal key size", "Write during processing", "CTX read during processing", "HW error", "Input buffer disabled/underflow", "Output buffer disabled/overflow", "DES key parity error", "Reserved" }; static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" }; static void sahara_decode_error(struct sahara_dev *dev, unsigned int error) { u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error); u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error)); dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error); dev_err(dev->device, " - %s.\n", sahara_err_src[source]); if (source == SAHARA_ERRSOURCE_DMA) { if (error & SAHARA_ERRSTATUS_DMA_DIR) dev_err(dev->device, " * DMA read.\n"); else dev_err(dev->device, " * DMA write.\n"); dev_err(dev->device, " * %s.\n", sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]); dev_err(dev->device, " * %s.\n", sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]); } else if (source == SAHARA_ERRSOURCE_CHA) { dev_err(dev->device, " * %s.\n", sahara_cha_errsrc[chasrc]); dev_err(dev->device, " * %s.\n", sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]); } dev_err(dev->device, "\n"); } static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" }; static void sahara_decode_status(struct sahara_dev *dev, unsigned int status) { u8 state; if (!IS_ENABLED(DEBUG)) return; state = SAHARA_STATUS_GET_STATE(status); dev_dbg(dev->device, "%s: Status Register = 0x%08x\n", __func__, status); dev_dbg(dev->device, " - State = %d:\n", state); if (state & SAHARA_STATE_COMP_FLAG) dev_dbg(dev->device, " * Descriptor completed. 
IRQ pending.\n"); dev_dbg(dev->device, " * %s.\n", sahara_state[state & ~SAHARA_STATE_COMP_FLAG]); if (status & SAHARA_STATUS_DAR_FULL) dev_dbg(dev->device, " - DAR Full.\n"); if (status & SAHARA_STATUS_ERROR) dev_dbg(dev->device, " - Error.\n"); if (status & SAHARA_STATUS_SECURE) dev_dbg(dev->device, " - Secure.\n"); if (status & SAHARA_STATUS_FAIL) dev_dbg(dev->device, " - Fail.\n"); if (status & SAHARA_STATUS_RNG_RESEED) dev_dbg(dev->device, " - RNG Reseed Request.\n"); if (status & SAHARA_STATUS_ACTIVE_RNG) dev_dbg(dev->device, " - RNG Active.\n"); if (status & SAHARA_STATUS_ACTIVE_MDHA) dev_dbg(dev->device, " - MDHA Active.\n"); if (status & SAHARA_STATUS_ACTIVE_SKHA) dev_dbg(dev->device, " - SKHA Active.\n"); if (status & SAHARA_STATUS_MODE_BATCH) dev_dbg(dev->device, " - Batch Mode.\n"); else if (status & SAHARA_STATUS_MODE_DEDICATED) dev_dbg(dev->device, " - Decidated Mode.\n"); else if (status & SAHARA_STATUS_MODE_DEBUG) dev_dbg(dev->device, " - Debug Mode.\n"); dev_dbg(dev->device, " - Internal state = 0x%02x\n", SAHARA_STATUS_GET_ISTATE(status)); dev_dbg(dev->device, "Current DAR: 0x%08x\n", sahara_read(dev, SAHARA_REG_CDAR)); dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n", sahara_read(dev, SAHARA_REG_IDAR)); } static void sahara_dump_descriptors(struct sahara_dev *dev) { int i; if (!IS_ENABLED(DEBUG)) return; for (i = 0; i < SAHARA_MAX_HW_DESC; i++) { dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n", i, dev->hw_phys_desc[i]); dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr); dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1); dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1); dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2); dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2); dev_dbg(dev->device, "\tnext = 0x%08x\n", dev->hw_desc[i]->next); } dev_dbg(dev->device, "\n"); } static void sahara_dump_links(struct sahara_dev *dev) { int i; if (!IS_ENABLED(DEBUG)) return; for (i = 0; i < 
SAHARA_MAX_HW_LINK; i++) { dev_dbg(dev->device, "Link (%d) (0x%08x):\n", i, dev->hw_phys_link[i]); dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len); dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p); dev_dbg(dev->device, "\tnext = 0x%08x\n", dev->hw_link[i]->next); } dev_dbg(dev->device, "\n"); } static void sahara_aes_done_task(unsigned long data) { struct sahara_dev *dev = (struct sahara_dev *)data; dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, DMA_TO_DEVICE); dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_FROM_DEVICE); spin_lock(&dev->lock); clear_bit(FLAGS_BUSY, &dev->flags); spin_unlock(&dev->lock); dev->req->base.complete(&dev->req->base, dev->error); } void sahara_watchdog(unsigned long data) { struct sahara_dev *dev = (struct sahara_dev *)data; unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS); sahara_decode_status(dev, stat); sahara_decode_error(dev, err); dev->error = -ETIMEDOUT; sahara_aes_done_task(data); } static int sahara_hw_descriptor_create(struct sahara_dev *dev) { struct sahara_ctx *ctx = dev->ctx; struct scatterlist *sg; int ret; int i, j; /* Copy new key if necessary */ if (ctx->flags & FLAGS_NEW_KEY) { memcpy(dev->key_base, ctx->key, ctx->keylen); ctx->flags &= ~FLAGS_NEW_KEY; if (dev->flags & FLAGS_CBC) { dev->hw_desc[0]->len1 = AES_BLOCK_SIZE; dev->hw_desc[0]->p1 = dev->iv_phys_base; } else { dev->hw_desc[0]->len1 = 0; dev->hw_desc[0]->p1 = 0; } dev->hw_desc[0]->len2 = ctx->keylen; dev->hw_desc[0]->p2 = dev->key_phys_base; dev->hw_desc[0]->next = dev->hw_phys_desc[1]; } dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev); dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total); dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total); if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) { dev_err(dev->device, "not enough hw links (%d)\n", dev->nb_in_sg + dev->nb_out_sg); return -EINVAL; } ret = dma_map_sg(dev->device, dev->in_sg, 
dev->nb_in_sg, DMA_TO_DEVICE); if (ret != dev->nb_in_sg) { dev_err(dev->device, "couldn't map in sg\n"); goto unmap_in; } ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg, DMA_FROM_DEVICE); if (ret != dev->nb_out_sg) { dev_err(dev->device, "couldn't map out sg\n"); goto unmap_out; } /* Create input links */ dev->hw_desc[1]->p1 = dev->hw_phys_link[0]; sg = dev->in_sg; for (i = 0; i < dev->nb_in_sg; i++) { dev->hw_link[i]->len = sg->length; dev->hw_link[i]->p = sg->dma_address; if (i == (dev->nb_in_sg - 1)) { dev->hw_link[i]->next = 0; } else { dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; sg = sg_next(sg); } } /* Create output links */ dev->hw_desc[1]->p2 = dev->hw_phys_link[i]; sg = dev->out_sg; for (j = i; j < dev->nb_out_sg + i; j++) { dev->hw_link[j]->len = sg->length; dev->hw_link[j]->p = sg->dma_address; if (j == (dev->nb_out_sg + i - 1)) { dev->hw_link[j]->next = 0; } else { dev->hw_link[j]->next = dev->hw_phys_link[j + 1]; sg = sg_next(sg); } } /* Fill remaining fields of hw_desc[1] */ dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev); dev->hw_desc[1]->len1 = dev->total; dev->hw_desc[1]->len2 = dev->total; dev->hw_desc[1]->next = 0; sahara_dump_descriptors(dev); sahara_dump_links(dev); /* Start processing descriptor chain. 
*/ mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS)); sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR); return 0; unmap_out: dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, DMA_TO_DEVICE); unmap_in: dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_FROM_DEVICE); return -EINVAL; } static void sahara_aes_queue_task(unsigned long data) { struct sahara_dev *dev = (struct sahara_dev *)data; struct crypto_async_request *async_req, *backlog; struct sahara_ctx *ctx; struct sahara_aes_reqctx *rctx; struct ablkcipher_request *req; int ret; spin_lock(&dev->lock); backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); if (!async_req) clear_bit(FLAGS_BUSY, &dev->flags); spin_unlock(&dev->lock); if (!async_req) return; if (backlog) backlog->complete(backlog, -EINPROGRESS); req = ablkcipher_request_cast(async_req); /* Request is ready to be dispatched by the device */ dev_dbg(dev->device, "dispatch request (nbytes=%d, src=%p, dst=%p)\n", req->nbytes, req->src, req->dst); /* assign new request to device */ dev->req = req; dev->total = req->nbytes; dev->in_sg = req->src; dev->out_sg = req->dst; rctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); rctx->mode &= FLAGS_MODE_MASK; dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode; if ((dev->flags & FLAGS_CBC) && req->info) memcpy(dev->iv_base, req->info, AES_KEYSIZE_128); /* assign new context to device */ ctx->dev = dev; dev->ctx = ctx; ret = sahara_hw_descriptor_create(dev); if (ret < 0) { spin_lock(&dev->lock); clear_bit(FLAGS_BUSY, &dev->flags); spin_unlock(&dev->lock); dev->req->base.complete(&dev->req->base, ret); } } static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm); int ret; ctx->keylen = keylen; /* SAHARA only supports 128bit keys */ if (keylen == AES_KEYSIZE_128) { memcpy(ctx->key, key, keylen); 
ctx->flags |= FLAGS_NEW_KEY; return 0; } if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; /* * The requested key size is not supported by HW, do a fallback. */ ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; ctx->fallback->base.crt_flags |= (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); if (ret) { struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm); tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK; tfm_aux->crt_flags |= (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK); } return ret; } static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req); struct sahara_dev *dev = dev_ptr; int err = 0; int busy; dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { dev_err(dev->device, "request size is not exact amount of AES blocks\n"); return -EINVAL; } ctx->dev = dev; rctx->mode = mode; spin_lock_bh(&dev->lock); err = ablkcipher_enqueue_request(&dev->queue, req); busy = test_and_set_bit(FLAGS_BUSY, &dev->flags); spin_unlock_bh(&dev->lock); if (!busy) tasklet_schedule(&dev->queue_task); return err; } static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { ablkcipher_request_set_tfm(req, ctx->fallback); err = crypto_ablkcipher_encrypt(req); ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); return err; } return sahara_aes_crypt(req, FLAGS_ENCRYPT); } static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req) { struct crypto_tfm *tfm = 
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { ablkcipher_request_set_tfm(req, ctx->fallback); err = crypto_ablkcipher_decrypt(req); ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); return err; } return sahara_aes_crypt(req, 0); } static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { ablkcipher_request_set_tfm(req, ctx->fallback); err = crypto_ablkcipher_encrypt(req); ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); return err; } return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); } static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { ablkcipher_request_set_tfm(req, ctx->fallback); err = crypto_ablkcipher_decrypt(req); ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); return err; } return sahara_aes_crypt(req, FLAGS_CBC); } static int sahara_aes_cra_init(struct crypto_tfm *tfm) { const char *name = tfm->__crt_alg->cra_name; struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); ctx->fallback = crypto_alloc_ablkcipher(name, 0, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) { pr_err("Error allocating fallback algo %s\n", name); return PTR_ERR(ctx->fallback); } tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx); return 0; } static void sahara_aes_cra_exit(struct crypto_tfm *tfm) { struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->fallback) crypto_free_ablkcipher(ctx->fallback); ctx->fallback = 
NULL; } static struct crypto_alg aes_algs[] = { { .cra_name = "ecb(aes)", .cra_driver_name = "sahara-ecb-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sahara_ctx), .cra_alignmask = 0x0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = sahara_aes_cra_init, .cra_exit = sahara_aes_cra_exit, .cra_u.ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE , .max_keysize = AES_MAX_KEY_SIZE, .setkey = sahara_aes_setkey, .encrypt = sahara_aes_ecb_encrypt, .decrypt = sahara_aes_ecb_decrypt, } }, { .cra_name = "cbc(aes)", .cra_driver_name = "sahara-cbc-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sahara_ctx), .cra_alignmask = 0x0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = sahara_aes_cra_init, .cra_exit = sahara_aes_cra_exit, .cra_u.ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE , .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = sahara_aes_setkey, .encrypt = sahara_aes_cbc_encrypt, .decrypt = sahara_aes_cbc_decrypt, } } }; static irqreturn_t sahara_irq_handler(int irq, void *data) { struct sahara_dev *dev = (struct sahara_dev *)data; unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS); unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); del_timer(&dev->watchdog); sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR, SAHARA_REG_CMD); sahara_decode_status(dev, stat); if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) { return IRQ_NONE; } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) { dev->error = 0; } else { sahara_decode_error(dev, err); dev->error = -EINVAL; } tasklet_schedule(&dev->done_task); return IRQ_HANDLED; } static int sahara_register_algs(struct sahara_dev *dev) { int err, i, j; for (i = 0; i < 
ARRAY_SIZE(aes_algs); i++) { INIT_LIST_HEAD(&aes_algs[i].cra_list); err = crypto_register_alg(&aes_algs[i]); if (err) goto err_aes_algs; } return 0; err_aes_algs: for (j = 0; j < i; j++) crypto_unregister_alg(&aes_algs[j]); return err; } static void sahara_unregister_algs(struct sahara_dev *dev) { int i; for (i = 0; i < ARRAY_SIZE(aes_algs); i++) crypto_unregister_alg(&aes_algs[i]); } static struct platform_device_id sahara_platform_ids[] = { { .name = "sahara-imx27" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, sahara_platform_ids); static struct of_device_id sahara_dt_ids[] = { { .compatible = "fsl,imx27-sahara" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sahara_dt_ids); static int sahara_probe(struct platform_device *pdev) { struct sahara_dev *dev; struct resource *res; u32 version; int irq; int err; int i; dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL); if (dev == NULL) { dev_err(&pdev->dev, "unable to alloc data struct.\n"); return -ENOMEM; } dev->device = &pdev->dev; platform_set_drvdata(pdev, dev); /* Get the base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get memory region resource\n"); return -ENODEV; } if (devm_request_mem_region(&pdev->dev, res->start, resource_size(res), SAHARA_NAME) == NULL) { dev_err(&pdev->dev, "failed to request memory region\n"); return -ENOENT; } dev->regs_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!dev->regs_base) { dev_err(&pdev->dev, "failed to ioremap address region\n"); return -ENOENT; } /* Get the IRQ */ irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to get irq resource\n"); return irq; } if (devm_request_irq(&pdev->dev, irq, sahara_irq_handler, 0, SAHARA_NAME, dev) < 0) { dev_err(&pdev->dev, "failed to request irq\n"); return -ENOENT; } /* clocks */ dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(dev->clk_ipg)) { dev_err(&pdev->dev, "Could not get ipg 
clock\n"); return PTR_ERR(dev->clk_ipg); } dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(dev->clk_ahb)) { dev_err(&pdev->dev, "Could not get ahb clock\n"); return PTR_ERR(dev->clk_ahb); } /* Allocate HW descriptors */ dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev, SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), &dev->hw_phys_desc[0], GFP_KERNEL); if (!dev->hw_desc[0]) { dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); return -ENOMEM; } dev->hw_desc[1] = dev->hw_desc[0] + 1; dev->hw_phys_desc[1] = dev->hw_phys_desc[0] + sizeof(struct sahara_hw_desc); /* Allocate space for iv and key */ dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, &dev->key_phys_base, GFP_KERNEL); if (!dev->key_base) { dev_err(&pdev->dev, "Could not allocate memory for key\n"); err = -ENOMEM; goto err_key; } dev->iv_base = dev->key_base + AES_KEYSIZE_128; dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128; /* Allocate space for HW links */ dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), &dev->hw_phys_link[0], GFP_KERNEL); if (!dev->hw_link) { dev_err(&pdev->dev, "Could not allocate hw links\n"); err = -ENOMEM; goto err_link; } for (i = 1; i < SAHARA_MAX_HW_LINK; i++) { dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] + sizeof(struct sahara_hw_link); dev->hw_link[i] = dev->hw_link[i - 1] + 1; } crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); dev_ptr = dev; tasklet_init(&dev->queue_task, sahara_aes_queue_task, (unsigned long)dev); tasklet_init(&dev->done_task, sahara_aes_done_task, (unsigned long)dev); init_timer(&dev->watchdog); dev->watchdog.function = &sahara_watchdog; dev->watchdog.data = (unsigned long)dev; clk_prepare_enable(dev->clk_ipg); clk_prepare_enable(dev->clk_ahb); version = sahara_read(dev, SAHARA_REG_VERSION); if (version != SAHARA_VERSION_3) { dev_err(&pdev->dev, "SAHARA version %d not supported\n", version); err = -ENODEV; goto err_algs; } sahara_write(dev, 
SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH, SAHARA_REG_CMD); sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) | SAHARA_CONTROL_SET_MAXBURST(8) | SAHARA_CONTROL_RNG_AUTORSD | SAHARA_CONTROL_ENABLE_INT, SAHARA_REG_CONTROL); err = sahara_register_algs(dev); if (err) goto err_algs; dev_info(&pdev->dev, "SAHARA version %d initialized\n", version); return 0; err_algs: dma_free_coherent(&pdev->dev, SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), dev->hw_link[0], dev->hw_phys_link[0]); clk_disable_unprepare(dev->clk_ipg); clk_disable_unprepare(dev->clk_ahb); dev_ptr = NULL; err_link: dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->key_base, dev->key_phys_base); err_key: dma_free_coherent(&pdev->dev, SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), dev->hw_desc[0], dev->hw_phys_desc[0]); return err; } static int sahara_remove(struct platform_device *pdev) { struct sahara_dev *dev = platform_get_drvdata(pdev); dma_free_coherent(&pdev->dev, SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), dev->hw_link[0], dev->hw_phys_link[0]); dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->key_base, dev->key_phys_base); dma_free_coherent(&pdev->dev, SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), dev->hw_desc[0], dev->hw_phys_desc[0]); tasklet_kill(&dev->done_task); tasklet_kill(&dev->queue_task); sahara_unregister_algs(dev); clk_disable_unprepare(dev->clk_ipg); clk_disable_unprepare(dev->clk_ahb); dev_ptr = NULL; return 0; } static struct platform_driver sahara_driver = { .probe = sahara_probe, .remove = sahara_remove, .driver = { .name = SAHARA_NAME, .owner = THIS_MODULE, .of_match_table = of_match_ptr(sahara_dt_ids), }, .id_table = sahara_platform_ids, }; module_platform_driver(sahara_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>"); MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
gpl-2.0
JamesKid/linux
arch/powerpc/kernel/smp-tbsync.c
2132
3112
/* * Smp timebase synchronization for ppc. * * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se) * */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/unistd.h> #include <linux/slab.h> #include <linux/atomic.h> #include <asm/smp.h> #include <asm/time.h> #define NUM_ITER 300 enum { kExit=0, kSetAndTest, kTest }; static struct { volatile u64 tb; volatile u64 mark; volatile int cmd; volatile int handshake; int filler[2]; volatile int ack; int filler2[7]; volatile int race_result; } *tbsync; static volatile int running; static void enter_contest(u64 mark, long add) { while (get_tb() < mark) tbsync->race_result = add; } void smp_generic_take_timebase(void) { int cmd; u64 tb; unsigned long flags; local_irq_save(flags); while (!running) barrier(); rmb(); for (;;) { tbsync->ack = 1; while (!tbsync->handshake) barrier(); rmb(); cmd = tbsync->cmd; tb = tbsync->tb; mb(); tbsync->ack = 0; if (cmd == kExit) break; while (tbsync->handshake) barrier(); if (cmd == kSetAndTest) set_tb(tb >> 32, tb & 0xfffffffful); enter_contest(tbsync->mark, -1); } local_irq_restore(flags); } static int start_contest(int cmd, long offset, int num) { int i, score=0; u64 tb; u64 mark; tbsync->cmd = cmd; local_irq_disable(); for (i = -3; i < num; ) { tb = get_tb() + 400; tbsync->tb = tb + offset; tbsync->mark = mark = tb + 400; wmb(); tbsync->handshake = 1; while (tbsync->ack) barrier(); while (get_tb() <= tb) barrier(); tbsync->handshake = 0; enter_contest(mark, 1); while (!tbsync->ack) barrier(); if (i++ > 0) score += tbsync->race_result; } local_irq_enable(); return score; } void smp_generic_give_timebase(void) { int i, score, score2, old, min=0, max=5000, offset=1000; pr_debug("Software timebase sync\n"); /* if this fails then this kernel won't work anyway... 
*/ tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL ); mb(); running = 1; while (!tbsync->ack) barrier(); pr_debug("Got ack\n"); /* binary search */ for (old = -1; old != offset ; offset = (min+max) / 2) { score = start_contest(kSetAndTest, offset, NUM_ITER); pr_debug("score %d, offset %d\n", score, offset ); if( score > 0 ) max = offset; else min = offset; old = offset; } score = start_contest(kSetAndTest, min, NUM_ITER); score2 = start_contest(kSetAndTest, max, NUM_ITER); pr_debug("Min %d (score %d), Max %d (score %d)\n", min, score, max, score2); score = abs(score); score2 = abs(score2); offset = (score < score2) ? min : max; /* guard against inaccurate mttb */ for (i = 0; i < 10; i++) { start_contest(kSetAndTest, offset, NUM_ITER/10); if ((score2 = start_contest(kTest, offset, NUM_ITER)) < 0) score2 = -score2; if (score2 <= score || score2 < 20) break; } pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); /* exiting */ tbsync->cmd = kExit; wmb(); tbsync->handshake = 1; while (tbsync->ack) barrier(); tbsync->handshake = 0; kfree(tbsync); tbsync = NULL; running = 0; }
gpl-2.0
pingyi/I9100
arch/arm/mach-msm/devices-qsd8x50.c
2388
8916
/* * Copyright (C) 2008 Google, Inc. * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/clkdev.h> #include <linux/dma-mapping.h> #include <mach/irqs.h> #include <mach/msm_iomap.h> #include <mach/dma.h> #include <mach/board.h> #include "devices.h" #include <asm/mach/flash.h> #include <mach/mmc.h> #include "clock-pcom.h" static struct resource resources_uart3[] = { { .start = INT_UART3, .end = INT_UART3, .flags = IORESOURCE_IRQ, }, { .start = MSM_UART3_PHYS, .end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1, .flags = IORESOURCE_MEM, .name = "uart_resource" }, }; struct platform_device msm_device_uart3 = { .name = "msm_serial", .id = 2, .num_resources = ARRAY_SIZE(resources_uart3), .resource = resources_uart3, }; struct platform_device msm_device_smd = { .name = "msm_smd", .id = -1, }; static struct resource resources_otg[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_otg = { .name = "msm_otg", .id = -1, .num_resources = ARRAY_SIZE(resources_otg), .resource = resources_otg, .dev = { .coherent_dma_mask = 0xffffffff, }, }; static struct resource resources_hsusb[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_hsusb = { .name = 
"msm_hsusb", .id = -1, .num_resources = ARRAY_SIZE(resources_hsusb), .resource = resources_hsusb, .dev = { .coherent_dma_mask = 0xffffffff, }, }; static u64 dma_mask = 0xffffffffULL; static struct resource resources_hsusb_host[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_hsusb_host = { .name = "msm_hsusb_host", .id = -1, .num_resources = ARRAY_SIZE(resources_hsusb_host), .resource = resources_hsusb_host, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; static struct resource resources_sdc1[] = { { .start = MSM_SDC1_PHYS, .end = MSM_SDC1_PHYS + MSM_SDC1_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_SDC1_0, .end = INT_SDC1_0, .flags = IORESOURCE_IRQ, .name = "cmd_irq", }, { .start = INT_SDC1_1, .end = INT_SDC1_1, .flags = IORESOURCE_IRQ, .name = "pio_irq", }, { .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, .name = "status_irq" }, { .start = 8, .end = 8, .flags = IORESOURCE_DMA, }, }; static struct resource resources_sdc2[] = { { .start = MSM_SDC2_PHYS, .end = MSM_SDC2_PHYS + MSM_SDC2_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_SDC2_0, .end = INT_SDC2_0, .flags = IORESOURCE_IRQ, .name = "cmd_irq", }, { .start = INT_SDC2_1, .end = INT_SDC2_1, .flags = IORESOURCE_IRQ, .name = "pio_irq", }, { .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, .name = "status_irq" }, { .start = 8, .end = 8, .flags = IORESOURCE_DMA, }, }; static struct resource resources_sdc3[] = { { .start = MSM_SDC3_PHYS, .end = MSM_SDC3_PHYS + MSM_SDC3_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_SDC3_0, .end = INT_SDC3_0, .flags = IORESOURCE_IRQ, .name = "cmd_irq", }, { .start = INT_SDC3_1, .end = INT_SDC3_1, .flags = IORESOURCE_IRQ, .name = "pio_irq", }, { .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, .name = "status_irq" }, { .start = 8, .end = 8, .flags = IORESOURCE_DMA, }, }; static struct 
resource resources_sdc4[] = { { .start = MSM_SDC4_PHYS, .end = MSM_SDC4_PHYS + MSM_SDC4_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_SDC4_0, .end = INT_SDC4_0, .flags = IORESOURCE_IRQ, .name = "cmd_irq", }, { .start = INT_SDC4_1, .end = INT_SDC4_1, .flags = IORESOURCE_IRQ, .name = "pio_irq", }, { .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, .name = "status_irq" }, { .start = 8, .end = 8, .flags = IORESOURCE_DMA, }, }; struct platform_device msm_device_sdc1 = { .name = "msm_sdcc", .id = 1, .num_resources = ARRAY_SIZE(resources_sdc1), .resource = resources_sdc1, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc2 = { .name = "msm_sdcc", .id = 2, .num_resources = ARRAY_SIZE(resources_sdc2), .resource = resources_sdc2, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc3 = { .name = "msm_sdcc", .id = 3, .num_resources = ARRAY_SIZE(resources_sdc3), .resource = resources_sdc3, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc4 = { .name = "msm_sdcc", .id = 4, .num_resources = ARRAY_SIZE(resources_sdc4), .resource = resources_sdc4, .dev = { .coherent_dma_mask = 0xffffffff, }, }; static struct platform_device *msm_sdcc_devices[] __initdata = { &msm_device_sdc1, &msm_device_sdc2, &msm_device_sdc3, &msm_device_sdc4, }; int __init msm_add_sdcc(unsigned int controller, struct msm_mmc_platform_data *plat, unsigned int stat_irq, unsigned long stat_irq_flags) { struct platform_device *pdev; struct resource *res; if (controller < 1 || controller > 4) return -EINVAL; pdev = msm_sdcc_devices[controller-1]; pdev->dev.platform_data = plat; res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq"); if (!res) return -EINVAL; else if (stat_irq) { res->start = res->end = stat_irq; res->flags &= ~IORESOURCE_DISABLED; res->flags |= stat_irq_flags; } return platform_device_register(pdev); } struct clk_lookup msm_clocks_8x50[] = { CLK_PCOM("adm_clk", ADM_CLK, NULL, 
0), CLK_PCOM("ce_clk", CE_CLK, NULL, 0), CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN), CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0), CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0), CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX), CLK_PCOM("gp_clk", GP_CLK, NULL, 0), CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, 0), CLK_PCOM("i2c_clk", I2C_CLK, NULL, 0), CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0), CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0), CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF), CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0), CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX), CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF), CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0), CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0), CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0), CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN), CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0), CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF), CLK_PCOM("sdc_clk", SDC1_CLK, "msm_sdcc.1", OFF), CLK_PCOM("sdc_pclk", SDC1_P_CLK, "msm_sdcc.1", OFF), CLK_PCOM("sdc_clk", SDC2_CLK, "msm_sdcc.2", OFF), CLK_PCOM("sdc_pclk", SDC2_P_CLK, "msm_sdcc.2", OFF), CLK_PCOM("sdc_clk", SDC3_CLK, "msm_sdcc.3", OFF), CLK_PCOM("sdc_pclk", SDC3_P_CLK, "msm_sdcc.3", OFF), CLK_PCOM("sdc_clk", SDC4_CLK, "msm_sdcc.4", OFF), CLK_PCOM("sdc_pclk", SDC4_P_CLK, "msm_sdcc.4", OFF), CLK_PCOM("spi_clk", SPI_CLK, NULL, 0), CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0), CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0), CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), CLK_PCOM("uart_clk", UART1_CLK, NULL, OFF), CLK_PCOM("uart_clk", UART2_CLK, NULL, 0), CLK_PCOM("uart_clk", UART3_CLK, "msm_serial.2", OFF), CLK_PCOM("uartdm_clk", UART1DM_CLK, NULL, OFF), CLK_PCOM("uartdm_clk", UART2DM_CLK, NULL, 0), CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF), CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF), CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0), CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN), 
CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF), CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF), CLK_PCOM("vfe_axi_clk", VFE_AXI_CLK, NULL, OFF), CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF), CLK_PCOM("usb_hs2_pclk", USB_HS2_P_CLK, NULL, OFF), CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF), CLK_PCOM("usb_hs3_pclk", USB_HS3_P_CLK, NULL, OFF), CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0), }; unsigned msm_num_clocks_8x50 = ARRAY_SIZE(msm_clocks_8x50);
gpl-2.0
VanirAOSP/kernel_samsung_skomer
net/tipc/link.c
2388
80058
/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"

/*
 * Out-of-range value for link session numbers
 * (real sessions are 16 bit, so this can never match a peer session)
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define STARTING_EVT    856384768	/* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u		/* rx'd ??? */
#define TIMEOUT_EVT     560817u		/* link timer expired */

/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

/**
 * struct link_name - deconstructed link name
 * @addr_local: network address of node at this end
 * @if_local: name of interface at this end
 * @addr_peer: network address of node at far end
 * @if_peer: name of interface at far end
 */
struct link_name {
	u32 addr_local;
	char if_local[TIPC_MAX_IF_NAME];
	u32 addr_peer;
	char if_peer[TIPC_MAX_IF_NAME];
};

/* forward declarations for routines defined later in this file */
static void link_handle_out_of_seq_msg(struct link *l_ptr,
				       struct sk_buff *buf);
static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
static int  link_recv_changeover_msg(struct link **l_ptr,
				     struct sk_buff **buf);
static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
static int  link_send_sections_long(struct tipc_port *sender,
				    struct iovec const *msg_sect,
				    u32 num_sect, unsigned int total_len,
				    u32 destnode);
static void link_check_defragm_bufs(struct link *l_ptr);
static void link_state_event(struct link *l_ptr, u32 event);
static void link_reset_statistics(struct link *l_ptr);
static void link_print(struct link *l_ptr, const char *str);
static void link_start(struct link *l_ptr);
static int  link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);

/*
 * Simple link routines
 */

/* round up to the next multiple of 4 (message bundling alignment) */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

/*
 * link_init_max_pkt - (re)derive the link's negotiated/target packet sizes
 * from the bearer MTU; called at link creation and on every link reset.
 */
static void link_init_max_pkt(struct link *l_ptr)
{
	u32
	max_pkt;

	/* bearer MTU rounded down to a multiple of 4, capped by protocol max */
	max_pkt = (l_ptr->b_ptr->mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	/* start from the safe default and probe upward toward the target */
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

/* sequence number of the next packet to go out (first queued-but-unsent
 * message if any, otherwise the next number to be assigned) */
static u32 link_next_sent(struct link *l_ptr)
{
	if (l_ptr->next_out)
		return msg_seqno(buf_msg(l_ptr->next_out));
	return mod(l_ptr->next_out_no);
}

/* sequence number of the most recently transmitted packet */
static u32 link_last_sent(struct link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */

/* a link counts as "up" in either working state; NULL-safe */
int tipc_link_is_up(struct link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_name_validate - validate & (optionally) deconstruct link name
 * @name - ptr to link name string ("Z.C.N:if-Z.C.N:if" format)
 * @name_parts - ptr to area for link name components (or NULL if not needed)
 *
 * Returns 1 if link name is valid, otherwise 0.
 */
static int link_name_validate(const char *name, struct link_name *name_parts)
{
	char name_copy[TIPC_MAX_LINK_NAME];
	char *addr_local;
	char *if_local;
	char *addr_peer;
	char *if_peer;
	char dummy;
	u32 z_local, c_local, n_local;
	u32 z_peer, c_peer, n_peer;
	u32 if_local_len;
	u32 if_peer_len;

	/* copy link name & ensure length is OK */
	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
	/* need above in case non-Posix strncpy() doesn't pad with nulls */
	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
	if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
		return 0;

	/* ensure all component parts of link name are present */
	addr_local = name_copy;
	if_local = strchr(addr_local, ':');
	if (if_local == NULL)
		return 0;
	*(if_local++) = 0;
	addr_peer = strchr(if_local, '-');
	if (addr_peer == NULL)
		return 0;
	*(addr_peer++) = 0;
	if_local_len = addr_peer - if_local;
	if_peer = strchr(addr_peer, ':');
	if (if_peer == NULL)
		return 0;
	*(if_peer++) = 0;
	if_peer_len = strlen(if_peer) + 1;

	/* validate component parts of link name */
	if ((sscanf(addr_local, "%u.%u.%u%c",
		    &z_local, &c_local, &n_local, &dummy) != 3) ||
	    (sscanf(addr_peer, "%u.%u.%u%c",
		    &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
	    (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
	    (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
	    (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
	    (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
		return 0;

	/* return link name components, if necessary */
	if (name_parts) {
		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
		strcpy(name_parts->if_local, if_local);
		name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
		strcpy(name_parts->if_peer, if_peer);
	}
	return 1;
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 *
 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
 * with tipc_link_delete().
 * (There is no risk that the node will be deleted by
 * another thread because tipc_link_delete() always cancels the link timer before
 * tipc_node_delete() is called.)
 */
static void link_timeout(struct link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		/* for fragmented messages, profile the original message size */
		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			/* bucketed length histogram: <=64, 256, 1K, 4K, 16K, 32K, + */
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */

	link_check_defragm_bufs(l_ptr);

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

/* (re)arm the link supervision timer for 'time' more ticks */
static void link_set_timer(struct link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link (NULL on allocation failure or if the node
 * already has two links, or a link on this bearer).
 */
struct link *tipc_link_create(struct tipc_node *n_ptr,
			      struct tipc_bearer *b_ptr,
			      const struct tipc_media_addr *media_addr)
{
	struct link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	/* at most two links per peer node */
	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	/* only one link per (bearer, peer) pair */
	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		err("Attempt to establish second link on <%s> to %s\n",
		    b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f is appended to link name by reset/activate */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	/* pre-build the link protocol (RESET/STATE) message template */
	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
	list_add_tail(&l_ptr->link_list, &b_ptr->links);
	/* defer FSM start-up to tasklet context */
	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);

	return l_ptr;
}

/**
 * tipc_link_delete - delete a link
 * @l_ptr: pointer to link
 *
 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 * This routine must not grab the node lock until after link timer cancellation
 * to avoid a potential deadlock situation.
 */
void tipc_link_delete(struct link *l_ptr)
{
	if (!l_ptr) {
		err("Attempt to delete non-existent link\n");
		return;
	}

	/* cancel BEFORE taking the node lock (see note above) */
	k_cancel_timer(&l_ptr->timer);

	tipc_node_lock(l_ptr->owner);
	tipc_link_reset(l_ptr);
	tipc_node_detach_link(l_ptr->owner, l_ptr);
	tipc_link_stop(l_ptr);
	list_del_init(&l_ptr->link_list);
	tipc_node_unlock(l_ptr->owner);
	k_term_timer(&l_ptr->timer);
	kfree(l_ptr);
}

/* deferred (tasklet) start-up handler scheduled by tipc_link_create() */
static void link_start(struct link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);
	link_state_event(l_ptr, STARTING_EVT);
	tipc_node_unlock(l_ptr->owner);
}

/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */
static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!p_ptr->wakeup)
			goto exit;
		if (!list_empty(&p_ptr->wait_list))
			goto exit;	/* already queued for wakeup */
		p_ptr->congested = 1;
		/* packets this send would need; used to budget wakeups */
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	/* always reports congestion to the caller */
	return -ELINKCONG;
}

/*
 * tipc_link_wakeup_ports - wake ports waiting on link congestion,
 * within the link's available send window (unlimited if 'all' is set).
 */
void tipc_link_wakeup_ports(struct link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	/* trylock: give up rather than risk lock-order trouble here */
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		p_ptr->wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */
static void link_release_outqueue(struct link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	struct sk_buff *next;

	while (buf) {
		next = buf->next;
		buf_discard(buf);
		buf = next;
	}
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct link *l_ptr)
{
	struct sk_buff *buf = l_ptr->defragm_buf;
	struct sk_buff *next;

	while (buf) {
		next = buf->next;
		buf_discard(buf);
		buf = next;
	}
	l_ptr->defragm_buf = NULL;
}

/**
 * tipc_link_stop - purge all inbound and outbound messages associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_stop(struct link *l_ptr)
{
	struct sk_buff *buf;
	struct sk_buff *next;

	buf =
	l_ptr->oldest_deferred_in;
	while (buf) {
		next = buf->next;
		buf_discard(buf);
		buf = next;
	}

	buf = l_ptr->first_out;
	while (buf) {
		next = buf->next;
		buf_discard(buf);
		buf = next;
	}

	tipc_link_reset_fragments(l_ptr);

	buf_discard(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}

/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
#define link_send_event(fcn, l_ptr, up) do { } while (0)

/*
 * tipc_link_reset - bring link back to RESET_UNKNOWN, purge its queues,
 * and (if it was active) set up changeover state toward a surviving link.
 * Caller holds the owner node's lock.
 */
void tipc_link_reset(struct link *l_ptr)
{
	struct sk_buff *buf;
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	/* bump our session so stale peer traffic can be recognized */
	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	/* nothing more to do if the link was already down */
	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
	    l_ptr->owner->permit_changeover) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */

	link_release_outqueue(l_ptr);
	buf_discard(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		struct sk_buff *next = buf->next;
		buf_discard(buf);
		buf = next;
	}
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);

	link_send_event(tipc_cfg_link_event, l_ptr, 0);
	if (!in_own_cluster(l_ptr->addr))
		link_send_event(tipc_disc_link_event, l_ptr,
0);
}

/* mark link working and register it with node/bearer bookkeeping */
static void link_activate(struct link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
	link_send_event(tipc_cfg_link_event, l_ptr, 1);
	if (!in_own_cluster(l_ptr->addr))
		link_send_event(tipc_disc_link_event, l_ptr, 1);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 *
 * States: WORKING_WORKING, WORKING_UNKNOWN, RESET_UNKNOWN, RESET_RESET.
 * Caller holds the owner node's lock.
 */
static void link_state_event(struct link *l_ptr, unsigned event)
{
	struct link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (!l_ptr->started && (event != STARTING_EVT))
		return;		/* Not yet. */

	if (link_blocked(l_ptr)) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;		/* Changeover going on */
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			/* traffic seen since last check: stay WW */
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					/* keep probing for a larger max packet */
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			/* silence: probe the peer at a faster rate */
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			info("Resetting link <%s>, requested by peer\n",
			     l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in WW state\n", event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* peer is alive after all */
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			info("Resetting link <%s>, requested by peer "
			     "while probing\n", l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				/* still silent: keep probing until abort limit */
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				warn("Resetting link <%s>, peer not responding\n",
				     l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			err("Unknown link event %u in WU state\n", event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			/* hold off while the other link's state is uncertain */
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->started = 1;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in RU state\n", event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in RR state\n", event);
		}
		break;
	default:
		err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}

/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.  Returns 1 on success (buf consumed),
 * 0 if the message cannot be bundled (buf untouched).
 */
static int link_bundle_buf(struct link *l_ptr,
			   struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	buf_discard(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}

/*
 * link_add_to_outqueue - assign ack/sequence numbers to 'msg' and append
 * 'buf' to the link's singly-linked outbound queue.
 */
static void link_add_to_outqueue(struct link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	/* word 2 carries both the piggybacked ack and our seqno */
	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;
	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size >
l_ptr->stats.max_queue_sz) l_ptr->stats.max_queue_sz = l_ptr->out_queue_size; } static void link_add_chain_to_outqueue(struct link *l_ptr, struct sk_buff *buf_chain, u32 long_msgno) { struct sk_buff *buf; struct tipc_msg *msg; if (!l_ptr->next_out) l_ptr->next_out = buf_chain; while (buf_chain) { buf = buf_chain; buf_chain = buf_chain->next; msg = buf_msg(buf); msg_set_long_msgno(msg, long_msgno); link_add_to_outqueue(l_ptr, buf, msg); } } /* * tipc_link_send_buf() is the 'full path' for messages, called from * inside TIPC when the 'fast path' in tipc_send_buf * has failed, and from link_send() */ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); u32 size = msg_size(msg); u32 dsz = msg_data_sz(msg); u32 queue_size = l_ptr->out_queue_size; u32 imp = tipc_msg_tot_importance(msg); u32 queue_limit = l_ptr->queue_limit[imp]; u32 max_packet = l_ptr->max_pkt; msg_set_prevnode(msg, tipc_own_addr); /* If routed message */ /* Match msg importance against queue limits: */ if (unlikely(queue_size >= queue_limit)) { if (imp <= TIPC_CRITICAL_IMPORTANCE) { link_schedule_port(l_ptr, msg_origport(msg), size); buf_discard(buf); return -ELINKCONG; } buf_discard(buf); if (imp > CONN_MANAGER) { warn("Resetting link <%s>, send queue full", l_ptr->name); tipc_link_reset(l_ptr); } return dsz; } /* Fragmentation needed ? 
*/ if (size > max_packet) return link_send_long_buf(l_ptr, buf); /* Packet can be queued or sent: */ if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && !link_congested(l_ptr))) { link_add_to_outqueue(l_ptr, buf, msg); if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) { l_ptr->unacked_window = 0; } else { tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); l_ptr->stats.bearer_congs++; l_ptr->next_out = buf; } return dsz; } /* Congestion: can message be bundled ?: */ if ((msg_user(msg) != CHANGEOVER_PROTOCOL) && (msg_user(msg) != MSG_FRAGMENTER)) { /* Try adding message to an existing bundle */ if (l_ptr->next_out && link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); return dsz; } /* Try creating a new bundle */ if (size <= max_packet * 2 / 3) { struct sk_buff *bundler = tipc_buf_acquire(max_packet); struct tipc_msg bundler_hdr; if (bundler) { tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, INT_H_SIZE, l_ptr->addr); skb_copy_to_linear_data(bundler, &bundler_hdr, INT_H_SIZE); skb_trim(bundler, INT_H_SIZE); link_bundle_buf(l_ptr, bundler, buf); buf = bundler; msg = buf_msg(buf); l_ptr->stats.sent_bundles++; } } } if (!l_ptr->next_out) l_ptr->next_out = buf; link_add_to_outqueue(l_ptr, buf, msg); tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); return dsz; } /* * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has * not been selected yet, and the the owner node is not locked * Called by TIPC internal users, e.g. 
the name distributor */ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) { struct link *l_ptr; struct tipc_node *n_ptr; int res = -ELINKCONG; read_lock_bh(&tipc_net_lock); n_ptr = tipc_node_find(dest); if (n_ptr) { tipc_node_lock(n_ptr); l_ptr = n_ptr->active_links[selector & 1]; if (l_ptr) res = tipc_link_send_buf(l_ptr, buf); else buf_discard(buf); tipc_node_unlock(n_ptr); } else { buf_discard(buf); } read_unlock_bh(&tipc_net_lock); return res; } /* * link_send_buf_fast: Entry for data messages where the * destination link is known and the header is complete, * inclusive total message length. Very time critical. * Link is locked. Returns user data length. */ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf, u32 *used_max_pkt) { struct tipc_msg *msg = buf_msg(buf); int res = msg_data_sz(msg); if (likely(!link_congested(l_ptr))) { if (likely(msg_size(msg) <= l_ptr->max_pkt)) { if (likely(list_empty(&l_ptr->b_ptr->cong_links))) { link_add_to_outqueue(l_ptr, buf, msg); if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) { l_ptr->unacked_window = 0; return res; } tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); l_ptr->stats.bearer_congs++; l_ptr->next_out = buf; return res; } } else *used_max_pkt = l_ptr->max_pkt; } return tipc_link_send_buf(l_ptr, buf); /* All other cases */ } /* * tipc_send_buf_fast: Entry for data messages where the * destination node is known and the header is complete, * inclusive total message length. * Returns user data length. 
 */
int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
{
	struct link *l_ptr;
	struct tipc_node *n_ptr;
	int res;
	u32 selector = msg_origport(buf_msg(buf)) & 1;
	u32 dummy;

	/* Local delivery needs no link at all */
	if (destnode == tipc_own_addr)
		return tipc_port_recv_msg(buf);

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(destnode);
	if (likely(n_ptr)) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector];
		if (likely(l_ptr)) {
			res = link_send_buf_fast(l_ptr, buf, &dummy);
			tipc_node_unlock(n_ptr);
			read_unlock_bh(&tipc_net_lock);
			return res;
		}
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
	/* No link to the node: bounce the message back to the sender */
	res = msg_data_sz(buf_msg(buf));
	tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	return res;
}

/*
 * tipc_link_send_sections_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_send_sections_fast(struct tipc_port *sender,
				 struct iovec const *msg_sect,
				 const u32 num_sect,
				 unsigned int total_len,
				 u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */
	res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
			     sender->max_pkt, !sender->user_port, &buf);

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
				/* NOTE: the exit label below is also reached
				 * by goto from the failure branches */
exit:
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if build request was invalid */
			if (unlikely(res < 0))
				goto exit;

			/* Exit if link (or bearer) is congested */
			if (link_congested(l_ptr) ||
			    !list_empty(&l_ptr->b_ptr->cong_links)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);

			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return link_send_sections_long(sender, msg_sect,
						       num_sect, total_len,
						       destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */
	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
						 total_len, TIPC_ERR_NO_NODE);
	return res;
}

/*
 * link_send_sections_long(): Entry for long messages where the
 * destination node is known and the header is complete,
 * inclusive total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   u32 num_sect,
				   unsigned int total_len,
				   u32 destaddr)
{
	struct link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = total_len;
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar *sect_crs;
	int curr_sect;
	u32 fragm_no;

again:
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
		/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
		/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header: */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		 INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment: */
	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message: */
	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		if (!sect_rest) {
			/* Advance to the next iovec section */
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
		}

		/* Copy the smaller of: remaining section, remaining fragment */
		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		/* !user_port: iovec bases point into user space */
		if (likely(!sender->user_port)) {
			if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
				/* error: free the whole chain built so far */
error:
				for (; buf_chain; buf_chain = buf) {
					buf = buf_chain->next;
					buf_discard(buf_chain);
				}
				return -EFAULT;
			}
		} else
			skb_copy_to_linear_data_offset(buf, fragm_crs,
						       sect_crs, sz);
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf)
				goto error;

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);

	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			/* MTU shrank while we were building: rebuild with
			 * the smaller fragment size */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			for (; buf_chain; buf_chain = buf) {
				buf = buf_chain->next;
				buf_discard(buf_chain);
			}
			goto again;
		}
	} else {
reject:
		for (; buf_chain; buf_chain = buf) {
			buf = buf_chain->next;
			buf_discard(buf_chain);
		}
		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
						 total_len, TIPC_ERR_NO_NODE);
	}

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}

/*
 * tipc_link_push_packet: Push one unsent packet to the media
 */
u32 tipc_link_push_packet(struct link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any, */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = msg_seqno(buf_msg(buf));

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			l_ptr->retransm_queue_head = mod(++r_q_head);
			l_ptr->retransm_queue_size = --r_q_size;
			l_ptr->stats.retransmitted++;
			return 0;
		} else {
			l_ptr->stats.bearer_congs++;
			return PUSH_FAILED;
		}
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			l_ptr->unacked_window = 0;
			buf_discard(buf);
			l_ptr->proto_msg_queue = NULL;
			return 0;
		} else {
			l_ptr->stats.bearer_congs++;
			return PUSH_FAILED;
		}
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = msg_seqno(buf_msg(l_ptr->first_out));

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			if (tipc_bearer_send(l_ptr->b_ptr, buf,
					     &l_ptr->media_addr)) {
				/* A bundle that has gone out can take no
				 * more piggy-backed messages */
				if (msg_user(msg) == MSG_BUNDLER)
					msg_set_type(msg, CLOSED_MSG);
				l_ptr->next_out = buf->next;
				return 0;
			} else {
				l_ptr->stats.bearer_congs++;
				return PUSH_FAILED;
			}
		}
	}
	return PUSH_FINISHED;
}

/*
 * push_queue(): push out the unsent messages of a link where
 * congestion has abated.
Node is locked */
void tipc_link_push_queue(struct link *l_ptr)
{
	u32 res;

	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
		return;

	/* Keep pushing until the bearer refuses or the queue is drained */
	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);

	if (res == PUSH_FAILED)
		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
}

/*
 * link_reset_all - reset every link to the given node
 * (tasklet handler; 'addr' carries the node address as unsigned long)
 */
static void link_reset_all(unsigned long addr)
{
	struct tipc_node *n_ptr;
	char addr_string[16];
	u32 i;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find((u32)addr);
	if (!n_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return;	/* node no longer exists */
	}

	tipc_node_lock(n_ptr);

	warn("Resetting all links to %s\n",
	     tipc_addr_string_fill(addr_string, n_ptr->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (n_ptr->links[i]) {
			link_print(n_ptr->links[i], "Resetting link\n");
			tipc_link_reset(n_ptr->links[i]);
		}
	}

	tipc_node_unlock(n_ptr);
	read_unlock_bh(&tipc_net_lock);
}

/*
 * link_retransmit_failure - give up retransmitting on this link
 * A unicast link is simply reset; the broadcast link (addr == 0)
 * dumps diagnostics and schedules a reset of all links to the
 * destination that failed to acknowledge.
 */
static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {

		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {

		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		info("Msg seq number: %u, ", msg_seqno(msg));
		info("Outstanding acks: %lu\n",
		     (unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		info("Multicast link info for %s\n", addr_string);
		info("Supported: %d, ", n_ptr->bclink.supported);
		info("Acked: %u\n", n_ptr->bclink.acked);
		info("Last in: %u, ", n_ptr->bclink.last_in);
		info("Gap after: %u, ", n_ptr->bclink.gap_after);
		info("Gap to: %u\n", n_ptr->bclink.gap_to);
		info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);

		/* Reset asynchronously; we hold locks a tasklet must not */
		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}

/*
 * tipc_link_retransmit - retransmit up to 'retransmits' messages
 * starting at 'buf'. If the bearer is congested the request is only
 * recorded; repeated retransmits of the same message on an uncongested
 * bearer eventually trigger link_retransmit_failure().
 */
void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
		if (l_ptr->retransm_queue_size == 0) {
			/* Remember where to resume once congestion abates */
			l_ptr->retransm_queue_head = msg_seqno(msg);
			l_ptr->retransm_queue_size = retransmits;
		} else {
			err("Unexpected retransmit on link %s (qsize=%d)\n",
			    l_ptr->name, l_ptr->retransm_queue_size);
		}
		return;
	} else {
		/* Detect repeated retransmit failures on uncongested bearer */
		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
			if (++l_ptr->stale_count > 100) {
				link_retransmit_failure(l_ptr, buf);
				return;
			}
		} else {
			l_ptr->last_retransmitted = msg_seqno(msg);
			l_ptr->stale_count = 1;
		}
	}

	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			buf = buf->next;
			retransmits--;
			l_ptr->stats.retransmitted++;
		} else {
			/* Bearer congested mid-run: record resume point */
			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
			l_ptr->stats.bearer_congs++;
			l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
			l_ptr->retransm_queue_size = retransmits;
			return;
		}
	}

	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}

/**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 *
 * If the oldest deferred packet is the next expected one, splice the whole
 * deferred queue in front of 'buf' so it is processed in order.
 */
static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
						  struct sk_buff *buf)
{
	u32 seq_no;

	if (l_ptr->oldest_deferred_in == NULL)
		return buf;

	seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
	if (seq_no == mod(l_ptr->next_in_no)) {
		l_ptr->newest_deferred_in->next = buf;
		buf = l_ptr->oldest_deferred_in;
		l_ptr->oldest_deferred_in = NULL;
		l_ptr->deferred_inqueue_sz = 0;
	}
	return buf;
}

/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.
The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	/* Minimum header size per data message type (indexed by msg_type) */
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, LONG_H_SIZE, DIR_MSG_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
		};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	/* Too short to hold even the smallest TIPC header */
	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	/* Make sure the whole header is linear in the head fragment */
	return pskb_may_pull(buf, hdr_size);
}

/**
 * tipc_recv_msg - process TIPC messages arriving from off-node
 * @head: pointer to message buffer chain
 * @tb_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;
		int type;

		head = head->next;

		/* Ensure bearer is still enabled */
		if (unlikely(!b_ptr->active))
			goto cont;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto cont;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(buf_linearize(buf)))
			goto cont;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_recv_msg(buf, b_ptr);
			else
				tipc_bclink_recv_pkt(buf);
			continue;
		}

		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto cont;

		/* Discard non-routeable messages destined for another node */
		if (unlikely(!msg_isdata(msg) &&
			     (msg_destnode(msg) != tipc_own_addr))) {
			if ((msg_user(msg) != CONN_MANAGER) &&
			    (msg_user(msg) != MSG_FRAGMENTER))
				goto cont;
		}

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto cont;
		tipc_node_lock(n_ptr);

		/* Don't talk to neighbor during cleanup after last session */
		if (n_ptr->cleanup_required) {
			tipc_node_unlock(n_ptr);
			goto cont;
		}

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr)) {
			tipc_node_unlock(n_ptr);
			goto cont;
		}

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
			if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
				tipc_bclink_acknowledge(n_ptr,
							msg_bcast_ack(msg));
		}

		/* Drop everything the peer has acknowledged from our queue */
		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(msg_seqno(buf_msg(crs)), ackd)) {
			struct sk_buff *next = crs->next;

			buf_discard(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Now (finally!) process the incoming message */
protocol_check:
		if (likely(link_working_working(l_ptr))) {
			if (likely(seq_no == mod(l_ptr->next_in_no))) {
				l_ptr->next_in_no++;
				if (unlikely(l_ptr->oldest_deferred_in))
					head = link_insert_deferred_queue(l_ptr,
									  head);
				if (likely(msg_is_dest(msg, tipc_own_addr))) {
deliver:
					if (likely(msg_isdata(msg))) {
						tipc_node_unlock(n_ptr);
						tipc_port_recv_msg(buf);
						continue;
					}
					/* Dispatch by internal message user;
					 * each case drops the node lock before
					 * handing the buffer onward */
					switch (msg_user(msg)) {
					case MSG_BUNDLER:
						l_ptr->stats.recv_bundles++;
						l_ptr->stats.recv_bundled +=
							msg_msgcnt(msg);
						tipc_node_unlock(n_ptr);
						tipc_link_recv_bundle(buf);
						continue;
					case NAME_DISTRIBUTOR:
						tipc_node_unlock(n_ptr);
						tipc_named_recv(buf);
						continue;
					case CONN_MANAGER:
						tipc_node_unlock(n_ptr);
						tipc_port_recv_proto_msg(buf);
						continue;
					case MSG_FRAGMENTER:
						l_ptr->stats.recv_fragments++;
						if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
									    &buf, &msg)) {
							l_ptr->stats.recv_fragmented++;
							goto deliver;
						}
						break;
					case CHANGEOVER_PROTOCOL:
						type = msg_type(msg);
						if (link_recv_changeover_msg(&l_ptr,
									     &buf)) {
							msg = buf_msg(buf);
							seq_no = msg_seqno(msg);
							if (type == ORIGINAL_MSG)
								goto deliver;
							goto protocol_check;
						}
						break;
					default:
						buf_discard(buf);
						buf = NULL;
						break;
					}
				}
				tipc_node_unlock(n_ptr);
				tipc_net_route_msg(buf);
				continue;
			}
			/* In-state but out-of-sequence: defer it */
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}

		if (msg_user(msg) == LINK_PROTOCOL) {
			link_recv_proto_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		if (link_working_working(l_ptr)) {
			/* Re-insert in front of queue */
			buf->next = head;
			head = buf;
			tipc_node_unlock(n_ptr);
			continue;
		}
		tipc_node_unlock(n_ptr);
cont:
		buf_discard(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}

/*
 * link_defer_buf(): Sort a received out-of-sequence packet
 * into the deferred reception queue.
 * Returns the increase of the queue length,i.e. 0 or 1
 */
u32 tipc_link_defer_pkt(struct sk_buff **head,
			struct sk_buff **tail,
			struct sk_buff *buf)
{
	struct sk_buff *prev = NULL;
	struct sk_buff *crs = *head;
	u32 seq_no = msg_seqno(buf_msg(buf));

	buf->next = NULL;

	/* Empty queue ? */
	if (*head == NULL) {
		*head = *tail = buf;
		return 1;
	}

	/* Last ? */
	if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
		(*tail)->next = buf;
		*tail = buf;
		return 1;
	}

	/* Scan through queue and sort it in */
	do {
		struct tipc_msg *msg = buf_msg(crs);

		if (less(seq_no, msg_seqno(msg))) {
			buf->next = crs;
			if (prev)
				prev->next = buf;
			else
				*head = buf;
			return 1;
		}
		if (seq_no == msg_seqno(msg))
			break;
		prev = crs;
		crs = crs->next;
	} while (crs);

	/* Message is a duplicate of an existing message */
	buf_discard(buf);
	return 0;
}

/**
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = msg_seqno(buf_msg(buf));

	/* Protocol messages are not sequenced: hand off directly */
	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		link_recv_proto_msg(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		buf_discard(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
				&l_ptr->newest_deferred_in, buf)) {
		l_ptr->deferred_inqueue_sz++;
		l_ptr->stats.deferred_recv++;
		/* Nack only every 16th deferral to limit protocol traffic */
		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else
		l_ptr->stats.duplicates++;
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
			      u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	if (link_blocked(l_ptr))
		return;
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = msg_seqno(buf_msg(l_ptr->next_out));
		msg_set_next_sent(msg, next_sent);
		if (l_ptr->oldest_deferred_in) {
			/* Report the gap up to the oldest deferred packet */
			u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			/* MTU discovery: probe with a packet half way between
			 * the current and the target MTU (size rounded down
			 * to a multiple of 4) */
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);

	/* Ensure sequence number will not fit : */
	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	/* Congestion? */
	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
		/* Defer: keep only the most recent protocol message */
		if (!l_ptr->proto_msg_queue) {
			l_ptr->proto_msg_queue =
				tipc_buf_acquire(sizeof(l_ptr->proto_msg));
		}
		buf = l_ptr->proto_msg_queue;
		if (!buf)
			return;
		skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
		return;
	}

	/* Message can be sent */
	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	msg_set_size(buf_msg(buf), msg_size);

	if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
		l_ptr->unacked_window = 0;
		buf_discard(buf);
		return;
	}

	/* New congestion */
	tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
	l_ptr->proto_msg_queue = buf;
	l_ptr->stats.bearer_congs++;
}

/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 */
static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	if (link_blocked(l_ptr))
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	l_ptr->owner->permit_changeover = msg_redundant_link(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (msg_session(msg) == l_ptr->peer_session)
				break; /* duplicate: ignore */
		}
		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}
		l_ptr->owner->bclink.supported = (max_pkt_info != 0);

		link_state_event(l_ptr, msg_type(msg));

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		/* Synchronize broadcast sequence numbers */
		if (!tipc_node_redundant_links(l_ptr->owner))
			l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			warn("Resetting link <%s>, priority change %u->%u\n",
			     l_ptr->name, l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			/* Acknowledge the probed MTU back to the peer */
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, rec_gap, 0, 0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	buf_discard(buf);
}

/*
 * tipc_link_tunnel(): Send one message via a link
belonging to
 * another bearer. Owner node is locked.
 */
static void tipc_link_tunnel(struct link *l_ptr,
			     struct tipc_msg *tunnel_hdr,
			     struct tipc_msg *msg,
			     u32 selector)
{
	struct link *tunnel;
	struct sk_buff *buf;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		warn("Link changeover error, "
		     "tunnel link no longer available\n");
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	buf = tipc_buf_acquire(length + INT_H_SIZE);
	if (!buf) {
		warn("Link changeover error, "
		     "unable to send tunnel msg\n");
		return;
	}
	/* Tunnel header, then the complete original message as payload */
	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
	tipc_link_send_buf(tunnel, buf);
}

/*
 * changeover(): Send whole message queue via the remaining link
 * Owner node is locked.
 */
void tipc_link_changeover(struct link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	if (!l_ptr->owner->permit_changeover) {
		warn("Link changeover error, "
		     "peer did not permit changeover\n");
		return;
	}

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (!l_ptr->first_out) {
		/* Empty queue: still announce the changeover (count == 0) */
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			tipc_link_send_buf(tunnel, buf);
		} else {
			warn("Link changeover error, "
			     "unable to send changeover msg\n");
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			/* Unpack the bundle and tunnel each inner message
			 * individually, so each can pick its own link */
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
						 msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
					 msg_link_selector(msg));
		}
		crs = crs->next;
	}
}

/*
 * tipc_link_send_duplicate - tunnel a copy of each message in this
 * link's send queue through 'tunnel', wrapped as DUPLICATE_MSG.
 * NOTE(review): caller presumably holds the owner node lock, as with
 * tipc_link_changeover() above - confirm against callers.
 */
void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			warn("Link changeover error, "
			     "unable to send duplicate msg\n");
			return;
		}
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		tipc_link_send_buf(tunnel, outbuf);
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}

/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message. The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}

/*
 * link_recv_changeover_msg(): Receive tunneled packet sent
 * via other link. Node is locked. Return extracted buffer.
 */
static int link_recv_changeover_msg(struct link **l_ptr,
				    struct sk_buff **buf)
{
	struct sk_buff *tunnel_buf = *buf;
	struct link *dest_link;
	struct tipc_msg *msg;
	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
	u32 msg_typ = msg_type(tunnel_msg);
	u32 msg_count = msg_msgcnt(tunnel_msg);

	dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
	if (!dest_link)
		goto exit;
	if (dest_link == *l_ptr) {
		err("Unexpected changeover message on link <%s>\n",
		    (*l_ptr)->name);
		goto exit;
	}
	/* Redirect caller to the link the tunneled message belongs to */
	*l_ptr = dest_link;
	msg = msg_get_wrapped(tunnel_msg);

	if (msg_typ == DUPLICATE_MSG) {
		/* Drop duplicates we have already received in sequence */
		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
			goto exit;
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf == NULL) {
			warn("Link changeover error, duplicate msg dropped\n");
			goto exit;
		}
		buf_discard(tunnel_buf);
		return 1;
	}

	/* First original message ?: */
	if (tipc_link_is_up(dest_link)) {
		info("Resetting link <%s>, changeover initiated by peer\n",
		     dest_link->name);
		tipc_link_reset(dest_link);
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	}

	/* Receive original message */
	if (dest_link->exp_msg_count == 0) {
		warn("Link switchover error, "
		     "got too many tunnelled messages\n");
		goto exit;
	}
	dest_link->exp_msg_count--;
	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
		goto exit;
	} else {
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf != NULL) {
			buf_discard(tunnel_buf);
			return 1;
		} else {
			warn("Link changeover error, original msg dropped\n");
		}
	}
exit:
	*buf = NULL;
	buf_discard(tunnel_buf);
	return 0;
}

/*
 * Bundler functionality:
 */
void tipc_link_recv_bundle(struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;

	/* Extract and route each bundled message in turn */
	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			warn("Link unable to unbundle message(s)\n");
			break;
		}
		pos += align(msg_size(buf_msg(obuf)));
		tipc_net_route_msg(obuf);
	}
	buf_discard(buf);
}

/*
 * Fragmentation/defragmentation:
 */

/*
 * link_send_long_buf: Entry for buffers needing fragmentation.
 * The buffer is complete, inclusive total message length.
 * Returns user data length.
 */
static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
{
	struct sk_buff *buf_chain = NULL;
	/* Tail pointer trick: &buf_chain acts as the initial list node */
	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;
	u32 rest = insize;
	u32 pack_sz = l_ptr->max_pkt;
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 0;
	u32 destaddr;

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;
	else
		destaddr = msg_destnode(inmsg);

	/* Prepare reusable fragment header: */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		 INT_H_SIZE, destaddr);

	/* Chop up message: */
	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			/* Allocation failed: free input and partial chain */
			buf_discard(buf);
			while (buf_chain) {
				buf = buf_chain;
				buf_chain = buf_chain->next;
				buf_discard(buf);
			}
			return -ENOMEM;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		fragm_no++;
		msg_set_fragm_no(&fragm_hdr, fragm_no);
		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
					       fragm_sz);
		buf_chain_tail->next = fragm;
		buf_chain_tail = fragm;

		rest -= fragm_sz;
		crs += fragm_sz;
		msg_set_type(&fragm_hdr, FRAGMENT);
	}
	buf_discard(buf);

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);

	return dsz;
}

/*
 * A pending message being re-assembled must store certain values
 * to handle subsequent fragments correctly.
The following functions * help storing these values in unused, available fields in the * pending message. This makes dynamic memory allocation unnecessary. */ static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno) { msg_set_seqno(buf_msg(buf), seqno); } static u32 get_fragm_size(struct sk_buff *buf) { return msg_ack(buf_msg(buf)); } static void set_fragm_size(struct sk_buff *buf, u32 sz) { msg_set_ack(buf_msg(buf), sz); } static u32 get_expected_frags(struct sk_buff *buf) { return msg_bcast_ack(buf_msg(buf)); } static void set_expected_frags(struct sk_buff *buf, u32 exp) { msg_set_bcast_ack(buf_msg(buf), exp); } static u32 get_timer_cnt(struct sk_buff *buf) { return msg_reroute_cnt(buf_msg(buf)); } static void incr_timer_cnt(struct sk_buff *buf) { msg_incr_reroute_cnt(buf_msg(buf)); } /* * tipc_link_recv_fragment(): Called with node lock on. Returns * the reassembled buffer if message is complete. */ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, struct tipc_msg **m) { struct sk_buff *prev = NULL; struct sk_buff *fbuf = *fb; struct tipc_msg *fragm = buf_msg(fbuf); struct sk_buff *pbuf = *pending; u32 long_msg_seq_no = msg_long_msgno(fragm); *fb = NULL; /* Is there an incomplete message waiting for this fragment? 
*/ while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) { prev = pbuf; pbuf = pbuf->next; } if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) { struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm); u32 msg_sz = msg_size(imsg); u32 fragm_sz = msg_data_sz(fragm); u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz); u32 max = TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE; if (msg_type(imsg) == TIPC_MCAST_MSG) max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE; if (msg_size(imsg) > max) { buf_discard(fbuf); return 0; } pbuf = tipc_buf_acquire(msg_size(imsg)); if (pbuf != NULL) { pbuf->next = *pending; *pending = pbuf; skb_copy_to_linear_data(pbuf, imsg, msg_data_sz(fragm)); /* Prepare buffer for subsequent fragments. */ set_long_msg_seqno(pbuf, long_msg_seq_no); set_fragm_size(pbuf, fragm_sz); set_expected_frags(pbuf, exp_fragm_cnt - 1); } else { warn("Link unable to reassemble fragmented message\n"); } buf_discard(fbuf); return 0; } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) { u32 dsz = msg_data_sz(fragm); u32 fsz = get_fragm_size(pbuf); u32 crs = ((msg_fragm_no(fragm) - 1) * fsz); u32 exp_frags = get_expected_frags(pbuf) - 1; skb_copy_to_linear_data_offset(pbuf, crs, msg_data(fragm), dsz); buf_discard(fbuf); /* Is message complete? 
*/ if (exp_frags == 0) { if (prev) prev->next = pbuf->next; else *pending = pbuf->next; msg_reset_reroute_cnt(buf_msg(pbuf)); *fb = pbuf; *m = buf_msg(pbuf); return 1; } set_expected_frags(pbuf, exp_frags); return 0; } buf_discard(fbuf); return 0; } /** * link_check_defragm_bufs - flush stale incoming message fragments * @l_ptr: pointer to link */ static void link_check_defragm_bufs(struct link *l_ptr) { struct sk_buff *prev = NULL; struct sk_buff *next = NULL; struct sk_buff *buf = l_ptr->defragm_buf; if (!buf) return; if (!link_working_working(l_ptr)) return; while (buf) { u32 cnt = get_timer_cnt(buf); next = buf->next; if (cnt < 4) { incr_timer_cnt(buf); prev = buf; } else { if (prev) prev->next = buf->next; else l_ptr->defragm_buf = buf->next; buf_discard(buf); } buf = next; } } static void link_set_supervision_props(struct link *l_ptr, u32 tolerance) { if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) return; l_ptr->tolerance = tolerance; l_ptr->continuity_interval = ((tolerance / 4) > 500) ? 
500 : tolerance / 4; l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); } void tipc_link_set_queue_limits(struct link *l_ptr, u32 window) { /* Data messages from this node, inclusive FIRST_FRAGM */ l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; /* Transiting data messages,inclusive FIRST_FRAGM */ l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; l_ptr->queue_limit[CONN_MANAGER] = 1200; l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; /* FRAGMENT and LAST_FRAGMENT packets */ l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; } /** * link_find_link - locate link by name * @name - ptr to link name string * @node - ptr to area to be filled with ptr to associated node * * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted; * this also prevents link deletion. * * Returns pointer to link (or 0 if invalid link name). 
*/ static struct link *link_find_link(const char *name, struct tipc_node **node) { struct link_name link_name_parts; struct tipc_bearer *b_ptr; struct link *l_ptr; if (!link_name_validate(name, &link_name_parts)) return NULL; b_ptr = tipc_bearer_find_interface(link_name_parts.if_local); if (!b_ptr) return NULL; *node = tipc_node_find(link_name_parts.addr_peer); if (!*node) return NULL; l_ptr = (*node)->links[b_ptr->identity]; if (!l_ptr || strcmp(l_ptr->name, name)) return NULL; return l_ptr; } struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd) { struct tipc_link_config *args; u32 new_value; struct link *l_ptr; struct tipc_node *node; int res; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); new_value = ntohl(args->value); if (!strcmp(args->name, tipc_bclink_name)) { if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && (tipc_bclink_set_queue_limits(new_value) == 0)) return tipc_cfg_reply_none(); return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED " (cannot change setting on broadcast link)"); } read_lock_bh(&tipc_net_lock); l_ptr = link_find_link(args->name, &node); if (!l_ptr) { read_unlock_bh(&tipc_net_lock); return tipc_cfg_reply_error_string("link not found"); } tipc_node_lock(node); res = -EINVAL; switch (cmd) { case TIPC_CMD_SET_LINK_TOL: if ((new_value >= TIPC_MIN_LINK_TOL) && (new_value <= TIPC_MAX_LINK_TOL)) { link_set_supervision_props(l_ptr, new_value); tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, new_value, 0, 0); res = 0; } break; case TIPC_CMD_SET_LINK_PRI: if ((new_value >= TIPC_MIN_LINK_PRI) && (new_value <= TIPC_MAX_LINK_PRI)) { l_ptr->priority = new_value; tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, new_value, 0); res = 0; } break; case TIPC_CMD_SET_LINK_WINDOW: if ((new_value >= TIPC_MIN_LINK_WIN) && (new_value <= TIPC_MAX_LINK_WIN)) { tipc_link_set_queue_limits(l_ptr, 
new_value); res = 0; } break; } tipc_node_unlock(node); read_unlock_bh(&tipc_net_lock); if (res) return tipc_cfg_reply_error_string("cannot change link setting"); return tipc_cfg_reply_none(); } /** * link_reset_statistics - reset link statistics * @l_ptr: pointer to link */ static void link_reset_statistics(struct link *l_ptr) { memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); l_ptr->stats.sent_info = l_ptr->next_out_no; l_ptr->stats.recv_info = l_ptr->next_in_no; } struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) { char *link_name; struct link *l_ptr; struct tipc_node *node; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); link_name = (char *)TLV_DATA(req_tlv_area); if (!strcmp(link_name, tipc_bclink_name)) { if (tipc_bclink_reset_stats()) return tipc_cfg_reply_error_string("link not found"); return tipc_cfg_reply_none(); } read_lock_bh(&tipc_net_lock); l_ptr = link_find_link(link_name, &node); if (!l_ptr) { read_unlock_bh(&tipc_net_lock); return tipc_cfg_reply_error_string("link not found"); } tipc_node_lock(node); link_reset_statistics(l_ptr); tipc_node_unlock(node); read_unlock_bh(&tipc_net_lock); return tipc_cfg_reply_none(); } /** * percent - convert count to a percentage of total (rounding up or down) */ static u32 percent(u32 count, u32 total) { return (count * 100 + (total / 2)) / total; } /** * tipc_link_stats - print link statistics * @name: link name * @buf: print buffer area * @buf_size: size of print buffer area * * Returns length of print buffer data string (or 0 if error) */ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) { struct print_buf pb; struct link *l_ptr; struct tipc_node *node; char *status; u32 profile_total = 0; if (!strcmp(name, tipc_bclink_name)) return tipc_bclink_stats(buf, buf_size); tipc_printbuf_init(&pb, buf, buf_size); read_lock_bh(&tipc_net_lock); l_ptr = link_find_link(name, &node); if 
(!l_ptr) { read_unlock_bh(&tipc_net_lock); return 0; } tipc_node_lock(node); if (tipc_link_is_active(l_ptr)) status = "ACTIVE"; else if (tipc_link_is_up(l_ptr)) status = "STANDBY"; else status = "DEFUNCT"; tipc_printf(&pb, "Link <%s>\n" " %s MTU:%u Priority:%u Tolerance:%u ms" " Window:%u packets\n", l_ptr->name, status, l_ptr->max_pkt, l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]); tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n", l_ptr->next_in_no - l_ptr->stats.recv_info, l_ptr->stats.recv_fragments, l_ptr->stats.recv_fragmented, l_ptr->stats.recv_bundles, l_ptr->stats.recv_bundled); tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n", l_ptr->next_out_no - l_ptr->stats.sent_info, l_ptr->stats.sent_fragments, l_ptr->stats.sent_fragmented, l_ptr->stats.sent_bundles, l_ptr->stats.sent_bundled); profile_total = l_ptr->stats.msg_length_counts; if (!profile_total) profile_total = 1; tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n" " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " "-16354:%u%% -32768:%u%% -66000:%u%%\n", l_ptr->stats.msg_length_counts, l_ptr->stats.msg_lengths_total / profile_total, percent(l_ptr->stats.msg_length_profile[0], profile_total), percent(l_ptr->stats.msg_length_profile[1], profile_total), percent(l_ptr->stats.msg_length_profile[2], profile_total), percent(l_ptr->stats.msg_length_profile[3], profile_total), percent(l_ptr->stats.msg_length_profile[4], profile_total), percent(l_ptr->stats.msg_length_profile[5], profile_total), percent(l_ptr->stats.msg_length_profile[6], profile_total)); tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n", l_ptr->stats.recv_states, l_ptr->stats.recv_probes, l_ptr->stats.recv_nacks, l_ptr->stats.deferred_recv, l_ptr->stats.duplicates); tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n", l_ptr->stats.sent_states, l_ptr->stats.sent_probes, l_ptr->stats.sent_nacks, l_ptr->stats.sent_acks, l_ptr->stats.retransmitted); 
tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n", l_ptr->stats.bearer_congs, l_ptr->stats.link_congs, l_ptr->stats.max_queue_sz, l_ptr->stats.queue_sz_counts ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts) : 0); tipc_node_unlock(node); read_unlock_bh(&tipc_net_lock); return tipc_printbuf_validate(&pb); } #define MAX_LINK_STATS_INFO 2000 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) { struct sk_buff *buf; struct tlv_desc *rep_tlv; int str_len; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO)); if (!buf) return NULL; rep_tlv = (struct tlv_desc *)buf->data; str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO); if (!str_len) { buf_discard(buf); return tipc_cfg_reply_error_string("link not found"); } skb_put(buf, TLV_SPACE(str_len)); TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); return buf; } /** * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination * @dest: network address of destination node * @selector: used to select from set of active links * * If no active link can be found, uses default maximum packet size. 
*/ u32 tipc_link_get_max_pkt(u32 dest, u32 selector) { struct tipc_node *n_ptr; struct link *l_ptr; u32 res = MAX_PKT_DEFAULT; if (dest == tipc_own_addr) return MAX_MSG_SIZE; read_lock_bh(&tipc_net_lock); n_ptr = tipc_node_find(dest); if (n_ptr) { tipc_node_lock(n_ptr); l_ptr = n_ptr->active_links[selector & 1]; if (l_ptr) res = l_ptr->max_pkt; tipc_node_unlock(n_ptr); } read_unlock_bh(&tipc_net_lock); return res; } static void link_print(struct link *l_ptr, const char *str) { char print_area[256]; struct print_buf pb; struct print_buf *buf = &pb; tipc_printbuf_init(buf, print_area, sizeof(print_area)); tipc_printf(buf, str); tipc_printf(buf, "Link %x<%s>:", l_ptr->addr, l_ptr->b_ptr->name); #ifdef CONFIG_TIPC_DEBUG if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr)) goto print_state; tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no)); tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no)); tipc_printf(buf, "SQUE"); if (l_ptr->first_out) { tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out))); if (l_ptr->next_out) tipc_printf(buf, "%u..", msg_seqno(buf_msg(l_ptr->next_out))); tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out))); if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - msg_seqno(buf_msg(l_ptr->first_out))) != (l_ptr->out_queue_size - 1)) || (l_ptr->last_out->next != NULL)) { tipc_printf(buf, "\nSend queue inconsistency\n"); tipc_printf(buf, "first_out= %p ", l_ptr->first_out); tipc_printf(buf, "next_out= %p ", l_ptr->next_out); tipc_printf(buf, "last_out= %p ", l_ptr->last_out); } } else tipc_printf(buf, "[]"); tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size); if (l_ptr->oldest_deferred_in) { u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in)); u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in)); tipc_printf(buf, ":RQUE[%u..%u]", o, n); if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) { tipc_printf(buf, ":RQSIZ(%u)", l_ptr->deferred_inqueue_sz); } } print_state: #endif if (link_working_unknown(l_ptr)) tipc_printf(buf, 
":WU"); else if (link_reset_reset(l_ptr)) tipc_printf(buf, ":RR"); else if (link_reset_unknown(l_ptr)) tipc_printf(buf, ":RU"); else if (link_working_working(l_ptr)) tipc_printf(buf, ":WW"); tipc_printf(buf, "\n"); tipc_printbuf_validate(buf); info("%s", print_area); }
gpl-2.0
EuphoriaOS/android_kernel_samsung_exynos5410
arch/arm/mach-omap2/twl-common.c
4692
10399
/* * twl-common.c * * Copyright (C) 2011 Texas Instruments, Inc.. * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/i2c.h> #include <linux/i2c/twl.h> #include <linux/gpio.h> #include <linux/regulator/machine.h> #include <linux/regulator/fixed.h> #include <plat/i2c.h> #include <plat/usb.h> #include "twl-common.h" #include "pm.h" static struct i2c_board_info __initdata pmic_i2c_board_info = { .addr = 0x48, .flags = I2C_CLIENT_WAKE, }; static struct i2c_board_info __initdata omap4_i2c1_board_info[] = { { .addr = 0x48, .flags = I2C_CLIENT_WAKE, }, { I2C_BOARD_INFO("twl6040", 0x4b), }, }; void __init omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, struct twl4030_platform_data *pmic_data) { strncpy(pmic_i2c_board_info.type, pmic_type, sizeof(pmic_i2c_board_info.type)); pmic_i2c_board_info.irq = pmic_irq; pmic_i2c_board_info.platform_data = pmic_data; omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); } void __init omap4_pmic_init(const char *pmic_type, struct twl4030_platform_data *pmic_data, struct twl6040_platform_data *twl6040_data, int twl6040_irq) { /* PMIC part*/ strncpy(omap4_i2c1_board_info[0].type, pmic_type, sizeof(omap4_i2c1_board_info[0].type)); omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N; omap4_i2c1_board_info[0].platform_data = pmic_data; /* TWL6040 audio IC part */ 
omap4_i2c1_board_info[1].irq = twl6040_irq; omap4_i2c1_board_info[1].platform_data = twl6040_data; omap_register_i2c_bus(1, 400, omap4_i2c1_board_info, 2); } void __init omap_pmic_late_init(void) { /* Init the OMAP TWL parameters (if PMIC has been registerd) */ if (pmic_i2c_board_info.irq) omap3_twl_init(); if (omap4_i2c1_board_info[0].irq) omap4_twl_init(); } #if defined(CONFIG_ARCH_OMAP3) static struct twl4030_usb_data omap3_usb_pdata = { .usb_mode = T2_USB_MODE_ULPI, }; static int omap3_batt_table[] = { /* 0 C */ 30800, 29500, 28300, 27100, 26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900, 17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100, 11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310, 8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830, 5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170, 4040, 3910, 3790, 3670, 3550 }; static struct twl4030_bci_platform_data omap3_bci_pdata = { .battery_tmp_tbl = omap3_batt_table, .tblsize = ARRAY_SIZE(omap3_batt_table), }; static struct twl4030_madc_platform_data omap3_madc_pdata = { .irq_line = 1, }; static struct twl4030_codec_data omap3_codec; static struct twl4030_audio_data omap3_audio_pdata = { .audio_mclk = 26000000, .codec = &omap3_codec, }; static struct regulator_consumer_supply omap3_vdda_dac_supplies[] = { REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"), }; static struct regulator_init_data omap3_vdac_idata = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(omap3_vdda_dac_supplies), .consumer_supplies = omap3_vdda_dac_supplies, }; static struct regulator_consumer_supply omap3_vpll2_supplies[] = { REGULATOR_SUPPLY("vdds_dsi", "omapdss"), REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.0"), }; static struct regulator_init_data omap3_vpll2_idata = { .constraints = { .min_uV = 
1800000, .max_uV = 1800000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(omap3_vpll2_supplies), .consumer_supplies = omap3_vpll2_supplies, }; void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, u32 pdata_flags, u32 regulators_flags) { if (!pmic_data->irq_base) pmic_data->irq_base = TWL4030_IRQ_BASE; if (!pmic_data->irq_end) pmic_data->irq_end = TWL4030_IRQ_END; /* Common platform data configurations */ if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb) pmic_data->usb = &omap3_usb_pdata; if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci) pmic_data->bci = &omap3_bci_pdata; if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc) pmic_data->madc = &omap3_madc_pdata; if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio) pmic_data->audio = &omap3_audio_pdata; /* Common regulator configurations */ if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac) pmic_data->vdac = &omap3_vdac_idata; if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2) pmic_data->vpll2 = &omap3_vpll2_idata; } #endif /* CONFIG_ARCH_OMAP3 */ #if defined(CONFIG_ARCH_OMAP4) static struct twl4030_usb_data omap4_usb_pdata = { .phy_init = omap4430_phy_init, .phy_exit = omap4430_phy_exit, .phy_power = omap4430_phy_power, .phy_set_clock = omap4430_phy_set_clk, .phy_suspend = omap4430_phy_suspend, }; static struct regulator_init_data omap4_vdac_idata = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_vaux2_idata = { .constraints = { .min_uV = 1200000, .max_uV = 2800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | 
REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_vaux3_idata = { .constraints = { .min_uV = 1000000, .max_uV = 3000000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_consumer_supply omap4_vmmc_supply[] = { REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"), }; /* VMMC1 for MMC1 card */ static struct regulator_init_data omap4_vmmc_idata = { .constraints = { .min_uV = 1200000, .max_uV = 3000000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(omap4_vmmc_supply), .consumer_supplies = omap4_vmmc_supply, }; static struct regulator_init_data omap4_vpp_idata = { .constraints = { .min_uV = 1800000, .max_uV = 2500000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_vana_idata = { .constraints = { .min_uV = 2100000, .max_uV = 2100000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_consumer_supply omap4_vcxio_supply[] = { REGULATOR_SUPPLY("vdds_dsi", "omapdss_dss"), REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.0"), REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.1"), }; static struct regulator_init_data omap4_vcxio_idata = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, .always_on = true, }, .num_consumer_supplies = ARRAY_SIZE(omap4_vcxio_supply), .consumer_supplies = omap4_vcxio_supply, 
}; static struct regulator_init_data omap4_vusb_idata = { .constraints = { .min_uV = 3300000, .max_uV = 3300000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_clk32kg_idata = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, }; void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data, u32 pdata_flags, u32 regulators_flags) { if (!pmic_data->irq_base) pmic_data->irq_base = TWL6030_IRQ_BASE; if (!pmic_data->irq_end) pmic_data->irq_end = TWL6030_IRQ_END; /* Common platform data configurations */ if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb) pmic_data->usb = &omap4_usb_pdata; /* Common regulator configurations */ if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac) pmic_data->vdac = &omap4_vdac_idata; if (regulators_flags & TWL_COMMON_REGULATOR_VAUX2 && !pmic_data->vaux2) pmic_data->vaux2 = &omap4_vaux2_idata; if (regulators_flags & TWL_COMMON_REGULATOR_VAUX3 && !pmic_data->vaux3) pmic_data->vaux3 = &omap4_vaux3_idata; if (regulators_flags & TWL_COMMON_REGULATOR_VMMC && !pmic_data->vmmc) pmic_data->vmmc = &omap4_vmmc_idata; if (regulators_flags & TWL_COMMON_REGULATOR_VPP && !pmic_data->vpp) pmic_data->vpp = &omap4_vpp_idata; if (regulators_flags & TWL_COMMON_REGULATOR_VANA && !pmic_data->vana) pmic_data->vana = &omap4_vana_idata; if (regulators_flags & TWL_COMMON_REGULATOR_VCXIO && !pmic_data->vcxio) pmic_data->vcxio = &omap4_vcxio_idata; if (regulators_flags & TWL_COMMON_REGULATOR_VUSB && !pmic_data->vusb) pmic_data->vusb = &omap4_vusb_idata; if (regulators_flags & TWL_COMMON_REGULATOR_CLK32KG && !pmic_data->clk32kg) pmic_data->clk32kg = &omap4_clk32kg_idata; } #endif /* CONFIG_ARCH_OMAP4 */
gpl-2.0
TeamEOS/kernel_oppo_msm8974
drivers/media/video/s5p-tv/sdo_drv.c
4948
11447
/* * Samsung Standard Definition Output (SDO) driver * * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * * Tomasz Stanislawski, <t.stanislaws@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published * by the Free Software Foundiation. either version 2 of the License, * or (at your option) any later version */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <media/v4l2-subdev.h> #include "regs-sdo.h" MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>"); MODULE_DESCRIPTION("Samsung Standard Definition Output (SDO)"); MODULE_LICENSE("GPL"); #define SDO_DEFAULT_STD V4L2_STD_PAL struct sdo_format { v4l2_std_id id; /* all modes are 720 pixels wide */ unsigned int height; unsigned int cookie; }; struct sdo_device { /** pointer to device parent */ struct device *dev; /** base address of SDO registers */ void __iomem *regs; /** SDO interrupt */ unsigned int irq; /** DAC source clock */ struct clk *sclk_dac; /** DAC clock */ struct clk *dac; /** DAC physical interface */ struct clk *dacphy; /** clock for control of VPLL */ struct clk *fout_vpll; /** regulator for SDO IP power */ struct regulator *vdac; /** regulator for SDO plug detection */ struct regulator *vdet; /** subdev used as device interface */ struct v4l2_subdev sd; /** current format */ const struct sdo_format *fmt; }; static inline struct sdo_device *sd_to_sdev(struct v4l2_subdev *sd) { return container_of(sd, struct sdo_device, sd); } static inline void sdo_write_mask(struct sdo_device *sdev, u32 reg_id, u32 value, u32 mask) { u32 old = readl(sdev->regs + reg_id); value = (value & mask) | (old & ~mask); writel(value, sdev->regs + reg_id); } 
static inline void sdo_write(struct sdo_device *sdev, u32 reg_id, u32 value) { writel(value, sdev->regs + reg_id); } static inline u32 sdo_read(struct sdo_device *sdev, u32 reg_id) { return readl(sdev->regs + reg_id); } static irqreturn_t sdo_irq_handler(int irq, void *dev_data) { struct sdo_device *sdev = dev_data; /* clear interrupt */ sdo_write_mask(sdev, SDO_IRQ, ~0, SDO_VSYNC_IRQ_PEND); return IRQ_HANDLED; } static void sdo_reg_debug(struct sdo_device *sdev) { #define DBGREG(reg_id) \ dev_info(sdev->dev, #reg_id " = %08x\n", \ sdo_read(sdev, reg_id)) DBGREG(SDO_CLKCON); DBGREG(SDO_CONFIG); DBGREG(SDO_VBI); DBGREG(SDO_DAC); DBGREG(SDO_IRQ); DBGREG(SDO_IRQMASK); DBGREG(SDO_VERSION); } static const struct sdo_format sdo_format[] = { { V4L2_STD_PAL_N, .height = 576, .cookie = SDO_PAL_N }, { V4L2_STD_PAL_Nc, .height = 576, .cookie = SDO_PAL_NC }, { V4L2_STD_PAL_M, .height = 480, .cookie = SDO_PAL_M }, { V4L2_STD_PAL_60, .height = 480, .cookie = SDO_PAL_60 }, { V4L2_STD_NTSC_443, .height = 480, .cookie = SDO_NTSC_443 }, { V4L2_STD_PAL, .height = 576, .cookie = SDO_PAL_BGHID }, { V4L2_STD_NTSC_M, .height = 480, .cookie = SDO_NTSC_M }, }; static const struct sdo_format *sdo_find_format(v4l2_std_id id) { int i; for (i = 0; i < ARRAY_SIZE(sdo_format); ++i) if (sdo_format[i].id & id) return &sdo_format[i]; return NULL; } static int sdo_g_tvnorms_output(struct v4l2_subdev *sd, v4l2_std_id *std) { *std = V4L2_STD_NTSC_M | V4L2_STD_PAL_M | V4L2_STD_PAL | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | V4L2_STD_NTSC_443 | V4L2_STD_PAL_60; return 0; } static int sdo_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std) { struct sdo_device *sdev = sd_to_sdev(sd); const struct sdo_format *fmt; fmt = sdo_find_format(std); if (fmt == NULL) return -EINVAL; sdev->fmt = fmt; return 0; } static int sdo_g_std_output(struct v4l2_subdev *sd, v4l2_std_id *std) { *std = sd_to_sdev(sd)->fmt->id; return 0; } static int sdo_g_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) { struct 
sdo_device *sdev = sd_to_sdev(sd); if (!sdev->fmt) return -ENXIO; /* all modes are 720 pixels wide */ fmt->width = 720; fmt->height = sdev->fmt->height; fmt->code = V4L2_MBUS_FMT_FIXED; fmt->field = V4L2_FIELD_INTERLACED; fmt->colorspace = V4L2_COLORSPACE_JPEG; return 0; } static int sdo_s_power(struct v4l2_subdev *sd, int on) { struct sdo_device *sdev = sd_to_sdev(sd); struct device *dev = sdev->dev; int ret; dev_info(dev, "sdo_s_power(%d)\n", on); if (on) ret = pm_runtime_get_sync(dev); else ret = pm_runtime_put_sync(dev); /* only values < 0 indicate errors */ return IS_ERR_VALUE(ret) ? ret : 0; } static int sdo_streamon(struct sdo_device *sdev) { /* set proper clock for Timing Generator */ clk_set_rate(sdev->fout_vpll, 54000000); dev_info(sdev->dev, "fout_vpll.rate = %lu\n", clk_get_rate(sdev->fout_vpll)); /* enable clock in SDO */ sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON); clk_enable(sdev->dacphy); /* enable DAC */ sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC); sdo_reg_debug(sdev); return 0; } static int sdo_streamoff(struct sdo_device *sdev) { int tries; sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC); clk_disable(sdev->dacphy); sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON); for (tries = 100; tries; --tries) { if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY) break; mdelay(1); } if (tries == 0) dev_err(sdev->dev, "failed to stop streaming\n"); return tries ? 0 : -EIO; } static int sdo_s_stream(struct v4l2_subdev *sd, int on) { struct sdo_device *sdev = sd_to_sdev(sd); return on ? 
sdo_streamon(sdev) : sdo_streamoff(sdev); } static const struct v4l2_subdev_core_ops sdo_sd_core_ops = { .s_power = sdo_s_power, }; static const struct v4l2_subdev_video_ops sdo_sd_video_ops = { .s_std_output = sdo_s_std_output, .g_std_output = sdo_g_std_output, .g_tvnorms_output = sdo_g_tvnorms_output, .g_mbus_fmt = sdo_g_mbus_fmt, .s_stream = sdo_s_stream, }; static const struct v4l2_subdev_ops sdo_sd_ops = { .core = &sdo_sd_core_ops, .video = &sdo_sd_video_ops, }; static int sdo_runtime_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct sdo_device *sdev = sd_to_sdev(sd); dev_info(dev, "suspend\n"); regulator_disable(sdev->vdet); regulator_disable(sdev->vdac); clk_disable(sdev->sclk_dac); return 0; } static int sdo_runtime_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct sdo_device *sdev = sd_to_sdev(sd); dev_info(dev, "resume\n"); clk_enable(sdev->sclk_dac); regulator_enable(sdev->vdac); regulator_enable(sdev->vdet); /* software reset */ sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_SW_RESET); mdelay(10); sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_SW_RESET); /* setting TV mode */ sdo_write_mask(sdev, SDO_CONFIG, sdev->fmt->cookie, SDO_STANDARD_MASK); /* XXX: forcing interlaced mode using undocumented bit */ sdo_write_mask(sdev, SDO_CONFIG, 0, SDO_PROGRESSIVE); /* turn all VBI off */ sdo_write_mask(sdev, SDO_VBI, 0, SDO_CVBS_WSS_INS | SDO_CVBS_CLOSED_CAPTION_MASK); /* turn all post processing off */ sdo_write_mask(sdev, SDO_CCCON, ~0, SDO_COMPENSATION_BHS_ADJ_OFF | SDO_COMPENSATION_CVBS_COMP_OFF); sdo_reg_debug(sdev); return 0; } static const struct dev_pm_ops sdo_pm_ops = { .runtime_suspend = sdo_runtime_suspend, .runtime_resume = sdo_runtime_resume, }; static int __devinit sdo_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sdo_device *sdev; struct resource *res; int ret = 0; struct clk *sclk_vpll; dev_info(dev, "probe start\n"); sdev = devm_kzalloc(&pdev->dev, 
sizeof *sdev, GFP_KERNEL); if (!sdev) { dev_err(dev, "not enough memory.\n"); ret = -ENOMEM; goto fail; } sdev->dev = dev; /* mapping registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(dev, "get memory resource failed.\n"); ret = -ENXIO; goto fail; } sdev->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (sdev->regs == NULL) { dev_err(dev, "register mapping failed.\n"); ret = -ENXIO; goto fail; } /* acquiring interrupt */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(dev, "get interrupt resource failed.\n"); ret = -ENXIO; goto fail; } ret = devm_request_irq(&pdev->dev, res->start, sdo_irq_handler, 0, "s5p-sdo", sdev); if (ret) { dev_err(dev, "request interrupt failed.\n"); goto fail; } sdev->irq = res->start; /* acquire clocks */ sdev->sclk_dac = clk_get(dev, "sclk_dac"); if (IS_ERR_OR_NULL(sdev->sclk_dac)) { dev_err(dev, "failed to get clock 'sclk_dac'\n"); ret = -ENXIO; goto fail; } sdev->dac = clk_get(dev, "dac"); if (IS_ERR_OR_NULL(sdev->dac)) { dev_err(dev, "failed to get clock 'dac'\n"); ret = -ENXIO; goto fail_sclk_dac; } sdev->dacphy = clk_get(dev, "dacphy"); if (IS_ERR_OR_NULL(sdev->dacphy)) { dev_err(dev, "failed to get clock 'dacphy'\n"); ret = -ENXIO; goto fail_dac; } sclk_vpll = clk_get(dev, "sclk_vpll"); if (IS_ERR_OR_NULL(sclk_vpll)) { dev_err(dev, "failed to get clock 'sclk_vpll'\n"); ret = -ENXIO; goto fail_dacphy; } clk_set_parent(sdev->sclk_dac, sclk_vpll); clk_put(sclk_vpll); sdev->fout_vpll = clk_get(dev, "fout_vpll"); if (IS_ERR_OR_NULL(sdev->fout_vpll)) { dev_err(dev, "failed to get clock 'fout_vpll'\n"); goto fail_dacphy; } dev_info(dev, "fout_vpll.rate = %lu\n", clk_get_rate(sclk_vpll)); /* acquire regulator */ sdev->vdac = regulator_get(dev, "vdd33a_dac"); if (IS_ERR_OR_NULL(sdev->vdac)) { dev_err(dev, "failed to get regulator 'vdac'\n"); goto fail_fout_vpll; } sdev->vdet = regulator_get(dev, "vdet"); if (IS_ERR_OR_NULL(sdev->vdet)) { 
dev_err(dev, "failed to get regulator 'vdet'\n"); goto fail_vdac; } /* enable gate for dac clock, because mixer uses it */ clk_enable(sdev->dac); /* configure power management */ pm_runtime_enable(dev); /* configuration of interface subdevice */ v4l2_subdev_init(&sdev->sd, &sdo_sd_ops); sdev->sd.owner = THIS_MODULE; strlcpy(sdev->sd.name, "s5p-sdo", sizeof sdev->sd.name); /* set default format */ sdev->fmt = sdo_find_format(SDO_DEFAULT_STD); BUG_ON(sdev->fmt == NULL); /* keeping subdev in device's private for use by other drivers */ dev_set_drvdata(dev, &sdev->sd); dev_info(dev, "probe succeeded\n"); return 0; fail_vdac: regulator_put(sdev->vdac); fail_fout_vpll: clk_put(sdev->fout_vpll); fail_dacphy: clk_put(sdev->dacphy); fail_dac: clk_put(sdev->dac); fail_sclk_dac: clk_put(sdev->sclk_dac); fail: dev_info(dev, "probe failed\n"); return ret; } static int __devexit sdo_remove(struct platform_device *pdev) { struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev); struct sdo_device *sdev = sd_to_sdev(sd); pm_runtime_disable(&pdev->dev); clk_disable(sdev->dac); regulator_put(sdev->vdet); regulator_put(sdev->vdac); clk_put(sdev->fout_vpll); clk_put(sdev->dacphy); clk_put(sdev->dac); clk_put(sdev->sclk_dac); dev_info(&pdev->dev, "remove successful\n"); return 0; } static struct platform_driver sdo_driver __refdata = { .probe = sdo_probe, .remove = __devexit_p(sdo_remove), .driver = { .name = "s5p-sdo", .owner = THIS_MODULE, .pm = &sdo_pm_ops, } }; module_platform_driver(sdo_driver);
gpl-2.0
djdeeles/android_kernel_lge_g3
arch/mips/oprofile/backtrace.c
6740
4529
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <linux/stacktrace.h>
#include <linux/kernel.h>
#include <asm/sections.h>
#include <asm/inst.h>

/* minimal unwinder state: stack pointer, program counter, return address */
struct stackframe {
	unsigned long sp;
	unsigned long pc;
	unsigned long ra;
};

/*
 * Fetch one word from a userspace address.
 * Returns 0 on success, -1 if the range is not accessible,
 * -3 if the copy itself faults.
 */
static inline int get_mem(unsigned long addr, unsigned long *result)
{
	unsigned long *uaddr = (unsigned long *) addr;

	if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
		return -1;
	if (__copy_from_user_inatomic(result, uaddr, sizeof(unsigned long)))
		return -3;
	return 0;
}

/*
 * These two instruction helpers were taken from process.c
 */

static inline int is_ra_save_ins(union mips_instruction *ip)
{
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op ||
		ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 && ip->i_format.rt == 31;
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
	/* addiu/daddiu sp,sp,-imm */
	return ip->i_format.rs == 29 && ip->i_format.rt == 29 &&
		(ip->i_format.opcode == addiu_op ||
		 ip->i_format.opcode == daddiu_op);
}

/*
 * Looks for specific instructions that mark the end of a function.
 * This usually means we ran into the code area of the previous function.
 */
static inline int is_end_of_function_marker(union mips_instruction *ip)
{
	/* jr ra */
	if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
		return 1;
	/* lui gp */
	if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
		return 1;
	return 0;
}

/*
 * TODO for userspace stack unwinding:
 * - handle cases where the stack is adjusted inside a function
 *     (generally doesn't happen)
 * - find optimal value for max_instr_check
 * - try to find a way to handle leaf functions
 */
static inline int unwind_user_frame(struct stackframe *old_frame,
				    const unsigned int max_instr_check)
{
	struct stackframe new_frame = *old_frame;
	off_t ra_offset = 0;
	size_t stack_size = 0;
	unsigned long addr;

	/* a zero in any slot means there is nothing left to unwind */
	if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
		return -9;

	/*
	 * Scan backwards from pc for the prologue instructions that
	 * established this frame: the sp adjustment and the ra spill.
	 */
	for (addr = new_frame.pc;
	     (addr + max_instr_check > new_frame.pc) &&
	     (!ra_offset || !stack_size);
	     --addr) {
		union mips_instruction ip;

		if (get_mem(addr, (unsigned long *) &ip))
			return -11;

		if (is_sp_move_ins(&ip)) {
			int stack_adjustment = ip.i_format.simmediate;

			if (stack_adjustment > 0)
				/* This marks the end of the previous
				   function, which means we overran. */
				break;
			stack_size = (unsigned) stack_adjustment;
		} else if (is_ra_save_ins(&ip)) {
			int ra_slot = ip.i_format.simmediate;

			if (ra_slot < 0)
				/* This shouldn't happen. */
				break;
			ra_offset = ra_slot;
		} else if (is_end_of_function_marker(&ip)) {
			break;
		}
	}

	if (!ra_offset || !stack_size)
		return -1;

	if (ra_offset) {
		new_frame.ra = old_frame->sp + ra_offset;
		if (get_mem(new_frame.ra, &(new_frame.ra)))
			return -13;
	}

	if (stack_size) {
		new_frame.sp = old_frame->sp + stack_size;
		if (get_mem(new_frame.sp, &(new_frame.sp)))
			return -14;
	}

	/* the unwound sp must not move down the stack */
	if (new_frame.sp > old_frame->sp)
		return -2;

	new_frame.pc = old_frame->ra;
	*old_frame = new_frame;

	return 0;
}

/* walk up to @depth user frames, emitting each return address */
static inline void do_user_backtrace(unsigned long low_addr,
				     struct stackframe *frame,
				     unsigned int depth)
{
	const unsigned int max_instr_check = 512;
	const unsigned long high_addr = low_addr + THREAD_SIZE;

	while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
		oprofile_add_trace(frame->ra);
		if (frame->sp < low_addr || frame->sp > high_addr)
			break;
	}
}

#ifndef CONFIG_KALLSYMS
/* kernel unwinding needs kallsyms; without it this is a no-op */
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth) { }
#else
/* walk up to @depth kernel frames via the generic stack unwinder */
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth)
{
	while (depth-- && frame->pc) {
		frame->pc = unwind_stack_by_address(low_addr,
						    &(frame->sp),
						    frame->pc,
						    &(frame->ra));
		oprofile_add_trace(frame->ra);
	}
}
#endif

/* oprofile entry point: pick the user or kernel unwinder */
void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
{
	struct stackframe frame = { .sp = regs->regs[29],
				    .pc = regs->cp0_epc,
				    .ra = regs->regs[31] };
	const int userspace = user_mode(regs);
	const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);

	if (userspace)
		do_user_backtrace(low_addr, &frame, depth);
	else
		do_kernel_backtrace(low_addr, &frame, depth);
}
gpl-2.0
DirtyUnicorns/android_kernel_samsung_jf
drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
8020
235941
/****************************************************************************** * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> ******************************************************************************/ /*Created on 2008/11/18, 3: 7*/ #include "r8192E_hwimg.h" u8 Rtl8192PciEFwBootArray[BootArrayLengthPciE] = { 0x10,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x3c,0x08,0xbf,0xc0,0x25,0x08,0x00,0x08, 0x3c,0x09,0xb0,0x03,0xad,0x28,0x00,0x20,0x40,0x80,0x68,0x00,0x00,0x00,0x00,0x00, 0x3c,0x0a,0xd0,0x00,0x40,0x8a,0x60,0x00,0x00,0x00,0x00,0x00,0x3c,0x08,0x80,0x01, 0x25,0x08,0xa8,0x04,0x24,0x09,0x00,0x01,0x3c,0x01,0x7f,0xff,0x34,0x21,0xff,0xff, 0x01,0x01,0x50,0x24,0x00,0x09,0x48,0x40,0x35,0x29,0x00,0x01,0x01,0x2a,0x10,0x2b, 0x14,0x40,0xff,0xfc,0x00,0x00,0x00,0x00,0x3c,0x0a,0x00,0x00,0x25,0x4a,0x00,0x00, 0x4c,0x8a,0x00,0x00,0x4c,0x89,0x08,0x00,0x00,0x00,0x00,0x00,0x3c,0x08,0x80,0x01, 0x25,0x08,0xa8,0x04,0x3c,0x01,0x80,0x00,0x01,0x21,0x48,0x25,0x3c,0x0a,0xbf,0xc0, 0x25,0x4a,0x00,0x7c,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20,0xad,0x00,0x00,0x00, 0x21,0x08,0x00,0x04,0x01,0x09,0x10,0x2b,0x14,0x40,0xff,0xf8,0x00,0x00,0x00,0x00, 0x3c,0x08,0x80,0x01,0x25,0x08,0x7f,0xff,0x24,0x09,0x00,0x01,0x3c,0x01,0x7f,0xff, 0x34,0x21,0xff,0xff,0x01,0x01,0x50,0x24,0x00,0x09,0x48,0x40,0x35,0x29,0x00,0x01, 
0x01,0x2a,0x10,0x2b,0x14,0x40,0xff,0xfc,0x00,0x00,0x00,0x00,0x3c,0x0a,0x80,0x01, 0x25,0x4a,0x00,0x00,0x3c,0x01,0x7f,0xff,0x34,0x21,0xff,0xff,0x01,0x41,0x50,0x24, 0x3c,0x09,0x00,0x01,0x35,0x29,0x7f,0xff,0x4c,0x8a,0x20,0x00,0x4c,0x89,0x28,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x24,0x08,0x04,0x10, 0x00,0x00,0x00,0x00,0x40,0x88,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x3c,0x08,0xbf,0xc0,0x00,0x00,0x00,0x00,0x8d,0x09,0x00,0x00,0x00,0x00,0x00,0x00, 0x3c,0x0a,0xbf,0xc0,0x25,0x4a,0x01,0x20,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20, 0x3c,0x08,0xb0,0x03,0x8d,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x35,0x29,0x00,0x10, 0xad,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x3c,0x08,0x80,0x00,0x25,0x08,0x4b,0x94, 0x01,0x00,0x00,0x08,0x00,0x00,0x00,0x00,}; u8 Rtl8192PciEFwMainArray[MainArrayLengthPciE] = { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x40,0x04,0x68,0x00,0x40,0x05,0x70,0x00,0x40,0x06,0x40,0x00,0x0c,0x00,0x12,0x98, 0x00,0x00,0x00,0x00,0x40,0x1a,0x68,0x00,0x33,0x5b,0x00,0x3c,0x17,0x60,0x00,0x09, 0x00,0x00,0x00,0x00,0x40,0x1b,0x60,0x00,0x00,0x00,0x00,0x00,0x03,0x5b,0xd0,0x24, 0x40,0x1a,0x70,0x00,0x03,0x40,0x00,0x08,0x42,0x00,0x00,0x10,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0xff,0x34,0x42,0xff,0xff,0x8c,0x43,0x00,0x00, 0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x00,0xd0, 
0xac,0x62,0x00,0x00,0x00,0x00,0x20,0x21,0x27,0x85,0x8b,0x70,0x00,0x85,0x18,0x21, 0x24,0x84,0x00,0x01,0x28,0x82,0x00,0x0a,0x14,0x40,0xff,0xfc,0xa0,0x60,0x00,0x00, 0x27,0x82,0x8b,0x7a,0x24,0x04,0x00,0x06,0x24,0x84,0xff,0xff,0xa4,0x40,0x00,0x00, 0x04,0x81,0xff,0xfd,0x24,0x42,0x00,0x02,0x24,0x02,0x00,0x03,0xa3,0x82,0x8b,0x70, 0x24,0x02,0x00,0x0a,0x24,0x03,0x09,0xc4,0xa3,0x82,0x8b,0x72,0x24,0x02,0x00,0x04, 0x24,0x04,0x00,0x01,0x24,0x05,0x00,0x02,0xa7,0x83,0x8b,0x86,0xa3,0x82,0x8b,0x78, 0x24,0x03,0x04,0x00,0x24,0x02,0x02,0x00,0xaf,0x83,0x8b,0x8c,0xa3,0x85,0x8b,0x79, 0xa7,0x82,0x8b,0x7a,0xa7,0x84,0x8b,0x7c,0xaf,0x84,0x8b,0x88,0xa3,0x84,0x8b,0x71, 0xa3,0x80,0x8b,0x73,0xa3,0x80,0x8b,0x74,0xa3,0x80,0x8b,0x75,0xa3,0x84,0x8b,0x76, 0xa3,0x85,0x8b,0x77,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03, 0x3c,0x02,0x80,0x00,0x24,0x42,0x01,0x7c,0x34,0x63,0x00,0x20,0xac,0x62,0x00,0x00, 0x27,0x84,0x8b,0x98,0x00,0x00,0x10,0x21,0x24,0x42,0x00,0x01,0x00,0x02,0x16,0x00, 0x00,0x02,0x16,0x03,0x28,0x43,0x00,0x03,0xac,0x80,0xff,0xfc,0xa0,0x80,0x00,0x00, 0x14,0x60,0xff,0xf9,0x24,0x84,0x00,0x0c,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x01,0xc0, 0x3c,0x08,0xb0,0x03,0xac,0x62,0x00,0x00,0x35,0x08,0x00,0x70,0x8d,0x02,0x00,0x00, 0x00,0xa0,0x48,0x21,0x00,0x04,0x26,0x00,0x00,0x02,0x2a,0x43,0x00,0x06,0x36,0x00, 0x00,0x07,0x3e,0x00,0x00,0x02,0x12,0x03,0x29,0x23,0x00,0x03,0x00,0x04,0x56,0x03, 0x00,0x06,0x36,0x03,0x00,0x07,0x3e,0x03,0x30,0x48,0x00,0x01,0x10,0x60,0x00,0x11, 0x30,0xa5,0x00,0x07,0x24,0x02,0x00,0x02,0x00,0x49,0x10,0x23,0x00,0x45,0x10,0x07, 0x30,0x42,0x00,0x01,0x10,0x40,0x00,0x66,0x00,0x00,0x00,0x00,0x8f,0xa2,0x00,0x10, 0x00,0x00,0x00,0x00,0x00,0x02,0x21,0x43,0x11,0x00,0x00,0x10,0x00,0x07,0x20,0x0b, 0x15,0x20,0x00,0x06,0x24,0x02,0x00,0x01,0x3c,0x02,0xb0,0x05,0x34,0x42,0x01,0x20, 0xa4,0x44,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x11,0x22,0x00,0x04, 
0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x05,0x08,0x00,0x00,0x94,0x34,0x42,0x01,0x24, 0x3c,0x02,0xb0,0x05,0x08,0x00,0x00,0x94,0x34,0x42,0x01,0x22,0x15,0x20,0x00,0x54, 0x24,0x02,0x00,0x01,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x74,0x90,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0xaf,0x83,0x8b,0x94,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x70, 0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x6b,0x00,0x08,0x11,0x60,0x00,0x18, 0x00,0x09,0x28,0x40,0x00,0x00,0x40,0x21,0x27,0x85,0x8b,0x90,0x8c,0xa3,0x00,0x00, 0x8c,0xa2,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x62,0x38,0x23,0x00,0x43,0x10,0x2a, 0x10,0x40,0x00,0x3d,0x00,0x00,0x00,0x00,0xac,0xa7,0x00,0x00,0x25,0x02,0x00,0x01, 0x00,0x02,0x16,0x00,0x00,0x02,0x46,0x03,0x29,0x03,0x00,0x03,0x14,0x60,0xff,0xf3, 0x24,0xa5,0x00,0x0c,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x70,0x90,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x4b,0x10,0x23,0xa0,0x62,0x00,0x00,0x00,0x09,0x28,0x40, 0x00,0xa9,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x8b,0x98,0x00,0x0a,0x20,0x0b, 0x00,0x43,0x18,0x21,0x10,0xc0,0x00,0x05,0x00,0x00,0x38,0x21,0x80,0x62,0x00,0x01, 0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x05,0x00,0x00,0x00,0x00,0x80,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x03,0x00,0xa9,0x10,0x21,0x24,0x07,0x00,0x01, 0x00,0xa9,0x10,0x21,0x00,0x02,0x30,0x80,0x27,0x82,0x8b,0x98,0xa0,0x67,0x00,0x01, 0x00,0xc2,0x38,0x21,0x80,0xe3,0x00,0x01,0x00,0x00,0x00,0x00,0x10,0x60,0x00,0x07, 0x00,0x00,0x00,0x00,0x27,0x83,0x8b,0x90,0x00,0xc3,0x18,0x21,0x8c,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x44,0x10,0x21,0xac,0x62,0x00,0x00,0x27,0x85,0x8b,0x94, 0x27,0x82,0x8b,0x90,0x00,0xc5,0x28,0x21,0x00,0xc2,0x10,0x21,0x8c,0x43,0x00,0x00, 0x8c,0xa4,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x64,0x18,0x2a,0x14,0x60,0x00,0x03, 0x24,0x02,0x00,0x01,0x03,0xe0,0x00,0x08,0xa0,0xe2,0x00,0x00,0xa0,0xe0,0x00,0x00, 0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0xb7,0xac,0xa0,0x00,0x00, 0x11,0x22,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x7c, 
0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0xaf,0x83,0x8b,0xac,0x08,0x00,0x00,0xa7, 0x3c,0x02,0xb0,0x03,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x78,0x90,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0xaf,0x83,0x8b,0xa0,0x08,0x00,0x00,0xa7,0x3c,0x02,0xb0,0x03, 0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x04,0x10, 0x3c,0x05,0xb0,0x03,0xac,0x62,0x00,0x00,0x34,0xa5,0x00,0x70,0x8c,0xa2,0x00,0x00, 0x90,0x84,0x00,0x08,0x3c,0x06,0xb0,0x03,0x00,0x02,0x16,0x00,0x2c,0x83,0x00,0x03, 0x34,0xc6,0x00,0x72,0x24,0x07,0x00,0x01,0x10,0x60,0x00,0x11,0x00,0x02,0x2f,0xc2, 0x90,0xc2,0x00,0x00,0x00,0x00,0x18,0x21,0x00,0x02,0x16,0x00,0x10,0xa7,0x00,0x09, 0x00,0x02,0x16,0x03,0x14,0x80,0x00,0x0c,0x30,0x43,0x00,0x03,0x83,0x82,0x8b,0x98, 0x00,0x00,0x00,0x00,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x00,0x02,0x16,0x00, 0x00,0x02,0x1e,0x03,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x72,0xa0,0x43,0x00,0x00, 0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0x45,0x00,0x05,0x10,0x87,0x00,0x04, 0x30,0x43,0x00,0x06,0x93,0x82,0x8b,0xb0,0x08,0x00,0x01,0x1f,0x00,0x43,0x10,0x21, 0x83,0x82,0x8b,0xa4,0x00,0x00,0x00,0x00,0x00,0x02,0x10,0x40,0x08,0x00,0x01,0x1f, 0x00,0x45,0x10,0x21,0x10,0x80,0x00,0x05,0x00,0x00,0x18,0x21,0x24,0x63,0x00,0x01, 0x00,0x64,0x10,0x2b,0x14,0x40,0xff,0xfd,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08, 0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x04,0xe4, 0x3c,0x04,0xb0,0x02,0x34,0x63,0x00,0x20,0xac,0x62,0x00,0x00,0x34,0x84,0x00,0x08, 0x24,0x02,0x00,0x01,0xaf,0x84,0x8b,0xc0,0xa3,0x82,0x8b,0xd0,0xa7,0x80,0x8b,0xc4, 0xa7,0x80,0x8b,0xc6,0xaf,0x80,0x8b,0xc8,0xaf,0x80,0x8b,0xcc,0x03,0xe0,0x00,0x08, 0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20, 0x24,0x42,0x05,0x24,0x3c,0x04,0xb0,0x03,0xac,0x62,0x00,0x00,0x34,0x84,0x00,0xac, 0x80,0xa2,0x00,0x15,0x8c,0x83,0x00,0x00,0x27,0xbd,0xff,0xf0,0x00,0x43,0x10,0x21, 0xac,0x82,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x10,0x3c,0x02,0xb0,0x03, 
0x3c,0x03,0x80,0x00,0x34,0x42,0x00,0x20,0x24,0x63,0x05,0x5c,0x27,0xbd,0xff,0xe0, 0xac,0x43,0x00,0x00,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x18, 0x8f,0x90,0x8b,0xc0,0x0c,0x00,0x02,0x98,0x00,0x80,0x88,0x21,0x14,0x40,0x00,0x2a, 0x3c,0x02,0x00,0x80,0x16,0x20,0x00,0x02,0x34,0x42,0x02,0x01,0x24,0x02,0x02,0x01, 0xae,0x02,0x00,0x00,0x97,0x84,0x8b,0xc4,0x97,0x82,0x8b,0xc6,0x3c,0x03,0xb0,0x02, 0x00,0x83,0x20,0x21,0x24,0x42,0x00,0x04,0xa7,0x82,0x8b,0xc6,0xa4,0x82,0x00,0x00, 0x8f,0x84,0x8b,0xc8,0x8f,0x82,0x8b,0xc0,0x93,0x85,0x8b,0x72,0x24,0x84,0x00,0x01, 0x24,0x42,0x00,0x04,0x24,0x03,0x8f,0xff,0x3c,0x07,0xb0,0x06,0x3c,0x06,0xb0,0x03, 0x00,0x43,0x10,0x24,0x00,0x85,0x28,0x2a,0x34,0xe7,0x80,0x18,0xaf,0x82,0x8b,0xc0, 0xaf,0x84,0x8b,0xc8,0x10,0xa0,0x00,0x08,0x34,0xc6,0x01,0x08,0x8f,0x83,0x8b,0xcc, 0x8f,0x84,0x8b,0x8c,0x8c,0xc2,0x00,0x00,0x00,0x64,0x18,0x21,0x00,0x43,0x10,0x2b, 0x14,0x40,0x00,0x09,0x00,0x00,0x00,0x00,0x8c,0xe2,0x00,0x00,0x3c,0x03,0x0f,0x00, 0x3c,0x04,0x04,0x00,0x00,0x43,0x10,0x24,0x10,0x44,0x00,0x03,0x00,0x00,0x00,0x00, 0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x18,0x7b,0xb0,0x00,0xbc, 0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x27,0xbd,0xff,0xd8,0x3c,0x02,0xb0,0x03, 0x3c,0x03,0x80,0x00,0x24,0x63,0x06,0x48,0xaf,0xb0,0x00,0x10,0x34,0x42,0x00,0x20, 0x8f,0x90,0x8b,0xc0,0xac,0x43,0x00,0x00,0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18, 0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x20,0x00,0x80,0x88,0x21,0x00,0xa0,0x90,0x21, 0x0c,0x00,0x02,0x98,0x00,0xc0,0x98,0x21,0x24,0x07,0x8f,0xff,0x14,0x40,0x00,0x19, 0x26,0x03,0x00,0x04,0x24,0x02,0x0e,0x03,0xae,0x02,0x00,0x00,0x00,0x67,0x80,0x24, 0x26,0x02,0x00,0x04,0xae,0x11,0x00,0x00,0x00,0x47,0x80,0x24,0x97,0x86,0x8b,0xc4, 0x26,0x03,0x00,0x04,0xae,0x12,0x00,0x00,0x00,0x67,0x80,0x24,0xae,0x13,0x00,0x00, 0x8f,0x84,0x8b,0xc0,0x3c,0x02,0xb0,0x02,0x97,0x85,0x8b,0xc6,0x00,0xc2,0x30,0x21, 0x8f,0x82,0x8b,0xc8,0x24,0x84,0x00,0x10,0x24,0xa5,0x00,0x10,0x00,0x87,0x20,0x24, 
0x24,0x42,0x00,0x01,0xa7,0x85,0x8b,0xc6,0xaf,0x84,0x8b,0xc0,0xaf,0x82,0x8b,0xc8, 0xa4,0xc5,0x00,0x00,0x8f,0xbf,0x00,0x20,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc, 0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28,0x27,0xbd,0xff,0xe8,0xaf,0xbf,0x00,0x10, 0x94,0x82,0x00,0x04,0x00,0x00,0x00,0x00,0x30,0x42,0xe0,0x00,0x14,0x40,0x00,0x14, 0x00,0x00,0x00,0x00,0x90,0x82,0x00,0x02,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfc, 0x00,0x82,0x28,0x21,0x8c,0xa4,0x00,0x00,0x3c,0x02,0x00,0x70,0x8c,0xa6,0x00,0x08, 0x00,0x82,0x10,0x21,0x2c,0x43,0x00,0x06,0x10,0x60,0x00,0x09,0x3c,0x03,0x80,0x01, 0x00,0x02,0x10,0x80,0x24,0x63,0x01,0xe8,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00,0xaf,0x86,0x80,0x14, 0x8f,0xbf,0x00,0x10,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18, 0x8c,0xa4,0x00,0x00,0x0c,0x00,0x17,0x84,0x00,0x00,0x00,0x00,0x08,0x00,0x01,0xdc, 0x00,0x00,0x00,0x00,0x0c,0x00,0x24,0x49,0x00,0xc0,0x20,0x21,0x08,0x00,0x01,0xdc, 0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x08,0x8c,0x44,0x00,0x00, 0x8f,0x82,0x80,0x18,0x3c,0x03,0x00,0x0f,0x34,0x63,0x42,0x40,0x00,0x43,0x10,0x21, 0x00,0x82,0x20,0x2b,0x10,0x80,0x00,0x09,0x24,0x03,0x00,0x05,0x8f,0x82,0x83,0x60, 0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xaf,0x82,0x83,0x60,0x10,0x43,0x00,0x03, 0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03, 0x8c,0x63,0x01,0x08,0x24,0x02,0x00,0x01,0xa3,0x82,0x80,0x11,0xaf,0x80,0x83,0x60, 0xaf,0x83,0x80,0x18,0x08,0x00,0x01,0xf9,0x00,0x00,0x00,0x00,0x30,0x84,0x00,0xff, 0x14,0x80,0x00,0x2f,0x00,0x00,0x00,0x00,0x8f,0x82,0x80,0x14,0xa3,0x85,0x83,0x93, 0x10,0x40,0x00,0x2b,0x2c,0xa2,0x00,0x04,0x14,0x40,0x00,0x06,0x00,0x05,0x10,0x40, 0x24,0xa2,0xff,0xfc,0x2c,0x42,0x00,0x08,0x10,0x40,0x00,0x09,0x24,0xa2,0xff,0xf0, 0x00,0x05,0x10,0x40,0x27,0x84,0x83,0x9c,0x00,0x44,0x10,0x21,0x94,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0x24,0x63,0x00,0x01,0x03,0xe0,0x00,0x08,0xa4,0x43,0x00,0x00, 
0x2c,0x42,0x00,0x10,0x14,0x40,0x00,0x0a,0x00,0x05,0x10,0x40,0x24,0xa2,0xff,0xe0, 0x2c,0x42,0x00,0x10,0x14,0x40,0x00,0x06,0x00,0x05,0x10,0x40,0x24,0xa2,0xff,0xd0, 0x2c,0x42,0x00,0x10,0x10,0x40,0x00,0x09,0x24,0xa2,0xff,0xc0,0x00,0x05,0x10,0x40, 0x27,0x84,0x83,0x9c,0x00,0x44,0x10,0x21,0x94,0x43,0xff,0xf8,0x00,0x00,0x00,0x00, 0x24,0x63,0x00,0x01,0x03,0xe0,0x00,0x08,0xa4,0x43,0xff,0xf8,0x2c,0x42,0x00,0x10, 0x10,0x40,0x00,0x07,0x00,0x05,0x10,0x40,0x27,0x84,0x83,0x9c,0x00,0x44,0x10,0x21, 0x94,0x43,0xff,0xf8,0x00,0x00,0x00,0x00,0x24,0x63,0x00,0x01,0xa4,0x43,0xff,0xf8, 0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x8f,0x86,0x8b,0xc0,0x8f,0x82,0x80,0x14, 0x27,0xbd,0xff,0xe8,0xaf,0xbf,0x00,0x10,0x10,0x40,0x00,0x2a,0x00,0xc0,0x38,0x21, 0x24,0x02,0x00,0x07,0x24,0x03,0xff,0x9c,0xa3,0x82,0x83,0x9b,0xa3,0x83,0x83,0x9a, 0x27,0x8a,0x83,0x98,0x00,0x00,0x20,0x21,0x24,0x09,0x8f,0xff,0x00,0x04,0x10,0x80, 0x00,0x4a,0x28,0x21,0x8c,0xa2,0x00,0x00,0x24,0xe3,0x00,0x04,0x24,0x88,0x00,0x01, 0xac,0xe2,0x00,0x00,0x10,0x80,0x00,0x02,0x00,0x69,0x38,0x24,0xac,0xa0,0x00,0x00, 0x31,0x04,0x00,0xff,0x2c,0x82,0x00,0x27,0x14,0x40,0xff,0xf5,0x00,0x04,0x10,0x80, 0x97,0x83,0x8b,0xc6,0x97,0x85,0x8b,0xc4,0x3c,0x02,0xb0,0x02,0x24,0x63,0x00,0x9c, 0x00,0xa2,0x28,0x21,0x3c,0x04,0xb0,0x06,0xa7,0x83,0x8b,0xc6,0x34,0x84,0x80,0x18, 0xa4,0xa3,0x00,0x00,0x8c,0x85,0x00,0x00,0x24,0x02,0x8f,0xff,0x24,0xc6,0x00,0x9c, 0x3c,0x03,0x0f,0x00,0x00,0xc2,0x30,0x24,0x00,0xa3,0x28,0x24,0x3c,0x02,0x04,0x00, 0xaf,0x86,0x8b,0xc0,0x10,0xa2,0x00,0x03,0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96, 0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x10,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x18,0x8f,0x86,0x8b,0xc0,0x27,0xbd,0xff,0xc8,0x24,0x02,0x00,0x08, 0x24,0x03,0x00,0x20,0xaf,0xbf,0x00,0x30,0xa3,0xa2,0x00,0x13,0xa3,0xa3,0x00,0x12, 0xa7,0xa4,0x00,0x10,0x00,0xc0,0x28,0x21,0x27,0xa9,0x00,0x10,0x00,0x00,0x38,0x21, 0x24,0x08,0x8f,0xff,0x00,0x07,0x10,0x80,0x00,0x49,0x10,0x21,0x8c,0x44,0x00,0x00, 
0x24,0xe3,0x00,0x01,0x30,0x67,0x00,0xff,0x24,0xa2,0x00,0x04,0x2c,0xe3,0x00,0x08, 0xac,0xa4,0x00,0x00,0x14,0x60,0xff,0xf7,0x00,0x48,0x28,0x24,0x97,0x83,0x8b,0xc6, 0x97,0x85,0x8b,0xc4,0x3c,0x02,0xb0,0x02,0x24,0x63,0x00,0x20,0x00,0xa2,0x28,0x21, 0x3c,0x04,0xb0,0x06,0xa7,0x83,0x8b,0xc6,0x34,0x84,0x80,0x18,0xa4,0xa3,0x00,0x00, 0x8c,0x85,0x00,0x00,0x24,0x02,0x8f,0xff,0x24,0xc6,0x00,0x20,0x3c,0x03,0x0f,0x00, 0x00,0xc2,0x30,0x24,0x00,0xa3,0x28,0x24,0x3c,0x02,0x04,0x00,0xaf,0x86,0x8b,0xc0, 0x10,0xa2,0x00,0x03,0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00, 0x8f,0xbf,0x00,0x30,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x38, 0x93,0x82,0x8b,0xd0,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x11,0x24,0x06,0x00,0x01, 0x8f,0x82,0x8b,0xc8,0x3c,0x05,0xb0,0x06,0x3c,0x04,0xb0,0x03,0x34,0xa5,0x80,0x18, 0x34,0x84,0x01,0x08,0x14,0x40,0x00,0x09,0x00,0x00,0x30,0x21,0x97,0x82,0x8b,0xc4, 0x8c,0x84,0x00,0x00,0x3c,0x03,0xb0,0x02,0x00,0x43,0x10,0x21,0xaf,0x84,0x8b,0xcc, 0xa7,0x80,0x8b,0xc6,0xac,0x40,0x00,0x00,0xac,0x40,0x00,0x04,0x8c,0xa2,0x00,0x00, 0x03,0xe0,0x00,0x08,0x00,0xc0,0x10,0x21,0x8f,0x86,0x8b,0xc0,0x8f,0x82,0x8b,0xc8, 0x27,0xbd,0xff,0xe8,0xaf,0xbf,0x00,0x10,0x00,0xc0,0x40,0x21,0x14,0x40,0x00,0x0a, 0x00,0x40,0x50,0x21,0x00,0x00,0x38,0x21,0x27,0x89,0x83,0x68,0x24,0xe2,0x00,0x01, 0x00,0x07,0x18,0x80,0x30,0x47,0x00,0xff,0x00,0x69,0x18,0x21,0x2c,0xe2,0x00,0x0a, 0x14,0x40,0xff,0xfa,0xac,0x60,0x00,0x00,0x3c,0x02,0x00,0x80,0x10,0x82,0x00,0x6f, 0x00,0x00,0x00,0x00,0x97,0x82,0x83,0x6e,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01, 0xa7,0x82,0x83,0x6e,0x90,0xa3,0x00,0x15,0x97,0x82,0x83,0x70,0x00,0x03,0x1e,0x00, 0x00,0x03,0x1e,0x03,0x00,0x43,0x10,0x21,0xa7,0x82,0x83,0x70,0x8c,0xa4,0x00,0x20, 0x3c,0x02,0x00,0x60,0x3c,0x03,0x00,0x20,0x00,0x82,0x20,0x24,0x10,0x83,0x00,0x54, 0x00,0x00,0x00,0x00,0x14,0x80,0x00,0x47,0x00,0x00,0x00,0x00,0x97,0x82,0x83,0x74, 0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xa7,0x82,0x83,0x74,0x84,0xa3,0x00,0x06, 
0x8f,0x82,0x83,0x84,0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x21,0xaf,0x82,0x83,0x84, 0x25,0x42,0x00,0x01,0x28,0x43,0x27,0x10,0xaf,0x82,0x8b,0xc8,0x10,0x60,0x00,0x09, 0x24,0x02,0x00,0x04,0x93,0x83,0x80,0x11,0x24,0x02,0x00,0x01,0x10,0x62,0x00,0x05, 0x24,0x02,0x00,0x04,0x8f,0xbf,0x00,0x10,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x18,0x24,0x03,0x00,0x28,0xa3,0x83,0x83,0x6a,0xa3,0x82,0x83,0x6b, 0x90,0xa2,0x00,0x18,0x93,0x83,0x83,0x93,0x00,0x00,0x38,0x21,0x00,0x02,0x16,0x00, 0x00,0x02,0x16,0x03,0xa7,0x82,0x83,0x7e,0xa3,0x83,0x83,0x8c,0x27,0x89,0x83,0x68, 0x24,0x05,0x8f,0xff,0x00,0x07,0x10,0x80,0x00,0x49,0x10,0x21,0x8c,0x44,0x00,0x00, 0x24,0xe3,0x00,0x01,0x30,0x67,0x00,0xff,0x25,0x02,0x00,0x04,0x2c,0xe3,0x00,0x0a, 0xad,0x04,0x00,0x00,0x14,0x60,0xff,0xf7,0x00,0x45,0x40,0x24,0x97,0x83,0x8b,0xc6, 0x97,0x85,0x8b,0xc4,0x3c,0x02,0xb0,0x02,0x24,0x63,0x00,0x28,0x00,0xa2,0x28,0x21, 0x3c,0x04,0xb0,0x06,0xa7,0x83,0x8b,0xc6,0x34,0x84,0x80,0x18,0xa4,0xa3,0x00,0x00, 0x8c,0x85,0x00,0x00,0x24,0x02,0x8f,0xff,0x24,0xc6,0x00,0x28,0x3c,0x03,0x0f,0x00, 0x00,0xc2,0x30,0x24,0x00,0xa3,0x28,0x24,0x3c,0x02,0x04,0x00,0xaf,0x86,0x8b,0xc0, 0x10,0xa2,0x00,0x03,0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00, 0x0c,0x00,0x02,0x36,0x00,0x00,0x00,0x00,0xa3,0x80,0x80,0x11,0x08,0x00,0x02,0xe5, 0x00,0x00,0x00,0x00,0x97,0x82,0x83,0x76,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01, 0xa7,0x82,0x83,0x76,0x84,0xa3,0x00,0x06,0x8f,0x82,0x83,0x88,0x00,0x00,0x00,0x00, 0x00,0x43,0x10,0x21,0xaf,0x82,0x83,0x88,0x08,0x00,0x02,0xdd,0x25,0x42,0x00,0x01, 0x97,0x82,0x83,0x72,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xa7,0x82,0x83,0x72, 0x84,0xa3,0x00,0x06,0x8f,0x82,0x83,0x80,0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x21, 0xaf,0x82,0x83,0x80,0x08,0x00,0x02,0xdd,0x25,0x42,0x00,0x01,0x97,0x82,0x83,0x6c, 0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xa7,0x82,0x83,0x6c,0x08,0x00,0x02,0xc5, 0x00,0x00,0x00,0x00,0x27,0xbd,0xff,0xd0,0xaf,0xbf,0x00,0x28,0x8c,0xa3,0x00,0x20, 
0x8f,0x8a,0x8b,0xc0,0x3c,0x02,0x00,0x10,0x00,0x62,0x10,0x24,0x00,0xa0,0x38,0x21, 0x01,0x40,0x48,0x21,0x10,0x40,0x00,0x3d,0x00,0x80,0x28,0x21,0x8c,0xe4,0x00,0x1c, 0x34,0xa5,0x12,0x06,0xaf,0xa5,0x00,0x10,0x8c,0x82,0x00,0x08,0x00,0x03,0x1c,0x42, 0x30,0x63,0x00,0x30,0x00,0x02,0x13,0x02,0x30,0x42,0x00,0x40,0x00,0x43,0x10,0x25, 0x90,0xe6,0x00,0x10,0x90,0xe4,0x00,0x13,0x94,0xe8,0x00,0x0c,0x94,0xe3,0x00,0x1a, 0x00,0x02,0x16,0x00,0x90,0xe7,0x00,0x12,0x00,0xa2,0x28,0x25,0x24,0x02,0x12,0x34, 0xa7,0xa2,0x00,0x1c,0x24,0x02,0x56,0x78,0xaf,0xa5,0x00,0x10,0xa3,0xa6,0x00,0x18, 0xa3,0xa7,0x00,0x1f,0xa7,0xa3,0x00,0x1a,0xa3,0xa4,0x00,0x19,0xa7,0xa8,0x00,0x20, 0xa7,0xa2,0x00,0x22,0x00,0x00,0x28,0x21,0x27,0xa7,0x00,0x10,0x24,0x06,0x8f,0xff, 0x00,0x05,0x10,0x80,0x00,0x47,0x10,0x21,0x8c,0x44,0x00,0x00,0x24,0xa3,0x00,0x01, 0x30,0x65,0x00,0xff,0x25,0x22,0x00,0x04,0x2c,0xa3,0x00,0x05,0xad,0x24,0x00,0x00, 0x14,0x60,0xff,0xf7,0x00,0x46,0x48,0x24,0x97,0x83,0x8b,0xc6,0x97,0x85,0x8b,0xc4, 0x3c,0x02,0xb0,0x02,0x24,0x63,0x00,0x14,0x00,0xa2,0x28,0x21,0x3c,0x04,0xb0,0x06, 0xa7,0x83,0x8b,0xc6,0x34,0x84,0x80,0x18,0xa4,0xa3,0x00,0x00,0x8c,0x85,0x00,0x00, 0x24,0x02,0x8f,0xff,0x25,0x46,0x00,0x14,0x3c,0x03,0x0f,0x00,0x00,0xc2,0x50,0x24, 0x00,0xa3,0x28,0x24,0x3c,0x02,0x04,0x00,0xaf,0x8a,0x8b,0xc0,0x10,0xa2,0x00,0x03, 0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x28, 0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x30,0x3c,0x05,0xb0,0x03, 0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xc8,0x00,0x04,0x22,0x00,0x34,0xa5,0x00,0x20, 0x24,0x42,0x0d,0xfc,0x3c,0x03,0xb0,0x00,0xaf,0xb5,0x00,0x24,0xaf,0xb4,0x00,0x20, 0xaf,0xb2,0x00,0x18,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x30,0x00,0x83,0x80,0x21, 0xaf,0xb7,0x00,0x2c,0xaf,0xb6,0x00,0x28,0xaf,0xb3,0x00,0x1c,0xaf,0xb1,0x00,0x14, 0xac,0xa2,0x00,0x00,0x8e,0x09,0x00,0x00,0x00,0x00,0x90,0x21,0x26,0x10,0x00,0x08, 0x00,0x09,0xa6,0x02,0x12,0x80,0x00,0x13,0x00,0x00,0xa8,0x21,0x24,0x13,0x00,0x02, 
0x3c,0x16,0x00,0xff,0x3c,0x17,0xff,0x00,0x8e,0x09,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x09,0x12,0x02,0x24,0x42,0x00,0x02,0x31,0x25,0x00,0xff,0x10,0xb3,0x00,0x76, 0x30,0x51,0x00,0xff,0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x18,0x00,0x00,0x00,0x00, 0x02,0x51,0x10,0x21,0x30,0x52,0xff,0xff,0x02,0x54,0x18,0x2b,0x14,0x60,0xff,0xf2, 0x02,0x11,0x80,0x21,0x12,0xa0,0x00,0x0a,0x3c,0x02,0xb0,0x06,0x34,0x42,0x80,0x18, 0x8c,0x43,0x00,0x00,0x3c,0x04,0x0f,0x00,0x3c,0x02,0x04,0x00,0x00,0x64,0x18,0x24, 0x10,0x62,0x00,0x03,0x00,0x00,0x00,0x00,0x0c,0x00,0x04,0x96,0x00,0x00,0x00,0x00, 0x8f,0xbf,0x00,0x30,0x7b,0xb6,0x01,0x7c,0x7b,0xb4,0x01,0x3c,0x7b,0xb2,0x00,0xfc, 0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x38,0x8e,0x09,0x00,0x04, 0x24,0x15,0x00,0x01,0x8e,0x06,0x00,0x0c,0x00,0x09,0x11,0x42,0x00,0x09,0x18,0xc2, 0x30,0x48,0x00,0x03,0x00,0x09,0x14,0x02,0x30,0x6c,0x00,0x03,0x00,0x09,0x26,0x02, 0x11,0x15,0x00,0x45,0x30,0x43,0x00,0x0f,0x29,0x02,0x00,0x02,0x14,0x40,0x00,0x26, 0x00,0x00,0x00,0x00,0x11,0x13,0x00,0x0f,0x00,0x00,0x38,0x21,0x00,0x07,0x22,0x02, 0x30,0x84,0xff,0x00,0x3c,0x03,0x00,0xff,0x00,0x07,0x2e,0x02,0x00,0x07,0x12,0x00, 0x00,0x43,0x10,0x24,0x00,0xa4,0x28,0x25,0x00,0xa2,0x28,0x25,0x00,0x07,0x1e,0x00, 0x00,0xa3,0x28,0x25,0x0c,0x00,0x01,0x92,0x01,0x20,0x20,0x21,0x08,0x00,0x03,0xa5, 0x02,0x51,0x10,0x21,0x11,0x95,0x00,0x0f,0x00,0x00,0x00,0x00,0x11,0x88,0x00,0x07, 0x00,0x00,0x00,0x00,0x00,0x04,0x10,0x80,0x27,0x83,0x8b,0x70,0x00,0x43,0x10,0x21, 0x8c,0x47,0x00,0x18,0x08,0x00,0x03,0xcc,0x00,0x07,0x22,0x02,0x00,0x04,0x10,0x40, 0x27,0x83,0x8b,0x78,0x00,0x43,0x10,0x21,0x94,0x47,0x00,0x02,0x08,0x00,0x03,0xcc, 0x00,0x07,0x22,0x02,0x27,0x82,0x8b,0x70,0x00,0x82,0x10,0x21,0x90,0x47,0x00,0x00, 0x08,0x00,0x03,0xcc,0x00,0x07,0x22,0x02,0x15,0x00,0xff,0xdc,0x00,0x00,0x38,0x21, 0x10,0x75,0x00,0x05,0x00,0x80,0x38,0x21,0x00,0x65,0x18,0x26,0x24,0x82,0x01,0x00, 0x00,0x00,0x38,0x21,0x00,0x43,0x38,0x0a,0x24,0x02,0x00,0x01,0x11,0x82,0x00,0x0e, 
0x3c,0x02,0xb0,0x03,0x24,0x02,0x00,0x02,0x11,0x82,0x00,0x06,0x00,0x00,0x00,0x00, 0x3c,0x02,0xb0,0x03,0x00,0xe2,0x10,0x21,0x8c,0x47,0x00,0x00,0x08,0x00,0x03,0xcc, 0x00,0x07,0x22,0x02,0x3c,0x02,0xb0,0x03,0x00,0xe2,0x10,0x21,0x94,0x43,0x00,0x00, 0x08,0x00,0x03,0xcb,0x30,0x67,0xff,0xff,0x00,0xe2,0x10,0x21,0x90,0x43,0x00,0x00, 0x08,0x00,0x03,0xcb,0x30,0x67,0x00,0xff,0x30,0x62,0x00,0x03,0x00,0x02,0x12,0x00, 0x11,0x95,0x00,0x07,0x00,0x44,0x38,0x21,0x11,0x93,0x00,0x03,0x00,0x00,0x00,0x00, 0x08,0x00,0x03,0xfd,0x3c,0x02,0xb0,0x0a,0x08,0x00,0x04,0x02,0x3c,0x02,0xb0,0x0a, 0x08,0x00,0x04,0x06,0x3c,0x02,0xb0,0x0a,0x8e,0x09,0x00,0x04,0x8e,0x02,0x00,0x08, 0x8e,0x03,0x00,0x0c,0x00,0x09,0x41,0x42,0x00,0x02,0x22,0x02,0x00,0x03,0x3a,0x02, 0x30,0x84,0xff,0x00,0x30,0xe7,0xff,0x00,0x00,0x02,0x5e,0x02,0x00,0x02,0x32,0x00, 0x00,0x03,0x56,0x02,0x00,0x03,0x2a,0x00,0x01,0x64,0x58,0x25,0x00,0xd6,0x30,0x24, 0x01,0x47,0x50,0x25,0x00,0x02,0x16,0x00,0x00,0xb6,0x28,0x24,0x00,0x03,0x1e,0x00, 0x01,0x66,0x58,0x25,0x01,0x45,0x50,0x25,0x00,0x57,0x10,0x24,0x00,0x77,0x18,0x24, 0x01,0x62,0x38,0x25,0x01,0x43,0x30,0x25,0x00,0x09,0x10,0xc2,0x00,0x09,0x1c,0x02, 0x31,0x08,0x00,0x03,0x30,0x4c,0x00,0x03,0x30,0x63,0x00,0x0f,0x00,0x09,0x26,0x02, 0x00,0xe0,0x58,0x21,0x15,0x00,0x00,0x28,0x00,0xc0,0x50,0x21,0x24,0x02,0x00,0x01, 0x10,0x62,0x00,0x06,0x00,0x80,0x28,0x21,0x24,0x02,0x00,0x03,0x14,0x62,0xff,0x69, 0x02,0x51,0x10,0x21,0x24,0x85,0x01,0x00,0x24,0x02,0x00,0x01,0x11,0x82,0x00,0x15, 0x24,0x02,0x00,0x02,0x11,0x82,0x00,0x0a,0x3c,0x03,0xb0,0x03,0x00,0xa3,0x18,0x21, 0x8c,0x62,0x00,0x00,0x00,0x0a,0x20,0x27,0x01,0x6a,0x28,0x24,0x00,0x44,0x10,0x24, 0x00,0x45,0x10,0x25,0xac,0x62,0x00,0x00,0x08,0x00,0x03,0xa5,0x02,0x51,0x10,0x21, 0x00,0xa3,0x18,0x21,0x94,0x62,0x00,0x00,0x00,0x0a,0x20,0x27,0x01,0x6a,0x28,0x24, 0x00,0x44,0x10,0x24,0x00,0x45,0x10,0x25,0xa4,0x62,0x00,0x00,0x08,0x00,0x03,0xa5, 0x02,0x51,0x10,0x21,0x3c,0x03,0xb0,0x03,0x00,0xa3,0x18,0x21,0x90,0x62,0x00,0x00, 
0x00,0x0a,0x20,0x27,0x01,0x6a,0x28,0x24,0x00,0x44,0x10,0x24,0x00,0x45,0x10,0x25, 0x08,0x00,0x03,0xa4,0xa0,0x62,0x00,0x00,0x24,0x02,0x00,0x01,0x11,0x02,0x00,0x21, 0x00,0x00,0x00,0x00,0x15,0x13,0xff,0x42,0x00,0x00,0x00,0x00,0x11,0x82,0x00,0x17, 0x00,0x00,0x00,0x00,0x11,0x88,0x00,0x0b,0x00,0x00,0x00,0x00,0x27,0x83,0x8b,0x70, 0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21,0x8c,0x82,0x00,0x18,0x00,0x06,0x18,0x27, 0x00,0xe6,0x28,0x24,0x00,0x43,0x10,0x24,0x00,0x45,0x10,0x25,0x08,0x00,0x03,0xa4, 0xac,0x82,0x00,0x18,0x27,0x83,0x8b,0x78,0x00,0x04,0x20,0x40,0x00,0x83,0x20,0x21, 0x94,0x82,0x00,0x02,0x00,0x06,0x18,0x27,0x00,0xe6,0x28,0x24,0x00,0x43,0x10,0x24, 0x00,0x45,0x10,0x25,0x08,0x00,0x03,0xa4,0xa4,0x82,0x00,0x02,0x27,0x83,0x8b,0x70, 0x00,0x83,0x18,0x21,0x90,0x62,0x00,0x00,0x00,0x06,0x20,0x27,0x08,0x00,0x04,0x5a, 0x00,0xe6,0x28,0x24,0x30,0x62,0x00,0x07,0x00,0x02,0x12,0x00,0x11,0x88,0x00,0x0f, 0x00,0x44,0x10,0x21,0x11,0x93,0x00,0x07,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x0a, 0x00,0x43,0x18,0x21,0x8c,0x62,0x00,0x00,0x00,0x06,0x20,0x27,0x08,0x00,0x04,0x47, 0x00,0xe6,0x28,0x24,0x3c,0x03,0xb0,0x0a,0x00,0x43,0x18,0x21,0x94,0x62,0x00,0x00, 0x00,0x06,0x20,0x27,0x08,0x00,0x04,0x50,0x00,0xe6,0x28,0x24,0x3c,0x03,0xb0,0x0a, 0x08,0x00,0x04,0x7d,0x00,0x43,0x18,0x21,0x97,0x85,0x8b,0xc4,0x3c,0x07,0xb0,0x02, 0x3c,0x04,0xb0,0x03,0x3c,0x02,0x80,0x00,0x00,0xa7,0x28,0x21,0x34,0x84,0x00,0x20, 0x24,0x42,0x12,0x58,0x24,0x03,0xff,0x80,0xac,0x82,0x00,0x00,0xa0,0xa3,0x00,0x07, 0x97,0x82,0x8b,0xc6,0x97,0x85,0x8b,0xc4,0x3c,0x06,0xb0,0x06,0x30,0x42,0xff,0xf8, 0x24,0x42,0x00,0x10,0x00,0xa2,0x10,0x21,0x30,0x42,0x0f,0xff,0x24,0x44,0x00,0x08, 0x30,0x84,0x0f,0xff,0x00,0x05,0x28,0xc2,0x3c,0x03,0x00,0x40,0x00,0xa3,0x28,0x25, 0x00,0x87,0x20,0x21,0x34,0xc6,0x80,0x18,0xac,0xc5,0x00,0x00,0xaf,0x84,0x8b,0xc0, 0xa7,0x82,0x8b,0xc4,0xa7,0x80,0x8b,0xc6,0xaf,0x80,0x8b,0xc8,0x03,0xe0,0x00,0x08, 0x00,0x00,0x00,0x00,0x30,0xa5,0x00,0xff,0x30,0x84,0x00,0xff,0x24,0x02,0x00,0x01, 
0x00,0xe0,0x48,0x21,0x30,0xc6,0x00,0xff,0x8f,0xa7,0x00,0x10,0x10,0x82,0x00,0x07, 0x00,0xa0,0x40,0x21,0x24,0x02,0x00,0x03,0x10,0x82,0x00,0x03,0x00,0x00,0x00,0x00, 0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x24,0xa8,0x01,0x00,0x3c,0x03,0xb0,0x03, 0x24,0x02,0x00,0x01,0x00,0x07,0x20,0x27,0x01,0x27,0x28,0x24,0x10,0xc2,0x00,0x14, 0x01,0x03,0x18,0x21,0x24,0x02,0x00,0x02,0x10,0xc2,0x00,0x09,0x00,0x07,0x50,0x27, 0x3c,0x03,0xb0,0x03,0x01,0x03,0x18,0x21,0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x4a,0x10,0x24,0x00,0x45,0x10,0x25,0x08,0x00,0x04,0xe1,0xac,0x62,0x00,0x00, 0x3c,0x03,0xb0,0x03,0x01,0x03,0x18,0x21,0x94,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x4a,0x10,0x24,0x00,0x45,0x10,0x25,0x03,0xe0,0x00,0x08,0xa4,0x62,0x00,0x00, 0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x44,0x10,0x24,0x00,0x45,0x10,0x25, 0xa0,0x62,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0x84,0x00,0x07, 0x00,0x04,0x22,0x00,0x30,0xa5,0x00,0xff,0x00,0x85,0x28,0x21,0x3c,0x02,0xb0,0x0a, 0x00,0xa2,0x40,0x21,0x30,0xc6,0x00,0xff,0x24,0x02,0x00,0x01,0x8f,0xa4,0x00,0x10, 0x10,0xc2,0x00,0x14,0x24,0x02,0x00,0x02,0x00,0x04,0x50,0x27,0x10,0xc2,0x00,0x09, 0x00,0xe4,0x48,0x24,0x3c,0x03,0xb0,0x0a,0x00,0xa3,0x18,0x21,0x8c,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x4a,0x10,0x24,0x00,0x49,0x10,0x25,0x03,0xe0,0x00,0x08, 0xac,0x62,0x00,0x00,0x3c,0x03,0xb0,0x0a,0x00,0xa3,0x18,0x21,0x94,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x4a,0x10,0x24,0x00,0x49,0x10,0x25,0x03,0xe0,0x00,0x08, 0xa4,0x62,0x00,0x00,0x91,0x02,0x00,0x00,0x00,0x04,0x18,0x27,0x00,0xe4,0x20,0x24, 0x00,0x43,0x10,0x24,0x00,0x44,0x10,0x25,0x03,0xe0,0x00,0x08,0xa1,0x02,0x00,0x00, 0x30,0xa9,0x00,0xff,0x27,0x83,0x8b,0x70,0x30,0x85,0x00,0xff,0x24,0x02,0x00,0x01, 0x00,0x07,0x50,0x27,0x00,0xc7,0x40,0x24,0x11,0x22,0x00,0x17,0x00,0xa3,0x18,0x21, 0x00,0x05,0x20,0x40,0x27,0x82,0x8b,0x70,0x00,0x05,0x28,0x80,0x27,0x83,0x8b,0x78, 0x00,0x83,0x50,0x21,0x00,0xa2,0x20,0x21,0x24,0x02,0x00,0x02,0x00,0x07,0x40,0x27, 
0x11,0x22,0x00,0x07,0x00,0xc7,0x28,0x24,0x8c,0x82,0x00,0x18,0x00,0x00,0x00,0x00, 0x00,0x48,0x10,0x24,0x00,0x45,0x10,0x25,0x03,0xe0,0x00,0x08,0xac,0x82,0x00,0x18, 0x95,0x42,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x48,0x10,0x24,0x00,0x45,0x10,0x25, 0x03,0xe0,0x00,0x08,0xa5,0x42,0x00,0x02,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x4a,0x10,0x24,0x00,0x48,0x10,0x25,0x03,0xe0,0x00,0x08,0xa0,0x62,0x00,0x00, 0x00,0x04,0x32,0x02,0x30,0xc6,0xff,0x00,0x00,0x04,0x16,0x02,0x00,0x04,0x1a,0x00, 0x3c,0x05,0x00,0xff,0x00,0x65,0x18,0x24,0x00,0x46,0x10,0x25,0x00,0x43,0x10,0x25, 0x00,0x04,0x26,0x00,0x03,0xe0,0x00,0x08,0x00,0x44,0x10,0x25,0x3c,0x03,0xb0,0x03, 0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xe8,0x34,0x63,0x00,0x20,0x24,0x42,0x14,0xdc, 0x3c,0x04,0xb0,0x03,0xaf,0xbf,0x00,0x14,0xac,0x62,0x00,0x00,0xaf,0xb0,0x00,0x10, 0x34,0x84,0x00,0x2c,0x8c,0x83,0x00,0x00,0xa7,0x80,0xbc,0x00,0x00,0x03,0x12,0x02, 0x00,0x03,0x2d,0x02,0x30,0x42,0x0f,0xff,0xa3,0x83,0xbc,0x08,0xa7,0x85,0xbc,0x0c, 0xa7,0x82,0xbc,0x0a,0xa7,0x80,0xbc,0x02,0xa7,0x80,0xbc,0x04,0xa7,0x80,0xbc,0x06, 0x0c,0x00,0x06,0xd1,0x24,0x04,0x05,0x00,0x3c,0x05,0x08,0x00,0x00,0x45,0x28,0x25, 0x24,0x04,0x05,0x00,0x0c,0x00,0x06,0xbf,0x00,0x40,0x80,0x21,0x3c,0x02,0xf7,0xff, 0x34,0x42,0xff,0xff,0x02,0x02,0x80,0x24,0x02,0x00,0x28,0x21,0x0c,0x00,0x06,0xbf, 0x24,0x04,0x05,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0xb0,0x03,0x34,0x42,0x01,0x08, 0x34,0x63,0x01,0x18,0x8c,0x45,0x00,0x00,0x8c,0x64,0x00,0x00,0x3c,0x02,0x00,0x0f, 0x3c,0x03,0x00,0x4c,0x30,0x84,0x02,0x00,0x34,0x63,0x4b,0x40,0xaf,0x85,0xbc,0x10, 0x10,0x80,0x00,0x06,0x34,0x42,0x42,0x40,0xaf,0x83,0xbc,0x14,0x8f,0xbf,0x00,0x14, 0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0xaf,0x82,0xbc,0x14, 0x08,0x00,0x05,0x67,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00, 0x27,0xbd,0xff,0xc8,0x34,0x63,0x00,0x20,0x24,0x42,0x15,0xb8,0x30,0x84,0x00,0xff, 0xaf,0xbf,0x00,0x30,0xaf,0xb7,0x00,0x2c,0xaf,0xb6,0x00,0x28,0xaf,0xb5,0x00,0x24, 
0xaf,0xb4,0x00,0x20,0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14, 0xaf,0xb0,0x00,0x10,0xac,0x62,0x00,0x00,0x10,0x80,0x00,0x1c,0x24,0x02,0x00,0x02, 0x10,0x82,0x00,0x08,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x30,0x7b,0xb6,0x01,0x7c, 0x7b,0xb4,0x01,0x3c,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x38,0xa7,0x80,0xbc,0x00,0xa7,0x80,0xbc,0x02,0xa7,0x80,0xbc,0x04, 0xa7,0x80,0xbc,0x06,0x0c,0x00,0x06,0xd1,0x24,0x04,0x05,0x00,0x3c,0x05,0x08,0x00, 0x00,0x45,0x28,0x25,0x24,0x04,0x05,0x00,0x0c,0x00,0x06,0xbf,0x00,0x40,0x80,0x21, 0x3c,0x05,0xf7,0xff,0x34,0xa5,0xff,0xff,0x02,0x05,0x28,0x24,0x0c,0x00,0x06,0xbf, 0x24,0x04,0x05,0x00,0x08,0x00,0x05,0x82,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1, 0x24,0x04,0x05,0xa0,0x24,0x04,0x05,0xa4,0x0c,0x00,0x06,0xd1,0x00,0x02,0xbc,0x02, 0x24,0x04,0x05,0xa8,0x00,0x02,0xb4,0x02,0x0c,0x00,0x06,0xd1,0x30,0x55,0xff,0xff, 0x00,0x40,0x80,0x21,0x97,0x84,0xbc,0x00,0x97,0x82,0xbc,0x02,0x97,0x83,0xbc,0x06, 0x02,0xe4,0x20,0x23,0x02,0xa2,0x10,0x23,0x00,0x82,0x20,0x21,0x97,0x82,0xbc,0x04, 0x32,0x14,0xff,0xff,0x02,0x83,0x18,0x23,0x02,0xc2,0x10,0x23,0x00,0x82,0x20,0x21, 0x93,0x82,0xbc,0x08,0x00,0x83,0x20,0x21,0x30,0x84,0xff,0xff,0x00,0x82,0x10,0x2b, 0x14,0x40,0x00,0xaa,0x00,0x00,0x00,0x00,0x97,0x82,0xbc,0x0c,0x00,0x00,0x00,0x00, 0x00,0x44,0x10,0x2b,0x14,0x40,0x00,0x7f,0x00,0x00,0x00,0x00,0x97,0x82,0xbc,0x0a, 0x00,0x00,0x00,0x00,0x00,0x44,0x10,0x2b,0x10,0x40,0x00,0x3a,0x00,0x00,0x00,0x00, 0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x50,0x30,0x51,0x00,0x7f,0x00,0x40,0x80,0x21, 0x2e,0x22,0x00,0x32,0x10,0x40,0x00,0x13,0x24,0x02,0x00,0x20,0x12,0x22,0x00,0x17, 0x24,0x02,0xff,0x80,0x02,0x02,0x10,0x24,0x26,0x31,0x00,0x01,0x00,0x51,0x80,0x25, 0x02,0x00,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x50,0x02,0x00,0x28,0x21, 0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x58,0x02,0x00,0x28,0x21,0x0c,0x00,0x06,0xbf, 0x24,0x04,0x04,0x60,0x02,0x00,0x28,0x21,0x24,0x04,0x04,0x68,0x0c,0x00,0x06,0xbf, 
0x00,0x00,0x00,0x00,0xa7,0x97,0xbc,0x00,0xa7,0x95,0xbc,0x02,0xa7,0x96,0xbc,0x04, 0xa7,0x94,0xbc,0x06,0x08,0x00,0x05,0x82,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1, 0x24,0x04,0x02,0x08,0x3c,0x04,0x00,0xc0,0x00,0x40,0x28,0x21,0x00,0x44,0x10,0x24, 0x00,0x02,0x15,0x82,0x24,0x03,0x00,0x03,0x10,0x43,0x00,0x07,0x00,0x00,0x00,0x00, 0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff,0x00,0xa2,0x10,0x24,0x00,0x44,0x28,0x25, 0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x08,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x2c, 0x00,0x40,0x90,0x21,0x3c,0x02,0xff,0xff,0x34,0x42,0x3f,0xff,0x02,0x42,0x90,0x24, 0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x2c,0x08,0x00,0x05,0xc9, 0x24,0x02,0xff,0x80,0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x50,0x30,0x51,0x00,0x7f, 0x24,0x02,0x00,0x20,0x16,0x22,0xff,0xdb,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1, 0x24,0x04,0x02,0x2c,0x34,0x52,0x40,0x00,0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf, 0x24,0x04,0x02,0x2c,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x58,0x24,0x04,0x02,0x5c, 0x0c,0x00,0x06,0xd1,0x00,0x02,0x9e,0x02,0x30,0x43,0x00,0xff,0x00,0x13,0x12,0x00, 0x00,0x43,0x10,0x25,0x2c,0x43,0x00,0x04,0x14,0x60,0x00,0x1d,0x2c,0x42,0x00,0x11, 0x10,0x40,0x00,0x0b,0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0xff,0x34,0x42,0x3f,0xff, 0x02,0x42,0x90,0x24,0x02,0x40,0x28,0x21,0x24,0x04,0x02,0x2c,0x0c,0x00,0x06,0xbf, 0x36,0x52,0x80,0x00,0x02,0x40,0x28,0x21,0x08,0x00,0x05,0xd7,0x24,0x04,0x02,0x2c, 0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08,0x3c,0x04,0x00,0xc0,0x00,0x40,0x28,0x21, 0x00,0x44,0x10,0x24,0x00,0x02,0x15,0x82,0x24,0x03,0x00,0x02,0x14,0x43,0xff,0xee, 0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff,0x00,0xa2,0x10,0x24,0x00,0x44,0x28,0x25, 0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x08,0x08,0x00,0x06,0x13,0x3c,0x02,0xff,0xff, 0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08,0x00,0x40,0x28,0x21,0x00,0x02,0x15,0x82, 0x30,0x42,0x00,0x03,0x24,0x03,0x00,0x03,0x14,0x43,0xff,0xdf,0x3c,0x02,0xff,0x3f, 0x34,0x42,0xff,0xff,0x00,0xa2,0x10,0x24,0x3c,0x03,0x00,0x80,0x08,0x00,0x06,0x28, 
0x00,0x43,0x28,0x25,0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x50,0x30,0x51,0x00,0x7f, 0x00,0x40,0x80,0x21,0x2e,0x22,0x00,0x32,0x10,0x40,0xff,0x9a,0x24,0x02,0x00,0x20, 0x12,0x22,0x00,0x04,0x24,0x02,0xff,0x80,0x02,0x02,0x10,0x24,0x08,0x00,0x05,0xcb, 0x26,0x31,0x00,0x02,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08,0x3c,0x04,0x00,0xc0, 0x00,0x40,0x28,0x21,0x00,0x44,0x10,0x24,0x00,0x02,0x15,0x82,0x24,0x03,0x00,0x03, 0x10,0x43,0x00,0x07,0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff, 0x00,0xa2,0x10,0x24,0x00,0x44,0x28,0x25,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x08, 0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x2c,0x00,0x40,0x90,0x21,0x3c,0x02,0xff,0xff, 0x34,0x42,0x3f,0xff,0x02,0x42,0x90,0x24,0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf, 0x24,0x04,0x02,0x2c,0x08,0x00,0x06,0x42,0x24,0x02,0xff,0x80,0x0c,0x00,0x06,0xd1, 0x24,0x04,0x04,0x50,0x00,0x40,0x80,0x21,0x30,0x51,0x00,0x7f,0x24,0x02,0x00,0x20, 0x12,0x22,0x00,0x1d,0x2e,0x22,0x00,0x21,0x14,0x40,0xff,0x72,0x24,0x02,0xff,0x80, 0x02,0x02,0x10,0x24,0x26,0x31,0xff,0xff,0x00,0x51,0x80,0x25,0x24,0x04,0x04,0x50, 0x0c,0x00,0x06,0xbf,0x02,0x00,0x28,0x21,0x24,0x04,0x04,0x58,0x0c,0x00,0x06,0xbf, 0x02,0x00,0x28,0x21,0x24,0x04,0x04,0x60,0x0c,0x00,0x06,0xbf,0x02,0x00,0x28,0x21, 0x02,0x00,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x68,0x24,0x02,0x00,0x20, 0x16,0x22,0xff,0x60,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x2c, 0x00,0x40,0x90,0x21,0x3c,0x02,0xff,0xff,0x34,0x42,0x3f,0xff,0x02,0x42,0x10,0x24, 0x08,0x00,0x06,0x19,0x34,0x52,0x80,0x00,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x2c, 0x34,0x52,0x40,0x00,0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x2c, 0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x58,0x24,0x04,0x02,0x5c,0x0c,0x00,0x06,0xd1, 0x00,0x02,0x9e,0x02,0x30,0x43,0x00,0xff,0x00,0x13,0x12,0x00,0x00,0x43,0x10,0x25, 0x2c,0x43,0x00,0x04,0x14,0x60,0x00,0x20,0x2c,0x42,0x00,0x11,0x10,0x40,0x00,0x0d, 0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0xff,0x34,0x42,0x3f,0xff,0x02,0x42,0x90,0x24, 
0x02,0x40,0x28,0x21,0x24,0x04,0x02,0x2c,0x0c,0x00,0x06,0xbf,0x36,0x52,0x80,0x00, 0x02,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x2c,0x08,0x00,0x06,0x66, 0x2e,0x22,0x00,0x21,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08,0x3c,0x04,0x00,0xc0, 0x00,0x40,0x28,0x21,0x00,0x44,0x10,0x24,0x00,0x02,0x15,0x82,0x24,0x03,0x00,0x02, 0x14,0x43,0xff,0xec,0x00,0x00,0x00,0x00,0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff, 0x00,0xa2,0x10,0x24,0x00,0x44,0x28,0x25,0x0c,0x00,0x06,0xbf,0x24,0x04,0x02,0x08, 0x08,0x00,0x06,0x96,0x3c,0x02,0xff,0xff,0x0c,0x00,0x06,0xd1,0x24,0x04,0x02,0x08, 0x00,0x40,0x28,0x21,0x00,0x02,0x15,0x82,0x30,0x42,0x00,0x03,0x24,0x03,0x00,0x03, 0x14,0x43,0xff,0xdc,0x3c,0x03,0x00,0x80,0x3c,0x02,0xff,0x3f,0x34,0x42,0xff,0xff, 0x00,0xa2,0x10,0x24,0x08,0x00,0x06,0xae,0x00,0x43,0x28,0x25,0x30,0x83,0x00,0x03, 0x00,0x04,0x20,0x40,0x00,0x83,0x20,0x23,0x3c,0x02,0xb0,0x0a,0x00,0x82,0x20,0x21, 0x3c,0x06,0x00,0x01,0xac,0x85,0x00,0x00,0x24,0x07,0x00,0x01,0x00,0x00,0x28,0x21, 0x34,0xc6,0x86,0x9f,0x8c,0x82,0x10,0x00,0x24,0xa5,0x00,0x01,0x10,0x47,0x00,0x03, 0x00,0xc5,0x18,0x2b,0x10,0x60,0xff,0xfb,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08, 0x00,0x00,0x00,0x00,0x30,0x83,0x00,0x03,0x00,0x04,0x20,0x40,0x3c,0x02,0xb0,0x0a, 0x00,0x83,0x20,0x23,0x00,0x82,0x20,0x21,0x3c,0x06,0x00,0x01,0x24,0x02,0xff,0xff, 0xac,0x82,0x10,0x00,0x00,0x00,0x28,0x21,0x24,0x07,0x00,0x01,0x34,0xc6,0x86,0x9f, 0x8c,0x82,0x10,0x00,0x24,0xa5,0x00,0x01,0x10,0x47,0x00,0x03,0x00,0xc5,0x18,0x2b, 0x10,0x60,0xff,0xfb,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x00,0x03,0xe0,0x00,0x08, 0x00,0x00,0x00,0x00,0x3c,0x05,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x1b,0x94, 0x24,0x03,0x00,0x01,0x34,0xa5,0x00,0x20,0x3c,0x06,0xb0,0x03,0xac,0xa2,0x00,0x00, 0x34,0xc6,0x01,0x04,0xa0,0x83,0x00,0x48,0xa0,0x80,0x00,0x04,0xa0,0x80,0x00,0x05, 0xa0,0x80,0x00,0x06,0xa0,0x80,0x00,0x07,0xa0,0x80,0x00,0x08,0xa0,0x80,0x00,0x09, 0xa0,0x80,0x00,0x0a,0xa0,0x80,0x00,0x11,0xa0,0x80,0x00,0x13,0xa0,0x80,0x00,0x49, 
0x94,0xc2,0x00,0x00,0xac,0x80,0x00,0x00,0xa0,0x80,0x00,0x4e,0x00,0x02,0x14,0x00, 0x00,0x02,0x14,0x03,0x30,0x43,0x00,0xff,0x30,0x42,0xff,0x00,0xa4,0x82,0x00,0x44, 0xa4,0x83,0x00,0x46,0xac,0x80,0x00,0x24,0xac,0x80,0x00,0x28,0xac,0x80,0x00,0x2c, 0xac,0x80,0x00,0x30,0xac,0x80,0x00,0x34,0xac,0x80,0x00,0x38,0xac,0x80,0x00,0x3c, 0x03,0xe0,0x00,0x08,0xac,0x80,0x00,0x40,0x84,0x83,0x00,0x0c,0x3c,0x07,0xb0,0x03, 0x34,0xe7,0x00,0x20,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80, 0x27,0x83,0x90,0x04,0x00,0x43,0x10,0x21,0x8c,0x48,0x00,0x18,0x3c,0x02,0x80,0x00, 0x24,0x42,0x1c,0x28,0xac,0xe2,0x00,0x00,0x8d,0x03,0x00,0x08,0x80,0x82,0x00,0x13, 0x00,0x05,0x2c,0x00,0x00,0x03,0x1e,0x02,0x00,0x02,0x12,0x00,0x30,0x63,0x00,0x7e, 0x00,0x62,0x18,0x21,0x00,0x65,0x18,0x21,0x3c,0x02,0xc0,0x00,0x3c,0x05,0xb0,0x05, 0x34,0x42,0x04,0x00,0x24,0x63,0x00,0x01,0x3c,0x07,0xb0,0x05,0x3c,0x08,0xb0,0x05, 0x34,0xa5,0x04,0x20,0xac,0xa3,0x00,0x00,0x00,0xc2,0x30,0x21,0x34,0xe7,0x04,0x24, 0x35,0x08,0x02,0x28,0x24,0x02,0x00,0x01,0x24,0x03,0x00,0x20,0xac,0xe6,0x00,0x00, 0xac,0x82,0x00,0x3c,0x03,0xe0,0x00,0x08,0xa1,0x03,0x00,0x00,0x27,0xbd,0xff,0xa8, 0x00,0x07,0x60,0x80,0x27,0x82,0xb4,0x00,0xaf,0xbe,0x00,0x50,0xaf,0xb7,0x00,0x4c, 0xaf,0xb5,0x00,0x44,0xaf,0xb4,0x00,0x40,0xaf,0xbf,0x00,0x54,0xaf,0xb6,0x00,0x48, 0xaf,0xb3,0x00,0x3c,0xaf,0xb2,0x00,0x38,0xaf,0xb1,0x00,0x34,0xaf,0xb0,0x00,0x30, 0x01,0x82,0x10,0x21,0x8c,0x43,0x00,0x00,0x00,0xe0,0x70,0x21,0x3c,0x02,0x80,0x00, 0x94,0x73,0x00,0x14,0x3c,0x07,0xb0,0x03,0x34,0xe7,0x00,0x20,0x24,0x42,0x1c,0xbc, 0x3c,0x03,0xb0,0x05,0xac,0xe2,0x00,0x00,0x34,0x63,0x01,0x28,0x90,0x67,0x00,0x00, 0x00,0x13,0xa8,0xc0,0x02,0xb3,0x18,0x21,0x27,0x82,0x90,0x04,0x00,0x03,0x18,0x80, 0x00,0x62,0x18,0x21,0x00,0x05,0x2c,0x00,0x00,0x07,0x3e,0x00,0x28,0xc2,0x00,0x03, 0x00,0xc0,0xa0,0x21,0x00,0x80,0x78,0x21,0x00,0x05,0xbc,0x03,0x8c,0x68,0x00,0x18, 0x02,0xa0,0x58,0x21,0x10,0x40,0x01,0x81,0x00,0x07,0xf6,0x03,0x00,0xde,0x10,0x07, 
0x30,0x5e,0x00,0x01,0x01,0x73,0x10,0x21,0x27,0x83,0x90,0x08,0x00,0x02,0x10,0x80, 0x00,0x43,0x10,0x21,0x80,0x4d,0x00,0x06,0x8d,0x03,0x00,0x00,0x8d,0x02,0x00,0x04, 0x8d,0x0a,0x00,0x08,0x8d,0x03,0x00,0x0c,0xaf,0xa2,0x00,0x20,0x11,0xa0,0x01,0x71, 0xaf,0xa3,0x00,0x18,0x27,0x82,0xb4,0x00,0x01,0x82,0x10,0x21,0x8c,0x44,0x00,0x00, 0x00,0x00,0x00,0x00,0x90,0x83,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x04, 0x14,0x60,0x00,0x12,0x00,0x00,0xb0,0x21,0x3c,0x02,0xb0,0x09,0x34,0x42,0x01,0x46, 0x90,0x43,0x00,0x00,0x2a,0x84,0x00,0x04,0x10,0x80,0x01,0x56,0x30,0x65,0x00,0x01, 0x91,0xe2,0x00,0x09,0x00,0x00,0x00,0x00,0x12,0x82,0x00,0x02,0x00,0x00,0x00,0x00, 0x00,0x00,0x28,0x21,0x14,0xa0,0x00,0x03,0x00,0x00,0x38,0x21,0x13,0xc0,0x00,0x03, 0x38,0xf6,0x00,0x01,0x24,0x07,0x00,0x01,0x38,0xf6,0x00,0x01,0x01,0x73,0x10,0x21, 0x00,0x02,0x30,0x80,0x27,0x83,0x90,0x10,0x00,0xc3,0x48,0x21,0x91,0x25,0x00,0x00, 0x8f,0xa4,0x00,0x20,0x2c,0xa3,0x00,0x04,0x00,0x04,0x11,0xc3,0x30,0x42,0x00,0x01, 0x00,0x03,0xb0,0x0b,0x12,0xc0,0x00,0xd8,0xaf,0xa2,0x00,0x24,0x93,0x90,0xbb,0xea, 0x00,0x0a,0x16,0x42,0x30,0x52,0x00,0x3f,0x2e,0x06,0x00,0x0c,0x10,0xc0,0x00,0xc0, 0x00,0xa0,0x20,0x21,0x2c,0xa2,0x00,0x10,0x14,0x40,0x00,0x04,0x00,0x90,0x10,0x2b, 0x30,0xa2,0x00,0x07,0x24,0x44,0x00,0x04,0x00,0x90,0x10,0x2b,0x10,0x40,0x00,0x0b, 0x01,0x73,0x10,0x21,0x27,0x85,0xbb,0x1c,0x00,0x10,0x10,0x40,0x00,0x50,0x10,0x21, 0x00,0x45,0x10,0x21,0x90,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0x18,0x2b, 0x14,0x60,0xff,0xfa,0x00,0x10,0x10,0x40,0x01,0x73,0x10,0x21,0x00,0x02,0x10,0x80, 0x27,0x83,0x90,0x08,0x00,0x43,0x10,0x21,0x31,0xa4,0x00,0x01,0x10,0x80,0x00,0xa5, 0xa0,0x50,0x00,0x07,0x3c,0x04,0xb0,0x05,0x34,0x84,0x00,0x08,0x24,0x02,0x00,0x01, 0x3c,0x03,0x80,0x00,0xa1,0xe2,0x00,0x4e,0xac,0x83,0x00,0x00,0x8c,0x85,0x00,0x00, 0x3c,0x02,0x00,0xf0,0x3c,0x03,0x40,0xf0,0x34,0x42,0xf0,0x00,0x34,0x63,0xf0,0x00, 0x24,0x17,0x00,0x0e,0x24,0x13,0x01,0x06,0xac,0x82,0x00,0x00,0xac,0x83,0x00,0x00, 
0x27,0x82,0xb4,0x00,0x01,0x82,0x10,0x21,0x8c,0x43,0x00,0x00,0x24,0x05,0x00,0x01, 0xaf,0xa5,0x00,0x1c,0x90,0x62,0x00,0x16,0x00,0x13,0xa8,0xc0,0x32,0x51,0x00,0x02, 0x34,0x42,0x00,0x04,0xa0,0x62,0x00,0x16,0x8f,0xa3,0x00,0x20,0x8f,0xa4,0x00,0x18, 0x00,0x03,0x13,0x43,0x00,0x04,0x1a,0x02,0x30,0x47,0x00,0x01,0x12,0x20,0x00,0x04, 0x30,0x64,0x07,0xff,0x2e,0x03,0x00,0x04,0x32,0x42,0x00,0x33,0x00,0x43,0x90,0x0b, 0x8f,0xa5,0x00,0x24,0x8f,0xa6,0x00,0x1c,0x00,0x12,0x10,0x40,0x00,0x05,0x19,0xc0, 0x00,0x47,0x10,0x21,0x00,0x06,0x2a,0x80,0x00,0x43,0x10,0x21,0x00,0x10,0x32,0x00, 0x00,0x04,0x24,0x80,0x02,0x65,0x28,0x21,0x00,0xa4,0x28,0x21,0x00,0x46,0x10,0x21, 0x00,0x17,0x1c,0x00,0x3c,0x04,0xc0,0x00,0x00,0x43,0x30,0x21,0x16,0x80,0x00,0x29, 0x00,0xa4,0x28,0x21,0x3c,0x02,0xb0,0x05,0x34,0x42,0x04,0x00,0x3c,0x03,0xb0,0x05, 0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00,0x34,0x63,0x04,0x04,0x34,0x84,0x02,0x28, 0x24,0x02,0x00,0x01,0xac,0x65,0x00,0x00,0xa0,0x82,0x00,0x00,0x3c,0x02,0xb0,0x09, 0x34,0x42,0x01,0x46,0x90,0x44,0x00,0x00,0x91,0xe3,0x00,0x09,0x30,0x86,0x00,0x01, 0x02,0x83,0x18,0x26,0x00,0x03,0x30,0x0b,0x14,0xc0,0x00,0x03,0x00,0x00,0x28,0x21, 0x13,0xc0,0x00,0x03,0x02,0xb3,0x10,0x21,0x24,0x05,0x00,0x01,0x02,0xb3,0x10,0x21, 0x27,0x83,0x90,0x08,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x84,0x48,0x00,0x04, 0x00,0xa0,0x30,0x21,0x00,0xe0,0x20,0x21,0x02,0x80,0x28,0x21,0x02,0xc0,0x38,0x21, 0x0c,0x00,0x00,0x70,0xaf,0xa8,0x00,0x10,0x7b,0xbe,0x02,0xbc,0x7b,0xb6,0x02,0x7c, 0x7b,0xb4,0x02,0x3c,0x7b,0xb2,0x01,0xfc,0x7b,0xb0,0x01,0xbc,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x58,0x24,0x02,0x00,0x01,0x12,0x82,0x00,0x3d,0x3c,0x02,0xb0,0x05, 0x24,0x02,0x00,0x02,0x12,0x82,0x00,0x31,0x3c,0x02,0xb0,0x05,0x24,0x02,0x00,0x03, 0x12,0x82,0x00,0x25,0x3c,0x02,0xb0,0x05,0x24,0x02,0x00,0x10,0x12,0x82,0x00,0x19, 0x3c,0x02,0xb0,0x05,0x24,0x02,0x00,0x11,0x12,0x82,0x00,0x0d,0x3c,0x02,0xb0,0x05, 0x24,0x02,0x00,0x12,0x16,0x82,0xff,0xd1,0x3c,0x02,0xb0,0x05,0x3c,0x03,0xb0,0x05, 
0x34,0x42,0x04,0x20,0x3c,0x04,0xb0,0x05,0x34,0x63,0x04,0x24,0xac,0x46,0x00,0x00, 0x34,0x84,0x02,0x28,0xac,0x65,0x00,0x00,0x08,0x00,0x07,0xe6,0x24,0x02,0x00,0x20, 0x34,0x42,0x04,0x40,0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00, 0x34,0x63,0x04,0x44,0x34,0x84,0x02,0x28,0x24,0x02,0x00,0x40,0x08,0x00,0x07,0xe6, 0xac,0x65,0x00,0x00,0x34,0x42,0x04,0x28,0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05, 0xac,0x46,0x00,0x00,0x34,0x63,0x04,0x2c,0x34,0x84,0x02,0x28,0x24,0x02,0xff,0x80, 0x08,0x00,0x07,0xe6,0xac,0x65,0x00,0x00,0x34,0x42,0x04,0x18,0x3c,0x03,0xb0,0x05, 0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00,0x34,0x63,0x04,0x1c,0x34,0x84,0x02,0x28, 0x24,0x02,0x00,0x08,0x08,0x00,0x07,0xe6,0xac,0x65,0x00,0x00,0x34,0x42,0x04,0x10, 0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00,0x34,0x63,0x04,0x14, 0x34,0x84,0x02,0x28,0x24,0x02,0x00,0x04,0x08,0x00,0x07,0xe6,0xac,0x65,0x00,0x00, 0x34,0x42,0x04,0x08,0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05,0xac,0x46,0x00,0x00, 0x34,0x63,0x04,0x0c,0x34,0x84,0x02,0x28,0x24,0x02,0x00,0x02,0x08,0x00,0x07,0xe6, 0xac,0x65,0x00,0x00,0x24,0x17,0x00,0x14,0x08,0x00,0x07,0xb8,0x24,0x13,0x01,0x02, 0x30,0xa2,0x00,0x07,0x24,0x44,0x00,0x0c,0x00,0x90,0x18,0x2b,0x10,0x60,0x00,0x0c, 0x26,0x02,0x00,0x04,0x27,0x85,0xbb,0x1c,0x00,0x10,0x10,0x40,0x00,0x50,0x10,0x21, 0x00,0x45,0x10,0x21,0x90,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0x18,0x2b, 0x14,0x60,0xff,0xfa,0x00,0x10,0x10,0x40,0x2e,0x06,0x00,0x0c,0x26,0x02,0x00,0x04, 0x08,0x00,0x07,0xa2,0x00,0x46,0x80,0x0a,0x27,0x82,0xb4,0x00,0x01,0x82,0x20,0x21, 0x8c,0x87,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0xe2,0x00,0x19,0x00,0x00,0x00,0x00, 0x14,0x40,0x00,0x07,0x00,0x00,0x00,0x00,0x27,0x82,0x90,0x20,0x00,0xc2,0x10,0x21, 0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x60,0x00,0x14,0x00,0x00,0x00,0x00, 0x90,0xe3,0x00,0x16,0x27,0x82,0x90,0x08,0x00,0xc2,0x10,0x21,0x34,0x63,0x00,0x20, 0x90,0x50,0x00,0x07,0xa0,0xe3,0x00,0x16,0x8c,0x84,0x00,0x00,0x00,0x0a,0x1e,0x42, 
0x24,0x06,0x00,0x01,0x90,0x82,0x00,0x16,0x30,0x71,0x00,0x02,0x30,0x72,0x00,0x3f, 0x30,0x42,0x00,0xfb,0x24,0x17,0x00,0x18,0x24,0x13,0x01,0x03,0x24,0x15,0x08,0x18, 0xaf,0xa6,0x00,0x1c,0x08,0x00,0x07,0xc2,0xa0,0x82,0x00,0x16,0x8d,0x02,0x00,0x04, 0x00,0x0a,0x1c,0x42,0x30,0x42,0x00,0x10,0x14,0x40,0x00,0x15,0x30,0x72,0x00,0x3f, 0x81,0x22,0x00,0x05,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x11,0x30,0x72,0x00,0x3e, 0x27,0x83,0x90,0x18,0x00,0xc3,0x18,0x21,0x80,0x64,0x00,0x00,0x27,0x83,0xb5,0x78, 0x00,0x04,0x11,0x00,0x00,0x44,0x10,0x23,0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x23, 0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x05,0x90,0x43,0x00,0x04, 0x00,0x00,0x00,0x00,0x00,0x64,0x18,0x24,0x30,0x63,0x00,0x01,0x02,0x43,0x90,0x25, 0x27,0x85,0xb4,0x00,0x01,0x85,0x28,0x21,0x8c,0xa6,0x00,0x00,0x01,0x73,0x10,0x21, 0x27,0x83,0x90,0x10,0x90,0xc4,0x00,0x16,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21, 0x30,0x84,0x00,0xdf,0x90,0x50,0x00,0x00,0xa0,0xc4,0x00,0x16,0x80,0xc6,0x00,0x12, 0x8c,0xa3,0x00,0x00,0x2d,0xc4,0x00,0x02,0xaf,0xa6,0x00,0x1c,0x90,0x62,0x00,0x16, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfb,0x14,0x80,0x00,0x06,0xa0,0x62,0x00,0x16, 0x24,0x02,0x00,0x06,0x11,0xc2,0x00,0x03,0x24,0x02,0x00,0x04,0x15,0xc2,0xff,0x0e, 0x32,0x51,0x00,0x02,0x32,0x51,0x00,0x02,0x2e,0x02,0x00,0x0c,0x14,0x40,0x00,0x0f, 0x00,0x11,0x18,0x2b,0x32,0x02,0x00,0x0f,0x34,0x42,0x00,0x10,0x00,0x03,0x19,0x00, 0x00,0x43,0x18,0x21,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xe0,0xa0,0x43,0x00,0x00, 0x00,0x00,0x20,0x21,0x02,0x00,0x28,0x21,0x0c,0x00,0x02,0x03,0xaf,0xaf,0x00,0x28, 0x8f,0xaf,0x00,0x28,0x08,0x00,0x07,0xc2,0x00,0x00,0x00,0x00,0x08,0x00,0x08,0xbd, 0x32,0x03,0x00,0xff,0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x42,0x90,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x0f,0x14,0x40,0xfe,0xaa,0x00,0x00,0x00,0x00, 0x91,0xe2,0x00,0x09,0x00,0x00,0x00,0x00,0x02,0x82,0x10,0x26,0x08,0x00,0x07,0x79, 0x00,0x02,0x28,0x0b,0x08,0x00,0x07,0x7f,0x00,0x00,0xb0,0x21,0x24,0x02,0x00,0x10, 
0x10,0xc2,0x00,0x08,0x24,0x02,0x00,0x11,0x10,0xc2,0xfe,0x7d,0x00,0x07,0x17,0x83, 0x24,0x02,0x00,0x12,0x14,0xc2,0xfe,0x7b,0x00,0x07,0x17,0x43,0x08,0x00,0x07,0x59, 0x30,0x5e,0x00,0x01,0x08,0x00,0x07,0x59,0x00,0x07,0xf7,0xc2,0x00,0x04,0x10,0x40, 0x27,0x83,0x80,0x1c,0x00,0x43,0x10,0x21,0x00,0x80,0x40,0x21,0x94,0x44,0x00,0x00, 0x2d,0x07,0x00,0x04,0x24,0xc2,0x00,0x03,0x00,0x47,0x30,0x0a,0x00,0x86,0x00,0x18, 0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x23,0x8c, 0xac,0x62,0x00,0x00,0x2d,0x06,0x00,0x10,0x00,0x00,0x20,0x12,0x00,0x04,0x22,0x42, 0x24,0x84,0x00,0x01,0x24,0x83,0x00,0xc0,0x10,0xe0,0x00,0x0b,0x24,0x82,0x00,0x60, 0x00,0x40,0x20,0x21,0x00,0x65,0x20,0x0a,0x3c,0x03,0xb0,0x03,0x34,0x63,0x01,0x00, 0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,0x00,0x44,0x20,0x04, 0x03,0xe0,0x00,0x08,0x00,0x80,0x10,0x21,0x24,0x85,0x00,0x28,0x24,0x83,0x00,0x24, 0x31,0x02,0x00,0x08,0x14,0xc0,0xff,0xf4,0x24,0x84,0x00,0x14,0x00,0x60,0x20,0x21, 0x08,0x00,0x08,0xfa,0x00,0xa2,0x20,0x0b,0x27,0xbd,0xff,0xe0,0x3c,0x03,0xb0,0x03, 0x3c,0x02,0x80,0x00,0xaf,0xb0,0x00,0x10,0x24,0x42,0x24,0x28,0x00,0x80,0x80,0x21, 0x34,0x63,0x00,0x20,0x3c,0x04,0xb0,0x03,0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14, 0xaf,0xbf,0x00,0x1c,0x83,0xb1,0x00,0x33,0x83,0xa8,0x00,0x37,0x34,0x84,0x01,0x10, 0xac,0x62,0x00,0x00,0x2e,0x02,0x00,0x10,0x00,0xe0,0x90,0x21,0x8c,0x87,0x00,0x00, 0x14,0x40,0x00,0x0c,0x2e,0x02,0x00,0x0c,0x3c,0x02,0x00,0x0f,0x34,0x42,0xf0,0x00, 0x00,0xe2,0x10,0x24,0x14,0x40,0x00,0x37,0x32,0x02,0x00,0x08,0x32,0x02,0x00,0x07, 0x27,0x83,0x80,0xcc,0x00,0x43,0x10,0x21,0x90,0x50,0x00,0x00,0x00,0x00,0x00,0x00, 0x2e,0x02,0x00,0x0c,0x14,0x40,0x00,0x03,0x02,0x00,0x20,0x21,0x32,0x02,0x00,0x0f, 0x24,0x44,0x00,0x0c,0x00,0x87,0x10,0x06,0x30,0x42,0x00,0x01,0x14,0x40,0x00,0x07, 0x2c,0x82,0x00,0x0c,0x00,0x04,0x10,0x80,0x27,0x83,0xb4,0x50,0x00,0x43,0x10,0x21, 0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x2c,0x82,0x00,0x0c,0x14,0x40,0x00,0x05, 
0x00,0x05,0x10,0x40,0x00,0x46,0x10,0x21,0x00,0x02,0x11,0x00,0x00,0x82,0x10,0x21, 0x24,0x44,0x00,0x04,0x15,0x00,0x00,0x02,0x24,0x06,0x00,0x20,0x24,0x06,0x00,0x0e, 0x0c,0x00,0x08,0xe3,0x00,0x00,0x00,0x00,0x00,0x40,0x30,0x21,0x3c,0x02,0xb0,0x03, 0x34,0x42,0x01,0x00,0x90,0x43,0x00,0x00,0x2e,0x04,0x00,0x04,0x24,0x02,0x00,0x10, 0x24,0x05,0x00,0x0a,0x00,0x44,0x28,0x0a,0x30,0x63,0x00,0x01,0x14,0x60,0x00,0x02, 0x00,0x05,0x10,0x40,0x00,0xa0,0x10,0x21,0x30,0x45,0x00,0xff,0x00,0xc5,0x10,0x21, 0x24,0x46,0x00,0x46,0x02,0x26,0x18,0x04,0xa6,0x43,0x00,0x00,0x8f,0xbf,0x00,0x1c, 0x8f,0xb2,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x00,0xc0,0x10,0x21,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x20,0x10,0x40,0xff,0xcf,0x2e,0x02,0x00,0x0c,0x32,0x02,0x00,0x07, 0x27,0x83,0x80,0xc4,0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x00,0x08,0x00,0x09,0x28, 0x02,0x04,0x80,0x23,0x27,0xbd,0xff,0xb8,0x00,0x05,0x38,0x80,0x27,0x82,0xb4,0x00, 0xaf,0xbe,0x00,0x40,0xaf,0xb6,0x00,0x38,0xaf,0xb3,0x00,0x2c,0xaf,0xbf,0x00,0x44, 0xaf,0xb7,0x00,0x3c,0xaf,0xb5,0x00,0x34,0xaf,0xb4,0x00,0x30,0xaf,0xb2,0x00,0x28, 0xaf,0xb1,0x00,0x24,0xaf,0xb0,0x00,0x20,0x00,0xe2,0x38,0x21,0x8c,0xe6,0x00,0x00, 0xaf,0xa5,0x00,0x4c,0x3c,0x02,0x80,0x00,0x3c,0x05,0xb0,0x03,0x34,0xa5,0x00,0x20, 0x24,0x42,0x25,0x84,0x24,0x03,0x00,0x01,0xac,0xa2,0x00,0x00,0xa0,0xc3,0x00,0x12, 0x8c,0xe5,0x00,0x00,0x94,0xc3,0x00,0x06,0x90,0xa2,0x00,0x16,0xa4,0xc3,0x00,0x14, 0x27,0x83,0x90,0x00,0x34,0x42,0x00,0x08,0xa0,0xa2,0x00,0x16,0x8c,0xe8,0x00,0x00, 0xaf,0xa4,0x00,0x48,0x27,0x82,0x90,0x04,0x95,0x11,0x00,0x14,0x00,0x00,0x00,0x00, 0x00,0x11,0x98,0xc0,0x02,0x71,0x20,0x21,0x00,0x04,0x20,0x80,0x00,0x82,0x10,0x21, 0x8c,0x52,0x00,0x18,0x00,0x83,0x18,0x21,0x84,0x75,0x00,0x06,0x8e,0x45,0x00,0x08, 0x8e,0x46,0x00,0x04,0x8e,0x47,0x00,0x04,0x00,0x05,0x1c,0x82,0x00,0x06,0x31,0x42, 0x27,0x82,0x90,0x10,0x30,0x63,0x00,0x01,0x30,0xc6,0x00,0x01,0x00,0x82,0x20,0x21, 0xa5,0x15,0x00,0x1a,0x00,0x05,0x14,0x42,0xaf,0xa3,0x00,0x18,0xaf,0xa6,0x00,0x1c, 
0x30,0xe7,0x00,0x10,0x30,0x56,0x00,0x01,0x80,0x97,0x00,0x06,0x14,0xe0,0x00,0x47, 0x00,0x05,0xf7,0xc2,0x80,0x82,0x00,0x05,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x44, 0x02,0x71,0x10,0x21,0x93,0x90,0xbb,0xe9,0x00,0x00,0x00,0x00,0x2e,0x02,0x00,0x0c, 0x14,0x40,0x00,0x06,0x02,0x00,0x20,0x21,0x00,0x16,0x10,0x40,0x00,0x43,0x10,0x21, 0x00,0x02,0x11,0x00,0x02,0x02,0x10,0x21,0x24,0x44,0x00,0x04,0x02,0x71,0x10,0x21, 0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0x00,0x80,0x80,0x21, 0xa0,0x44,0x00,0x03,0xa0,0x44,0x00,0x00,0x02,0x00,0x20,0x21,0x02,0xc0,0x28,0x21, 0x0c,0x00,0x08,0xe3,0x02,0xa0,0x30,0x21,0x02,0x71,0x18,0x21,0x00,0x03,0x88,0x80, 0x00,0x40,0xa0,0x21,0x27,0x82,0x90,0x20,0x02,0x22,0x10,0x21,0x8c,0x44,0x00,0x00, 0x26,0xe3,0x00,0x02,0x00,0x03,0x17,0xc2,0x00,0x62,0x18,0x21,0x00,0x04,0x25,0xc2, 0x00,0x03,0x18,0x43,0x30,0x84,0x00,0x01,0x00,0x03,0x18,0x40,0x03,0xc4,0x20,0x24, 0x14,0x80,0x00,0x15,0x02,0x43,0x38,0x21,0x3c,0x08,0xb0,0x03,0x35,0x08,0x00,0x28, 0x8d,0x03,0x00,0x00,0x8f,0xa6,0x00,0x4c,0x8f,0xa4,0x00,0x48,0x27,0x82,0x90,0x08, 0x02,0x22,0x10,0x21,0x24,0x63,0x00,0x01,0x02,0xa0,0x28,0x21,0xa4,0x54,0x00,0x04, 0x00,0xc0,0x38,0x21,0x0c,0x00,0x07,0x2f,0xad,0x03,0x00,0x00,0x7b,0xbe,0x02,0x3c, 0x7b,0xb6,0x01,0xfc,0x7b,0xb4,0x01,0xbc,0x7b,0xb2,0x01,0x7c,0x7b,0xb0,0x01,0x3c, 0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x48,0x8f,0xa2,0x00,0x1c,0x8f,0xa6,0x00,0x18, 0x02,0x00,0x20,0x21,0x02,0xc0,0x28,0x21,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0x0a, 0xaf,0xa0,0x00,0x14,0x08,0x00,0x09,0xc6,0x02,0x82,0xa0,0x21,0x02,0x71,0x10,0x21, 0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0x90,0x50,0x00,0x00, 0x08,0x00,0x09,0xb2,0xa0,0x50,0x00,0x03,0x27,0xbd,0xff,0xb8,0xaf,0xb1,0x00,0x24, 0x8f,0xb1,0x00,0x5c,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20, 0x24,0x42,0x27,0xa8,0xaf,0xbe,0x00,0x40,0xaf,0xb7,0x00,0x3c,0xaf,0xb6,0x00,0x38, 0xaf,0xb5,0x00,0x34,0xaf,0xb4,0x00,0x30,0xaf,0xa5,0x00,0x4c,0x8f,0xb5,0x00,0x58, 
0xaf,0xbf,0x00,0x44,0xaf,0xb3,0x00,0x2c,0xaf,0xb2,0x00,0x28,0xaf,0xb0,0x00,0x20, 0x00,0xe0,0xb0,0x21,0xac,0x62,0x00,0x00,0x00,0x80,0xf0,0x21,0x00,0x00,0xb8,0x21, 0x16,0x20,0x00,0x2b,0x00,0x00,0xa0,0x21,0x27,0x85,0xb4,0x00,0x00,0x07,0x10,0x80, 0x00,0x45,0x10,0x21,0x8c,0x53,0x00,0x00,0x00,0x15,0x18,0x80,0x00,0x65,0x18,0x21, 0x92,0x62,0x00,0x16,0x8c,0x72,0x00,0x00,0x30,0x42,0x00,0x03,0x14,0x40,0x00,0x2d, 0x00,0x00,0x00,0x00,0x92,0x42,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x03, 0x14,0x40,0x00,0x28,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x34,0x00,0x00,0x00,0x00, 0x14,0x40,0x00,0x18,0x02,0x20,0x10,0x21,0x8c,0x82,0x00,0x38,0x00,0x00,0x00,0x00, 0x14,0x40,0x00,0x14,0x02,0x20,0x10,0x21,0x8c,0x82,0x00,0x3c,0x00,0x00,0x00,0x00, 0x14,0x40,0x00,0x0f,0x3c,0x03,0xb0,0x09,0x3c,0x05,0xb0,0x05,0x34,0x63,0x01,0x44, 0x34,0xa5,0x02,0x52,0x94,0x66,0x00,0x00,0x90,0xa2,0x00,0x00,0x8f,0xa3,0x00,0x4c, 0x00,0x00,0x00,0x00,0x00,0x62,0x10,0x06,0x30,0x42,0x00,0x01,0x10,0x40,0x00,0x04, 0x30,0xc6,0xff,0xff,0x2c,0xc2,0x00,0x41,0x10,0x40,0x00,0x09,0x24,0x05,0x00,0x14, 0x02,0x20,0x10,0x21,0x7b,0xbe,0x02,0x3c,0x7b,0xb6,0x01,0xfc,0x7b,0xb4,0x01,0xbc, 0x7b,0xb2,0x01,0x7c,0x7b,0xb0,0x01,0x3c,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x48, 0x0c,0x00,0x07,0x0a,0x24,0x06,0x01,0x07,0x24,0x02,0x00,0x01,0x08,0x00,0x0a,0x2c, 0xa3,0xc2,0x00,0x11,0x10,0xc0,0x00,0x1c,0x24,0x02,0x00,0x01,0x10,0xc2,0x00,0x17, 0x00,0xc0,0x88,0x21,0x96,0x54,0x00,0x1a,0x02,0xa0,0xb8,0x21,0x12,0x20,0xff,0xed, 0x02,0x20,0x10,0x21,0x27,0x83,0xb4,0x00,0x00,0x17,0x10,0x80,0x00,0x43,0x10,0x21, 0x8c,0x44,0x00,0x00,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x28,0x80,0x86,0x00,0x12, 0x8c,0x62,0x00,0x00,0x00,0x14,0x2c,0x00,0x00,0x05,0x2c,0x03,0x00,0x46,0x10,0x21, 0x8f,0xa6,0x00,0x4c,0x02,0xe0,0x38,0x21,0x03,0xc0,0x20,0x21,0x0c,0x00,0x07,0x2f, 0xac,0x62,0x00,0x00,0x08,0x00,0x0a,0x2c,0xaf,0xd1,0x00,0x40,0x96,0x74,0x00,0x1a, 0x08,0x00,0x0a,0x3f,0x02,0xc0,0xb8,0x21,0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x08, 
0x8c,0x50,0x00,0x00,0x02,0x60,0x20,0x21,0x0c,0x00,0x1e,0xf3,0x02,0x00,0x28,0x21, 0x30,0x42,0x00,0xff,0x02,0x00,0x28,0x21,0x02,0x40,0x20,0x21,0x0c,0x00,0x1e,0xf3, 0xaf,0xa2,0x00,0x18,0x8f,0xa4,0x00,0x18,0x00,0x00,0x00,0x00,0x10,0x80,0x00,0xed, 0x30,0x50,0x00,0xff,0x12,0x00,0x00,0x18,0x24,0x11,0x00,0x01,0x96,0x63,0x00,0x14, 0x96,0x44,0x00,0x14,0x27,0x85,0x90,0x00,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21, 0x00,0x02,0x10,0x80,0x00,0x45,0x10,0x21,0x00,0x04,0x18,0xc0,0x8c,0x46,0x00,0x08, 0x00,0x64,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x65,0x18,0x21,0x00,0x06,0x17,0x02, 0x24,0x04,0x00,0xff,0x8c,0x63,0x00,0x08,0x10,0x44,0x00,0xd6,0x00,0x03,0x17,0x02, 0x10,0x44,0x00,0xd5,0x3c,0x02,0x80,0x00,0x00,0x66,0x18,0x2b,0x24,0x11,0x00,0x02, 0x24,0x02,0x00,0x01,0x00,0x43,0x88,0x0a,0x24,0x02,0x00,0x01,0x12,0x22,0x00,0x5a, 0x24,0x02,0x00,0x02,0x16,0x22,0xff,0xbd,0x00,0x00,0x00,0x00,0x96,0x49,0x00,0x14, 0x27,0x82,0x90,0x04,0x02,0xa0,0xb8,0x21,0x00,0x09,0x50,0xc0,0x01,0x49,0x18,0x21, 0x00,0x03,0x40,0x80,0x01,0x02,0x10,0x21,0x8c,0x43,0x00,0x18,0x00,0x00,0x00,0x00, 0x8c,0x65,0x00,0x08,0x8c,0x62,0x00,0x0c,0x8c,0x62,0x00,0x04,0x00,0x05,0x24,0x42, 0x00,0x05,0x1c,0x82,0x30,0x42,0x00,0x10,0x30,0x66,0x00,0x01,0x14,0x40,0x00,0x41, 0x30,0x87,0x00,0x01,0x27,0x82,0x90,0x18,0x01,0x02,0x10,0x21,0x80,0x44,0x00,0x00, 0x27,0x82,0xb5,0x78,0x00,0x04,0x19,0x00,0x00,0x64,0x18,0x23,0x00,0x03,0x18,0x80, 0x00,0x64,0x18,0x23,0x00,0x03,0x18,0x80,0x00,0x62,0x10,0x21,0x90,0x45,0x00,0x05, 0x27,0x84,0xb4,0xa0,0x00,0x64,0x18,0x21,0x90,0x63,0x00,0x00,0x10,0xa0,0x00,0x2b, 0x2c,0x64,0x00,0x0c,0x14,0x80,0x00,0x04,0x00,0x60,0x10,0x21,0x00,0x06,0x11,0x00, 0x00,0x62,0x10,0x21,0x24,0x42,0x00,0x24,0x3c,0x01,0xb0,0x03,0xa0,0x22,0x00,0xe1, 0x14,0x80,0x00,0x06,0x00,0x60,0x28,0x21,0x00,0x07,0x10,0x40,0x00,0x46,0x10,0x21, 0x00,0x02,0x11,0x00,0x00,0x62,0x10,0x21,0x24,0x45,0x00,0x04,0x01,0x49,0x10,0x21, 0x27,0x83,0x90,0x10,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x00,0xa0,0x18,0x21, 
0xa0,0x45,0x00,0x03,0xa0,0x45,0x00,0x00,0x24,0x02,0x00,0x08,0x12,0x02,0x00,0x0b, 0x24,0x02,0x00,0x01,0x00,0x60,0x28,0x21,0x02,0x40,0x20,0x21,0x0c,0x00,0x1f,0x6f, 0xaf,0xa2,0x00,0x10,0x30,0x54,0xff,0xff,0x92,0x42,0x00,0x16,0x00,0x00,0x00,0x00, 0x02,0x02,0x10,0x25,0x08,0x00,0x0a,0x3f,0xa2,0x42,0x00,0x16,0x00,0x60,0x28,0x21, 0x02,0x40,0x20,0x21,0x0c,0x00,0x1f,0x20,0xaf,0xa0,0x00,0x10,0x08,0x00,0x0a,0xc2, 0x30,0x54,0xff,0xff,0x08,0x00,0x0a,0xaa,0x00,0x60,0x10,0x21,0x14,0x80,0xff,0xfd, 0x00,0x00,0x00,0x00,0x00,0x06,0x11,0x00,0x00,0x62,0x10,0x21,0x08,0x00,0x0a,0xaa, 0x24,0x42,0x00,0x04,0x27,0x82,0x90,0x10,0x01,0x02,0x10,0x21,0x90,0x43,0x00,0x00, 0x08,0x00,0x0a,0xba,0xa0,0x43,0x00,0x03,0x96,0x69,0x00,0x14,0x02,0xc0,0xb8,0x21, 0x24,0x0b,0x00,0x01,0x00,0x09,0x10,0xc0,0x00,0x49,0x18,0x21,0x00,0x03,0x40,0x80, 0x00,0x40,0x50,0x21,0x27,0x82,0x90,0x04,0x01,0x02,0x10,0x21,0x8c,0x43,0x00,0x18, 0x00,0x00,0x00,0x00,0x8c,0x65,0x00,0x08,0x8c,0x62,0x00,0x0c,0x8c,0x62,0x00,0x04, 0x00,0x05,0x24,0x42,0x00,0x05,0x1c,0x82,0x30,0x42,0x00,0x10,0x30,0x66,0x00,0x01, 0x10,0x40,0x00,0x0d,0x30,0x87,0x00,0x01,0x27,0x82,0x90,0x18,0x01,0x02,0x10,0x21, 0x80,0x43,0x00,0x00,0x00,0x00,0x58,0x21,0x00,0x03,0x11,0x00,0x00,0x43,0x10,0x23, 0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x23,0x00,0x02,0x10,0x80,0x27,0x83,0xb5,0x70, 0x00,0x43,0x10,0x21,0xa0,0x40,0x00,0x04,0x11,0x60,0x00,0x4f,0x00,0x00,0x00,0x00, 0x01,0x49,0x10,0x21,0x00,0x02,0x20,0x80,0x27,0x85,0x90,0x10,0x00,0x85,0x10,0x21, 0x80,0x43,0x00,0x05,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x42,0x01,0x49,0x10,0x21, 0x27,0x82,0x90,0x18,0x00,0x82,0x10,0x21,0x80,0x44,0x00,0x00,0x27,0x82,0xb5,0x78, 0x00,0x04,0x19,0x00,0x00,0x64,0x18,0x23,0x00,0x03,0x18,0x80,0x00,0x64,0x18,0x23, 0x00,0x03,0x18,0x80,0x00,0x62,0x10,0x21,0x90,0x45,0x00,0x05,0x27,0x84,0xb4,0xa0, 0x00,0x64,0x18,0x21,0x90,0x63,0x00,0x00,0x10,0xa0,0x00,0x2c,0x2c,0x64,0x00,0x0c, 0x14,0x80,0x00,0x04,0x00,0x60,0x10,0x21,0x00,0x06,0x11,0x00,0x00,0x62,0x10,0x21, 
0x24,0x42,0x00,0x24,0x3c,0x01,0xb0,0x03,0xa0,0x22,0x00,0xe1,0x14,0x80,0x00,0x06, 0x00,0x60,0x28,0x21,0x00,0x07,0x10,0x40,0x00,0x46,0x10,0x21,0x00,0x02,0x11,0x00, 0x00,0x62,0x10,0x21,0x24,0x45,0x00,0x04,0x01,0x49,0x10,0x21,0x27,0x83,0x90,0x10, 0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x00,0xa0,0x18,0x21,0xa0,0x45,0x00,0x03, 0xa0,0x45,0x00,0x00,0x8f,0xa4,0x00,0x18,0x24,0x02,0x00,0x08,0x10,0x82,0x00,0x0c, 0x00,0x60,0x28,0x21,0x24,0x02,0x00,0x01,0x02,0x60,0x20,0x21,0x0c,0x00,0x1f,0x6f, 0xaf,0xa2,0x00,0x10,0x8f,0xa3,0x00,0x18,0x30,0x54,0xff,0xff,0x92,0x62,0x00,0x16, 0x00,0x00,0x00,0x00,0x00,0x62,0x10,0x25,0x08,0x00,0x0a,0x3f,0xa2,0x62,0x00,0x16, 0x02,0x60,0x20,0x21,0x0c,0x00,0x1f,0x20,0xaf,0xa0,0x00,0x10,0x08,0x00,0x0b,0x31, 0x00,0x00,0x00,0x00,0x08,0x00,0x0b,0x19,0x00,0x60,0x10,0x21,0x14,0x80,0xff,0xfd, 0x00,0x00,0x00,0x00,0x00,0x06,0x11,0x00,0x00,0x62,0x10,0x21,0x08,0x00,0x0b,0x19, 0x24,0x42,0x00,0x04,0x00,0x02,0x10,0x80,0x00,0x45,0x10,0x21,0x90,0x43,0x00,0x00, 0x08,0x00,0x0b,0x29,0xa0,0x43,0x00,0x03,0x27,0x85,0x90,0x10,0x08,0x00,0x0b,0x45, 0x01,0x49,0x10,0x21,0x3c,0x02,0x80,0x00,0x00,0x62,0x18,0x26,0x08,0x00,0x0a,0x7a, 0x00,0xc2,0x30,0x26,0x12,0x00,0xff,0x2d,0x24,0x02,0x00,0x01,0x08,0x00,0x0a,0x7f, 0x24,0x11,0x00,0x02,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xd0, 0x24,0x42,0x2d,0x54,0x34,0x63,0x00,0x20,0x3c,0x05,0xb0,0x05,0xaf,0xb3,0x00,0x24, 0xaf,0xb2,0x00,0x20,0xaf,0xb1,0x00,0x1c,0xaf,0xbf,0x00,0x28,0xaf,0xb0,0x00,0x18, 0xac,0x62,0x00,0x00,0x34,0xa5,0x02,0x42,0x90,0xa2,0x00,0x00,0x00,0x80,0x90,0x21, 0x24,0x11,0x00,0x10,0x30,0x53,0x00,0xff,0x24,0x02,0x00,0x10,0x12,0x22,0x00,0xcf, 0x00,0x00,0x18,0x21,0x24,0x02,0x00,0x11,0x12,0x22,0x00,0xc1,0x24,0x02,0x00,0x12, 0x12,0x22,0x00,0xb4,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0xad,0xae,0x43,0x00,0x40, 0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2c,0x8c,0x44,0x00,0x00,0x3c,0x03,0x00,0x02, 0x34,0x63,0x00,0xff,0x00,0x83,0x80,0x24,0x00,0x10,0x14,0x43,0x10,0x40,0x00,0x05, 
0x00,0x00,0x00,0x00,0x8e,0x42,0x00,0x34,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x92, 0x00,0x00,0x00,0x00,0x93,0x83,0x8b,0x71,0x00,0x00,0x00,0x00,0x30,0x62,0x00,0x02, 0x10,0x40,0x00,0x04,0x32,0x10,0x00,0xff,0x00,0x10,0x11,0xc3,0x14,0x40,0x00,0x86, 0x00,0x00,0x00,0x00,0x16,0x00,0x00,0x15,0x02,0x00,0x10,0x21,0x26,0x22,0x00,0x01, 0x30,0x51,0x00,0xff,0x2e,0x23,0x00,0x13,0x14,0x60,0xff,0xdb,0x24,0x03,0x00,0x02, 0x12,0x63,0x00,0x73,0x24,0x02,0x00,0x05,0x2a,0x62,0x00,0x03,0x10,0x40,0x00,0x58, 0x24,0x02,0x00,0x04,0x24,0x02,0x00,0x01,0x12,0x62,0x00,0x4b,0x02,0x40,0x20,0x21, 0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2c,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x70,0x00,0xff,0x12,0x00,0x00,0x06,0x02,0x00,0x10,0x21,0x8f,0xbf,0x00,0x28, 0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x30, 0x92,0x46,0x00,0x04,0x8e,0x43,0x00,0x24,0x24,0x02,0x00,0x07,0x02,0x40,0x20,0x21, 0x00,0x00,0x28,0x21,0x24,0x07,0x00,0x06,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea, 0xaf,0xa3,0x00,0x14,0xae,0x42,0x00,0x24,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x02,0x2c, 0x00,0x00,0x00,0x00,0x30,0x50,0x00,0xff,0x16,0x00,0xff,0xec,0x02,0x00,0x10,0x21, 0x92,0x46,0x00,0x05,0x8e,0x43,0x00,0x28,0x24,0x02,0x00,0x05,0x02,0x40,0x20,0x21, 0x24,0x05,0x00,0x01,0x24,0x07,0x00,0x04,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea, 0xaf,0xa3,0x00,0x14,0xae,0x42,0x00,0x28,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x02,0x2c, 0x00,0x00,0x00,0x00,0x30,0x50,0x00,0xff,0x16,0x00,0xff,0xdc,0x02,0x00,0x10,0x21, 0x92,0x46,0x00,0x06,0x8e,0x43,0x00,0x2c,0x24,0x02,0x00,0x03,0x02,0x40,0x20,0x21, 0x24,0x05,0x00,0x02,0x00,0x00,0x38,0x21,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea, 0xaf,0xa3,0x00,0x14,0xae,0x42,0x00,0x2c,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x02,0x2c, 0x00,0x00,0x00,0x00,0x30,0x50,0x00,0xff,0x16,0x00,0xff,0xcc,0x02,0x00,0x10,0x21, 0x92,0x46,0x00,0x07,0x8e,0x43,0x00,0x30,0x24,0x02,0x00,0x02,0x02,0x40,0x20,0x21, 0x24,0x05,0x00,0x03,0x24,0x07,0x00,0x01,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea, 
0xaf,0xa3,0x00,0x14,0xae,0x42,0x00,0x30,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x02,0x2c, 0x08,0x00,0x0b,0x9b,0x30,0x42,0x00,0xff,0x92,0x46,0x00,0x04,0x8e,0x43,0x00,0x24, 0x24,0x02,0x00,0x07,0x00,0x00,0x28,0x21,0x24,0x07,0x00,0x06,0xaf,0xa2,0x00,0x10, 0x0c,0x00,0x09,0xea,0xaf,0xa3,0x00,0x14,0x08,0x00,0x0b,0x94,0xae,0x42,0x00,0x24, 0x12,0x62,0x00,0x0d,0x24,0x02,0x00,0x03,0x24,0x02,0x00,0x08,0x16,0x62,0xff,0xa8, 0x02,0x40,0x20,0x21,0x92,0x46,0x00,0x07,0x8e,0x42,0x00,0x30,0x24,0x05,0x00,0x03, 0x24,0x07,0x00,0x01,0xaf,0xa3,0x00,0x10,0x0c,0x00,0x09,0xea,0xaf,0xa2,0x00,0x14, 0x08,0x00,0x0b,0x94,0xae,0x42,0x00,0x30,0x92,0x46,0x00,0x06,0x8e,0x43,0x00,0x2c, 0x02,0x40,0x20,0x21,0x24,0x05,0x00,0x02,0x00,0x00,0x38,0x21,0xaf,0xa2,0x00,0x10, 0x0c,0x00,0x09,0xea,0xaf,0xa3,0x00,0x14,0x08,0x00,0x0b,0x94,0xae,0x42,0x00,0x2c, 0x92,0x46,0x00,0x05,0x8e,0x43,0x00,0x28,0x02,0x40,0x20,0x21,0x24,0x05,0x00,0x01, 0x24,0x07,0x00,0x04,0xaf,0xa2,0x00,0x10,0x0c,0x00,0x09,0xea,0xaf,0xa3,0x00,0x14, 0x08,0x00,0x0b,0x94,0xae,0x42,0x00,0x28,0x0c,0x00,0x01,0x57,0x24,0x04,0x00,0x01, 0x08,0x00,0x0b,0x85,0x00,0x00,0x00,0x00,0x8f,0x84,0xb4,0x40,0xae,0x40,0x00,0x34, 0x94,0x85,0x00,0x14,0x0c,0x00,0x1b,0x66,0x00,0x00,0x00,0x00,0x93,0x83,0x8b,0x71, 0x00,0x00,0x00,0x00,0x30,0x62,0x00,0x02,0x10,0x40,0xff,0x69,0x00,0x00,0x00,0x00, 0x0c,0x00,0x01,0x57,0x00,0x00,0x20,0x21,0x08,0x00,0x0b,0x7d,0x00,0x00,0x00,0x00, 0x02,0x40,0x20,0x21,0x0c,0x00,0x09,0x61,0x02,0x20,0x28,0x21,0x08,0x00,0x0b,0x71, 0x3c,0x02,0xb0,0x05,0x8e,0x42,0x00,0x3c,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x4a, 0x00,0x00,0x00,0x00,0x8f,0x82,0xb4,0x48,0x00,0x00,0x00,0x00,0x90,0x42,0x00,0x0a, 0x00,0x00,0x00,0x00,0x00,0x02,0x18,0x2b,0x08,0x00,0x0b,0x6e,0xae,0x43,0x00,0x3c, 0x8e,0x42,0x00,0x38,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x3d,0x24,0x02,0x00,0x12, 0x8f,0x82,0xb4,0x44,0x00,0x00,0x00,0x00,0x90,0x42,0x00,0x0a,0x00,0x00,0x00,0x00, 0x00,0x02,0x18,0x2b,0x08,0x00,0x0b,0x6e,0xae,0x43,0x00,0x38,0x8e,0x42,0x00,0x34, 
0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x30,0x24,0x02,0x00,0x11,0x8f,0x82,0xb4,0x40, 0x00,0x00,0x00,0x00,0x90,0x42,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x02,0x18,0x2b, 0x08,0x00,0x0b,0x6e,0xae,0x43,0x00,0x34,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00, 0x27,0xbd,0xff,0xe0,0x34,0x63,0x00,0x20,0x24,0x42,0x31,0x08,0x3c,0x08,0xb0,0x03, 0xaf,0xb1,0x00,0x14,0xac,0x62,0x00,0x00,0x35,0x08,0x01,0x00,0xaf,0xbf,0x00,0x18, 0xaf,0xb0,0x00,0x10,0x91,0x03,0x00,0x00,0x00,0xa0,0x48,0x21,0x24,0x11,0x00,0x0a, 0x2c,0xa5,0x00,0x04,0x24,0x02,0x00,0x10,0x00,0x45,0x88,0x0a,0x30,0x63,0x00,0x01, 0x00,0xc0,0x28,0x21,0x14,0x60,0x00,0x02,0x00,0x11,0x40,0x40,0x02,0x20,0x40,0x21, 0x84,0x83,0x00,0x0c,0x31,0x11,0x00,0xff,0x01,0x20,0x20,0x21,0x00,0x03,0x10,0xc0, 0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x08,0x00,0x43,0x10,0x21, 0x84,0x43,0x00,0x04,0x24,0x06,0x00,0x0e,0x10,0xe0,0x00,0x06,0x02,0x23,0x80,0x21, 0x02,0x00,0x10,0x21,0x8f,0xbf,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x20,0x0c,0x00,0x08,0xe3,0x00,0x00,0x00,0x00,0x02,0x11,0x18,0x21, 0x08,0x00,0x0c,0x64,0x00,0x62,0x80,0x21,0x27,0xbd,0xff,0xd0,0xaf,0xbf,0x00,0x28, 0xaf,0xb4,0x00,0x20,0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18,0xaf,0xb5,0x00,0x24, 0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0x84,0x82,0x00,0x0c,0x3c,0x06,0xb0,0x03, 0x34,0xc6,0x00,0x20,0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21,0x00,0x03,0x18,0x80, 0x27,0x82,0x90,0x04,0x00,0x62,0x10,0x21,0x8c,0x55,0x00,0x18,0x3c,0x02,0x80,0x00, 0x24,0x42,0x31,0xb8,0xac,0xc2,0x00,0x00,0x8e,0xb0,0x00,0x08,0x27,0x82,0x90,0x08, 0x00,0x62,0x18,0x21,0x90,0x71,0x00,0x07,0x00,0x10,0x86,0x43,0x32,0x10,0x00,0x01, 0x00,0xa0,0x38,0x21,0x02,0x00,0x30,0x21,0x00,0xa0,0x98,0x21,0x02,0x20,0x28,0x21, 0x0c,0x00,0x0c,0x42,0x00,0x80,0x90,0x21,0x02,0x20,0x20,0x21,0x02,0x00,0x28,0x21, 0x24,0x06,0x00,0x14,0x0c,0x00,0x08,0xe3,0x00,0x40,0xa0,0x21,0x86,0x43,0x00,0x0c, 0x3c,0x09,0xb0,0x09,0x3c,0x08,0xb0,0x09,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21, 
0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0x80,0x43,0x00,0x06, 0x3c,0x07,0xb0,0x09,0x3c,0x05,0xb0,0x09,0x28,0x62,0x00,0x00,0x24,0x64,0x00,0x03, 0x00,0x82,0x18,0x0b,0x00,0x03,0x18,0x83,0x3c,0x02,0xb0,0x09,0x00,0x03,0x18,0x80, 0x34,0x42,0x01,0x02,0x35,0x29,0x01,0x10,0x35,0x08,0x01,0x14,0x34,0xe7,0x01,0x20, 0x34,0xa5,0x01,0x24,0xa4,0x54,0x00,0x00,0x12,0x60,0x00,0x11,0x02,0xa3,0xa8,0x21, 0x8e,0xa2,0x00,0x0c,0x8e,0xa3,0x00,0x08,0x00,0x02,0x14,0x00,0x00,0x03,0x1c,0x02, 0x00,0x43,0x10,0x21,0xad,0x22,0x00,0x00,0x8e,0xa3,0x00,0x0c,0x00,0x00,0x00,0x00, 0x00,0x03,0x1c,0x02,0xa5,0x03,0x00,0x00,0x8f,0xbf,0x00,0x28,0x7b,0xb4,0x01,0x3c, 0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x30, 0x8e,0xa2,0x00,0x04,0x00,0x00,0x00,0x00,0xad,0x22,0x00,0x00,0x8e,0xa4,0x00,0x08, 0x00,0x00,0x00,0x00,0xa5,0x04,0x00,0x00,0x7a,0xa2,0x00,0x7c,0x00,0x00,0x00,0x00, 0x00,0x03,0x1c,0x00,0x00,0x02,0x14,0x02,0x00,0x62,0x18,0x21,0xac,0xe3,0x00,0x00, 0x8e,0xa2,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x02,0x14,0x02,0x08,0x00,0x0c,0xb6, 0xa4,0xa2,0x00,0x00,0x27,0xbd,0xff,0xe0,0xaf,0xb2,0x00,0x18,0xaf,0xb0,0x00,0x10, 0xaf,0xbf,0x00,0x1c,0xaf,0xb1,0x00,0x14,0x84,0x82,0x00,0x0c,0x00,0x80,0x90,0x21, 0x3c,0x05,0xb0,0x03,0x00,0x02,0x20,0xc0,0x00,0x82,0x20,0x21,0x00,0x04,0x20,0x80, 0x27,0x82,0x90,0x04,0x00,0x82,0x10,0x21,0x8c,0x51,0x00,0x18,0x3c,0x02,0x80,0x00, 0x34,0xa5,0x00,0x20,0x24,0x42,0x33,0x34,0x27,0x83,0x90,0x08,0xac,0xa2,0x00,0x00, 0x00,0x83,0x20,0x21,0x3c,0x02,0xb0,0x03,0x90,0x86,0x00,0x07,0x34,0x42,0x01,0x00, 0x8e,0x23,0x00,0x08,0x90,0x44,0x00,0x00,0x2c,0xc5,0x00,0x04,0x24,0x02,0x00,0x10, 0x24,0x10,0x00,0x0a,0x00,0x45,0x80,0x0a,0x00,0x03,0x1e,0x43,0x30,0x84,0x00,0x01, 0x30,0x65,0x00,0x01,0x14,0x80,0x00,0x02,0x00,0x10,0x10,0x40,0x02,0x00,0x10,0x21, 0x00,0xc0,0x20,0x21,0x24,0x06,0x00,0x20,0x0c,0x00,0x08,0xe3,0x30,0x50,0x00,0xff, 0x86,0x44,0x00,0x0c,0x27,0x85,0x90,0x10,0x3c,0x06,0xb0,0x09,0x00,0x04,0x18,0xc0, 
0x00,0x64,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x65,0x18,0x21,0x80,0x64,0x00,0x06, 0x00,0x50,0x10,0x21,0x34,0xc6,0x01,0x02,0x24,0x85,0x00,0x03,0x28,0x83,0x00,0x00, 0x00,0xa3,0x20,0x0b,0x00,0x04,0x20,0x83,0x00,0x04,0x20,0x80,0xa4,0xc2,0x00,0x00, 0x02,0x24,0x20,0x21,0x8c,0x83,0x00,0x04,0x3c,0x02,0xb0,0x09,0x34,0x42,0x01,0x10, 0xac,0x43,0x00,0x00,0x8c,0x86,0x00,0x08,0x3c,0x02,0xb0,0x09,0x34,0x42,0x01,0x14, 0xa4,0x46,0x00,0x00,0x8c,0x85,0x00,0x0c,0x8c,0x82,0x00,0x08,0x3c,0x06,0xb0,0x09, 0x00,0x05,0x2c,0x00,0x00,0x02,0x14,0x02,0x00,0xa2,0x28,0x21,0x34,0xc6,0x01,0x20, 0xac,0xc5,0x00,0x00,0x8c,0x83,0x00,0x0c,0x3c,0x05,0xb0,0x09,0x34,0xa5,0x01,0x24, 0x00,0x03,0x1c,0x02,0xa4,0xa3,0x00,0x00,0x92,0x42,0x00,0x0a,0x3c,0x03,0xb0,0x09, 0x34,0x63,0x01,0x30,0x00,0x02,0x13,0x00,0x24,0x42,0x00,0x04,0x30,0x42,0xff,0xff, 0xa4,0x62,0x00,0x00,0x86,0x44,0x00,0x0c,0x27,0x83,0x90,0x18,0x8f,0xbf,0x00,0x1c, 0x00,0x04,0x10,0xc0,0x00,0x44,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21, 0x94,0x44,0x00,0x02,0x8f,0xb2,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x3c,0x05,0xb0,0x09, 0x34,0xa5,0x01,0x32,0xa4,0xa4,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20, 0x27,0xbd,0xff,0xe0,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x00,0xaf,0xb0,0x00,0x10, 0x34,0x42,0x00,0x20,0x00,0xa0,0x80,0x21,0x24,0x63,0x34,0xc0,0x00,0x05,0x2c,0x43, 0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x18,0xac,0x43,0x00,0x00,0x10,0xa0,0x00,0x05, 0x00,0x80,0x88,0x21,0x8c,0x82,0x00,0x34,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0xb6, 0x00,0x00,0x00,0x00,0x32,0x10,0x00,0xff,0x12,0x00,0x00,0x4c,0x00,0x00,0x10,0x21, 0x24,0x02,0x00,0x08,0x12,0x02,0x00,0xa3,0x2a,0x02,0x00,0x09,0x10,0x40,0x00,0x89, 0x24,0x02,0x00,0x40,0x24,0x04,0x00,0x02,0x12,0x04,0x00,0x79,0x2a,0x02,0x00,0x03, 0x10,0x40,0x00,0x69,0x24,0x02,0x00,0x04,0x24,0x02,0x00,0x01,0x12,0x02,0x00,0x5a, 0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x05,0x34,0x42,0x00,0x08,0x3c,0x03,0x80,0x00, 0xa2,0x20,0x00,0x4e,0xac,0x43,0x00,0x00,0x82,0x24,0x00,0x11,0x92,0x27,0x00,0x11, 
0x10,0x80,0x00,0x4e,0x00,0x00,0x00,0x00,0x92,0x26,0x00,0x0a,0x24,0x02,0x00,0x12, 0x10,0x46,0x00,0x09,0x30,0xc2,0x00,0xff,0x27,0x83,0xb4,0x00,0x00,0x02,0x10,0x80, 0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x94,0x83,0x00,0x14, 0x00,0x00,0x00,0x00,0xa6,0x23,0x00,0x0c,0x3c,0x02,0xb0,0x09,0x34,0x42,0x00,0x40, 0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x03,0xa2,0x23,0x00,0x10, 0x14,0x60,0x00,0x2b,0x30,0x65,0x00,0x01,0x30,0xc2,0x00,0xff,0x27,0x83,0xb4,0x00, 0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x82,0x23,0x00,0x12, 0x90,0x82,0x00,0x16,0x00,0x00,0x00,0x00,0x00,0x02,0x11,0x42,0x30,0x42,0x00,0x01, 0x00,0x62,0x18,0x21,0x00,0x03,0x26,0x00,0x14,0x80,0x00,0x18,0xa2,0x23,0x00,0x12, 0x00,0x07,0x16,0x00,0x14,0x40,0x00,0x11,0x24,0x02,0x00,0x01,0x96,0x23,0x00,0x0c, 0x27,0x84,0x90,0x10,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80, 0x00,0x44,0x10,0x21,0x80,0x45,0x00,0x06,0x00,0x03,0x1a,0x00,0x3c,0x02,0xb0,0x00, 0x00,0x65,0x18,0x21,0x00,0x62,0x18,0x21,0x90,0x64,0x00,0x00,0x90,0x62,0x00,0x04, 0xa2,0x20,0x00,0x15,0xa3,0x80,0x8b,0xd4,0x24,0x02,0x00,0x01,0x8f,0xbf,0x00,0x18, 0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x0c,0x00,0x0c,0xcd, 0x02,0x20,0x20,0x21,0x92,0x27,0x00,0x11,0x08,0x00,0x0d,0x7d,0x00,0x07,0x16,0x00, 0x0c,0x00,0x0c,0x6e,0x02,0x20,0x20,0x21,0x86,0x23,0x00,0x0c,0x27,0x84,0x90,0x08, 0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x44,0x20,0x21, 0x90,0x85,0x00,0x07,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0xa2,0x25,0x00,0x13, 0x90,0x83,0x00,0x07,0x08,0x00,0x0d,0x95,0xa0,0x43,0x00,0x02,0x92,0x26,0x00,0x0a, 0x08,0x00,0x0d,0x5e,0x30,0xc2,0x00,0xff,0x8e,0x22,0x00,0x24,0x00,0x00,0x00,0x00, 0x10,0x50,0x00,0x07,0xa2,0x20,0x00,0x08,0x24,0x02,0x00,0x07,0xa2,0x22,0x00,0x0a, 0x92,0x22,0x00,0x27,0xae,0x20,0x00,0x24,0x08,0x00,0x0d,0x51,0xa2,0x22,0x00,0x04, 0x08,0x00,0x0d,0xaf,0x24,0x02,0x00,0x06,0x16,0x02,0xff,0x9b,0x3c,0x02,0xb0,0x05, 
0x8e,0x23,0x00,0x2c,0x24,0x02,0x00,0x01,0x10,0x62,0x00,0x07,0xa2,0x24,0x00,0x08, 0x24,0x02,0x00,0x03,0xa2,0x22,0x00,0x0a,0x92,0x22,0x00,0x2f,0xae,0x20,0x00,0x2c, 0x08,0x00,0x0d,0x51,0xa2,0x22,0x00,0x06,0x08,0x00,0x0d,0xbe,0xa2,0x20,0x00,0x0a, 0x8e,0x22,0x00,0x28,0x24,0x03,0x00,0x01,0x24,0x04,0x00,0x01,0x10,0x44,0x00,0x07, 0xa2,0x23,0x00,0x08,0x24,0x02,0x00,0x05,0xa2,0x22,0x00,0x0a,0x92,0x22,0x00,0x2b, 0xae,0x20,0x00,0x28,0x08,0x00,0x0d,0x51,0xa2,0x22,0x00,0x05,0x08,0x00,0x0d,0xca, 0x24,0x02,0x00,0x04,0x12,0x02,0x00,0x12,0x2a,0x02,0x00,0x41,0x10,0x40,0x00,0x09, 0x24,0x02,0x00,0x80,0x24,0x02,0x00,0x20,0x16,0x02,0xff,0x7b,0x3c,0x02,0xb0,0x05, 0x24,0x02,0x00,0x12,0xa2,0x22,0x00,0x0a,0xa2,0x22,0x00,0x08,0x08,0x00,0x0d,0x51, 0xae,0x20,0x00,0x3c,0x16,0x02,0xff,0x74,0x3c,0x02,0xb0,0x05,0x24,0x02,0x00,0x10, 0xa2,0x22,0x00,0x0a,0xa2,0x22,0x00,0x08,0x08,0x00,0x0d,0x51,0xae,0x20,0x00,0x34, 0x24,0x02,0x00,0x11,0xa2,0x22,0x00,0x0a,0xa2,0x22,0x00,0x08,0x08,0x00,0x0d,0x51, 0xae,0x20,0x00,0x38,0x8e,0x24,0x00,0x30,0x24,0x02,0x00,0x03,0x24,0x03,0x00,0x01, 0x10,0x83,0x00,0x07,0xa2,0x22,0x00,0x08,0x24,0x02,0x00,0x02,0xa2,0x22,0x00,0x0a, 0x92,0x22,0x00,0x33,0xae,0x20,0x00,0x30,0x08,0x00,0x0d,0x51,0xa2,0x22,0x00,0x07, 0x08,0x00,0x0d,0xf0,0xa2,0x24,0x00,0x0a,0x8f,0x84,0xb4,0x40,0xae,0x20,0x00,0x34, 0x94,0x85,0x00,0x14,0x0c,0x00,0x1b,0x66,0x32,0x10,0x00,0xff,0x08,0x00,0x0d,0x42, 0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x37,0xf4, 0x34,0x63,0x00,0x20,0xac,0x62,0x00,0x00,0x80,0xa2,0x00,0x15,0x3c,0x06,0xb0,0x05, 0x10,0x40,0x00,0x0a,0x34,0xc6,0x02,0x54,0x83,0x83,0x8b,0xd4,0x00,0x00,0x00,0x00, 0xac,0x83,0x00,0x24,0x8c,0xc2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x17,0x42, 0x30,0x42,0x00,0x01,0x03,0xe0,0x00,0x08,0xac,0x82,0x00,0x28,0x8c,0x82,0x00,0x2c, 0x3c,0x06,0xb0,0x05,0x34,0xc6,0x04,0x50,0x00,0x02,0x18,0x43,0x30,0x63,0x00,0x01, 0x10,0x40,0x00,0x04,0x30,0x45,0x00,0x01,0xac,0x83,0x00,0x28,0x03,0xe0,0x00,0x08, 
0xac,0x85,0x00,0x24,0x90,0xc2,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff, 0x30,0x43,0x00,0x02,0x30,0x42,0x00,0x01,0xac,0x83,0x00,0x28,0x03,0xe0,0x00,0x08, 0xac,0x82,0x00,0x24,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xd8, 0x34,0x63,0x00,0x20,0x24,0x42,0x38,0x84,0xac,0x62,0x00,0x00,0xaf,0xb1,0x00,0x1c, 0xaf,0xbf,0x00,0x20,0xaf,0xb0,0x00,0x18,0x90,0xa6,0x00,0x0a,0x27,0x83,0xb4,0x00, 0x00,0xa0,0x88,0x21,0x00,0x06,0x10,0x80,0x00,0x43,0x10,0x21,0x8c,0x50,0x00,0x00, 0x80,0xa5,0x00,0x11,0x92,0x03,0x00,0x12,0x10,0xa0,0x00,0x04,0xa2,0x20,0x00,0x15, 0x24,0x02,0x00,0x12,0x10,0xc2,0x00,0xda,0x00,0x00,0x00,0x00,0x82,0x22,0x00,0x12, 0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x67,0x00,0x00,0x00,0x00,0xa2,0x20,0x00,0x12, 0xa2,0x00,0x00,0x19,0x86,0x23,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x03,0x10,0xc0, 0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x20,0x00,0x43,0x10,0x21, 0xa0,0x40,0x00,0x00,0x92,0x03,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0xdf, 0xa2,0x03,0x00,0x16,0x82,0x02,0x00,0x12,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x20, 0x00,0x00,0x00,0x00,0x92,0x23,0x00,0x08,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x45, 0x24,0x02,0x00,0x01,0xa2,0x20,0x00,0x04,0x92,0x08,0x00,0x04,0x00,0x00,0x00,0x00, 0x15,0x00,0x00,0x1e,0x24,0x02,0x00,0x01,0x92,0x07,0x00,0x0a,0xa2,0x02,0x00,0x17, 0x92,0x02,0x00,0x16,0x30,0xe3,0x00,0xff,0x30,0x42,0x00,0xe4,0x10,0x60,0x00,0x03, 0xa2,0x02,0x00,0x16,0x34,0x42,0x00,0x01,0xa2,0x02,0x00,0x16,0x11,0x00,0x00,0x05, 0x00,0x00,0x00,0x00,0x92,0x02,0x00,0x16,0x00,0x00,0x00,0x00,0x34,0x42,0x00,0x02, 0xa2,0x02,0x00,0x16,0x92,0x02,0x00,0x17,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x08, 0x00,0x00,0x00,0x00,0x96,0x02,0x00,0x06,0x00,0x00,0x00,0x00,0xa6,0x02,0x00,0x14, 0x8f,0xbf,0x00,0x20,0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28, 0x96,0x02,0x00,0x00,0x08,0x00,0x0e,0x6c,0xa6,0x02,0x00,0x14,0x92,0x07,0x00,0x0a, 0x00,0x00,0x00,0x00,0x14,0xe0,0x00,0x03,0x00,0x00,0x00,0x00,0x08,0x00,0x0e,0x58, 
0xa2,0x00,0x00,0x17,0x96,0x04,0x00,0x00,0x96,0x05,0x00,0x06,0x27,0x86,0x90,0x00, 0x00,0x04,0x18,0xc0,0x00,0x64,0x18,0x21,0x00,0x05,0x10,0xc0,0x00,0x45,0x10,0x21, 0x00,0x03,0x18,0x80,0x00,0x66,0x18,0x21,0x00,0x02,0x10,0x80,0x00,0x46,0x10,0x21, 0x8c,0x66,0x00,0x08,0x8c,0x45,0x00,0x08,0x3c,0x03,0x80,0x00,0x00,0xc3,0x20,0x24, 0x10,0x80,0x00,0x08,0x00,0xa3,0x10,0x24,0x10,0x40,0x00,0x04,0x00,0x00,0x18,0x21, 0x10,0x80,0x00,0x02,0x24,0x03,0x00,0x01,0x00,0xa6,0x18,0x2b,0x08,0x00,0x0e,0x58, 0xa2,0x03,0x00,0x17,0x10,0x40,0xff,0xfd,0x00,0xa6,0x18,0x2b,0x08,0x00,0x0e,0x8c, 0x00,0x00,0x00,0x00,0x10,0x62,0x00,0x09,0x24,0x02,0x00,0x02,0x10,0x62,0x00,0x05, 0x24,0x02,0x00,0x03,0x14,0x62,0xff,0xb8,0x00,0x00,0x00,0x00,0x08,0x00,0x0e,0x52, 0xa2,0x20,0x00,0x07,0x08,0x00,0x0e,0x52,0xa2,0x20,0x00,0x06,0x08,0x00,0x0e,0x52, 0xa2,0x20,0x00,0x05,0x82,0x22,0x00,0x10,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x69, 0x2c,0x62,0x00,0x02,0x10,0x40,0x00,0x49,0x3c,0x02,0xb0,0x09,0x92,0x25,0x00,0x08, 0x00,0x00,0x00,0x00,0x30,0xa6,0x00,0xff,0x2c,0xc2,0x00,0x04,0x10,0x40,0x00,0x3b, 0x2c,0xc2,0x00,0x10,0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00, 0x24,0x02,0x00,0x01,0x00,0xc2,0x10,0x04,0x00,0x02,0x10,0x27,0x00,0x62,0x18,0x24, 0xa0,0x83,0x00,0x00,0x86,0x23,0x00,0x0c,0x96,0x26,0x00,0x0c,0x00,0x03,0x10,0xc0, 0x00,0x43,0x10,0x21,0x00,0x02,0x28,0x80,0x27,0x83,0x90,0x04,0x00,0xa3,0x18,0x21, 0x8c,0x64,0x00,0x18,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x04,0x00,0x00,0x00,0x00, 0x30,0x42,0x00,0x10,0x10,0x40,0x00,0x18,0x24,0x07,0x00,0x01,0x93,0x82,0x8b,0x71, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,0x14,0x40,0x00,0x0a,0x24,0x05,0x00,0x24, 0x00,0x06,0x2c,0x00,0x00,0x05,0x2c,0x03,0x0c,0x00,0x1b,0x66,0x02,0x00,0x20,0x21, 0x92,0x02,0x00,0x16,0xa2,0x00,0x00,0x12,0x30,0x42,0x00,0xe7,0x08,0x00,0x0e,0x49, 0xa2,0x02,0x00,0x16,0xf0,0xc5,0x00,0x06,0x00,0x00,0x28,0x12,0x27,0x82,0x90,0x00, 0x00,0xa2,0x28,0x21,0x0c,0x00,0x01,0x49,0x3c,0x04,0x00,0x80,0x96,0x26,0x00,0x0c, 
0x08,0x00,0x0e,0xc9,0x00,0x06,0x2c,0x00,0x27,0x83,0x90,0x10,0x27,0x82,0x90,0x18, 0x00,0xa2,0x10,0x21,0x00,0xa3,0x18,0x21,0x90,0x44,0x00,0x00,0x90,0x65,0x00,0x05, 0x93,0x82,0x80,0x10,0x00,0x00,0x30,0x21,0x0c,0x00,0x21,0x9a,0xaf,0xa2,0x00,0x10, 0x96,0x26,0x00,0x0c,0x08,0x00,0x0e,0xc3,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0xcd, 0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x30,0xa5,0x00,0x0f, 0x24,0x02,0x00,0x80,0x08,0x00,0x0e,0xb2,0x00,0xa2,0x10,0x07,0x86,0x26,0x00,0x0c, 0x3c,0x03,0xb0,0x09,0x34,0x42,0x01,0x72,0x34,0x63,0x01,0x78,0x94,0x47,0x00,0x00, 0x8c,0x65,0x00,0x00,0x00,0x06,0x10,0xc0,0x00,0x46,0x10,0x21,0x3c,0x04,0xb0,0x09, 0xae,0x25,0x00,0x1c,0x34,0x84,0x01,0x7c,0x27,0x83,0x90,0x04,0x00,0x02,0x10,0x80, 0x8c,0x85,0x00,0x00,0x00,0x43,0x10,0x21,0x8c,0x43,0x00,0x18,0xae,0x25,0x00,0x20, 0xa6,0x27,0x00,0x18,0x8c,0x66,0x00,0x08,0x02,0x20,0x20,0x21,0x0c,0x00,0x0f,0x19, 0x00,0x00,0x28,0x21,0x86,0x25,0x00,0x18,0x8e,0x26,0x00,0x1c,0x8e,0x27,0x00,0x20, 0x02,0x20,0x20,0x21,0x0c,0x00,0x1c,0x68,0xaf,0xa2,0x00,0x10,0x08,0x00,0x0e,0x49, 0xa2,0x02,0x00,0x12,0x92,0x22,0x00,0x08,0x08,0x00,0x0e,0x49,0xa2,0x22,0x00,0x09, 0xa2,0x20,0x00,0x11,0x80,0x82,0x00,0x50,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x03, 0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xd0,0xac,0x40,0x00,0x00,0x08,0x00,0x0e,0x49, 0xa0,0x80,0x00,0x50,0x94,0x8a,0x00,0x0c,0x24,0x03,0x00,0x24,0x00,0x80,0x70,0x21, 0x3c,0x02,0x80,0x00,0x3c,0x04,0xb0,0x03,0x24,0x42,0x3c,0x64,0xf1,0x43,0x00,0x06, 0x34,0x84,0x00,0x20,0x00,0x00,0x18,0x12,0x00,0xa0,0x68,0x21,0xac,0x82,0x00,0x00, 0x27,0x85,0x90,0x10,0x27,0x82,0x90,0x0f,0x27,0xbd,0xff,0xf8,0x00,0x62,0x60,0x21, 0x00,0x65,0x58,0x21,0x00,0x00,0xc0,0x21,0x11,0xa0,0x00,0xcc,0x00,0x00,0x78,0x21, 0x00,0x0a,0x1c,0x00,0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21, 0x00,0x02,0x10,0x80,0x00,0x45,0x10,0x21,0x91,0x87,0x00,0x00,0x80,0x48,0x00,0x04, 0x03,0xa0,0x60,0x21,0x00,0x0a,0x1c,0x00,0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0, 
0x00,0x43,0x10,0x21,0x00,0x02,0x48,0x80,0x27,0x83,0x90,0x04,0xa3,0xa7,0x00,0x00, 0x01,0x23,0x18,0x21,0x8c,0x64,0x00,0x18,0x25,0x02,0xff,0xff,0x00,0x48,0x40,0x0b, 0x8c,0x83,0x00,0x04,0x2d,0x05,0x00,0x07,0x24,0x02,0x00,0x06,0x30,0x63,0x00,0x08, 0x14,0x60,0x00,0x35,0x00,0x45,0x40,0x0a,0x93,0xa7,0x00,0x00,0x27,0x82,0x90,0x18, 0x01,0x22,0x10,0x21,0x30,0xe3,0x00,0xf0,0x38,0x63,0x00,0x50,0x30,0xe5,0x00,0xff, 0x00,0x05,0x20,0x2b,0x00,0x03,0x18,0x2b,0x00,0x64,0x18,0x24,0x90,0x49,0x00,0x00, 0x10,0x60,0x00,0x16,0x30,0xe4,0x00,0x0f,0x24,0x02,0x00,0x04,0x10,0xa2,0x00,0x9d, 0x00,0x00,0x00,0x00,0x11,0xa0,0x00,0x3a,0x2c,0xa2,0x00,0x0c,0x10,0x40,0x00,0x02, 0x24,0x84,0x00,0x0c,0x00,0xe0,0x20,0x21,0x30,0x84,0x00,0xff,0x00,0x04,0x10,0x40, 0x27,0x83,0xbb,0x1c,0x00,0x44,0x10,0x21,0x00,0x43,0x10,0x21,0x90,0x47,0x00,0x00, 0x00,0x00,0x00,0x00,0x2c,0xe3,0x00,0x0c,0xa3,0xa7,0x00,0x00,0x10,0x60,0x00,0x02, 0x24,0xe2,0x00,0x04,0x00,0xe0,0x10,0x21,0xa3,0xa2,0x00,0x00,0x91,0x65,0x00,0x00, 0x91,0x82,0x00,0x00,0x30,0xa3,0x00,0xff,0x00,0x62,0x10,0x2b,0x10,0x40,0x00,0x0e, 0x2c,0x62,0x00,0x0c,0x14,0x40,0x00,0x03,0x00,0x60,0x20,0x21,0x30,0xa2,0x00,0x0f, 0x24,0x44,0x00,0x0c,0x00,0x04,0x10,0x40,0x00,0x44,0x20,0x21,0x27,0x83,0xbb,0x1c, 0x00,0x83,0x18,0x21,0x90,0x62,0x00,0x02,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x05, 0x00,0x09,0x11,0x00,0xa1,0x85,0x00,0x00,0x93,0xa2,0x00,0x00,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x08,0x00,0x49,0x10,0x23,0x00,0x02,0x10,0x80,0x00,0x49,0x10,0x23, 0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x21,0x27,0x83,0xb4,0xa8,0x00,0x43,0x10,0x21, 0x90,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x2c,0x83,0x00,0x0c,0x14,0x60,0x00,0x06, 0x00,0x80,0x10,0x21,0x00,0x18,0x10,0x40,0x00,0x4f,0x10,0x21,0x00,0x02,0x11,0x00, 0x00,0x82,0x10,0x21,0x24,0x42,0x00,0x04,0x08,0x00,0x0f,0x7a,0xa1,0x82,0x00,0x00, 0x8f,0x8d,0x81,0x5c,0x00,0x00,0x00,0x00,0x01,0xa8,0x10,0x21,0x90,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0x10,0x60,0xff,0xd1,0x00,0x00,0x28,0x21,0x00,0x06,0x74,0x82, 
0x30,0xe2,0x00,0xff,0x2c,0x42,0x00,0x0c,0x14,0x40,0x00,0x03,0x00,0xe0,0x10,0x21, 0x30,0xe2,0x00,0x0f,0x24,0x42,0x00,0x0c,0x30,0x44,0x00,0xff,0xa3,0xa2,0x00,0x00, 0x24,0x02,0x00,0x0c,0x10,0x82,0x00,0x0d,0x00,0x09,0x11,0x00,0x00,0x49,0x10,0x23, 0x00,0x02,0x10,0x80,0x00,0x04,0x18,0x40,0x00,0x49,0x10,0x23,0x00,0x64,0x18,0x21, 0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x27,0x84,0xb4,0xa8,0x00,0x44,0x10,0x21, 0x90,0x47,0x00,0x00,0x00,0x00,0x00,0x00,0xa3,0xa7,0x00,0x00,0x00,0x0a,0x1c,0x00, 0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80, 0x27,0x83,0x90,0x04,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x18,0x00,0x00,0x00,0x00, 0x8c,0x83,0x00,0x04,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x10,0x14,0x60,0x00,0x33, 0x00,0x06,0x14,0x42,0x00,0x09,0x11,0x00,0x00,0x49,0x10,0x23,0x00,0x02,0x10,0x80, 0x00,0x49,0x10,0x23,0x27,0x83,0xb5,0x78,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21, 0x90,0x44,0x00,0x04,0x90,0x43,0x00,0x05,0x00,0x00,0x00,0x00,0x00,0x64,0xc0,0x24, 0x93,0xa7,0x00,0x00,0x00,0x00,0x00,0x00,0x2c,0xe2,0x00,0x0f,0x10,0x40,0x00,0x0f, 0x31,0xcf,0x00,0x01,0x00,0x0a,0x1c,0x00,0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0, 0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x84,0x90,0x00,0x00,0x44,0x10,0x21, 0x84,0x43,0x00,0x06,0x00,0x00,0x00,0x00,0x28,0x63,0x06,0x41,0x14,0x60,0x00,0x04, 0x30,0xe2,0x00,0xff,0x24,0x07,0x00,0x0f,0xa3,0xa7,0x00,0x00,0x30,0xe2,0x00,0xff, 0x2c,0x42,0x00,0x0c,0x14,0x40,0x00,0x06,0x00,0xe0,0x10,0x21,0x00,0x18,0x10,0x40, 0x00,0x4f,0x10,0x21,0x00,0x02,0x11,0x00,0x00,0x47,0x10,0x21,0x24,0x42,0x00,0x04, 0xa3,0xa2,0x00,0x00,0x00,0x40,0x38,0x21,0x01,0xa8,0x10,0x21,0x90,0x43,0x00,0x00, 0x24,0xa4,0x00,0x01,0x30,0x85,0xff,0xff,0x00,0xa3,0x18,0x2b,0x14,0x60,0xff,0xad, 0x30,0xe2,0x00,0xff,0x08,0x00,0x0f,0x67,0x00,0x00,0x00,0x00,0x08,0x00,0x0f,0xc8, 0x30,0x58,0x00,0x01,0x81,0xc2,0x00,0x48,0x00,0x00,0x00,0x00,0x10,0x40,0xff,0x73, 0x00,0x00,0x00,0x00,0x08,0x00,0x0f,0x55,0x00,0x00,0x00,0x00,0x00,0x0a,0x1c,0x00, 
0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80, 0x00,0x45,0x10,0x21,0x80,0x48,0x00,0x05,0x91,0x67,0x00,0x00,0x08,0x00,0x0f,0x35, 0x03,0xa0,0x58,0x21,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20, 0x24,0x42,0x40,0x04,0x03,0xe0,0x00,0x08,0xac,0x62,0x00,0x00,0x27,0xbd,0xff,0xc0, 0xaf,0xb7,0x00,0x34,0xaf,0xb6,0x00,0x30,0xaf,0xb5,0x00,0x2c,0xaf,0xb4,0x00,0x28, 0xaf,0xb3,0x00,0x24,0xaf,0xb2,0x00,0x20,0xaf,0xbf,0x00,0x3c,0xaf,0xbe,0x00,0x38, 0xaf,0xb1,0x00,0x1c,0xaf,0xb0,0x00,0x18,0x84,0x82,0x00,0x0c,0x27,0x93,0x90,0x04, 0x3c,0x05,0xb0,0x03,0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21,0x00,0x03,0x18,0x80, 0x00,0x73,0x10,0x21,0x8c,0x5e,0x00,0x18,0x3c,0x02,0x80,0x00,0x34,0xa5,0x00,0x20, 0x24,0x42,0x40,0x1c,0xac,0xa2,0x00,0x00,0x8f,0xd0,0x00,0x08,0x27,0x95,0x90,0x10, 0x00,0x75,0x18,0x21,0x00,0x00,0x28,0x21,0x02,0x00,0x30,0x21,0x90,0x71,0x00,0x00, 0x0c,0x00,0x0f,0x19,0x00,0x80,0xb0,0x21,0x00,0x40,0x90,0x21,0x00,0x10,0x14,0x42, 0x30,0x54,0x00,0x01,0x02,0x40,0x20,0x21,0x00,0x10,0x14,0x82,0x02,0x80,0x28,0x21, 0x12,0x51,0x00,0x23,0x00,0x10,0xbf,0xc2,0x86,0xc3,0x00,0x0c,0x30,0x50,0x00,0x01, 0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x55,0x10,0x21, 0xa0,0x52,0x00,0x00,0x86,0xc3,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x03,0x10,0xc0, 0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x53,0x30,0x21,0x8c,0xc7,0x00,0x18, 0x27,0x83,0x90,0x00,0x00,0x43,0x10,0x21,0x8c,0xe3,0x00,0x04,0x84,0x46,0x00,0x06, 0x00,0x03,0x19,0x42,0x0c,0x00,0x08,0xe3,0x30,0x73,0x00,0x01,0x00,0x40,0x88,0x21, 0x02,0x40,0x20,0x21,0x02,0x80,0x28,0x21,0x16,0xe0,0x00,0x10,0x02,0x00,0x30,0x21, 0x86,0xc2,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21, 0x00,0x03,0x18,0x80,0x27,0x82,0x90,0x08,0x00,0x62,0x18,0x21,0xa4,0x71,0x00,0x04, 0x7b,0xbe,0x01,0xfc,0x7b,0xb6,0x01,0xbc,0x7b,0xb4,0x01,0x7c,0x7b,0xb2,0x01,0x3c, 0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x40,0x86,0xc3,0x00,0x0c, 
0xaf,0xb3,0x00,0x10,0xaf,0xa0,0x00,0x14,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21, 0x00,0x02,0x10,0x80,0x00,0x55,0x10,0x21,0x80,0x47,0x00,0x06,0x00,0x00,0x00,0x00, 0x24,0xe7,0x00,0x02,0x00,0x07,0x17,0xc2,0x00,0xe2,0x38,0x21,0x00,0x07,0x38,0x43, 0x00,0x07,0x38,0x40,0x0c,0x00,0x09,0x0a,0x03,0xc7,0x38,0x21,0x08,0x00,0x10,0x48, 0x02,0x22,0x88,0x21,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0xd0, 0x34,0x63,0x00,0x20,0x24,0x42,0x41,0xa4,0xaf,0xb2,0x00,0x20,0xac,0x62,0x00,0x00, 0xaf,0xbf,0x00,0x28,0xaf,0xb3,0x00,0x24,0xaf,0xb1,0x00,0x1c,0xaf,0xb0,0x00,0x18, 0x3c,0x02,0xb0,0x03,0x90,0x83,0x00,0x0a,0x34,0x42,0x01,0x04,0x94,0x45,0x00,0x00, 0x00,0x03,0x18,0x80,0x27,0x82,0xb4,0x00,0x00,0x62,0x18,0x21,0x30,0xa6,0xff,0xff, 0x8c,0x71,0x00,0x00,0x80,0x85,0x00,0x12,0x30,0xc9,0x00,0xff,0x00,0x06,0x32,0x02, 0xa4,0x86,0x00,0x44,0xa4,0x89,0x00,0x46,0x82,0x22,0x00,0x12,0x00,0x80,0x90,0x21, 0x10,0xa0,0x00,0x1b,0xa0,0x80,0x00,0x15,0x00,0xc5,0x10,0x2a,0x10,0x40,0x00,0x14, 0x00,0x00,0x00,0x00,0xa2,0x20,0x00,0x19,0x84,0x83,0x00,0x0c,0x00,0x00,0x00,0x00, 0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x20, 0x00,0x43,0x10,0x21,0xa0,0x40,0x00,0x00,0xa0,0x80,0x00,0x12,0x92,0x22,0x00,0x16, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xdf,0xa2,0x22,0x00,0x16,0x8f,0xbf,0x00,0x28, 0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x30, 0x0c,0x00,0x10,0x01,0x00,0x00,0x00,0x00,0x08,0x00,0x10,0x97,0x00,0x00,0x00,0x00, 0x28,0x42,0x00,0x02,0x10,0x40,0x01,0x76,0x00,0x00,0x28,0x21,0x94,0x87,0x00,0x0c, 0x00,0x00,0x00,0x00,0x00,0xe0,0x10,0x21,0x00,0x02,0x14,0x00,0x00,0x02,0x14,0x03, 0x00,0x07,0x24,0x00,0x00,0x04,0x24,0x03,0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21, 0x00,0x04,0x28,0xc0,0x00,0xa4,0x28,0x21,0x27,0x82,0x90,0x20,0x00,0x03,0x18,0x80, 0x00,0x62,0x18,0x21,0x00,0x05,0x28,0x80,0x27,0x82,0x90,0x08,0x00,0xa2,0x10,0x21, 0x8c,0x68,0x00,0x00,0x80,0x44,0x00,0x06,0x27,0x82,0x90,0x10,0x00,0x08,0x1d,0x02, 
0x00,0xa2,0x28,0x21,0x38,0x84,0x00,0x00,0x30,0x63,0x00,0x01,0x01,0x24,0x30,0x0b, 0x80,0xaa,0x00,0x04,0x80,0xa9,0x00,0x05,0x10,0x60,0x00,0x02,0x00,0x08,0x14,0x02, 0x30,0x46,0x00,0x0f,0x15,0x20,0x00,0x28,0x01,0x49,0x10,0x21,0x15,0x40,0x00,0x11, 0x30,0xe3,0xff,0xff,0x92,0x45,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0xa8,0x00,0xff, 0x2d,0x02,0x00,0x04,0x10,0x40,0x01,0x46,0x2d,0x02,0x00,0x10,0x3c,0x04,0xb0,0x05, 0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x24,0x02,0x00,0x01,0x01,0x02,0x10,0x04, 0x00,0x62,0x18,0x25,0xa0,0x83,0x00,0x00,0x96,0x47,0x00,0x0c,0x00,0x00,0x00,0x00, 0x30,0xe3,0xff,0xff,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x27,0x84,0x90,0x10, 0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x21,0x80,0x45,0x00,0x06,0x00,0x03,0x1a,0x00, 0x3c,0x04,0xb0,0x00,0x00,0x65,0x18,0x21,0x00,0x64,0x20,0x21,0x94,0x82,0x00,0x00, 0x82,0x43,0x00,0x10,0x00,0x02,0x14,0x00,0x14,0x60,0x00,0x06,0x00,0x02,0x3c,0x03, 0x30,0xe2,0x00,0x04,0x14,0x40,0x00,0x04,0x01,0x49,0x10,0x21,0x34,0xe2,0x08,0x00, 0xa4,0x82,0x00,0x00,0x01,0x49,0x10,0x21,0x00,0x02,0x16,0x00,0x00,0x02,0x16,0x03, 0x00,0x46,0x10,0x2a,0x10,0x40,0x00,0x7c,0x00,0x00,0x00,0x00,0x82,0x42,0x00,0x10, 0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0e,0x00,0x00,0x00,0x00,0x86,0x43,0x00,0x0c, 0x25,0x44,0x00,0x01,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80, 0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0xa0,0x44,0x00,0x04,0x92,0x23,0x00,0x16, 0x02,0x40,0x20,0x21,0x30,0x63,0x00,0xfb,0x08,0x00,0x10,0x9c,0xa2,0x23,0x00,0x16, 0x86,0x43,0x00,0x0c,0x25,0x24,0x00,0x01,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21, 0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21,0xa0,0x44,0x00,0x05, 0x86,0x45,0x00,0x0c,0x0c,0x00,0x1e,0xea,0x02,0x20,0x20,0x21,0x10,0x40,0x00,0x5a, 0x00,0x00,0x00,0x00,0x92,0x45,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0xa6,0x00,0xff, 0x2c,0xc2,0x00,0x04,0x10,0x40,0x00,0x4c,0x2c,0xc2,0x00,0x10,0x3c,0x04,0xb0,0x05, 0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x24,0x02,0x00,0x01,0x00,0xc2,0x10,0x04, 
0x00,0x02,0x10,0x27,0x00,0x62,0x18,0x24,0xa0,0x83,0x00,0x00,0x92,0x45,0x00,0x08, 0x00,0x00,0x00,0x00,0x30,0xa5,0x00,0xff,0x14,0xa0,0x00,0x33,0x24,0x02,0x00,0x01, 0xa2,0x40,0x00,0x04,0x92,0x22,0x00,0x04,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x0c, 0x24,0x02,0x00,0x01,0xa2,0x22,0x00,0x17,0x92,0x22,0x00,0x17,0x00,0x00,0x00,0x00, 0x10,0x40,0x00,0x04,0x00,0x00,0x00,0x00,0x96,0x22,0x00,0x06,0x08,0x00,0x10,0x97, 0xa6,0x22,0x00,0x14,0x96,0x22,0x00,0x00,0x08,0x00,0x10,0x97,0xa6,0x22,0x00,0x14, 0x92,0x22,0x00,0x0a,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x03,0x00,0x00,0x00,0x00, 0x08,0x00,0x11,0x26,0xa2,0x20,0x00,0x17,0x96,0x24,0x00,0x00,0x96,0x25,0x00,0x06, 0x27,0x86,0x90,0x00,0x00,0x04,0x18,0xc0,0x00,0x64,0x18,0x21,0x00,0x05,0x10,0xc0, 0x00,0x45,0x10,0x21,0x00,0x03,0x18,0x80,0x00,0x66,0x18,0x21,0x00,0x02,0x10,0x80, 0x00,0x46,0x10,0x21,0x8c,0x65,0x00,0x08,0x8c,0x44,0x00,0x08,0x3c,0x03,0x80,0x00, 0x00,0xa3,0x30,0x24,0x10,0xc0,0x00,0x08,0x00,0x83,0x10,0x24,0x10,0x40,0x00,0x04, 0x00,0x00,0x18,0x21,0x10,0xc0,0x00,0x02,0x24,0x03,0x00,0x01,0x00,0x85,0x18,0x2b, 0x08,0x00,0x11,0x26,0xa2,0x23,0x00,0x17,0x10,0x40,0xff,0xfd,0x00,0x85,0x18,0x2b, 0x08,0x00,0x11,0x49,0x00,0x00,0x00,0x00,0x10,0xa2,0x00,0x09,0x24,0x02,0x00,0x02, 0x10,0xa2,0x00,0x05,0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0xca,0x00,0x00,0x00,0x00, 0x08,0x00,0x11,0x21,0xa2,0x40,0x00,0x07,0x08,0x00,0x11,0x21,0xa2,0x40,0x00,0x06, 0x08,0x00,0x11,0x21,0xa2,0x40,0x00,0x05,0x14,0x40,0xff,0xbe,0x3c,0x04,0xb0,0x05, 0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x30,0xa5,0x00,0x0f,0x24,0x02,0x00,0x80, 0x08,0x00,0x11,0x18,0x00,0xa2,0x10,0x07,0x0c,0x00,0x10,0x07,0x02,0x40,0x20,0x21, 0x08,0x00,0x10,0x97,0x00,0x00,0x00,0x00,0x92,0x45,0x00,0x08,0x00,0x00,0x00,0x00, 0x30,0xa6,0x00,0xff,0x2c,0xc2,0x00,0x04,0x10,0x40,0x00,0x99,0x2c,0xc2,0x00,0x10, 0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00,0x24,0x02,0x00,0x01, 0x00,0xc2,0x10,0x04,0x00,0x02,0x10,0x27,0x00,0x62,0x18,0x24,0xa0,0x83,0x00,0x00, 
0x92,0x45,0x00,0x08,0x00,0x00,0x00,0x00,0x30,0xa5,0x00,0xff,0x14,0xa0,0x00,0x80, 0x24,0x02,0x00,0x01,0xa2,0x40,0x00,0x04,0x86,0x43,0x00,0x0c,0x27,0x93,0x90,0x04, 0x96,0x47,0x00,0x0c,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x28,0x80, 0x00,0xb3,0x18,0x21,0x8c,0x64,0x00,0x18,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x04, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x10,0x10,0x40,0x00,0x64,0x00,0x00,0x30,0x21, 0x00,0x07,0x1c,0x00,0x00,0x03,0x1c,0x03,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21, 0x00,0x02,0x10,0x80,0x00,0x53,0x10,0x21,0x8c,0x43,0x00,0x18,0x93,0x82,0x8b,0x71, 0x8c,0x64,0x00,0x04,0x30,0x42,0x00,0x01,0x00,0x04,0x21,0x42,0x14,0x40,0x00,0x4d, 0x30,0x90,0x00,0x01,0x00,0x07,0x2c,0x00,0x00,0x05,0x2c,0x03,0x0c,0x00,0x1b,0x66, 0x02,0x20,0x20,0x21,0x96,0x26,0x00,0x06,0x12,0x00,0x00,0x14,0x30,0xc5,0xff,0xff, 0x02,0x60,0x90,0x21,0x00,0x05,0x10,0xc0,0x00,0x45,0x10,0x21,0x00,0x02,0x10,0x80, 0x00,0x52,0x18,0x21,0x92,0x22,0x00,0x0a,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0b, 0x02,0x20,0x20,0x21,0x8c,0x63,0x00,0x18,0x00,0x00,0x00,0x00,0x8c,0x62,0x00,0x04, 0x00,0x00,0x00,0x00,0x00,0x02,0x11,0x42,0x0c,0x00,0x1b,0x66,0x30,0x50,0x00,0x01, 0x96,0x26,0x00,0x06,0x16,0x00,0xff,0xef,0x30,0xc5,0xff,0xff,0x92,0x22,0x00,0x04, 0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x0d,0x24,0x02,0x00,0x01,0xa2,0x22,0x00,0x17, 0x92,0x22,0x00,0x17,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x05,0x00,0x00,0x00,0x00, 0xa6,0x26,0x00,0x14,0x92,0x22,0x00,0x16,0x08,0x00,0x10,0x96,0x30,0x42,0x00,0xc3, 0x96,0x22,0x00,0x00,0x08,0x00,0x11,0xbd,0xa6,0x22,0x00,0x14,0x92,0x22,0x00,0x0a, 0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x03,0x00,0x00,0x00,0x00,0x08,0x00,0x11,0xb8, 0xa2,0x20,0x00,0x17,0x96,0x24,0x00,0x00,0x30,0xc5,0xff,0xff,0x00,0x05,0x18,0xc0, 0x00,0x04,0x10,0xc0,0x00,0x44,0x10,0x21,0x00,0x65,0x18,0x21,0x27,0x84,0x90,0x00, 0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x21,0x00,0x03,0x18,0x80,0x8c,0x45,0x00,0x08, 0x00,0x64,0x18,0x21,0x8c,0x64,0x00,0x08,0x3c,0x02,0x80,0x00,0x00,0xa2,0x38,0x24, 
0x10,0xe0,0x00,0x08,0x00,0x82,0x10,0x24,0x10,0x40,0x00,0x04,0x00,0x00,0x18,0x21, 0x10,0xe0,0x00,0x02,0x24,0x03,0x00,0x01,0x00,0x85,0x18,0x2b,0x08,0x00,0x11,0xb8, 0xa2,0x23,0x00,0x17,0x10,0x40,0xff,0xfd,0x00,0x85,0x18,0x2b,0x08,0x00,0x11,0xdc, 0x00,0x00,0x00,0x00,0x24,0x05,0x00,0x24,0xf0,0xe5,0x00,0x06,0x00,0x00,0x28,0x12, 0x27,0x82,0x90,0x00,0x00,0xa2,0x28,0x21,0x0c,0x00,0x01,0x49,0x00,0x00,0x20,0x21, 0x96,0x47,0x00,0x0c,0x08,0x00,0x11,0x9a,0x00,0x07,0x2c,0x00,0x27,0x83,0x90,0x10, 0x27,0x82,0x90,0x18,0x00,0xa2,0x10,0x21,0x00,0xa3,0x18,0x21,0x90,0x44,0x00,0x00, 0x90,0x65,0x00,0x05,0x93,0x82,0x80,0x10,0x24,0x07,0x00,0x01,0x0c,0x00,0x21,0x9a, 0xaf,0xa2,0x00,0x10,0x96,0x47,0x00,0x0c,0x08,0x00,0x11,0x8d,0x00,0x07,0x1c,0x00, 0x10,0xa2,0x00,0x09,0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x05,0x24,0x02,0x00,0x03, 0x14,0xa2,0xff,0x7d,0x00,0x00,0x00,0x00,0x08,0x00,0x11,0x7e,0xa2,0x40,0x00,0x07, 0x08,0x00,0x11,0x7e,0xa2,0x40,0x00,0x06,0x08,0x00,0x11,0x7e,0xa2,0x40,0x00,0x05, 0x14,0x40,0xff,0x71,0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00, 0x30,0xa5,0x00,0x0f,0x24,0x02,0x00,0x80,0x08,0x00,0x11,0x75,0x00,0xa2,0x10,0x07, 0x14,0x40,0xfe,0xc3,0x3c,0x04,0xb0,0x05,0x34,0x84,0x02,0x29,0x90,0x83,0x00,0x00, 0x30,0xa5,0x00,0x0f,0x24,0x02,0x00,0x80,0x08,0x00,0x10,0xd0,0x00,0xa2,0x10,0x07, 0x84,0x83,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21, 0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x04,0x00,0x43,0x10,0x21,0x8c,0x47,0x00,0x18, 0x00,0x00,0x00,0x00,0x8c,0xe6,0x00,0x08,0x0c,0x00,0x0f,0x19,0x00,0x00,0x00,0x00, 0x02,0x40,0x20,0x21,0x00,0x00,0x28,0x21,0x00,0x00,0x30,0x21,0x00,0x00,0x38,0x21, 0x0c,0x00,0x1c,0x68,0xaf,0xa2,0x00,0x10,0x00,0x02,0x1e,0x00,0x14,0x60,0xfe,0x6b, 0xa2,0x22,0x00,0x12,0x92,0x43,0x00,0x08,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x40, 0x24,0x02,0x00,0x01,0xa2,0x40,0x00,0x04,0x92,0x28,0x00,0x04,0x00,0x00,0x00,0x00, 0x15,0x00,0x00,0x19,0x24,0x02,0x00,0x01,0x92,0x27,0x00,0x0a,0xa2,0x22,0x00,0x17, 
0x92,0x22,0x00,0x17,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x10,0x00,0x00,0x00,0x00, 0x96,0x22,0x00,0x06,0x00,0x00,0x00,0x00,0xa6,0x22,0x00,0x14,0x92,0x22,0x00,0x16, 0x30,0xe3,0x00,0xff,0x30,0x42,0x00,0xc0,0x10,0x60,0x00,0x03,0xa2,0x22,0x00,0x16, 0x34,0x42,0x00,0x01,0xa2,0x22,0x00,0x16,0x11,0x00,0xfe,0x50,0x00,0x00,0x00,0x00, 0x92,0x22,0x00,0x16,0x08,0x00,0x10,0x96,0x34,0x42,0x00,0x02,0x96,0x22,0x00,0x00, 0x08,0x00,0x12,0x3f,0xa6,0x22,0x00,0x14,0x92,0x27,0x00,0x0a,0x00,0x00,0x00,0x00, 0x14,0xe0,0x00,0x03,0x00,0x00,0x00,0x00,0x08,0x00,0x12,0x38,0xa2,0x20,0x00,0x17, 0x96,0x24,0x00,0x00,0x96,0x25,0x00,0x06,0x27,0x86,0x90,0x00,0x00,0x04,0x18,0xc0, 0x00,0x64,0x18,0x21,0x00,0x05,0x10,0xc0,0x00,0x45,0x10,0x21,0x00,0x03,0x18,0x80, 0x00,0x66,0x18,0x21,0x00,0x02,0x10,0x80,0x00,0x46,0x10,0x21,0x8c,0x65,0x00,0x08, 0x8c,0x44,0x00,0x08,0x3c,0x03,0x80,0x00,0x00,0xa3,0x30,0x24,0x10,0xc0,0x00,0x08, 0x00,0x83,0x10,0x24,0x10,0x40,0x00,0x04,0x00,0x00,0x18,0x21,0x10,0xc0,0x00,0x02, 0x24,0x03,0x00,0x01,0x00,0x85,0x18,0x2b,0x08,0x00,0x12,0x38,0xa2,0x23,0x00,0x17, 0x10,0x40,0xff,0xfd,0x00,0x85,0x18,0x2b,0x08,0x00,0x12,0x67,0x00,0x00,0x00,0x00, 0x10,0x62,0x00,0x09,0x24,0x02,0x00,0x02,0x10,0x62,0x00,0x05,0x24,0x02,0x00,0x03, 0x14,0x62,0xff,0xbd,0x00,0x00,0x00,0x00,0x08,0x00,0x12,0x32,0xa2,0x40,0x00,0x07, 0x08,0x00,0x12,0x32,0xa2,0x40,0x00,0x06,0x08,0x00,0x12,0x32,0xa2,0x40,0x00,0x05, 0x3c,0x02,0x80,0x00,0x00,0x82,0x30,0x24,0x10,0xc0,0x00,0x08,0x00,0xa2,0x18,0x24, 0x10,0x60,0x00,0x04,0x00,0x00,0x10,0x21,0x10,0xc0,0x00,0x02,0x24,0x02,0x00,0x01, 0x00,0xa4,0x10,0x2b,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x10,0x60,0xff,0xfd, 0x00,0xa4,0x10,0x2b,0x08,0x00,0x12,0x82,0x00,0x00,0x00,0x00,0x30,0x82,0xff,0xff, 0x00,0x02,0x18,0xc0,0x00,0x62,0x18,0x21,0x27,0x84,0x90,0x10,0x00,0x03,0x18,0x80, 0x00,0x64,0x18,0x21,0x80,0x66,0x00,0x06,0x00,0x02,0x12,0x00,0x3c,0x03,0xb0,0x00, 0x00,0x46,0x10,0x21,0x00,0x45,0x10,0x21,0x03,0xe0,0x00,0x08,0x00,0x43,0x10,0x21, 
0x27,0xbd,0xff,0xe0,0x30,0x82,0x00,0x7c,0x30,0x84,0xff,0x00,0xaf,0xbf,0x00,0x1c, 0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0x14,0x40,0x00,0x41, 0x00,0x04,0x22,0x03,0x24,0x02,0x00,0x04,0x3c,0x10,0xb0,0x03,0x8e,0x10,0x00,0x00, 0x10,0x82,0x00,0x32,0x24,0x02,0x00,0x08,0x10,0x82,0x00,0x03,0x32,0x02,0x00,0x20, 0x08,0x00,0x12,0xa8,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x17,0x3c,0x02,0xb0,0x06, 0x34,0x42,0x80,0x24,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x67,0x00,0xff, 0x10,0xe0,0x00,0x23,0x00,0x00,0x88,0x21,0x8f,0x85,0x8f,0xe0,0x00,0x40,0x30,0x21, 0x94,0xa2,0x00,0x08,0x8c,0xc3,0x00,0x00,0x26,0x31,0x00,0x01,0x24,0x42,0x00,0x02, 0x30,0x42,0x01,0xff,0x34,0x63,0x01,0x00,0x02,0x27,0x20,0x2a,0xa4,0xa2,0x00,0x08, 0x14,0x80,0xff,0xf7,0xac,0xc3,0x00,0x00,0x84,0xa3,0x00,0x08,0x3c,0x02,0xb0,0x03, 0x34,0x42,0x00,0x30,0xac,0x43,0x00,0x00,0x27,0x92,0xb4,0x00,0x24,0x11,0x00,0x12, 0x8e,0x44,0x00,0x00,0x26,0x31,0xff,0xff,0x90,0x82,0x00,0x10,0x00,0x00,0x00,0x00, 0x10,0x40,0x00,0x03,0x26,0x52,0x00,0x04,0x0c,0x00,0x18,0xd0,0x00,0x00,0x00,0x00, 0x06,0x21,0xff,0xf7,0x24,0x02,0xff,0xdf,0x02,0x02,0x80,0x24,0x3c,0x01,0xb0,0x03, 0x0c,0x00,0x13,0x1c,0xac,0x30,0x00,0x00,0x08,0x00,0x12,0xa8,0x00,0x00,0x00,0x00, 0x8f,0x85,0x8f,0xe0,0x08,0x00,0x12,0xbe,0x00,0x00,0x00,0x00,0x24,0x02,0xff,0x95, 0x3c,0x03,0xb0,0x03,0x02,0x02,0x80,0x24,0x34,0x63,0x00,0x30,0x3c,0x01,0xb0,0x03, 0xac,0x30,0x00,0x00,0x0c,0x00,0x12,0xe5,0xac,0x60,0x00,0x00,0x08,0x00,0x12,0xa8, 0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x50,0x08,0x00,0x12,0xa8, 0xac,0x46,0x00,0x00,0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4b,0x94,0x3c,0x0b,0xb0,0x03, 0xad,0x6a,0x00,0x20,0x3c,0x08,0x80,0x01,0x25,0x08,0x00,0x00,0x3c,0x09,0x80,0x01, 0x25,0x29,0x03,0x50,0x11,0x09,0x00,0x10,0x00,0x00,0x00,0x00,0x3c,0x0a,0x80,0x00, 0x25,0x4a,0x4b,0xbc,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20,0x3c,0x08,0xb0,0x06, 0x35,0x08,0x80,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x8d,0x09,0x00,0x00, 
0x00,0x00,0x00,0x00,0x31,0x29,0x00,0x01,0x00,0x00,0x00,0x00,0x24,0x01,0x00,0x01, 0x15,0x21,0xff,0xf2,0x00,0x00,0x00,0x00,0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4b,0xf8, 0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20,0x3c,0x02,0xb0,0x03,0x8c,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0x34,0x63,0x00,0x40,0x00,0x00,0x00,0x00,0xac,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4c,0x24,0x3c,0x0b,0xb0,0x03, 0xad,0x6a,0x00,0x20,0x3c,0x02,0x80,0x01,0x24,0x42,0x00,0x00,0x3c,0x03,0x80,0x01, 0x24,0x63,0x03,0x50,0x3c,0x04,0xb0,0x00,0x8c,0x85,0x00,0x00,0x00,0x00,0x00,0x00, 0xac,0x45,0x00,0x00,0x24,0x42,0x00,0x04,0x24,0x84,0x00,0x04,0x00,0x43,0x08,0x2a, 0x14,0x20,0xff,0xf9,0x00,0x00,0x00,0x00,0x0c,0x00,0x13,0x1c,0x00,0x00,0x00,0x00, 0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4c,0x70,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20, 0x3c,0x02,0x80,0x01,0x24,0x42,0x03,0x50,0x3c,0x03,0x80,0x01,0x24,0x63,0x3f,0x24, 0xac,0x40,0x00,0x00,0xac,0x40,0x00,0x04,0xac,0x40,0x00,0x08,0xac,0x40,0x00,0x0c, 0x24,0x42,0x00,0x10,0x00,0x43,0x08,0x2a,0x14,0x20,0xff,0xf9,0x00,0x00,0x00,0x00, 0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4c,0xb0,0x3c,0x0b,0xb0,0x03,0xad,0x6a,0x00,0x20, 0x3c,0x1c,0x80,0x01,0x27,0x9c,0x7f,0xf0,0x27,0x9d,0x8b,0xe0,0x00,0x00,0x00,0x00, 0x27,0x9d,0x8f,0xc8,0x3c,0x0a,0x80,0x00,0x25,0x4a,0x4c,0xd4,0x3c,0x0b,0xb0,0x03, 0xad,0x6a,0x00,0x20,0x40,0x80,0x68,0x00,0x40,0x08,0x60,0x00,0x00,0x00,0x00,0x00, 0x35,0x08,0xff,0x01,0x40,0x88,0x60,0x00,0x00,0x00,0x00,0x00,0x0c,0x00,0x15,0x62, 0x00,0x00,0x00,0x00,0x24,0x84,0xf8,0x00,0x30,0x87,0x00,0x03,0x00,0x04,0x30,0x40, 0x00,0xc7,0x20,0x23,0x3c,0x02,0xb0,0x0a,0x27,0xbd,0xff,0xe0,0x24,0x03,0xff,0xff, 0x00,0x82,0x20,0x21,0xaf,0xb1,0x00,0x14,0xac,0x83,0x10,0x00,0xaf,0xbf,0x00,0x18, 0xaf,0xb0,0x00,0x10,0x00,0xa0,0x88,0x21,0x24,0x03,0x00,0x01,0x8c,0x82,0x10,0x00, 0x00,0x00,0x00,0x00,0x14,0x43,0xff,0xfd,0x00,0xc7,0x10,0x23,0x3c,0x03,0xb0,0x0a, 0x00,0x43,0x10,0x21,0x8c,0x50,0x00,0x00,0x0c,0x00,0x13,0x99,0x02,0x20,0x20,0x21, 
0x02,0x11,0x80,0x24,0x00,0x50,0x80,0x06,0x02,0x00,0x10,0x21,0x8f,0xbf,0x00,0x18, 0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x27,0xbd,0xff,0xd8, 0xaf,0xb2,0x00,0x18,0x00,0xa0,0x90,0x21,0x24,0x05,0xff,0xff,0xaf,0xb3,0x00,0x1c, 0xaf,0xbf,0x00,0x20,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0x00,0xc0,0x98,0x21, 0x12,0x45,0x00,0x23,0x24,0x84,0xf8,0x00,0x30,0x83,0x00,0x03,0x00,0x04,0x10,0x40, 0x00,0x40,0x88,0x21,0x00,0x60,0x20,0x21,0x00,0x43,0x10,0x23,0x3c,0x03,0xb0,0x0a, 0x00,0x43,0x10,0x21,0xac,0x45,0x10,0x00,0x00,0x40,0x18,0x21,0x24,0x05,0x00,0x01, 0x8c,0x62,0x10,0x00,0x00,0x00,0x00,0x00,0x14,0x45,0xff,0xfd,0x3c,0x02,0xb0,0x0a, 0x02,0x24,0x88,0x23,0x02,0x22,0x88,0x21,0x8e,0x30,0x00,0x00,0x0c,0x00,0x13,0x99, 0x02,0x40,0x20,0x21,0x00,0x12,0x18,0x27,0x02,0x03,0x80,0x24,0x00,0x53,0x10,0x04, 0x02,0x02,0x80,0x25,0xae,0x30,0x00,0x00,0x24,0x03,0x00,0x01,0x8e,0x22,0x10,0x00, 0x00,0x00,0x00,0x00,0x14,0x43,0xff,0xfd,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x20, 0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28, 0x30,0x82,0x00,0x03,0x00,0x04,0x18,0x40,0x00,0x62,0x18,0x23,0x3c,0x04,0xb0,0x0a, 0x00,0x64,0x18,0x21,0xac,0x66,0x00,0x00,0x24,0x04,0x00,0x01,0x8c,0x62,0x10,0x00, 0x00,0x00,0x00,0x00,0x14,0x44,0xff,0xfd,0x00,0x00,0x00,0x00,0x08,0x00,0x13,0x87, 0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x21,0x00,0x64,0x10,0x06,0x30,0x42,0x00,0x01, 0x14,0x40,0x00,0x05,0x00,0x00,0x00,0x00,0x24,0x63,0x00,0x01,0x2c,0x62,0x00,0x20, 0x14,0x40,0xff,0xf9,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x60,0x10,0x21, 0x27,0xbd,0xff,0xe0,0x3c,0x03,0xb0,0x05,0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14, 0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x1c,0x00,0x80,0x90,0x21,0x00,0xa0,0x80,0x21, 0x00,0xc0,0x88,0x21,0x34,0x63,0x02,0x2e,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x42,0x00,0x01,0x14,0x40,0xff,0xfc,0x24,0x04,0x08,0x24,0x3c,0x05,0x00,0xc0, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x03,0x24,0x04,0x08,0x34,0x3c,0x05,0x00,0xc0, 
0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x03,0x3c,0x02,0xc0,0x00,0x00,0x10,0x1c,0x00, 0x34,0x42,0x04,0x00,0x3c,0x04,0xb0,0x05,0x3c,0x05,0xb0,0x05,0x24,0x63,0x16,0x09, 0x02,0x22,0x10,0x21,0x34,0x84,0x04,0x20,0x34,0xa5,0x04,0x24,0x3c,0x06,0xb0,0x05, 0xac,0x83,0x00,0x00,0x24,0x07,0x00,0x01,0xac,0xa2,0x00,0x00,0x34,0xc6,0x02,0x28, 0x24,0x02,0x00,0x20,0xae,0x47,0x00,0x3c,0x24,0x04,0x08,0x24,0xa0,0xc2,0x00,0x00, 0x3c,0x05,0x00,0xc0,0xa2,0x47,0x00,0x11,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01, 0x24,0x04,0x08,0x34,0x3c,0x05,0x00,0xc0,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01, 0x8f,0xbf,0x00,0x1c,0x8f,0xb2,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x20,0x24,0x02,0x00,0x06,0xac,0x82,0x00,0x0c,0xa0,0x80,0x00,0x50, 0xac,0x80,0x00,0x00,0xac,0x80,0x00,0x04,0xac,0x80,0x00,0x08,0xac,0x80,0x00,0x14, 0xac,0x80,0x00,0x18,0xac,0x80,0x00,0x1c,0xa4,0x80,0x00,0x20,0xac,0x80,0x00,0x24, 0xac,0x80,0x00,0x28,0xac,0x80,0x00,0x2c,0xa0,0x80,0x00,0x30,0xa0,0x80,0x00,0x31, 0xac,0x80,0x00,0x34,0xac,0x80,0x00,0x38,0xa0,0x80,0x00,0x3c,0xac,0x82,0x00,0x10, 0xa0,0x80,0x00,0x44,0xac,0x80,0x00,0x48,0x03,0xe0,0x00,0x08,0xac,0x80,0x00,0x4c, 0x3c,0x04,0xb0,0x06,0x34,0x84,0x80,0x00,0x8c,0x83,0x00,0x00,0x3c,0x02,0x12,0x00, 0x3c,0x05,0xb0,0x03,0x00,0x62,0x18,0x25,0x34,0xa5,0x00,0x8b,0x24,0x02,0xff,0x80, 0xac,0x83,0x00,0x00,0x03,0xe0,0x00,0x08,0xa0,0xa2,0x00,0x00,0x3c,0x04,0xb0,0x03, 0x34,0x84,0x00,0x0b,0x24,0x02,0x00,0x22,0x3c,0x05,0xb0,0x01,0x3c,0x06,0x45,0x67, 0x3c,0x0a,0xb0,0x09,0xa0,0x82,0x00,0x00,0x34,0xa5,0x00,0x04,0x34,0xc6,0x89,0xaa, 0x35,0x4a,0x00,0x04,0x24,0x02,0x01,0x23,0x3c,0x0b,0xb0,0x09,0x3c,0x07,0x01,0x23, 0x3c,0x0c,0xb0,0x09,0x3c,0x01,0xb0,0x01,0xac,0x20,0x00,0x00,0x27,0xbd,0xff,0xe0, 0xac,0xa0,0x00,0x00,0x35,0x6b,0x00,0x08,0x3c,0x01,0xb0,0x09,0xac,0x26,0x00,0x00, 0x34,0xe7,0x45,0x66,0xa5,0x42,0x00,0x00,0x35,0x8c,0x00,0x0c,0x24,0x02,0xcd,0xef, 0x3c,0x0d,0xb0,0x09,0x3c,0x08,0xcd,0xef,0x3c,0x0e,0xb0,0x09,0xad,0x67,0x00,0x00, 
0xaf,0xb7,0x00,0x1c,0xa5,0x82,0x00,0x00,0xaf,0xb6,0x00,0x18,0xaf,0xb5,0x00,0x14, 0xaf,0xb4,0x00,0x10,0xaf,0xb3,0x00,0x0c,0xaf,0xb2,0x00,0x08,0xaf,0xb1,0x00,0x04, 0xaf,0xb0,0x00,0x00,0x35,0xad,0x00,0x10,0x35,0x08,0x01,0x22,0x35,0xce,0x00,0x14, 0x24,0x02,0x89,0xab,0x3c,0x0f,0xb0,0x09,0x3c,0x09,0x89,0xab,0x3c,0x10,0xb0,0x09, 0x3c,0x11,0xb0,0x09,0x3c,0x12,0xb0,0x09,0x3c,0x13,0xb0,0x09,0x3c,0x14,0xb0,0x09, 0x3c,0x15,0xb0,0x09,0x3c,0x16,0xb0,0x09,0x3c,0x17,0xb0,0x09,0xad,0xa8,0x00,0x00, 0x24,0x03,0xff,0xff,0xa5,0xc2,0x00,0x00,0x35,0xef,0x00,0x18,0x35,0x29,0xcd,0xee, 0x36,0x10,0x00,0x1c,0x36,0x31,0x00,0x20,0x36,0x52,0x00,0x24,0x36,0x73,0x00,0x28, 0x36,0x94,0x00,0x2c,0x36,0xb5,0x00,0x30,0x36,0xd6,0x00,0x34,0x36,0xf7,0x00,0x38, 0x24,0x02,0x45,0x67,0xad,0xe9,0x00,0x00,0xa6,0x02,0x00,0x00,0xae,0x23,0x00,0x00, 0x8f,0xb0,0x00,0x00,0xa6,0x43,0x00,0x00,0x8f,0xb1,0x00,0x04,0xae,0x63,0x00,0x00, 0x8f,0xb2,0x00,0x08,0xa6,0x83,0x00,0x00,0x8f,0xb3,0x00,0x0c,0xae,0xa3,0x00,0x00, 0x8f,0xb4,0x00,0x10,0xa6,0xc3,0x00,0x00,0x8f,0xb5,0x00,0x14,0xae,0xe3,0x00,0x00, 0x7b,0xb6,0x00,0xfc,0x3c,0x18,0xb0,0x09,0x37,0x18,0x00,0x3c,0xa7,0x03,0x00,0x00, 0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00, 0x34,0x63,0x00,0x20,0x24,0x42,0x51,0x48,0xac,0x62,0x00,0x00,0x8c,0x83,0x00,0x34, 0x34,0x02,0xff,0xff,0x00,0x43,0x10,0x2a,0x14,0x40,0x01,0x04,0x00,0x80,0x28,0x21, 0x8c,0x86,0x00,0x08,0x24,0x02,0x00,0x03,0x10,0xc2,0x00,0xf7,0x00,0x00,0x00,0x00, 0x8c,0xa2,0x00,0x2c,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x4f,0x24,0x02,0x00,0x06, 0x3c,0x03,0xb0,0x05,0x34,0x63,0x04,0x50,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x42,0x00,0xff,0x14,0x40,0x00,0xdd,0xac,0xa2,0x00,0x2c,0x24,0x02,0x00,0x01, 0x10,0xc2,0x00,0xdc,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xc2,0x00,0xca, 0x00,0x00,0x00,0x00,0x8c,0xa7,0x00,0x04,0x24,0x02,0x00,0x02,0x10,0xe2,0x00,0xc0, 0x00,0x00,0x00,0x00,0x8c,0xa2,0x00,0x14,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x09, 
0x24,0x02,0x00,0x01,0x3c,0x03,0xb0,0x09,0x34,0x63,0x01,0x60,0x90,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0x10,0x40,0x00,0x05,0xac,0xa2,0x00,0x14, 0x24,0x02,0x00,0x01,0xac,0xa2,0x00,0x00,0x03,0xe0,0x00,0x08,0xac,0xa0,0x00,0x14, 0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xd0,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00, 0x04,0x61,0x00,0x19,0x3c,0x02,0xb0,0x03,0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x2e, 0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,0x14,0x40,0x00,0x12, 0x3c,0x02,0xb0,0x03,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x42,0x90,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x0c,0x3c,0x02,0xb0,0x03,0x80,0xa2,0x00,0x50, 0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x08,0x3c,0x02,0xb0,0x03,0x14,0xc0,0x00,0x07, 0x34,0x42,0x00,0x3f,0x24,0x02,0x00,0x0e,0x24,0x03,0x00,0x01,0xac,0xa2,0x00,0x00, 0x03,0xe0,0x00,0x08,0xa0,0xa3,0x00,0x50,0x34,0x42,0x00,0x3f,0x90,0x44,0x00,0x00, 0x24,0x03,0x00,0x01,0x10,0x64,0x00,0x7f,0x3c,0x03,0xb0,0x05,0x80,0xa2,0x00,0x31, 0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0a,0x3c,0x02,0xb0,0x06,0x34,0x42,0x80,0x18, 0x8c,0x43,0x00,0x00,0x3c,0x04,0xf0,0x00,0x3c,0x02,0x80,0x00,0x00,0x64,0x18,0x24, 0x10,0x62,0x00,0x03,0x24,0x02,0x00,0x09,0x03,0xe0,0x00,0x08,0xac,0xa2,0x00,0x00, 0x8c,0xa2,0x00,0x40,0x00,0x00,0x00,0x00,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00, 0x10,0x60,0x00,0x09,0x3c,0x03,0xb0,0x03,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2c, 0x8c,0x43,0x00,0x00,0x3c,0x04,0x00,0x02,0x00,0x64,0x18,0x24,0x14,0x60,0xff,0xf2, 0x24,0x02,0x00,0x10,0x3c,0x03,0xb0,0x03,0x34,0x63,0x02,0x01,0x90,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x80,0x10,0x40,0x00,0x0e,0x00,0x00,0x00,0x00, 0x8c,0xa3,0x00,0x0c,0x00,0x00,0x00,0x00,0xac,0xa3,0x00,0x10,0x3c,0x02,0xb0,0x03, 0x90,0x42,0x02,0x01,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x0f,0xac,0xa2,0x00,0x0c, 0x90,0xa3,0x00,0x0f,0x24,0x02,0x00,0x0d,0x3c,0x01,0xb0,0x03,0x08,0x00,0x14,0xb2, 0xa0,0x23,0x02,0x01,0x3c,0x02,0xb0,0x09,0x34,0x42,0x01,0x80,0x90,0x44,0x00,0x00, 
0x00,0x00,0x00,0x00,0x00,0x04,0x1e,0x00,0x00,0x03,0x1e,0x03,0x10,0x60,0x00,0x15, 0xa0,0xa4,0x00,0x44,0x24,0x02,0x00,0x01,0x10,0x62,0x00,0x0b,0x24,0x02,0x00,0x02, 0x10,0x62,0x00,0x03,0x24,0x03,0x00,0x0d,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 0x8c,0xa2,0x00,0x0c,0xac,0xa3,0x00,0x00,0x24,0x03,0x00,0x04,0xac,0xa2,0x00,0x10, 0x03,0xe0,0x00,0x08,0xac,0xa3,0x00,0x0c,0x24,0x02,0x00,0x0d,0xac,0xa2,0x00,0x00, 0x24,0x03,0x00,0x04,0x24,0x02,0x00,0x06,0xac,0xa3,0x00,0x10,0x03,0xe0,0x00,0x08, 0xac,0xa2,0x00,0x0c,0x8c,0xa3,0x00,0x38,0x24,0x04,0x00,0x01,0x10,0x64,0x00,0x2d, 0x24,0x02,0x00,0x02,0x10,0x60,0x00,0x19,0x00,0x00,0x00,0x00,0x10,0x62,0x00,0x10, 0x24,0x02,0x00,0x04,0x10,0x62,0x00,0x04,0x00,0x00,0x00,0x00,0xac,0xa0,0x00,0x38, 0x03,0xe0,0x00,0x08,0xac,0xa0,0x00,0x00,0x10,0xe4,0x00,0x07,0x24,0x02,0x00,0x03, 0x80,0xa2,0x00,0x30,0x00,0x00,0x00,0x00,0x00,0x02,0x18,0x0b,0xac,0xa3,0x00,0x00, 0x03,0xe0,0x00,0x08,0xac,0xa0,0x00,0x38,0x08,0x00,0x15,0x04,0xac,0xa2,0x00,0x00, 0x10,0xe4,0x00,0x02,0x24,0x02,0x00,0x03,0x24,0x02,0x00,0x0c,0xac,0xa2,0x00,0x00, 0x24,0x02,0x00,0x04,0x03,0xe0,0x00,0x08,0xac,0xa2,0x00,0x38,0x10,0xe4,0x00,0x0e, 0x3c,0x03,0xb0,0x06,0x34,0x63,0x80,0x24,0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x42,0x00,0xff,0x10,0x40,0x00,0x06,0xac,0xa2,0x00,0x18,0x24,0x02,0x00,0x02, 0xac,0xa2,0x00,0x00,0xac,0xa0,0x00,0x18,0x08,0x00,0x15,0x0d,0x24,0x02,0x00,0x01, 0x08,0x00,0x15,0x1a,0xac,0xa0,0x00,0x00,0x24,0x02,0x00,0x03,0x08,0x00,0x15,0x1a, 0xac,0xa2,0x00,0x00,0x24,0x03,0x00,0x0b,0xac,0xa2,0x00,0x38,0x03,0xe0,0x00,0x08, 0xac,0xa3,0x00,0x00,0x34,0x63,0x02,0x2e,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x42,0x00,0x01,0x14,0x40,0xff,0x7d,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x42, 0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x60,0xff,0x78,0x00,0x00,0x00,0x00, 0x10,0xc0,0xff,0x81,0x24,0x02,0x00,0x0e,0x08,0x00,0x14,0xa7,0x00,0x00,0x00,0x00, 0x80,0xa2,0x00,0x30,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x3e,0x24,0x02,0x00,0x04, 
0x08,0x00,0x14,0xb2,0x00,0x00,0x00,0x00,0x84,0xa2,0x00,0x20,0x00,0x00,0x00,0x00, 0x10,0x40,0xff,0x75,0x24,0x02,0x00,0x06,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2e, 0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x01,0x30,0x63,0x00,0xff, 0x00,0x60,0x10,0x21,0x14,0x40,0xff,0x2b,0xa4,0xa3,0x00,0x20,0x08,0x00,0x14,0xb2, 0x24,0x02,0x00,0x06,0x8c,0xa2,0x00,0x1c,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0x66, 0x24,0x02,0x00,0x05,0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x2c,0x8c,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0x10,0x40,0xff,0x1b,0xac,0xa2,0x00,0x1c, 0x08,0x00,0x14,0xb2,0x24,0x02,0x00,0x05,0x3c,0x02,0xb0,0x05,0x8c,0x42,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x02,0x17,0x42,0x30,0x42,0x00,0x01,0x14,0x40,0xff,0x56, 0x24,0x02,0x00,0x06,0x08,0x00,0x14,0x60,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x0a, 0x03,0xe0,0x00,0x08,0xac,0x82,0x00,0x00,0x27,0xbd,0xff,0xd8,0xaf,0xb0,0x00,0x10, 0x27,0x90,0x86,0x58,0xaf,0xbf,0x00,0x20,0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18, 0x0c,0x00,0x29,0xd5,0xaf,0xb1,0x00,0x14,0xaf,0x90,0x8f,0xe0,0x48,0x02,0x00,0x00, 0x0c,0x00,0x13,0xf0,0x00,0x00,0x00,0x00,0x0c,0x00,0x18,0x1f,0x02,0x00,0x20,0x21, 0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x3a,0x94,0x43,0x00,0x00,0x00,0x00,0x00,0x00, 0xa3,0x83,0x8f,0xe4,0x0c,0x00,0x00,0x34,0x00,0x00,0x00,0x00,0x0c,0x00,0x13,0xfb, 0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x98,0x0c,0x00,0x27,0x59,0x00,0x00,0x00,0x00, 0x93,0x84,0x80,0x10,0x0c,0x00,0x21,0x3f,0x00,0x00,0x00,0x00,0x27,0x84,0x89,0x18, 0x0c,0x00,0x06,0xe5,0x00,0x00,0x00,0x00,0x0c,0x00,0x01,0x39,0x00,0x00,0x00,0x00, 0x27,0x84,0x84,0x40,0x0c,0x00,0x13,0xd9,0x00,0x00,0x00,0x00,0x27,0x82,0x89,0x4c, 0xaf,0x82,0x84,0x80,0x0c,0x00,0x00,0x5f,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03, 0x34,0x63,0x01,0x08,0x3c,0x04,0xb0,0x09,0x3c,0x05,0xb0,0x09,0x8c,0x66,0x00,0x00, 0x34,0x84,0x01,0x68,0x34,0xa5,0x01,0x40,0x24,0x02,0xc8,0x80,0x24,0x03,0x00,0x0a, 0xa4,0x82,0x00,0x00,0xa4,0xa3,0x00,0x00,0x3c,0x04,0xb0,0x03,0x8c,0x82,0x00,0x00, 
0x8f,0x85,0x84,0x40,0xaf,0x86,0x84,0x38,0x34,0x42,0x00,0x20,0xac,0x82,0x00,0x00, 0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x58,0x8c,0x43,0x00,0x00,0x2c,0xa4,0x00,0x11, 0x34,0x63,0x01,0x00,0xac,0x43,0x00,0x00,0x10,0x80,0xff,0xfa,0x3c,0x02,0xb0,0x03, 0x3c,0x03,0x80,0x01,0x00,0x05,0x10,0x80,0x24,0x63,0x02,0x00,0x00,0x43,0x10,0x21, 0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00, 0x27,0x84,0x84,0x98,0x0c,0x00,0x26,0x8e,0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x40, 0x0c,0x00,0x14,0x52,0x00,0x00,0x00,0x00,0x93,0x83,0x81,0xf1,0x24,0x02,0x00,0x01, 0x10,0x62,0x00,0x08,0x00,0x00,0x00,0x00,0x8f,0x85,0x84,0x40,0x8f,0x82,0x84,0x74, 0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x01,0xaf,0x82,0x84,0x74,0x08,0x00,0x15,0x9d, 0x3c,0x02,0xb0,0x03,0x27,0x84,0x84,0x98,0x0c,0x00,0x27,0x0d,0x00,0x00,0x00,0x00, 0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x98,0x0c,0x00,0x28,0xdd, 0x00,0x00,0x00,0x00,0xa3,0x82,0x84,0x71,0x8f,0x82,0x84,0x74,0xaf,0x80,0x84,0x40, 0x24,0x42,0x00,0x01,0xaf,0x82,0x84,0x74,0x08,0x00,0x15,0x9c,0x00,0x00,0x28,0x21, 0x27,0x84,0x86,0x58,0x0c,0x00,0x19,0x5b,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff, 0x14,0x40,0x00,0x05,0x3c,0x03,0xb0,0x05,0xaf,0x80,0x84,0x40,0xaf,0x80,0x84,0x44, 0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,0x34,0x63,0x04,0x50,0x90,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x6c,0x14,0x40,0x00,0x20, 0x24,0x02,0x00,0x01,0x8f,0x84,0x84,0x48,0x00,0x00,0x00,0x00,0x10,0x82,0x00,0x20, 0x3c,0x03,0xb0,0x09,0x34,0x63,0x01,0x60,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x54,0x14,0x40,0x00,0x15,0x24,0x02,0x00,0x01, 0x24,0x02,0x00,0x02,0x10,0x82,0x00,0x07,0x00,0x00,0x00,0x00,0x24,0x05,0x00,0x03, 0x24,0x02,0x00,0x01,0xaf,0x82,0x84,0x44,0xaf,0x85,0x84,0x40,0x08,0x00,0x15,0xb6, 0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2e,0x90,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x01,0x30,0x63,0x00,0xff,0x00,0x60,0x10,0x21, 
0xa7,0x83,0x84,0x60,0x14,0x40,0xff,0xf1,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0xaf,0x82,0x84,0x44,0xaf,0x80,0x84,0x40,0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00, 0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x2c,0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x5c,0x14,0x40,0xff,0xf5,0x24,0x02,0x00,0x01, 0x08,0x00,0x15,0xe1,0x3c,0x03,0xb0,0x09,0x27,0x84,0x86,0x58,0x0c,0x00,0x1a,0xd1, 0x00,0x00,0x00,0x00,0x83,0x82,0x84,0x70,0x00,0x00,0x00,0x00,0x14,0x40,0xff,0xec, 0x24,0x02,0x00,0x02,0x3c,0x03,0xb0,0x05,0x34,0x63,0x04,0x50,0x90,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x6c,0x14,0x40,0xff,0xe4, 0x24,0x02,0x00,0x02,0x8f,0x84,0x84,0x48,0x24,0x02,0x00,0x01,0x10,0x82,0x00,0x12, 0x24,0x02,0x00,0x02,0x10,0x82,0x00,0x04,0x00,0x00,0x00,0x00,0x24,0x05,0x00,0x04, 0x08,0x00,0x15,0xed,0x24,0x02,0x00,0x02,0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2e, 0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x01,0x30,0x63,0x00,0xff, 0x00,0x60,0x10,0x21,0xa7,0x83,0x84,0x60,0x14,0x40,0xff,0xf4,0x00,0x00,0x00,0x00, 0x08,0x00,0x15,0xfc,0x24,0x02,0x00,0x02,0x3c,0x03,0xb0,0x05,0x34,0x63,0x02,0x2c, 0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xff,0xaf,0x82,0x84,0x5c, 0x14,0x40,0xff,0xf7,0x00,0x00,0x00,0x00,0x08,0x00,0x16,0x1d,0x24,0x02,0x00,0x02, 0x27,0x84,0x89,0x18,0x0c,0x00,0x0b,0x55,0x00,0x00,0x00,0x00,0x8f,0x83,0x84,0x44, 0xaf,0x82,0x84,0x5c,0x38,0x64,0x00,0x02,0x00,0x04,0x18,0x0a,0xaf,0x83,0x84,0x44, 0x14,0x40,0xff,0xad,0x24,0x05,0x00,0x05,0x8f,0x82,0x89,0x58,0xaf,0x80,0x84,0x40, 0x10,0x40,0x00,0x02,0x24,0x04,0x00,0x01,0xaf,0x84,0x84,0x48,0x93,0x82,0x89,0x66, 0x00,0x00,0x00,0x00,0x10,0x40,0xff,0x6c,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x05, 0x34,0x42,0x00,0x08,0x8c,0x43,0x00,0x00,0x3c,0x04,0x20,0x00,0x00,0x64,0x18,0x24, 0x10,0x60,0xff,0x65,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xa0, 0x8c,0x43,0x00,0x00,0x3c,0x04,0x80,0x00,0xaf,0x80,0x89,0x40,0x24,0x63,0x00,0x01, 
0xac,0x43,0x00,0x00,0x3c,0x01,0xb0,0x05,0xac,0x24,0x00,0x08,0xaf,0x80,0x89,0x3c, 0xaf,0x80,0x89,0x44,0xaf,0x80,0x89,0x48,0xaf,0x80,0x89,0x54,0xaf,0x80,0x89,0x4c, 0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,0x83,0x82,0x84,0x90,0x00,0x00,0x00,0x00, 0x10,0x40,0x00,0x02,0x24,0x02,0x00,0x20,0xaf,0x82,0x84,0x5c,0x8f,0x85,0x84,0x5c, 0x27,0x84,0x89,0x18,0x0c,0x00,0x0d,0x30,0x00,0x00,0x00,0x00,0x00,0x02,0x1e,0x00, 0xa3,0x82,0x84,0x70,0xaf,0x80,0x84,0x5c,0x10,0x60,0xff,0x8e,0x00,0x00,0x00,0x00, 0x3c,0x02,0xb0,0x05,0x34,0x42,0x02,0x2e,0x90,0x43,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x63,0x00,0x01,0x30,0x63,0x00,0xff,0x00,0x60,0x10,0x21,0xa7,0x83,0x84,0x60, 0x10,0x40,0x00,0x04,0x24,0x04,0x00,0x02,0xaf,0x84,0x84,0x48,0x08,0x00,0x15,0xfd, 0x00,0x00,0x00,0x00,0x08,0x00,0x15,0xee,0x24,0x05,0x00,0x06,0x27,0x84,0x84,0x40, 0x27,0x85,0x89,0x18,0x0c,0x00,0x0d,0xfd,0x00,0x00,0x00,0x00,0x8f,0x82,0x84,0x64, 0xaf,0x80,0x84,0x6c,0x14,0x40,0x00,0x19,0x00,0x40,0x18,0x21,0x8f,0x82,0x84,0x68, 0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x15,0x24,0x02,0x00,0x02,0x8f,0x83,0x84,0x48, 0x00,0x00,0x00,0x00,0x10,0x62,0x00,0x0b,0x3c,0x02,0x40,0x00,0x8f,0x83,0x84,0x44, 0x24,0x02,0x00,0x01,0x10,0x62,0x00,0x02,0x24,0x05,0x00,0x03,0x24,0x05,0x00,0x06, 0xaf,0x85,0x84,0x40,0x24,0x04,0x00,0x03,0xaf,0x84,0x84,0x48,0x08,0x00,0x15,0xb6, 0x00,0x00,0x00,0x00,0x34,0x42,0x00,0x14,0x3c,0x01,0xb0,0x05,0xac,0x22,0x00,0x00, 0xaf,0x80,0x84,0x40,0x08,0x00,0x16,0x96,0x24,0x04,0x00,0x03,0x10,0x60,0x00,0x10, 0x00,0x00,0x00,0x00,0x27,0x85,0x89,0x18,0x27,0x84,0x84,0x40,0x0c,0x00,0x0e,0x21, 0x00,0x00,0x00,0x00,0x8f,0x83,0x84,0x44,0x24,0x02,0x00,0x01,0xa3,0x80,0x84,0x70, 0xaf,0x80,0x84,0x48,0x10,0x62,0x00,0x02,0x24,0x05,0x00,0x03,0x24,0x05,0x00,0x04, 0xaf,0x85,0x84,0x40,0xaf,0x80,0x84,0x64,0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00, 0x83,0x82,0x84,0x90,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x04,0x00,0x00,0x00,0x00, 0x27,0x84,0x89,0x18,0x0c,0x00,0x10,0x69,0x00,0x00,0x00,0x00,0x8f,0x82,0x84,0x44, 
0xa3,0x80,0x84,0x70,0xaf,0x80,0x84,0x40,0xaf,0x80,0x84,0x48,0x14,0x40,0x00,0x03, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0xaf,0x82,0x84,0x44,0xaf,0x80,0x84,0x68, 0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x40,0x27,0x85,0x89,0x18, 0x0c,0x00,0x0e,0x21,0x00,0x00,0x00,0x00,0x8f,0x82,0x84,0x44,0xa3,0x80,0x84,0x70, 0xaf,0x80,0x84,0x40,0xaf,0x80,0x84,0x48,0x14,0x40,0xfe,0xeb,0x00,0x00,0x00,0x00, 0x24,0x02,0x00,0x02,0xaf,0x82,0x84,0x44,0x08,0x00,0x15,0xb6,0x00,0x00,0x00,0x00, 0x27,0x84,0x89,0x18,0x0c,0x00,0x10,0x69,0x00,0x00,0x00,0x00,0x08,0x00,0x16,0xc6, 0x00,0x00,0x00,0x00,0x27,0x84,0x84,0x98,0x0c,0x00,0x29,0x73,0x00,0x00,0x00,0x00, 0x08,0x00,0x15,0xc5,0x00,0x00,0x00,0x00,0x0c,0x00,0x24,0x05,0x00,0x00,0x00,0x00, 0x0c,0x00,0x26,0xff,0x00,0x00,0x00,0x00,0x0c,0x00,0x18,0x11,0x00,0x00,0x00,0x00, 0x93,0x83,0xbc,0x18,0x00,0x00,0x00,0x00,0x14,0x60,0x00,0x2b,0x3c,0x02,0xb0,0x03, 0x34,0x42,0x01,0x08,0x8c,0x44,0x00,0x00,0x8f,0x83,0xbc,0x10,0x8f,0x82,0xbc,0x14, 0x00,0x83,0x18,0x23,0x00,0x43,0x10,0x2b,0x10,0x40,0x00,0x23,0x3c,0x02,0xb0,0x03, 0x24,0x04,0x05,0xa0,0x34,0x42,0x01,0x18,0x8c,0x42,0x00,0x00,0x0c,0x00,0x06,0xd1, 0x00,0x00,0x00,0x00,0x24,0x04,0x05,0xa4,0x0c,0x00,0x06,0xd1,0x00,0x02,0x84,0x02, 0x30,0x51,0xff,0xff,0x24,0x04,0x05,0xa8,0x00,0x02,0x94,0x02,0x0c,0x00,0x06,0xd1, 0x3a,0x10,0xff,0xff,0x3a,0x31,0xff,0xff,0x30,0x42,0xff,0xff,0x2e,0x10,0x00,0x01, 0x2e,0x31,0x00,0x01,0x3a,0x52,0xff,0xff,0x02,0x11,0x80,0x25,0x2e,0x52,0x00,0x01, 0x38,0x42,0xff,0xff,0x02,0x12,0x80,0x25,0x2c,0x42,0x00,0x01,0x02,0x02,0x80,0x25, 0x16,0x00,0x00,0x02,0x24,0x04,0x00,0x02,0x00,0x00,0x20,0x21,0x0c,0x00,0x05,0x6e, 0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x08,0x8c,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0xaf,0x83,0xbc,0x10,0x0c,0x00,0x01,0xe9,0x00,0x00,0x00,0x00, 0xaf,0x80,0x84,0x40,0xaf,0x80,0x84,0x74,0x08,0x00,0x15,0x9c,0x00,0x00,0x28,0x21, 0x27,0x90,0xb4,0x00,0x24,0x11,0x00,0x12,0x8e,0x04,0x00,0x00,0x00,0x00,0x00,0x00, 
0x90,0x82,0x00,0x10,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x03,0x00,0x00,0x00,0x00, 0x0c,0x00,0x18,0xd0,0x00,0x00,0x00,0x00,0x26,0x31,0xff,0xff,0x06,0x21,0xff,0xf6, 0x26,0x10,0x00,0x04,0xaf,0x80,0x84,0x40,0x08,0x00,0x15,0xb7,0x00,0x00,0x28,0x21, 0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x08,0x8c,0x44,0x00,0x00,0x8f,0x82,0x84,0x38, 0x00,0x04,0x19,0xc2,0x00,0x02,0x11,0xc2,0x10,0x62,0xff,0xf6,0x00,0x00,0x00,0x00, 0x3c,0x02,0xb0,0x03,0x34,0x42,0x01,0x02,0x90,0x43,0x00,0x00,0x3c,0x12,0xb0,0x05, 0xaf,0x84,0x84,0x38,0x30,0x63,0x00,0xff,0x00,0x03,0x11,0x40,0x00,0x43,0x10,0x23, 0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x00,0x02,0x99,0x00,0x00,0x00,0x88,0x21, 0x36,0x52,0x02,0x2c,0x27,0x90,0xb4,0x00,0x8e,0x04,0x00,0x00,0x00,0x00,0x00,0x00, 0x90,0x83,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x62,0x00,0x03,0x10,0x40,0x00,0x06, 0x30,0x62,0x00,0x1c,0x14,0x40,0x00,0x04,0x00,0x00,0x00,0x00,0x8f,0x85,0x84,0x38, 0x0c,0x00,0x1e,0x94,0x02,0x60,0x30,0x21,0x8e,0x42,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x42,0x00,0xff,0x14,0x40,0xff,0xd7,0x00,0x00,0x00,0x00,0x26,0x31,0x00,0x01, 0x2a,0x22,0x00,0x13,0x14,0x40,0xff,0xec,0x26,0x10,0x00,0x04,0x08,0x00,0x17,0x21, 0x00,0x00,0x00,0x00,0x8f,0x84,0x84,0x4c,0x27,0x85,0x89,0x18,0x0c,0x00,0x17,0xa4, 0x00,0x00,0x00,0x00,0x8f,0x83,0x84,0x4c,0x24,0x02,0x00,0x04,0x14,0x62,0xfe,0xa5, 0x00,0x00,0x00,0x00,0x08,0x00,0x15,0xee,0x24,0x05,0x00,0x05,0x3c,0x02,0xb0,0x03, 0x34,0x42,0x00,0x3f,0x90,0x44,0x00,0x00,0x24,0x03,0x00,0x01,0x10,0x64,0x00,0x08, 0x00,0x00,0x00,0x00,0x27,0x84,0x89,0x18,0x0c,0x00,0x24,0x2c,0x00,0x00,0x00,0x00, 0x24,0x05,0x00,0x05,0xaf,0x85,0x84,0x40,0x08,0x00,0x15,0xb7,0x00,0x00,0x00,0x00, 0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x14,0x8c,0x44,0x00,0x00,0x0c,0x00,0x24,0x49, 0x00,0x00,0x00,0x00,0x08,0x00,0x17,0x65,0x24,0x05,0x00,0x05,0x8f,0x82,0x89,0x4c, 0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0d,0x00,0x00,0x00,0x00,0x8f,0x84,0xb4,0x40, 0xaf,0x80,0x89,0x4c,0x94,0x85,0x00,0x14,0x0c,0x00,0x1b,0x66,0x00,0x00,0x00,0x00, 
0x93,0x82,0x8b,0x71,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x02,0x10,0x40,0x00,0x03, 0x00,0x00,0x00,0x00,0x0c,0x00,0x01,0x57,0x00,0x00,0x20,0x21,0x8f,0x84,0xb4,0x40, 0x0c,0x00,0x18,0xd0,0x00,0x00,0x00,0x00,0x08,0x00,0x17,0x21,0x00,0x00,0x00,0x00, 0x3c,0x02,0xff,0x90,0x27,0xbd,0xff,0xe8,0x00,0x80,0x18,0x21,0x34,0x42,0x00,0x01, 0x27,0x84,0x89,0x18,0x10,0x62,0x00,0x05,0xaf,0xbf,0x00,0x10,0x8f,0xbf,0x00,0x10, 0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x0c,0x00,0x06,0xe5, 0x00,0x00,0x00,0x00,0x27,0x84,0x86,0x58,0x0c,0x00,0x18,0x1f,0x00,0x00,0x00,0x00, 0x27,0x84,0x84,0x40,0x0c,0x00,0x13,0xd9,0x00,0x00,0x00,0x00,0x08,0x00,0x17,0x8b, 0x00,0x00,0x00,0x00,0x8f,0x82,0x89,0x58,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x05, 0x00,0x00,0x18,0x21,0x8f,0x82,0x84,0x48,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x02, 0x00,0x00,0x00,0x00,0x24,0x03,0x00,0x01,0x03,0xe0,0x00,0x08,0x00,0x60,0x10,0x21, 0x27,0xbd,0xff,0xe0,0x3c,0x06,0xb0,0x03,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10, 0x34,0xc6,0x00,0x5f,0xaf,0xbf,0x00,0x18,0x90,0xc3,0x00,0x00,0x3c,0x07,0xb0,0x03, 0x34,0xe7,0x00,0x5d,0x34,0x63,0x00,0x01,0x3c,0x09,0xb0,0x03,0x24,0x02,0x00,0x01, 0xa0,0xc3,0x00,0x00,0x00,0x80,0x80,0x21,0xa0,0xe2,0x00,0x00,0x00,0xa0,0x88,0x21, 0x35,0x29,0x00,0x5e,0x00,0xe0,0x40,0x21,0x24,0x04,0x00,0x01,0x91,0x22,0x00,0x00, 0x91,0x03,0x00,0x00,0x30,0x42,0x00,0x01,0x14,0x83,0x00,0x03,0x30,0x42,0x00,0x01, 0x14,0x40,0xff,0xfa,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x04,0x12,0x02,0x00,0x2c, 0x24,0x05,0x0f,0x00,0x24,0x02,0x00,0x06,0x12,0x02,0x00,0x08,0x24,0x05,0x00,0x0f, 0x3c,0x02,0xb0,0x03,0x34,0x42,0x02,0x00,0xa0,0x50,0x00,0x00,0x8f,0xbf,0x00,0x18, 0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x24,0x04,0x0c,0x04, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x0f,0x24,0x04,0x0d,0x04,0x24,0x05,0x00,0x0f, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x0f,0x24,0x04,0x08,0x80,0x24,0x05,0x1e,0x00, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x0f,0x24,0x04,0x08,0x8c,0x24,0x05,0x0f,0x00, 
0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x0f,0x24,0x04,0x08,0x24,0x3c,0x05,0x00,0x30, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x24,0x04,0x08,0x2c,0x3c,0x05,0x00,0x30, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x24,0x04,0x08,0x34,0x3c,0x05,0x00,0x30, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x24,0x04,0x08,0x3c,0x3c,0x05,0x00,0x30, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x08,0x00,0x17,0xc5,0x3c,0x02,0xb0,0x03, 0x24,0x04,0x08,0x8c,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x04,0x24,0x04,0x08,0x80, 0x24,0x05,0x1e,0x00,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x04,0x24,0x04,0x0c,0x04, 0x24,0x05,0x00,0x0f,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x04,0x24,0x04,0x0d,0x04, 0x24,0x05,0x00,0x0f,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x04,0x24,0x04,0x08,0x24, 0x3c,0x05,0x00,0x30,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x03,0x24,0x04,0x08,0x2c, 0x3c,0x05,0x00,0x30,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x03,0x24,0x04,0x08,0x34, 0x3c,0x05,0x00,0x30,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x02,0x3c,0x05,0x00,0x30, 0x24,0x06,0x00,0x03,0x0c,0x00,0x13,0x5f,0x24,0x04,0x08,0x3c,0x02,0x20,0x20,0x21, 0x24,0x05,0x00,0x14,0x0c,0x00,0x13,0xa4,0x24,0x06,0x01,0x07,0x08,0x00,0x17,0xc5, 0x3c,0x02,0xb0,0x03,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x73,0x90,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x02,0x14,0x40,0x00,0x04,0x00,0x00,0x00,0x00, 0xa3,0x80,0x81,0x58,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0xa3,0x82,0x81,0x58,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03, 0x3c,0x02,0x80,0x00,0x00,0x80,0x70,0x21,0x34,0x63,0x00,0x20,0x24,0x42,0x60,0x7c, 0x3c,0x04,0xb0,0x03,0xac,0x62,0x00,0x00,0x34,0x84,0x00,0x30,0xad,0xc0,0x02,0xb8, 0x8c,0x83,0x00,0x00,0x24,0x02,0x00,0xff,0xa5,0xc0,0x00,0x0a,0x00,0x00,0x30,0x21, 0xa7,0x82,0x8f,0xf0,0x27,0x88,0x90,0x00,0xa5,0xc3,0x00,0x08,0x3c,0x07,0xb0,0x08, 0x30,0xc2,0xff,0xff,0x00,0x02,0x20,0xc0,0x24,0xc3,0x00,0x01,0x00,0x82,0x10,0x21, 0x00,0x60,0x30,0x21,0x00,0x02,0x10,0x80,0x30,0x63,0xff,0xff,0x00,0x48,0x10,0x21, 
0x00,0x87,0x20,0x21,0x28,0xc5,0x00,0xff,0xac,0x83,0x00,0x00,0x14,0xa0,0xff,0xf4, 0xa4,0x43,0x00,0x00,0x3c,0x02,0xb0,0x08,0x34,0x03,0xff,0xff,0x25,0xc4,0x00,0x0c, 0x24,0x0a,0x00,0x02,0x34,0x42,0x07,0xf8,0x3c,0x06,0xb0,0x03,0xa7,0x83,0xb3,0xdc, 0xac,0x43,0x00,0x00,0xaf,0x84,0xb4,0x00,0x34,0xc6,0x00,0x64,0xa0,0x8a,0x00,0x18, 0x94,0xc5,0x00,0x00,0x8f,0x82,0xb4,0x00,0x25,0xc4,0x00,0x30,0x24,0x08,0x00,0x03, 0x3c,0x03,0xb0,0x03,0xa0,0x45,0x00,0x21,0x34,0x63,0x00,0x66,0xaf,0x84,0xb4,0x04, 0xa0,0x88,0x00,0x18,0x94,0x65,0x00,0x00,0x8f,0x82,0xb4,0x04,0x25,0xc4,0x00,0x54, 0x25,0xc7,0x00,0x78,0xa0,0x45,0x00,0x21,0xaf,0x84,0xb4,0x08,0xa0,0x88,0x00,0x18, 0x94,0x65,0x00,0x00,0x8f,0x82,0xb4,0x08,0x25,0xc8,0x00,0x9c,0x24,0x09,0x00,0x01, 0xa0,0x45,0x00,0x21,0xaf,0x87,0xb4,0x0c,0xa0,0xea,0x00,0x18,0x94,0xc4,0x00,0x00, 0x8f,0x82,0xb4,0x0c,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x62,0xa0,0x44,0x00,0x21, 0xaf,0x88,0xb4,0x10,0xa1,0x09,0x00,0x18,0x94,0x65,0x00,0x00,0x8f,0x82,0xb4,0x10, 0x25,0xc4,0x00,0xc0,0x3c,0x06,0xb0,0x03,0xa0,0x45,0x00,0x21,0xaf,0x84,0xb4,0x14, 0xa0,0x89,0x00,0x18,0x94,0x65,0x00,0x00,0x8f,0x82,0xb4,0x14,0x25,0xc4,0x00,0xe4, 0x34,0xc6,0x00,0x60,0xa0,0x45,0x00,0x21,0xaf,0x84,0xb4,0x18,0xa0,0x80,0x00,0x18, 0x94,0xc5,0x00,0x00,0x8f,0x82,0xb4,0x18,0x25,0xc3,0x01,0x08,0x25,0xc7,0x01,0x2c, 0xa0,0x45,0x00,0x21,0xaf,0x83,0xb4,0x1c,0xa0,0x60,0x00,0x18,0x94,0xc8,0x00,0x00, 0x8f,0x82,0xb4,0x1c,0x25,0xc4,0x01,0x50,0x25,0xc5,0x01,0x74,0xa0,0x48,0x00,0x21, 0x25,0xc6,0x01,0x98,0x25,0xc9,0x01,0xbc,0x25,0xca,0x01,0xe0,0x25,0xcb,0x02,0x04, 0x25,0xcc,0x02,0x28,0x25,0xcd,0x02,0x4c,0x24,0x02,0x00,0x10,0x3c,0x03,0xb0,0x03, 0xaf,0x87,0xb4,0x20,0x34,0x63,0x00,0x38,0xa0,0xe0,0x00,0x18,0xaf,0x84,0xb4,0x24, 0xa0,0x80,0x00,0x18,0xaf,0x85,0xb4,0x28,0xa0,0xa0,0x00,0x18,0xaf,0x86,0xb4,0x2c, 0xa0,0xc0,0x00,0x18,0xaf,0x89,0xb4,0x30,0xa1,0x20,0x00,0x18,0xaf,0x8a,0xb4,0x34, 0xa1,0x40,0x00,0x18,0xaf,0x8b,0xb4,0x38,0xa1,0x60,0x00,0x18,0xaf,0x8c,0xb4,0x3c, 
0xa1,0x80,0x00,0x18,0xaf,0x8d,0xb4,0x40,0xa1,0xa2,0x00,0x18,0x94,0x64,0x00,0x00, 0x8f,0x82,0xb4,0x40,0x25,0xc5,0x02,0x70,0x3c,0x03,0xb0,0x03,0xa0,0x44,0x00,0x21, 0x24,0x02,0x00,0x11,0xaf,0x85,0xb4,0x44,0x34,0x63,0x00,0x6e,0xa0,0xa2,0x00,0x18, 0x94,0x64,0x00,0x00,0x8f,0x82,0xb4,0x44,0x25,0xc5,0x02,0x94,0x3c,0x03,0xb0,0x03, 0xa0,0x44,0x00,0x21,0x24,0x02,0x00,0x12,0xaf,0x85,0xb4,0x48,0x34,0x63,0x00,0x6c, 0xa0,0xa2,0x00,0x18,0x94,0x64,0x00,0x00,0x8f,0x82,0xb4,0x48,0x24,0x05,0xff,0xff, 0x24,0x07,0x00,0x01,0xa0,0x44,0x00,0x21,0x24,0x06,0x00,0x12,0x27,0x84,0xb4,0x00, 0x8c,0x82,0x00,0x00,0x24,0xc6,0xff,0xff,0xa0,0x40,0x00,0x04,0x8c,0x83,0x00,0x00, 0xa4,0x45,0x00,0x00,0xa4,0x45,0x00,0x02,0xa0,0x60,0x00,0x0a,0x8c,0x82,0x00,0x00, 0xa4,0x65,0x00,0x06,0xa4,0x65,0x00,0x08,0xa0,0x40,0x00,0x10,0x8c,0x83,0x00,0x00, 0xa4,0x45,0x00,0x0c,0xa4,0x45,0x00,0x0e,0xa0,0x60,0x00,0x12,0x8c,0x82,0x00,0x00, 0x00,0x00,0x00,0x00,0xa0,0x40,0x00,0x16,0x8c,0x83,0x00,0x00,0xa4,0x45,0x00,0x14, 0xa0,0x67,0x00,0x17,0x8c,0x82,0x00,0x00,0x24,0x84,0x00,0x04,0xa0,0x40,0x00,0x20, 0x04,0xc1,0xff,0xe7,0xac,0x40,0x00,0x1c,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x00,0x34,0x42,0x00,0x20,0x24,0x63,0x63,0x40, 0xac,0x43,0x00,0x00,0x90,0x82,0x00,0x10,0x00,0x80,0x60,0x21,0x10,0x40,0x00,0x56, 0x00,0x00,0x70,0x21,0x97,0x82,0x8f,0xf0,0x94,0x8a,0x00,0x0c,0x27,0x87,0x90,0x00, 0x00,0x02,0x40,0xc0,0x01,0x02,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x47,0x10,0x21, 0x90,0x8b,0x00,0x18,0xa4,0x4a,0x00,0x00,0x94,0x83,0x00,0x0e,0x39,0x64,0x00,0x10, 0x2c,0x84,0x00,0x01,0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x34,0x85,0x00,0x02, 0x39,0x63,0x00,0x11,0x00,0x83,0x28,0x0b,0x34,0xa3,0x00,0x08,0x39,0x64,0x00,0x12, 0x00,0x02,0x10,0x80,0x00,0xa4,0x18,0x0b,0x00,0x47,0x10,0x21,0x94,0x49,0x00,0x04, 0x34,0x64,0x00,0x20,0x00,0x6b,0x20,0x0b,0x34,0x83,0x00,0x40,0x39,0x62,0x00,0x01, 0x00,0x82,0x18,0x0b,0x00,0x09,0x30,0xc0,0x34,0x64,0x00,0x80,0x00,0xc9,0x28,0x21, 
0x39,0x62,0x00,0x02,0x00,0x60,0x68,0x21,0x00,0x82,0x68,0x0a,0x00,0x05,0x28,0x80, 0x3c,0x02,0xb0,0x08,0x00,0xa7,0x28,0x21,0x00,0xc2,0x30,0x21,0x01,0x02,0x40,0x21, 0x34,0x03,0xff,0xff,0x35,0xa4,0x01,0x00,0x39,0x62,0x00,0x03,0x2d,0x67,0x00,0x13, 0xad,0x0a,0x00,0x00,0xa4,0xa3,0x00,0x00,0xac,0xc3,0x00,0x00,0xa7,0x89,0x8f,0xf0, 0x10,0xe0,0x00,0x0f,0x00,0x82,0x68,0x0a,0x3c,0x03,0x80,0x01,0x00,0x0b,0x10,0x80, 0x24,0x63,0x02,0x44,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x60, 0x94,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x14,0x00,0x00,0x02,0x74,0x03, 0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x3a,0x94,0x44,0x00,0x00,0x93,0x83,0x8f,0xe4, 0x91,0x82,0x00,0x21,0x01,0xc4,0x20,0x21,0x91,0x85,0x00,0x10,0x00,0x04,0x24,0x00, 0x00,0x62,0x18,0x21,0x00,0x04,0x74,0x03,0x00,0x6e,0x18,0x23,0x00,0x65,0x10,0x2a, 0x00,0xa2,0x18,0x0a,0x00,0x0d,0x24,0x00,0x3c,0x02,0xb0,0x06,0x24,0x05,0xff,0xff, 0x00,0x64,0x18,0x25,0x34,0x42,0x80,0x20,0xac,0x43,0x00,0x00,0xa5,0x85,0x00,0x0e, 0xa1,0x80,0x00,0x10,0xa5,0x85,0x00,0x0c,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 0x3c,0x03,0xb0,0x03,0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x62,0x3c,0x03,0xb0,0x03, 0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x64,0x3c,0x03,0xb0,0x03,0x08,0x00,0x19,0x14, 0x34,0x63,0x00,0x66,0x3c,0x03,0xb0,0x03,0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x38, 0x3c,0x03,0xb0,0x03,0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x6e,0x3c,0x03,0xb0,0x03, 0x08,0x00,0x19,0x14,0x34,0x63,0x00,0x6c,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00, 0x34,0x63,0x00,0x20,0x24,0x42,0x65,0x08,0x00,0x05,0x28,0x40,0xac,0x62,0x00,0x00, 0x00,0xa6,0x28,0x21,0x2c,0xe2,0x00,0x10,0x14,0x80,0x00,0x06,0x00,0x00,0x18,0x21, 0x10,0x40,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0xe0,0x18,0x21,0x03,0xe0,0x00,0x08, 0x00,0x60,0x10,0x21,0x24,0x02,0x00,0x20,0x10,0xe2,0x00,0x06,0x2c,0xe4,0x00,0x10, 0x24,0xa2,0x00,0x01,0x10,0x80,0xff,0xf9,0x00,0x02,0x11,0x00,0x08,0x00,0x19,0x4f, 
0x00,0x47,0x18,0x21,0x08,0x00,0x19,0x4f,0x24,0xa3,0x00,0x50,0x27,0xbd,0xff,0xc8, 0xaf,0xb3,0x00,0x1c,0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x30, 0xaf,0xb7,0x00,0x2c,0xaf,0xb6,0x00,0x28,0xaf,0xb5,0x00,0x24,0xaf,0xb4,0x00,0x20, 0xaf,0xb0,0x00,0x10,0x00,0x80,0x88,0x21,0x84,0x84,0x00,0x08,0x3c,0x05,0xb0,0x03, 0x3c,0x02,0x80,0x00,0x34,0xa5,0x00,0x20,0x24,0x42,0x65,0x6c,0x3c,0x03,0xb0,0x06, 0x00,0x04,0x20,0x80,0xac,0xa2,0x00,0x00,0x00,0x83,0x20,0x21,0x3c,0x06,0xb0,0x06, 0x8c,0x82,0x00,0x00,0x34,0xc6,0x80,0x24,0x8c,0x88,0x00,0x00,0x8c,0xc4,0x00,0x00, 0x96,0x25,0x00,0x08,0x30,0x52,0xff,0xff,0x00,0x08,0x44,0x02,0x34,0x84,0x01,0x00, 0x3c,0x02,0xb0,0x00,0x00,0x08,0x18,0xc0,0x00,0x12,0x3a,0x00,0xac,0xc4,0x00,0x00, 0x00,0xe2,0x38,0x21,0xae,0x32,0x02,0xb8,0x00,0x68,0x18,0x21,0x24,0xa5,0x00,0x02, 0x8c,0xf6,0x00,0x00,0x30,0xa5,0x01,0xff,0x8c,0xf4,0x00,0x04,0x27,0x86,0x90,0x00, 0x00,0x03,0x18,0x80,0x00,0x12,0x98,0xc0,0xa6,0x25,0x00,0x08,0x00,0x66,0x18,0x21, 0x02,0x72,0x10,0x21,0x94,0x65,0x00,0x00,0x00,0x02,0x48,0x80,0x01,0x26,0x30,0x21, 0x24,0x02,0xff,0xff,0x00,0x14,0x1a,0x02,0x27,0x84,0x90,0x10,0xa4,0xc2,0x00,0x02, 0x30,0x63,0x00,0x1f,0x24,0x02,0x00,0x10,0x01,0x24,0x20,0x21,0xa4,0xc8,0x00,0x04, 0x8c,0xf0,0x00,0x08,0xa6,0x23,0x00,0x06,0xa6,0x25,0x00,0x0a,0xa0,0x82,0x00,0x06, 0x86,0x25,0x00,0x06,0x27,0x82,0x90,0x04,0x01,0x22,0x10,0x21,0x24,0x03,0x00,0x13, 0x10,0xa3,0x00,0xee,0xac,0x47,0x00,0x18,0x3c,0x03,0xb0,0x03,0x34,0x63,0x01,0x00, 0xa6,0x20,0x00,0x02,0x3c,0x02,0xb0,0x03,0x90,0x64,0x00,0x00,0x34,0x42,0x01,0x08, 0x8c,0x45,0x00,0x00,0x00,0x10,0x1b,0xc2,0x00,0x04,0x20,0x82,0x30,0x63,0x00,0x01, 0xac,0xc5,0x00,0x08,0x10,0x60,0x00,0xc7,0x30,0x97,0x00,0x01,0x00,0x10,0x16,0x82, 0x30,0x46,0x00,0x01,0x00,0x10,0x12,0x02,0x00,0x10,0x19,0xc2,0x00,0x10,0x26,0x02, 0x00,0x10,0x2e,0x42,0x30,0x48,0x00,0x7f,0x24,0x02,0x00,0x01,0x30,0x75,0x00,0x01, 0x30,0x84,0x00,0x01,0x10,0xc2,0x00,0xb3,0x30,0xa3,0x00,0x01,0x00,0x60,0x28,0x21, 
0x0c,0x00,0x19,0x42,0x01,0x00,0x38,0x21,0x02,0x72,0x18,0x21,0x00,0x03,0x18,0x80, 0x2c,0x46,0x00,0x54,0x27,0x85,0x90,0x10,0x27,0x84,0x90,0x08,0x00,0x06,0x10,0x0a, 0x00,0x65,0x28,0x21,0x26,0xa6,0x00,0x02,0x00,0x64,0x18,0x21,0xa0,0xa2,0x00,0x02, 0xa0,0x66,0x00,0x06,0xa0,0x62,0x00,0x07,0xa0,0xa2,0x00,0x01,0x02,0x72,0x28,0x21, 0x00,0x05,0x28,0x80,0x27,0x82,0x90,0x04,0x00,0xa2,0x58,0x21,0x8d,0x64,0x00,0x18, 0x00,0x10,0x15,0xc2,0x30,0x42,0x00,0x01,0x8c,0x83,0x00,0x0c,0x27,0x84,0x90,0x20, 0x00,0xa4,0x48,0x21,0xa6,0x22,0x00,0x00,0xa6,0x36,0x00,0x04,0x8d,0x26,0x00,0x00, 0x00,0x03,0x19,0x42,0x3c,0x02,0xff,0xef,0x34,0x42,0xff,0xff,0x30,0x63,0x00,0x01, 0x00,0xc2,0x40,0x24,0x00,0x03,0x1d,0x00,0x01,0x03,0x40,0x25,0x00,0x08,0x15,0x02, 0x00,0x14,0x19,0x82,0x00,0x14,0x25,0x82,0x00,0x10,0x34,0x42,0x00,0x10,0x3c,0x82, 0x00,0x10,0x2c,0x02,0x30,0x42,0x00,0x01,0x30,0xcd,0x00,0x01,0x30,0x6c,0x00,0x01, 0x30,0xe6,0x00,0x01,0x30,0x8a,0x00,0x03,0x32,0x94,0x00,0x07,0x30,0xa5,0x00,0x01, 0xad,0x28,0x00,0x00,0x10,0x40,0x00,0x0b,0x32,0x07,0x00,0x7f,0x8d,0x64,0x00,0x18, 0x3c,0x03,0xff,0xf0,0x34,0x63,0xff,0xff,0x8c,0x82,0x00,0x0c,0x01,0x03,0x18,0x24, 0x00,0x02,0x13,0x82,0x30,0x42,0x00,0x0f,0x00,0x02,0x14,0x00,0x00,0x62,0x18,0x25, 0xad,0x23,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xc2,0x00,0x6a,0x00,0x00,0x00,0x00, 0x15,0x80,0x00,0x03,0x00,0x00,0x00,0x00,0x15,0x40,0x00,0x5b,0x24,0x02,0x00,0x01, 0x96,0x22,0x00,0x04,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x04,0xa6,0x22,0x00,0x04, 0x00,0xa0,0x20,0x21,0x0c,0x00,0x19,0x42,0x01,0xa0,0x28,0x21,0x02,0x72,0x18,0x21, 0x00,0x03,0x40,0x80,0x2c,0x45,0x00,0x54,0x27,0x84,0x90,0x10,0x01,0x04,0x20,0x21, 0x00,0x05,0x10,0x0a,0xa0,0x82,0x00,0x00,0xa0,0x80,0x00,0x04,0xa0,0x80,0x00,0x05, 0x96,0x23,0x00,0x04,0x27,0x82,0x90,0x00,0x01,0x02,0x10,0x21,0xa4,0x43,0x00,0x06, 0x27,0x82,0x90,0x04,0x92,0x26,0x00,0x01,0x01,0x02,0x10,0x21,0x8c,0x45,0x00,0x18, 0x27,0x83,0x90,0x20,0x01,0x03,0x18,0x21,0xa0,0x60,0x00,0x00,0xa0,0x86,0x00,0x07, 
0x94,0xa2,0x00,0x10,0x24,0x03,0x00,0x04,0x30,0x42,0x00,0x0f,0x10,0x43,0x00,0x36, 0x24,0xa5,0x00,0x10,0x94,0xa3,0x00,0x16,0x27,0x87,0x90,0x18,0x01,0x07,0x10,0x21, 0xa4,0x43,0x00,0x02,0x94,0xa2,0x00,0x04,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01, 0x14,0x40,0x00,0x24,0x02,0x72,0x20,0x21,0x94,0xa2,0x00,0x00,0x24,0x03,0x00,0xa4, 0x30,0x42,0x00,0xff,0x10,0x43,0x00,0x1f,0x00,0x00,0x00,0x00,0x94,0xa2,0x00,0x00, 0x24,0x03,0x00,0x88,0x30,0x42,0x00,0x88,0x10,0x43,0x00,0x14,0x02,0x72,0x18,0x21, 0x27,0x84,0x90,0x20,0x00,0x03,0x18,0x80,0x00,0x64,0x18,0x21,0x8c,0x62,0x00,0x00, 0x3c,0x04,0x00,0x80,0x00,0x44,0x10,0x25,0xac,0x62,0x00,0x00,0x02,0x72,0x10,0x21, 0x00,0x02,0x10,0x80,0x00,0x47,0x10,0x21,0xa0,0x54,0x00,0x00,0x8f,0xbf,0x00,0x30, 0x7b,0xb6,0x01,0x7c,0x7b,0xb4,0x01,0x3c,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc, 0x24,0x02,0x00,0x01,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x38,0x94,0xa2,0x00,0x18, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x60,0x10,0x40,0xff,0xe9,0x02,0x72,0x18,0x21, 0x02,0x72,0x20,0x21,0x27,0x82,0x90,0x20,0x00,0x04,0x20,0x80,0x00,0x82,0x20,0x21, 0x8c,0x83,0x00,0x00,0x3c,0x02,0xff,0x7f,0x34,0x42,0xff,0xff,0x00,0x62,0x18,0x24, 0x08,0x00,0x1a,0x37,0xac,0x83,0x00,0x00,0x27,0x87,0x90,0x18,0x01,0x07,0x10,0x21, 0x08,0x00,0x1a,0x21,0xa4,0x40,0x00,0x02,0x11,0x42,0x00,0x07,0x00,0x00,0x00,0x00, 0x2d,0x42,0x00,0x02,0x14,0x40,0xff,0xa7,0x00,0xa0,0x20,0x21,0x96,0x22,0x00,0x04, 0x08,0x00,0x19,0xff,0x24,0x42,0x00,0x0c,0x96,0x22,0x00,0x04,0x08,0x00,0x19,0xff, 0x24,0x42,0x00,0x08,0x16,0xe6,0xff,0x96,0x3c,0x02,0xff,0xfb,0x8d,0x63,0x00,0x18, 0x34,0x42,0xff,0xff,0x02,0x02,0x10,0x24,0xac,0x62,0x00,0x08,0x08,0x00,0x19,0xf8, 0x00,0x00,0x30,0x21,0x16,0xe6,0xff,0x4e,0x00,0x60,0x28,0x21,0x3c,0x02,0xfb,0xff, 0x34,0x42,0xff,0xff,0x02,0x02,0x10,0x24,0xac,0xe2,0x00,0x08,0x08,0x00,0x19,0xb7, 0x00,0x00,0x30,0x21,0x93,0x87,0xbb,0x14,0x00,0x10,0x1e,0x42,0x00,0x10,0x26,0x82, 0x27,0x82,0x90,0x08,0x2c,0xe5,0x00,0x0c,0x01,0x22,0x48,0x21,0x30,0x63,0x00,0x01, 
0x30,0x86,0x00,0x01,0x14,0xa0,0x00,0x06,0x00,0xe0,0x40,0x21,0x00,0x03,0x10,0x40, 0x00,0x46,0x10,0x21,0x00,0x02,0x11,0x00,0x00,0xe2,0x10,0x21,0x24,0x48,0x00,0x04, 0x02,0x72,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x84,0x90,0x10,0x27,0x83,0x90,0x08, 0x00,0x44,0x20,0x21,0x00,0x43,0x10,0x21,0xa1,0x28,0x00,0x07,0xa0,0x40,0x00,0x06, 0xa0,0x80,0x00,0x02,0x08,0x00,0x19,0xc7,0xa0,0x80,0x00,0x01,0x24,0x02,0x00,0x01, 0xa6,0x22,0x00,0x02,0x0c,0x00,0x01,0xc2,0x00,0xe0,0x20,0x21,0x08,0x00,0x1a,0x3b, 0x00,0x00,0x00,0x00,0x30,0xa7,0xff,0xff,0x00,0x07,0x18,0xc0,0x00,0x67,0x18,0x21, 0x3c,0x06,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x6a,0x44,0x27,0x85,0x90,0x10, 0x00,0x03,0x18,0x80,0x34,0xc6,0x00,0x20,0x00,0x65,0x18,0x21,0xac,0xc2,0x00,0x00, 0x80,0x62,0x00,0x07,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x29,0x00,0x80,0x28,0x21, 0x90,0x82,0x00,0x16,0x00,0x00,0x00,0x00,0x34,0x42,0x00,0x02,0x30,0x43,0x00,0x01, 0x14,0x60,0x00,0x02,0xa0,0x82,0x00,0x16,0xa0,0x80,0x00,0x17,0x90,0xa2,0x00,0x04, 0x3c,0x03,0xb0,0x03,0x27,0x86,0x90,0x00,0x14,0x40,0x00,0x06,0x34,0x63,0x00,0x20, 0x24,0x02,0x00,0x01,0xa0,0xa2,0x00,0x04,0xa4,0xa7,0x00,0x02,0x03,0xe0,0x00,0x08, 0xa4,0xa7,0x00,0x00,0x94,0xa4,0x00,0x02,0x3c,0x02,0x80,0x01,0x24,0x42,0x82,0x6c, 0xac,0x62,0x00,0x00,0x00,0x04,0x18,0xc0,0x00,0x64,0x18,0x21,0x00,0x03,0x18,0x80, 0x00,0x66,0x18,0x21,0x94,0x62,0x00,0x04,0xa4,0x67,0x00,0x02,0x3c,0x03,0xb0,0x08, 0x00,0x02,0x20,0xc0,0x00,0x82,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x46,0x10,0x21, 0x00,0x83,0x20,0x21,0xa4,0x47,0x00,0x00,0xac,0x87,0x00,0x00,0x90,0xa2,0x00,0x04, 0xa4,0xa7,0x00,0x02,0x24,0x42,0x00,0x01,0x03,0xe0,0x00,0x08,0xa0,0xa2,0x00,0x04, 0x90,0x82,0x00,0x16,0x24,0x85,0x00,0x06,0x34,0x42,0x00,0x01,0x30,0x43,0x00,0x02, 0x14,0x60,0xff,0xda,0xa0,0x82,0x00,0x16,0x24,0x02,0x00,0x01,0x08,0x00,0x1a,0xa7, 0xa0,0x82,0x00,0x17,0x27,0xbd,0xff,0xe8,0xaf,0xbf,0x00,0x10,0x00,0x80,0x38,0x21, 0x84,0x84,0x00,0x02,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x3c,0x0a,0xb0,0x06, 
0x34,0x63,0x00,0x20,0x24,0x42,0x6b,0x44,0x3c,0x0b,0xb0,0x08,0x27,0x89,0x90,0x00, 0x34,0x0c,0xff,0xff,0x35,0x4a,0x80,0x20,0x10,0x80,0x00,0x30,0xac,0x62,0x00,0x00, 0x97,0x82,0x8f,0xf0,0x94,0xe6,0x02,0xba,0x00,0x02,0x18,0xc0,0x00,0x6b,0x28,0x21, 0xac,0xa6,0x00,0x00,0x8c,0xe4,0x02,0xb8,0x00,0x62,0x18,0x21,0x00,0x03,0x18,0x80, 0x00,0x04,0x10,0xc0,0x00,0x44,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x49,0x10,0x21, 0x94,0x48,0x00,0x04,0x00,0x69,0x18,0x21,0xa4,0x66,0x00,0x00,0x00,0x08,0x28,0xc0, 0x00,0xab,0x10,0x21,0xac,0x4c,0x00,0x00,0x8c,0xe4,0x02,0xb8,0x27,0x82,0x90,0x04, 0x00,0xa8,0x28,0x21,0x00,0x04,0x18,0xc0,0x00,0x64,0x18,0x21,0x00,0x03,0x18,0x80, 0x00,0x62,0x10,0x21,0x8c,0x46,0x00,0x18,0x27,0x84,0x90,0x10,0x00,0x64,0x18,0x21, 0x8c,0xc2,0x00,0x00,0x80,0x67,0x00,0x06,0x00,0x05,0x28,0x80,0x30,0x42,0xff,0xff, 0x00,0x47,0x10,0x21,0x30,0x43,0x00,0xff,0x00,0x03,0x18,0x2b,0x00,0x02,0x12,0x02, 0x00,0x43,0x10,0x21,0x3c,0x04,0x00,0x04,0x00,0xa9,0x28,0x21,0x00,0x44,0x10,0x25, 0xa4,0xac,0x00,0x00,0xad,0x42,0x00,0x00,0xa7,0x88,0x8f,0xf0,0x8f,0xbf,0x00,0x10, 0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x84,0xe3,0x00,0x06, 0x27,0x82,0xb4,0x00,0x94,0xe5,0x02,0xba,0x00,0x03,0x18,0x80,0x00,0x62,0x18,0x21, 0x8c,0x64,0x00,0x00,0x0c,0x00,0x1a,0x91,0x00,0x00,0x00,0x00,0x08,0x00,0x1b,0x0b, 0x00,0x00,0x00,0x00,0x94,0x88,0x00,0x00,0x00,0x80,0x58,0x21,0x27,0x8a,0x90,0x00, 0x00,0x08,0x18,0xc0,0x00,0x68,0x18,0x21,0x3c,0x04,0xb0,0x03,0x00,0x03,0x18,0x80, 0x3c,0x02,0x80,0x00,0x00,0x6a,0x18,0x21,0x34,0x84,0x00,0x20,0x24,0x42,0x6c,0x64, 0x30,0xa5,0xff,0xff,0xac,0x82,0x00,0x00,0x94,0x67,0x00,0x02,0x11,0x05,0x00,0x35, 0x24,0x04,0x00,0x01,0x91,0x66,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x86,0x10,0x2a, 0x10,0x40,0x00,0x10,0x00,0xc0,0x48,0x21,0x3c,0x0d,0xb0,0x03,0x01,0x40,0x60,0x21, 0x35,0xad,0x00,0x20,0x10,0xe5,0x00,0x0d,0x24,0x84,0x00,0x01,0x00,0x07,0x10,0xc0, 0x00,0x47,0x10,0x21,0x00,0x02,0x10,0x80,0x01,0x20,0x30,0x21,0x00,0x4a,0x10,0x21, 
0x00,0x86,0x18,0x2a,0x00,0xe0,0x40,0x21,0x94,0x47,0x00,0x02,0x14,0x60,0xff,0xf5, 0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x10,0x21,0x00,0x08,0x20,0xc0, 0x00,0x88,0x20,0x21,0x24,0xc2,0xff,0xff,0x00,0x04,0x20,0x80,0xa1,0x62,0x00,0x04, 0x00,0x8c,0x20,0x21,0x94,0x83,0x00,0x04,0x00,0x07,0x10,0xc0,0x00,0x47,0x10,0x21, 0x00,0x02,0x10,0x80,0x00,0x4c,0x10,0x21,0x00,0x03,0x28,0xc0,0x94,0x46,0x00,0x02, 0x00,0xa3,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x6c,0x18,0x21,0xa4,0x66,0x00,0x00, 0xa4,0x86,0x00,0x02,0x95,0x64,0x00,0x02,0x3c,0x03,0xb0,0x08,0x3c,0x02,0x80,0x01, 0x00,0xa3,0x28,0x21,0x24,0x42,0x82,0x6c,0xad,0xa2,0x00,0x00,0x10,0x87,0x00,0x03, 0xac,0xa6,0x00,0x00,0x03,0xe0,0x00,0x08,0x24,0x02,0x00,0x01,0x08,0x00,0x1b,0x59, 0xa5,0x68,0x00,0x02,0x91,0x62,0x00,0x04,0xa5,0x67,0x00,0x00,0x24,0x42,0xff,0xff, 0x30,0x43,0x00,0xff,0x14,0x60,0xff,0xf7,0xa1,0x62,0x00,0x04,0x24,0x02,0xff,0xff, 0x08,0x00,0x1b,0x59,0xa5,0x62,0x00,0x02,0x00,0x05,0x40,0xc0,0x01,0x05,0x30,0x21, 0x27,0xbd,0xff,0xd8,0x00,0x06,0x30,0x80,0x27,0x82,0x90,0x04,0xaf,0xb2,0x00,0x18, 0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x20,0xaf,0xb3,0x00,0x1c,0xaf,0xb0,0x00,0x10, 0x00,0xc2,0x10,0x21,0x8c,0x47,0x00,0x18,0x00,0xa0,0x90,0x21,0x3c,0x02,0x80,0x00, 0x3c,0x05,0xb0,0x03,0x34,0xa5,0x00,0x20,0x24,0x42,0x6d,0x98,0xac,0xa2,0x00,0x00, 0x27,0x83,0x90,0x10,0x00,0xc3,0x30,0x21,0x8c,0xe2,0x00,0x00,0x80,0xc5,0x00,0x06, 0x00,0x80,0x88,0x21,0x30,0x42,0xff,0xff,0x00,0x45,0x10,0x21,0x30,0x43,0x00,0xff, 0x10,0x60,0x00,0x02,0x00,0x02,0x12,0x02,0x24,0x42,0x00,0x01,0x30,0x53,0x00,0xff, 0x01,0x12,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x10,0x00,0x43,0x10,0x21, 0x80,0x44,0x00,0x07,0x00,0x00,0x00,0x00,0x10,0x80,0x00,0x4b,0x26,0x24,0x00,0x06, 0x32,0x50,0xff,0xff,0x02,0x20,0x20,0x21,0x0c,0x00,0x1b,0x19,0x02,0x00,0x28,0x21, 0x92,0x22,0x00,0x10,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x2e,0x3c,0x03,0xb0,0x08, 0x3c,0x09,0x80,0x01,0x27,0x88,0x90,0x00,0xa6,0x32,0x00,0x0c,0x00,0x10,0x20,0xc0, 
0x00,0x90,0x20,0x21,0x00,0x04,0x20,0x80,0x00,0x88,0x20,0x21,0x94,0x82,0x00,0x04, 0x3c,0x03,0xb0,0x08,0x3c,0x07,0xb0,0x03,0x00,0x02,0x28,0xc0,0x00,0xa2,0x10,0x21, 0x00,0x02,0x10,0x80,0x00,0x48,0x10,0x21,0x00,0xa3,0x28,0x21,0x25,0x26,0x82,0x6c, 0x34,0x03,0xff,0xff,0x34,0xe7,0x00,0x20,0xac,0xe6,0x00,0x00,0xa4,0x83,0x00,0x02, 0xa4,0x43,0x00,0x00,0xac,0xa3,0x00,0x00,0x92,0x22,0x00,0x10,0x92,0x23,0x00,0x0a, 0xa6,0x32,0x00,0x0e,0x02,0x62,0x10,0x21,0x14,0x60,0x00,0x05,0xa2,0x22,0x00,0x10, 0x92,0x22,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfe,0xa2,0x22,0x00,0x16, 0x92,0x22,0x00,0x04,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x05,0x00,0x00,0x00,0x00, 0x92,0x22,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfd,0xa2,0x22,0x00,0x16, 0x8f,0xbf,0x00,0x20,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x28,0x96,0x22,0x00,0x0e,0x27,0x88,0x90,0x00,0x00,0x02,0x20,0xc0, 0x00,0x82,0x20,0x21,0x00,0x04,0x20,0x80,0x00,0x88,0x20,0x21,0x94,0x82,0x00,0x04, 0x3c,0x06,0xb0,0x03,0x3c,0x09,0x80,0x01,0x00,0x02,0x28,0xc0,0x00,0xa2,0x10,0x21, 0x00,0x02,0x10,0x80,0x00,0xa3,0x28,0x21,0x00,0x48,0x10,0x21,0x34,0xc6,0x00,0x20, 0x25,0x23,0x82,0x6c,0xac,0xc3,0x00,0x00,0xa4,0x50,0x00,0x00,0xac,0xb0,0x00,0x00, 0x08,0x00,0x1b,0x97,0xa4,0x90,0x00,0x02,0x08,0x00,0x1b,0x8e,0x32,0x50,0xff,0xff, 0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00,0x24,0x42,0x6f,0x60,0x34,0x63,0x00,0x20, 0xac,0x62,0x00,0x00,0x90,0x82,0x00,0x04,0x97,0xaa,0x00,0x12,0x00,0x80,0x60,0x21, 0x30,0xa8,0xff,0xff,0x00,0x4a,0x20,0x23,0x34,0x09,0xff,0xff,0x30,0xcf,0xff,0xff, 0x30,0xee,0xff,0xff,0x11,0x09,0x00,0x73,0xa1,0x84,0x00,0x04,0x00,0x0e,0xc0,0xc0, 0x00,0x08,0x10,0xc0,0x00,0x48,0x10,0x21,0x03,0x0e,0x20,0x21,0x27,0x8d,0x90,0x00, 0x00,0x04,0x20,0x80,0x00,0x02,0x10,0x80,0x00,0x4d,0x10,0x21,0x00,0x8d,0x20,0x21, 0x94,0x86,0x00,0x02,0x94,0x43,0x00,0x04,0x3c,0x19,0x80,0x01,0xa4,0x46,0x00,0x02, 0x00,0x03,0x28,0xc0,0x00,0xa3,0x18,0x21,0x94,0x87,0x00,0x02,0x3c,0x02,0xb0,0x08, 
0x00,0x03,0x18,0x80,0x00,0xa2,0x28,0x21,0x00,0x6d,0x18,0x21,0x27,0x22,0x82,0x6c, 0x3c,0x01,0xb0,0x03,0xac,0x22,0x00,0x20,0xa4,0x66,0x00,0x00,0x10,0xe9,0x00,0x57, 0xac,0xa6,0x00,0x00,0x01,0xe0,0x30,0x21,0x11,0x40,0x00,0x1d,0x00,0x00,0x48,0x21, 0x01,0x40,0x38,0x21,0x27,0x8b,0x90,0x04,0x27,0x8a,0x90,0x10,0x00,0x06,0x40,0xc0, 0x01,0x06,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x6b,0x10,0x21,0x8c,0x44,0x00,0x18, 0x00,0x6a,0x18,0x21,0x80,0x65,0x00,0x06,0x8c,0x82,0x00,0x00,0x00,0x00,0x00,0x00, 0x30,0x42,0xff,0xff,0x00,0x45,0x10,0x21,0x30,0x44,0x00,0xff,0x00,0x02,0x12,0x02, 0x01,0x22,0x18,0x21,0x24,0x62,0x00,0x01,0x14,0x80,0x00,0x02,0x30,0x49,0x00,0xff, 0x30,0x69,0x00,0xff,0x01,0x06,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x4d,0x10,0x21, 0x24,0xe7,0xff,0xff,0x94,0x46,0x00,0x02,0x14,0xe0,0xff,0xe9,0x00,0x06,0x40,0xc0, 0x91,0x82,0x00,0x10,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x20,0x3c,0x06,0xb0,0x03, 0xa5,0x8f,0x00,0x0c,0x03,0x0e,0x20,0x21,0x00,0x04,0x20,0x80,0x00,0x8d,0x20,0x21, 0x94,0x82,0x00,0x04,0x3c,0x03,0xb0,0x08,0x3c,0x07,0xb0,0x03,0x00,0x02,0x28,0xc0, 0x00,0xa2,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x4d,0x10,0x21,0x00,0xa3,0x28,0x21, 0x27,0x26,0x82,0x6c,0x34,0x03,0xff,0xff,0x34,0xe7,0x00,0x20,0xac,0xe6,0x00,0x00, 0xa4,0x83,0x00,0x02,0xa4,0x43,0x00,0x00,0xac,0xa3,0x00,0x00,0x91,0x82,0x00,0x10, 0x91,0x83,0x00,0x04,0xa5,0x8e,0x00,0x0e,0x01,0x22,0x10,0x21,0x14,0x60,0x00,0x05, 0xa1,0x82,0x00,0x10,0x91,0x82,0x00,0x16,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0xfd, 0xa1,0x82,0x00,0x16,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x95,0x82,0x00,0x0e, 0x3c,0x03,0xb0,0x08,0x00,0x02,0x20,0xc0,0x00,0x82,0x20,0x21,0x00,0x04,0x20,0x80, 0x00,0x8d,0x20,0x21,0x94,0x82,0x00,0x04,0x34,0xc6,0x00,0x20,0x27,0x27,0x82,0x6c, 0x00,0x02,0x28,0xc0,0x00,0xa2,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0xa3,0x28,0x21, 0x00,0x4d,0x10,0x21,0xac,0xc7,0x00,0x00,0xa4,0x8f,0x00,0x02,0xa4,0x4f,0x00,0x00, 0xac,0xaf,0x00,0x00,0x08,0x00,0x1c,0x26,0x03,0x0e,0x20,0x21,0x08,0x00,0x1c,0x01, 
0xa5,0x88,0x00,0x02,0x00,0x0e,0xc0,0xc0,0x03,0x0e,0x10,0x21,0x00,0x02,0x10,0x80, 0x27,0x8d,0x90,0x00,0x00,0x4d,0x10,0x21,0x94,0x43,0x00,0x02,0x30,0x84,0x00,0xff, 0x14,0x80,0x00,0x05,0xa5,0x83,0x00,0x00,0x24,0x02,0xff,0xff,0x3c,0x19,0x80,0x01, 0x08,0x00,0x1c,0x01,0xa5,0x82,0x00,0x02,0x08,0x00,0x1c,0x01,0x3c,0x19,0x80,0x01, 0x3c,0x08,0xb0,0x03,0x3c,0x02,0x80,0x00,0x27,0xbd,0xff,0x78,0x35,0x08,0x00,0x20, 0x24,0x42,0x71,0xa0,0xaf,0xb2,0x00,0x68,0xaf,0xb1,0x00,0x64,0xaf,0xb0,0x00,0x60, 0xad,0x02,0x00,0x00,0xaf,0xbf,0x00,0x84,0xaf,0xbe,0x00,0x80,0xaf,0xb7,0x00,0x7c, 0xaf,0xb6,0x00,0x78,0xaf,0xb5,0x00,0x74,0xaf,0xb4,0x00,0x70,0xaf,0xb3,0x00,0x6c, 0xaf,0xa4,0x00,0x88,0x90,0x83,0x00,0x0a,0x27,0x82,0xb4,0x00,0xaf,0xa6,0x00,0x90, 0x00,0x03,0x18,0x80,0x00,0x62,0x18,0x21,0x8c,0x63,0x00,0x00,0xaf,0xa7,0x00,0x94, 0x27,0x86,0x90,0x04,0xaf,0xa3,0x00,0x1c,0x94,0x63,0x00,0x14,0x30,0xb1,0xff,0xff, 0x24,0x08,0x00,0x01,0x00,0x03,0x20,0xc0,0xaf,0xa3,0x00,0x18,0x00,0x83,0x18,0x21, 0xaf,0xa4,0x00,0x54,0x00,0x03,0x18,0x80,0x27,0x84,0x90,0x10,0x00,0x64,0x20,0x21, 0x80,0x82,0x00,0x06,0x00,0x66,0x18,0x21,0x8c,0x66,0x00,0x18,0x24,0x42,0x00,0x02, 0x00,0x02,0x1f,0xc2,0x8c,0xc4,0x00,0x08,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43, 0x00,0x02,0x10,0x40,0x00,0x04,0x2f,0xc2,0x00,0x04,0x1c,0x82,0x00,0xc2,0x38,0x21, 0x00,0x04,0x24,0x42,0x8f,0xa2,0x00,0x1c,0x30,0x63,0x00,0x01,0x30,0x84,0x00,0x01, 0xaf,0xa5,0x00,0x3c,0xaf,0xa3,0x00,0x34,0xaf,0xa4,0x00,0x38,0xaf,0xa0,0x00,0x40, 0xaf,0xa0,0x00,0x44,0xaf,0xa0,0x00,0x50,0xaf,0xa8,0x00,0x20,0x80,0x42,0x00,0x12, 0x8f,0xb2,0x00,0x18,0xaf,0xa2,0x00,0x28,0x8c,0xd0,0x00,0x0c,0x14,0xa0,0x01,0xe4, 0x00,0x60,0x30,0x21,0x00,0x10,0x10,0x82,0x30,0x45,0x00,0x07,0x10,0xa0,0x00,0x11, 0xaf,0xa0,0x00,0x30,0x8f,0xa4,0x00,0x98,0x27,0x82,0x80,0x1c,0x00,0x04,0x18,0x40, 0x00,0x62,0x18,0x21,0x24,0xa2,0x00,0x06,0x8f,0xa5,0x00,0x20,0x94,0x64,0x00,0x00, 0x00,0x45,0x10,0x04,0x00,0x44,0x00,0x1a,0x14,0x80,0x00,0x02,0x00,0x00,0x00,0x00, 
0x00,0x07,0x00,0x0d,0x00,0x00,0x10,0x12,0x24,0x42,0x00,0x20,0x30,0x42,0xff,0xfc, 0xaf,0xa2,0x00,0x30,0x8f,0xa3,0x00,0x18,0x8f,0xa4,0x00,0x28,0x34,0x02,0xff,0xff, 0xaf,0xa0,0x00,0x2c,0xaf,0xa2,0x00,0x48,0xaf,0xa3,0x00,0x4c,0x00,0x60,0xf0,0x21, 0x00,0x00,0xb8,0x21,0x18,0x80,0x00,0x48,0xaf,0xa0,0x00,0x24,0x00,0x11,0x89,0x02, 0xaf,0xb1,0x00,0x58,0x00,0x80,0xa8,0x21,0x00,0x12,0x10,0xc0,0x00,0x52,0x18,0x21, 0x00,0x03,0x80,0x80,0x27,0x85,0x90,0x00,0x02,0x40,0x20,0x21,0x00,0x40,0xa0,0x21, 0x02,0x05,0x10,0x21,0x94,0x56,0x00,0x02,0x0c,0x00,0x12,0x8b,0x00,0x00,0x28,0x21, 0x90,0x42,0x00,0x00,0x24,0x03,0x00,0x08,0x30,0x42,0x00,0x0c,0x10,0x43,0x01,0x9e, 0x24,0x04,0x00,0x01,0x24,0x02,0x00,0x01,0x10,0x82,0x01,0x7c,0x3c,0x02,0xb0,0x03, 0x8f,0xa6,0x00,0x88,0x34,0x42,0x01,0x04,0x84,0xc5,0x00,0x0c,0x02,0x92,0x18,0x21, 0x94,0x46,0x00,0x00,0x00,0x05,0x20,0xc0,0x00,0x85,0x20,0x21,0x00,0x03,0x18,0x80, 0x27,0x82,0x90,0x10,0x27,0x85,0x90,0x08,0x00,0x65,0x28,0x21,0x00,0x62,0x18,0x21, 0x80,0x71,0x00,0x05,0x80,0x73,0x00,0x04,0x8f,0xa3,0x00,0x88,0x30,0xd0,0xff,0xff, 0x00,0x10,0x3a,0x03,0x32,0x08,0x00,0xff,0x27,0x82,0x90,0x20,0x00,0x04,0x20,0x80, 0x80,0xa6,0x00,0x06,0x00,0x82,0x20,0x21,0xa4,0x67,0x00,0x44,0xa4,0x68,0x00,0x46, 0x8c,0x84,0x00,0x00,0x38,0xc6,0x00,0x00,0x01,0x00,0x80,0x21,0x00,0x04,0x15,0x02, 0x30,0x42,0x00,0x01,0x10,0x40,0x00,0x03,0x00,0xe6,0x80,0x0a,0x00,0x04,0x14,0x02, 0x30,0x50,0x00,0x0f,0x12,0x20,0x01,0x50,0x02,0x40,0x20,0x21,0x02,0x71,0x10,0x21, 0x00,0x50,0x10,0x2a,0x14,0x40,0x00,0xed,0x02,0x92,0x10,0x21,0x93,0x82,0x8b,0x71, 0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01,0x14,0x40,0x00,0xe0,0x02,0x92,0x28,0x21, 0x26,0xe2,0x00,0x01,0x30,0x57,0xff,0xff,0x02,0x40,0xf0,0x21,0x26,0xb5,0xff,0xff, 0x16,0xa0,0xff,0xbd,0x02,0xc0,0x90,0x21,0x16,0xe0,0x00,0xd0,0x00,0x00,0x00,0x00, 0x8f,0xa3,0x00,0x98,0x00,0x00,0x00,0x00,0x2c,0x62,0x00,0x10,0x10,0x40,0x00,0x2e, 0x00,0x00,0x00,0x00,0x8f,0xa4,0x00,0x24,0x00,0x00,0x00,0x00,0x18,0x80,0x00,0x2a, 
0x24,0x03,0x00,0x01,0x8f,0xa5,0x00,0x1c,0x27,0x84,0x90,0x04,0x94,0xb2,0x00,0x14, 0xa0,0xa3,0x00,0x12,0x8f,0xa6,0x00,0x3c,0x00,0x12,0x10,0xc0,0x00,0x52,0x10,0x21, 0x00,0x02,0x80,0x80,0x27,0x82,0x90,0x10,0x02,0x02,0x10,0x21,0x80,0x43,0x00,0x06, 0x02,0x04,0x20,0x21,0x8c,0x85,0x00,0x18,0x24,0x63,0x00,0x02,0x00,0x03,0x17,0xc2, 0x00,0x62,0x18,0x21,0x00,0x03,0x18,0x43,0x00,0x03,0x18,0x40,0x14,0xc0,0x00,0x0e, 0x00,0xa3,0x38,0x21,0x27,0x82,0x90,0x00,0x02,0x02,0x10,0x21,0x94,0x43,0x00,0x06, 0x8f,0xa8,0x00,0x1c,0x24,0x02,0x00,0x01,0xa5,0x03,0x00,0x1a,0x7b,0xbe,0x04,0x3c, 0x7b,0xb6,0x03,0xfc,0x7b,0xb4,0x03,0xbc,0x7b,0xb2,0x03,0x7c,0x7b,0xb0,0x03,0x3c, 0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x88,0x8f,0xa4,0x00,0x98,0x8f,0xa5,0x00,0x38, 0x8f,0xa6,0x00,0x34,0xaf,0xa0,0x00,0x10,0x0c,0x00,0x09,0x0a,0xaf,0xa0,0x00,0x14, 0x08,0x00,0x1d,0x2d,0x00,0x00,0x00,0x00,0x8f,0xa3,0x00,0x44,0x93,0x82,0x81,0x58, 0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x61,0x30,0x69,0x00,0x03,0x8f,0xa4,0x00,0x24, 0x8f,0xa5,0x00,0x28,0x00,0x00,0x00,0x00,0x00,0x85,0x10,0x2a,0x10,0x40,0x00,0x8f, 0x00,0x00,0x00,0x00,0x8f,0xa6,0x00,0x1c,0x00,0x00,0x00,0x00,0x90,0xc4,0x00,0x04, 0x00,0x00,0x00,0x00,0x30,0x83,0x00,0xff,0x00,0xa3,0x10,0x2a,0x10,0x40,0x00,0x87, 0x00,0x00,0x00,0x00,0x8f,0xa8,0x00,0x24,0x00,0x00,0x00,0x00,0x11,0x00,0x00,0x83, 0x00,0x65,0x10,0x23,0x00,0xa8,0x18,0x23,0x00,0x62,0x10,0x2a,0x14,0x40,0x00,0x7d, 0x30,0x63,0x00,0xff,0x00,0x85,0x10,0x23,0x30,0x42,0x00,0xff,0xaf,0xa2,0x00,0x50, 0x8f,0xa2,0x00,0x50,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x73,0x00,0x00,0xa8,0x21, 0x27,0x8c,0x90,0x00,0x3c,0x0b,0x80,0xff,0x24,0x10,0x00,0x04,0x27,0x91,0x90,0x04, 0x35,0x6b,0xff,0xff,0x3c,0x0d,0x7f,0x00,0x27,0x8e,0x90,0x10,0x01,0x80,0x78,0x21, 0x00,0x12,0x30,0xc0,0x00,0xd2,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x4c,0x10,0x21, 0x94,0x42,0x00,0x06,0x8f,0xa3,0x00,0x2c,0x8f,0xa4,0x00,0x30,0xaf,0xa2,0x00,0x44, 0x8f,0xa5,0x00,0x44,0x30,0x49,0x00,0x03,0x02,0x09,0x10,0x23,0x30,0x42,0x00,0x03, 
0x00,0xa2,0x10,0x21,0x8f,0xa8,0x00,0x30,0x24,0x42,0x00,0x04,0x30,0x42,0xff,0xff, 0x00,0x64,0x38,0x21,0x01,0x02,0x28,0x23,0x00,0x62,0x18,0x21,0x00,0x48,0x10,0x2b, 0x10,0x40,0x00,0x52,0x00,0x00,0x20,0x21,0x30,0xe7,0xff,0xff,0x30,0xa4,0xff,0xff, 0xaf,0xa7,0x00,0x2c,0x00,0xd2,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x51,0x18,0x21, 0x8c,0x65,0x00,0x18,0x00,0x04,0x25,0x40,0x00,0x8d,0x20,0x24,0x8c,0xa8,0x00,0x04, 0x00,0x4e,0x18,0x21,0x00,0x4f,0x50,0x21,0x01,0x0b,0x40,0x24,0x01,0x04,0x40,0x25, 0xac,0xa8,0x00,0x04,0x8f,0xa4,0x00,0x98,0x8f,0xa2,0x00,0x50,0x26,0xb5,0x00,0x01, 0xa0,0x64,0x00,0x00,0x8c,0xa4,0x00,0x08,0x00,0x00,0x00,0x00,0x04,0x81,0x00,0x0c, 0x02,0xa2,0x30,0x2a,0x80,0x62,0x00,0x06,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x02, 0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43,0x00,0x02,0x10,0x40, 0x00,0xa2,0x38,0x21,0x8f,0xa5,0x00,0x40,0x00,0x00,0x00,0x00,0xa4,0xe5,0x00,0x00, 0x95,0x52,0x00,0x02,0x14,0xc0,0xff,0xc7,0x00,0x12,0x30,0xc0,0x8f,0xa4,0x00,0x24, 0x8f,0xa5,0x00,0x50,0x8f,0xa6,0x00,0x1c,0x8f,0xa3,0x00,0x2c,0x00,0x85,0x80,0x21, 0xa0,0xd0,0x00,0x12,0x00,0x09,0x10,0x23,0x30,0x42,0x00,0x03,0x8f,0xa8,0x00,0x88, 0x00,0x62,0x10,0x23,0xa4,0xc2,0x00,0x1a,0x85,0x03,0x00,0x0c,0x00,0x00,0x00,0x00, 0x00,0x03,0x10,0xc0,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x04, 0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x18,0x00,0x00,0x00,0x00,0x8c,0x83,0x00,0x04, 0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x10,0x14,0x60,0xff,0x74,0x02,0x00,0x10,0x21, 0x8f,0xa3,0x00,0x54,0x8f,0xa4,0x00,0x18,0x8f,0xa5,0x00,0x24,0x00,0x64,0x10,0x21, 0x00,0x02,0x10,0x80,0x27,0x83,0x90,0x18,0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x00, 0x10,0xa0,0x00,0x03,0x00,0x00,0x30,0x21,0x08,0x00,0x1d,0x33,0x02,0x00,0x10,0x21, 0x93,0x82,0x80,0x10,0x00,0x00,0x28,0x21,0x00,0x00,0x38,0x21,0x0c,0x00,0x21,0x9a, 0xaf,0xa2,0x00,0x10,0x08,0x00,0x1d,0x33,0x02,0x00,0x10,0x21,0x30,0x63,0xff,0xff, 0x08,0x00,0x1d,0x85,0xaf,0xa3,0x00,0x2c,0x8f,0xa8,0x00,0x44,0x08,0x00,0x1d,0xa7, 
0x31,0x09,0x00,0x03,0x08,0x00,0x1d,0x60,0xaf,0xa3,0x00,0x50,0x8f,0xa6,0x00,0x44, 0xaf,0xa0,0x00,0x50,0x08,0x00,0x1d,0xa7,0x30,0xc9,0x00,0x03,0x8f,0xa5,0x00,0x48, 0x8f,0xa6,0x00,0x4c,0x8f,0xa4,0x00,0x1c,0x03,0xc0,0x38,0x21,0x0c,0x00,0x1b,0xd8, 0xaf,0xb7,0x00,0x10,0x08,0x00,0x1d,0x10,0x00,0x00,0x00,0x00,0x00,0x05,0x28,0x80, 0x27,0x82,0x90,0x00,0x00,0xa2,0x28,0x21,0x00,0x00,0x20,0x21,0x0c,0x00,0x01,0x49, 0x00,0x00,0x00,0x00,0x08,0x00,0x1d,0x09,0x26,0xe2,0x00,0x01,0x00,0x02,0x80,0x80, 0x27,0x83,0x90,0x10,0x8f,0xa4,0x00,0x1c,0x02,0x03,0x18,0x21,0x26,0x31,0x00,0x01, 0x02,0x40,0x28,0x21,0x0c,0x00,0x1e,0xea,0xa0,0x71,0x00,0x05,0x14,0x40,0xff,0x13, 0x00,0x00,0x00,0x00,0x16,0xe0,0x00,0x4d,0x03,0xc0,0x38,0x21,0x8f,0xa4,0x00,0x24, 0x8f,0xa5,0x00,0x20,0x24,0x02,0x00,0x01,0x24,0x84,0x00,0x01,0xaf,0xb2,0x00,0x48, 0xaf,0xb6,0x00,0x4c,0x02,0xc0,0xf0,0x21,0x10,0xa2,0x00,0x41,0xaf,0xa4,0x00,0x24, 0x27,0x82,0x90,0x00,0x02,0x02,0x10,0x21,0x94,0x42,0x00,0x06,0x8f,0xa4,0x00,0x30, 0xaf,0xa0,0x00,0x20,0xaf,0xa2,0x00,0x44,0x30,0x49,0x00,0x03,0x8f,0xa8,0x00,0x44, 0x00,0x09,0x10,0x23,0x30,0x42,0x00,0x03,0x01,0x02,0x10,0x21,0x24,0x42,0x00,0x04, 0x30,0x42,0xff,0xff,0x00,0x44,0x18,0x2b,0x10,0x60,0x00,0x2b,0x00,0x00,0x00,0x00, 0x8f,0xa5,0x00,0x2c,0x00,0x82,0x10,0x23,0x00,0xa4,0x18,0x21,0x30,0x63,0xff,0xff, 0x30,0x44,0xff,0xff,0xaf,0xa3,0x00,0x2c,0x02,0x92,0x28,0x21,0x00,0x05,0x28,0x80, 0x27,0x82,0x90,0x04,0x00,0xa2,0x10,0x21,0x8c,0x46,0x00,0x18,0x3c,0x03,0x80,0xff, 0x3c,0x02,0x7f,0x00,0x8c,0xc8,0x00,0x04,0x00,0x04,0x25,0x40,0x34,0x63,0xff,0xff, 0x00,0x82,0x20,0x24,0x01,0x03,0x40,0x24,0x01,0x04,0x40,0x25,0xac,0xc8,0x00,0x04, 0x8f,0xa8,0x00,0x98,0x27,0x82,0x90,0x10,0x00,0xa2,0x10,0x21,0xa0,0x48,0x00,0x00, 0x8c,0xc4,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x04,0x27,0xc2,0x10,0x80,0xfe,0xdb, 0xaf,0xa4,0x00,0x3c,0x80,0x42,0x00,0x06,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x02, 0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43,0x00,0x02,0x10,0x40, 
0x00,0xc2,0x38,0x21,0x8f,0xa2,0x00,0x40,0x00,0x00,0x00,0x00,0xa4,0xe2,0x00,0x00, 0x08,0x00,0x1d,0x0c,0x26,0xb5,0xff,0xff,0x8f,0xa6,0x00,0x2c,0x00,0x00,0x20,0x21, 0x00,0xc2,0x10,0x21,0x30,0x42,0xff,0xff,0x08,0x00,0x1e,0x1a,0xaf,0xa2,0x00,0x2c, 0x8f,0xa6,0x00,0x1c,0x08,0x00,0x1e,0x04,0xa4,0xd2,0x00,0x14,0x8f,0xa5,0x00,0x48, 0x8f,0xa6,0x00,0x4c,0x8f,0xa4,0x00,0x1c,0x0c,0x00,0x1b,0xd8,0xaf,0xb7,0x00,0x10, 0x08,0x00,0x1d,0xfb,0x00,0x00,0xb8,0x21,0x0c,0x00,0x12,0x8b,0x00,0x00,0x28,0x21, 0x00,0x40,0x18,0x21,0x94,0x42,0x00,0x00,0x00,0x00,0x00,0x00,0x34,0x42,0x08,0x00, 0xa4,0x62,0x00,0x00,0x08,0x00,0x1d,0x00,0x02,0x71,0x10,0x21,0x02,0x92,0x18,0x21, 0x00,0x03,0x80,0x80,0x27,0x82,0x90,0x04,0x02,0x02,0x10,0x21,0x8c,0x44,0x00,0x18, 0x00,0x00,0x00,0x00,0x8c,0x83,0x00,0x04,0x00,0x00,0x00,0x00,0x30,0x63,0x00,0x10, 0x10,0x60,0x00,0x09,0x24,0x06,0x00,0x01,0x93,0x82,0x8b,0x71,0x00,0x00,0x00,0x00, 0x30,0x42,0x00,0x01,0x10,0x40,0xfe,0xa2,0x3c,0x04,0x00,0x80,0x27,0x85,0x90,0x00, 0x08,0x00,0x1d,0xeb,0x02,0x05,0x28,0x21,0x27,0x83,0x90,0x18,0x27,0x82,0x90,0x10, 0x02,0x03,0x18,0x21,0x02,0x02,0x10,0x21,0x90,0x64,0x00,0x00,0x90,0x45,0x00,0x05, 0x93,0x83,0x80,0x10,0x00,0x00,0x38,0x21,0x0c,0x00,0x21,0x9a,0xaf,0xa3,0x00,0x10, 0x08,0x00,0x1e,0x62,0x00,0x00,0x00,0x00,0x27,0x82,0x90,0x18,0x02,0x02,0x10,0x21, 0x94,0x43,0x00,0x02,0x8f,0xa6,0x00,0x58,0x00,0x03,0x19,0x02,0x00,0x66,0x18,0x23, 0x30,0x63,0x0f,0xff,0x28,0x62,0x00,0x20,0x10,0x40,0x00,0x06,0x28,0x62,0x00,0x40, 0x8f,0xa8,0x00,0x90,0x00,0x00,0x00,0x00,0x00,0x68,0x10,0x06,0x08,0x00,0x1c,0xd9, 0x30,0x44,0x00,0x01,0x10,0x40,0x00,0x04,0x00,0x00,0x00,0x00,0x8f,0xa4,0x00,0x94, 0x08,0x00,0x1e,0x83,0x00,0x64,0x10,0x06,0x08,0x00,0x1c,0xd9,0x00,0x00,0x20,0x21, 0x8f,0xa4,0x00,0x98,0x8f,0xa5,0x00,0x38,0xaf,0xa0,0x00,0x10,0x0c,0x00,0x09,0x0a, 0xaf,0xa8,0x00,0x14,0x30,0x42,0xff,0xff,0x08,0x00,0x1c,0xa9,0xaf,0xa2,0x00,0x40, 0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x00,0x27,0xbd,0xff,0xe0,0x34,0x42,0x00,0x20, 
0x24,0x63,0x7a,0x50,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x18, 0xac,0x43,0x00,0x00,0x90,0x82,0x00,0x0a,0x00,0x80,0x80,0x21,0x14,0x40,0x00,0x45, 0x00,0x00,0x88,0x21,0x92,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x3c, 0x00,0x00,0x00,0x00,0x12,0x20,0x00,0x18,0x00,0x00,0x00,0x00,0x92,0x02,0x00,0x16, 0x92,0x05,0x00,0x0a,0x30,0x42,0x00,0xfc,0x10,0xa0,0x00,0x03,0xa2,0x02,0x00,0x16, 0x34,0x42,0x00,0x01,0xa2,0x02,0x00,0x16,0x92,0x04,0x00,0x04,0x00,0x00,0x00,0x00, 0x30,0x83,0x00,0xff,0x10,0x60,0x00,0x05,0x00,0x00,0x00,0x00,0x92,0x02,0x00,0x16, 0x00,0x00,0x00,0x00,0x34,0x42,0x00,0x02,0xa2,0x02,0x00,0x16,0x10,0x60,0x00,0x0a, 0x00,0x00,0x00,0x00,0x14,0xa0,0x00,0x08,0x00,0x00,0x00,0x00,0x96,0x02,0x00,0x00, 0xa2,0x00,0x00,0x17,0xa6,0x02,0x00,0x14,0x8f,0xbf,0x00,0x18,0x7b,0xb0,0x00,0xbc, 0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x14,0x80,0x00,0x05,0x24,0x02,0x00,0x01, 0x96,0x03,0x00,0x06,0xa2,0x02,0x00,0x17,0x08,0x00,0x1e,0xbe,0xa6,0x03,0x00,0x14, 0x96,0x04,0x00,0x00,0x96,0x05,0x00,0x06,0x27,0x86,0x90,0x00,0x00,0x04,0x10,0xc0, 0x00,0x05,0x18,0xc0,0x00,0x44,0x10,0x21,0x00,0x65,0x18,0x21,0x00,0x02,0x10,0x80, 0x00,0x03,0x18,0x80,0x00,0x66,0x18,0x21,0x00,0x46,0x10,0x21,0x8c,0x65,0x00,0x08, 0x8c,0x44,0x00,0x08,0x0c,0x00,0x12,0x7c,0x00,0x00,0x00,0x00,0x30,0x43,0x00,0xff, 0x10,0x60,0x00,0x04,0xa2,0x02,0x00,0x17,0x96,0x02,0x00,0x06,0x08,0x00,0x1e,0xbe, 0xa6,0x02,0x00,0x14,0x96,0x02,0x00,0x00,0x08,0x00,0x1e,0xbe,0xa6,0x02,0x00,0x14, 0x96,0x05,0x00,0x00,0x0c,0x00,0x1e,0xea,0x02,0x00,0x20,0x21,0x08,0x00,0x1e,0xa5, 0x02,0x22,0x88,0x21,0x94,0x85,0x00,0x06,0x0c,0x00,0x1e,0xea,0x00,0x00,0x00,0x00, 0x08,0x00,0x1e,0xa1,0x00,0x40,0x88,0x21,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x00, 0x34,0x63,0x00,0x20,0x24,0x42,0x7b,0xa8,0x27,0xbd,0xff,0xf0,0xac,0x62,0x00,0x00, 0x00,0x00,0x10,0x21,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x10,0x3c,0x03,0xb0,0x03, 0x3c,0x02,0x80,0x00,0x34,0x63,0x00,0x20,0x24,0x42,0x7b,0xcc,0xac,0x62,0x00,0x00, 
0x90,0x89,0x00,0x0a,0x00,0x80,0x30,0x21,0x11,0x20,0x00,0x05,0x00,0xa0,0x50,0x21, 0x90,0x82,0x00,0x17,0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x1b,0x00,0x00,0x00,0x00, 0x90,0xc7,0x00,0x04,0x00,0x00,0x00,0x00,0x10,0xe0,0x00,0x1b,0x00,0x00,0x00,0x00, 0x94,0xc8,0x00,0x00,0x27,0x83,0x90,0x00,0x93,0x85,0x8b,0x70,0x00,0x08,0x10,0xc0, 0x00,0x48,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x08, 0x00,0xe5,0x28,0x2b,0x10,0xa0,0x00,0x06,0x01,0x44,0x18,0x23,0x8f,0x82,0x8b,0x88, 0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x2b,0x10,0x40,0x00,0x05,0x00,0x00,0x00,0x00, 0x24,0x03,0x00,0x10,0xa4,0xc8,0x00,0x14,0x03,0xe0,0x00,0x08,0x00,0x60,0x10,0x21, 0x11,0x20,0x00,0x05,0x00,0x00,0x00,0x00,0x94,0xc2,0x00,0x06,0x24,0x03,0x00,0x08, 0x08,0x00,0x1f,0x16,0xa4,0xc2,0x00,0x14,0x08,0x00,0x1f,0x16,0x00,0x00,0x18,0x21, 0x27,0xbd,0xff,0xc8,0xaf,0xb5,0x00,0x2c,0xaf,0xb4,0x00,0x28,0xaf,0xb3,0x00,0x24, 0xaf,0xb0,0x00,0x18,0xaf,0xbf,0x00,0x30,0xaf,0xb2,0x00,0x20,0xaf,0xb1,0x00,0x1c, 0x94,0x91,0x00,0x06,0x00,0x80,0xa0,0x21,0x3c,0x02,0x80,0x00,0x3c,0x04,0xb0,0x03, 0x00,0x11,0xa8,0xc0,0x34,0x84,0x00,0x20,0x24,0x42,0x7c,0x80,0x02,0xb1,0x48,0x21, 0xac,0x82,0x00,0x00,0x00,0x09,0x48,0x80,0x24,0x03,0x00,0x01,0x27,0x82,0x90,0x10, 0xa2,0x83,0x00,0x12,0x01,0x22,0x10,0x21,0x27,0x84,0x90,0x04,0x01,0x24,0x20,0x21, 0x80,0x48,0x00,0x06,0x8c,0x8a,0x00,0x18,0x27,0x83,0x90,0x20,0x01,0x23,0x48,0x21, 0x8d,0x24,0x00,0x00,0x25,0x08,0x00,0x02,0x8d,0x42,0x00,0x00,0x8d,0x49,0x00,0x04, 0x00,0x08,0x17,0xc2,0x8d,0x43,0x00,0x08,0x01,0x02,0x40,0x21,0x00,0x04,0x25,0xc2, 0x00,0x08,0x40,0x43,0x30,0x84,0x00,0x01,0x00,0x03,0x1f,0xc2,0x00,0x08,0x40,0x40, 0x00,0xe0,0x80,0x21,0x00,0x64,0x18,0x24,0x00,0x09,0x49,0x42,0x01,0x48,0x10,0x21, 0x00,0xa0,0x98,0x21,0x00,0xa0,0x20,0x21,0x00,0x40,0x38,0x21,0x02,0x00,0x28,0x21, 0x14,0x60,0x00,0x19,0x31,0x29,0x00,0x01,0x94,0x42,0x00,0x00,0x02,0xb1,0x88,0x21, 0x02,0x00,0x28,0x21,0x00,0x11,0x88,0x80,0x27,0x90,0x90,0x00,0x02,0x30,0x80,0x21, 
0x96,0x03,0x00,0x06,0x30,0x52,0xff,0xff,0x02,0x60,0x20,0x21,0x00,0x60,0x30,0x21, 0xa6,0x83,0x00,0x1a,0x27,0x82,0x90,0x08,0x0c,0x00,0x08,0xe3,0x02,0x22,0x88,0x21, 0x00,0x52,0x10,0x21,0x96,0x03,0x00,0x06,0xa6,0x22,0x00,0x04,0x8f,0xbf,0x00,0x30, 0x7b,0xb4,0x01,0x7c,0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc,0x00,0x60,0x10,0x21, 0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x38,0xaf,0xa9,0x00,0x10,0x0c,0x00,0x09,0x0a, 0xaf,0xa0,0x00,0x14,0x08,0x00,0x1f,0x54,0x02,0xb1,0x88,0x21,0x27,0xbd,0xff,0xc0, 0xaf,0xbe,0x00,0x38,0xaf,0xb7,0x00,0x34,0xaf,0xb6,0x00,0x30,0xaf,0xb5,0x00,0x2c, 0xaf,0xb3,0x00,0x24,0xaf,0xb1,0x00,0x1c,0xaf,0xbf,0x00,0x3c,0xaf,0xb4,0x00,0x28, 0xaf,0xb2,0x00,0x20,0xaf,0xb0,0x00,0x18,0x94,0x90,0x00,0x00,0x3c,0x08,0xb0,0x03, 0x35,0x08,0x00,0x20,0x00,0x10,0x10,0xc0,0x00,0x50,0x18,0x21,0x00,0x40,0x88,0x21, 0x3c,0x02,0x80,0x00,0x00,0x03,0x48,0x80,0x24,0x42,0x7d,0xbc,0x00,0x80,0x98,0x21, 0x27,0x84,0x90,0x10,0x01,0x24,0x20,0x21,0x93,0xb7,0x00,0x53,0xad,0x02,0x00,0x00, 0x80,0x83,0x00,0x06,0x27,0x82,0x90,0x04,0x01,0x22,0x10,0x21,0x8c,0x44,0x00,0x18, 0x24,0x63,0x00,0x02,0x00,0x03,0x17,0xc2,0x8c,0x88,0x00,0x08,0x00,0x62,0x18,0x21, 0x00,0x03,0x18,0x43,0x00,0x03,0x18,0x40,0xaf,0xa7,0x00,0x4c,0x2c,0xa2,0x00,0x10, 0x00,0xa0,0xa8,0x21,0x00,0x83,0x50,0x21,0x00,0x08,0x47,0xc2,0x00,0xc0,0x58,0x21, 0x00,0x00,0xb0,0x21,0x8c,0x92,0x00,0x0c,0x14,0x40,0x00,0x13,0x00,0x00,0xf0,0x21, 0x92,0x67,0x00,0x04,0x24,0x14,0x00,0x01,0x12,0x87,0x00,0x10,0x02,0x30,0x10,0x21, 0x27,0x83,0x90,0x18,0x01,0x23,0x18,0x21,0x80,0x64,0x00,0x00,0x27,0x83,0xb5,0x70, 0x00,0x04,0x11,0x00,0x00,0x44,0x10,0x23,0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x23, 0x00,0x02,0x10,0x80,0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x04,0x00,0x00,0x00,0x00, 0x10,0x80,0x00,0x23,0x00,0x00,0x00,0x00,0x02,0x30,0x10,0x21,0x00,0x02,0x80,0x80, 0x24,0x04,0x00,0x01,0x27,0x83,0x90,0x20,0xa2,0x64,0x00,0x12,0x02,0x03,0x18,0x21, 0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x15,0xc2,0x30,0x42,0x00,0x01, 
0x01,0x02,0x10,0x24,0x14,0x40,0x00,0x0e,0x02,0xa0,0x20,0x21,0x27,0x82,0x90,0x00, 0x02,0x02,0x10,0x21,0x94,0x43,0x00,0x06,0x00,0x00,0x00,0x00,0xa6,0x63,0x00,0x1a, 0x94,0x42,0x00,0x06,0x7b,0xbe,0x01,0xfc,0x7b,0xb6,0x01,0xbc,0x7b,0xb4,0x01,0x7c, 0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x40, 0x8f,0xa5,0x00,0x4c,0x01,0x60,0x30,0x21,0x01,0x40,0x38,0x21,0xaf,0xa0,0x00,0x10, 0x0c,0x00,0x09,0x0a,0xaf,0xa0,0x00,0x14,0x08,0x00,0x1f,0xbb,0x00,0x00,0x00,0x00, 0x27,0x83,0x90,0x20,0x01,0x23,0x18,0x21,0x8c,0x62,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x02,0x15,0xc2,0x30,0x42,0x00,0x01,0x01,0x02,0x10,0x24,0x14,0x40,0x00,0xaf, 0x00,0xa0,0x20,0x21,0x32,0x4f,0x00,0x03,0x00,0x12,0x10,0x82,0x25,0xe3,0x00,0x0d, 0x30,0x45,0x00,0x07,0x00,0x74,0x78,0x04,0x10,0xa0,0x00,0x0e,0x00,0x00,0x90,0x21, 0x27,0x82,0x80,0x1c,0x00,0x15,0x18,0x40,0x00,0x62,0x18,0x21,0x94,0x64,0x00,0x00, 0x24,0xa2,0x00,0x06,0x00,0x54,0x10,0x04,0x00,0x44,0x00,0x1a,0x14,0x80,0x00,0x02, 0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0d,0x00,0x00,0x10,0x12,0x24,0x42,0x00,0x20, 0x30,0x52,0xff,0xfc,0x02,0x30,0x10,0x21,0x27,0x83,0x90,0x10,0x00,0x02,0x10,0x80, 0x00,0x43,0x10,0x21,0x90,0x44,0x00,0x03,0x00,0x00,0x00,0x00,0x30,0x83,0x00,0xff, 0x2c,0x62,0x00,0x0c,0x14,0x40,0x00,0x04,0x2c,0x62,0x00,0x19,0x30,0x82,0x00,0x0f, 0x24,0x43,0x00,0x0c,0x2c,0x62,0x00,0x19,0x10,0x40,0x00,0x19,0x24,0x0e,0x00,0x20, 0x24,0x62,0xff,0xe9,0x2c,0x42,0x00,0x02,0x14,0x40,0x00,0x15,0x24,0x0e,0x00,0x10, 0x24,0x62,0xff,0xeb,0x2c,0x42,0x00,0x02,0x14,0x40,0x00,0x11,0x24,0x0e,0x00,0x08, 0x24,0x02,0x00,0x14,0x10,0x62,0x00,0x0e,0x24,0x0e,0x00,0x02,0x24,0x62,0xff,0xef, 0x2c,0x42,0x00,0x03,0x14,0x40,0x00,0x0a,0x24,0x0e,0x00,0x10,0x24,0x62,0xff,0xf1, 0x2c,0x42,0x00,0x02,0x14,0x40,0x00,0x06,0x24,0x0e,0x00,0x08,0x24,0x62,0xff,0xf3, 0x2c,0x42,0x00,0x02,0x24,0x0e,0x00,0x04,0x24,0x03,0x00,0x02,0x00,0x62,0x70,0x0a, 0x30,0xe2,0x00,0xff,0x00,0x00,0x48,0x21,0x00,0x00,0x68,0x21,0x10,0x40,0x00,0x6d, 
0x00,0x00,0x58,0x21,0x3c,0x14,0x80,0xff,0x27,0x99,0x90,0x00,0x01,0xf2,0xc0,0x23, 0x36,0x94,0xff,0xff,0x01,0xc9,0x10,0x2a,0x14,0x40,0x00,0x64,0x24,0x03,0x00,0x04, 0x00,0x10,0x28,0xc0,0x00,0xb0,0x10,0x21,0x00,0x02,0x10,0x80,0x00,0x59,0x10,0x21, 0x94,0x56,0x00,0x06,0x00,0x00,0x00,0x00,0x32,0xcc,0x00,0x03,0x00,0x6c,0x10,0x23, 0x30,0x42,0x00,0x03,0x02,0xc2,0x10,0x21,0x24,0x42,0x00,0x04,0x30,0x51,0xff,0xff, 0x02,0x32,0x18,0x2b,0x10,0x60,0x00,0x4d,0x01,0xf1,0x10,0x23,0x02,0x51,0x10,0x23, 0x01,0x78,0x18,0x2b,0x10,0x60,0x00,0x34,0x30,0x44,0xff,0xff,0x29,0x22,0x00,0x40, 0x10,0x40,0x00,0x31,0x01,0x72,0x18,0x21,0x25,0x22,0x00,0x01,0x00,0x02,0x16,0x00, 0x00,0x02,0x4e,0x03,0x00,0xb0,0x10,0x21,0x00,0x02,0x30,0x80,0x27,0x82,0x90,0x04, 0x30,0x6b,0xff,0xff,0x00,0xc2,0x18,0x21,0x8c,0x67,0x00,0x18,0x00,0x04,0x25,0x40, 0x3c,0x03,0x7f,0x00,0x8c,0xe2,0x00,0x04,0x00,0x83,0x20,0x24,0x27,0x83,0x90,0x10, 0x00,0x54,0x10,0x24,0x00,0xc3,0x28,0x21,0x00,0x44,0x10,0x25,0xac,0xe2,0x00,0x04, 0x16,0xe0,0x00,0x02,0xa0,0xb5,0x00,0x00,0xa0,0xb5,0x00,0x03,0x27,0x84,0x90,0x20, 0x00,0xc4,0x18,0x21,0x8c,0x62,0x00,0x00,0x8c,0xe8,0x00,0x08,0x00,0x02,0x15,0xc2, 0x00,0x08,0x47,0xc2,0x30,0x42,0x00,0x01,0x01,0x02,0x10,0x24,0x10,0x40,0x00,0x0a, 0x00,0x00,0x00,0x00,0x80,0xa2,0x00,0x06,0x00,0x00,0x00,0x00,0x24,0x42,0x00,0x02, 0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43,0x00,0x02,0x10,0x40, 0x00,0xe2,0x50,0x21,0xa5,0x5e,0x00,0x00,0x92,0x62,0x00,0x04,0x25,0xad,0x00,0x01, 0x27,0x84,0x90,0x00,0x00,0xc4,0x18,0x21,0x01,0xa2,0x10,0x2a,0x94,0x70,0x00,0x02, 0x14,0x40,0xff,0xb8,0x00,0x00,0x00,0x00,0x96,0x63,0x00,0x14,0x00,0x0c,0x10,0x23, 0xa2,0x69,0x00,0x12,0x30,0x42,0x00,0x03,0x01,0x62,0x10,0x23,0x00,0x03,0x80,0xc0, 0x8f,0xa5,0x00,0x4c,0x30,0x4b,0xff,0xff,0x02,0x03,0x80,0x21,0x27,0x82,0x90,0x08, 0x00,0x10,0x80,0x80,0xa6,0x6b,0x00,0x1a,0x02,0xa0,0x20,0x21,0x01,0x60,0x30,0x21, 0x01,0x60,0x88,0x21,0x0c,0x00,0x08,0xe3,0x02,0x02,0x80,0x21,0x00,0x5e,0x10,0x21, 
0xa6,0x02,0x00,0x04,0x08,0x00,0x1f,0xc1,0x02,0x20,0x10,0x21,0x01,0x62,0x10,0x2b, 0x10,0x40,0xff,0xe9,0x00,0x00,0x20,0x21,0x29,0x22,0x00,0x40,0x10,0x40,0xff,0xe6, 0x01,0x71,0x18,0x21,0x08,0x00,0x20,0x37,0x25,0x22,0x00,0x01,0x08,0x00,0x20,0x66, 0x32,0xcc,0x00,0x03,0x08,0x00,0x20,0x66,0x00,0x00,0x60,0x21,0x8f,0xa5,0x00,0x4c, 0x01,0x40,0x38,0x21,0xaf,0xa0,0x00,0x10,0x0c,0x00,0x09,0x0a,0xaf,0xb4,0x00,0x14, 0x92,0x67,0x00,0x04,0x08,0x00,0x1f,0xd9,0x30,0x5e,0xff,0xff,0x30,0x84,0xff,0xff, 0x00,0x04,0x30,0xc0,0x00,0xc4,0x20,0x21,0x00,0x04,0x20,0x80,0x27,0x82,0x90,0x00, 0x3c,0x03,0xb0,0x08,0x30,0xa5,0xff,0xff,0x00,0x82,0x20,0x21,0x00,0xc3,0x30,0x21, 0xac,0xc5,0x00,0x00,0x03,0xe0,0x00,0x08,0xa4,0x85,0x00,0x00,0x30,0x84,0xff,0xff, 0x00,0x04,0x30,0xc0,0x00,0xc4,0x30,0x21,0x27,0x88,0x90,0x00,0x00,0x06,0x30,0x80, 0x00,0xc8,0x30,0x21,0x94,0xc3,0x00,0x04,0x3c,0x02,0xb0,0x08,0x3c,0x07,0xb0,0x03, 0x00,0x03,0x20,0xc0,0x00,0x83,0x18,0x21,0x00,0x03,0x18,0x80,0x00,0x82,0x20,0x21, 0x3c,0x02,0x80,0x01,0x30,0xa5,0xff,0xff,0x00,0x68,0x18,0x21,0x34,0xe7,0x00,0x20, 0x24,0x42,0x82,0x6c,0xac,0xe2,0x00,0x00,0xa4,0xc5,0x00,0x02,0xa4,0x65,0x00,0x00, 0x03,0xe0,0x00,0x08,0xac,0x85,0x00,0x00,0x30,0x84,0xff,0xff,0x00,0x04,0x10,0xc0, 0x00,0x44,0x10,0x21,0x27,0x89,0x90,0x00,0x00,0x02,0x10,0x80,0x00,0x49,0x10,0x21, 0x97,0x83,0x8f,0xf0,0x94,0x4a,0x00,0x04,0x3c,0x02,0xb0,0x08,0x00,0x03,0x38,0xc0, 0x00,0x0a,0x40,0xc0,0x00,0xe3,0x18,0x21,0x01,0x0a,0x28,0x21,0x00,0xe2,0x38,0x21, 0x01,0x02,0x40,0x21,0x00,0x03,0x18,0x80,0x00,0x05,0x28,0x80,0x3c,0x06,0xb0,0x03, 0x3c,0x02,0x80,0x01,0x00,0xa9,0x28,0x21,0x00,0x69,0x18,0x21,0x34,0xc6,0x00,0x20, 0x34,0x09,0xff,0xff,0x24,0x42,0x82,0xc8,0xac,0xc2,0x00,0x00,0xa4,0x64,0x00,0x00, 0xac,0xe4,0x00,0x00,0xa4,0xa9,0x00,0x00,0xad,0x09,0x00,0x00,0xa7,0x8a,0x8f,0xf0, 0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x01, 0x34,0x63,0x00,0x20,0x24,0x42,0x83,0x48,0x3c,0x04,0xb0,0x03,0xac,0x62,0x00,0x00, 
0x34,0x84,0x01,0x10,0x8c,0x82,0x00,0x00,0x97,0x83,0x81,0x60,0x30,0x42,0xff,0xff, 0x10,0x62,0x00,0x16,0x24,0x0a,0x00,0x01,0xa7,0x82,0x81,0x60,0xaf,0x80,0xb4,0x50, 0x00,0x40,0x28,0x21,0x24,0x06,0x00,0x01,0x27,0x84,0xb4,0x54,0x25,0x43,0xff,0xff, 0x00,0x66,0x10,0x04,0x00,0xa2,0x10,0x24,0x14,0x40,0x00,0x07,0x00,0x00,0x00,0x00, 0x8c,0x83,0xff,0xfc,0x00,0x00,0x00,0x00,0x00,0x66,0x10,0x04,0x00,0xa2,0x10,0x24, 0x38,0x42,0x00,0x00,0x01,0x42,0x18,0x0a,0x25,0x4a,0x00,0x01,0x2d,0x42,0x00,0x14, 0xac,0x83,0x00,0x00,0x14,0x40,0xff,0xf1,0x24,0x84,0x00,0x04,0x3c,0x0b,0xb0,0x03, 0x00,0x00,0x50,0x21,0x3c,0x0c,0x80,0x00,0x27,0x89,0xb4,0xa0,0x35,0x6b,0x01,0x20, 0x8d,0x68,0x00,0x00,0x8d,0x23,0x00,0x04,0x01,0x0c,0x10,0x24,0x00,0x02,0x17,0xc2, 0x11,0x03,0x00,0x37,0xa1,0x22,0x00,0xdc,0xa1,0x20,0x00,0xd5,0xa1,0x20,0x00,0xd6, 0x01,0x20,0x30,0x21,0x00,0x00,0x38,0x21,0x00,0x00,0x28,0x21,0x01,0x20,0x20,0x21, 0x00,0xa8,0x10,0x06,0x30,0x42,0x00,0x01,0x10,0xe0,0x00,0x10,0xa0,0x82,0x00,0x0a, 0x90,0x82,0x00,0x07,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x31,0x24,0xa2,0xff,0xff, 0xa0,0x82,0x00,0x08,0x90,0x82,0x00,0x0a,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x09, 0x00,0x00,0x00,0x00,0x90,0x83,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x03,0x10,0x40, 0x00,0x43,0x10,0x21,0x00,0x46,0x10,0x21,0xa0,0x45,0x00,0x09,0x90,0x82,0x00,0x0a, 0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x07,0x00,0x00,0x00,0x00,0x14,0xe0,0x00,0x04, 0x00,0x00,0x00,0x00,0xa0,0xc5,0x00,0xd5,0x24,0x07,0x00,0x01,0xa0,0x85,0x00,0x08, 0xa0,0xc5,0x00,0xd6,0x24,0xa5,0x00,0x01,0x2c,0xa2,0x00,0x1c,0x14,0x40,0xff,0xe0, 0x24,0x84,0x00,0x03,0x90,0xc4,0x00,0xd5,0x00,0x00,0x28,0x21,0x00,0xa4,0x10,0x2b, 0x10,0x40,0x00,0x0b,0x00,0x00,0x00,0x00,0x00,0xc0,0x18,0x21,0xa0,0x64,0x00,0x08, 0x90,0xc2,0x00,0xd5,0x24,0xa5,0x00,0x01,0xa0,0x62,0x00,0x09,0x90,0xc4,0x00,0xd5, 0x00,0x00,0x00,0x00,0x00,0xa4,0x10,0x2b,0x14,0x40,0xff,0xf8,0x24,0x63,0x00,0x03, 0x25,0x4a,0x00,0x01,0x2d,0x42,0x00,0x08,0xad,0x28,0x00,0x04,0x25,0x6b,0x00,0x04, 
0x14,0x40,0xff,0xbf,0x25,0x29,0x00,0xec,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 0x90,0x82,0x00,0x05,0x08,0x00,0x21,0x0d,0xa0,0x82,0x00,0x08,0x97,0x85,0x8b,0x7a, 0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x01,0x27,0xbd,0xff,0xe8,0x34,0x63,0x00,0x20, 0x24,0x42,0x84,0xfc,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,0xac,0x62,0x00,0x00, 0x30,0x90,0x00,0xff,0x00,0x05,0x28,0x42,0x00,0x00,0x48,0x21,0x27,0x8f,0xb4,0xa4, 0x00,0x00,0x50,0x21,0x00,0x00,0x58,0x21,0x27,0x98,0xb5,0x84,0x27,0x99,0xb5,0x80, 0x27,0x8e,0xb5,0x7e,0x27,0x8c,0xb4,0xa8,0x27,0x8d,0xb5,0x00,0x27,0x88,0xb5,0x78, 0x00,0x0a,0x18,0x80,0x01,0x6f,0x10,0x21,0xac,0x40,0x00,0x00,0xac,0x45,0x00,0x58, 0x00,0x6e,0x20,0x21,0x00,0x78,0x10,0x21,0xa1,0x00,0xff,0xfc,0xad,0x00,0x00,0x00, 0xa1,0x00,0x00,0x04,0xa1,0x00,0x00,0x05,0xad,0x00,0xff,0xf8,0x00,0x79,0x18,0x21, 0x24,0x06,0x00,0x01,0x24,0xc6,0xff,0xff,0xa0,0x80,0x00,0x00,0xa4,0x60,0x00,0x00, 0xac,0x40,0x00,0x00,0x24,0x63,0x00,0x02,0x24,0x42,0x00,0x04,0x04,0xc1,0xff,0xf9, 0x24,0x84,0x00,0x01,0x00,0x0a,0x10,0x80,0x00,0x4d,0x20,0x21,0x00,0x00,0x30,0x21, 0x00,0x4c,0x18,0x21,0x27,0x87,0x81,0x64,0x8c,0xe2,0x00,0x00,0x24,0xe7,0x00,0x04, 0xac,0x82,0x00,0x00,0xa0,0x66,0x00,0x00,0xa0,0x66,0x00,0x01,0x24,0xc6,0x00,0x01, 0x28,0xc2,0x00,0x1c,0xa0,0x60,0x00,0x02,0x24,0x84,0x00,0x04,0x14,0x40,0xff,0xf6, 0x24,0x63,0x00,0x03,0x25,0x29,0x00,0x01,0x29,0x22,0x00,0x08,0x25,0x4a,0x00,0x3b, 0x25,0x08,0x00,0xec,0x14,0x40,0xff,0xd6,0x25,0x6b,0x00,0xec,0xa7,0x80,0x81,0x60, 0x00,0x00,0x48,0x21,0x27,0x83,0xb4,0x50,0xac,0x69,0x00,0x00,0x25,0x29,0x00,0x01, 0x29,0x22,0x00,0x0c,0x14,0x40,0xff,0xfc,0x24,0x63,0x00,0x04,0x0c,0x00,0x20,0xd2, 0x00,0x00,0x00,0x00,0x2e,0x04,0x00,0x14,0x27,0x83,0xb4,0xa0,0x24,0x09,0x00,0x07, 0x10,0x80,0x00,0x0a,0x00,0x00,0x00,0x00,0x90,0x62,0x00,0xd5,0x25,0x29,0xff,0xff, 0xa0,0x62,0x00,0x00,0x05,0x21,0xff,0xfa,0x24,0x63,0x00,0xec,0x8f,0xbf,0x00,0x14, 0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x90,0x62,0x00,0xd6, 
0x08,0x00,0x21,0x90,0x25,0x29,0xff,0xff,0x30,0x84,0x00,0xff,0x00,0x04,0x11,0x00, 0x00,0x44,0x10,0x23,0x00,0x02,0x10,0x80,0x00,0x44,0x10,0x23,0x00,0x02,0x10,0x80, 0x27,0x83,0xb4,0xa0,0x00,0x43,0x60,0x21,0x3c,0x04,0xb0,0x03,0x3c,0x02,0x80,0x01, 0x34,0x84,0x00,0x20,0x24,0x42,0x86,0x68,0x30,0xc6,0x00,0xff,0x93,0xaa,0x00,0x13, 0x30,0xa5,0x00,0xff,0x30,0xe7,0x00,0xff,0xac,0x82,0x00,0x00,0x10,0xc0,0x00,0xe8, 0x25,0x8f,0x00,0xd0,0x91,0x82,0x00,0x00,0x00,0x00,0x00,0x00,0x24,0x42,0xff,0xfc, 0x2c,0x43,0x00,0x18,0x10,0x60,0x00,0xc7,0x3c,0x03,0x80,0x01,0x00,0x02,0x10,0x80, 0x24,0x63,0x02,0x90,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x30,0x14,0x40,0x00,0x1c, 0x00,0x00,0x00,0x00,0x10,0xa0,0x00,0x17,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0x10,0xa2,0x00,0x11,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x0c, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x06,0x00,0x00,0x00,0x00, 0x8d,0x82,0x00,0xd0,0x00,0x00,0x00,0x00,0x24,0x42,0xff,0xe0,0x03,0xe0,0x00,0x08, 0xad,0x82,0x00,0xd0,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xe8, 0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb, 0x24,0x42,0x00,0x01,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb,0x24,0x42,0x00,0x02, 0x10,0xa0,0xff,0xf9,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0x00,0x0a, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xe9,0x00,0x00,0x00,0x00, 0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0xe6,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0, 0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xd0,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb, 0x24,0x42,0xff,0xfc,0x10,0xa0,0xff,0xeb,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0x10,0xa2,0xff,0xe5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xe0, 0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0xdb,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0, 0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xf8,0x2d,0x42,0x00,0x19,0x14,0x40,0xff,0xc5, 
0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xdb,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0x10,0xa2,0xff,0xd5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xd0, 0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0xf1,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0, 0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xf0,0x2d,0x42,0x00,0x1b,0x10,0x40,0xff,0xf1, 0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xcb,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0x10,0xa2,0xff,0xc5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x14,0xa2,0xff,0xb5, 0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb,0x24,0x42,0xff,0xf4, 0x2d,0x42,0x00,0x1e,0x10,0x40,0xff,0xe3,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xbd, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xb5,0x24,0x02,0x00,0x02, 0x10,0xa2,0xff,0xd6,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xc6,0x24,0x02,0x00,0x03, 0x2d,0x42,0x00,0x23,0x10,0x40,0xff,0xd7,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xae, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xa9,0x24,0x02,0x00,0x02, 0x14,0xa2,0xff,0xb7,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0x03,0x00,0x00,0x00,0x00, 0x2d,0x42,0x00,0x25,0x10,0x40,0xff,0xcb,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xd8, 0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x16,0x14,0x40,0x00,0x0e,0x00,0x00,0x00,0x00, 0x10,0xa0,0xff,0xa0,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x9a, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x95,0x24,0x02,0x00,0x03, 0x14,0xa2,0xff,0xb6,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,0x08,0x00,0x21,0xcb, 0x24,0x42,0xff,0xfa,0x10,0xa0,0xff,0x93,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0x10,0xa2,0xff,0x8d,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x88, 0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xf3,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x17, 0x14,0x40,0xff,0xac,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0x34,0x00,0x00,0x00,0x00, 0x2d,0x42,0x00,0x19,0x10,0x40,0xff,0xe2,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0x81, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x7b,0x00,0x00,0x00,0x00, 
0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x76,0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0x97, 0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xc8,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0x51, 0x2d,0x42,0x00,0x1b,0x2d,0x42,0x00,0x1e,0x10,0x40,0xff,0xde,0x00,0x00,0x00,0x00, 0x10,0xa0,0xff,0x70,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x6a, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x65,0x24,0x02,0x00,0x03, 0x10,0xa2,0xff,0x96,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xc8,0x00,0x00,0x00,0x00, 0x2d,0x42,0x00,0x23,0x14,0x40,0xff,0xf2,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xf9, 0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xf7,0x2d,0x42,0x00,0x25,0x08,0x00,0x22,0x2d, 0x2d,0x42,0x00,0x27,0x10,0xa0,0xff,0x5b,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0x10,0xa2,0xff,0x55,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x50, 0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0x71,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xe6, 0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x27,0x14,0x40,0xff,0xad,0x00,0x00,0x00,0x00, 0x08,0x00,0x22,0x79,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x2a,0x14,0x40,0xff,0xd8, 0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xe9,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x2c, 0x14,0x40,0xff,0x78,0x00,0x00,0x00,0x00,0x08,0x00,0x21,0xbd,0x00,0x00,0x00,0x00, 0x91,0x86,0x00,0x00,0x91,0x83,0x00,0xd4,0x25,0x8d,0x00,0x5c,0x30,0xc4,0x00,0xff, 0x00,0x04,0x10,0x40,0x00,0x44,0x10,0x21,0x00,0x04,0x48,0x80,0x01,0x82,0x58,0x21, 0x01,0x89,0x40,0x21,0x25,0x78,0x00,0x08,0x10,0x60,0x00,0x37,0x25,0x0e,0x00,0x60, 0x2c,0xa2,0x00,0x03,0x14,0x40,0x00,0x25,0x00,0x00,0x00,0x00,0x91,0x82,0x00,0xdd, 0x00,0x00,0x00,0x00,0x14,0x40,0x00,0x1e,0x00,0x00,0x00,0x00,0x27,0x87,0x81,0x64, 0x01,0x27,0x10,0x21,0x8c,0x43,0x00,0x00,0x00,0x00,0x00,0x00,0xad,0x03,0x00,0x60, 0x91,0x62,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x40,0x30,0x21,0xa1,0x82,0x00,0x00, 0x30,0xc2,0x00,0xff,0x00,0x02,0x10,0x80,0x00,0x47,0x10,0x21,0x8c,0x43,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x03,0x18,0x42,0xad,0xa3,0x00,0x00,0x91,0x84,0x00,0x00, 
0x8d,0xc5,0x00,0x00,0x00,0x04,0x20,0x80,0x00,0x87,0x10,0x21,0x8c,0x43,0x00,0x00, 0x00,0x05,0x28,0x40,0x00,0x8c,0x20,0x21,0x00,0x03,0x18,0x80,0x00,0xa3,0x10,0x2b, 0x00,0x62,0x28,0x0a,0xac,0x85,0x00,0x60,0x03,0xe0,0x00,0x08,0xa1,0x80,0x00,0xd4, 0x27,0x87,0x81,0x64,0x08,0x00,0x22,0xb0,0xa1,0x80,0x00,0xdd,0x27,0x82,0x81,0xd4, 0x8d,0x83,0x00,0xd8,0x00,0x82,0x10,0x21,0x90,0x44,0x00,0x00,0x24,0x63,0x00,0x01, 0x00,0x64,0x20,0x2b,0x14,0x80,0xff,0x02,0xad,0x83,0x00,0xd8,0x8d,0x02,0x00,0x60, 0xa1,0x80,0x00,0xd4,0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43, 0x03,0xe0,0x00,0x08,0xad,0x82,0x00,0x5c,0x10,0xe0,0x00,0x1d,0x24,0x83,0xff,0xfc, 0x2c,0x62,0x00,0x18,0x10,0x40,0x01,0x10,0x00,0x03,0x10,0x80,0x3c,0x03,0x80,0x01, 0x24,0x63,0x02,0xf0,0x00,0x43,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x80,0x00,0x08,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x30,0x14,0x40,0x00,0x65, 0x00,0x00,0x00,0x00,0x10,0xa0,0x00,0x60,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0x10,0xa2,0x00,0x5a,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x08, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x51,0x00,0x00,0x00,0x00, 0x8d,0x82,0x00,0xd0,0x00,0x00,0x00,0x00,0x24,0x42,0xff,0xe0,0xad,0x82,0x00,0xd0, 0x8d,0xe3,0x00,0x00,0x8d,0xa2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x21, 0xad,0xa2,0x00,0x00,0xad,0xe0,0x00,0x00,0x8d,0xa3,0x00,0x00,0x8d,0xc4,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x83,0x10,0x2a,0x10,0x40,0x00,0x22,0x00,0x00,0x00,0x00, 0x93,0x05,0x00,0x01,0x91,0x82,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x45,0x00,0x05, 0x24,0x02,0x00,0x01,0xa1,0x85,0x00,0x00,0xa1,0x82,0x00,0xd4,0x03,0xe0,0x00,0x08, 0xad,0x80,0x00,0xd8,0x91,0x82,0x00,0xdd,0x24,0x03,0x00,0x01,0x10,0x43,0x00,0x05, 0x00,0x00,0x00,0x00,0xa1,0x83,0x00,0xd4,0xad,0x80,0x00,0xd8,0x03,0xe0,0x00,0x08, 0xa1,0x83,0x00,0xdd,0x00,0x04,0x17,0xc2,0x00,0x82,0x10,0x21,0x00,0x02,0x10,0x43, 0xad,0xa2,0x00,0x00,0x91,0x83,0x00,0x00,0x27,0x82,0x81,0x64,0x8d,0xc5,0x00,0x00, 
0x00,0x03,0x18,0x80,0x00,0x62,0x18,0x21,0x8c,0x64,0x00,0x00,0x00,0x05,0x28,0x40, 0x00,0x04,0x18,0x80,0x00,0xa3,0x10,0x2b,0x00,0x62,0x28,0x0a,0x08,0x00,0x22,0xc2, 0xad,0xc5,0x00,0x00,0x97,0x82,0x8b,0x7c,0x00,0x00,0x00,0x00,0x00,0x62,0x10,0x2a, 0x10,0x40,0xfe,0xab,0x00,0x00,0x00,0x00,0x91,0x82,0x00,0xdd,0x00,0x00,0x00,0x00, 0x14,0x40,0x00,0x15,0x00,0x00,0x00,0x00,0x91,0x83,0x00,0x00,0x27,0x82,0x81,0x64, 0x00,0x03,0x18,0x80,0x00,0x62,0x10,0x21,0x8c,0x44,0x00,0x00,0x00,0x6c,0x18,0x21, 0xac,0x64,0x00,0x60,0x93,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x10,0x80, 0x01,0x82,0x10,0x21,0x24,0x4e,0x00,0x60,0xa1,0x85,0x00,0x00,0x8d,0xc2,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x02,0x1f,0xc2,0x00,0x43,0x10,0x21,0x00,0x02,0x10,0x43, 0x03,0xe0,0x00,0x08,0xad,0xa2,0x00,0x00,0x08,0x00,0x23,0x37,0xa1,0x80,0x00,0xdd, 0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xe8,0x8d,0x82,0x00,0xd0, 0x08,0x00,0x22,0xf3,0x24,0x42,0x00,0x01,0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3, 0x24,0x42,0x00,0x02,0x10,0xa0,0xff,0xf9,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0x10,0xa2,0x00,0x0a,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xa0, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0x9d,0x00,0x00,0x00,0x00, 0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xd0,0x8d,0x82,0x00,0xd0, 0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xfc,0x10,0xa0,0xff,0xeb,0x00,0x00,0x00,0x00, 0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xe5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02, 0x10,0xa2,0xff,0x93,0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0xdd,0x00,0x00,0x00,0x00, 0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xf8,0x2d,0x42,0x00,0x19, 0x14,0x40,0xff,0x7c,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xdb,0x00,0x00,0x00,0x00, 0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xd5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02, 0x10,0xa2,0xff,0x83,0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0xf1,0x00,0x00,0x00,0x00, 0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xf0,0x2d,0x42,0x00,0x1b, 
0x10,0x40,0xff,0xf1,0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xcb,0x00,0x00,0x00,0x00, 0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0xc5,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02, 0x14,0xa2,0xff,0x6c,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0,0x08,0x00,0x22,0xf3, 0x24,0x42,0xff,0xf4,0x2d,0x42,0x00,0x1e,0x10,0x40,0xff,0xe3,0x00,0x00,0x00,0x00, 0x10,0xa0,0xff,0xbd,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x68, 0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0xd6,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0xee, 0x24,0x02,0x00,0x03,0x2d,0x42,0x00,0x23,0x10,0x40,0xff,0xd7,0x00,0x00,0x00,0x00, 0x10,0xa0,0xff,0xae,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x5c, 0x24,0x02,0x00,0x02,0x14,0xa2,0xff,0xb7,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0x74, 0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x25,0x10,0x40,0xff,0xcb,0x00,0x00,0x00,0x00, 0x08,0x00,0x23,0x49,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x16,0x14,0x40,0x00,0x0e, 0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0xa0,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 0x10,0xa2,0xff,0x9a,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x48, 0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0xb6,0x00,0x00,0x00,0x00,0x8d,0x82,0x00,0xd0, 0x08,0x00,0x22,0xf3,0x24,0x42,0xff,0xfa,0x10,0xa0,0xff,0x93,0x00,0x00,0x00,0x00, 0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x8d,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02, 0x10,0xa2,0xff,0x3b,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0x64,0x00,0x00,0x00,0x00, 0x2d,0x42,0x00,0x17,0x14,0x40,0xff,0xac,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0xa5, 0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x19,0x10,0x40,0xff,0xe2,0x00,0x00,0x00,0x00, 0x10,0xa0,0xff,0x81,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x7b, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x29,0x24,0x02,0x00,0x03, 0x10,0xa2,0xff,0x97,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0xf0,0x00,0x00,0x00,0x00, 0x08,0x00,0x23,0xc2,0x2d,0x42,0x00,0x1b,0x2d,0x42,0x00,0x1e,0x10,0x40,0xff,0xde, 0x00,0x00,0x00,0x00,0x10,0xa0,0xff,0x70,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01, 
0x10,0xa2,0xff,0x6a,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0xa2,0xff,0x18, 0x24,0x02,0x00,0x03,0x10,0xa2,0xff,0x96,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0xf0, 0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x23,0x14,0x40,0xff,0xf2,0x00,0x00,0x00,0x00, 0x08,0x00,0x23,0x6a,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0x68,0x2d,0x42,0x00,0x25, 0x08,0x00,0x23,0x9e,0x2d,0x42,0x00,0x27,0x10,0xa0,0xff,0x5b,0x00,0x00,0x00,0x00, 0x24,0x02,0x00,0x01,0x10,0xa2,0xff,0x55,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02, 0x10,0xa2,0xff,0x03,0x24,0x02,0x00,0x03,0x14,0xa2,0xff,0x71,0x00,0x00,0x00,0x00, 0x08,0x00,0x23,0x57,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x27,0x14,0x40,0xff,0xad, 0x00,0x00,0x00,0x00,0x08,0x00,0x23,0xea,0x00,0x00,0x00,0x00,0x2d,0x42,0x00,0x2a, 0x14,0x40,0xff,0xd8,0x00,0x00,0x00,0x00,0x08,0x00,0x23,0x5a,0x00,0x00,0x00,0x00, 0x2d,0x42,0x00,0x2c,0x14,0x40,0xff,0x78,0x00,0x00,0x00,0x00,0x08,0x00,0x22,0xe5, 0x00,0x00,0x00,0x00,0x27,0xbd,0xff,0xe8,0x3c,0x02,0xb0,0x03,0xaf,0xbf,0x00,0x14, 0xaf,0xb0,0x00,0x10,0x34,0x42,0x01,0x18,0x3c,0x03,0xb0,0x03,0x8c,0x50,0x00,0x00, 0x34,0x63,0x01,0x2c,0x90,0x62,0x00,0x00,0x32,0x05,0x00,0x01,0xa3,0x82,0x80,0x10, 0x14,0xa0,0x00,0x14,0x30,0x44,0x00,0xff,0x32,0x02,0x01,0x00,0x14,0x40,0x00,0x09, 0x00,0x00,0x00,0x00,0x32,0x02,0x08,0x00,0x10,0x40,0x00,0x02,0x24,0x02,0x00,0x01, 0xa3,0x82,0xbc,0x18,0x8f,0xbf,0x00,0x14,0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x18,0x0c,0x00,0x05,0x37,0x00,0x00,0x00,0x00,0x26,0x02,0xff,0x00, 0xa3,0x80,0xbc,0x18,0x3c,0x01,0xb0,0x03,0xac,0x22,0x01,0x18,0x08,0x00,0x24,0x16, 0x32,0x02,0x08,0x00,0x0c,0x00,0x21,0x3f,0x00,0x00,0x00,0x00,0x26,0x02,0xff,0xff, 0x3c,0x01,0xb0,0x03,0xac,0x22,0x01,0x18,0x08,0x00,0x24,0x13,0x32,0x02,0x01,0x00, 0x27,0xbd,0xff,0xe0,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0xd0,0xaf,0xbf,0x00,0x18, 0x8c,0x43,0x00,0x00,0x3c,0x02,0x00,0x40,0x24,0x07,0x0f,0xff,0x00,0x03,0x33,0x02, 0x00,0x03,0x2d,0x02,0x00,0x03,0x43,0x02,0x30,0x69,0x0f,0xff,0x00,0x62,0x18,0x24, 
0x30,0xa5,0x00,0x03,0x30,0xc6,0x00,0xff,0x10,0x60,0x00,0x08,0x31,0x08,0x00,0xff, 0x01,0x00,0x30,0x21,0x0c,0x00,0x24,0xdf,0xaf,0xa9,0x00,0x10,0x8f,0xbf,0x00,0x18, 0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x0c,0x00,0x25,0x31, 0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0xd4,0x08,0x00,0x24,0x3f, 0xac,0x62,0x00,0x00,0x27,0xbd,0xff,0xc0,0x3c,0x02,0xb0,0x03,0xaf,0xbe,0x00,0x38, 0xaf,0xb5,0x00,0x2c,0xaf,0xb1,0x00,0x1c,0xaf,0xb0,0x00,0x18,0xaf,0xbf,0x00,0x3c, 0xaf,0xb7,0x00,0x34,0xaf,0xb6,0x00,0x30,0xaf,0xb4,0x00,0x28,0xaf,0xb3,0x00,0x24, 0xaf,0xb2,0x00,0x20,0x34,0x42,0x00,0x3f,0x90,0x43,0x00,0x00,0x00,0x80,0x80,0x21, 0x00,0x00,0xf0,0x21,0x00,0x00,0x88,0x21,0x10,0x60,0x00,0x76,0x00,0x00,0xa8,0x21, 0x3c,0x01,0xb0,0x03,0xa0,0x20,0x00,0x3f,0x00,0x10,0x12,0x02,0x24,0x04,0x06,0x14, 0x0c,0x00,0x06,0xd1,0x30,0x54,0x00,0x0f,0x24,0x04,0x06,0x14,0x0c,0x00,0x06,0xd1, 0xaf,0xa2,0x00,0x10,0x3c,0x03,0x00,0xff,0x34,0x63,0xff,0xff,0x32,0x10,0x00,0x7f, 0x00,0x43,0x10,0x24,0x00,0x10,0x86,0x00,0x02,0x02,0x80,0x25,0x02,0x00,0x28,0x21, 0x24,0x04,0x06,0x14,0x3c,0x13,0xbf,0xff,0x0c,0x00,0x06,0xbf,0x3c,0x16,0xb0,0x03, 0x00,0x00,0x90,0x21,0x3c,0x17,0x40,0x00,0x36,0x73,0xff,0xff,0x36,0xd6,0x00,0x3e, 0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x00,0x00,0x57,0x10,0x25,0x00,0x40,0x28,0x21, 0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x00,0x00,0x00,0x80,0x21,0x0c,0x00,0x25,0xf9, 0x00,0x00,0x00,0x00,0x26,0x03,0x00,0x01,0x10,0x40,0x00,0x46,0x30,0x70,0x00,0xff, 0x12,0x00,0xff,0xfa,0x00,0x00,0x00,0x00,0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0x00, 0x00,0x53,0x10,0x24,0x00,0x40,0x28,0x21,0x0c,0x00,0x06,0xbf,0x24,0x04,0x04,0x00, 0x24,0x02,0x00,0x01,0x12,0x82,0x00,0x37,0x00,0x00,0x00,0x00,0x12,0x80,0x00,0x35, 0x00,0x00,0x00,0x00,0x32,0x31,0x00,0x7f,0x12,0x20,0x00,0x04,0x24,0x03,0x00,0x04, 0x27,0xc2,0x00,0x01,0x30,0x5e,0x00,0xff,0x02,0xb1,0xa8,0x21,0x12,0x43,0x00,0x2a, 0x3c,0x03,0xb0,0x03,0x02,0x43,0x10,0x21,0xa0,0x51,0x00,0x34,0x26,0x42,0x00,0x01, 
0x30,0x52,0x00,0xff,0x2e,0x43,0x00,0x05,0x14,0x60,0xff,0xd9,0x00,0x00,0x00,0x00, 0x8f,0xa5,0x00,0x10,0x0c,0x00,0x06,0xbf,0x24,0x04,0x06,0x14,0x12,0xa0,0x00,0x0e, 0x3c,0x02,0xb0,0x03,0x13,0xc0,0x00,0x0d,0x34,0x42,0x00,0x3c,0x00,0x15,0x10,0x40, 0x00,0x55,0x10,0x21,0x00,0x02,0x10,0xc0,0x00,0x55,0x10,0x21,0x00,0x02,0xa8,0x80, 0x02,0xbe,0x00,0x1b,0x17,0xc0,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0d, 0x00,0x00,0xa8,0x12,0x3c,0x02,0xb0,0x03,0x34,0x42,0x00,0x3c,0x3c,0x03,0xb0,0x03, 0x3c,0x04,0xb0,0x03,0xa4,0x55,0x00,0x00,0x34,0x63,0x00,0x1c,0x34,0x84,0x00,0x1d, 0x24,0x02,0x00,0x01,0xa0,0x60,0x00,0x00,0xa0,0x82,0x00,0x00,0x7b,0xbe,0x01,0xfc, 0x7b,0xb6,0x01,0xbc,0x7b,0xb4,0x01,0x7c,0x7b,0xb2,0x01,0x3c,0x7b,0xb0,0x00,0xfc, 0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x40,0xa2,0xd1,0x00,0x00,0x08,0x00,0x24,0x98, 0x26,0x42,0x00,0x01,0x0c,0x00,0x06,0xd1,0x24,0x04,0x04,0xfc,0x08,0x00,0x24,0x8d, 0x00,0x40,0x88,0x21,0x3c,0x03,0xb0,0x03,0x34,0x63,0x00,0x3c,0x3c,0x04,0xb0,0x03, 0x3c,0x05,0xb0,0x03,0xa4,0x60,0x00,0x00,0x34,0x84,0x00,0x1c,0x34,0xa5,0x00,0x1d, 0x24,0x02,0x00,0x02,0x24,0x03,0x00,0x01,0xa0,0x82,0x00,0x00,0x08,0x00,0x24,0xb7, 0xa0,0xa3,0x00,0x00,0x0c,0x00,0x17,0x99,0x00,0x00,0x00,0x00,0x10,0x40,0xff,0x8b, 0x00,0x10,0x12,0x02,0x3c,0x02,0xb0,0x03,0x3c,0x04,0xb0,0x03,0x34,0x42,0x00,0x3c, 0x34,0x84,0x00,0x14,0x24,0x03,0x00,0x01,0xa4,0x40,0x00,0x00,0x3c,0x01,0xb0,0x03, 0xa0,0x23,0x00,0x3f,0x08,0x00,0x24,0xb7,0xac,0x90,0x00,0x00,0x27,0xbd,0xff,0xd8, 0xaf,0xb0,0x00,0x10,0x30,0xd0,0x00,0xff,0x2e,0x02,0x00,0x2e,0xaf,0xb2,0x00,0x18, 0xaf,0xb1,0x00,0x14,0xaf,0xbf,0x00,0x20,0xaf,0xb3,0x00,0x1c,0x30,0xb1,0x00,0xff, 0x14,0x40,0x00,0x06,0x00,0x80,0x90,0x21,0x8f,0xbf,0x00,0x20,0x7b,0xb2,0x00,0xfc, 0x7b,0xb0,0x00,0xbc,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28,0x2e,0x13,0x00,0x10, 0x24,0x05,0x00,0x14,0x0c,0x00,0x13,0xa4,0x24,0x06,0x01,0x07,0x12,0x60,0x00,0x38, 0x02,0x00,0x30,0x21,0x8f,0xa2,0x00,0x38,0x30,0xc3,0x00,0x3f,0x3c,0x04,0xb0,0x09, 
0x00,0x02,0x14,0x00,0x00,0x43,0x30,0x25,0x34,0x84,0x01,0x60,0x90,0x82,0x00,0x00, 0x00,0x00,0x00,0x00,0x14,0x40,0xff,0xfd,0x24,0x02,0x00,0x01,0x12,0x22,0x00,0x2a, 0x2a,0x22,0x00,0x02,0x14,0x40,0x00,0x24,0x24,0x02,0x00,0x02,0x12,0x22,0x00,0x20, 0x24,0x02,0x00,0x03,0x12,0x22,0x00,0x19,0x00,0x00,0x00,0x00,0x16,0x60,0xff,0xe2, 0x24,0x02,0x00,0x01,0x12,0x22,0x00,0x13,0x2a,0x22,0x00,0x02,0x14,0x40,0x00,0x0d, 0x24,0x02,0x00,0x02,0x12,0x22,0x00,0x09,0x24,0x02,0x00,0x03,0x16,0x22,0xff,0xda, 0x00,0x00,0x00,0x00,0x24,0x04,0x08,0x4c,0x24,0x05,0xff,0xff,0x0c,0x00,0x13,0x5f, 0x3c,0x06,0x0c,0xb8,0x08,0x00,0x24,0xea,0x00,0x00,0x00,0x00,0x08,0x00,0x25,0x12, 0x24,0x04,0x08,0x48,0x16,0x20,0xff,0xd0,0x00,0x00,0x00,0x00,0x08,0x00,0x25,0x12, 0x24,0x04,0x08,0x40,0x08,0x00,0x25,0x12,0x24,0x04,0x08,0x44,0x24,0x04,0x08,0x4c, 0x0c,0x00,0x13,0x5f,0x24,0x05,0xff,0xff,0x08,0x00,0x25,0x07,0x00,0x00,0x00,0x00, 0x08,0x00,0x25,0x20,0x24,0x04,0x08,0x48,0x16,0x20,0xff,0xe0,0x00,0x00,0x00,0x00, 0x08,0x00,0x25,0x20,0x24,0x04,0x08,0x40,0x08,0x00,0x25,0x20,0x24,0x04,0x08,0x44, 0x02,0x40,0x20,0x21,0x0c,0x00,0x25,0x71,0x02,0x20,0x28,0x21,0x08,0x00,0x24,0xf5, 0x00,0x40,0x30,0x21,0x27,0xbd,0xff,0xd8,0x2c,0xc2,0x00,0x2e,0xaf,0xb2,0x00,0x18, 0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x20,0xaf,0xb3,0x00,0x1c, 0x00,0xc0,0x80,0x21,0x30,0xb1,0x00,0xff,0x00,0x80,0x90,0x21,0x14,0x40,0x00,0x07, 0x00,0x00,0x18,0x21,0x8f,0xbf,0x00,0x20,0x7b,0xb2,0x00,0xfc,0x7b,0xb0,0x00,0xbc, 0x00,0x60,0x10,0x21,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x28,0x2e,0x13,0x00,0x10, 0x24,0x05,0x00,0x14,0x0c,0x00,0x13,0xa4,0x24,0x06,0x01,0x07,0x12,0x60,0x00,0x24, 0x02,0x00,0x30,0x21,0x3c,0x03,0xb0,0x09,0x34,0x63,0x01,0x60,0x90,0x62,0x00,0x00, 0x00,0x00,0x00,0x00,0x14,0x40,0xff,0xfd,0x30,0xc5,0x00,0x3f,0x0c,0x00,0x25,0xae, 0x02,0x20,0x20,0x21,0x16,0x60,0x00,0x0a,0x00,0x40,0x80,0x21,0x24,0x02,0x00,0x01, 0x12,0x22,0x00,0x15,0x2a,0x22,0x00,0x02,0x14,0x40,0x00,0x0f,0x24,0x02,0x00,0x02, 
0x12,0x22,0x00,0x0b,0x24,0x02,0x00,0x03,0x12,0x22,0x00,0x03,0x00,0x00,0x00,0x00, 0x08,0x00,0x25,0x3d,0x02,0x00,0x18,0x21,0x24,0x04,0x08,0x4c,0x24,0x05,0xff,0xff, 0x0c,0x00,0x13,0x5f,0x3c,0x06,0x0c,0xb8,0x08,0x00,0x25,0x3d,0x02,0x00,0x18,0x21, 0x08,0x00,0x25,0x5f,0x24,0x04,0x08,0x48,0x16,0x20,0xff,0xf5,0x00,0x00,0x00,0x00, 0x08,0x00,0x25,0x5f,0x24,0x04,0x08,0x40,0x08,0x00,0x25,0x5f,0x24,0x04,0x08,0x44, 0x02,0x40,0x20,0x21,0x0c,0x00,0x25,0x71,0x02,0x20,0x28,0x21,0x08,0x00,0x25,0x49, 0x00,0x40,0x30,0x21,0x27,0xbd,0xff,0xe8,0x2c,0xc2,0x00,0x1f,0xaf,0xb0,0x00,0x10, 0xaf,0xbf,0x00,0x14,0x00,0xc0,0x80,0x21,0x14,0x40,0x00,0x1d,0x30,0xa5,0x00,0xff, 0x24,0x02,0x00,0x01,0x10,0xa2,0x00,0x18,0x28,0xa2,0x00,0x02,0x14,0x40,0x00,0x12, 0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x0e,0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x07, 0x24,0x04,0x08,0x4c,0x26,0x10,0xff,0xe2,0x02,0x00,0x10,0x21,0x8f,0xbf,0x00,0x14, 0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x24,0x05,0xff,0xff, 0x0c,0x00,0x13,0x5f,0x3c,0x06,0x0d,0xf8,0x08,0x00,0x25,0x82,0x26,0x10,0xff,0xe2, 0x08,0x00,0x25,0x87,0x24,0x04,0x08,0x48,0x14,0xa0,0xff,0xf2,0x24,0x04,0x08,0x40, 0x08,0x00,0x25,0x88,0x24,0x05,0xff,0xff,0x08,0x00,0x25,0x87,0x24,0x04,0x08,0x44, 0x2c,0xc2,0x00,0x10,0x14,0x40,0xff,0xec,0x24,0x02,0x00,0x01,0x10,0xa2,0x00,0x14, 0x28,0xa2,0x00,0x02,0x14,0x40,0x00,0x0e,0x24,0x02,0x00,0x02,0x10,0xa2,0x00,0x0a, 0x24,0x02,0x00,0x03,0x10,0xa2,0x00,0x03,0x24,0x04,0x08,0x4c,0x08,0x00,0x25,0x82, 0x26,0x10,0xff,0xf1,0x24,0x05,0xff,0xff,0x0c,0x00,0x13,0x5f,0x3c,0x06,0x0d,0xb8, 0x08,0x00,0x25,0x82,0x26,0x10,0xff,0xf1,0x08,0x00,0x25,0xa1,0x24,0x04,0x08,0x48, 0x14,0xa0,0xff,0xf6,0x24,0x04,0x08,0x40,0x08,0x00,0x25,0xa2,0x24,0x05,0xff,0xff, 0x08,0x00,0x25,0xa1,0x24,0x04,0x08,0x44,0x27,0xbd,0xff,0xe8,0x30,0x84,0x00,0xff, 0x24,0x02,0x00,0x01,0x10,0x82,0x00,0x39,0xaf,0xbf,0x00,0x10,0x28,0x82,0x00,0x02, 0x14,0x40,0x00,0x27,0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x02,0x10,0x82,0x00,0x17, 
0x00,0xa0,0x30,0x21,0x24,0x02,0x00,0x03,0x10,0x82,0x00,0x05,0x24,0x04,0x08,0x3c, 0x8f,0xbf,0x00,0x10,0x00,0x00,0x00,0x00,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18, 0x0c,0x00,0x13,0x5f,0x3c,0x05,0x3f,0x00,0x24,0x04,0x08,0x3c,0x3c,0x05,0x80,0x00, 0x0c,0x00,0x13,0x5f,0x00,0x00,0x30,0x21,0x24,0x04,0x08,0x3c,0x3c,0x05,0x80,0x00, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01,0x24,0x04,0x08,0xac,0x0c,0x00,0x13,0x41, 0x24,0x05,0x0f,0xff,0x08,0x00,0x25,0xbc,0x00,0x00,0x00,0x00,0x24,0x04,0x08,0x34, 0x0c,0x00,0x13,0x5f,0x3c,0x05,0x3f,0x00,0x24,0x04,0x08,0x34,0x3c,0x05,0x80,0x00, 0x0c,0x00,0x13,0x5f,0x00,0x00,0x30,0x21,0x24,0x04,0x08,0x34,0x3c,0x05,0x80,0x00, 0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01,0x08,0x00,0x25,0xcb,0x24,0x04,0x08,0xa8, 0x14,0x80,0xff,0xdf,0x00,0xa0,0x30,0x21,0x24,0x04,0x08,0x24,0x0c,0x00,0x13,0x5f, 0x3c,0x05,0x3f,0x00,0x24,0x04,0x08,0x24,0x3c,0x05,0x80,0x00,0x0c,0x00,0x13,0x5f, 0x00,0x00,0x30,0x21,0x24,0x04,0x08,0x24,0x3c,0x05,0x80,0x00,0x0c,0x00,0x13,0x5f, 0x24,0x06,0x00,0x01,0x08,0x00,0x25,0xcb,0x24,0x04,0x08,0xa0,0x00,0xa0,0x30,0x21, 0x24,0x04,0x08,0x2c,0x0c,0x00,0x13,0x5f,0x3c,0x05,0x3f,0x00,0x24,0x04,0x08,0x2c, 0x3c,0x05,0x80,0x00,0x0c,0x00,0x13,0x5f,0x00,0x00,0x30,0x21,0x24,0x04,0x08,0x2c, 0x3c,0x05,0x80,0x00,0x0c,0x00,0x13,0x5f,0x24,0x06,0x00,0x01,0x08,0x00,0x25,0xcb, 0x24,0x04,0x08,0xa4,0x3c,0x05,0x00,0x14,0x3c,0x02,0xb0,0x05,0x34,0x42,0x04,0x20, 0x3c,0x06,0xc0,0x00,0x3c,0x03,0xb0,0x05,0x3c,0x04,0xb0,0x05,0x34,0xa5,0x17,0x09, 0xac,0x45,0x00,0x00,0x34,0xc6,0x05,0x07,0x34,0x63,0x04,0x24,0x34,0x84,0x02,0x28, 0x3c,0x07,0xb0,0x05,0x24,0x02,0x00,0x20,0xac,0x66,0x00,0x00,0x34,0xe7,0x04,0x50, 0xa0,0x82,0x00,0x00,0x90,0xe2,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x03, 0x10,0x40,0xff,0xfc,0x24,0x02,0x00,0x01,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 0x93,0x85,0x81,0xf1,0x24,0x02,0x00,0x01,0x14,0xa2,0x00,0x53,0x00,0x80,0x40,0x21, 0x8c,0x89,0x00,0x04,0x3c,0x02,0xb0,0x01,0x01,0x22,0x30,0x21,0x8c,0xc3,0x00,0x04, 
0x3c,0x02,0x01,0x00,0x00,0x62,0x10,0x24,0x10,0x40,0x00,0x4b,0x30,0x62,0x00,0x08, 0x10,0x45,0x00,0x59,0x00,0x00,0x00,0x00,0x94,0xc2,0x00,0x38,0x24,0x03,0x00,0xb4, 0x30,0x44,0x00,0xff,0x10,0x83,0x00,0x61,0x24,0x02,0x00,0xc4,0x10,0x82,0x00,0x54, 0x24,0x02,0x00,0x94,0x10,0x82,0x00,0x45,0x00,0x00,0x00,0x00,0x94,0xc2,0x00,0x38, 0x00,0x00,0x00,0x00,0x30,0x47,0xff,0xff,0x30,0xe3,0x40,0xff,0x24,0x02,0x40,0x88, 0x14,0x62,0x00,0x39,0x30,0xe3,0x03,0x00,0x24,0x02,0x03,0x00,0x10,0x62,0x00,0x38, 0x00,0x00,0x00,0x00,0x94,0xc2,0x00,0x56,0x00,0x00,0x00,0x00,0x30,0x47,0xff,0xff, 0x30,0xe2,0x00,0x80,0x14,0x40,0x00,0x30,0x3c,0x02,0xb0,0x01,0x01,0x22,0x30,0x21, 0x94,0xc3,0x00,0x60,0x24,0x02,0x00,0x08,0x14,0x43,0x00,0x3b,0x00,0x00,0x00,0x00, 0x90,0xc2,0x00,0x62,0x24,0x03,0x00,0x04,0x00,0x02,0x39,0x02,0x10,0xe3,0x00,0x15, 0x24,0x02,0x00,0x06,0x14,0xe2,0x00,0x34,0x00,0x00,0x00,0x00,0x8d,0x05,0x01,0xac, 0x94,0xc4,0x00,0x66,0x27,0x82,0x89,0x68,0x00,0x05,0x28,0x80,0x30,0x87,0xff,0xff, 0x00,0xa2,0x28,0x21,0x00,0x07,0x1a,0x00,0x8c,0xa4,0x00,0x00,0x00,0x07,0x12,0x02, 0x00,0x43,0x10,0x25,0x24,0x42,0x00,0x5e,0x24,0x03,0xc0,0x00,0x30,0x47,0xff,0xff, 0x00,0x83,0x20,0x24,0x00,0x87,0x20,0x25,0xac,0xa4,0x00,0x00,0x08,0x00,0x26,0x76, 0xad,0x07,0x00,0x10,0x8d,0x05,0x01,0xac,0x94,0xc4,0x00,0x64,0x27,0x82,0x89,0x68, 0x00,0x05,0x28,0x80,0x30,0x87,0xff,0xff,0x00,0xa2,0x28,0x21,0x00,0x07,0x1a,0x00, 0x8c,0xa4,0x00,0x00,0x00,0x07,0x12,0x02,0x00,0x43,0x10,0x25,0x24,0x42,0x00,0x36, 0x3c,0x03,0xff,0xff,0x30,0x47,0xff,0xff,0x00,0x83,0x20,0x24,0x00,0x87,0x20,0x25, 0xac,0xa4,0x00,0x00,0xad,0x07,0x00,0x10,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 0x94,0xc2,0x00,0x50,0x08,0x00,0x26,0x34,0x30,0x47,0xff,0xff,0x8d,0x04,0x01,0xac, 0x27,0x83,0x89,0x68,0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21,0x8c,0x82,0x00,0x00, 0x3c,0x03,0xff,0xff,0x00,0x43,0x10,0x24,0x34,0x42,0x00,0x2e,0xac,0x82,0x00,0x00, 0x24,0x03,0x00,0x2e,0xad,0x03,0x00,0x10,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 
0x8d,0x04,0x01,0xac,0x27,0x83,0x89,0x68,0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21, 0x8c,0x82,0x00,0x00,0x3c,0x03,0xff,0xff,0x00,0x43,0x10,0x24,0x34,0x42,0x00,0x0e, 0x24,0x03,0x00,0x0e,0x08,0x00,0x26,0x75,0xac,0x82,0x00,0x00,0x8d,0x04,0x01,0xac, 0x27,0x83,0x89,0x68,0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21,0x8c,0x82,0x00,0x00, 0x3c,0x03,0xff,0xff,0x00,0x43,0x10,0x24,0x34,0x42,0x00,0x14,0x24,0x03,0x00,0x14, 0x08,0x00,0x26,0x75,0xac,0x82,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 0x30,0xc6,0x00,0xff,0x00,0x06,0x48,0x40,0x01,0x26,0x10,0x21,0x00,0x02,0x10,0x80, 0x27,0x8b,0xbc,0x30,0x27,0x83,0xbc,0x36,0x00,0x4b,0x40,0x21,0x00,0x43,0x10,0x21, 0x94,0x47,0x00,0x00,0x30,0xa2,0x3f,0xff,0x10,0xe2,0x00,0x29,0x30,0x8a,0xff,0xff, 0x95,0x02,0x00,0x02,0x24,0x03,0x00,0x01,0x00,0x02,0x11,0x82,0x30,0x42,0x00,0x01, 0x10,0x43,0x00,0x18,0x00,0x00,0x00,0x00,0x01,0x26,0x10,0x21,0x00,0x02,0x10,0x80, 0x00,0x4b,0x30,0x21,0x94,0xc4,0x00,0x02,0x27,0x83,0xbc,0x36,0x27,0x85,0xbc,0x34, 0x00,0x45,0x28,0x21,0x30,0x84,0xff,0xdf,0x00,0x43,0x10,0x21,0xa4,0xc4,0x00,0x02, 0xa4,0x40,0x00,0x00,0xa4,0xa0,0x00,0x00,0x94,0xc3,0x00,0x02,0x3c,0x04,0xb0,0x01, 0x01,0x44,0x20,0x21,0x30,0x63,0xff,0xbf,0xa4,0xc3,0x00,0x02,0xa0,0xc0,0x00,0x00, 0x8c,0x82,0x00,0x04,0x24,0x03,0xf0,0xff,0x00,0x43,0x10,0x24,0x03,0xe0,0x00,0x08, 0xac,0x82,0x00,0x04,0x24,0x02,0xc0,0x00,0x91,0x04,0x00,0x01,0x00,0xa2,0x10,0x24, 0x00,0x47,0x28,0x25,0x3c,0x03,0xb0,0x01,0x24,0x02,0x00,0x02,0x14,0x82,0xff,0xe2, 0x01,0x43,0x18,0x21,0xac,0x65,0x00,0x00,0x08,0x00,0x26,0xa3,0x01,0x26,0x10,0x21, 0x08,0x00,0x26,0xa3,0x01,0x26,0x10,0x21,0x93,0x83,0x81,0xf1,0x24,0x02,0x00,0x01, 0x14,0x62,0x00,0x0d,0x3c,0x02,0xb0,0x01,0x8c,0x84,0x00,0x04,0x3c,0x06,0xb0,0x09, 0x00,0x82,0x20,0x21,0x8c,0x85,0x00,0x08,0x8c,0x83,0x00,0x04,0x3c,0x02,0x01,0x00, 0x34,0xc6,0x01,0x00,0x00,0x62,0x18,0x24,0x14,0x60,0x00,0x05,0x30,0xa5,0x20,0x00, 0x24,0x02,0x00,0x06,0xa0,0xc2,0x00,0x00,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00, 
0x3c,0x03,0xb0,0x09,0x10,0xa0,0xff,0xfc,0x34,0x63,0x01,0x00,0x24,0x02,0x00,0x0e, 0x08,0x00,0x26,0xd6,0xa0,0x62,0x00,0x00,0x3c,0x02,0xb0,0x01,0x30,0xa5,0xff,0xff, 0x00,0xa2,0x28,0x21,0x8c,0xa3,0x00,0x00,0x3c,0x02,0x10,0x00,0x00,0x80,0x30,0x21, 0x00,0x62,0x18,0x24,0x8c,0xa2,0x00,0x04,0x10,0x60,0x00,0x04,0x00,0x00,0x00,0x00, 0x30,0x42,0x80,0x00,0x10,0x40,0x00,0x13,0x00,0x00,0x00,0x00,0x8c,0xc2,0x01,0xa8, 0x00,0x00,0x00,0x00,0x24,0x44,0x00,0x01,0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x40, 0x00,0x83,0x10,0x0a,0x93,0x83,0x81,0xf0,0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80, 0x00,0x82,0x20,0x23,0x24,0x63,0xff,0xff,0xac,0xc4,0x01,0xa8,0xa3,0x83,0x81,0xf0, 0x8c,0xc4,0x01,0xac,0x8c,0xc2,0x01,0xa8,0x00,0x00,0x00,0x00,0x00,0x44,0x10,0x26, 0x00,0x02,0x10,0x2b,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x3c,0x03,0xb0,0x03, 0x34,0x63,0x00,0x73,0x90,0x62,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x42,0x00,0x01, 0x14,0x40,0x00,0x04,0x00,0x00,0x00,0x00,0xa3,0x80,0x81,0xf1,0x03,0xe0,0x00,0x08, 0x00,0x00,0x00,0x00,0x24,0x02,0x00,0x01,0xa3,0x82,0x81,0xf1,0x03,0xe0,0x00,0x08, 0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x04,0x3c,0x05,0xb0,0x01,0x00,0x80,0x50,0x21, 0x00,0x45,0x10,0x21,0x8c,0x43,0x00,0x04,0x24,0x02,0x00,0x05,0x00,0x03,0x1a,0x02, 0x30,0x69,0x00,0x0f,0x11,0x22,0x00,0x0b,0x24,0x02,0x00,0x07,0x11,0x22,0x00,0x09, 0x24,0x02,0x00,0x0a,0x11,0x22,0x00,0x07,0x24,0x02,0x00,0x0b,0x11,0x22,0x00,0x05, 0x24,0x02,0x00,0x01,0x93,0x83,0x81,0xf0,0x3c,0x04,0xb0,0x06,0x10,0x62,0x00,0x03, 0x34,0x84,0x80,0x18,0x03,0xe0,0x00,0x08,0x00,0x00,0x00,0x00,0x8c,0x82,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x02,0x17,0x02,0x14,0x40,0xff,0xfa,0x00,0x00,0x00,0x00, 0x8d,0x43,0x01,0xa8,0x27,0x82,0x89,0x68,0x00,0x03,0x18,0x80,0x00,0x6a,0x20,0x21, 0x8c,0x87,0x00,0xa8,0x00,0x62,0x18,0x21,0x8c,0x68,0x00,0x00,0x00,0xe5,0x28,0x21, 0x8c,0xa9,0x00,0x00,0x3c,0x02,0xff,0xff,0x27,0x83,0x8a,0x68,0x01,0x22,0x10,0x24, 0x00,0x48,0x10,0x25,0xac,0xa2,0x00,0x00,0x8d,0x44,0x01,0xa8,0x00,0x07,0x30,0xc2, 
0x3c,0x02,0x00,0x80,0x00,0x04,0x20,0x80,0x00,0x83,0x20,0x21,0x00,0x06,0x32,0x00, 0x8c,0xa9,0x00,0x04,0x00,0xc2,0x30,0x25,0x8c,0x82,0x00,0x00,0x3c,0x03,0x80,0x00, 0x01,0x22,0x10,0x25,0x00,0x43,0x10,0x25,0xac,0xa2,0x00,0x04,0xaf,0x87,0xbc,0x20, 0x8c,0xa2,0x00,0x00,0x00,0x00,0x00,0x00,0xaf,0x82,0xbc,0x28,0x8c,0xa3,0x00,0x04, 0x3c,0x01,0xb0,0x07,0xac,0x26,0x80,0x18,0x8d,0x42,0x01,0xa8,0xaf,0x83,0xbc,0x24, 0x93,0x85,0x81,0xf0,0x24,0x44,0x00,0x01,0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x40, 0x00,0x83,0x10,0x0a,0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,0x24,0xa5,0xff,0xff, 0x00,0x82,0x20,0x23,0xad,0x44,0x01,0xa8,0xa3,0x85,0x81,0xf0,0x08,0x00,0x27,0x21, 0x00,0x00,0x00,0x00,0x3c,0x05,0xb0,0x03,0x3c,0x02,0x80,0x01,0x34,0xa5,0x00,0x20, 0x24,0x42,0x9d,0x64,0xac,0xa2,0x00,0x00,0x24,0x02,0x00,0x02,0x24,0x03,0x00,0x20, 0xac,0x82,0x00,0x64,0x3c,0x02,0x80,0x01,0xac,0x83,0x00,0x60,0xac,0x80,0x00,0x00, 0xac,0x80,0x00,0x04,0xac,0x80,0x00,0x08,0xac,0x80,0x00,0x4c,0xac,0x80,0x00,0x50, 0xac,0x80,0x00,0x54,0xac,0x80,0x00,0x0c,0xac,0x80,0x00,0x58,0xa0,0x80,0x00,0x5c, 0x24,0x42,0x9e,0x28,0x24,0x83,0x00,0x68,0x24,0x05,0x00,0x0f,0x24,0xa5,0xff,0xff, 0xac,0x62,0x00,0x00,0x04,0xa1,0xff,0xfd,0x24,0x63,0x00,0x04,0x3c,0x02,0x80,0x01, 0x24,0x42,0x9f,0x10,0xac,0x82,0x00,0x78,0x3c,0x03,0x80,0x01,0x3c,0x02,0x80,0x01, 0x24,0x63,0xa0,0x9c,0x24,0x42,0xa0,0x08,0xac,0x83,0x00,0x88,0xac,0x82,0x00,0x98, 0x3c,0x03,0x80,0x01,0x3c,0x02,0x80,0x01,0x24,0x63,0xa1,0x44,0x24,0x42,0xa2,0x5c, 0xac,0x83,0x00,0xa0,0xac,0x82,0x00,0xa4,0xa0,0x80,0x01,0xba,0xac,0x80,0x01,0xa8, 0xac,0x80,0x01,0xac,0xac,0x80,0x01,0xb0,0xac,0x80,0x01,0xb4,0xa0,0x80,0x01,0xb8, 0x03,0xe0,0x00,0x08,0xa0,0x80,0x01,0xb9,0x3c,0x03,0xb0,0x03,0x3c,0x02,0x80,0x01, 0x34,0x63,0x00,0x20,0x24,0x42,0x9e,0x28,0x03,0xe0,0x00,0x08,0xac,0x62,0x00,0x00, 0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x34,0x42,0x00,0x20,0x24,0x63,0x9e,0x40, 0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x10,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x11, 
0x00,0x80,0x28,0x21,0x8c,0x82,0x00,0x14,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0d, 0x00,0x00,0x00,0x00,0x8c,0x84,0x00,0x10,0x8c,0xa3,0x00,0x14,0x8c,0xa2,0x00,0x04, 0x00,0x83,0x20,0x21,0x00,0x44,0x10,0x21,0x30,0x43,0x00,0xff,0x00,0x03,0x18,0x2b, 0x00,0x02,0x12,0x02,0x00,0x43,0x10,0x21,0x00,0x02,0x12,0x00,0x30,0x42,0x3f,0xff, 0xac,0xa2,0x00,0x04,0xac,0xa0,0x00,0x00,0xac,0xa0,0x00,0x4c,0xac,0xa0,0x00,0x50, 0xac,0xa0,0x00,0x54,0x03,0xe0,0x00,0x08,0xac,0xa0,0x00,0x0c,0x3c,0x03,0xb0,0x03, 0x3c,0x02,0x80,0x01,0x34,0x63,0x00,0x20,0x24,0x42,0x9e,0xbc,0xac,0x62,0x00,0x00, 0x8c,0x86,0x00,0x04,0x3c,0x02,0xb0,0x01,0x24,0x03,0x00,0x01,0x00,0xc2,0x10,0x21, 0x8c,0x45,0x00,0x00,0xac,0x83,0x00,0x4c,0x00,0x05,0x14,0x02,0x30,0xa3,0x3f,0xff, 0x30,0x42,0x00,0xff,0xac,0x83,0x00,0x10,0xac,0x82,0x00,0x14,0x8c,0x83,0x00,0x14, 0xac,0x85,0x00,0x40,0x00,0xc3,0x30,0x21,0x03,0xe0,0x00,0x08,0xac,0x86,0x00,0x08, 0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8,0x34,0x42,0x00,0x20, 0x24,0x63,0x9f,0x10,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,0xac,0x43,0x00,0x00, 0x8c,0x82,0x00,0x4c,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x0a,0x00,0x80,0x80,0x21, 0xae,0x00,0x00,0x00,0xae,0x00,0x00,0x4c,0xae,0x00,0x00,0x50,0xae,0x00,0x00,0x54, 0xae,0x00,0x00,0x0c,0x8f,0xbf,0x00,0x14,0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x18,0x0c,0x00,0x27,0xaf,0x00,0x00,0x00,0x00,0x08,0x00,0x27,0xd1, 0xae,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8, 0x34,0x42,0x00,0x20,0x24,0x63,0x9f,0x74,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14, 0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x4c,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x16, 0x00,0x80,0x80,0x21,0x8e,0x03,0x00,0x08,0x3c,0x02,0xb0,0x01,0x8e,0x04,0x00,0x44, 0x00,0x62,0x18,0x21,0x90,0x65,0x00,0x00,0x24,0x02,0x00,0x01,0xae,0x02,0x00,0x50, 0x30,0xa3,0x00,0xff,0x00,0x03,0x10,0x82,0x00,0x04,0x23,0x02,0x30,0x84,0x00,0x0f, 0x30,0x42,0x00,0x03,0x00,0x03,0x19,0x02,0xae,0x04,0x00,0x34,0xae,0x02,0x00,0x2c, 
0xae,0x03,0x00,0x30,0xa2,0x05,0x00,0x48,0x8f,0xbf,0x00,0x14,0x8f,0xb0,0x00,0x10, 0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x0c,0x00,0x27,0xaf,0x00,0x00,0x00,0x00, 0x08,0x00,0x27,0xe9,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01, 0x27,0xbd,0xff,0xe8,0x34,0x42,0x00,0x20,0x24,0x63,0xa0,0x08,0xaf,0xb0,0x00,0x10, 0xaf,0xbf,0x00,0x14,0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x50,0x00,0x00,0x00,0x00, 0x10,0x40,0x00,0x16,0x00,0x80,0x80,0x21,0x92,0x03,0x00,0x44,0x8e,0x02,0x00,0x40, 0x83,0x85,0x8b,0xd4,0x92,0x04,0x00,0x41,0x30,0x63,0x00,0x01,0x00,0x02,0x16,0x02, 0xae,0x04,0x00,0x14,0x00,0x00,0x30,0x21,0xae,0x02,0x00,0x18,0x10,0xa0,0x00,0x04, 0xae,0x03,0x00,0x3c,0x10,0x60,0x00,0x03,0x24,0x02,0x00,0x01,0x24,0x06,0x00,0x01, 0x24,0x02,0x00,0x01,0xa3,0x86,0x8b,0xd4,0x8f,0xbf,0x00,0x14,0xae,0x02,0x00,0x54, 0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x0c,0x00,0x27,0xdd, 0x00,0x00,0x00,0x00,0x08,0x00,0x28,0x0e,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03, 0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8,0x34,0x42,0x00,0x20,0x24,0x63,0xa0,0x9c, 0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x50, 0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x1b,0x00,0x80,0x80,0x21,0x3c,0x02,0xb0,0x03, 0x8c,0x42,0x00,0x00,0x92,0x04,0x00,0x44,0x8e,0x03,0x00,0x40,0x83,0x86,0x8b,0xd4, 0x92,0x05,0x00,0x41,0x30,0x42,0x08,0x00,0x30,0x84,0x00,0x01,0x00,0x02,0x12,0xc2, 0x00,0x03,0x1e,0x02,0x00,0x82,0x20,0x25,0xae,0x05,0x00,0x14,0x00,0x00,0x38,0x21, 0xae,0x03,0x00,0x18,0x10,0xc0,0x00,0x04,0xae,0x04,0x00,0x3c,0x10,0x80,0x00,0x03, 0x24,0x02,0x00,0x01,0x24,0x07,0x00,0x01,0x24,0x02,0x00,0x01,0xa3,0x87,0x8b,0xd4, 0x8f,0xbf,0x00,0x14,0xae,0x02,0x00,0x54,0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x18,0x0c,0x00,0x27,0xdd,0x00,0x00,0x00,0x00,0x08,0x00,0x28,0x33, 0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8, 0x34,0x42,0x00,0x20,0x24,0x63,0xa1,0x44,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14, 
0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x54,0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x37, 0x00,0x80,0x80,0x21,0x8e,0x04,0x00,0x04,0x8e,0x03,0x00,0x44,0x3c,0x02,0x80,0x00, 0x3c,0x05,0xb0,0x01,0x34,0x42,0x00,0x10,0x00,0x85,0x20,0x21,0x00,0x62,0x18,0x25, 0xac,0x83,0x00,0x04,0x8e,0x02,0x00,0x04,0x8e,0x03,0x01,0xac,0x02,0x00,0x20,0x21, 0x00,0x45,0x10,0x21,0x8c,0x46,0x00,0x00,0x00,0x03,0x18,0x80,0x27,0x82,0x89,0x68, 0x00,0x62,0x18,0x21,0xac,0x66,0x00,0x00,0x8e,0x02,0x00,0x04,0x8e,0x03,0x01,0xac, 0x00,0x45,0x10,0x21,0x8c,0x46,0x00,0x04,0x00,0x03,0x18,0x80,0x27,0x82,0x8a,0x68, 0x00,0x62,0x18,0x21,0x0c,0x00,0x26,0x10,0xac,0x66,0x00,0x00,0x8e,0x03,0x01,0xac, 0x8e,0x07,0x00,0x04,0x3c,0x06,0xb0,0x03,0x24,0x65,0x00,0x01,0x28,0xa4,0x00,0x00, 0x24,0x62,0x00,0x40,0x00,0xa4,0x10,0x0a,0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80, 0x00,0x03,0x18,0x80,0x00,0xa2,0x28,0x23,0x00,0x70,0x18,0x21,0xae,0x05,0x01,0xac, 0xac,0x67,0x00,0xa8,0x34,0xc6,0x00,0x30,0x8c,0xc3,0x00,0x00,0x93,0x82,0x81,0xf0, 0x02,0x00,0x20,0x21,0x24,0x63,0x00,0x01,0x24,0x42,0x00,0x01,0xac,0xc3,0x00,0x00, 0xa3,0x82,0x81,0xf0,0x0c,0x00,0x27,0x90,0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x14, 0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x18,0x0c,0x00,0x28,0x27, 0x00,0x00,0x00,0x00,0x08,0x00,0x28,0x5d,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03, 0x3c,0x03,0x80,0x01,0x27,0xbd,0xff,0xe8,0x34,0x42,0x00,0x20,0x24,0x63,0xa2,0x5c, 0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x14,0xac,0x43,0x00,0x00,0x8c,0x82,0x00,0x54, 0x00,0x00,0x00,0x00,0x10,0x40,0x00,0x37,0x00,0x80,0x80,0x21,0x8e,0x04,0x00,0x04, 0x8e,0x03,0x00,0x44,0x3c,0x02,0x80,0x00,0x3c,0x05,0xb0,0x01,0x34,0x42,0x00,0x10, 0x00,0x85,0x20,0x21,0x00,0x62,0x18,0x25,0xac,0x83,0x00,0x04,0x8e,0x02,0x00,0x04, 0x8e,0x03,0x01,0xac,0x02,0x00,0x20,0x21,0x00,0x45,0x10,0x21,0x8c,0x46,0x00,0x00, 0x00,0x03,0x18,0x80,0x27,0x82,0x89,0x68,0x00,0x62,0x18,0x21,0xac,0x66,0x00,0x00, 0x8e,0x02,0x00,0x04,0x8e,0x03,0x01,0xac,0x00,0x45,0x10,0x21,0x8c,0x46,0x00,0x04, 
0x00,0x03,0x18,0x80,0x27,0x82,0x8a,0x68,0x00,0x62,0x18,0x21,0x0c,0x00,0x26,0x10, 0xac,0x66,0x00,0x00,0x8e,0x03,0x01,0xac,0x8e,0x07,0x00,0x04,0x3c,0x06,0xb0,0x03, 0x24,0x65,0x00,0x01,0x28,0xa4,0x00,0x00,0x24,0x62,0x00,0x40,0x00,0xa4,0x10,0x0a, 0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,0x00,0x03,0x18,0x80,0x00,0xa2,0x28,0x23, 0x00,0x70,0x18,0x21,0xae,0x05,0x01,0xac,0xac,0x67,0x00,0xa8,0x34,0xc6,0x00,0x30, 0x8c,0xc3,0x00,0x00,0x93,0x82,0x81,0xf0,0x02,0x00,0x20,0x21,0x24,0x63,0x00,0x01, 0x24,0x42,0x00,0x01,0xac,0xc3,0x00,0x00,0xa3,0x82,0x81,0xf0,0x0c,0x00,0x27,0x90, 0x00,0x00,0x00,0x00,0x8f,0xbf,0x00,0x14,0x8f,0xb0,0x00,0x10,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x18,0x0c,0x00,0x28,0x27,0x00,0x00,0x00,0x00,0x08,0x00,0x28,0xa3, 0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03,0x3c,0x03,0x80,0x01,0x34,0x42,0x00,0x20, 0x24,0x63,0xa3,0x74,0x27,0xbd,0xff,0xe0,0xac,0x43,0x00,0x00,0x3c,0x02,0x80,0x01, 0xaf,0xb2,0x00,0x18,0xaf,0xb1,0x00,0x14,0xaf,0xb0,0x00,0x10,0xaf,0xbf,0x00,0x1c, 0x00,0x80,0x80,0x21,0x24,0x52,0x9e,0x28,0x00,0x00,0x88,0x21,0x3c,0x03,0xb0,0x09, 0x34,0x63,0x00,0x06,0x8e,0x06,0x00,0x04,0x90,0x62,0x00,0x00,0x00,0x06,0x22,0x02, 0x00,0x44,0x10,0x23,0x24,0x44,0x00,0x40,0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x7f, 0x00,0x83,0x10,0x0a,0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,0x24,0x84,0xff,0xff, 0x10,0x44,0x00,0x68,0x00,0x00,0x28,0x21,0x3c,0x02,0xb0,0x01,0x00,0xc2,0x10,0x21, 0x8c,0x44,0x00,0x04,0x3c,0x03,0x7c,0x00,0x34,0x63,0x00,0xf0,0x00,0x83,0x18,0x24, 0xae,0x04,0x00,0x44,0x8c,0x44,0x00,0x00,0x10,0x60,0x00,0x69,0x00,0x00,0x38,0x21, 0x3c,0x09,0xb0,0x03,0x3c,0x06,0x7c,0x00,0x35,0x29,0x00,0x99,0x3c,0x0a,0xb0,0x01, 0x24,0x08,0x00,0x40,0x34,0xc6,0x00,0xf0,0x3c,0x0b,0xff,0xff,0x3c,0x0c,0x28,0x38, 0x16,0x20,0x00,0x06,0x24,0xa5,0x00,0x01,0x93,0x82,0x81,0xf6,0x24,0x11,0x00,0x01, 0x24,0x42,0x00,0x01,0xa1,0x22,0x00,0x00,0xa3,0x82,0x81,0xf6,0x8e,0x02,0x00,0x04, 0x24,0x07,0x00,0x01,0x24,0x42,0x01,0x00,0x30,0x42,0x3f,0xff,0xae,0x02,0x00,0x04, 
0x00,0x4a,0x10,0x21,0x8c,0x43,0x00,0x04,0x00,0x00,0x00,0x00,0xae,0x03,0x00,0x44, 0x8c,0x44,0x00,0x00,0x10,0xa8,0x00,0x2d,0x00,0x66,0x18,0x24,0x14,0x60,0xff,0xec, 0x00,0x8b,0x10,0x24,0x14,0x4c,0xff,0xea,0x24,0x02,0x00,0x01,0x10,0xe2,0x00,0x2f, 0x3c,0x03,0xb0,0x09,0x8e,0x02,0x00,0x44,0x8e,0x04,0x00,0x60,0x00,0x02,0x1e,0x42, 0x00,0x02,0x12,0x02,0x30,0x42,0x00,0x0f,0x30,0x63,0x00,0x01,0xae,0x02,0x00,0x00, 0x10,0x44,0x00,0x1a,0xae,0x03,0x00,0x58,0x8e,0x02,0x00,0x64,0x8e,0x04,0x00,0x58, 0x00,0x00,0x00,0x00,0x10,0x82,0x00,0x05,0x00,0x00,0x00,0x00,0xae,0x00,0x00,0x4c, 0xae,0x00,0x00,0x50,0xae,0x00,0x00,0x54,0xae,0x00,0x00,0x0c,0x8e,0x03,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x03,0x10,0x80,0x00,0x50,0x10,0x21,0x8c,0x42,0x00,0x68, 0x00,0x00,0x00,0x00,0x10,0x52,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x40,0xf8,0x09, 0x02,0x00,0x20,0x21,0x8e,0x04,0x00,0x58,0x8e,0x03,0x00,0x00,0x00,0x00,0x00,0x00, 0xae,0x03,0x00,0x60,0x08,0x00,0x28,0xeb,0xae,0x04,0x00,0x64,0x8e,0x02,0x00,0x64, 0x00,0x00,0x00,0x00,0x14,0x62,0xff,0xe5,0x00,0x00,0x00,0x00,0x7a,0x02,0x0d,0x7c, 0x8f,0xbf,0x00,0x1c,0x8f,0xb2,0x00,0x18,0x7b,0xb0,0x00,0xbc,0x00,0x43,0x10,0x26, 0x00,0x02,0x10,0x2b,0x03,0xe0,0x00,0x08,0x27,0xbd,0x00,0x20,0x34,0x63,0x00,0x06, 0x8e,0x04,0x00,0x04,0x90,0x62,0x00,0x00,0x00,0x04,0x22,0x02,0x00,0x44,0x10,0x23, 0x24,0x44,0x00,0x40,0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x7f,0x00,0x83,0x10,0x0a, 0x00,0x02,0x11,0x83,0x00,0x02,0x11,0x80,0x00,0x82,0x20,0x23,0x14,0x87,0xff,0xc5, 0x00,0x00,0x00,0x00,0x8e,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x2c,0x62,0x00,0x03, 0x14,0x40,0x00,0x05,0x24,0x02,0x00,0x0d,0x10,0x62,0x00,0x03,0x24,0x02,0x00,0x01, 0x08,0x00,0x29,0x4b,0xa2,0x02,0x00,0x5c,0x08,0x00,0x29,0x4b,0xa2,0x00,0x00,0x5c, 0x3c,0x02,0xff,0xff,0x00,0x82,0x10,0x24,0x3c,0x03,0x28,0x38,0x14,0x43,0xff,0x94, 0x24,0x02,0x00,0x01,0x08,0x00,0x29,0x23,0x00,0x00,0x00,0x00,0x3c,0x02,0xb0,0x03, 0x3c,0x03,0x80,0x01,0x34,0x42,0x00,0x20,0x24,0x63,0xa5,0xcc,0xac,0x43,0x00,0x00, 
0x8c,0x83,0x01,0xa8,0x8c,0x82,0x01,0xac,0x00,0x80,0x40,0x21,0x10,0x62,0x00,0x20, 0x00,0x00,0x20,0x21,0x93,0x82,0x81,0xf1,0x00,0x03,0x28,0x80,0x3c,0x07,0xb0,0x06, 0x00,0xa8,0x18,0x21,0x24,0x04,0x00,0x01,0x8c,0x66,0x00,0xa8,0x10,0x44,0x00,0x1c, 0x34,0xe7,0x80,0x18,0x3c,0x05,0xb0,0x01,0xaf,0x86,0xbc,0x20,0x00,0xc5,0x28,0x21, 0x8c,0xa3,0x00,0x00,0x00,0x06,0x20,0xc2,0x3c,0x02,0x00,0x80,0x00,0x04,0x22,0x00, 0x00,0x82,0x20,0x25,0xaf,0x83,0xbc,0x28,0x8c,0xa2,0x00,0x04,0xac,0xe4,0x00,0x00, 0x8d,0x03,0x01,0xa8,0xaf,0x82,0xbc,0x24,0x24,0x64,0x00,0x01,0x04,0x80,0x00,0x0a, 0x00,0x80,0x10,0x21,0x00,0x02,0x11,0x83,0x8d,0x03,0x01,0xac,0x00,0x02,0x11,0x80, 0x00,0x82,0x10,0x23,0x00,0x43,0x18,0x26,0xad,0x02,0x01,0xa8,0x00,0x03,0x20,0x2b, 0x03,0xe0,0x00,0x08,0x00,0x80,0x10,0x21,0x08,0x00,0x29,0x95,0x24,0x62,0x00,0x40, 0x27,0x82,0x89,0x68,0x00,0x06,0x20,0xc2,0x00,0x04,0x22,0x00,0x00,0xa2,0x48,0x21, 0x3c,0x02,0x00,0x80,0x00,0x82,0x58,0x25,0x93,0x82,0x81,0xf0,0x3c,0x0a,0xb0,0x06, 0x3c,0x03,0xb0,0x01,0x2c,0x42,0x00,0x02,0x00,0xc3,0x38,0x21,0x35,0x4a,0x80,0x18, 0x14,0x40,0xff,0xef,0x00,0x00,0x20,0x21,0x8c,0xe5,0x00,0x00,0x8d,0x23,0x00,0x00, 0x24,0x02,0xc0,0x00,0x00,0xa2,0x10,0x24,0x00,0x43,0x10,0x25,0xac,0xe2,0x00,0x00, 0x8d,0x04,0x01,0xa8,0x27,0x83,0x8a,0x68,0x8c,0xe5,0x00,0x04,0x00,0x04,0x20,0x80, 0x00,0x83,0x20,0x21,0x8c,0x82,0x00,0x00,0x3c,0x03,0x80,0x00,0x00,0xa2,0x10,0x25, 0x00,0x43,0x10,0x25,0xac,0xe2,0x00,0x04,0xaf,0x86,0xbc,0x20,0x8c,0xe2,0x00,0x00, 0x93,0x85,0x81,0xf0,0xaf,0x82,0xbc,0x28,0x8c,0xe3,0x00,0x04,0xad,0x4b,0x00,0x00, 0x8d,0x02,0x01,0xa8,0xaf,0x83,0xbc,0x24,0x24,0xa5,0xff,0xff,0x24,0x44,0x00,0x01, 0x28,0x83,0x00,0x00,0x24,0x42,0x00,0x40,0x00,0x83,0x10,0x0a,0x00,0x02,0x11,0x83, 0x00,0x02,0x11,0x80,0x00,0x82,0x20,0x23,0xad,0x04,0x01,0xa8,0xa3,0x85,0x81,0xf0, 0x79,0x02,0x0d,0x7c,0x00,0x00,0x00,0x00,0x00,0x43,0x10,0x26,0x08,0x00,0x29,0x9c, 0x00,0x02,0x20,0x2b,0x3c,0x04,0xb0,0x03,0x3c,0x06,0xb0,0x07,0x3c,0x02,0x80,0x01, 
0x34,0xc6,0x00,0x18,0x34,0x84,0x00,0x20,0x24,0x42,0xa7,0x54,0x24,0x03,0xff,0x83, 0xac,0x82,0x00,0x00,0xa0,0xc3,0x00,0x00,0x90,0xc4,0x00,0x00,0x27,0xbd,0xff,0xf8, 0x3c,0x03,0xb0,0x07,0x24,0x02,0xff,0x82,0xa3,0xa4,0x00,0x00,0xa0,0x62,0x00,0x00, 0x90,0x64,0x00,0x00,0x3c,0x02,0xb0,0x07,0x34,0x42,0x00,0x08,0xa3,0xa4,0x00,0x01, 0xa0,0x40,0x00,0x00,0x90,0x43,0x00,0x00,0x24,0x02,0x00,0x03,0x3c,0x05,0xb0,0x07, 0xa3,0xa3,0x00,0x00,0xa0,0xc2,0x00,0x00,0x90,0xc4,0x00,0x00,0x34,0xa5,0x00,0x10, 0x24,0x02,0x00,0x06,0x3c,0x03,0xb0,0x07,0xa3,0xa4,0x00,0x00,0x34,0x63,0x00,0x38, 0xa0,0xa2,0x00,0x00,0x90,0x64,0x00,0x00,0x3c,0x02,0xb0,0x07,0x34,0x42,0x00,0x20, 0xa3,0xa4,0x00,0x00,0xa0,0xa0,0x00,0x00,0x90,0xa3,0x00,0x00,0xaf,0x82,0xbf,0x30, 0xa3,0xa3,0x00,0x00,0xa0,0x40,0x00,0x00,0x90,0x43,0x00,0x00,0x03,0xe0,0x00,0x08, 0x27,0xbd,0x00,0x08,}; u8 Rtl8192PciEFwDataArray[DataArrayLengthPciE] = { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x10,0x00,0x08,0x00, 0x02,0xe9,0x01,0x74,0x02,0xab,0x01,0xc7,0x01,0x55,0x00,0xe4,0x00,0xab,0x00,0x72, 0x00,0x55,0x00,0x4c,0x00,0x4c,0x00,0x4c,0x00,0x4c,0x00,0x4c,0x02,0x76,0x01,0x3b, 0x00,0xd2,0x00,0x9e,0x00,0x69,0x00,0x4f,0x00,0x46,0x00,0x3f,0x01,0x3b,0x00,0x9e, 0x00,0x69,0x00,0x4f,0x00,0x35,0x00,0x27,0x00,0x23,0x00,0x20,0x01,0x2f,0x00,0x98, 0x00,0x65,0x00,0x4c,0x00,0x33,0x00,0x26,0x00,0x22,0x00,0x1e,0x00,0x98,0x00,0x4c, 0x00,0x33,0x00,0x26,0x00,0x19,0x00,0x13,0x00,0x11,0x00,0x0f,0x02,0x39,0x01,0x1c, 0x00,0xbd,0x00,0x8e,0x00,0x5f,0x00,0x47,0x00,0x3f,0x00,0x39,0x01,0x1c,0x00,0x8e, 0x00,0x5f,0x00,0x47,0x00,0x2f,0x00,0x23,0x00,0x20,0x00,0x1c,0x01,0x11,0x00,0x89, 0x00,0x5b,0x00,0x44,0x00,0x2e,0x00,0x22,0x00,0x1e,0x00,0x1b,0x00,0x89,0x00,0x44, 0x00,0x2e,0x00,0x22,0x00,0x17,0x00,0x11,0x00,0x0f,0x00,0x0e,0x02,0xab,0x02,0xab, 0x02,0x66,0x02,0x66,0x07,0x06,0x06,0x06,0x05,0x06,0x07,0x08,0x04,0x06,0x07,0x08, 0x09,0x0a,0x0b,0x0b,0x49,0x6e,0x74,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x4c, 
0x42,0x4d,0x4f,0x44,0x00,0x00,0x00,0x00,0x54,0x4c,0x42,0x4c,0x5f,0x64,0x61,0x74, 0x61,0x00,0x54,0x4c,0x42,0x53,0x00,0x00,0x00,0x00,0x00,0x00,0x41,0x64,0x45,0x4c, 0x5f,0x64,0x61,0x74,0x61,0x00,0x41,0x64,0x45,0x53,0x00,0x00,0x00,0x00,0x00,0x00, 0x45,0x78,0x63,0x43,0x6f,0x64,0x65,0x36,0x00,0x00,0x45,0x78,0x63,0x43,0x6f,0x64, 0x65,0x37,0x00,0x00,0x53,0x79,0x73,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x42,0x70, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x49,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x43,0x70,0x55,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x4f,0x76,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x01,0x0b,0x63, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x2c, 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x48,0x00,0x00,0x00,0x60, 0x00,0x00,0x00,0x90,0x00,0x00,0x00,0xc0,0x00,0x00,0x01,0x20,0x00,0x00,0x01,0x80, 0x00,0x00,0x01,0xb0,0x00,0x00,0x00,0x34,0x00,0x00,0x00,0x68,0x00,0x00,0x00,0x9c, 0x00,0x00,0x00,0xd0,0x00,0x00,0x01,0x38,0x00,0x00,0x01,0xa0,0x00,0x00,0x01,0xd4, 0x00,0x00,0x02,0x08,0x00,0x00,0x00,0x68,0x00,0x00,0x00,0xd0,0x00,0x00,0x01,0x38, 0x00,0x00,0x01,0xa0,0x00,0x00,0x02,0x6f,0x00,0x00,0x03,0x40,0x00,0x00,0x03,0xa8, 0x00,0x00,0x04,0x10,0x01,0x01,0x01,0x02,0x01,0x01,0x02,0x02,0x03,0x03,0x04,0x04, 0x01,0x01,0x02,0x02,0x03,0x03,0x04,0x04,0x02,0x03,0x03,0x04,0x05,0x06,0x07,0x08, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x00,0x07,0x6c,0x80,0x00,0x07,0x80, 0x80,0x00,0x07,0x80,0x80,0x00,0x07,0x70,0x80,0x00,0x07,0x70,0x80,0x00,0x07,0x94, 0x80,0x00,0x56,0xb0,0x80,0x00,0x57,0x08,0x80,0x00,0x57,0x30,0x80,0x00,0x58,0x28, 0x80,0x00,0x58,0xe0,0x80,0x00,0x59,0x88,0x80,0x00,0x59,0xfc,0x80,0x00,0x5b,0x08, 0x80,0x00,0x5b,0x40,0x80,0x00,0x5b,0x54,0x80,0x00,0x5b,0x68,0x80,0x00,0x5c,0x50, 0x80,0x00,0x5c,0x90,0x80,0x00,0x5d,0x44,0x80,0x00,0x5d,0x6c,0x80,0x00,0x56,0x70, 0x80,0x00,0x5d,0xbc,0x80,0x00,0x64,0x48,0x80,0x00,0x64,0xc0,0x80,0x00,0x64,0xcc, 
0x80,0x00,0x64,0xd8,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60, 0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60, 0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60,0x80,0x00,0x64,0x60, 0x80,0x00,0x64,0x60,0x80,0x00,0x64,0xe4,0x80,0x00,0x64,0xf0,0x80,0x00,0x64,0xfc, 0x80,0x00,0x87,0xa4,0x80,0x00,0x87,0xa4,0x80,0x00,0x87,0xa4,0x80,0x00,0x87,0xd8, 0x80,0x00,0x88,0x18,0x80,0x00,0x88,0x50,0x80,0x00,0x88,0x80,0x80,0x00,0x88,0xb0, 0x80,0x00,0x88,0xc4,0x80,0x00,0x89,0x2c,0x80,0x00,0x89,0x40,0x80,0x00,0x89,0x7c, 0x80,0x00,0x89,0x84,0x80,0x00,0x89,0xc0,0x80,0x00,0x89,0xd4,0x80,0x00,0x89,0xdc, 0x80,0x00,0x89,0xe4,0x80,0x00,0x89,0xe4,0x80,0x00,0x89,0xe4,0x80,0x00,0x89,0xe4, 0x80,0x00,0x8a,0x14,0x80,0x00,0x8a,0x28,0x80,0x00,0x8a,0x3c,0x80,0x00,0x86,0xe8, 0x80,0x00,0x8d,0x68,0x80,0x00,0x8d,0x68,0x80,0x00,0x8d,0x68,0x80,0x00,0x8d,0x9c, 0x80,0x00,0x8d,0xdc,0x80,0x00,0x8e,0x14,0x80,0x00,0x8e,0x44,0x80,0x00,0x8e,0x74, 0x80,0x00,0x8e,0x88,0x80,0x00,0x8e,0xf0,0x80,0x00,0x8f,0x04,0x80,0x00,0x8f,0x40, 0x80,0x00,0x8f,0x48,0x80,0x00,0x8f,0x84,0x80,0x00,0x8f,0x98,0x80,0x00,0x8f,0xa0, 0x80,0x00,0x8f,0xa8,0x80,0x00,0x8f,0xa8,0x80,0x00,0x8f,0xa8,0x80,0x00,0x8f,0xa8, 0x80,0x00,0x8f,0xd8,0x80,0x00,0x8f,0xec,0x80,0x00,0x90,0x00,0x80,0x00,0x8b,0x88, }; u32 Rtl8192PciEPHY_REGArray[PHY_REGArrayLengthPciE] = {0x0,}; u32 Rtl8192PciEPHY_REG_1T2RArray[PHY_REG_1T2RArrayLengthPciE] = { 0x800, 0x00000000, 0x804, 0x00000001, 0x808, 0x0000fc00, 0x80c, 0x0000001c, 0x810, 0x801010aa, 0x814, 0x008514d0, 0x818, 0x00000040, 0x81c, 0x00000000, 0x820, 0x00000004, 0x824, 0x00690000, 0x828, 0x00000004, 0x82c, 0x00e90000, 0x830, 0x00000004, 0x834, 0x00690000, 0x838, 0x00000004, 0x83c, 0x00e90000, 0x840, 0x00000000, 0x844, 0x00000000, 0x848, 0x00000000, 0x84c, 0x00000000, 0x850, 0x00000000, 0x854, 0x00000000, 0x858, 0x65a965a9, 0x85c, 0x65a965a9, 0x860, 0x001f0010, 0x864, 0x007f0010, 0x868, 0x001f0010, 0x86c, 0x007f0010, 0x870, 0x0f100f70, 0x874, 0x0f100f70, 
0x878, 0x00000000, 0x87c, 0x00000000, 0x880, 0x6870e36c, 0x884, 0xe3573600, 0x888, 0x4260c340, 0x88c, 0x0000ff00, 0x890, 0x00000000, 0x894, 0xfffffffe, 0x898, 0x4c42382f, 0x89c, 0x00656056, 0x8b0, 0x00000000, 0x8e0, 0x00000000, 0x8e4, 0x00000000, 0x900, 0x00000000, 0x904, 0x00000023, 0x908, 0x00000000, 0x90c, 0x31121311, 0xa00, 0x00d0c7d8, 0xa04, 0x811f0008, 0xa08, 0x80cd8300, 0xa0c, 0x2e62740f, 0xa10, 0x95009b78, 0xa14, 0x11145008, 0xa18, 0x00881117, 0xa1c, 0x89140fa0, 0xa20, 0x1a1b0000, 0xa24, 0x090e1317, 0xa28, 0x00000204, 0xa2c, 0x00000000, 0xc00, 0x00000040, 0xc04, 0x00005433, 0xc08, 0x000000e4, 0xc0c, 0x6c6c6c6c, 0xc10, 0x08800000, 0xc14, 0x40000100, 0xc18, 0x08000000, 0xc1c, 0x40000100, 0xc20, 0x08000000, 0xc24, 0x40000100, 0xc28, 0x08000000, 0xc2c, 0x40000100, 0xc30, 0x6de9ac44, 0xc34, 0x465c52cd, 0xc38, 0x497f5994, 0xc3c, 0x0a969764, 0xc40, 0x1f7c403f, 0xc44, 0x000100b7, 0xc48, 0xec020000, 0xc4c, 0x00000300, 0xc50, 0x69543420, 0xc54, 0x433c0094, 0xc58, 0x69543420, 0xc5c, 0x433c0094, 0xc60, 0x69543420, 0xc64, 0x433c0094, 0xc68, 0x69543420, 0xc6c, 0x433c0094, 0xc70, 0x2c7f000d, 0xc74, 0x0186175b, 0xc78, 0x0000001f, 0xc7c, 0x00b91612, 0xc80, 0x40000100, 0xc84, 0x20000000, 0xc88, 0x40000100, 0xc8c, 0x20200000, 0xc90, 0x40000100, 0xc94, 0x00000000, 0xc98, 0x40000100, 0xc9c, 0x00000000, 0xca0, 0x00492492, 0xca4, 0x00000000, 0xca8, 0x00000000, 0xcac, 0x00000000, 0xcb0, 0x00000000, 0xcb4, 0x00000000, 0xcb8, 0x00000000, 0xcbc, 0x00492492, 0xcc0, 0x00000000, 0xcc4, 0x00000000, 0xcc8, 0x00000000, 0xccc, 0x00000000, 0xcd0, 0x00000000, 0xcd4, 0x00000000, 0xcd8, 0x64b22427, 0xcdc, 0x00766932, 0xce0, 0x00222222, 0xd00, 0x00000750, 0xd04, 0x00000403, 0xd08, 0x0000907f, 0xd0c, 0x00000001, 0xd10, 0xa0633333, 0xd14, 0x33333c63, 0xd18, 0x6a8f5b6b, 0xd1c, 0x00000000, 0xd20, 0x00000000, 0xd24, 0x00000000, 0xd28, 0x00000000, 0xd2c, 0xcc979975, 0xd30, 0x00000000, 0xd34, 0x00000000, 0xd38, 0x00000000, 0xd3c, 0x00027293, 0xd40, 0x00000000, 0xd44, 0x00000000, 0xd48, 0x00000000, 
0xd4c, 0x00000000, 0xd50, 0x6437140a, 0xd54, 0x024dbd02, 0xd58, 0x00000000, 0xd5c, 0x04032064, 0xe00, 0x161a1a1a, 0xe04, 0x12121416, 0xe08, 0x00001800, 0xe0c, 0x00000000, 0xe10, 0x161a1a1a, 0xe14, 0x12121416, 0xe18, 0x161a1a1a, 0xe1c, 0x12121416, }; u32 Rtl8192PciERadioA_Array[RadioA_ArrayLengthPciE] = { 0x019, 0x00000003, 0x000, 0x000000bf, 0x001, 0x00000ee0, 0x002, 0x0000004c, 0x003, 0x000007f1, 0x004, 0x00000975, 0x005, 0x00000c58, 0x006, 0x00000ae6, 0x007, 0x000000ca, 0x008, 0x00000e1c, 0x009, 0x000007f0, 0x00a, 0x000009d0, 0x00b, 0x000001ba, 0x00c, 0x00000240, 0x00e, 0x00000020, 0x00f, 0x00000990, 0x012, 0x00000806, 0x014, 0x000005ab, 0x015, 0x00000f80, 0x016, 0x00000020, 0x017, 0x00000597, 0x018, 0x0000050a, 0x01a, 0x00000f80, 0x01b, 0x00000f5e, 0x01c, 0x00000008, 0x01d, 0x00000607, 0x01e, 0x000006cc, 0x01f, 0x00000000, 0x020, 0x000001a5, 0x01f, 0x00000001, 0x020, 0x00000165, 0x01f, 0x00000002, 0x020, 0x000000c6, 0x01f, 0x00000003, 0x020, 0x00000086, 0x01f, 0x00000004, 0x020, 0x00000046, 0x01f, 0x00000005, 0x020, 0x000001e6, 0x01f, 0x00000006, 0x020, 0x000001a6, 0x01f, 0x00000007, 0x020, 0x00000166, 0x01f, 0x00000008, 0x020, 0x000000c7, 0x01f, 0x00000009, 0x020, 0x00000087, 0x01f, 0x0000000a, 0x020, 0x000000f7, 0x01f, 0x0000000b, 0x020, 0x000000d7, 0x01f, 0x0000000c, 0x020, 0x000000b7, 0x01f, 0x0000000d, 0x020, 0x00000097, 0x01f, 0x0000000e, 0x020, 0x00000077, 0x01f, 0x0000000f, 0x020, 0x00000057, 0x01f, 0x00000010, 0x020, 0x00000037, 0x01f, 0x00000011, 0x020, 0x000000fb, 0x01f, 0x00000012, 0x020, 0x000000db, 0x01f, 0x00000013, 0x020, 0x000000bb, 0x01f, 0x00000014, 0x020, 0x000000ff, 0x01f, 0x00000015, 0x020, 0x000000e3, 0x01f, 0x00000016, 0x020, 0x000000c3, 0x01f, 0x00000017, 0x020, 0x000000a3, 0x01f, 0x00000018, 0x020, 0x00000083, 0x01f, 0x00000019, 0x020, 0x00000063, 0x01f, 0x0000001a, 0x020, 0x00000043, 0x01f, 0x0000001b, 0x020, 0x00000023, 0x01f, 0x0000001c, 0x020, 0x00000003, 0x01f, 0x0000001d, 0x020, 0x000001e3, 0x01f, 0x0000001e, 0x020, 0x000001c3, 
0x01f, 0x0000001f, 0x020, 0x000001a3, 0x01f, 0x00000020, 0x020, 0x00000183, 0x01f, 0x00000021, 0x020, 0x00000163, 0x01f, 0x00000022, 0x020, 0x00000143, 0x01f, 0x00000023, 0x020, 0x00000123, 0x01f, 0x00000024, 0x020, 0x00000103, 0x023, 0x00000203, 0x024, 0x00000100, 0x00b, 0x000001ba, 0x02c, 0x000003d7, 0x02d, 0x00000ff0, 0x000, 0x00000037, 0x004, 0x00000160, 0x007, 0x00000080, 0x002, 0x0000088d, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x016, 0x00000200, 0x016, 0x00000380, 0x016, 0x00000020, 0x016, 0x000001a0, 0x000, 0x000000bf, 0x00d, 0x0000001f, 0x00d, 0x00000c9f, 0x002, 0x0000004d, 0x000, 0x00000cbf, 0x004, 0x00000975, 0x007, 0x00000700, }; u32 Rtl8192PciERadioB_Array[RadioB_ArrayLengthPciE] = { 0x019, 0x00000003, 0x000, 0x000000bf, 0x001, 0x000006e0, 0x002, 0x0000004c, 0x003, 0x000007f1, 0x004, 0x00000975, 0x005, 0x00000c58, 0x006, 0x00000ae6, 0x007, 0x000000ca, 0x008, 0x00000e1c, 0x000, 0x000000b7, 0x00a, 0x00000850, 0x000, 0x000000bf, 0x00b, 0x000001ba, 0x00c, 0x00000240, 0x00e, 0x00000020, 0x015, 0x00000f80, 0x016, 0x00000020, 0x017, 0x00000597, 0x018, 0x0000050a, 0x01a, 0x00000e00, 0x01b, 0x00000f5e, 0x01d, 0x00000607, 0x01e, 0x000006cc, 0x00b, 0x000001ba, 0x023, 0x00000203, 0x024, 0x00000100, 0x000, 0x00000037, 0x004, 0x00000160, 0x016, 0x00000200, 0x016, 0x00000380, 0x016, 0x00000020, 0x016, 0x000001a0, 0x00d, 0x00000ccc, 0x000, 0x000000bf, 0x002, 0x0000004d, 0x000, 0x00000cbf, 0x004, 0x00000975, 0x007, 0x00000700, }; u32 Rtl8192PciERadioC_Array[RadioC_ArrayLengthPciE] = { 0x0, }; u32 Rtl8192PciERadioD_Array[RadioD_ArrayLengthPciE] = { 0x0, }; u32 Rtl8192PciEMACPHY_Array[] = { 0x03c, 0xffff0000, 0x00000f0f, 0x340, 0xffffffff, 0x161a1a1a, 0x344, 0xffffffff, 0x12121416, 0x348, 0x0000ffff, 0x00001818, 0x12c, 0xffffffff, 0x04000802, 0x318, 0x00000fff, 0x00000100, }; u32 Rtl8192PciEMACPHY_Array_PG[] = { 0x03c, 0xffff0000, 0x00000f0f, 0xe00, 0xffffffff, 0x06090909, 0xe04, 0xffffffff, 0x00030306, 0xe08, 0x0000ff00, 0x00000000, 0xe10, 0xffffffff, 0x0a0c0d0f, 0xe14, 
0xffffffff, 0x06070809, 0xe18, 0xffffffff, 0x0a0c0d0f, 0xe1c, 0xffffffff, 0x06070809, 0x12c, 0xffffffff, 0x04000802, 0x318, 0x00000fff, 0x00000800, }; u32 Rtl8192PciEAGCTAB_Array[AGCTAB_ArrayLengthPciE] = { 0xc78, 0x7d000001, 0xc78, 0x7d010001, 0xc78, 0x7d020001, 0xc78, 0x7d030001, 0xc78, 0x7d040001, 0xc78, 0x7d050001, 0xc78, 0x7c060001, 0xc78, 0x7b070001, 0xc78, 0x7a080001, 0xc78, 0x79090001, 0xc78, 0x780a0001, 0xc78, 0x770b0001, 0xc78, 0x760c0001, 0xc78, 0x750d0001, 0xc78, 0x740e0001, 0xc78, 0x730f0001, 0xc78, 0x72100001, 0xc78, 0x71110001, 0xc78, 0x70120001, 0xc78, 0x6f130001, 0xc78, 0x6e140001, 0xc78, 0x6d150001, 0xc78, 0x6c160001, 0xc78, 0x6b170001, 0xc78, 0x6a180001, 0xc78, 0x69190001, 0xc78, 0x681a0001, 0xc78, 0x671b0001, 0xc78, 0x661c0001, 0xc78, 0x651d0001, 0xc78, 0x641e0001, 0xc78, 0x491f0001, 0xc78, 0x48200001, 0xc78, 0x47210001, 0xc78, 0x46220001, 0xc78, 0x45230001, 0xc78, 0x44240001, 0xc78, 0x43250001, 0xc78, 0x28260001, 0xc78, 0x27270001, 0xc78, 0x26280001, 0xc78, 0x25290001, 0xc78, 0x242a0001, 0xc78, 0x232b0001, 0xc78, 0x222c0001, 0xc78, 0x212d0001, 0xc78, 0x202e0001, 0xc78, 0x0a2f0001, 0xc78, 0x08300001, 0xc78, 0x06310001, 0xc78, 0x05320001, 0xc78, 0x04330001, 0xc78, 0x03340001, 0xc78, 0x02350001, 0xc78, 0x01360001, 0xc78, 0x00370001, 0xc78, 0x00380001, 0xc78, 0x00390001, 0xc78, 0x003a0001, 0xc78, 0x003b0001, 0xc78, 0x003c0001, 0xc78, 0x003d0001, 0xc78, 0x003e0001, 0xc78, 0x003f0001, 0xc78, 0x7d400001, 0xc78, 0x7d410001, 0xc78, 0x7d420001, 0xc78, 0x7d430001, 0xc78, 0x7d440001, 0xc78, 0x7d450001, 0xc78, 0x7c460001, 0xc78, 0x7b470001, 0xc78, 0x7a480001, 0xc78, 0x79490001, 0xc78, 0x784a0001, 0xc78, 0x774b0001, 0xc78, 0x764c0001, 0xc78, 0x754d0001, 0xc78, 0x744e0001, 0xc78, 0x734f0001, 0xc78, 0x72500001, 0xc78, 0x71510001, 0xc78, 0x70520001, 0xc78, 0x6f530001, 0xc78, 0x6e540001, 0xc78, 0x6d550001, 0xc78, 0x6c560001, 0xc78, 0x6b570001, 0xc78, 0x6a580001, 0xc78, 0x69590001, 0xc78, 0x685a0001, 0xc78, 0x675b0001, 0xc78, 0x665c0001, 0xc78, 0x655d0001, 0xc78, 
0x645e0001, 0xc78, 0x495f0001, 0xc78, 0x48600001, 0xc78, 0x47610001, 0xc78, 0x46620001, 0xc78, 0x45630001, 0xc78, 0x44640001, 0xc78, 0x43650001, 0xc78, 0x28660001, 0xc78, 0x27670001, 0xc78, 0x26680001, 0xc78, 0x25690001, 0xc78, 0x246a0001, 0xc78, 0x236b0001, 0xc78, 0x226c0001, 0xc78, 0x216d0001, 0xc78, 0x206e0001, 0xc78, 0x0a6f0001, 0xc78, 0x08700001, 0xc78, 0x06710001, 0xc78, 0x05720001, 0xc78, 0x04730001, 0xc78, 0x03740001, 0xc78, 0x02750001, 0xc78, 0x01760001, 0xc78, 0x00770001, 0xc78, 0x00780001, 0xc78, 0x00790001, 0xc78, 0x007a0001, 0xc78, 0x007b0001, 0xc78, 0x007c0001, 0xc78, 0x007d0001, 0xc78, 0x007e0001, 0xc78, 0x007f0001, 0xc78, 0x2e00001e, 0xc78, 0x2e01001e, 0xc78, 0x2e02001e, 0xc78, 0x2e03001e, 0xc78, 0x2e04001e, 0xc78, 0x2e05001e, 0xc78, 0x3006001e, 0xc78, 0x3407001e, 0xc78, 0x3908001e, 0xc78, 0x3c09001e, 0xc78, 0x3f0a001e, 0xc78, 0x420b001e, 0xc78, 0x440c001e, 0xc78, 0x450d001e, 0xc78, 0x460e001e, 0xc78, 0x460f001e, 0xc78, 0x4710001e, 0xc78, 0x4811001e, 0xc78, 0x4912001e, 0xc78, 0x4a13001e, 0xc78, 0x4b14001e, 0xc78, 0x4b15001e, 0xc78, 0x4c16001e, 0xc78, 0x4d17001e, 0xc78, 0x4e18001e, 0xc78, 0x4f19001e, 0xc78, 0x4f1a001e, 0xc78, 0x501b001e, 0xc78, 0x511c001e, 0xc78, 0x521d001e, 0xc78, 0x521e001e, 0xc78, 0x531f001e, 0xc78, 0x5320001e, 0xc78, 0x5421001e, 0xc78, 0x5522001e, 0xc78, 0x5523001e, 0xc78, 0x5624001e, 0xc78, 0x5725001e, 0xc78, 0x5726001e, 0xc78, 0x5827001e, 0xc78, 0x5828001e, 0xc78, 0x5929001e, 0xc78, 0x592a001e, 0xc78, 0x5a2b001e, 0xc78, 0x5b2c001e, 0xc78, 0x5c2d001e, 0xc78, 0x5c2e001e, 0xc78, 0x5d2f001e, 0xc78, 0x5e30001e, 0xc78, 0x5f31001e, 0xc78, 0x6032001e, 0xc78, 0x6033001e, 0xc78, 0x6134001e, 0xc78, 0x6235001e, 0xc78, 0x6336001e, 0xc78, 0x6437001e, 0xc78, 0x6438001e, 0xc78, 0x6539001e, 0xc78, 0x663a001e, 0xc78, 0x673b001e, 0xc78, 0x673c001e, 0xc78, 0x683d001e, 0xc78, 0x693e001e, 0xc78, 0x6a3f001e, };
gpl-2.0
KylinUI/android_kernel_oppo_find5
drivers/gpu/drm/drm_agpsupport.c
8276
12404
/** * \file drm_agpsupport.c * DRM support for AGP/GART backend * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include "drmP.h" #include <linux/module.h> #include <linux/slab.h> #if __OS_HAS_AGP #include <asm/agp.h> /** * Get AGP information. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a (output) drm_agp_info structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device has been initialized and acquired and fills in the * drm_agp_info structure with the information in drm_agp_head::agp_info. 
*/ int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info) { DRM_AGP_KERN *kern; if (!dev->agp || !dev->agp->acquired) return -EINVAL; kern = &dev->agp->agp_info; info->agp_version_major = kern->version.major; info->agp_version_minor = kern->version.minor; info->mode = kern->mode; info->aperture_base = kern->aper_base; info->aperture_size = kern->aper_size * 1024 * 1024; info->memory_allowed = kern->max_memory << PAGE_SHIFT; info->memory_used = kern->current_memory << PAGE_SHIFT; info->id_vendor = kern->device->vendor; info->id_device = kern->device->device; return 0; } EXPORT_SYMBOL(drm_agp_info); int drm_agp_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_info *info = data; int err; err = drm_agp_info(dev, info); if (err) return err; return 0; } /** * Acquire the AGP device. * * \param dev DRM device that is to acquire AGP. * \return zero on success or a negative number on failure. * * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. */ int drm_agp_acquire(struct drm_device * dev) { if (!dev->agp) return -ENODEV; if (dev->agp->acquired) return -EBUSY; if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev))) return -ENODEV; dev->agp->acquired = 1; return 0; } EXPORT_SYMBOL(drm_agp_acquire); /** * Acquire the AGP device (ioctl). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or a negative number on failure. * * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. */ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { return drm_agp_acquire((struct drm_device *) file_priv->minor->dev); } /** * Release the AGP device. * * \param dev DRM device that is to release AGP. * \return zero on success or a negative number on failure. * * Verifies the AGP device has been acquired and calls \c agp_backend_release. 
*/ int drm_agp_release(struct drm_device * dev) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; agp_backend_release(dev->agp->bridge); dev->agp->acquired = 0; return 0; } EXPORT_SYMBOL(drm_agp_release); int drm_agp_release_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { return drm_agp_release(dev); } /** * Enable the AGP bus. * * \param dev DRM device that has previously acquired AGP. * \param mode Requested AGP mode. * \return zero on success or a negative number on failure. * * Verifies the AGP device has been acquired but not enabled, and calls * \c agp_enable. */ int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; dev->agp->mode = mode.mode; agp_enable(dev->agp->bridge, mode.mode); dev->agp->enabled = 1; return 0; } EXPORT_SYMBOL(drm_agp_enable); int drm_agp_enable_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_mode *mode = data; return drm_agp_enable(dev, *mode); } /** * Allocate AGP memory. * * \param inode device inode. * \param file_priv file private pointer. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired, allocates the * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it. 
*/ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { struct drm_agp_mem *entry; DRM_AGP_MEM *memory; unsigned long pages; u32 type; if (!dev->agp || !dev->agp->acquired) return -EINVAL; if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL))) return -ENOMEM; memset(entry, 0, sizeof(*entry)); pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; type = (u32) request->type; if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) { kfree(entry); return -ENOMEM; } entry->handle = (unsigned long)memory->key + 1; entry->memory = memory; entry->bound = 0; entry->pages = pages; list_add(&entry->head, &dev->agp->memory); request->handle = entry->handle; request->physical = memory->physical; return 0; } EXPORT_SYMBOL(drm_agp_alloc); int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_buffer *request = data; return drm_agp_alloc(dev, request); } /** * Search for the AGP memory entry associated with a handle. * * \param dev DRM device structure. * \param handle AGP memory handle. * \return pointer to the drm_agp_mem structure associated with \p handle. * * Walks through drm_agp_head::memory until finding a matching handle. */ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev, unsigned long handle) { struct drm_agp_mem *entry; list_for_each_entry(entry, &dev->agp->memory, head) { if (entry->handle == handle) return entry; } return NULL; } /** * Unbind AGP memory from the GATT (ioctl). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_binding structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and acquired, looks-up the AGP memory * entry and passes it to the unbind_agp() function. 
*/ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) { struct drm_agp_mem *entry; int ret; if (!dev->agp || !dev->agp->acquired) return -EINVAL; if (!(entry = drm_agp_lookup_entry(dev, request->handle))) return -EINVAL; if (!entry->bound) return -EINVAL; ret = drm_unbind_agp(entry->memory); if (ret == 0) entry->bound = 0; return ret; } EXPORT_SYMBOL(drm_agp_unbind); int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_binding *request = data; return drm_agp_unbind(dev, request); } /** * Bind AGP memory into the GATT (ioctl) * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_binding structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired and that no memory * is currently bound into the GATT. Looks-up the AGP memory entry and passes * it to bind_agp() function. */ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) { struct drm_agp_mem *entry; int retcode; int page; if (!dev->agp || !dev->agp->acquired) return -EINVAL; if (!(entry = drm_agp_lookup_entry(dev, request->handle))) return -EINVAL; if (entry->bound) return -EINVAL; page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE; if ((retcode = drm_bind_agp(entry->memory, page))) return retcode; entry->bound = dev->agp->base + (page << PAGE_SHIFT); DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", dev->agp->base, entry->bound); return 0; } EXPORT_SYMBOL(drm_agp_bind); int drm_agp_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_binding *request = data; return drm_agp_bind(dev, request); } /** * Free AGP memory (ioctl). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. 
* * Verifies the AGP device is present and has been acquired and looks up the * AGP memory entry. If the memory it's currently bound, unbind it via * unbind_agp(). Frees it via free_agp() as well as the entry itself * and unlinks from the doubly linked list it's inserted in. */ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) { struct drm_agp_mem *entry; if (!dev->agp || !dev->agp->acquired) return -EINVAL; if (!(entry = drm_agp_lookup_entry(dev, request->handle))) return -EINVAL; if (entry->bound) drm_unbind_agp(entry->memory); list_del(&entry->head); drm_free_agp(entry->memory, entry->pages); kfree(entry); return 0; } EXPORT_SYMBOL(drm_agp_free); int drm_agp_free_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_agp_buffer *request = data; return drm_agp_free(dev, request); } /** * Initialize the AGP resources. * * \return pointer to a drm_agp_head structure. * * Gets the drm_agp_t structure which is made available by the agpgart module * via the inter_module_* functions. Creates and initializes a drm_agp_head * structure. */ struct drm_agp_head *drm_agp_init(struct drm_device *dev) { struct drm_agp_head *head = NULL; if (!(head = kmalloc(sizeof(*head), GFP_KERNEL))) return NULL; memset((void *)head, 0, sizeof(*head)); head->bridge = agp_find_bridge(dev->pdev); if (!head->bridge) { if (!(head->bridge = agp_backend_acquire(dev->pdev))) { kfree(head); return NULL; } agp_copy_info(head->bridge, &head->agp_info); agp_backend_release(head->bridge); } else { agp_copy_info(head->bridge, &head->agp_info); } if (head->agp_info.chipset == NOT_SUPPORTED) { kfree(head); return NULL; } INIT_LIST_HEAD(&head->memory); head->cant_use_aperture = head->agp_info.cant_use_aperture; head->page_mask = head->agp_info.page_mask; head->base = head->agp_info.aper_base; return head; } /** * Binds a collection of pages into AGP memory at the given offset, returning * the AGP memory structure containing them. 
* * No reference is held on the pages during this time -- it is up to the * caller to handle that. */ DRM_AGP_MEM * drm_agp_bind_pages(struct drm_device *dev, struct page **pages, unsigned long num_pages, uint32_t gtt_offset, u32 type) { DRM_AGP_MEM *mem; int ret, i; DRM_DEBUG("\n"); mem = agp_allocate_memory(dev->agp->bridge, num_pages, type); if (mem == NULL) { DRM_ERROR("Failed to allocate memory for %ld pages\n", num_pages); return NULL; } for (i = 0; i < num_pages; i++) mem->pages[i] = pages[i]; mem->page_count = num_pages; mem->is_flushed = true; ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE); if (ret != 0) { DRM_ERROR("Failed to bind AGP memory: %d\n", ret); agp_free_memory(mem); return NULL; } return mem; } EXPORT_SYMBOL(drm_agp_bind_pages); #endif /* __OS_HAS_AGP */
gpl-2.0
hallovveen31/HELLRAZOR
drivers/char/mwave/mwavedd.c
8276
19183
/* * * mwavedd.c -- mwave device driver * * * Written By: Mike Sullivan IBM Corporation * * Copyright (C) 1999 IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
* * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * 10/23/2000 - Alpha Release * First release to the public */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/major.h> #include <linux/miscdevice.h> #include <linux/device.h> #include <linux/serial.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/serial_8250.h> #include "smapi.h" #include "mwavedd.h" #include "3780i.h" #include "tp3780i.h" MODULE_DESCRIPTION("3780i Advanced Communications Processor (Mwave) driver"); MODULE_AUTHOR("Mike Sullivan and Paul Schroeder"); MODULE_LICENSE("GPL"); /* * These parameters support the setting of MWave resources. Note that no * checks are made against other devices (ie. superio) for conflicts. 
* We'll depend on users using the tpctl utility to do that for now */ static DEFINE_MUTEX(mwave_mutex); int mwave_debug = 0; int mwave_3780i_irq = 0; int mwave_3780i_io = 0; int mwave_uart_irq = 0; int mwave_uart_io = 0; module_param(mwave_debug, int, 0); module_param(mwave_3780i_irq, int, 0); module_param(mwave_3780i_io, int, 0); module_param(mwave_uart_irq, int, 0); module_param(mwave_uart_io, int, 0); static int mwave_open(struct inode *inode, struct file *file); static int mwave_close(struct inode *inode, struct file *file); static long mwave_ioctl(struct file *filp, unsigned int iocmd, unsigned long ioarg); MWAVE_DEVICE_DATA mwave_s_mdd; static int mwave_open(struct inode *inode, struct file *file) { unsigned int retval = 0; PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_open, entry inode %p file %p\n", inode, file); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_open, exit return retval %x\n", retval); return retval; } static int mwave_close(struct inode *inode, struct file *file) { unsigned int retval = 0; PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_close, entry inode %p file %p\n", inode, file); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n", retval); return retval; } static long mwave_ioctl(struct file *file, unsigned int iocmd, unsigned long ioarg) { unsigned int retval = 0; pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; void __user *arg = (void __user *)ioarg; PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n", file, iocmd, (int) ioarg); switch (iocmd) { case IOCTL_MW_RESET: PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RESET" " calling tp3780I_ResetDSP\n"); mutex_lock(&mwave_mutex); retval = tp3780I_ResetDSP(&pDrvData->rBDData); mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RESET" " retval %x from tp3780I_ResetDSP\n", retval); break; case IOCTL_MW_RUN: PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RUN" " calling tp3780I_StartDSP\n"); mutex_lock(&mwave_mutex); retval = 
tp3780I_StartDSP(&pDrvData->rBDData); mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RUN" " retval %x from tp3780I_StartDSP\n", retval); break; case IOCTL_MW_DSP_ABILITIES: { MW_ABILITIES rAbilities; PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_ioctl," " IOCTL_MW_DSP_ABILITIES calling" " tp3780I_QueryAbilities\n"); mutex_lock(&mwave_mutex); retval = tp3780I_QueryAbilities(&pDrvData->rBDData, &rAbilities); mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" " retval %x from tp3780I_QueryAbilities\n", retval); if (retval == 0) { if( copy_to_user(arg, &rAbilities, sizeof(MW_ABILITIES)) ) return -EFAULT; } PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" " exit retval %x\n", retval); } break; case IOCTL_MW_READ_DATA: case IOCTL_MW_READCLEAR_DATA: { MW_READWRITE rReadData; unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rReadData, arg, sizeof(MW_READWRITE)) ) return -EFAULT; pusBuffer = (unsigned short __user *) (rReadData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA," " size %lx, ioarg %lx pusBuffer %p\n", rReadData.ulDataLength, ioarg, pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rReadData.ulDataLength, rReadData.usDspAddress); mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_READ_INST: { MW_READWRITE rReadData; unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rReadData, arg, sizeof(MW_READWRITE)) ) return -EFAULT; pusBuffer = (unsigned short __user *) (rReadData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_READ_INST," " size %lx, ioarg %lx pusBuffer %p\n", rReadData.ulDataLength / 2, ioarg, pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rReadData.ulDataLength / 2, rReadData.usDspAddress); mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_WRITE_DATA: { MW_READWRITE rWriteData; 
unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rWriteData, arg, sizeof(MW_READWRITE)) ) return -EFAULT; pusBuffer = (unsigned short __user *) (rWriteData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA," " size %lx, ioarg %lx pusBuffer %p\n", rWriteData.ulDataLength, ioarg, pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rWriteData.ulDataLength, rWriteData.usDspAddress); mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_WRITE_INST: { MW_READWRITE rWriteData; unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rWriteData, arg, sizeof(MW_READWRITE)) ) return -EFAULT; pusBuffer = (unsigned short __user *)(rWriteData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST," " size %lx, ioarg %lx pusBuffer %p\n", rWriteData.ulDataLength, ioarg, pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData, iocmd, pusBuffer, rWriteData.ulDataLength, rWriteData.usDspAddress); mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_REGISTER_IPC: { unsigned int ipcnum = (unsigned int) ioarg; if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:" " IOCTL_MW_REGISTER_IPC:" " Error: Invalid ipcnum %x\n", ipcnum); return -EINVAL; } PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x entry usIntCount %x\n", ipcnum, pDrvData->IPCs[ipcnum].usIntCount); mutex_lock(&mwave_mutex); pDrvData->IPCs[ipcnum].bIsHere = FALSE; pDrvData->IPCs[ipcnum].bIsEnabled = TRUE; mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x exit\n", ipcnum); } break; case IOCTL_MW_GET_IPC: { unsigned int ipcnum = (unsigned int) ioarg; if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:" " IOCTL_MW_GET_IPC: Error:" " Invalid ipcnum %x\n", ipcnum); return -EINVAL; } PRINTK_3(TRACE_MWAVE, 
"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" " ipcnum %x, usIntCount %x\n", ipcnum, pDrvData->IPCs[ipcnum].usIntCount); mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { DECLARE_WAITQUEUE(wait, current); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, thread for" " ipc %x going to sleep\n", ipcnum); add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait); pDrvData->IPCs[ipcnum].bIsHere = TRUE; set_current_state(TASK_INTERRUPTIBLE); /* check whether an event was signalled by */ /* the interrupt handler while we were gone */ if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */ pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl" " IOCTL_MW_GET_IPC ipcnum %x" " handling first int\n", ipcnum); } else { /* either 1st int has not yet occurred, or we have already handled the first int */ schedule(); if (pDrvData->IPCs[ipcnum].usIntCount == 1) { pDrvData->IPCs[ipcnum].usIntCount = 2; } PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl" " IOCTL_MW_GET_IPC ipcnum %x" " woke up and returning to" " application\n", ipcnum); } pDrvData->IPCs[ipcnum].bIsHere = FALSE; remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait); set_current_state(TASK_RUNNING); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC," " returning thread for ipc %x" " processing\n", ipcnum); } mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_UNREGISTER_IPC: { unsigned int ipcnum = (unsigned int) ioarg; PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC" " ipcnum %x\n", ipcnum); if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:" " IOCTL_MW_UNREGISTER_IPC:" " Error: Invalid ipcnum %x\n", ipcnum); return -EINVAL; } mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { pDrvData->IPCs[ipcnum].bIsEnabled = FALSE; if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) { 
wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue); } } mutex_unlock(&mwave_mutex); } break; default: return -ENOTTY; break; } /* switch */ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval); return retval; } static ssize_t mwave_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) { PRINTK_5(TRACE_MWAVE, "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n", file, buf, count, ppos); return -EINVAL; } static ssize_t mwave_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos) { PRINTK_5(TRACE_MWAVE, "mwavedd::mwave_write entry file %p, buf %p," " count %zx ppos %p\n", file, buf, count, ppos); return -EINVAL; } static int register_serial_portandirq(unsigned int port, int irq) { struct uart_port uart; switch ( port ) { case 0x3f8: case 0x2f8: case 0x3e8: case 0x2e8: /* OK */ break; default: PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::register_serial_portandirq:" " Error: Illegal port %x\n", port ); return -1; } /* switch */ /* port is okay */ switch ( irq ) { case 3: case 4: case 5: case 7: /* OK */ break; default: PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::register_serial_portandirq:" " Error: Illegal irq %x\n", irq ); return -1; } /* switch */ /* irq is okay */ memset(&uart, 0, sizeof(struct uart_port)); uart.uartclk = 1843200; uart.iobase = port; uart.irq = irq; uart.iotype = UPIO_PORT; uart.flags = UPF_SHARE_IRQ; return serial8250_register_port(&uart); } static const struct file_operations mwave_fops = { .owner = THIS_MODULE, .read = mwave_read, .write = mwave_write, .unlocked_ioctl = mwave_ioctl, .open = mwave_open, .release = mwave_close, .llseek = default_llseek, }; static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops }; #if 0 /* totally b0rked */ /* * sysfs support <paulsch@us.ibm.com> */ struct device mwave_device; /* Prevent code redundancy, create a macro for mwave_show_* functions. 
*/ #define mwave_show_function(attr_name, format_string, field) \ static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \ { \ DSP_3780I_CONFIG_SETTINGS *pSettings = \ &mwave_s_mdd.rBDData.rDspSettings; \ return sprintf(buf, format_string, pSettings->field); \ } /* All of our attributes are read attributes. */ #define mwave_dev_rd_attr(attr_name, format_string, field) \ mwave_show_function(attr_name, format_string, field) \ static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL) mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma); mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq); mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO); mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq); mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO); static struct device_attribute * const mwave_dev_attrs[] = { &dev_attr_3780i_dma, &dev_attr_3780i_irq, &dev_attr_3780i_io, &dev_attr_uart_irq, &dev_attr_uart_io, }; #endif /* * mwave_init is called on module load * * mwave_exit is called on module unload * mwave_exit is also used to clean up after an aborted mwave_init */ static void mwave_exit(void) { pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n"); #if 0 for (i = 0; i < pDrvData->nr_registered_attrs; i++) device_remove_file(&mwave_device, mwave_dev_attrs[i]); pDrvData->nr_registered_attrs = 0; if (pDrvData->device_registered) { device_unregister(&mwave_device); pDrvData->device_registered = FALSE; } #endif if ( pDrvData->sLine >= 0 ) { serial8250_unregister_port(pDrvData->sLine); } if (pDrvData->bMwaveDevRegistered) { misc_deregister(&mwave_misc_dev); } if (pDrvData->bDSPEnabled) { tp3780I_DisableDSP(&pDrvData->rBDData); } if (pDrvData->bResourcesClaimed) { tp3780I_ReleaseResources(&pDrvData->rBDData); } if (pDrvData->bBDInitialized) { tp3780I_Cleanup(&pDrvData->rBDData); } PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n"); } module_exit(mwave_exit); static int __init 
mwave_init(void) { int i; int retval = 0; pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n"); memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA)); pDrvData->bBDInitialized = FALSE; pDrvData->bResourcesClaimed = FALSE; pDrvData->bDSPEnabled = FALSE; pDrvData->bDSPReset = FALSE; pDrvData->bMwaveDevRegistered = FALSE; pDrvData->sLine = -1; for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) { pDrvData->IPCs[i].bIsEnabled = FALSE; pDrvData->IPCs[i].bIsHere = FALSE; pDrvData->IPCs[i].usIntCount = 0; /* no ints received yet */ init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue); } retval = tp3780I_InitializeBoardData(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_InitializeBoardData" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_init: Error:" " Failed to initialize board data\n"); goto cleanup_error; } pDrvData->bBDInitialized = TRUE; retval = tp3780I_CalcResources(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_CalcResources" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to calculate resources\n"); goto cleanup_error; } retval = tp3780I_ClaimResources(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_ClaimResources" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to claim resources\n"); goto cleanup_error; } pDrvData->bResourcesClaimed = TRUE; retval = tp3780I_EnableDSP(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_EnableDSP" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to enable DSP\n"); goto cleanup_error; } pDrvData->bDSPEnabled = TRUE; if (misc_register(&mwave_misc_dev) < 0) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to register misc 
device\n"); goto cleanup_error; } pDrvData->bMwaveDevRegistered = TRUE; pDrvData->sLine = register_serial_portandirq( pDrvData->rBDData.rDspSettings.usUartBaseIO, pDrvData->rBDData.rDspSettings.usUartIrq ); if (pDrvData->sLine < 0) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to register serial driver\n"); goto cleanup_error; } /* uart is registered */ #if 0 /* sysfs */ memset(&mwave_device, 0, sizeof (struct device)); dev_set_name(&mwave_device, "mwave"); if (device_register(&mwave_device)) goto cleanup_error; pDrvData->device_registered = TRUE; for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) { if(device_create_file(&mwave_device, mwave_dev_attrs[i])) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to create sysfs file %s\n", mwave_dev_attrs[i]->attr.name); goto cleanup_error; } pDrvData->nr_registered_attrs++; } #endif /* SUCCESS! */ return 0; cleanup_error: PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_init: Error:" " Failed to initialize\n"); mwave_exit(); /* clean up */ return -EIO; } module_init(mwave_init);
gpl-2.0
cmtrebon/android_kernel_samsung_msm7x27a
arch/mips/vr41xx/common/bcu.c
9300
5507
/* * bcu.c, Bus Control Unit routines for the NEC VR4100 series. * * Copyright (C) 2002 MontaVista Software Inc. * Author: Yoichi Yuasa <source@mvista.com> * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * Changes: * MontaVista Software Inc. <source@mvista.com> * - New creation, NEC VR4122 and VR4131 are supported. * - Added support for NEC VR4111 and VR4121. * * Yoichi Yuasa <yuasa@linux-mips.org> * - Added support for NEC VR4133. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/types.h> #include <asm/cpu.h> #include <asm/io.h> #define CLKSPEEDREG_TYPE1 (void __iomem *)KSEG1ADDR(0x0b000014) #define CLKSPEEDREG_TYPE2 (void __iomem *)KSEG1ADDR(0x0f000014) #define CLKSP(x) ((x) & 0x001f) #define CLKSP_VR4133(x) ((x) & 0x0007) #define DIV2B 0x8000 #define DIV3B 0x4000 #define DIV4B 0x2000 #define DIVT(x) (((x) & 0xf000) >> 12) #define DIVVT(x) (((x) & 0x0f00) >> 8) #define TDIVMODE(x) (2 << (((x) & 0x1000) >> 12)) #define VTDIVMODE(x) (((x) & 0x0700) >> 8) static unsigned long vr41xx_vtclock; static unsigned long vr41xx_tclock; unsigned long vr41xx_get_vtclock_frequency(void) { return vr41xx_vtclock; } EXPORT_SYMBOL_GPL(vr41xx_get_vtclock_frequency); unsigned long vr41xx_get_tclock_frequency(void) { return vr41xx_tclock; } EXPORT_SYMBOL_GPL(vr41xx_get_tclock_frequency); static inline uint16_t read_clkspeed(void) { switch (current_cpu_type()) { case CPU_VR4111: case CPU_VR4121: return readw(CLKSPEEDREG_TYPE1); case CPU_VR4122: case CPU_VR4131: case CPU_VR4133: return readw(CLKSPEEDREG_TYPE2); default: printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n"); break; } return 0; } static inline unsigned long calculate_pclock(uint16_t clkspeed) { unsigned long pclock = 0; switch (current_cpu_type()) { case CPU_VR4111: case CPU_VR4121: pclock = 18432000 * 64; pclock /= CLKSP(clkspeed); break; case CPU_VR4122: pclock = 18432000 * 98; pclock /= CLKSP(clkspeed); break; case CPU_VR4131: pclock = 18432000 * 108; pclock /= CLKSP(clkspeed); break; case CPU_VR4133: switch (CLKSP_VR4133(clkspeed)) { case 0: pclock = 133000000; break; case 1: pclock = 149000000; break; case 2: pclock = 165900000; break; case 3: pclock = 199100000; break; case 4: pclock = 265900000; break; default: printk(KERN_INFO "Unknown PClock speed for NEC VR4133\n"); break; } break; default: printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n"); break; } printk(KERN_INFO "PClock: %ldHz\n", 
pclock); return pclock; } static inline unsigned long calculate_vtclock(uint16_t clkspeed, unsigned long pclock) { unsigned long vtclock = 0; switch (current_cpu_type()) { case CPU_VR4111: /* The NEC VR4111 doesn't have the VTClock. */ break; case CPU_VR4121: vtclock = pclock; /* DIVVT == 9 Divide by 1.5 . VTClock = (PClock * 6) / 9 */ if (DIVVT(clkspeed) == 9) vtclock = pclock * 6; /* DIVVT == 10 Divide by 2.5 . VTClock = (PClock * 4) / 10 */ else if (DIVVT(clkspeed) == 10) vtclock = pclock * 4; vtclock /= DIVVT(clkspeed); printk(KERN_INFO "VTClock: %ldHz\n", vtclock); break; case CPU_VR4122: if(VTDIVMODE(clkspeed) == 7) vtclock = pclock / 1; else if(VTDIVMODE(clkspeed) == 1) vtclock = pclock / 2; else vtclock = pclock / VTDIVMODE(clkspeed); printk(KERN_INFO "VTClock: %ldHz\n", vtclock); break; case CPU_VR4131: case CPU_VR4133: vtclock = pclock / VTDIVMODE(clkspeed); printk(KERN_INFO "VTClock: %ldHz\n", vtclock); break; default: printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n"); break; } return vtclock; } static inline unsigned long calculate_tclock(uint16_t clkspeed, unsigned long pclock, unsigned long vtclock) { unsigned long tclock = 0; switch (current_cpu_type()) { case CPU_VR4111: if (!(clkspeed & DIV2B)) tclock = pclock / 2; else if (!(clkspeed & DIV3B)) tclock = pclock / 3; else if (!(clkspeed & DIV4B)) tclock = pclock / 4; break; case CPU_VR4121: tclock = pclock / DIVT(clkspeed); break; case CPU_VR4122: case CPU_VR4131: case CPU_VR4133: tclock = vtclock / TDIVMODE(clkspeed); break; default: printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n"); break; } printk(KERN_INFO "TClock: %ldHz\n", tclock); return tclock; } void vr41xx_calculate_clock_frequency(void) { unsigned long pclock; uint16_t clkspeed; clkspeed = read_clkspeed(); pclock = calculate_pclock(clkspeed); vr41xx_vtclock = calculate_vtclock(clkspeed, pclock); vr41xx_tclock = calculate_tclock(clkspeed, pclock, vr41xx_vtclock); } EXPORT_SYMBOL_GPL(vr41xx_calculate_clock_frequency);
gpl-2.0
LiquidSmooth-Devices/Deathly_Kernel_D2
tools/power/cpupower/utils/helpers/amd.c
9556
2831
#if defined(__i386__) || defined(__x86_64__) #include <unistd.h> #include <errno.h> #include <stdio.h> #include <stdint.h> #include <pci/pci.h> #include "helpers/helpers.h" #define MSR_AMD_PSTATE_STATUS 0xc0010063 #define MSR_AMD_PSTATE 0xc0010064 #define MSR_AMD_PSTATE_LIMIT 0xc0010061 union msr_pstate { struct { unsigned fid:6; unsigned did:3; unsigned vid:7; unsigned res1:6; unsigned nbdid:1; unsigned res2:2; unsigned nbvid:7; unsigned iddval:8; unsigned idddiv:2; unsigned res3:21; unsigned en:1; } bits; unsigned long long val; }; static int get_did(int family, union msr_pstate pstate) { int t; if (family == 0x12) t = pstate.val & 0xf; else t = pstate.bits.did; return t; } static int get_cof(int family, union msr_pstate pstate) { int t; int fid, did; did = get_did(family, pstate); t = 0x10; fid = pstate.bits.fid; if (family == 0x11) t = 0x8; return (100 * (fid + t)) >> did; } /* Needs: * cpu -> the cpu that gets evaluated * cpu_family -> The cpu's family (0x10, 0x12,...) * boots_states -> how much boost states the machines support * * Fills up: * pstates -> a pointer to an array of size MAX_HW_PSTATES * must be initialized with zeros. * All available HW pstates (including boost states) * no -> amount of pstates above array got filled up with * * returns zero on success, -1 on failure */ int decode_pstates(unsigned int cpu, unsigned int cpu_family, int boost_states, unsigned long *pstates, int *no) { int i, psmax, pscur; union msr_pstate pstate; unsigned long long val; /* Only read out frequencies from HW when CPU might be boostable to keep the code as short and clean as possible. Otherwise frequencies are exported via ACPI tables. 
*/ if (cpu_family < 0x10 || cpu_family == 0x14) return -1; if (read_msr(cpu, MSR_AMD_PSTATE_LIMIT, &val)) return -1; psmax = (val >> 4) & 0x7; if (read_msr(cpu, MSR_AMD_PSTATE_STATUS, &val)) return -1; pscur = val & 0x7; pscur += boost_states; psmax += boost_states; for (i = 0; i <= psmax; i++) { if (i >= MAX_HW_PSTATES) { fprintf(stderr, "HW pstates [%d] exceeding max [%d]\n", psmax, MAX_HW_PSTATES); return -1; } if (read_msr(cpu, MSR_AMD_PSTATE + i, &pstate.val)) return -1; pstates[i] = get_cof(cpu_family, pstate); } *no = i; return 0; } int amd_pci_get_num_boost_states(int *active, int *states) { struct pci_access *pci_acc; struct pci_dev *device; uint8_t val = 0; *active = *states = 0; device = pci_slot_func_init(&pci_acc, 0x18, 4); if (device == NULL) return -ENODEV; val = pci_read_byte(device, 0x15c); if (val & 3) *active = 1; else *active = 0; *states = (val >> 2) & 7; pci_cleanup(pci_acc); return 0; } #endif /* defined(__i386__) || defined(__x86_64__) */
gpl-2.0
paulburton/axdroid-kernel
drivers/s390/char/defkeymap.c
14676
6243
/* Do not edit this file! It was automatically generated by */ /* loadkeys --mktable defkeymap.map > defkeymap.c */ #include <linux/types.h> #include <linux/keyboard.h> #include <linux/kd.h> #include <linux/kbd_kern.h> #include <linux/kbd_diacr.h> u_short plain_map[NR_KEYS] = { 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf020, 0xf000, 0xf0e2, 0xf0e4, 0xf0e0, 0xf0e1, 0xf0e3, 0xf0e5, 0xf0e7, 0xf0f1, 0xf0a2, 0xf02e, 0xf03c, 0xf028, 0xf02b, 0xf07c, 0xf026, 0xf0e9, 0xf0e2, 0xf0eb, 0xf0e8, 0xf0ed, 0xf0ee, 0xf0ef, 0xf0ec, 0xf0df, 0xf021, 0xf024, 0xf02a, 0xf029, 0xf03b, 0xf0ac, 0xf02d, 0xf02f, 0xf0c2, 0xf0c4, 0xf0c0, 0xf0c1, 0xf0c3, 0xf0c5, 0xf0c7, 0xf0d1, 0xf0a6, 0xf02c, 0xf025, 0xf05f, 0xf03e, 0xf03f, 0xf0f8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0c8, 0xf0cd, 0xf0ce, 0xf0cf, 0xf0cc, 0xf060, 0xf03a, 0xf023, 0xf040, 0xf027, 0xf03d, 0xf022, }; static u_short shift_map[NR_KEYS] = { 0xf0d8, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067, 0xf068, 0xf069, 0xf0ab, 0xf0bb, 0xf0f0, 0xf0fd, 0xf0fe, 0xf0b1, 0xf0b0, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f, 0xf070, 0xf071, 0xf072, 0xf000, 0xf000, 0xf0e6, 0xf0b8, 0xf0c6, 0xf0a4, 0xf0b5, 0xf07e, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077, 0xf078, 0xf079, 0xf07a, 0xf0a1, 0xf0bf, 0xf0d0, 0xf0dd, 0xf0de, 0xf0ae, 0xf402, 0xf0a3, 0xf0a5, 0xf0b7, 0xf0a9, 0xf0a7, 0xf0b6, 0xf0bc, 0xf0bd, 0xf0be, 0xf05b, 0xf05d, 0xf000, 0xf0a8, 0xf0b4, 0xf0d7, 0xf07b, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047, 0xf048, 0xf049, 0xf000, 0xf0f4, 0xf0f6, 0xf0f2, 0xf0f3, 0xf0f5, 0xf07d, 0xf04a, 
0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f, 0xf050, 0xf051, 0xf052, 0xf0b9, 0xf0fb, 0xf0fc, 0xf0f9, 0xf0fa, 0xf0ff, 0xf05c, 0xf0f7, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057, 0xf058, 0xf059, 0xf05a, 0xf0b2, 0xf0d4, 0xf0d6, 0xf0d2, 0xf0d3, 0xf0d5, 0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037, 0xf038, 0xf039, 0xf0b3, 0xf0db, 0xf0dc, 0xf0d9, 0xf0da, 0xf000, }; static u_short ctrl_map[NR_KEYS] = { 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf11f, 0xf120, 0xf121, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf01a, 0xf003, 0xf212, 0xf004, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf109, 0xf10a, 0xf206, 0xf00a, 0xf200, 0xf200, }; static u_short shift_ctrl_map[NR_KEYS] = { 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 
0xf10c, 0xf10d, 0xf10e, 0xf10f, 0xf110, 0xf111, 0xf112, 0xf113, 0xf11e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf100, 0xf101, 0xf211, 0xf103, 0xf104, 0xf105, 0xf20b, 0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, }; ushort *key_maps[MAX_NR_KEYMAPS] = { plain_map, shift_map, NULL, NULL, ctrl_map, shift_ctrl_map, NULL, }; unsigned int keymap_count = 4; /* * Philosophy: most people do not define more strings, but they who do * often want quite a lot of string space. So, we statically allocate * the default and allocate dynamically in chunks of 512 bytes. */ char func_buf[] = { '\033', '[', '[', 'A', 0, '\033', '[', '[', 'B', 0, '\033', '[', '[', 'C', 0, '\033', '[', '[', 'D', 0, '\033', '[', '[', 'E', 0, '\033', '[', '1', '7', '~', 0, '\033', '[', '1', '8', '~', 0, '\033', '[', '1', '9', '~', 0, '\033', '[', '2', '0', '~', 0, '\033', '[', '2', '1', '~', 0, '\033', '[', '2', '3', '~', 0, '\033', '[', '2', '4', '~', 0, '\033', '[', '2', '5', '~', 0, '\033', '[', '2', '6', '~', 0, '\033', '[', '2', '8', '~', 0, '\033', '[', '2', '9', '~', 0, '\033', '[', '3', '1', '~', 0, '\033', '[', '3', '2', '~', 0, '\033', '[', '3', '3', '~', 0, '\033', '[', '3', '4', '~', 0, }; char *funcbufptr = func_buf; int funcbufsize = sizeof(func_buf); int funcbufleft = 0; /* space left */ char *func_table[MAX_NR_FUNC] = { func_buf + 0, func_buf + 5, func_buf + 10, func_buf + 15, func_buf + 20, func_buf + 25, func_buf + 31, func_buf + 37, func_buf + 43, func_buf + 49, func_buf + 55, func_buf + 61, func_buf + 67, func_buf + 73, func_buf + 79, func_buf + 85, func_buf + 91, func_buf + 97, func_buf + 103, func_buf + 109, NULL, }; struct kbdiacruc accent_table[MAX_DIACR] = { {'^', 'c', 0003}, {'^', 'd', 0004}, 
{'^', 'z', 0032}, {'^', 0012, 0000}, }; unsigned int accent_table_size = 4;
gpl-2.0
ShinySide/SM-G361H
drivers/irqchip/irq-gic-v3.c
85
16206
/* * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. * Author: Marc Zyngier <marc.zyngier@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/cpu.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/percpu.h> #include <linux/slab.h> #include <linux/irqchip/arm-gic-v3.h> #include <asm/cputype.h> #include <asm/exception.h> #include <asm/smp_plat.h> #include "irq-gic-common.h" #include "irqchip.h" struct gic_chip_data { void __iomem *dist_base; void __iomem **redist_base; void __percpu __iomem **rdist; struct irq_domain *domain; u64 redist_stride; u32 redist_regions; unsigned int irq_nr; }; static struct gic_chip_data gic_data __read_mostly; #define gic_data_rdist() (this_cpu_ptr(gic_data.rdist)) #define gic_data_rdist_rd_base() (*gic_data_rdist()) #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) /* Our default, arbitrary priority value. Linux only uses one anyway. 
*/ #define DEFAULT_PMR_VALUE 0xf0 static inline unsigned int gic_irq(struct irq_data *d) { return d->hwirq; } static inline int gic_irq_in_rdist(struct irq_data *d) { return gic_irq(d) < 32; } static inline void __iomem *gic_dist_base(struct irq_data *d) { if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ return gic_data_rdist_sgi_base(); if (d->hwirq <= 1023) /* SPI -> dist_base */ return gic_data.dist_base; if (d->hwirq >= 8192) BUG(); /* LPI Detected!!! */ return NULL; } static void gic_do_wait_for_rwp(void __iomem *base) { u32 count = 1000000; /* 1s! */ while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { count--; if (!count) { pr_err_ratelimited("RWP timeout, gone fishing\n"); return; } cpu_relax(); udelay(1); }; } /* Wait for completion of a distributor change */ static void gic_dist_wait_for_rwp(void) { gic_do_wait_for_rwp(gic_data.dist_base); } /* Wait for completion of a redistributor change */ static void gic_redist_wait_for_rwp(void) { gic_do_wait_for_rwp(gic_data_rdist_rd_base()); } /* Low level accessors */ static u64 gic_read_iar(void) { u64 irqstat; asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat)); return irqstat; } static void gic_write_pmr(u64 val) { asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val)); } static void gic_write_ctlr(u64 val) { asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val)); isb(); } static void gic_write_grpen1(u64 val) { asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val)); isb(); } static void gic_write_sgi1r(u64 val) { asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val)); } static void gic_enable_sre(void) { u64 val; asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); val |= ICC_SRE_EL1_SRE; asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val)); isb(); /* * Need to check that the SRE bit has actually been set. If * not, it means that SRE is disabled at EL2. 
We're going to * die painfully, and there is nothing we can do about it. * * Kindly inform the luser. */ asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); if (!(val & ICC_SRE_EL1_SRE)) pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); } static void gic_enable_redist(void) { void __iomem *rbase; u32 count = 1000000; /* 1s! */ u32 val; rbase = gic_data_rdist_rd_base(); /* Wake up this CPU redistributor */ val = readl_relaxed(rbase + GICR_WAKER); val &= ~GICR_WAKER_ProcessorSleep; writel_relaxed(val, rbase + GICR_WAKER); while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) { count--; if (!count) { pr_err_ratelimited("redist didn't wake up...\n"); return; } cpu_relax(); udelay(1); }; } /* * Routines to disable, enable, EOI and route interrupts */ static void gic_poke_irq(struct irq_data *d, u32 offset) { u32 mask = 1 << (gic_irq(d) % 32); void (*rwp_wait)(void); void __iomem *base; if (gic_irq_in_rdist(d)) { base = gic_data_rdist_sgi_base(); rwp_wait = gic_redist_wait_for_rwp; } else { base = gic_data.dist_base; rwp_wait = gic_dist_wait_for_rwp; } writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); rwp_wait(); } static int gic_peek_irq(struct irq_data *d, u32 offset) { u32 mask = 1 << (gic_irq(d) % 32); void __iomem *base; if (gic_irq_in_rdist(d)) base = gic_data_rdist_sgi_base(); else base = gic_data.dist_base; return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); } static void gic_mask_irq(struct irq_data *d) { gic_poke_irq(d, GICD_ICENABLER); } static void gic_unmask_irq(struct irq_data *d) { gic_poke_irq(d, GICD_ISENABLER); } static void gic_eoi_irq(struct irq_data *d) { gic_write_eoir(gic_irq(d)); } static int gic_set_type(struct irq_data *d, unsigned int type) { unsigned int irq = gic_irq(d); void (*rwp_wait)(void); void __iomem *base; /* Interrupt configuration for SGIs can't be changed */ if (irq < 16) return -EINVAL; if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) 
return -EINVAL; if (gic_irq_in_rdist(d)) { base = gic_data_rdist_sgi_base(); rwp_wait = gic_redist_wait_for_rwp; } else { base = gic_data.dist_base; rwp_wait = gic_dist_wait_for_rwp; } gic_configure_irq(irq, type, base, rwp_wait); return 0; } static u64 gic_mpidr_to_affinity(u64 mpidr) { u64 aff; aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); return aff; } static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { u64 irqnr; do { irqnr = gic_read_iar(); if (likely(irqnr > 15 && irqnr < 1020)) { u64 irq = irq_find_mapping(gic_data.domain, irqnr); if (likely(irq)) { handle_IRQ(irq, regs); continue; } WARN_ONCE(true, "Unexpected SPI received!\n"); gic_write_eoir(irqnr); } if (irqnr < 16) { gic_write_eoir(irqnr); #ifdef CONFIG_SMP handle_IPI(irqnr, regs); #else WARN_ONCE(true, "Unexpected SGI received!\n"); #endif continue; } } while (irqnr != ICC_IAR1_EL1_SPURIOUS); } static void __init gic_dist_init(void) { unsigned int i; u64 affinity; void __iomem *base = gic_data.dist_base; /* Disable the distributor */ writel_relaxed(0, base + GICD_CTLR); gic_dist_wait_for_rwp(); gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp); /* Enable distributor with ARE, Group1 */ writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, base + GICD_CTLR); /* * Set all global interrupts to the boot CPU only. ARE must be * enabled. */ affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); for (i = 32; i < gic_data.irq_nr; i++) writeq_relaxed(affinity, base + GICD_IROUTER + i * 8); } static int gic_populate_rdist(void) { u64 mpidr = cpu_logical_map(smp_processor_id()); u64 typer; u32 aff; int i; /* * Convert affinity to a 32bit value that can be matched to * GICR_TYPER bits [63:32]. 
*/ aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); for (i = 0; i < gic_data.redist_regions; i++) { void __iomem *ptr = gic_data.redist_base[i]; u32 reg; reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ pr_warn("No redistributor present @%p\n", ptr); break; } do { typer = readq_relaxed(ptr + GICR_TYPER); if ((typer >> 32) == aff) { gic_data_rdist_rd_base() = ptr; pr_info("CPU%d: found redistributor %llx @%p\n", smp_processor_id(), (unsigned long long)mpidr, ptr); return 0; } if (gic_data.redist_stride) { ptr += gic_data.redist_stride; } else { ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ if (typer & GICR_TYPER_VLPIS) ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ } } while (!(typer & GICR_TYPER_LAST)); } /* We couldn't even deal with ourselves... */ WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n", smp_processor_id(), (unsigned long long)mpidr); return -ENODEV; } static void gic_cpu_init(void) { void __iomem *rbase; /* Register ourselves with the rest of the world */ if (gic_populate_rdist()) return; gic_enable_redist(); rbase = gic_data_rdist_sgi_base(); gic_cpu_config(rbase, gic_redist_wait_for_rwp); /* Enable system registers */ gic_enable_sre(); /* Set priority mask register */ gic_write_pmr(DEFAULT_PMR_VALUE); /* EOI deactivates interrupt too (mode 0) */ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir); /* ... and let's hit the road... */ gic_write_grpen1(1); } #ifdef CONFIG_SMP static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, void *hcpu) { if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) gic_cpu_init(); return NOTIFY_OK; } /* * Notifier for enabling the GIC CPU interface. Set an arbitrarily high * priority because the GIC needs to be up before the ARM generic timers. 
*/ static struct notifier_block gic_cpu_notifier = { .notifier_call = gic_secondary_init, .priority = 100, }; static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, u64 cluster_id) { int cpu = *base_cpu; u64 mpidr = cpu_logical_map(cpu); u16 tlist = 0; while (cpu < nr_cpu_ids) { /* * If we ever get a cluster of more than 16 CPUs, just * scream and skip that CPU. */ if (WARN_ON((mpidr & 0xff) >= 16)) goto out; tlist |= 1 << (mpidr & 0xf); cpu = cpumask_next(cpu, mask); if (cpu == nr_cpu_ids) goto out; mpidr = cpu_logical_map(cpu); if (cluster_id != (mpidr & ~0xffUL)) { cpu--; goto out; } } out: *base_cpu = cpu; return tlist; } static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) { u64 val; val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | irq << 24 | MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | tlist); pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); gic_write_sgi1r(val); } static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) { int cpu; if (WARN_ON(irq >= 16)) return; /* * Ensure that stores to Normal memory are visible to the * other CPUs before issuing the IPI. 
*/ smp_wmb(); for_each_cpu_mask(cpu, *mask) { u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; u16 tlist; tlist = gic_compute_target_list(&cpu, mask, cluster_id); gic_send_sgi(cluster_id, tlist, irq); } /* Force the above writes to ICC_SGI1R_EL1 to be executed */ isb(); } static void gic_smp_init(void) { set_smp_cross_call(gic_raise_softirq); register_cpu_notifier(&gic_cpu_notifier); } static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); void __iomem *reg; int enabled; u64 val; if (gic_irq_in_rdist(d)) return -EINVAL; /* If interrupt was enabled, disable it first */ enabled = gic_peek_irq(d, GICD_ISENABLER); if (enabled) gic_mask_irq(d); reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); writeq_relaxed(val, reg); /* * If the interrupt was enabled, enabled it again. Otherwise, * just wait for the distributor to have digested our changes. 
*/ if (enabled) gic_unmask_irq(d); else gic_dist_wait_for_rwp(); return IRQ_SET_MASK_OK; } #else #define gic_set_affinity NULL #define gic_smp_init() do { } while(0) #endif static struct irq_chip gic_chip = { .name = "GICv3", .irq_mask = gic_mask_irq, .irq_unmask = gic_unmask_irq, .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, }; static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { /* SGIs are private to the core kernel */ if (hw < 16) return -EPERM; /* PPIs */ if (hw < 32) { irq_set_percpu_devid(irq); irq_set_chip_and_handler(irq, &gic_chip, handle_percpu_devid_irq); set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); } /* SPIs */ if (hw >= 32 && hw < gic_data.irq_nr) { irq_set_chip_and_handler(irq, &gic_chip, handle_fasteoi_irq); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } irq_set_chip_data(irq, d->host_data); return 0; } static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *controller, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { if (d->of_node != controller) return -EINVAL; if (intsize < 3) return -EINVAL; switch(intspec[0]) { case 0: /* SPI */ *out_hwirq = intspec[1] + 32; break; case 1: /* PPI */ *out_hwirq = intspec[1] + 16; break; default: return -EINVAL; } *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; return 0; } static const struct irq_domain_ops gic_irq_domain_ops = { .map = gic_irq_domain_map, .xlate = gic_irq_domain_xlate, }; static int __init gic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *dist_base; void __iomem **redist_base; u64 redist_stride; u32 redist_regions; u32 reg; int gic_irqs; int err; int i; dist_base = of_iomap(node, 0); if (!dist_base) { pr_err("%s: unable to map gic dist registers\n", node->full_name); return -ENXIO; } reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) { 
pr_err("%s: no distributor detected, giving up\n", node->full_name); err = -ENODEV; goto out_unmap_dist; } if (of_property_read_u32(node, "#redistributor-regions", &redist_regions)) redist_regions = 1; redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL); if (!redist_base) { err = -ENOMEM; goto out_unmap_dist; } for (i = 0; i < redist_regions; i++) { redist_base[i] = of_iomap(node, 1 + i); if (!redist_base[i]) { pr_err("%s: couldn't map region %d\n", node->full_name, i); err = -ENODEV; goto out_unmap_rdist; } } if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) redist_stride = 0; gic_data.dist_base = dist_base; gic_data.redist_base = redist_base; gic_data.redist_regions = redist_regions; gic_data.redist_stride = redist_stride; /* * Find out how many interrupts are supported. * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) */ gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f; gic_irqs = (gic_irqs + 1) * 32; if (gic_irqs > 1020) gic_irqs = 1020; gic_data.irq_nr = gic_irqs; gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, &gic_data); gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist)); if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) { err = -ENOMEM; goto out_free; } set_handle_irq(gic_handle_irq); gic_smp_init(); gic_dist_init(); gic_cpu_init(); return 0; out_free: if (gic_data.domain) irq_domain_remove(gic_data.domain); free_percpu(gic_data.rdist); out_unmap_rdist: for (i = 0; i < redist_regions; i++) if (redist_base[i]) iounmap(redist_base[i]); kfree(redist_base); out_unmap_dist: iounmap(dist_base); return err; } IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
gpl-2.0
yvxiang/linux-zswap
drivers/ata/sata_highbank.c
85
18379
/* * Calxeda Highbank AHCI SATA platform driver * Copyright 2012 Calxeda, Inc. * * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/err.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/libata.h> #include <linux/ahci_platform.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include "ahci.h" #define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f)) #define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2) #define SERDES_CR_CTL 0x80a0 #define SERDES_CR_ADDR 0x80a1 #define SERDES_CR_DATA 0x80a2 #define CR_BUSY 0x0001 #define CR_START 0x0001 #define CR_WR_RDN 0x0002 #define CPHY_TX_INPUT_STS 0x2001 #define CPHY_RX_INPUT_STS 0x2002 #define CPHY_SATA_TX_OVERRIDE 0x8000 #define CPHY_SATA_RX_OVERRIDE 0x4000 #define CPHY_TX_OVERRIDE 0x2004 #define CPHY_RX_OVERRIDE 0x2005 #define SPHY_LANE 0x100 #define SPHY_HALF_RATE 0x0001 #define CPHY_SATA_DPLL_MODE 0x0700 #define CPHY_SATA_DPLL_SHIFT 8 #define CPHY_SATA_DPLL_RESET (1 << 11) #define CPHY_SATA_TX_ATTEN 0x1c00 #define CPHY_SATA_TX_ATTEN_SHIFT 10 #define 
CPHY_PHY_COUNT 6 #define CPHY_LANE_COUNT 4 #define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT) static DEFINE_SPINLOCK(cphy_lock); /* Each of the 6 phys can have up to 4 sata ports attached to i. Map 0-based * sata ports to their phys and then to their lanes within the phys */ struct phy_lane_info { void __iomem *phy_base; u8 lane_mapping; u8 phy_devs; u8 tx_atten; }; static struct phy_lane_info port_data[CPHY_PORT_COUNT]; static DEFINE_SPINLOCK(sgpio_lock); #define SCLOCK 0 #define SLOAD 1 #define SDATA 2 #define SGPIO_PINS 3 #define SGPIO_PORTS 8 struct ecx_plat_data { u32 n_ports; /* number of extra clocks that the SGPIO PIC controller expects */ u32 pre_clocks; u32 post_clocks; unsigned sgpio_gpio[SGPIO_PINS]; u32 sgpio_pattern; u32 port_to_sgpio[SGPIO_PORTS]; }; #define SGPIO_SIGNALS 3 #define ECX_ACTIVITY_BITS 0x300000 #define ECX_ACTIVITY_SHIFT 0 #define ECX_LOCATE_BITS 0x80000 #define ECX_LOCATE_SHIFT 1 #define ECX_FAULT_BITS 0x400000 #define ECX_FAULT_SHIFT 2 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port, u32 shift) { return 1 << (3 * pdata->port_to_sgpio[port] + shift); } static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state) { if (state & ECX_ACTIVITY_BITS) pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port, ECX_ACTIVITY_SHIFT); else pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port, ECX_ACTIVITY_SHIFT); if (state & ECX_LOCATE_BITS) pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port, ECX_LOCATE_SHIFT); else pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port, ECX_LOCATE_SHIFT); if (state & ECX_FAULT_BITS) pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port, ECX_FAULT_SHIFT); else pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port, ECX_FAULT_SHIFT); } /* * Tell the LED controller that the signal has changed by raising the clock * line for 50 uS and then lowering it for 50 uS. 
*/ static void ecx_led_cycle_clock(struct ecx_plat_data *pdata) { gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1); udelay(50); gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0); udelay(50); } static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state, ssize_t size) { struct ahci_host_priv *hpriv = ap->host->private_data; struct ecx_plat_data *pdata = (struct ecx_plat_data *) hpriv->plat_data; struct ahci_port_priv *pp = ap->private_data; unsigned long flags; int pmp, i; struct ahci_em_priv *emp; u32 sgpio_out; /* get the slot number from the message */ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; if (pmp < EM_MAX_SLOTS) emp = &pp->em_priv[pmp]; else return -EINVAL; if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED)) return size; spin_lock_irqsave(&sgpio_lock, flags); ecx_parse_sgpio(pdata, ap->port_no, state); sgpio_out = pdata->sgpio_pattern; for (i = 0; i < pdata->pre_clocks; i++) ecx_led_cycle_clock(pdata); gpio_set_value(pdata->sgpio_gpio[SLOAD], 1); ecx_led_cycle_clock(pdata); gpio_set_value(pdata->sgpio_gpio[SLOAD], 0); /* * bit-bang out the SGPIO pattern, by consuming a bit and then * clocking it out. 
*/ for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) { gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1); sgpio_out >>= 1; ecx_led_cycle_clock(pdata); } for (i = 0; i < pdata->post_clocks; i++) ecx_led_cycle_clock(pdata); /* save off new led state for port/slot */ emp->led_state = state; spin_unlock_irqrestore(&sgpio_lock, flags); return size; } static void highbank_set_em_messages(struct device *dev, struct ahci_host_priv *hpriv, struct ata_port_info *pi) { struct device_node *np = dev->of_node; struct ecx_plat_data *pdata = hpriv->plat_data; int i; int err; for (i = 0; i < SGPIO_PINS; i++) { err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i); if (IS_ERR_VALUE(err)) return; pdata->sgpio_gpio[i] = err; err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO"); if (err) { pr_err("sata_highbank gpio_request %d failed: %d\n", i, err); return; } gpio_direction_output(pdata->sgpio_gpio[i], 1); } of_property_read_u32_array(np, "calxeda,led-order", pdata->port_to_sgpio, pdata->n_ports); if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks)) pdata->pre_clocks = 0; if (of_property_read_u32(np, "calxeda,post-clocks", &pdata->post_clocks)) pdata->post_clocks = 0; /* store em_loc */ hpriv->em_loc = 0; hpriv->em_buf_sz = 4; hpriv->em_msg_type = EM_MSG_TYPE_LED; pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY; } static u32 __combo_phy_reg_read(u8 sata_port, u32 addr) { u32 data; u8 dev = port_data[sata_port].phy_devs; spin_lock(&cphy_lock); writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800); data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr)); spin_unlock(&cphy_lock); return data; } static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data) { u8 dev = port_data[sata_port].phy_devs; spin_lock(&cphy_lock); writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800); writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr)); spin_unlock(&cphy_lock); } static void combo_phy_wait_for_ready(u8 sata_port) { while 
(__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY) udelay(5); } static u32 combo_phy_read(u8 sata_port, u32 addr) { combo_phy_wait_for_ready(sata_port); __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr); __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START); combo_phy_wait_for_ready(sata_port); return __combo_phy_reg_read(sata_port, SERDES_CR_DATA); } static void combo_phy_write(u8 sata_port, u32 addr, u32 data) { combo_phy_wait_for_ready(sata_port); __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr); __combo_phy_reg_write(sata_port, SERDES_CR_DATA, data); __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START); } static void highbank_cphy_disable_overrides(u8 sata_port) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp; if (unlikely(port_data[sata_port].phy_base == NULL)) return; tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); tmp &= ~CPHY_SATA_RX_OVERRIDE; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); } static void cphy_override_tx_attenuation(u8 sata_port, u32 val) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp; if (val & 0x8) return; tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE); tmp &= ~CPHY_SATA_TX_OVERRIDE; combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= CPHY_SATA_TX_OVERRIDE; combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN; combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp); } static void cphy_override_rx_mode(u8 sata_port, u32 val) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp; tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); tmp &= ~CPHY_SATA_RX_OVERRIDE; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= CPHY_SATA_RX_OVERRIDE; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); tmp &= ~CPHY_SATA_DPLL_MODE; tmp |= val << 
CPHY_SATA_DPLL_SHIFT; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= CPHY_SATA_DPLL_RESET; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); tmp &= ~CPHY_SATA_DPLL_RESET; combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); msleep(15); } static void highbank_cphy_override_lane(u8 sata_port) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp, k = 0; if (unlikely(port_data[sata_port].phy_base == NULL)) return; do { tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); } while ((tmp & SPHY_HALF_RATE) && (k++ < 1000)); cphy_override_rx_mode(sata_port, 3); cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten); } static int highbank_initialize_phys(struct device *dev, void __iomem *addr) { struct device_node *sata_node = dev->of_node; int phy_count = 0, phy, port = 0, i; void __iomem *cphy_base[CPHY_PHY_COUNT]; struct device_node *phy_nodes[CPHY_PHY_COUNT]; u32 tx_atten[CPHY_PORT_COUNT]; memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT); memset(phy_nodes, 0, sizeof(struct device_node*) * CPHY_PHY_COUNT); memset(tx_atten, 0xff, CPHY_PORT_COUNT); do { u32 tmp; struct of_phandle_args phy_data; if (of_parse_phandle_with_args(sata_node, "calxeda,port-phys", "#phy-cells", port, &phy_data)) break; for (phy = 0; phy < phy_count; phy++) { if (phy_nodes[phy] == phy_data.np) break; } if (phy_nodes[phy] == NULL) { phy_nodes[phy] = phy_data.np; cphy_base[phy] = of_iomap(phy_nodes[phy], 0); if (cphy_base[phy] == NULL) { return 0; } phy_count += 1; } port_data[port].lane_mapping = phy_data.args[0]; of_property_read_u32(phy_nodes[phy], "phydev", &tmp); port_data[port].phy_devs = tmp; port_data[port].phy_base = cphy_base[phy]; of_node_put(phy_data.np); port += 1; } while (port < CPHY_PORT_COUNT); of_property_read_u32_array(sata_node, "calxeda,tx-atten", tx_atten, port); for (i = 0; i < port; i++) port_data[i].tx_atten = (u8) tx_atten[i]; return 0; } /* * The Calxeda 
SATA phy intermittently fails to bring up a link with Gen3 * Retrying the phy hard reset can work around the issue, but the drive * may fail again. In less than 150 out of 15000 test runs, it took more * than 10 tries for the link to be established (but never more than 35). * Triple the maximum observed retry count to provide plenty of margin for * rare events and to guarantee that the link is established. * * Also, the default 2 second time-out on a failed drive is too long in * this situation. The uboot implementation of the same driver function * uses a much shorter time-out period and never experiences a time out * issue. Reducing the time-out to 500ms improves the responsiveness. * The other timing constants were kept the same as the stock AHCI driver. * This change was also tested 15000 times on 24 drives and none of them * experienced a time out. */ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { static const unsigned long timing[] = { 5, 100, 500}; struct ata_port *ap = link->ap; struct ahci_port_priv *pp = ap->private_data; u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; struct ata_taskfile tf; bool online; u32 sstatus; int rc; int retry = 100; ahci_stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); tf.command = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); do { highbank_cphy_disable_overrides(link->ap->port_no); rc = sata_link_hardreset(link, timing, deadline, &online, NULL); highbank_cphy_override_lane(link->ap->port_no); /* If the status is 1, we are connected, but the link did not * come up. So retry resetting the link again. 
*/ if (sata_scr_read(link, SCR_STATUS, &sstatus)) break; if (!(sstatus & 0x3)) break; } while (!online && retry--); ahci_start_engine(ap); if (online) *class = ahci_dev_classify(ap); return rc; } static struct ata_port_operations ahci_highbank_ops = { .inherits = &ahci_ops, .hardreset = ahci_highbank_hardreset, .transmit_led_message = ecx_transmit_led_message, }; static const struct ata_port_info ahci_highbank_port_info = { .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_highbank_ops, }; static struct scsi_host_template ahci_highbank_platform_sht = { AHCI_SHT("sata_highbank"), }; static const struct of_device_id ahci_of_match[] = { { .compatible = "calxeda,hb-ahci" }, {}, }; MODULE_DEVICE_TABLE(of, ahci_of_match); static int ahci_highbank_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; struct ecx_plat_data *pdata; struct ata_host *host; struct resource *mem; int irq; int i; int rc; u32 n_ports; struct ata_port_info pi = ahci_highbank_port_info; const struct ata_port_info *ppi[] = { &pi, NULL }; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(dev, "no mmio space\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(dev, "no irq\n"); return -EINVAL; } hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) { dev_err(dev, "can't alloc ahci_host_priv\n"); return -ENOMEM; } pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(dev, "can't alloc ecx_plat_data\n"); return -ENOMEM; } hpriv->flags |= (unsigned long)pi.private_data; hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem)); if (!hpriv->mmio) { dev_err(dev, "can't map %pR\n", mem); return -ENOMEM; } rc = highbank_initialize_phys(dev, hpriv->mmio); if (rc) return rc; ahci_save_initial_config(dev, hpriv, 0, 0); /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) pi.flags |= ATA_FLAG_NCQ; if (hpriv->cap & HOST_CAP_PMP) pi.flags 
|= ATA_FLAG_PMP; if (hpriv->cap & HOST_CAP_64) dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at * both CAP.NP and port_map. */ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); pdata->n_ports = n_ports; hpriv->plat_data = pdata; highbank_set_em_messages(dev, hpriv, &pi); host = ata_host_alloc_pinfo(dev, ppi, n_ports); if (!host) { rc = -ENOMEM; goto err0; } host->private_data = hpriv; if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) host->flags |= ATA_HOST_PARALLEL_SCAN; for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; ata_port_desc(ap, "mmio %pR", mem); ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); /* set enclosure management message type */ if (ap->flags & ATA_FLAG_EM) ap->em_message_type = hpriv->em_msg_type; /* disabled/not-implemented port */ if (!(hpriv->port_map & (1 << i))) ap->ops = &ata_dummy_port_ops; } rc = ahci_reset_controller(host); if (rc) goto err0; ahci_init_controller(host); ahci_print_info(host, "platform"); rc = ata_host_activate(host, irq, ahci_interrupt, 0, &ahci_highbank_platform_sht); if (rc) goto err0; return 0; err0: return rc; } #ifdef CONFIG_PM_SLEEP static int ahci_highbank_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->mmio; u32 ctl; int rc; if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { dev_err(dev, "firmware update required for suspend/resume\n"); return -EIO; } /* * AHCI spec rev1.1 section 8.3.3: * Software must disable interrupts prior to requesting a * transition of the HBA to D3 state. 
*/ ctl = readl(mmio + HOST_CTL); ctl &= ~HOST_IRQ_EN; writel(ctl, mmio + HOST_CTL); readl(mmio + HOST_CTL); /* flush */ rc = ata_host_suspend(host, PMSG_SUSPEND); if (rc) return rc; return 0; } static int ahci_highbank_resume(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); int rc; if (dev->power.power_state.event == PM_EVENT_SUSPEND) { rc = ahci_reset_controller(host); if (rc) return rc; ahci_init_controller(host); } ata_host_resume(host); return 0; } #endif static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops, ahci_highbank_suspend, ahci_highbank_resume); static struct platform_driver ahci_highbank_driver = { .remove = ata_platform_remove_one, .driver = { .name = "highbank-ahci", .owner = THIS_MODULE, .of_match_table = ahci_of_match, .pm = &ahci_highbank_pm_ops, }, .probe = ahci_highbank_probe, }; module_platform_driver(ahci_highbank_driver); MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver"); MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("sata:highbank");
gpl-2.0
smx-smx/dsl-n55u-bender
release/src-ra/linux/linux-2.6.21.x/drivers/char/watchdog/rm9k_wdt.c
85
10399
/* * Watchdog implementation for GPI h/w found on PMC-Sierra RM9xxx * chips. * * Copyright (C) 2004 by Basler Vision Technologies AG * Author: Thomas Koeller <thomas.koeller@baslerweb.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/platform_device.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/interrupt.h> #include <linux/fs.h> #include <linux/reboot.h> #include <linux/notifier.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <asm/io.h> #include <asm/atomic.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/rm9k-ocd.h> #include <rm9k_wdt.h> #define CLOCK 125000000 #define MAX_TIMEOUT_SECONDS 32 #define CPCCR 0x0080 #define CPGIG1SR 0x0044 #define CPGIG1ER 0x0054 /* Function prototypes */ static irqreturn_t wdt_gpi_irqhdl(int, void *); static void wdt_gpi_start(void); static void wdt_gpi_stop(void); static void wdt_gpi_set_timeout(unsigned int); static int wdt_gpi_open(struct inode *, struct file *); static int wdt_gpi_release(struct inode *, struct file *); static ssize_t wdt_gpi_write(struct file *, const char __user *, size_t, loff_t *); static long wdt_gpi_ioctl(struct file *, unsigned int, unsigned long); static int wdt_gpi_notify(struct notifier_block *, unsigned long, void *); static const struct resource 
*wdt_gpi_get_resource(struct platform_device *, const char *, unsigned int); static int __init wdt_gpi_probe(struct device *); static int __exit wdt_gpi_remove(struct device *); static const char wdt_gpi_name[] = "wdt_gpi"; static atomic_t opencnt; static int expect_close; static int locked; /* These are set from device resources */ static void __iomem * wd_regs; static unsigned int wd_irq, wd_ctr; /* Module arguments */ static int timeout = MAX_TIMEOUT_SECONDS; module_param(timeout, int, 0444); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds"); static unsigned long resetaddr = 0xbffdc200; module_param(resetaddr, ulong, 0444); MODULE_PARM_DESC(resetaddr, "Address to write to to force a reset"); static unsigned long flagaddr = 0xbffdc104; module_param(flagaddr, ulong, 0444); MODULE_PARM_DESC(flagaddr, "Address to write to boot flags to"); static int powercycle; module_param(powercycle, bool, 0444); MODULE_PARM_DESC(powercycle, "Cycle power if watchdog expires"); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0444); MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started"); /* Kernel interfaces */ static const struct file_operations fops = { .owner = THIS_MODULE, .open = wdt_gpi_open, .release = wdt_gpi_release, .write = wdt_gpi_write, .unlocked_ioctl = wdt_gpi_ioctl, }; static struct miscdevice miscdev = { .minor = WATCHDOG_MINOR, .name = wdt_gpi_name, .fops = &fops, }; static struct notifier_block wdt_gpi_shutdown = { .notifier_call = wdt_gpi_notify, }; /* Interrupt handler */ static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt) { if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1)) return IRQ_NONE; __raw_writel(0x1, wd_regs + 0x0008); printk(KERN_CRIT "%s: watchdog expired - resetting system\n", wdt_gpi_name); *(volatile char *) flagaddr |= 0x01; *(volatile char *) resetaddr = powercycle ? 
0x01 : 0x2; iob(); while (1) cpu_relax(); } /* Watchdog functions */ static void wdt_gpi_start(void) { u32 reg; lock_titan_regs(); reg = titan_readl(CPGIG1ER); titan_writel(reg | (0x100 << wd_ctr), CPGIG1ER); iob(); unlock_titan_regs(); } static void wdt_gpi_stop(void) { u32 reg; lock_titan_regs(); reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4)); titan_writel(reg, CPCCR); reg = titan_readl(CPGIG1ER); titan_writel(reg & ~(0x100 << wd_ctr), CPGIG1ER); iob(); unlock_titan_regs(); } static void wdt_gpi_set_timeout(unsigned int to) { u32 reg; const u32 wdval = (to * CLOCK) & ~0x0000000f; lock_titan_regs(); reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4)); titan_writel(reg, CPCCR); wmb(); __raw_writel(wdval, wd_regs + 0x0000); wmb(); titan_writel(reg | (0x2 << (wd_ctr * 4)), CPCCR); wmb(); titan_writel(reg | (0x5 << (wd_ctr * 4)), CPCCR); iob(); unlock_titan_regs(); } /* /dev/watchdog operations */ static int wdt_gpi_open(struct inode *inode, struct file *file) { int res; if (unlikely(atomic_dec_if_positive(&opencnt) < 0)) return -EBUSY; expect_close = 0; if (locked) { module_put(THIS_MODULE); free_irq(wd_irq, &miscdev); locked = 0; } res = request_irq(wd_irq, wdt_gpi_irqhdl, IRQF_SHARED | IRQF_DISABLED, wdt_gpi_name, &miscdev); if (unlikely(res)) return res; wdt_gpi_set_timeout(timeout); wdt_gpi_start(); printk(KERN_INFO "%s: watchdog started, timeout = %u seconds\n", wdt_gpi_name, timeout); return nonseekable_open(inode, file); } static int wdt_gpi_release(struct inode *inode, struct file *file) { if (nowayout) { printk(KERN_INFO "%s: no way out - watchdog left running\n", wdt_gpi_name); __module_get(THIS_MODULE); locked = 1; } else { if (expect_close) { wdt_gpi_stop(); free_irq(wd_irq, &miscdev); printk(KERN_INFO "%s: watchdog stopped\n", wdt_gpi_name); } else { printk(KERN_CRIT "%s: unexpected close() -" " watchdog left running\n", wdt_gpi_name); wdt_gpi_set_timeout(timeout); __module_get(THIS_MODULE); locked = 1; } } atomic_inc(&opencnt); return 0; } static 
ssize_t wdt_gpi_write(struct file *f, const char __user *d, size_t s, loff_t *o) { char val; wdt_gpi_set_timeout(timeout); expect_close = (s > 0) && !get_user(val, d) && (val == 'V'); return s ? 1 : 0; } static long wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { long res = -ENOTTY; const long size = _IOC_SIZE(cmd); int stat; void __user *argp = (void __user *)arg; static struct watchdog_info wdinfo = { .identity = "RM9xxx/GPI watchdog", .firmware_version = 0, .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING }; if (unlikely(_IOC_TYPE(cmd) != WATCHDOG_IOCTL_BASE)) return -ENOTTY; if ((_IOC_DIR(cmd) & _IOC_READ) && !access_ok(VERIFY_WRITE, arg, size)) return -EFAULT; if ((_IOC_DIR(cmd) & _IOC_WRITE) && !access_ok(VERIFY_READ, arg, size)) return -EFAULT; expect_close = 0; switch (cmd) { case WDIOC_GETSUPPORT: wdinfo.options = nowayout ? WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING : WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE; res = __copy_to_user(argp, &wdinfo, size) ? -EFAULT : size; break; case WDIOC_GETSTATUS: break; case WDIOC_GETBOOTSTATUS: stat = (*(volatile char *) flagaddr & 0x01) ? WDIOF_CARDRESET : 0; res = __copy_to_user(argp, &stat, size) ? -EFAULT : size; break; case WDIOC_SETOPTIONS: break; case WDIOC_KEEPALIVE: wdt_gpi_set_timeout(timeout); res = size; break; case WDIOC_SETTIMEOUT: { int val; if (unlikely(__copy_from_user(&val, argp, size))) { res = -EFAULT; break; } if (val > MAX_TIMEOUT_SECONDS) val = MAX_TIMEOUT_SECONDS; timeout = val; wdt_gpi_set_timeout(val); res = size; printk(KERN_INFO "%s: timeout set to %u seconds\n", wdt_gpi_name, timeout); } break; case WDIOC_GETTIMEOUT: res = __copy_to_user(argp, &timeout, size) ? 
-EFAULT : size; break; } return res; } /* Shutdown notifier */ static int wdt_gpi_notify(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) wdt_gpi_stop(); return NOTIFY_DONE; } /* Init & exit procedures */ static const struct resource * wdt_gpi_get_resource(struct platform_device *pdv, const char *name, unsigned int type) { char buf[80]; if (snprintf(buf, sizeof buf, "%s_0", name) >= sizeof buf) return NULL; return platform_get_resource_byname(pdv, type, buf); } /* No hotplugging on the platform bus - use __init */ static int __init wdt_gpi_probe(struct device *dev) { int res; struct platform_device * const pdv = to_platform_device(dev); const struct resource * const rr = wdt_gpi_get_resource(pdv, WDT_RESOURCE_REGS, IORESOURCE_MEM), * const ri = wdt_gpi_get_resource(pdv, WDT_RESOURCE_IRQ, IORESOURCE_IRQ), * const rc = wdt_gpi_get_resource(pdv, WDT_RESOURCE_COUNTER, 0); if (unlikely(!rr || !ri || !rc)) return -ENXIO; wd_regs = ioremap_nocache(rr->start, rr->end + 1 - rr->start); if (unlikely(!wd_regs)) return -ENOMEM; wd_irq = ri->start; wd_ctr = rc->start; res = misc_register(&miscdev); if (res) iounmap(wd_regs); else register_reboot_notifier(&wdt_gpi_shutdown); return res; } static int __exit wdt_gpi_remove(struct device *dev) { int res; unregister_reboot_notifier(&wdt_gpi_shutdown); res = misc_deregister(&miscdev); iounmap(wd_regs); wd_regs = NULL; return res; } /* Device driver init & exit */ static struct device_driver wdt_gpi_driver = { .name = (char *) wdt_gpi_name, .bus = &platform_bus_type, .owner = THIS_MODULE, .probe = wdt_gpi_probe, .remove = __exit_p(wdt_gpi_remove), .shutdown = NULL, .suspend = NULL, .resume = NULL, }; static int __init wdt_gpi_init_module(void) { atomic_set(&opencnt, 1); if (timeout > MAX_TIMEOUT_SECONDS) timeout = MAX_TIMEOUT_SECONDS; return driver_register(&wdt_gpi_driver); } static void __exit wdt_gpi_cleanup_module(void) { driver_unregister(&wdt_gpi_driver); } 
module_init(wdt_gpi_init_module); module_exit(wdt_gpi_cleanup_module); MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>"); MODULE_DESCRIPTION("Basler eXcite watchdog driver for gpi devices"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
ps06756/linux-3.17.2
drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
85
7391
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <engine/software.h> #include <engine/disp.h> #include <nvif/class.h> #include "nv50.h" /******************************************************************************* * EVO master channel object ******************************************************************************/ const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac = { .mthd = 0x0080, .addr = 0x000008, .data = { { 0x0400, 0x610b58 }, { 0x0404, 0x610bdc }, { 0x0420, 0x610bc4 }, {} } }; const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head = { .mthd = 0x0400, .addr = 0x000540, .data = { { 0x0800, 0x610ad8 }, { 0x0804, 0x610ad0 }, { 0x0808, 0x610a48 }, { 0x080c, 0x610a78 }, { 0x0810, 0x610ac0 }, { 0x0814, 0x610af8 }, { 0x0818, 0x610b00 }, { 0x081c, 0x610ae8 }, { 0x0820, 0x610af0 }, { 0x0824, 0x610b08 }, { 0x0828, 0x610b10 }, { 0x082c, 0x610a68 }, { 0x0830, 0x610a60 }, { 0x0834, 0x000000 }, { 0x0838, 0x610a40 }, { 0x0840, 0x610a24 }, { 0x0844, 0x610a2c }, { 0x0848, 0x610aa8 }, { 0x084c, 0x610ab0 }, { 0x085c, 0x610c5c }, { 0x0860, 0x610a84 }, { 0x0864, 0x610a90 }, { 0x0868, 0x610b18 }, { 0x086c, 0x610b20 }, { 0x0870, 0x610ac8 }, { 0x0874, 0x610a38 }, { 0x0878, 0x610c50 }, { 0x0880, 0x610a58 }, { 0x0884, 0x610a9c }, { 0x089c, 0x610c68 }, { 0x08a0, 0x610a70 }, { 0x08a4, 0x610a50 }, { 0x08a8, 0x610ae0 }, { 0x08c0, 0x610b28 }, { 0x08c4, 0x610b30 }, { 0x08c8, 0x610b40 }, { 0x08d4, 0x610b38 }, { 0x08d8, 0x610b48 }, { 0x08dc, 0x610b50 }, { 0x0900, 0x610a18 }, { 0x0904, 0x610ab8 }, { 0x0910, 0x610c70 }, { 0x0914, 0x610c78 }, {} } }; const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan = { .name = "Core", .addr = 0x000000, .data = { { "Global", 1, &nv50_disp_mast_mthd_base }, { "DAC", 3, &nv84_disp_mast_mthd_dac }, { "SOR", 2, &nv50_disp_mast_mthd_sor }, { "PIOR", 3, &nv50_disp_mast_mthd_pior }, { "HEAD", 2, &nv84_disp_mast_mthd_head }, {} } }; /******************************************************************************* * EVO sync channel objects 
******************************************************************************/ static const struct nv50_disp_mthd_list nv84_disp_sync_mthd_base = { .mthd = 0x0000, .addr = 0x000000, .data = { { 0x0080, 0x000000 }, { 0x0084, 0x0008c4 }, { 0x0088, 0x0008d0 }, { 0x008c, 0x0008dc }, { 0x0090, 0x0008e4 }, { 0x0094, 0x610884 }, { 0x00a0, 0x6108a0 }, { 0x00a4, 0x610878 }, { 0x00c0, 0x61086c }, { 0x00c4, 0x610800 }, { 0x00c8, 0x61080c }, { 0x00cc, 0x610818 }, { 0x00e0, 0x610858 }, { 0x00e4, 0x610860 }, { 0x00e8, 0x6108ac }, { 0x00ec, 0x6108b4 }, { 0x00fc, 0x610824 }, { 0x0100, 0x610894 }, { 0x0104, 0x61082c }, { 0x0110, 0x6108bc }, { 0x0114, 0x61088c }, {} } }; const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan = { .name = "Base", .addr = 0x000540, .data = { { "Global", 1, &nv84_disp_sync_mthd_base }, { "Image", 2, &nv50_disp_sync_mthd_image }, {} } }; /******************************************************************************* * EVO overlay channel objects ******************************************************************************/ static const struct nv50_disp_mthd_list nv84_disp_ovly_mthd_base = { .mthd = 0x0000, .addr = 0x000000, .data = { { 0x0080, 0x000000 }, { 0x0084, 0x6109a0 }, { 0x0088, 0x6109c0 }, { 0x008c, 0x6109c8 }, { 0x0090, 0x6109b4 }, { 0x0094, 0x610970 }, { 0x00a0, 0x610998 }, { 0x00a4, 0x610964 }, { 0x00c0, 0x610958 }, { 0x00e0, 0x6109a8 }, { 0x00e4, 0x6109d0 }, { 0x00e8, 0x6109d8 }, { 0x0100, 0x61094c }, { 0x0104, 0x610984 }, { 0x0108, 0x61098c }, { 0x0800, 0x6109f8 }, { 0x0808, 0x610a08 }, { 0x080c, 0x610a10 }, { 0x0810, 0x610a00 }, {} } }; const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan = { .name = "Overlay", .addr = 0x000540, .data = { { "Global", 1, &nv84_disp_ovly_mthd_base }, {} } }; /******************************************************************************* * Base display object ******************************************************************************/ static struct nouveau_oclass nv84_disp_sclass[] = { { 
G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, { G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, {} }; static struct nouveau_oclass nv84_disp_base_oclass[] = { { G82_DISP, &nv50_disp_base_ofuncs }, {} }; /******************************************************************************* * Display engine implementation ******************************************************************************/ static int nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv50_disp_priv *priv; int ret; ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP", "display", &priv); *pobject = nv_object(priv); if (ret) return ret; nv_engine(priv)->sclass = nv84_disp_base_oclass; nv_engine(priv)->cclass = &nv50_disp_cclass; nv_subdev(priv)->intr = nv50_disp_intr; INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor); priv->sclass = nv84_disp_sclass; priv->head.nr = 2; priv->dac.nr = 3; priv->sor.nr = 2; priv->pior.nr = 3; priv->dac.power = nv50_dac_power; priv->dac.sense = nv50_dac_sense; priv->sor.power = nv50_sor_power; priv->sor.hdmi = nv84_hdmi_ctrl; priv->pior.power = nv50_pior_power; return 0; } struct nouveau_oclass * nv84_disp_oclass = &(struct nv50_disp_impl) { .base.base.handle = NV_ENGINE(DISP, 0x82), .base.base.ofuncs = &(struct nouveau_ofuncs) { .ctor = nv84_disp_ctor, .dtor = _nouveau_disp_dtor, .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, .base.vblank = &nv50_disp_vblank_func, .base.outp = nv50_disp_outp_sclass, .mthd.core = &nv84_disp_mast_mthd_chan, .mthd.base = &nv84_disp_sync_mthd_chan, .mthd.ovly = &nv84_disp_ovly_mthd_chan, .mthd.prev = 0x000004, .head.scanoutpos = nv50_disp_base_scanoutpos, }.base.base;
gpl-2.0
yohanes/Acer-BeTouch-E130-Linux-Kernel
fs/eventpoll.c
85
38019
/* * fs/eventpoll.c (Efficent event polling implementation) * Copyright (C) 2001,...,2007 Davide Libenzi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Davide Libenzi <davidel@xmailserver.org> * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/string.h> #include <linux/list.h> #include <linux/hash.h> #include <linux/spinlock.h> #include <linux/syscalls.h> #include <linux/rbtree.h> #include <linux/wait.h> #include <linux/eventpoll.h> #include <linux/mount.h> #include <linux/bitops.h> #include <linux/mutex.h> #include <linux/anon_inodes.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/io.h> #include <asm/mman.h> #include <asm/atomic.h> /* * LOCKING: * There are three level of locking required by epoll : * * 1) epmutex (mutex) * 2) ep->mtx (mutex) * 3) ep->lock (spinlock) * * The acquire order is the one listed above, from 1 to 3. * We need a spinlock (ep->lock) because we manipulate objects * from inside the poll callback, that might be triggered from * a wake_up() that in turn might be called from IRQ context. * So we can't sleep inside the poll callback and hence we need * a spinlock. During the event transfer loop (from kernel to * user space) we could end up sleeping due a copy_to_user(), so * we need a lock that will allow us to sleep. This lock is a * mutex (ep->mtx). It is acquired during the event transfer loop, * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file(). * Then we also need a global mutex to serialize eventpoll_release_file() * and ep_free(). 
* This mutex is acquired by ep_free() during the epoll file * cleanup path and it is also acquired by eventpoll_release_file() * if a file has been pushed inside an epoll set and it is then * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). * It is possible to drop the "ep->mtx" and to use the global * mutex "epmutex" (together with "ep->lock") to have it working, * but having "ep->mtx" will make the interface more scalable. * Events that require holding "epmutex" are very rare, while for * normal operations the epoll private "ep->mtx" will guarantee * a better scalability. */ #define DEBUG_EPOLL 0 #if DEBUG_EPOLL > 0 #define DPRINTK(x) printk x #define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0) #else /* #if DEBUG_EPOLL > 0 */ #define DPRINTK(x) (void) 0 #define DNPRINTK(n, x) (void) 0 #endif /* #if DEBUG_EPOLL > 0 */ #define DEBUG_EPI 0 #if DEBUG_EPI != 0 #define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */) #else /* #if DEBUG_EPI != 0 */ #define EPI_SLAB_DEBUG 0 #endif /* #if DEBUG_EPI != 0 */ /* Epoll private bits inside the event mask */ #define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET) /* Maximum number of poll wake up nests we are allowing */ #define EP_MAX_POLLWAKE_NESTS 4 /* Maximum msec timeout value storeable in a long int */ #define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ) #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event)) #define EP_UNACTIVE_PTR ((void *) -1L) #define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry)) struct epoll_filefd { struct file *file; int fd; }; /* * Node that is linked into the "wake_task_list" member of the "struct poll_safewake". 
* It is used to keep track on all tasks that are currently inside the wake_up() code * to 1) short-circuit the one coming from the same task and same wait queue head * (loop) 2) allow a maximum number of epoll descriptors inclusion nesting * 3) let go the ones coming from other tasks. */ struct wake_task_node { struct list_head llink; struct task_struct *task; wait_queue_head_t *wq; }; /* * This is used to implement the safe poll wake up avoiding to reenter * the poll callback from inside wake_up(). */ struct poll_safewake { struct list_head wake_task_list; spinlock_t lock; }; /* * Each file descriptor added to the eventpoll interface will * have an entry of this type linked to the "rbr" RB tree. */ struct epitem { /* RB tree node used to link this structure to the eventpoll RB tree */ struct rb_node rbn; /* List header used to link this structure to the eventpoll ready list */ struct list_head rdllink; /* * Works together "struct eventpoll"->ovflist in keeping the * single linked chain of items. */ struct epitem *next; /* The file descriptor information this item refers to */ struct epoll_filefd ffd; /* Number of active wait queue attached to poll operations */ int nwait; /* List containing poll wait queues */ struct list_head pwqlist; /* The "container" of this item */ struct eventpoll *ep; /* List header used to link this item to the "struct file" items list */ struct list_head fllink; /* The structure that describe the interested events and the source fd */ struct epoll_event event; }; /* * This structure is stored inside the "private_data" member of the file * structure and rapresent the main data sructure for the eventpoll * interface. */ struct eventpoll { /* Protect the this structure access */ spinlock_t lock; /* * This mutex is used to ensure that files are not removed * while epoll is using them. This is held during the event * collection loop, the file cleanup path, the epoll file exit * code and the ctl operations. 
*/ struct mutex mtx; /* Wait queue used by sys_epoll_wait() */ wait_queue_head_t wq; /* Wait queue used by file->poll() */ wait_queue_head_t poll_wait; /* List of ready file descriptors */ struct list_head rdllist; /* RB tree root used to store monitored fd structs */ struct rb_root rbr; /* * This is a single linked list that chains all the "struct epitem" that * happened while transfering ready events to userspace w/out * holding ->lock. */ struct epitem *ovflist; /* The user that created the eventpoll descriptor */ struct user_struct *user; }; /* Wait structure used by the poll hooks */ struct eppoll_entry { /* List header used to link this structure to the "struct epitem" */ struct list_head llink; /* The "base" pointer is set to the container "struct epitem" */ void *base; /* * Wait queue item that will be linked to the target file wait * queue head. */ wait_queue_t wait; /* The wait queue head that linked the "wait" wait queue item */ wait_queue_head_t *whead; }; /* Wrapper struct used by poll queueing */ struct ep_pqueue { poll_table pt; struct epitem *epi; }; /* * Configuration options available inside /proc/sys/fs/epoll/ */ /* Maximum number of epoll watched descriptors, per user */ static int max_user_watches __read_mostly; /* * This mutex is used to serialize ep_free() and eventpoll_release_file(). 
*/ static DEFINE_MUTEX(epmutex); /* Safe wake up implementation */ static struct poll_safewake psw; /* Slab cache used to allocate "struct epitem" */ static struct kmem_cache *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ static struct kmem_cache *pwq_cache __read_mostly; #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> static int zero; ctl_table epoll_table[] = { { .procname = "max_user_watches", .data = &max_user_watches, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec_minmax, .extra1 = &zero, }, { .ctl_name = 0 } }; #endif /* CONFIG_SYSCTL */ /* Setup the structure that is used as key for the RB tree */ static inline void ep_set_ffd(struct epoll_filefd *ffd, struct file *file, int fd) { ffd->file = file; ffd->fd = fd; } /* Compare RB tree keys */ static inline int ep_cmp_ffd(struct epoll_filefd *p1, struct epoll_filefd *p2) { return (p1->file > p2->file ? +1: (p1->file < p2->file ? -1 : p1->fd - p2->fd)); } /* Tells us if the item is currently linked */ static inline int ep_is_linked(struct list_head *p) { return !list_empty(p); } /* Get the "struct epitem" from a wait queue pointer */ static inline struct epitem *ep_item_from_wait(wait_queue_t *p) { return container_of(p, struct eppoll_entry, wait)->base; } /* Get the "struct epitem" from an epoll queue wrapper */ static inline struct epitem *ep_item_from_epqueue(poll_table *p) { return container_of(p, struct ep_pqueue, pt)->epi; } /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */ static inline int ep_op_has_event(int op) { return op != EPOLL_CTL_DEL; } /* Initialize the poll safe wake up structure */ static void ep_poll_safewake_init(struct poll_safewake *psw) { INIT_LIST_HEAD(&psw->wake_task_list); spin_lock_init(&psw->lock); } /* * Perform a safe wake up of the poll wait list. 
The problem is that * with the new callback'd wake up system, it is possible that the * poll callback is reentered from inside the call to wake_up() done * on the poll wait queue head. The rule is that we cannot reenter the * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times, * and we cannot reenter the same wait queue head at all. This will * enable to have a hierarchy of epoll file descriptor of no more than * EP_MAX_POLLWAKE_NESTS deep. We need the irq version of the spin lock * because this one gets called by the poll callback, that in turn is called * from inside a wake_up(), that might be called from irq context. */ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq) { int wake_nests = 0; unsigned long flags; struct task_struct *this_task = current; struct list_head *lsthead = &psw->wake_task_list; struct wake_task_node *tncur; struct wake_task_node tnode; spin_lock_irqsave(&psw->lock, flags); /* Try to see if the current task is already inside this wakeup call */ list_for_each_entry(tncur, lsthead, llink) { if (tncur->wq == wq || (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) { /* * Ops ... loop detected or maximum nest level reached. * We abort this wake by breaking the cycle itself. */ spin_unlock_irqrestore(&psw->lock, flags); return; } } /* Add the current task to the list */ tnode.task = this_task; tnode.wq = wq; list_add(&tnode.llink, lsthead); spin_unlock_irqrestore(&psw->lock, flags); /* Do really wake up now */ wake_up_nested(wq, 1 + wake_nests); /* Remove the current task from the list */ spin_lock_irqsave(&psw->lock, flags); list_del(&tnode.llink); spin_unlock_irqrestore(&psw->lock, flags); } /* * This function unregister poll callbacks from the associated file descriptor. * Since this must be called without holding "ep->lock" the atomic exchange trick * will protect us from multiple unregister. 
*/ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) { int nwait; struct list_head *lsthead = &epi->pwqlist; struct eppoll_entry *pwq; /* This is called without locks, so we need the atomic exchange */ nwait = xchg(&epi->nwait, 0); if (nwait) { while (!list_empty(lsthead)) { pwq = list_first_entry(lsthead, struct eppoll_entry, llink); list_del_init(&pwq->llink); remove_wait_queue(pwq->whead, &pwq->wait); kmem_cache_free(pwq_cache, pwq); } } } /* * Removes a "struct epitem" from the eventpoll RB tree and deallocates * all the associated resources. Must be called with "mtx" held. */ static int ep_remove(struct eventpoll *ep, struct epitem *epi) { unsigned long flags; struct file *file = epi->ffd.file; /* * Removes poll wait queue hooks. We _have_ to do this without holding * the "ep->lock" otherwise a deadlock might occur. This because of the * sequence of the lock acquisition. Here we do "ep->lock" then the wait * queue head lock when unregistering the wait queue. The wakeup callback * will run by holding the wait queue head lock and will call our callback * that will try to get "ep->lock". 
*/ ep_unregister_pollwait(ep, epi); /* Remove the current item from the list of epoll hooks */ spin_lock(&file->f_ep_lock); if (ep_is_linked(&epi->fllink)) list_del_init(&epi->fllink); spin_unlock(&file->f_ep_lock); rb_erase(&epi->rbn, &ep->rbr); spin_lock_irqsave(&ep->lock, flags); if (ep_is_linked(&epi->rdllink)) list_del_init(&epi->rdllink); spin_unlock_irqrestore(&ep->lock, flags); /* At this point it is safe to free the eventpoll item */ kmem_cache_free(epi_cache, epi); atomic_dec(&ep->user->epoll_watches); DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n", current, ep, file)); return 0; } static void ep_free(struct eventpoll *ep) { struct rb_node *rbp; struct epitem *epi; /* We need to release all tasks waiting for these file */ if (waitqueue_active(&ep->poll_wait)) ep_poll_safewake(&psw, &ep->poll_wait); /* * We need to lock this because we could be hit by * eventpoll_release_file() while we're freeing the "struct eventpoll". * We do not need to hold "ep->mtx" here because the epoll file * is on the way to be removed and no one has references to it * anymore. The only hit might come from eventpoll_release_file() but * holding "epmutex" is sufficent here. */ mutex_lock(&epmutex); /* * Walks through the whole tree by unregistering poll callbacks. */ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { epi = rb_entry(rbp, struct epitem, rbn); ep_unregister_pollwait(ep, epi); } /* * Walks through the whole tree by freeing each "struct epitem". At this * point we are sure no poll callbacks will be lingering around, and also by * holding "epmutex" we can be sure that no file cleanup code will hit * us during this operation. So we can avoid the lock on "ep->lock". 
*/ while ((rbp = rb_first(&ep->rbr)) != NULL) { epi = rb_entry(rbp, struct epitem, rbn); ep_remove(ep, epi); } mutex_unlock(&epmutex); mutex_destroy(&ep->mtx); free_uid(ep->user); kfree(ep); } static int ep_eventpoll_release(struct inode *inode, struct file *file) { struct eventpoll *ep = file->private_data; if (ep) ep_free(ep); DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep)); return 0; } static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait) { unsigned int pollflags = 0; unsigned long flags; struct eventpoll *ep = file->private_data; /* Insert inside our poll wait queue */ poll_wait(file, &ep->poll_wait, wait); /* Check our condition */ spin_lock_irqsave(&ep->lock, flags); if (!list_empty(&ep->rdllist)) pollflags = POLLIN | POLLRDNORM; spin_unlock_irqrestore(&ep->lock, flags); return pollflags; } /* File callbacks that implement the eventpoll file behaviour */ static const struct file_operations eventpoll_fops = { .release = ep_eventpoll_release, .poll = ep_eventpoll_poll }; /* Fast test to see if the file is an evenpoll file */ static inline int is_file_epoll(struct file *f) { return f->f_op == &eventpoll_fops; } /* * This is called from eventpoll_release() to unlink files from the eventpoll * interface. We need to have this facility to cleanup correctly files that are * closed without being removed from the eventpoll interface. */ void eventpoll_release_file(struct file *file) { struct list_head *lsthead = &file->f_ep_links; struct eventpoll *ep; struct epitem *epi; /* * We don't want to get "file->f_ep_lock" because it is not * necessary. It is not necessary because we're in the "struct file" * cleanup path, and this means that noone is using this file anymore. * So, for example, epoll_ctl() cannot hit here sicne if we reach this * point, the file counter already went to zero and fget() would fail. * The only hit might come from ep_free() but by holding the mutex * will correctly serialize the operation. 
We do need to acquire * "ep->mtx" after "epmutex" because ep_remove() requires it when called * from anywhere but ep_free(). */ mutex_lock(&epmutex); while (!list_empty(lsthead)) { epi = list_first_entry(lsthead, struct epitem, fllink); ep = epi->ep; list_del_init(&epi->fllink); mutex_lock(&ep->mtx); ep_remove(ep, epi); mutex_unlock(&ep->mtx); } mutex_unlock(&epmutex); } static int ep_alloc(struct eventpoll **pep) { int error; struct user_struct *user; struct eventpoll *ep; user = get_current_user(); error = -ENOMEM; ep = kzalloc(sizeof(*ep), GFP_KERNEL); if (unlikely(!ep)) goto free_uid; spin_lock_init(&ep->lock); mutex_init(&ep->mtx); init_waitqueue_head(&ep->wq); init_waitqueue_head(&ep->poll_wait); INIT_LIST_HEAD(&ep->rdllist); ep->rbr = RB_ROOT; ep->ovflist = EP_UNACTIVE_PTR; ep->user = user; *pep = ep; DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n", current, ep)); return 0; free_uid: free_uid(user); return error; } /* * Search the file inside the eventpoll tree. The RB tree operations * are protected by the "mtx" mutex, and ep_find() must be called with * "mtx" held. */ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) { int kcmp; struct rb_node *rbp; struct epitem *epi, *epir = NULL; struct epoll_filefd ffd; ep_set_ffd(&ffd, file, fd); for (rbp = ep->rbr.rb_node; rbp; ) { epi = rb_entry(rbp, struct epitem, rbn); kcmp = ep_cmp_ffd(&ffd, &epi->ffd); if (kcmp > 0) rbp = rbp->rb_right; else if (kcmp < 0) rbp = rbp->rb_left; else { epir = epi; break; } } DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n", current, file, epir)); return epir; } /* * This is the callback that is passed to the wait queue wakeup * machanism. It is called by the stored file descriptors when they * have events to report. 
*/ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) { int pwake = 0; unsigned long flags; struct epitem *epi = ep_item_from_wait(wait); struct eventpoll *ep = epi->ep; DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", current, epi->ffd.file, epi, ep)); spin_lock_irqsave(&ep->lock, flags); /* * If the event mask does not contain any poll(2) event, we consider the * descriptor to be disabled. This condition is likely the effect of the * EPOLLONESHOT bit that disables the descriptor when an event is received, * until the next EPOLL_CTL_MOD will be issued. */ if (!(epi->event.events & ~EP_PRIVATE_BITS)) goto out_unlock; /* * If we are trasfering events to userspace, we can hold no locks * (because we're accessing user memory, and because of linux f_op->poll() * semantics). All the events that happens during that period of time are * chained in ep->ovflist and requeued later on. */ if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) { if (epi->next == EP_UNACTIVE_PTR) { epi->next = ep->ovflist; ep->ovflist = epi; } goto out_unlock; } /* If this file is already in the ready list we exit soon */ if (ep_is_linked(&epi->rdllink)) goto is_linked; list_add_tail(&epi->rdllink, &ep->rdllist); is_linked: /* * Wake up ( if active ) both the eventpoll wait list and the ->poll() * wait list. */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; out_unlock: spin_unlock_irqrestore(&ep->lock, flags); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&psw, &ep->poll_wait); return 1; } /* * This is the callback that is used to add our wait queue to the * target file wakeup lists. 
*/ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, poll_table *pt) { struct epitem *epi = ep_item_from_epqueue(pt); struct eppoll_entry *pwq; if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) { init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); pwq->whead = whead; pwq->base = epi; add_wait_queue(whead, &pwq->wait); list_add_tail(&pwq->llink, &epi->pwqlist); epi->nwait++; } else { /* We have to signal that an error occurred */ epi->nwait = -1; } } static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi) { int kcmp; struct rb_node **p = &ep->rbr.rb_node, *parent = NULL; struct epitem *epic; while (*p) { parent = *p; epic = rb_entry(parent, struct epitem, rbn); kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd); if (kcmp > 0) p = &parent->rb_right; else p = &parent->rb_left; } rb_link_node(&epi->rbn, parent, p); rb_insert_color(&epi->rbn, &ep->rbr); } /* * Must be called with "mtx" held. */ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, struct file *tfile, int fd) { int error, revents, pwake = 0; unsigned long flags; struct epitem *epi; struct ep_pqueue epq; if (unlikely(atomic_read(&ep->user->epoll_watches) >= max_user_watches)) return -ENOSPC; if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) return -ENOMEM; /* Item initialization follow here ... */ INIT_LIST_HEAD(&epi->rdllink); INIT_LIST_HEAD(&epi->fllink); INIT_LIST_HEAD(&epi->pwqlist); epi->ep = ep; ep_set_ffd(&epi->ffd, tfile, fd); epi->event = *event; epi->nwait = 0; epi->next = EP_UNACTIVE_PTR; /* Initialize the poll table using the queue callback */ epq.epi = epi; init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); /* * Attach the item to the poll hooks and get current event bits. * We can safely use the file* here because its usage count has * been increased by the caller of this function. Note that after * this operation completes, the poll callback can start hitting * the new item. 
*/ revents = tfile->f_op->poll(tfile, &epq.pt); /* * We have to check if something went wrong during the poll wait queue * install process. Namely an allocation for a wait queue failed due * high memory pressure. */ error = -ENOMEM; if (epi->nwait < 0) goto error_unregister; /* Add the current item to the list of active epoll hook for this file */ spin_lock(&tfile->f_ep_lock); list_add_tail(&epi->fllink, &tfile->f_ep_links); spin_unlock(&tfile->f_ep_lock); /* * Add the current item to the RB tree. All RB tree operations are * protected by "mtx", and ep_insert() is called with "mtx" held. */ ep_rbtree_insert(ep, epi); /* We have to drop the new item inside our item list to keep track of it */ spin_lock_irqsave(&ep->lock, flags); /* If the file is already "ready" we drop it inside the ready list */ if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { list_add_tail(&epi->rdllink, &ep->rdllist); /* Notify waiting tasks that events are available */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; } spin_unlock_irqrestore(&ep->lock, flags); atomic_inc(&ep->user->epoll_watches); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&psw, &ep->poll_wait); DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n", current, ep, tfile, fd)); return 0; error_unregister: ep_unregister_pollwait(ep, epi); /* * We need to do this because an event could have been arrived on some * allocated wait queue. Note that we don't care about the ep->ovflist * list, since that is used/cleaned only inside a section bound by "mtx". * And ep_insert() is called with "mtx" held. */ spin_lock_irqsave(&ep->lock, flags); if (ep_is_linked(&epi->rdllink)) list_del_init(&epi->rdllink); spin_unlock_irqrestore(&ep->lock, flags); kmem_cache_free(epi_cache, epi); return error; } /* * Modify the interest event mask by dropping an event if the new mask * has a match in the current file status. 
Must be called with "mtx" held. */ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event) { int pwake = 0; unsigned int revents; unsigned long flags; /* * Set the new event interest mask before calling f_op->poll(), otherwise * a potential race might occur. In fact if we do this operation inside * the lock, an event might happen between the f_op->poll() call and the * new event set registering. */ epi->event.events = event->events; /* * Get current event bits. We can safely use the file* here because * its usage count has been increased by the caller of this function. */ revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL); spin_lock_irqsave(&ep->lock, flags); /* Copy the data member from inside the lock */ epi->event.data = event->data; /* * If the item is "hot" and it is not registered inside the ready * list, push it inside. */ if (revents & event->events) { if (!ep_is_linked(&epi->rdllink)) { list_add_tail(&epi->rdllink, &ep->rdllist); /* Notify waiting tasks that events are available */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; } } spin_unlock_irqrestore(&ep->lock, flags); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&psw, &ep->poll_wait); return 0; } static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events, int maxevents) { int eventcnt, error = -EFAULT, pwake = 0; unsigned int revents; unsigned long flags; struct epitem *epi, *nepi; struct list_head txlist; INIT_LIST_HEAD(&txlist); /* * We need to lock this because we could be hit by * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL). */ mutex_lock(&ep->mtx); /* * Steal the ready list, and re-init the original one to the * empty list. Also, set ep->ovflist to NULL so that events * happening while looping w/out locks, are not lost. 
We cannot * have the poll callback to queue directly on ep->rdllist, * because we are doing it in the loop below, in a lockless way. */ spin_lock_irqsave(&ep->lock, flags); list_splice(&ep->rdllist, &txlist); INIT_LIST_HEAD(&ep->rdllist); ep->ovflist = NULL; spin_unlock_irqrestore(&ep->lock, flags); /* * We can loop without lock because this is a task private list. * We just splice'd out the ep->rdllist in ep_collect_ready_items(). * Items cannot vanish during the loop because we are holding "mtx". */ for (eventcnt = 0; !list_empty(&txlist) && eventcnt < maxevents;) { epi = list_first_entry(&txlist, struct epitem, rdllink); list_del_init(&epi->rdllink); /* * Get the ready file event set. We can safely use the file * because we are holding the "mtx" and this will guarantee * that both the file and the item will not vanish. */ revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL); revents &= epi->event.events; /* * Is the event mask intersect the caller-requested one, * deliver the event to userspace. Again, we are holding * "mtx", so no operations coming from userspace can change * the item. */ if (revents) { if (__put_user(revents, &events[eventcnt].events) || __put_user(epi->event.data, &events[eventcnt].data)) goto errxit; if (epi->event.events & EPOLLONESHOT) epi->event.events &= EP_PRIVATE_BITS; eventcnt++; } /* * At this point, noone can insert into ep->rdllist besides * us. The epoll_ctl() callers are locked out by us holding * "mtx" and the poll callback will queue them in ep->ovflist. */ if (!(epi->event.events & EPOLLET) && (revents & epi->event.events)) list_add_tail(&epi->rdllink, &ep->rdllist); } error = 0; errxit: spin_lock_irqsave(&ep->lock, flags); /* * During the time we spent in the loop above, some other events * might have been queued by the poll callback. We re-insert them * inside the main ready-list here. 
*/ for (nepi = ep->ovflist; (epi = nepi) != NULL; nepi = epi->next, epi->next = EP_UNACTIVE_PTR) { /* * If the above loop quit with errors, the epoll item might still * be linked to "txlist", and the list_splice() done below will * take care of those cases. */ if (!ep_is_linked(&epi->rdllink)) list_add_tail(&epi->rdllink, &ep->rdllist); } /* * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after * releasing the lock, events will be queued in the normal way inside * ep->rdllist. */ ep->ovflist = EP_UNACTIVE_PTR; /* * In case of error in the event-send loop, or in case the number of * ready events exceeds the userspace limit, we need to splice the * "txlist" back inside ep->rdllist. */ list_splice(&txlist, &ep->rdllist); if (!list_empty(&ep->rdllist)) { /* * Wake up (if active) both the eventpoll wait list and the ->poll() * wait list (delayed after we release the lock). */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; } spin_unlock_irqrestore(&ep->lock, flags); mutex_unlock(&ep->mtx); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&psw, &ep->poll_wait); return eventcnt == 0 ? error: eventcnt; } static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { int res, eavail; unsigned long flags; long jtimeout; wait_queue_t wait; /* * Calculate the timeout by checking for the "infinite" value ( -1 ) * and the overflow condition. The passed timeout is in milliseconds, * that why (t * HZ) / 1000. */ jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ? MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000; retry: spin_lock_irqsave(&ep->lock, flags); res = 0; if (list_empty(&ep->rdllist)) { /* * We don't have any available event to return to the caller. * We need to sleep here, and we will be wake up by * ep_poll_callback() when events will become available. 
*/ init_waitqueue_entry(&wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue(&ep->wq, &wait); for (;;) { /* * We don't want to sleep if the ep_poll_callback() sends us * a wakeup in between. That's why we set the task state * to TASK_INTERRUPTIBLE before doing the checks. */ set_current_state(TASK_INTERRUPTIBLE); if (!list_empty(&ep->rdllist) || !jtimeout) break; if (signal_pending(current)) { res = -EINTR; break; } spin_unlock_irqrestore(&ep->lock, flags); jtimeout = schedule_timeout(jtimeout); spin_lock_irqsave(&ep->lock, flags); } __remove_wait_queue(&ep->wq, &wait); set_current_state(TASK_RUNNING); } /* Is it worth to try to dig for events ? */ eavail = !list_empty(&ep->rdllist); spin_unlock_irqrestore(&ep->lock, flags); /* * Try to transfer events to user space. In case we get 0 events and * there's still timeout left over, we go trying again in search of * more luck. */ if (!res && eavail && !(res = ep_send_events(ep, events, maxevents)) && jtimeout) goto retry; return res; } /* * Open an eventpoll file descriptor. */ SYSCALL_DEFINE1(epoll_create1, int, flags) { int error, fd = -1; struct eventpoll *ep; /* Check the EPOLL_* constant for consistency. */ BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC); if (flags & ~EPOLL_CLOEXEC) return -EINVAL; DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n", current, flags)); /* * Create the internal data structure ( "struct eventpoll" ). */ error = ep_alloc(&ep); if (error < 0) { fd = error; goto error_return; } /* * Creates all the items needed to setup an eventpoll file. That is, * a file structure and a free file descriptor. 
*/ fd = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, flags & O_CLOEXEC); if (fd < 0) ep_free(ep); error_return: DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", current, flags, fd)); return fd; } SYSCALL_DEFINE1(epoll_create, int, size) { if (size < 0) return -EINVAL; return sys_epoll_create1(0); } /* * The following function implements the controller interface for * the eventpoll file that enables the insertion/removal/change of * file descriptors inside the interest set. */ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event) { int error; struct file *file, *tfile; struct eventpoll *ep; struct epitem *epi; struct epoll_event epds; DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n", current, epfd, op, fd, event)); error = -EFAULT; if (ep_op_has_event(op) && copy_from_user(&epds, event, sizeof(struct epoll_event))) goto error_return; /* Get the "struct file *" for the eventpoll file */ error = -EBADF; file = fget(epfd); if (!file) goto error_return; /* Get the "struct file *" for the target file */ tfile = fget(fd); if (!tfile) goto error_fput; /* The target file descriptor must support poll */ error = -EPERM; if (!tfile->f_op || !tfile->f_op->poll) goto error_tgt_fput; /* * We have to check that the file structure underneath the file descriptor * the user passed to us _is_ an eventpoll file. And also we do not permit * adding an epoll file descriptor inside itself. */ error = -EINVAL; if (file == tfile || !is_file_epoll(file)) goto error_tgt_fput; /* * At this point it is safe to assume that the "private_data" contains * our own data structure. */ ep = file->private_data; mutex_lock(&ep->mtx); /* * Try to lookup the file inside our RB tree, Since we grabbed "mtx" * above, we can be sure to be able to use the item looked up by * ep_find() till we release the mutex. 
*/ epi = ep_find(ep, tfile, fd); error = -EINVAL; switch (op) { case EPOLL_CTL_ADD: if (!epi) { epds.events |= POLLERR | POLLHUP; error = ep_insert(ep, &epds, tfile, fd); } else error = -EEXIST; break; case EPOLL_CTL_DEL: if (epi) error = ep_remove(ep, epi); else error = -ENOENT; break; case EPOLL_CTL_MOD: if (epi) { epds.events |= POLLERR | POLLHUP; error = ep_modify(ep, epi, &epds); } else error = -ENOENT; break; } mutex_unlock(&ep->mtx); error_tgt_fput: fput(tfile); error_fput: fput(file); error_return: DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n", current, epfd, op, fd, event, error)); return error; } /* * Implement the event wait interface for the eventpoll file. It is the kernel * part of the user space epoll_wait(2). */ SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout) { int error; struct file *file; struct eventpoll *ep; DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n", current, epfd, events, maxevents, timeout)); /* The maximum number of event must be greater than zero */ if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) return -EINVAL; /* Verify that the area passed by the user is writeable */ if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) { error = -EFAULT; goto error_return; } /* Get the "struct file *" for the eventpoll file */ error = -EBADF; file = fget(epfd); if (!file) goto error_return; /* * We have to check that the file structure underneath the fd * the user passed to us _is_ an eventpoll file. */ error = -EINVAL; if (!is_file_epoll(file)) goto error_fput; /* * At this point it is safe to assume that the "private_data" contains * our own data structure. */ ep = file->private_data; /* Time to fish for events ... 
*/ error = ep_poll(ep, events, maxevents, timeout); error_fput: fput(file); error_return: DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n", current, epfd, events, maxevents, timeout, error)); return error; } #ifdef HAVE_SET_RESTORE_SIGMASK /* * Implement the event wait interface for the eventpoll file. It is the kernel * part of the user space epoll_pwait(2). */ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout, const sigset_t __user *, sigmask, size_t, sigsetsize) { int error; sigset_t ksigmask, sigsaved; /* * If the caller wants a certain signal mask to be set during the wait, * we apply it here. */ if (sigmask) { if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) return -EFAULT; sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); } error = sys_epoll_wait(epfd, events, maxevents, timeout); /* * If we changed the signal mask, we need to restore the original one. * In case we've got a signal while waiting, we do not restore the * signal mask yet, and we allow do_signal() to deliver the signal on * the way back to userspace, before the signal mask is restored. */ if (sigmask) { if (error == -EINTR) { memcpy(&current->saved_sigmask, &sigsaved, sizeof(sigsaved)); set_restore_sigmask(); } else sigprocmask(SIG_SETMASK, &sigsaved, NULL); } return error; } #endif /* HAVE_SET_RESTORE_SIGMASK */ static int __init eventpoll_init(void) { struct sysinfo si; si_meminfo(&si); /* * Allows top 4% of lomem to be allocated for epoll watches (per user). 
*/ max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / EP_ITEM_COST; /* Initialize the structure used to perform safe poll wait head wake ups */ ep_poll_safewake_init(&psw); /* Allocates slab cache used to allocate "struct epitem" items */ epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), 0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC, NULL); /* Allocates slab cache used to allocate "struct eppoll_entry" */ pwq_cache = kmem_cache_create("eventpoll_pwq", sizeof(struct eppoll_entry), 0, EPI_SLAB_DEBUG|SLAB_PANIC, NULL); return 0; } fs_initcall(eventpoll_init);
gpl-2.0
Feche/android_kernel_motorola_olympus_oc
arch/arm/mach-pxa/colibri-pxa320.c
341
6451
/* * arch/arm/mach-pxa/colibri-pxa320.c * * Support for Toradex PXA320/310 based Colibri module * * Daniel Mack <daniel@caiaq.de> * Matthias Meier <matthias.j.meier@gmx.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/usb/gpio_vbus.h> #include <asm/mach-types.h> #include <asm/sizes.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <mach/pxa320.h> #include <mach/colibri.h> #include <mach/pxafb.h> #include <mach/ohci.h> #include <mach/audio.h> #include <mach/pxa27x-udc.h> #include <mach/udc.h> #include "generic.h" #include "devices.h" #ifdef CONFIG_MACH_COLIBRI_EVALBOARD static mfp_cfg_t colibri_pxa320_evalboard_pin_config[] __initdata = { /* MMC */ GPIO22_MMC1_CLK, GPIO23_MMC1_CMD, GPIO18_MMC1_DAT0, GPIO19_MMC1_DAT1, GPIO20_MMC1_DAT2, GPIO21_MMC1_DAT3, GPIO28_GPIO, /* SD detect */ /* UART 1 configuration (may be set by bootloader) */ GPIO99_UART1_CTS, GPIO104_UART1_RTS, GPIO97_UART1_RXD, GPIO98_UART1_TXD, GPIO101_UART1_DTR, GPIO103_UART1_DSR, GPIO100_UART1_DCD, GPIO102_UART1_RI, /* UART 2 configuration */ GPIO109_UART2_CTS, GPIO112_UART2_RTS, GPIO110_UART2_RXD, GPIO111_UART2_TXD, /* UART 3 configuration */ GPIO30_UART3_RXD, GPIO31_UART3_TXD, /* UHC */ GPIO2_2_USBH_PEN, GPIO3_2_USBH_PWR, /* I2C */ GPIO32_I2C_SCL, GPIO33_I2C_SDA, /* PCMCIA */ MFP_CFG(GPIO59, AF7), /* PRST ; AF7 to tristate */ MFP_CFG(GPIO61, AF7), /* PCE1 ; AF7 to tristate */ MFP_CFG(GPIO60, AF7), /* PCE2 ; AF7 to tristate */ MFP_CFG(GPIO62, AF7), /* PCD ; AF7 to tristate */ MFP_CFG(GPIO56, AF7), /* PSKTSEL ; AF7 to tristate */ GPIO27_GPIO, /* RDnWR ; input/tristate */ GPIO50_GPIO, /* PREG ; input/tristate */ GPIO2_RDY, GPIO5_NPIOR, GPIO6_NPIOW, GPIO7_NPIOS16, GPIO8_NPWAIT, GPIO29_GPIO, /* 
PRDY (READY GPIO) */ GPIO57_GPIO, /* PPEN (POWER GPIO) */ GPIO81_GPIO, /* PCD (DETECT GPIO) */ GPIO77_GPIO, /* PRST (RESET GPIO) */ GPIO53_GPIO, /* PBVD1 */ GPIO79_GPIO, /* PBVD2 */ GPIO54_GPIO, /* POE */ }; #else static mfp_cfg_t colibri_pxa320_evalboard_pin_config[] __initdata = {}; #endif #if defined(CONFIG_AX88796) #define COLIBRI_ETH_IRQ_GPIO mfp_to_gpio(GPIO36_GPIO) /* * Asix AX88796 Ethernet */ static struct ax_plat_data colibri_asix_platdata = { .flags = 0, /* defined later */ .wordlength = 2, }; static struct resource colibri_asix_resource[] = { [0] = { .start = PXA3xx_CS2_PHYS, .end = PXA3xx_CS2_PHYS + (0x20 * 2) - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO), .end = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING, } }; static struct platform_device asix_device = { .name = "ax88796", .id = 0, .num_resources = ARRAY_SIZE(colibri_asix_resource), .resource = colibri_asix_resource, .dev = { .platform_data = &colibri_asix_platdata } }; static mfp_cfg_t colibri_pxa320_eth_pin_config[] __initdata = { GPIO3_nCS2, /* AX88796 chip select */ GPIO36_GPIO | MFP_PULL_HIGH /* AX88796 IRQ */ }; static void __init colibri_pxa320_init_eth(void) { colibri_pxa3xx_init_eth(&colibri_asix_platdata); pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_eth_pin_config)); platform_device_register(&asix_device); } #else static inline void __init colibri_pxa320_init_eth(void) {} #endif /* CONFIG_AX88796 */ #if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE) static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = { .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96), .gpio_pullup = -1, }; static struct platform_device colibri_pxa320_gpio_vbus = { .name = "gpio-vbus", .id = -1, .dev = { .platform_data = &colibri_pxa320_gpio_vbus_info, }, }; static void colibri_pxa320_udc_command(int cmd) { if (cmd == PXA2XX_UDC_CMD_CONNECT) UP2OCR = UP2OCR_HXOE | UP2OCR_DPPUE; else if (cmd == PXA2XX_UDC_CMD_DISCONNECT) 
UP2OCR = UP2OCR_HXOE; } static struct pxa2xx_udc_mach_info colibri_pxa320_udc_info __initdata = { .udc_command = colibri_pxa320_udc_command, .gpio_pullup = -1, }; static void __init colibri_pxa320_init_udc(void) { pxa_set_udc_info(&colibri_pxa320_udc_info); platform_device_register(&colibri_pxa320_gpio_vbus); } #else static inline void colibri_pxa320_init_udc(void) {} #endif #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static mfp_cfg_t colibri_pxa320_lcd_pin_config[] __initdata = { GPIO6_2_LCD_LDD_0, GPIO7_2_LCD_LDD_1, GPIO8_2_LCD_LDD_2, GPIO9_2_LCD_LDD_3, GPIO10_2_LCD_LDD_4, GPIO11_2_LCD_LDD_5, GPIO12_2_LCD_LDD_6, GPIO13_2_LCD_LDD_7, GPIO63_LCD_LDD_8, GPIO64_LCD_LDD_9, GPIO65_LCD_LDD_10, GPIO66_LCD_LDD_11, GPIO67_LCD_LDD_12, GPIO68_LCD_LDD_13, GPIO69_LCD_LDD_14, GPIO70_LCD_LDD_15, GPIO71_LCD_LDD_16, GPIO72_LCD_LDD_17, GPIO73_LCD_CS_N, GPIO74_LCD_VSYNC, GPIO14_2_LCD_FCLK, GPIO15_2_LCD_LCLK, GPIO16_2_LCD_PCLK, GPIO17_2_LCD_BIAS, }; static void __init colibri_pxa320_init_lcd(void) { pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_lcd_pin_config)); } #else static inline void colibri_pxa320_init_lcd(void) {} #endif #if defined(CONFIG_SND_AC97_CODEC) || \ defined(CONFIG_SND_AC97_CODEC_MODULE) static mfp_cfg_t colibri_pxa320_ac97_pin_config[] __initdata = { GPIO34_AC97_SYSCLK, GPIO35_AC97_SDATA_IN_0, GPIO37_AC97_SDATA_OUT, GPIO38_AC97_SYNC, GPIO39_AC97_BITCLK, GPIO40_AC97_nACRESET }; static inline void __init colibri_pxa320_init_ac97(void) { pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_ac97_pin_config)); pxa_set_ac97_info(NULL); } #else static inline void colibri_pxa320_init_ac97(void) {} #endif void __init colibri_pxa320_init(void) { colibri_pxa320_init_eth(); colibri_pxa3xx_init_nand(); colibri_pxa320_init_lcd(); colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO49_GPIO)); colibri_pxa320_init_ac97(); colibri_pxa320_init_udc(); /* Evalboard init */ pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_evalboard_pin_config)); colibri_evalboard_init(); } 
MACHINE_START(COLIBRI320, "Toradex Colibri PXA320") .boot_params = COLIBRI_SDRAM_BASE + 0x100, .init_machine = colibri_pxa320_init, .map_io = pxa3xx_map_io, .init_irq = pxa3xx_init_irq, .handle_irq = pxa3xx_handle_irq, .timer = &pxa_timer, MACHINE_END
gpl-2.0
Dees-Troy/android_kernel_asus_tf700t
drivers/staging/gma500/psb_intel_sdvo.c
597
36259
/* * Copyright (c) 2006-2007 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> #include <linux/delay.h> /* #include <drm/drm_crtc.h> */ #include <drm/drmP.h> #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" #include "psb_intel_sdvo_regs.h" struct psb_intel_sdvo_priv { struct psb_intel_i2c_chan *i2c_bus; int slaveaddr; int output_device; u16 active_outputs; struct psb_intel_sdvo_caps caps; int pixel_clock_min, pixel_clock_max; int save_sdvo_mult; u16 save_active_outputs; struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; struct psb_intel_sdvo_dtd save_output_dtd[16]; u32 save_SDVOX; u8 in_out_map[4]; u8 by_input_wiring; u32 active_device; }; /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). */ void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output, u32 val) { struct drm_device *dev = psb_intel_output->base.dev; struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; u32 bval = val, cval = val; int i; if (sdvo_priv->output_device == SDVOB) cval = REG_READ(SDVOC); else bval = REG_READ(SDVOB); /* * Write the registers twice for luck. Sometimes, * writing them only once doesn't appear to 'stick'. 
* The BIOS does this too. Yay, magic */ for (i = 0; i < 2; i++) { REG_WRITE(SDVOB, bval); REG_READ(SDVOB); REG_WRITE(SDVOC, cval); REG_READ(SDVOC); } } static bool psb_intel_sdvo_read_byte( struct psb_intel_output *psb_intel_output, u8 addr, u8 *ch) { struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; u8 out_buf[2]; u8 buf[2]; int ret; struct i2c_msg msgs[] = { { .addr = sdvo_priv->i2c_bus->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = sdvo_priv->i2c_bus->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = buf, } }; out_buf[0] = addr; out_buf[1] = 0; ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2); if (ret == 2) { *ch = buf[0]; return true; } return false; } static bool psb_intel_sdvo_write_byte( struct psb_intel_output *psb_intel_output, int addr, u8 ch) { u8 out_buf[2]; struct i2c_msg msgs[] = { { .addr = psb_intel_output->i2c_bus->slave_addr, .flags = 0, .len = 2, .buf = out_buf, } }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1) return true; return false; } #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ static const struct _sdvo_cmd_name { u8 cmd; char *name; } sdvo_cmd_names[] = { SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), SDVO_CMD_NAME_ENTRY (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),}; #define SDVO_NAME(dev_priv) \ ((dev_priv)->output_device == SDVOB ? 
"SDVOB" : "SDVOC") #define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv) static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output, u8 cmd, void *args, int args_len) { struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; int i; if (0) { printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); for (i = 0; i < args_len; i++) printk(KERN_CONT "%02X ", ((u8 *) args)[i]); for (; i < 8; i++) printk(KERN_CONT " "); for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { if (cmd == sdvo_cmd_names[i].cmd) { printk(KERN_CONT "(%s)", sdvo_cmd_names[i].name); break; } } if (i == sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0])) printk(KERN_CONT "(%02X)", cmd); printk(KERN_CONT "\n"); } for (i = 0; i < args_len; i++) { psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_ARG_0 - i, ((u8 *) args)[i]); } psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd); } static const char *const cmd_status_names[] = { "Power on", "Success", "Not supported", "Invalid arg", "Pending", "Target not specified", "Scaling not supported" }; static u8 psb_intel_sdvo_read_response( struct psb_intel_output *psb_intel_output, void *response, int response_len) { struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; int i; u8 status; u8 retry = 50; while (retry--) { /* Read the command response */ for (i = 0; i < response_len; i++) { psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_RETURN_0 + i, &((u8 *) response)[i]); } /* read the return status */ psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_CMD_STATUS, &status); if (0) { pr_debug("%s: R: ", SDVO_NAME(sdvo_priv)); for (i = 0; i < response_len; i++) printk(KERN_CONT "%02X ", ((u8 *) response)[i]); for (; i < 8; i++) printk(" "); if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) printk(KERN_CONT "(%s)", cmd_status_names[status]); else printk(KERN_CONT "(??? 
%d)", status); printk(KERN_CONT "\n"); } if (status != SDVO_CMD_STATUS_PENDING) return status; mdelay(50); } return status; } int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) { if (mode->clock >= 100000) return 1; else if (mode->clock >= 50000) return 2; else return 4; } /** * Don't check status code from this as it switches the bus back to the * SDVO chips which defeats the purpose of doing a bus switch in the first * place. */ void psb_intel_sdvo_set_control_bus_switch( struct psb_intel_output *psb_intel_output, u8 target) { psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); } static bool psb_intel_sdvo_set_target_input( struct psb_intel_output *psb_intel_output, bool target_0, bool target_1) { struct psb_intel_sdvo_set_target_input_args targets = { 0 }; u8 status; if (target_0 && target_1) return SDVO_CMD_STATUS_NOTSUPP; if (target_1) targets.target_1 = 1; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, sizeof(targets)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); return status == SDVO_CMD_STATUS_SUCCESS; } /** * Return whether each input is trained. * * This function is making an assumption about the layout of the response, * which should be checked against the docs. 
*/ static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output *psb_intel_output, bool *input_1, bool *input_2) { struct psb_intel_sdvo_get_trained_inputs_response response; u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, sizeof(response)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; *input_1 = response.input0_trained; *input_2 = response.input1_trained; return true; } static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output *psb_intel_output, u16 *outputs) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, outputs, sizeof(*outputs)); return status == SDVO_CMD_STATUS_SUCCESS; } static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output *psb_intel_output, u16 outputs) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, sizeof(outputs)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); return status == SDVO_CMD_STATUS_SUCCESS; } static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output *psb_intel_output, int mode) { u8 status, state = SDVO_ENCODER_STATE_ON; switch (mode) { case DRM_MODE_DPMS_ON: state = SDVO_ENCODER_STATE_ON; break; case DRM_MODE_DPMS_STANDBY: state = SDVO_ENCODER_STATE_STANDBY; break; case DRM_MODE_DPMS_SUSPEND: state = SDVO_ENCODER_STATE_SUSPEND; break; case DRM_MODE_DPMS_OFF: state = SDVO_ENCODER_STATE_OFF; break; } psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); return status == SDVO_CMD_STATUS_SUCCESS; } static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output *psb_intel_output, int *clock_min, int *clock_max) { struct psb_intel_sdvo_pixel_clock_range clocks; u8 status; 
psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &clocks, sizeof(clocks)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; /* Convert the values from units of 10 kHz to kHz. */ *clock_min = clocks.min * 10; *clock_max = clocks.max * 10; return true; } static bool psb_intel_sdvo_set_target_output( struct psb_intel_output *psb_intel_output, u16 outputs) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, sizeof(outputs)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); return status == SDVO_CMD_STATUS_SUCCESS; } static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output, u8 cmd, struct psb_intel_sdvo_dtd *dtd) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1, sizeof(dtd->part1)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2, sizeof(dtd->part2)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } static bool psb_intel_sdvo_get_input_timing( struct psb_intel_output *psb_intel_output, struct psb_intel_sdvo_dtd *dtd) { return psb_intel_sdvo_get_timing(psb_intel_output, SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); } static bool psb_intel_sdvo_set_timing( struct psb_intel_output *psb_intel_output, u8 cmd, struct psb_intel_sdvo_dtd *dtd) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return 
true; } static bool psb_intel_sdvo_set_input_timing( struct psb_intel_output *psb_intel_output, struct psb_intel_sdvo_dtd *dtd) { return psb_intel_sdvo_set_timing(psb_intel_output, SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); } static bool psb_intel_sdvo_set_output_timing( struct psb_intel_output *psb_intel_output, struct psb_intel_sdvo_dtd *dtd) { return psb_intel_sdvo_set_timing(psb_intel_output, SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); } static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output *psb_intel_output) { u8 response, status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n"); return SDVO_CLOCK_RATE_MULT_1X; } else { DRM_DEBUG("Current clock rate multiplier: %d\n", response); } return response; } static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output *psb_intel_output, u8 val) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output, u32 in0outputmask, u32 in1outputmask) { u8 byArgs[4]; u8 status; int i; struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv; /* Make all fields of the args/ret to zero */ memset(byArgs, 0, sizeof(byArgs)); /* Fill up the argument values; */ byArgs[0] = (u8) (in0outputmask & 0xFF); byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF); byArgs[2] = (u8) (in1outputmask & 0xFF); byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF); /*save inoutmap arg here*/ for (i = 0; i < 4; i++) sdvo_priv->in_out_map[i] = byArgs[0]; psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4); status = psb_intel_sdvo_read_response(output, NULL, 0); if (status != 
SDVO_CMD_STATUS_SUCCESS) return false; return true; } static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output) { u32 dwCurrentSDVOIn0 = 0; u32 dwCurrentSDVOIn1 = 0; u32 dwDevMask = 0; struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv; /* Please DO NOT change the following code. */ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */ if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) { switch (sdvo_priv->active_device) { case SDVO_DEVICE_LVDS: dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1; break; case SDVO_DEVICE_TMDS: dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1; break; case SDVO_DEVICE_TV: dwDevMask = SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 | SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1; break; case SDVO_DEVICE_CRT: dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1; break; } dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask); } else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) { switch (sdvo_priv->active_device) { case SDVO_DEVICE_LVDS: dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1; break; case SDVO_DEVICE_TMDS: dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1; break; case SDVO_DEVICE_TV: dwDevMask = SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 | SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1; break; case SDVO_DEVICE_CRT: dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1; break; } dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask); } psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0, dwCurrentSDVOIn1); } static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO * device will be told of the multiplier during mode_set. 
*/ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode); return true; } static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; u16 width, height; u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len; u16 h_sync_offset, v_sync_offset; u32 sdvox; struct psb_intel_sdvo_dtd output_dtd; int sdvo_pixel_multiply; if (!mode) return; psb_intel_sdvo_set_target_output(psb_intel_output, 0); width = mode->crtc_hdisplay; height = mode->crtc_vdisplay; /* do some mode translations */ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; output_dtd.part1.clock = mode->clock / 10; output_dtd.part1.h_active = width & 0xff; output_dtd.part1.h_blank = h_blank_len & 0xff; output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) | ((h_blank_len >> 8) & 0xf); output_dtd.part1.v_active = height & 0xff; output_dtd.part1.v_blank = v_blank_len & 0xff; output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) | ((v_blank_len >> 8) & 0xf); output_dtd.part2.h_sync_off = h_sync_offset; output_dtd.part2.h_sync_width = h_sync_len & 0xff; output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | (v_sync_len & 0xf); output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4); output_dtd.part2.dtd_flags = 
0x18; if (mode->flags & DRM_MODE_FLAG_PHSYNC) output_dtd.part2.dtd_flags |= 0x2; if (mode->flags & DRM_MODE_FLAG_PVSYNC) output_dtd.part2.dtd_flags |= 0x4; output_dtd.part2.sdvo_flags = 0; output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0; output_dtd.part2.reserved = 0; /* Set the output timing to the screen */ psb_intel_sdvo_set_target_output(psb_intel_output, sdvo_priv->active_outputs); /* Set the input timing to the screen. Assume always input 0. */ psb_intel_sdvo_set_target_input(psb_intel_output, true, false); psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd); /* We would like to use i830_sdvo_create_preferred_input_timing() to * provide the device with a timing it can support, if it supports that * feature. However, presumably we would need to adjust the CRTC to * output the preferred timing, and we don't support that currently. */ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd); switch (psb_intel_sdvo_get_pixel_multiplier(mode)) { case 1: psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, SDVO_CLOCK_RATE_MULT_1X); break; case 2: psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, SDVO_CLOCK_RATE_MULT_2X); break; case 4: psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, SDVO_CLOCK_RATE_MULT_4X); break; } /* Set the SDVO control regs. 
*/ sdvox = REG_READ(sdvo_priv->output_device); switch (sdvo_priv->output_device) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; break; case SDVOC: sdvox &= SDVOC_PRESERVE_MASK; break; } sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; if (psb_intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode); psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox); psb_intel_sdvo_set_iomap(psb_intel_output); } static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; u32 temp; if (mode != DRM_MODE_DPMS_ON) { psb_intel_sdvo_set_active_outputs(psb_intel_output, 0); if (0) psb_intel_sdvo_set_encoder_power_state( psb_intel_output, mode); if (mode == DRM_MODE_DPMS_OFF) { temp = REG_READ(sdvo_priv->output_device); if ((temp & SDVO_ENABLE) != 0) { psb_intel_sdvo_write_sdvox(psb_intel_output, temp & ~SDVO_ENABLE); } } } else { bool input1, input2; int i; u8 status; temp = REG_READ(sdvo_priv->output_device); if ((temp & SDVO_ENABLE) == 0) psb_intel_sdvo_write_sdvox(psb_intel_output, temp | SDVO_ENABLE); for (i = 0; i < 2; i++) psb_intel_wait_for_vblank(dev); status = psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1, &input2); /* Warn if the device reported failure to sync. * A lot of SDVO devices fail to notify of sync, but it's * a given it the status is a success, we succeeded. 
*/ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { DRM_DEBUG ("First %s output reported failure to sync\n", SDVO_NAME(sdvo_priv)); } if (0) psb_intel_sdvo_set_encoder_power_state( psb_intel_output, mode); psb_intel_sdvo_set_active_outputs(psb_intel_output, sdvo_priv->active_outputs); } return; } static void psb_intel_sdvo_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; /*int o;*/ sdvo_priv->save_sdvo_mult = psb_intel_sdvo_get_clock_rate_mult(psb_intel_output); psb_intel_sdvo_get_active_outputs(psb_intel_output, &sdvo_priv->save_active_outputs); if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { psb_intel_sdvo_set_target_input(psb_intel_output, true, false); psb_intel_sdvo_get_input_timing(psb_intel_output, &sdvo_priv->save_input_dtd_1); } if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { psb_intel_sdvo_set_target_input(psb_intel_output, false, true); psb_intel_sdvo_get_input_timing(psb_intel_output, &sdvo_priv->save_input_dtd_2); } sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device); /*TODO: save the in_out_map state*/ } static void psb_intel_sdvo_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; /*int o;*/ int i; bool input1, input2; u8 status; psb_intel_sdvo_set_active_outputs(psb_intel_output, 0); if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { psb_intel_sdvo_set_target_input(psb_intel_output, true, false); psb_intel_sdvo_set_input_timing(psb_intel_output, &sdvo_priv->save_input_dtd_1); } if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { psb_intel_sdvo_set_target_input(psb_intel_output, false, true); psb_intel_sdvo_set_input_timing(psb_intel_output, &sdvo_priv->save_input_dtd_2); } 
psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, sdvo_priv->save_sdvo_mult); REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX); if (sdvo_priv->save_SDVOX & SDVO_ENABLE) { for (i = 0; i < 2; i++) psb_intel_wait_for_vblank(dev); status = psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1, &input2); if (status == SDVO_CMD_STATUS_SUCCESS && !input1) DRM_DEBUG ("First %s output reported failure to sync\n", SDVO_NAME(sdvo_priv)); } psb_intel_sdvo_set_active_outputs(psb_intel_output, sdvo_priv->save_active_outputs); /*TODO: restore in_out_map*/ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_IN_OUT_MAP, sdvo_priv->in_out_map, 4); psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); } static int psb_intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (sdvo_priv->pixel_clock_min > mode->clock) return MODE_CLOCK_LOW; if (sdvo_priv->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; return MODE_OK; } static bool psb_intel_sdvo_get_capabilities( struct psb_intel_output *psb_intel_output, struct psb_intel_sdvo_caps *caps) { u8 status; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, caps, sizeof(*caps)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB) { struct drm_connector *connector = NULL; struct psb_intel_output *iout = NULL; struct psb_intel_sdvo_priv *sdvo; /* find the sdvo connector */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { iout = to_psb_intel_output(connector); if (iout->type != INTEL_OUTPUT_SDVO) continue; sdvo = iout->dev_priv; if (sdvo->output_device == SDVOB && sdvoB) 
return connector; if (sdvo->output_device == SDVOC && !sdvoB) return connector; } return NULL; } int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector) { u8 response[2]; u8 status; struct psb_intel_output *psb_intel_output; if (!connector) return 0; psb_intel_output = to_psb_intel_output(connector); psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); if (response[0] != 0) return 1; return 0; } void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on) { u8 response[2]; u8 status; struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); psb_intel_sdvo_read_response(psb_intel_output, &response, 2); if (on) { psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } else { response[0] = 0; response[1] = 0; psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); psb_intel_sdvo_read_response(psb_intel_output, &response, 2); } static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector *connector, bool force) { u8 response[2]; u8 status; struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); if ((response[0] != 0) || (response[1] != 0)) return connector_status_connected; else return connector_status_disconnected; } static int psb_intel_sdvo_get_modes(struct drm_connector *connector) { struct 
psb_intel_output *psb_intel_output = to_psb_intel_output(connector); /* set the bus switch and get the modes */ psb_intel_sdvo_set_control_bus_switch(psb_intel_output, SDVO_CONTROL_BUS_DDC2); psb_intel_ddc_get_modes(psb_intel_output); if (list_empty(&connector->probed_modes)) return 0; return 1; } static void psb_intel_sdvo_destroy(struct drm_connector *connector) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); if (psb_intel_output->i2c_bus) psb_intel_i2c_destroy(psb_intel_output->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(psb_intel_output); } static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { .dpms = psb_intel_sdvo_dpms, .mode_fixup = psb_intel_sdvo_mode_fixup, .prepare = psb_intel_encoder_prepare, .mode_set = psb_intel_sdvo_mode_set, .commit = psb_intel_encoder_commit, }; static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { .dpms = drm_helper_connector_dpms, .save = psb_intel_sdvo_save, .restore = psb_intel_sdvo_restore, .detect = psb_intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = psb_intel_sdvo_destroy, }; static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = { .get_modes = psb_intel_sdvo_get_modes, .mode_valid = psb_intel_sdvo_mode_valid, .best_encoder = psb_intel_best_encoder, }; void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); } static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = { .destroy = psb_intel_sdvo_enc_destroy, }; void psb_intel_sdvo_init(struct drm_device *dev, int output_device) { struct drm_connector *connector; struct psb_intel_output *psb_intel_output; struct psb_intel_sdvo_priv *sdvo_priv; struct psb_intel_i2c_chan *i2cbus = NULL; int connector_type; u8 ch[0x40]; int i; int encoder_type, output_id; psb_intel_output = kcalloc(sizeof(struct psb_intel_output) + sizeof(struct psb_intel_sdvo_priv), 1, 
GFP_KERNEL); if (!psb_intel_output) return; connector = &psb_intel_output->base; drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs, DRM_MODE_CONNECTOR_Unknown); drm_connector_helper_add(connector, &psb_intel_sdvo_connector_helper_funcs); sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1); psb_intel_output->type = INTEL_OUTPUT_SDVO; connector->interlace_allowed = 0; connector->doublescan_allowed = 0; /* setup the DDC bus. */ if (output_device == SDVOB) i2cbus = psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); else i2cbus = psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); if (!i2cbus) goto err_connector; sdvo_priv->i2c_bus = i2cbus; if (output_device == SDVOB) { output_id = 1; sdvo_priv->by_input_wiring = SDVOB_IN0; sdvo_priv->i2c_bus->slave_addr = 0x38; } else { output_id = 2; sdvo_priv->i2c_bus->slave_addr = 0x39; } sdvo_priv->output_device = output_device; psb_intel_output->i2c_bus = i2cbus; psb_intel_output->dev_priv = sdvo_priv; /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) { dev_dbg(dev->dev, "No SDVO device found on SDVO%c\n", output_device == SDVOB ? 'B' : 'C'); goto err_i2c; } } psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps); memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs)); /* TODO, CVBS, SVID, YPRPB & SCART outputs. 
*/ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) { sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0; sdvo_priv->active_device = SDVO_DEVICE_CRT; connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_DAC; connector_type = DRM_MODE_CONNECTOR_VGA; } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) { sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1; sdvo_priv->active_outputs = SDVO_DEVICE_CRT; connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_DAC; connector_type = DRM_MODE_CONNECTOR_VGA; } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) { sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0; sdvo_priv->active_device = SDVO_DEVICE_TMDS; connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_TMDS; connector_type = DRM_MODE_CONNECTOR_DVID; } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) { sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1; sdvo_priv->active_device = SDVO_DEVICE_TMDS; connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_TMDS; connector_type = DRM_MODE_CONNECTOR_DVID; } else { unsigned char bytes[2]; memcpy(bytes, &sdvo_priv->caps.output_flags, 2); dev_dbg(dev->dev, "%s: No active RGB or TMDS outputs (0x%02x%02x)\n", SDVO_NAME(sdvo_priv), bytes[0], bytes[1]); goto err_i2c; } drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs, encoder_type); drm_encoder_helper_add(&psb_intel_output->enc, &psb_intel_sdvo_helper_funcs); connector->connector_type = connector_type; drm_mode_connector_attach_encoder(&psb_intel_output->base, &psb_intel_output->enc); drm_sysfs_connector_add(connector); /* Set the input timing to the screen. Assume always input 0. 
*/ psb_intel_sdvo_set_target_input(psb_intel_output, true, false); psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output, &sdvo_priv->pixel_clock_min, &sdvo_priv-> pixel_clock_max); dev_dbg(dev->dev, "%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " "input 1: %c, input 2: %c, " "output 1: %c, output 2: %c\n", SDVO_NAME(sdvo_priv), sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, sdvo_priv->caps.device_rev_id, sdvo_priv->pixel_clock_min / 1000, sdvo_priv->pixel_clock_max / 1000, (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', /* check currently supported outputs */ sdvo_priv->caps.output_flags & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', sdvo_priv->caps.output_flags & (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); psb_intel_output->ddc_bus = i2cbus; return; err_i2c: psb_intel_i2c_destroy(psb_intel_output->i2c_bus); err_connector: drm_connector_cleanup(connector); kfree(psb_intel_output); return; }
gpl-2.0
luk1337/android_kernel_samsung_i9082
drivers/acpi/pci_root.c
597
17604
/* * pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 40 $) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/pci-aspm.h> #include <linux/acpi.h> #include <linux/slab.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <acpi/apei.h> #define PREFIX "ACPI: " #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME("pci_root"); #define ACPI_PCI_ROOT_CLASS "pci_bridge" #define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge" static int acpi_pci_root_add(struct acpi_device *device); static int acpi_pci_root_remove(struct acpi_device *device, int type); static int acpi_pci_root_start(struct acpi_device *device); #define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \ | OSC_ACTIVE_STATE_PWR_SUPPORT \ | OSC_CLOCK_PWR_CAPABILITY_SUPPORT \ | OSC_MSI_SUPPORT) static const struct 
acpi_device_id root_device_ids[] = { {"PNP0A03", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, root_device_ids); static struct acpi_driver acpi_pci_root_driver = { .name = "pci_root", .class = ACPI_PCI_ROOT_CLASS, .ids = root_device_ids, .ops = { .add = acpi_pci_root_add, .remove = acpi_pci_root_remove, .start = acpi_pci_root_start, }, }; static LIST_HEAD(acpi_pci_roots); static struct acpi_pci_driver *sub_driver; static DEFINE_MUTEX(osc_lock); int acpi_pci_register_driver(struct acpi_pci_driver *driver) { int n = 0; struct acpi_pci_root *root; struct acpi_pci_driver **pptr = &sub_driver; while (*pptr) pptr = &(*pptr)->next; *pptr = driver; if (!driver->add) return 0; list_for_each_entry(root, &acpi_pci_roots, node) { driver->add(root->device->handle); n++; } return n; } EXPORT_SYMBOL(acpi_pci_register_driver); void acpi_pci_unregister_driver(struct acpi_pci_driver *driver) { struct acpi_pci_root *root; struct acpi_pci_driver **pptr = &sub_driver; while (*pptr) { if (*pptr == driver) break; pptr = &(*pptr)->next; } BUG_ON(!*pptr); *pptr = (*pptr)->next; if (!driver->remove) return; list_for_each_entry(root, &acpi_pci_roots, node) driver->remove(root->device->handle); } EXPORT_SYMBOL(acpi_pci_unregister_driver); acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus) { struct acpi_pci_root *root; list_for_each_entry(root, &acpi_pci_roots, node) if ((root->segment == (u16) seg) && (root->secondary.start == (u16) bus)) return root->device->handle; return NULL; } EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle); /** * acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge * @handle - the ACPI CA node in question. * * Note: we could make this API take a struct acpi_device * instead, but * for now, it's more convenient to operate on an acpi_handle. 
*/ int acpi_is_root_bridge(acpi_handle handle) { int ret; struct acpi_device *device; ret = acpi_bus_get_device(handle, &device); if (ret) return 0; ret = acpi_match_device_ids(device, root_device_ids); if (ret) return 0; else return 1; } EXPORT_SYMBOL_GPL(acpi_is_root_bridge); static acpi_status get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) { struct resource *res = data; struct acpi_resource_address64 address; if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 && resource->type != ACPI_RESOURCE_TYPE_ADDRESS32 && resource->type != ACPI_RESOURCE_TYPE_ADDRESS64) return AE_OK; acpi_resource_to_address64(resource, &address); if ((address.address_length > 0) && (address.resource_type == ACPI_BUS_NUMBER_RANGE)) { res->start = address.minimum; res->end = address.minimum + address.address_length - 1; } return AE_OK; } static acpi_status try_get_root_bridge_busnr(acpi_handle handle, struct resource *res) { acpi_status status; res->start = -1; status = acpi_walk_resources(handle, METHOD_NAME__CRS, get_root_bridge_busnr_callback, res); if (ACPI_FAILURE(status)) return status; if (res->start == -1) return AE_ERROR; return AE_OK; } static void acpi_pci_bridge_scan(struct acpi_device *device) { int status; struct acpi_device *child = NULL; if (device->flags.bus_address) if (device->parent && device->parent->ops.bind) { status = device->parent->ops.bind(device); if (!status) { list_for_each_entry(child, &device->children, node) acpi_pci_bridge_scan(child); } } } static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766"; static acpi_status acpi_pci_run_osc(acpi_handle handle, const u32 *capbuf, u32 *retval) { struct acpi_osc_context context = { .uuid_str = pci_osc_uuid_str, .rev = 1, .cap.length = 12, .cap.pointer = (void *)capbuf, }; acpi_status status; status = acpi_run_osc(handle, &context); if (ACPI_SUCCESS(status)) { *retval = *((u32 *)(context.ret.pointer + 8)); kfree(context.ret.pointer); } return status; } static acpi_status 
acpi_pci_query_osc(struct acpi_pci_root *root, u32 support, u32 *control) { acpi_status status; u32 result, capbuf[3]; support &= OSC_PCI_SUPPORT_MASKS; support |= root->osc_support_set; capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; capbuf[OSC_SUPPORT_TYPE] = support; if (control) { *control &= OSC_PCI_CONTROL_MASKS; capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set; } else { /* Run _OSC query only with existing controls. */ capbuf[OSC_CONTROL_TYPE] = root->osc_control_set; } status = acpi_pci_run_osc(root->device->handle, capbuf, &result); if (ACPI_SUCCESS(status)) { root->osc_support_set = support; if (control) *control = result; } return status; } static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags) { acpi_status status; acpi_handle tmp; status = acpi_get_handle(root->device->handle, "_OSC", &tmp); if (ACPI_FAILURE(status)) return status; mutex_lock(&osc_lock); status = acpi_pci_query_osc(root, flags, NULL); mutex_unlock(&osc_lock); return status; } struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle) { struct acpi_pci_root *root; list_for_each_entry(root, &acpi_pci_roots, node) { if (root->device->handle == handle) return root; } return NULL; } EXPORT_SYMBOL_GPL(acpi_pci_find_root); struct acpi_handle_node { struct list_head node; acpi_handle handle; }; /** * acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev * @handle: the handle in question * * Given an ACPI CA handle, the desired PCI device is located in the * list of PCI devices. * * If the device is found, its reference count is increased and this * function returns a pointer to its data structure. The caller must * decrement the reference count by calling pci_dev_put(). * If no device is found, %NULL is returned. 
*/ struct pci_dev *acpi_get_pci_dev(acpi_handle handle) { int dev, fn; unsigned long long adr; acpi_status status; acpi_handle phandle; struct pci_bus *pbus; struct pci_dev *pdev = NULL; struct acpi_handle_node *node, *tmp; struct acpi_pci_root *root; LIST_HEAD(device_list); /* * Walk up the ACPI CA namespace until we reach a PCI root bridge. */ phandle = handle; while (!acpi_is_root_bridge(phandle)) { node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL); if (!node) goto out; INIT_LIST_HEAD(&node->node); node->handle = phandle; list_add(&node->node, &device_list); status = acpi_get_parent(phandle, &phandle); if (ACPI_FAILURE(status)) goto out; } root = acpi_pci_find_root(phandle); if (!root) goto out; pbus = root->bus; /* * Now, walk back down the PCI device tree until we return to our * original handle. Assumes that everything between the PCI root * bridge and the device we're looking for must be a P2P bridge. */ list_for_each_entry(node, &device_list, node) { acpi_handle hnd = node->handle; status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr); if (ACPI_FAILURE(status)) goto out; dev = (adr >> 16) & 0xffff; fn = adr & 0xffff; pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn)); if (!pdev || hnd == handle) break; pbus = pdev->subordinate; pci_dev_put(pdev); /* * This function may be called for a non-PCI device that has a * PCI parent (eg. a disk under a PCI SATA controller). In that * case pdev->subordinate will be NULL for the parent. */ if (!pbus) { dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n"); pdev = NULL; break; } } out: list_for_each_entry_safe(node, tmp, &device_list, node) kfree(node); return pdev; } EXPORT_SYMBOL_GPL(acpi_get_pci_dev); /** * acpi_pci_osc_control_set - Request control of PCI root _OSC features. * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex). * @mask: Mask of _OSC bits to request control of, place to store control mask. * @req: Mask of _OSC bits the control of is essential to the caller. 
* * Run _OSC query for @mask and if that is successful, compare the returned * mask of control bits with @req. If all of the @req bits are set in the * returned mask, run _OSC request for it. * * The variable at the @mask address may be modified regardless of whether or * not the function returns success. On success it will contain the mask of * _OSC bits the BIOS has granted control of, but its contents are meaningless * on failure. **/ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req) { struct acpi_pci_root *root; acpi_status status; u32 ctrl, capbuf[3]; acpi_handle tmp; if (!mask) return AE_BAD_PARAMETER; ctrl = *mask & OSC_PCI_CONTROL_MASKS; if ((ctrl & req) != req) return AE_TYPE; root = acpi_pci_find_root(handle); if (!root) return AE_NOT_EXIST; status = acpi_get_handle(handle, "_OSC", &tmp); if (ACPI_FAILURE(status)) return status; mutex_lock(&osc_lock); *mask = ctrl | root->osc_control_set; /* No need to evaluate _OSC if the control was already granted. */ if ((root->osc_control_set & ctrl) == ctrl) goto out; /* Need to check the available controls bits before requesting them. 
*/ while (*mask) { status = acpi_pci_query_osc(root, root->osc_support_set, mask); if (ACPI_FAILURE(status)) goto out; if (ctrl == *mask) break; ctrl = *mask; } if ((ctrl & req) != req) { status = AE_SUPPORT; goto out; } capbuf[OSC_QUERY_TYPE] = 0; capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set; capbuf[OSC_CONTROL_TYPE] = ctrl; status = acpi_pci_run_osc(handle, capbuf, mask); if (ACPI_SUCCESS(status)) root->osc_control_set = *mask; out: mutex_unlock(&osc_lock); return status; } EXPORT_SYMBOL(acpi_pci_osc_control_set); static int __devinit acpi_pci_root_add(struct acpi_device *device) { unsigned long long segment, bus; acpi_status status; int result; struct acpi_pci_root *root; acpi_handle handle; struct acpi_device *child; u32 flags, base_flags; root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); if (!root) return -ENOMEM; segment = 0; status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, &segment); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { printk(KERN_ERR PREFIX "can't evaluate _SEG\n"); result = -ENODEV; goto end; } /* Check _CRS first, then _BBN. If no _BBN, default to zero. */ root->secondary.flags = IORESOURCE_BUS; status = try_get_root_bridge_busnr(device->handle, &root->secondary); if (ACPI_FAILURE(status)) { /* * We need both the start and end of the downstream bus range * to interpret _CBA (MMCONFIG base address), so it really is * supposed to be in _CRS. If we don't find it there, all we * can do is assume [_BBN-0xFF] or [0-0xFF]. 
*/ root->secondary.end = 0xFF; printk(KERN_WARNING FW_BUG PREFIX "no secondary bus range in _CRS\n"); status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); if (ACPI_SUCCESS(status)) root->secondary.start = bus; else if (status == AE_NOT_FOUND) root->secondary.start = 0; else { printk(KERN_ERR PREFIX "can't evaluate _BBN\n"); result = -ENODEV; goto end; } } INIT_LIST_HEAD(&root->node); root->device = device; root->segment = segment & 0xFFFF; strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); device->driver_data = root; /* * All supported architectures that use ACPI have support for * PCI domains, so we indicate this in _OSC support capabilities. */ flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; acpi_pci_osc_support(root, flags); /* * TBD: Need PCI interface for enumeration/configuration of roots. */ /* TBD: Locking */ list_add_tail(&root->node, &acpi_pci_roots); printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n", acpi_device_name(device), acpi_device_bid(device), root->segment, &root->secondary); /* * Scan the Root Bridge * -------------------- * Must do this prior to any attempt to bind the root device, as the * PCI namespace does not get created until this call is made (and * thus the root bridge's pci_dev does not exist). */ root->bus = pci_acpi_scan_root(root); if (!root->bus) { printk(KERN_ERR PREFIX "Bus %04x:%02x not present in PCI namespace\n", root->segment, (unsigned int)root->secondary.start); result = -ENODEV; goto end; } /* * Attach ACPI-PCI Context * ----------------------- * Thus binding the ACPI and PCI devices. */ result = acpi_pci_bind_root(device); if (result) goto end; /* * PCI Routing Table * ----------------- * Evaluate and parse _PRT, if exists. 
*/ status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); if (ACPI_SUCCESS(status)) result = acpi_pci_irq_add_prt(device->handle, root->bus); /* * Scan and bind all _ADR-Based Devices */ list_for_each_entry(child, &device->children, node) acpi_pci_bridge_scan(child); /* Indicate support for various _OSC capabilities. */ if (pci_ext_cfg_avail(root->bus->self)) flags |= OSC_EXT_PCI_CONFIG_SUPPORT; if (pcie_aspm_support_enabled()) flags |= OSC_ACTIVE_STATE_PWR_SUPPORT | OSC_CLOCK_PWR_CAPABILITY_SUPPORT; if (pci_msi_enabled()) flags |= OSC_MSI_SUPPORT; if (flags != base_flags) acpi_pci_osc_support(root, flags); if (!pcie_ports_disabled && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | OSC_PCI_EXPRESS_PME_CONTROL; if (pci_aer_available()) { if (aer_acpi_firmware_first()) dev_dbg(root->bus->bridge, "PCIe errors handled by BIOS.\n"); else flags |= OSC_PCI_EXPRESS_AER_CONTROL; } dev_info(root->bus->bridge, "Requesting ACPI _OSC control (0x%02x)\n", flags); status = acpi_pci_osc_control_set(device->handle, &flags, OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); if (ACPI_SUCCESS(status)) { dev_info(root->bus->bridge, "ACPI _OSC control (0x%02x) granted\n", flags); if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { /* * We have ASPM control, but the FADT indicates * that it's unsupported. Clear it. 
*/ pcie_clear_aspm(root->bus); } } else { dev_info(root->bus->bridge, "ACPI _OSC request failed (%s), " "returned control mask: 0x%02x\n", acpi_format_exception(status), flags); pr_info("ACPI _OSC control for PCIe not granted, " "disabling ASPM\n"); pcie_no_aspm(); } } else { dev_info(root->bus->bridge, "Unable to request _OSC control " "(_OSC support mask: 0x%02x)\n", flags); } pci_acpi_add_bus_pm_notifier(device, root->bus); if (device->wakeup.flags.run_wake) device_set_run_wake(root->bus->bridge, true); return 0; end: if (!list_empty(&root->node)) list_del(&root->node); kfree(root); return result; } static int acpi_pci_root_start(struct acpi_device *device) { struct acpi_pci_root *root = acpi_driver_data(device); pci_bus_add_devices(root->bus); return 0; } static int acpi_pci_root_remove(struct acpi_device *device, int type) { struct acpi_pci_root *root = acpi_driver_data(device); device_set_run_wake(root->bus->bridge, false); pci_acpi_remove_bus_pm_notifier(device); kfree(root); return 0; } static int __init acpi_pci_root_init(void) { acpi_hest_init(); if (acpi_pci_disabled) return 0; pci_acpi_crs_quirks(); if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0) return -ENODEV; return 0; } subsys_initcall(acpi_pci_root_init);
gpl-2.0
vitaliyy/samsung-kernel-ariesve
drivers/gpu/drm/nouveau/nv04_fbcon.c
853
8396
/* * Copyright 2009 Ben Skeggs * Copyright 2008 Stuart Bennett * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_dma.h" #include "nouveau_fbcon.h" void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; if (info->state != FBINFO_STATE_RUNNING) return; if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { nouveau_fbcon_gpu_lockup(info); } if (info->flags & FBINFO_HWACCEL_DISABLED) { cfb_copyarea(info, region); return; } BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); OUT_RING(chan, (region->sy << 16) | region->sx); OUT_RING(chan, (region->dy << 16) | region->dx); OUT_RING(chan, (region->height << 16) | region->width); FIRE_RING(chan); } void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; if (info->state != FBINFO_STATE_RUNNING) return; if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { nouveau_fbcon_gpu_lockup(info); } if (info->flags & FBINFO_HWACCEL_DISABLED) { cfb_fillrect(info, rect); return; } BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); OUT_RING(chan, (rect->rop != ROP_COPY) ? 
1 : 3); BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]); else OUT_RING(chan, rect->color); BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); OUT_RING(chan, (rect->dx << 16) | rect->dy); OUT_RING(chan, (rect->width << 16) | rect->height); FIRE_RING(chan); } void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; uint32_t fg; uint32_t bg; uint32_t dsize; uint32_t width; uint32_t *data = (uint32_t *)image->data; if (info->state != FBINFO_STATE_RUNNING) return; if (image->depth != 1) { cfb_imageblit(info, image); return; } if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { nouveau_fbcon_gpu_lockup(info); } if (info->flags & FBINFO_HWACCEL_DISABLED) { cfb_imageblit(info, image); return; } width = ALIGN(image->width, 8); dsize = ALIGN(width * image->height, 32) >> 5; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; bg = ((uint32_t *) info->pseudo_palette)[image->bg_color]; } else { fg = image->fg_color; bg = image->bg_color; } BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7); OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); OUT_RING(chan, ((image->dy + image->height) << 16) | ((image->dx + image->width) & 0xffff)); OUT_RING(chan, bg); OUT_RING(chan, fg); OUT_RING(chan, (image->height << 16) | width); OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); while (dsize) { int iter_len = dsize > 128 ? 
128 : dsize; if (RING_SPACE(chan, iter_len + 1)) { nouveau_fbcon_gpu_lockup(info); cfb_imageblit(info, image); return; } BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); OUT_RINGp(chan, data, iter_len); data += iter_len; dsize -= iter_len; } FIRE_RING(chan); } static int nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *obj = NULL; int ret; ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj); if (ret) return ret; ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL); if (ret) return ret; return 0; } int nv04_fbcon_accel_init(struct fb_info *info) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; const int sub = NvSubCtxSurf2D; int surface_fmt, pattern_fmt, rect_fmt; int ret; switch (info->var.bits_per_pixel) { case 8: surface_fmt = 1; pattern_fmt = 3; rect_fmt = 3; break; case 16: surface_fmt = 4; pattern_fmt = 1; rect_fmt = 1; break; case 32: switch (info->var.transp.length) { case 0: /* depth 24 */ case 8: /* depth 32 */ break; default: return -EINVAL; } surface_fmt = 6; pattern_fmt = 3; rect_fmt = 3; break; default: return -EINVAL; } ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? 0x0062 : 0x0042, NvCtxSurf2D); if (ret) return ret; ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect); if (ret) return ret; ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop); if (ret) return ret; ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt); if (ret) return ret; ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect); if (ret) return ret; ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ? 
0x009f : 0x005f, NvImageBlit); if (ret) return ret; if (RING_SPACE(chan, 49)) { nouveau_fbcon_gpu_lockup(info); return 0; } BEGIN_RING(chan, sub, 0x0000, 1); OUT_RING(chan, NvCtxSurf2D); BEGIN_RING(chan, sub, 0x0184, 2); OUT_RING(chan, NvDmaFB); OUT_RING(chan, NvDmaFB); BEGIN_RING(chan, sub, 0x0300, 4); OUT_RING(chan, surface_fmt); OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); BEGIN_RING(chan, sub, 0x0000, 1); OUT_RING(chan, NvRop); BEGIN_RING(chan, sub, 0x0300, 1); OUT_RING(chan, 0x55); BEGIN_RING(chan, sub, 0x0000, 1); OUT_RING(chan, NvImagePatt); BEGIN_RING(chan, sub, 0x0300, 8); OUT_RING(chan, pattern_fmt); #ifdef __BIG_ENDIAN OUT_RING(chan, 2); #else OUT_RING(chan, 1); #endif OUT_RING(chan, 0); OUT_RING(chan, 1); OUT_RING(chan, ~0); OUT_RING(chan, ~0); OUT_RING(chan, ~0); OUT_RING(chan, ~0); BEGIN_RING(chan, sub, 0x0000, 1); OUT_RING(chan, NvClipRect); BEGIN_RING(chan, sub, 0x0300, 2); OUT_RING(chan, 0); OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1); OUT_RING(chan, NvImageBlit); BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1); OUT_RING(chan, NvCtxSurf2D); BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1); OUT_RING(chan, 3); BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1); OUT_RING(chan, NvGdiRect); BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1); OUT_RING(chan, NvCtxSurf2D); BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2); OUT_RING(chan, NvImagePatt); OUT_RING(chan, NvRop); BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1); OUT_RING(chan, 1); BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1); OUT_RING(chan, rect_fmt); BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); OUT_RING(chan, 3); FIRE_RING(chan); return 0; }
gpl-2.0
anoane/Ultrakernel
arch/x86/kernel/cpu/cpufreq/longhaul.c
853
26622
/* * (C) 2001-2004 Dave Jones. <davej@redhat.com> * (C) 2002 Padraig Brady. <padraig@antefacto.com> * * Licensed under the terms of the GNU GPL License version 2. * Based upon datasheets & sample CPUs kindly provided by VIA. * * VIA have currently 3 different versions of Longhaul. * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147. * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0. * Version 2 of longhaul is backward compatible with v1, but adds * LONGHAUL MSR for purpose of both frequency and voltage scaling. * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C). * Version 3 of longhaul got renamed to Powersaver and redesigned * to use only the POWERSAVER MSR at 0x110a. * It is present in Ezra-T (C5M), Nehemiah (C5X) and above. * It's pretty much the same feature wise to longhaul v2, though * there is provision for scaling FSB too, but this doesn't work * too well in practice so we don't even try to use this. * * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/timex.h> #include <linux/io.h> #include <linux/acpi.h> #include <asm/msr.h> #include <acpi/processor.h> #include "longhaul.h" #define PFX "longhaul: " #define TYPE_LONGHAUL_V1 1 #define TYPE_LONGHAUL_V2 2 #define TYPE_POWERSAVER 3 #define CPU_SAMUEL 1 #define CPU_SAMUEL2 2 #define CPU_EZRA 3 #define CPU_EZRA_T 4 #define CPU_NEHEMIAH 5 #define CPU_NEHEMIAH_C 6 /* Flags */ #define USE_ACPI_C3 (1 << 1) #define USE_NORTHBRIDGE (1 << 2) static int cpu_model; static unsigned int numscales = 16; static unsigned int fsb; static const struct mV_pos *vrm_mV_table; static const unsigned char *mV_vrm_table; static unsigned int highest_speed, lowest_speed; /* kHz */ static unsigned int minmult, maxmult; static int can_scale_voltage; 
static struct acpi_processor *pr; static struct acpi_processor_cx *cx; static u32 acpi_regs_addr; static u8 longhaul_flags; static unsigned int longhaul_index; /* Module parameters */ static int scale_voltage; static int disable_acpi_c3; static int revid_errata; #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ "longhaul", msg) /* Clock ratios multiplied by 10 */ static int mults[32]; static int eblcr[32]; static int longhaul_version; static struct cpufreq_frequency_table *longhaul_table; #ifdef CONFIG_CPU_FREQ_DEBUG static char speedbuffer[8]; static char *print_speed(int speed) { if (speed < 1000) { snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed); return speedbuffer; } if (speed%1000 == 0) snprintf(speedbuffer, sizeof(speedbuffer), "%dGHz", speed/1000); else snprintf(speedbuffer, sizeof(speedbuffer), "%d.%dGHz", speed/1000, (speed%1000)/100); return speedbuffer; } #endif static unsigned int calc_speed(int mult) { int khz; khz = (mult/10)*fsb; if (mult%10) khz += fsb/2; khz *= 1000; return khz; } static int longhaul_get_cpu_mult(void) { unsigned long invalue = 0, lo, hi; rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi); invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22; if (longhaul_version == TYPE_LONGHAUL_V2 || longhaul_version == TYPE_POWERSAVER) { if (lo & (1<<27)) invalue += 16; } return eblcr[invalue]; } /* For processor with BCR2 MSR */ static void do_longhaul1(unsigned int mults_index) { union msr_bcr2 bcr2; rdmsrl(MSR_VIA_BCR2, bcr2.val); /* Enable software clock multiplier */ bcr2.bits.ESOFTBF = 1; bcr2.bits.CLOCKMUL = mults_index & 0xff; /* Sync to timer tick */ safe_halt(); /* Change frequency on next halt or sleep */ wrmsrl(MSR_VIA_BCR2, bcr2.val); /* Invoke transition */ ACPI_FLUSH_CPU_CACHE(); halt(); /* Disable software clock multiplier */ local_irq_disable(); rdmsrl(MSR_VIA_BCR2, bcr2.val); bcr2.bits.ESOFTBF = 0; wrmsrl(MSR_VIA_BCR2, bcr2.val); } /* For processor with Longhaul MSR */ static void do_powersaver(int cx_address, 
unsigned int mults_index, unsigned int dir) { union msr_longhaul longhaul; u32 t; rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Setup new frequency */ if (!revid_errata) longhaul.bits.RevisionKey = longhaul.bits.RevisionID; else longhaul.bits.RevisionKey = 0; longhaul.bits.SoftBusRatio = mults_index & 0xf; longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4; /* Setup new voltage */ if (can_scale_voltage) longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f; /* Sync to timer tick */ safe_halt(); /* Raise voltage if necessary */ if (can_scale_voltage && dir) { longhaul.bits.EnableSoftVID = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); } else { ACPI_FLUSH_CPU_CACHE(); /* Invoke C3 */ inb(cx_address); /* Dummy op - must do something useless after P_LVL3 * read */ t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); } /* Change frequency on next halt or sleep */ longhaul.bits.EnableSoftBusRatio = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); } else { ACPI_FLUSH_CPU_CACHE(); /* Invoke C3 */ inb(cx_address); /* Dummy op - must do something useless after P_LVL3 read */ t = inl(acpi_gbl_FADT.xpm_timer_block.address); } /* Disable bus ratio bit */ longhaul.bits.EnableSoftBusRatio = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Reduce voltage if necessary */ if (can_scale_voltage && !dir) { longhaul.bits.EnableSoftVID = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); } else { ACPI_FLUSH_CPU_CACHE(); /* Invoke C3 */ inb(cx_address); /* Dummy op - must do something useless after P_LVL3 * read */ t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); } } /** * longhaul_set_cpu_frequency() * @mults_index : bitpattern of the new multiplier. 
* * Sets a new clock ratio. */ static void longhaul_setstate(unsigned int table_index) { unsigned int mults_index; int speed, mult; struct cpufreq_freqs freqs; unsigned long flags; unsigned int pic1_mask, pic2_mask; u16 bm_status = 0; u32 bm_timeout = 1000; unsigned int dir = 0; mults_index = longhaul_table[table_index].index; /* Safety precautions */ mult = mults[mults_index & 0x1f]; if (mult == -1) return; speed = calc_speed(mult); if ((speed > highest_speed) || (speed < lowest_speed)) return; /* Voltage transition before frequency transition? */ if (can_scale_voltage && longhaul_index < table_index) dir = 1; freqs.old = calc_speed(longhaul_get_cpu_mult()); freqs.new = speed; freqs.cpu = 0; /* longhaul.c is UP only driver */ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: preempt_disable(); local_irq_save(flags); pic2_mask = inb(0xA1); pic1_mask = inb(0x21); /* works on C3. save mask. */ outb(0xFF, 0xA1); /* Overkill */ outb(0xFE, 0x21); /* TMR0 only */ /* Wait while PCI bus is busy. */ if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE || ((pr != NULL) && pr->flags.bm_control))) { bm_status = inw(acpi_regs_addr); bm_status &= 1 << 4; while (bm_status && bm_timeout) { outw(1 << 4, acpi_regs_addr); bm_timeout--; bm_status = inw(acpi_regs_addr); bm_status &= 1 << 4; } } if (longhaul_flags & USE_NORTHBRIDGE) { /* Disable AGP and PCI arbiters */ outb(3, 0x22); } else if ((pr != NULL) && pr->flags.bm_control) { /* Disable bus master arbitration */ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); } switch (longhaul_version) { /* * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B]) * Software controlled multipliers only. */ case TYPE_LONGHAUL_V1: do_longhaul1(mults_index); break; /* * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C] * * Longhaul v3 (aka Powersaver). 
(Ezra-T [C5M] & Nehemiah [C5N]) * Nehemiah can do FSB scaling too, but this has never been proven * to work in practice. */ case TYPE_LONGHAUL_V2: case TYPE_POWERSAVER: if (longhaul_flags & USE_ACPI_C3) { /* Don't allow wakeup */ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0); do_powersaver(cx->address, mults_index, dir); } else { do_powersaver(0, mults_index, dir); } break; } if (longhaul_flags & USE_NORTHBRIDGE) { /* Enable arbiters */ outb(0, 0x22); } else if ((pr != NULL) && pr->flags.bm_control) { /* Enable bus master arbitration */ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); } outb(pic2_mask, 0xA1); /* restore mask */ outb(pic1_mask, 0x21); local_irq_restore(flags); preempt_enable(); freqs.new = calc_speed(longhaul_get_cpu_mult()); /* Check if requested frequency is set. */ if (unlikely(freqs.new != speed)) { printk(KERN_INFO PFX "Failed to set requested frequency!\n"); /* Revision ID = 1 but processor is expecting revision key * equal to 0. Jumpers at the bottom of processor will change * multiplier and FSB, but will not change bits in Longhaul * MSR nor enable voltage scaling. */ if (!revid_errata) { printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" " "option.\n"); revid_errata = 1; msleep(200); goto retry_loop; } /* Why ACPI C3 sometimes doesn't work is a mystery for me. * But it does happen. Processor is entering ACPI C3 state, * but it doesn't change frequency. I tried poking various * bits in northbridge registers, but without success. */ if (longhaul_flags & USE_ACPI_C3) { printk(KERN_INFO PFX "Disabling ACPI C3 support.\n"); longhaul_flags &= ~USE_ACPI_C3; if (revid_errata) { printk(KERN_INFO PFX "Disabling \"Ignore " "Revision ID\" option.\n"); revid_errata = 0; } msleep(200); goto retry_loop; } /* This shouldn't happen. Longhaul ver. 2 was reported not * working on processors without voltage scaling, but with * RevID = 1. RevID errata will make things right. Just * to be 100% sure. 
*/ if (longhaul_version == TYPE_LONGHAUL_V2) { printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n"); longhaul_version = TYPE_LONGHAUL_V1; msleep(200); goto retry_loop; } } /* Report true CPU frequency */ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); if (!bm_timeout) printk(KERN_INFO PFX "Warning: Timeout while waiting for " "idle PCI bus.\n"); } /* * Centaur decided to make life a little more tricky. * Only longhaul v1 is allowed to read EBLCR BSEL[0:1]. * Samuel2 and above have to try and guess what the FSB is. * We do this by assuming we booted at maximum multiplier, and interpolate * between that value multiplied by possible FSBs and cpu_mhz which * was calculated at boot time. Really ugly, but no other way to do this. */ #define ROUNDING 0xf static int guess_fsb(int mult) { int speed = cpu_khz / 1000; int i; int speeds[] = { 666, 1000, 1333, 2000 }; int f_max, f_min; for (i = 0; i < 4; i++) { f_max = ((speeds[i] * mult) + 50) / 100; f_max += (ROUNDING / 2); f_min = f_max - ROUNDING; if ((speed <= f_max) && (speed >= f_min)) return speeds[i] / 10; } return 0; } static int __init longhaul_get_ranges(void) { unsigned int i, j, k = 0; unsigned int ratio; int mult; /* Get current frequency */ mult = longhaul_get_cpu_mult(); if (mult == -1) { printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n"); return -EINVAL; } fsb = guess_fsb(mult); if (fsb == 0) { printk(KERN_INFO PFX "Invalid (reserved) FSB!\n"); return -EINVAL; } /* Get max multiplier - as we always did. * Longhaul MSR is usefull only when voltage scaling is enabled. * C3 is booting at max anyway. 
*/ maxmult = mult; /* Get min multiplier */ switch (cpu_model) { case CPU_NEHEMIAH: minmult = 50; break; case CPU_NEHEMIAH_C: minmult = 40; break; default: minmult = 30; break; } dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n", minmult/10, minmult%10, maxmult/10, maxmult%10); highest_speed = calc_speed(maxmult); lowest_speed = calc_speed(minmult); dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, print_speed(lowest_speed/1000), print_speed(highest_speed/1000)); if (lowest_speed == highest_speed) { printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n"); return -EINVAL; } if (lowest_speed > highest_speed) { printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n", lowest_speed, highest_speed); return -EINVAL; } longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table), GFP_KERNEL); if (!longhaul_table) return -ENOMEM; for (j = 0; j < numscales; j++) { ratio = mults[j]; if (ratio == -1) continue; if (ratio > maxmult || ratio < minmult) continue; longhaul_table[k].frequency = calc_speed(ratio); longhaul_table[k].index = j; k++; } if (k <= 1) { kfree(longhaul_table); return -ENODEV; } /* Sort */ for (j = 0; j < k - 1; j++) { unsigned int min_f, min_i; min_f = longhaul_table[j].frequency; min_i = j; for (i = j + 1; i < k; i++) { if (longhaul_table[i].frequency < min_f) { min_f = longhaul_table[i].frequency; min_i = i; } } if (min_i != j) { swap(longhaul_table[j].frequency, longhaul_table[min_i].frequency); swap(longhaul_table[j].index, longhaul_table[min_i].index); } } longhaul_table[k].frequency = CPUFREQ_TABLE_END; /* Find index we are running on */ for (j = 0; j < k; j++) { if (mults[longhaul_table[j].index & 0x1f] == mult) { longhaul_index = j; break; } } return 0; } static void __init longhaul_setup_voltagescaling(void) { union msr_longhaul longhaul; struct mV_pos minvid, maxvid, vid; unsigned int j, speed, pos, kHz_step, numvscales; int min_vid_speed; rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); if (!(longhaul.bits.RevisionID & 1)) { printk(KERN_INFO 
PFX "Voltage scaling not supported by CPU.\n"); return; } if (!longhaul.bits.VRMRev) { printk(KERN_INFO PFX "VRM 8.5\n"); vrm_mV_table = &vrm85_mV[0]; mV_vrm_table = &mV_vrm85[0]; } else { printk(KERN_INFO PFX "Mobile VRM\n"); if (cpu_model < CPU_NEHEMIAH) return; vrm_mV_table = &mobilevrm_mV[0]; mV_vrm_table = &mV_mobilevrm[0]; } minvid = vrm_mV_table[longhaul.bits.MinimumVID]; maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " "Voltage scaling disabled.\n", minvid.mV/1000, minvid.mV%1000, maxvid.mV/1000, maxvid.mV%1000); return; } if (minvid.mV == maxvid.mV) { printk(KERN_INFO PFX "Claims to support voltage scaling but " "min & max are both %d.%03d. " "Voltage scaling disabled\n", maxvid.mV/1000, maxvid.mV%1000); return; } /* How many voltage steps*/ numvscales = maxvid.pos - minvid.pos + 1; printk(KERN_INFO PFX "Max VID=%d.%03d " "Min VID=%d.%03d, " "%d possible voltage scales\n", maxvid.mV/1000, maxvid.mV%1000, minvid.mV/1000, minvid.mV%1000, numvscales); /* Calculate max frequency at min voltage */ j = longhaul.bits.MinMHzBR; if (longhaul.bits.MinMHzBR4) j += 16; min_vid_speed = eblcr[j]; if (min_vid_speed == -1) return; switch (longhaul.bits.MinMHzFSB) { case 0: min_vid_speed *= 13333; break; case 1: min_vid_speed *= 10000; break; case 3: min_vid_speed *= 6666; break; default: return; break; } if (min_vid_speed >= highest_speed) return; /* Calculate kHz for one voltage step */ kHz_step = (highest_speed - min_vid_speed) / numvscales; j = 0; while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { speed = longhaul_table[j].frequency; if (speed > min_vid_speed) pos = (speed - min_vid_speed) / kHz_step + minvid.pos; else pos = minvid.pos; longhaul_table[j].index |= mV_vrm_table[pos] << 8; vid = vrm_mV_table[mV_vrm_table[pos]]; printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", speed, j, vid.mV); j++; } can_scale_voltage = 1; 
printk(KERN_INFO PFX "Voltage scaling enabled.\n"); } static int longhaul_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, longhaul_table); } static int longhaul_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int table_index = 0; unsigned int i; unsigned int dir = 0; u8 vid, current_vid; if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index)) return -EINVAL; /* Don't set same frequency again */ if (longhaul_index == table_index) return 0; if (!can_scale_voltage) longhaul_setstate(table_index); else { /* On test system voltage transitions exceeding single * step up or down were turning motherboard off. Both * "ondemand" and "userspace" are unsafe. C7 is doing * this in hardware, C3 is old and we need to do this * in software. */ i = longhaul_index; current_vid = (longhaul_table[longhaul_index].index >> 8); current_vid &= 0x1f; if (table_index > longhaul_index) dir = 1; while (i != table_index) { vid = (longhaul_table[i].index >> 8) & 0x1f; if (vid != current_vid) { longhaul_setstate(i); current_vid = vid; msleep(200); } if (dir) i++; else i--; } longhaul_setstate(table_index); } longhaul_index = table_index; return 0; } static unsigned int longhaul_get(unsigned int cpu) { if (cpu) return 0; return calc_speed(longhaul_get_cpu_mult()); } static acpi_status longhaul_walk_callback(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { struct acpi_device *d; if (acpi_bus_get_device(obj_handle, &d)) return 0; *return_value = acpi_driver_data(d); return 1; } /* VIA don't support PM2 reg, but have something similar */ static int enable_arbiter_disable(void) { struct pci_dev *dev; int status = 1; int reg; u8 pci_cmd; /* Find PLE133 host bridge */ reg = 0x78; dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, NULL); /* Find PM133/VT8605 host bridge */ if (dev == NULL) dev = pci_get_device(PCI_VENDOR_ID_VIA, 
PCI_DEVICE_ID_VIA_8605_0, NULL); /* Find CLE266 host bridge */ if (dev == NULL) { reg = 0x76; dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL); /* Find CN400 V-Link host bridge */ if (dev == NULL) dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL); } if (dev != NULL) { /* Enable access to port 0x22 */ pci_read_config_byte(dev, reg, &pci_cmd); if (!(pci_cmd & 1<<7)) { pci_cmd |= 1<<7; pci_write_config_byte(dev, reg, pci_cmd); pci_read_config_byte(dev, reg, &pci_cmd); if (!(pci_cmd & 1<<7)) { printk(KERN_ERR PFX "Can't enable access to port 0x22.\n"); status = 0; } } pci_dev_put(dev); return status; } return 0; } static int longhaul_setup_southbridge(void) { struct pci_dev *dev; u8 pci_cmd; /* Find VT8235 southbridge */ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL); if (dev == NULL) /* Find VT8237 southbridge */ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, NULL); if (dev != NULL) { /* Set transition time to max */ pci_read_config_byte(dev, 0xec, &pci_cmd); pci_cmd &= ~(1 << 2); pci_write_config_byte(dev, 0xec, pci_cmd); pci_read_config_byte(dev, 0xe4, &pci_cmd); pci_cmd &= ~(1 << 7); pci_write_config_byte(dev, 0xe4, pci_cmd); pci_read_config_byte(dev, 0xe5, &pci_cmd); pci_cmd |= 1 << 7; pci_write_config_byte(dev, 0xe5, pci_cmd); /* Get address of ACPI registers block*/ pci_read_config_byte(dev, 0x81, &pci_cmd); if (pci_cmd & 1 << 7) { pci_read_config_dword(dev, 0x88, &acpi_regs_addr); acpi_regs_addr &= 0xff00; printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", acpi_regs_addr); } pci_dev_put(dev); return 1; } return 0; } static int __init longhaul_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *c = &cpu_data(0); char *cpuname = NULL; int ret; u32 lo, hi; /* Check what we have on this motherboard */ switch (c->x86_model) { case 6: cpu_model = CPU_SAMUEL; cpuname = "C3 'Samuel' [C5A]"; longhaul_version = TYPE_LONGHAUL_V1; memcpy(mults, samuel1_mults, sizeof(samuel1_mults)); memcpy(eblcr, 
samuel1_eblcr, sizeof(samuel1_eblcr)); break; case 7: switch (c->x86_mask) { case 0: longhaul_version = TYPE_LONGHAUL_V1; cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; /* Note, this is not a typo, early Samuel2's had * Samuel1 ratios. */ memcpy(mults, samuel1_mults, sizeof(samuel1_mults)); memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr)); break; case 1 ... 15: longhaul_version = TYPE_LONGHAUL_V2; if (c->x86_mask < 8) { cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; } else { cpu_model = CPU_EZRA; cpuname = "C3 'Ezra' [C5C]"; } memcpy(mults, ezra_mults, sizeof(ezra_mults)); memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr)); break; } break; case 8: cpu_model = CPU_EZRA_T; cpuname = "C3 'Ezra-T' [C5M]"; longhaul_version = TYPE_POWERSAVER; numscales = 32; memcpy(mults, ezrat_mults, sizeof(ezrat_mults)); memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr)); break; case 9: longhaul_version = TYPE_POWERSAVER; numscales = 32; memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); switch (c->x86_mask) { case 0 ... 1: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah A' [C5XLOE]"; break; case 2 ... 4: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah B' [C5XLOH]"; break; case 5 ... 15: cpu_model = CPU_NEHEMIAH_C; cpuname = "C3 'Nehemiah C' [C5P]"; break; } break; default: cpuname = "Unknown"; break; } /* Check Longhaul ver. 2 */ if (longhaul_version == TYPE_LONGHAUL_V2) { rdmsr(MSR_VIA_LONGHAUL, lo, hi); if (lo == 0 && hi == 0) /* Looks like MSR isn't present */ longhaul_version = TYPE_LONGHAUL_V1; } printk(KERN_INFO PFX "VIA %s CPU detected. 
", cpuname); switch (longhaul_version) { case TYPE_LONGHAUL_V1: case TYPE_LONGHAUL_V2: printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version); break; case TYPE_POWERSAVER: printk(KERN_CONT "Powersaver supported.\n"); break; }; /* Doesn't hurt */ longhaul_setup_southbridge(); /* Find ACPI data for processor */ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, &longhaul_walk_callback, NULL, NULL, (void *)&pr); /* Check ACPI support for C3 state */ if (pr != NULL && longhaul_version == TYPE_POWERSAVER) { cx = &pr->power.states[ACPI_STATE_C3]; if (cx->address > 0 && cx->latency <= 1000) longhaul_flags |= USE_ACPI_C3; } /* Disable if it isn't working */ if (disable_acpi_c3) longhaul_flags &= ~USE_ACPI_C3; /* Check if northbridge is friendly */ if (enable_arbiter_disable()) longhaul_flags |= USE_NORTHBRIDGE; /* Check ACPI support for bus master arbiter disable */ if (!(longhaul_flags & USE_ACPI_C3 || longhaul_flags & USE_NORTHBRIDGE) && ((pr == NULL) || !(pr->flags.bm_control))) { printk(KERN_ERR PFX "No ACPI support. 
Unsupported northbridge.\n"); return -ENODEV; } if (longhaul_flags & USE_NORTHBRIDGE) printk(KERN_INFO PFX "Using northbridge support.\n"); if (longhaul_flags & USE_ACPI_C3) printk(KERN_INFO PFX "Using ACPI support.\n"); ret = longhaul_get_ranges(); if (ret != 0) return ret; if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) longhaul_setup_voltagescaling(); policy->cpuinfo.transition_latency = 200000; /* nsec */ policy->cur = calc_speed(longhaul_get_cpu_mult()); ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table); if (ret) return ret; cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); return 0; } static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static struct freq_attr *longhaul_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver longhaul_driver = { .verify = longhaul_verify, .target = longhaul_target, .get = longhaul_get, .init = longhaul_cpu_init, .exit = __devexit_p(longhaul_cpu_exit), .name = "longhaul", .owner = THIS_MODULE, .attr = longhaul_attr, }; static int __init longhaul_init(void) { struct cpuinfo_x86 *c = &cpu_data(0); if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) return -ENODEV; #ifdef CONFIG_SMP if (num_online_cpus() > 1) { printk(KERN_ERR PFX "More than 1 CPU detected, " "longhaul disabled.\n"); return -ENODEV; } #endif #ifdef CONFIG_X86_IO_APIC if (cpu_has_apic) { printk(KERN_ERR PFX "APIC detected. Longhaul is currently " "broken in this configuration.\n"); return -ENODEV; } #endif switch (c->x86_model) { case 6 ... 
9: return cpufreq_register_driver(&longhaul_driver); case 10: printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n"); default: ; } return -ENODEV; } static void __exit longhaul_exit(void) { int i; for (i = 0; i < numscales; i++) { if (mults[i] == maxmult) { longhaul_setstate(i); break; } } cpufreq_unregister_driver(&longhaul_driver); kfree(longhaul_table); } /* Even if BIOS is exporting ACPI C3 state, and it is used * with success when CPU is idle, this state doesn't * trigger frequency transition in some cases. */ module_param(disable_acpi_c3, int, 0644); MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); /* Change CPU voltage with frequency. Very usefull to save * power, but most VIA C3 processors aren't supporting it. */ module_param(scale_voltage, int, 0644); MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); /* Force revision key to 0 for processors which doesn't * support voltage scaling, but are introducing itself as * such. */ module_param(revid_errata, int, 0644); MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID"); MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors."); MODULE_LICENSE("GPL"); late_initcall(longhaul_init); module_exit(longhaul_exit);
gpl-2.0
Maroc-OS/Merruk-Technology
common/arch/arm/mach-ixp2000/ixdp2x01.c
853
12058
/* * arch/arm/mach-ixp2000/ixdp2x01.c * * Code common to Intel IXDP2401 and IXDP2801 platforms * * Original Author: Andrzej Mialkowski <andrzej.mialkowski@intel.com> * Maintainer: Deepak Saxena <dsaxena@plexity.net> * * Copyright (C) 2002-2003 Intel Corp. * Copyright (C) 2003-2004 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/serial.h> #include <linux/tty.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/system.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/pci.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> /************************************************************************* * IXDP2x01 IRQ Handling *************************************************************************/ static void ixdp2x01_irq_mask(unsigned int irq) { ixp2000_reg_wrb(IXDP2X01_INT_MASK_SET_REG, IXP2000_BOARD_IRQ_MASK(irq)); } static void ixdp2x01_irq_unmask(unsigned int irq) { ixp2000_reg_write(IXDP2X01_INT_MASK_CLR_REG, IXP2000_BOARD_IRQ_MASK(irq)); } static u32 valid_irq_mask; static void ixdp2x01_irq_handler(unsigned int irq, struct irq_desc *desc) { u32 ex_interrupt; int i; desc->chip->mask(irq); ex_interrupt = *IXDP2X01_INT_STAT_REG & valid_irq_mask; if (!ex_interrupt) { printk(KERN_ERR "Spurious IXDP2X01 CPLD interrupt!\n"); return; } for (i = 0; i < 
IXP2000_BOARD_IRQS; i++) { if (ex_interrupt & (1 << i)) { int cpld_irq = IXP2000_BOARD_IRQ(0) + i; generic_handle_irq(cpld_irq); } } desc->chip->unmask(irq); } static struct irq_chip ixdp2x01_irq_chip = { .mask = ixdp2x01_irq_mask, .ack = ixdp2x01_irq_mask, .unmask = ixdp2x01_irq_unmask }; /* * We only do anything if we are the master NPU on the board. * The slave NPU only has the ethernet chip going directly to * the PCIB interrupt input. */ void __init ixdp2x01_init_irq(void) { int irq = 0; /* initialize chip specific interrupts */ ixp2000_init_irq(); if (machine_is_ixdp2401()) valid_irq_mask = IXDP2401_VALID_IRQ_MASK; else valid_irq_mask = IXDP2801_VALID_IRQ_MASK; /* Mask all interrupts from CPLD, disable simulation */ ixp2000_reg_write(IXDP2X01_INT_MASK_SET_REG, 0xffffffff); ixp2000_reg_wrb(IXDP2X01_INT_SIM_REG, 0); for (irq = NR_IXP2000_IRQS; irq < NR_IXDP2X01_IRQS; irq++) { if (irq & valid_irq_mask) { set_irq_chip(irq, &ixdp2x01_irq_chip); set_irq_handler(irq, handle_level_irq); set_irq_flags(irq, IRQF_VALID); } else { set_irq_flags(irq, 0); } } /* Hook into PCI interrupts */ set_irq_chained_handler(IRQ_IXP2000_PCIB, ixdp2x01_irq_handler); } /************************************************************************* * IXDP2x01 memory map *************************************************************************/ static struct map_desc ixdp2x01_io_desc __initdata = { .virtual = IXDP2X01_VIRT_CPLD_BASE, .pfn = __phys_to_pfn(IXDP2X01_PHYS_CPLD_BASE), .length = IXDP2X01_CPLD_REGION_SIZE, .type = MT_DEVICE }; static void __init ixdp2x01_map_io(void) { ixp2000_map_io(); iotable_init(&ixdp2x01_io_desc, 1); } /************************************************************************* * IXDP2x01 serial ports *************************************************************************/ static struct plat_serial8250_port ixdp2x01_serial_port1[] = { { .mapbase = (unsigned long)IXDP2X01_UART1_PHYS_BASE, .membase = (char *)IXDP2X01_UART1_VIRT_BASE, .irq = IRQ_IXDP2X01_UART1, 
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM32, .regshift = 2, .uartclk = IXDP2X01_UART_CLK, }, { } }; static struct resource ixdp2x01_uart_resource1 = { .start = IXDP2X01_UART1_PHYS_BASE, .end = IXDP2X01_UART1_PHYS_BASE + 0xffff, .flags = IORESOURCE_MEM, }; static struct platform_device ixdp2x01_serial_device1 = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM1, .dev = { .platform_data = ixdp2x01_serial_port1, }, .num_resources = 1, .resource = &ixdp2x01_uart_resource1, }; static struct plat_serial8250_port ixdp2x01_serial_port2[] = { { .mapbase = (unsigned long)IXDP2X01_UART2_PHYS_BASE, .membase = (char *)IXDP2X01_UART2_VIRT_BASE, .irq = IRQ_IXDP2X01_UART2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM32, .regshift = 2, .uartclk = IXDP2X01_UART_CLK, }, { } }; static struct resource ixdp2x01_uart_resource2 = { .start = IXDP2X01_UART2_PHYS_BASE, .end = IXDP2X01_UART2_PHYS_BASE + 0xffff, .flags = IORESOURCE_MEM, }; static struct platform_device ixdp2x01_serial_device2 = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM2, .dev = { .platform_data = ixdp2x01_serial_port2, }, .num_resources = 1, .resource = &ixdp2x01_uart_resource2, }; static void ixdp2x01_uart_init(void) { platform_device_register(&ixdp2x01_serial_device1); platform_device_register(&ixdp2x01_serial_device2); } /************************************************************************* * IXDP2x01 timer tick configuration *************************************************************************/ static unsigned int ixdp2x01_clock; static int __init ixdp2x01_clock_setup(char *str) { ixdp2x01_clock = simple_strtoul(str, NULL, 10); return 1; } __setup("ixdp2x01_clock=", ixdp2x01_clock_setup); static void __init ixdp2x01_timer_init(void) { if (!ixdp2x01_clock) ixdp2x01_clock = 50000000; ixp2000_init_time(ixdp2x01_clock); } static struct sys_timer ixdp2x01_timer = { .init = ixdp2x01_timer_init, .offset = ixp2000_gettimeoffset, }; 
/************************************************************************* * IXDP2x01 PCI *************************************************************************/ void __init ixdp2x01_pci_preinit(void) { ixp2000_reg_write(IXP2000_PCI_ADDR_EXT, 0x00000000); ixp2000_pci_preinit(); pcibios_setup("firmware"); } #define DEVPIN(dev, pin) ((pin) | ((dev) << 3)) static int __init ixdp2x01_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { u8 bus = dev->bus->number; u32 devpin = DEVPIN(PCI_SLOT(dev->devfn), pin); struct pci_bus *tmp_bus = dev->bus; /* Primary bus, no interrupts here */ if (bus == 0) { return -1; } /* Lookup first leaf in bus tree */ while ((tmp_bus->parent != NULL) && (tmp_bus->parent->parent != NULL)) { tmp_bus = tmp_bus->parent; } /* Select between known bridges */ switch (tmp_bus->self->devfn | (tmp_bus->self->bus->number << 8)) { /* Device is located after first MB bridge */ case 0x0008: if (tmp_bus == dev->bus) { /* Device is located directly after first MB bridge */ switch (devpin) { case DEVPIN(1, 1): /* Onboard 82546 ch 0 */ if (machine_is_ixdp2401()) return IRQ_IXDP2401_INTA_82546; return -1; case DEVPIN(1, 2): /* Onboard 82546 ch 1 */ if (machine_is_ixdp2401()) return IRQ_IXDP2401_INTB_82546; return -1; case DEVPIN(0, 1): /* PMC INTA# */ return IRQ_IXDP2X01_SPCI_PMC_INTA; case DEVPIN(0, 2): /* PMC INTB# */ return IRQ_IXDP2X01_SPCI_PMC_INTB; case DEVPIN(0, 3): /* PMC INTC# */ return IRQ_IXDP2X01_SPCI_PMC_INTC; case DEVPIN(0, 4): /* PMC INTD# */ return IRQ_IXDP2X01_SPCI_PMC_INTD; } } break; case 0x0010: if (tmp_bus == dev->bus) { /* Device is located directly after second MB bridge */ /* Secondary bus of second bridge */ switch (devpin) { case DEVPIN(0, 1): /* DB#0 */ return IRQ_IXDP2X01_SPCI_DB_0; case DEVPIN(1, 1): /* DB#1 */ return IRQ_IXDP2X01_SPCI_DB_1; } } else { /* Device is located indirectly after second MB bridge */ /* Not supported now */ } break; } return -1; } static int ixdp2x01_pci_setup(int nr, struct pci_sys_data *sys) { 
sys->mem_offset = 0xe0000000; if (machine_is_ixdp2801() || machine_is_ixdp28x5()) sys->mem_offset -= ((*IXP2000_PCI_ADDR_EXT & 0xE000) << 16); return ixp2000_pci_setup(nr, sys); } struct hw_pci ixdp2x01_pci __initdata = { .nr_controllers = 1, .setup = ixdp2x01_pci_setup, .preinit = ixdp2x01_pci_preinit, .scan = ixp2000_pci_scan_bus, .map_irq = ixdp2x01_pci_map_irq, }; int __init ixdp2x01_pci_init(void) { if (machine_is_ixdp2401() || machine_is_ixdp2801() ||\ machine_is_ixdp28x5()) pci_common_init(&ixdp2x01_pci); return 0; } subsys_initcall(ixdp2x01_pci_init); /************************************************************************* * IXDP2x01 Machine Initialization *************************************************************************/ static struct flash_platform_data ixdp2x01_flash_platform_data = { .map_name = "cfi_probe", .width = 1, }; static unsigned long ixdp2x01_flash_bank_setup(unsigned long ofs) { ixp2000_reg_wrb(IXDP2X01_CPLD_FLASH_REG, ((ofs >> IXDP2X01_FLASH_WINDOW_BITS) | IXDP2X01_CPLD_FLASH_INTERN)); return (ofs & IXDP2X01_FLASH_WINDOW_MASK); } static struct ixp2000_flash_data ixdp2x01_flash_data = { .platform_data = &ixdp2x01_flash_platform_data, .bank_setup = ixdp2x01_flash_bank_setup }; static struct resource ixdp2x01_flash_resource = { .start = 0xc4000000, .end = 0xc4000000 + 0x01ffffff, .flags = IORESOURCE_MEM, }; static struct platform_device ixdp2x01_flash = { .name = "IXP2000-Flash", .id = 0, .dev = { .platform_data = &ixdp2x01_flash_data, }, .num_resources = 1, .resource = &ixdp2x01_flash_resource, }; static struct ixp2000_i2c_pins ixdp2x01_i2c_gpio_pins = { .sda_pin = IXDP2X01_GPIO_SDA, .scl_pin = IXDP2X01_GPIO_SCL, }; static struct platform_device ixdp2x01_i2c_controller = { .name = "IXP2000-I2C", .id = 0, .dev = { .platform_data = &ixdp2x01_i2c_gpio_pins, }, .num_resources = 0 }; static struct platform_device *ixdp2x01_devices[] __initdata = { &ixdp2x01_flash, &ixdp2x01_i2c_controller }; static void __init ixdp2x01_init_machine(void) 
{ ixp2000_reg_wrb(IXDP2X01_CPLD_FLASH_REG, (IXDP2X01_CPLD_FLASH_BANK_MASK | IXDP2X01_CPLD_FLASH_INTERN)); ixdp2x01_flash_data.nr_banks = ((*IXDP2X01_CPLD_FLASH_REG & IXDP2X01_CPLD_FLASH_BANK_MASK) + 1); platform_add_devices(ixdp2x01_devices, ARRAY_SIZE(ixdp2x01_devices)); ixp2000_uart_init(); ixdp2x01_uart_init(); } #ifdef CONFIG_ARCH_IXDP2401 MACHINE_START(IXDP2401, "Intel IXDP2401 Development Platform") /* Maintainer: MontaVista Software, Inc. */ .phys_io = IXP2000_UART_PHYS_BASE, .io_pg_offst = ((IXP2000_UART_VIRT_BASE) >> 18) & 0xfffc, .boot_params = 0x00000100, .map_io = ixdp2x01_map_io, .init_irq = ixdp2x01_init_irq, .timer = &ixdp2x01_timer, .init_machine = ixdp2x01_init_machine, MACHINE_END #endif #ifdef CONFIG_ARCH_IXDP2801 MACHINE_START(IXDP2801, "Intel IXDP2801 Development Platform") /* Maintainer: MontaVista Software, Inc. */ .phys_io = IXP2000_UART_PHYS_BASE, .io_pg_offst = ((IXP2000_UART_VIRT_BASE) >> 18) & 0xfffc, .boot_params = 0x00000100, .map_io = ixdp2x01_map_io, .init_irq = ixdp2x01_init_irq, .timer = &ixdp2x01_timer, .init_machine = ixdp2x01_init_machine, MACHINE_END /* * IXDP28x5 is basically an IXDP2801 with a different CPU but Intel * changed the machine ID in the bootloader */ MACHINE_START(IXDP28X5, "Intel IXDP2805/2855 Development Platform") /* Maintainer: MontaVista Software, Inc. */ .phys_io = IXP2000_UART_PHYS_BASE, .io_pg_offst = ((IXP2000_UART_VIRT_BASE) >> 18) & 0xfffc, .boot_params = 0x00000100, .map_io = ixdp2x01_map_io, .init_irq = ixdp2x01_init_irq, .timer = &ixdp2x01_timer, .init_machine = ixdp2x01_init_machine, MACHINE_END #endif
gpl-2.0
stupaq/linux-file-tracer
arch/blackfin/mach-common/cpufreq.c
853
5557
/* * Blackfin core clock scaling * * Copyright 2008-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/fs.h> #include <linux/delay.h> #include <asm/blackfin.h> #include <asm/time.h> #include <asm/dpmc.h> #define CPUFREQ_CPU 0 /* this is the table of CCLK frequencies, in Hz */ /* .index is the entry in the auxillary dpm_state_table[] */ static struct cpufreq_frequency_table bfin_freq_table[] = { { .frequency = CPUFREQ_TABLE_END, .index = 0, }, { .frequency = CPUFREQ_TABLE_END, .index = 1, }, { .frequency = CPUFREQ_TABLE_END, .index = 2, }, { .frequency = CPUFREQ_TABLE_END, .index = 0, }, }; static struct bfin_dpm_state { unsigned int csel; /* system clock divider */ unsigned int tscale; /* change the divider on the core timer interrupt */ } dpm_state_table[3]; #if defined(CONFIG_CYCLES_CLOCKSOURCE) /* * normalized to maximum frequncy offset for CYCLES, * used in time-ts cycles clock source, but could be used * somewhere also. 
*/ unsigned long long __bfin_cycles_off; unsigned int __bfin_cycles_mod; #endif /**************************************************************************/ static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk) { unsigned long csel, min_cclk; int index; /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */ #if ANOMALY_05000273 || ANOMALY_05000274 || \ (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE)) min_cclk = sclk * 2; #else min_cclk = sclk; #endif csel = ((bfin_read_PLL_DIV() & CSEL) >> 4); for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) { bfin_freq_table[index].frequency = cclk >> index; dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */ dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1; pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n", bfin_freq_table[index].frequency, dpm_state_table[index].csel, dpm_state_table[index].tscale); } return; } static void bfin_adjust_core_timer(void *info) { unsigned int tscale; unsigned int index = *(unsigned int *)info; /* we have to adjust the core timer, because it is using cclk */ tscale = dpm_state_table[index].tscale; bfin_write_TSCALE(tscale); return; } static unsigned int bfin_getfreq_khz(unsigned int cpu) { /* Both CoreA/B have the same core clock */ return get_cclk() / 1000; } static int bfin_target(struct cpufreq_policy *poli, unsigned int target_freq, unsigned int relation) { unsigned int index, plldiv, cpu; unsigned long flags, cclk_hz; struct cpufreq_freqs freqs; static unsigned long lpj_ref; static unsigned int lpj_ref_freq; #if defined(CONFIG_CYCLES_CLOCKSOURCE) cycles_t cycles; #endif for_each_online_cpu(cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); if (!policy) continue; if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, relation, &index)) return -EINVAL; cclk_hz = bfin_freq_table[index].frequency; freqs.old = bfin_getfreq_khz(0); freqs.new = cclk_hz; 
freqs.cpu = cpu; pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", cclk_hz, target_freq, freqs.old); cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); if (cpu == CPUFREQ_CPU) { local_irq_save_hw(flags); plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; bfin_write_PLL_DIV(plldiv); on_each_cpu(bfin_adjust_core_timer, &index, 1); #if defined(CONFIG_CYCLES_CLOCKSOURCE) cycles = get_cycles(); SSYNC(); cycles += 10; /* ~10 cycles we lose after get_cycles() */ __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); __bfin_cycles_mod = index; #endif if (!lpj_ref_freq) { lpj_ref = loops_per_jiffy; lpj_ref_freq = freqs.old; } if (freqs.new != freqs.old) { loops_per_jiffy = cpufreq_scale(lpj_ref, lpj_ref_freq, freqs.new); } local_irq_restore_hw(flags); } /* TODO: just test case for cycles clock source, remove later */ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } pr_debug("cpufreq: done\n"); return 0; } static int bfin_verify_speed(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, bfin_freq_table); } static int __init __bfin_cpu_init(struct cpufreq_policy *policy) { unsigned long cclk, sclk; cclk = get_cclk() / 1000; sclk = get_sclk() / 1000; if (policy->cpu == CPUFREQ_CPU) bfin_init_tables(cclk, sclk); policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ policy->cur = cclk; cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); } static struct freq_attr *bfin_freq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver bfin_driver = { .verify = bfin_verify_speed, .target = bfin_target, .get = bfin_getfreq_khz, .init = __bfin_cpu_init, .name = "bfin cpufreq", .owner = THIS_MODULE, .attr = bfin_freq_attr, }; static int __init bfin_cpu_init(void) { return cpufreq_register_driver(&bfin_driver); } static void __exit bfin_cpu_exit(void) { 
cpufreq_unregister_driver(&bfin_driver); } MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("cpufreq driver for Blackfin"); MODULE_LICENSE("GPL"); module_init(bfin_cpu_init); module_exit(bfin_cpu_exit);
gpl-2.0
zhmz90/linux
drivers/phy/phy-exynos4x12-usb2.c
1109
10732
/* * Samsung SoC USB 1.1/2.0 PHY driver - Exynos 4x12 support * * Copyright (C) 2013 Samsung Electronics Co., Ltd. * Author: Kamil Debski <k.debski@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/io.h> #include <linux/phy/phy.h> #include <linux/regmap.h> #include "phy-samsung-usb2.h" /* Exynos USB PHY registers */ /* PHY power control */ #define EXYNOS_4x12_UPHYPWR 0x0 #define EXYNOS_4x12_UPHYPWR_PHY0_SUSPEND BIT(0) #define EXYNOS_4x12_UPHYPWR_PHY0_PWR BIT(3) #define EXYNOS_4x12_UPHYPWR_PHY0_OTG_PWR BIT(4) #define EXYNOS_4x12_UPHYPWR_PHY0_SLEEP BIT(5) #define EXYNOS_4x12_UPHYPWR_PHY0 ( \ EXYNOS_4x12_UPHYPWR_PHY0_SUSPEND | \ EXYNOS_4x12_UPHYPWR_PHY0_PWR | \ EXYNOS_4x12_UPHYPWR_PHY0_OTG_PWR | \ EXYNOS_4x12_UPHYPWR_PHY0_SLEEP) #define EXYNOS_4x12_UPHYPWR_PHY1_SUSPEND BIT(6) #define EXYNOS_4x12_UPHYPWR_PHY1_PWR BIT(7) #define EXYNOS_4x12_UPHYPWR_PHY1_SLEEP BIT(8) #define EXYNOS_4x12_UPHYPWR_PHY1 ( \ EXYNOS_4x12_UPHYPWR_PHY1_SUSPEND | \ EXYNOS_4x12_UPHYPWR_PHY1_PWR | \ EXYNOS_4x12_UPHYPWR_PHY1_SLEEP) #define EXYNOS_4x12_UPHYPWR_HSIC0_SUSPEND BIT(9) #define EXYNOS_4x12_UPHYPWR_HSIC0_PWR BIT(10) #define EXYNOS_4x12_UPHYPWR_HSIC0_SLEEP BIT(11) #define EXYNOS_4x12_UPHYPWR_HSIC0 ( \ EXYNOS_4x12_UPHYPWR_HSIC0_SUSPEND | \ EXYNOS_4x12_UPHYPWR_HSIC0_PWR | \ EXYNOS_4x12_UPHYPWR_HSIC0_SLEEP) #define EXYNOS_4x12_UPHYPWR_HSIC1_SUSPEND BIT(12) #define EXYNOS_4x12_UPHYPWR_HSIC1_PWR BIT(13) #define EXYNOS_4x12_UPHYPWR_HSIC1_SLEEP BIT(14) #define EXYNOS_4x12_UPHYPWR_HSIC1 ( \ EXYNOS_4x12_UPHYPWR_HSIC1_SUSPEND | \ EXYNOS_4x12_UPHYPWR_HSIC1_PWR | \ EXYNOS_4x12_UPHYPWR_HSIC1_SLEEP) /* PHY clock control */ #define EXYNOS_4x12_UPHYCLK 0x4 #define EXYNOS_4x12_UPHYCLK_PHYFSEL_MASK (0x7 << 0) #define EXYNOS_4x12_UPHYCLK_PHYFSEL_OFFSET 0 #define EXYNOS_4x12_UPHYCLK_PHYFSEL_9MHZ6 (0x0 << 0) #define 
EXYNOS_4x12_UPHYCLK_PHYFSEL_10MHZ (0x1 << 0) #define EXYNOS_4x12_UPHYCLK_PHYFSEL_12MHZ (0x2 << 0) #define EXYNOS_4x12_UPHYCLK_PHYFSEL_19MHZ2 (0x3 << 0) #define EXYNOS_4x12_UPHYCLK_PHYFSEL_20MHZ (0x4 << 0) #define EXYNOS_4x12_UPHYCLK_PHYFSEL_24MHZ (0x5 << 0) #define EXYNOS_4x12_UPHYCLK_PHYFSEL_50MHZ (0x7 << 0) #define EXYNOS_3250_UPHYCLK_REFCLKSEL (0x2 << 8) #define EXYNOS_4x12_UPHYCLK_PHY0_ID_PULLUP BIT(3) #define EXYNOS_4x12_UPHYCLK_PHY0_COMMON_ON BIT(4) #define EXYNOS_4x12_UPHYCLK_PHY1_COMMON_ON BIT(7) #define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_MASK (0x7f << 10) #define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_OFFSET 10 #define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_12MHZ (0x24 << 10) #define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_15MHZ (0x1c << 10) #define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_16MHZ (0x1a << 10) #define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_19MHZ2 (0x15 << 10) #define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_20MHZ (0x14 << 10) /* PHY reset control */ #define EXYNOS_4x12_UPHYRST 0x8 #define EXYNOS_4x12_URSTCON_PHY0 BIT(0) #define EXYNOS_4x12_URSTCON_OTG_HLINK BIT(1) #define EXYNOS_4x12_URSTCON_OTG_PHYLINK BIT(2) #define EXYNOS_4x12_URSTCON_HOST_PHY BIT(3) /* The following bit defines are presented in the * order taken from the Exynos4412 reference manual. * * During experiments with the hardware and debugging * it was determined that the hardware behaves contrary * to the manual. * * The following bit values were chaned accordingly to the * results of real hardware experiments. 
*/ #define EXYNOS_4x12_URSTCON_PHY1 BIT(4) #define EXYNOS_4x12_URSTCON_HSIC0 BIT(6) #define EXYNOS_4x12_URSTCON_HSIC1 BIT(5) #define EXYNOS_4x12_URSTCON_HOST_LINK_ALL BIT(7) #define EXYNOS_4x12_URSTCON_HOST_LINK_P0 BIT(10) #define EXYNOS_4x12_URSTCON_HOST_LINK_P1 BIT(9) #define EXYNOS_4x12_URSTCON_HOST_LINK_P2 BIT(8) /* Isolation, configured in the power management unit */ #define EXYNOS_4x12_USB_ISOL_OFFSET 0x704 #define EXYNOS_4x12_USB_ISOL_OTG BIT(0) #define EXYNOS_4x12_USB_ISOL_HSIC0_OFFSET 0x708 #define EXYNOS_4x12_USB_ISOL_HSIC0 BIT(0) #define EXYNOS_4x12_USB_ISOL_HSIC1_OFFSET 0x70c #define EXYNOS_4x12_USB_ISOL_HSIC1 BIT(0) /* Mode switching SUB Device <-> Host */ #define EXYNOS_4x12_MODE_SWITCH_OFFSET 0x21c #define EXYNOS_4x12_MODE_SWITCH_MASK 1 #define EXYNOS_4x12_MODE_SWITCH_DEVICE 0 #define EXYNOS_4x12_MODE_SWITCH_HOST 1 enum exynos4x12_phy_id { EXYNOS4x12_DEVICE, EXYNOS4x12_HOST, EXYNOS4x12_HSIC0, EXYNOS4x12_HSIC1, EXYNOS4x12_NUM_PHYS, }; /* * exynos4x12_rate_to_clk() converts the supplied clock rate to the value that * can be written to the phy register. 
*/ static int exynos4x12_rate_to_clk(unsigned long rate, u32 *reg) { /* EXYNOS_4x12_UPHYCLK_PHYFSEL_MASK */ switch (rate) { case 9600 * KHZ: *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_9MHZ6; break; case 10 * MHZ: *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_10MHZ; break; case 12 * MHZ: *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_12MHZ; break; case 19200 * KHZ: *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_19MHZ2; break; case 20 * MHZ: *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_20MHZ; break; case 24 * MHZ: *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_24MHZ; break; case 50 * MHZ: *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_50MHZ; break; default: return -EINVAL; } return 0; } static void exynos4x12_isol(struct samsung_usb2_phy_instance *inst, bool on) { struct samsung_usb2_phy_driver *drv = inst->drv; u32 offset; u32 mask; switch (inst->cfg->id) { case EXYNOS4x12_DEVICE: case EXYNOS4x12_HOST: offset = EXYNOS_4x12_USB_ISOL_OFFSET; mask = EXYNOS_4x12_USB_ISOL_OTG; break; case EXYNOS4x12_HSIC0: offset = EXYNOS_4x12_USB_ISOL_HSIC0_OFFSET; mask = EXYNOS_4x12_USB_ISOL_HSIC0; break; case EXYNOS4x12_HSIC1: offset = EXYNOS_4x12_USB_ISOL_HSIC1_OFFSET; mask = EXYNOS_4x12_USB_ISOL_HSIC1; break; default: return; }; regmap_update_bits(drv->reg_pmu, offset, mask, on ? 
0 : mask); } static void exynos4x12_setup_clk(struct samsung_usb2_phy_instance *inst) { struct samsung_usb2_phy_driver *drv = inst->drv; u32 clk; clk = readl(drv->reg_phy + EXYNOS_4x12_UPHYCLK); clk &= ~EXYNOS_4x12_UPHYCLK_PHYFSEL_MASK; if (drv->cfg->has_refclk_sel) clk = EXYNOS_3250_UPHYCLK_REFCLKSEL; clk |= drv->ref_reg_val << EXYNOS_4x12_UPHYCLK_PHYFSEL_OFFSET; clk |= EXYNOS_4x12_UPHYCLK_PHY1_COMMON_ON; writel(clk, drv->reg_phy + EXYNOS_4x12_UPHYCLK); } static void exynos4x12_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) { struct samsung_usb2_phy_driver *drv = inst->drv; u32 rstbits = 0; u32 phypwr = 0; u32 rst; u32 pwr; switch (inst->cfg->id) { case EXYNOS4x12_DEVICE: phypwr = EXYNOS_4x12_UPHYPWR_PHY0; rstbits = EXYNOS_4x12_URSTCON_PHY0; break; case EXYNOS4x12_HOST: phypwr = EXYNOS_4x12_UPHYPWR_PHY1; rstbits = EXYNOS_4x12_URSTCON_HOST_PHY | EXYNOS_4x12_URSTCON_PHY1 | EXYNOS_4x12_URSTCON_HOST_LINK_P0; break; case EXYNOS4x12_HSIC0: phypwr = EXYNOS_4x12_UPHYPWR_HSIC0; rstbits = EXYNOS_4x12_URSTCON_HSIC0 | EXYNOS_4x12_URSTCON_HOST_LINK_P1; break; case EXYNOS4x12_HSIC1: phypwr = EXYNOS_4x12_UPHYPWR_HSIC1; rstbits = EXYNOS_4x12_URSTCON_HSIC1 | EXYNOS_4x12_URSTCON_HOST_LINK_P1; break; }; if (on) { pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR); pwr &= ~phypwr; writel(pwr, drv->reg_phy + EXYNOS_4x12_UPHYPWR); rst = readl(drv->reg_phy + EXYNOS_4x12_UPHYRST); rst |= rstbits; writel(rst, drv->reg_phy + EXYNOS_4x12_UPHYRST); udelay(10); rst &= ~rstbits; writel(rst, drv->reg_phy + EXYNOS_4x12_UPHYRST); /* The following delay is necessary for the reset sequence to be * completed */ udelay(80); } else { pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR); pwr |= phypwr; writel(pwr, drv->reg_phy + EXYNOS_4x12_UPHYPWR); } } static void exynos4x12_power_on_int(struct samsung_usb2_phy_instance *inst) { if (inst->int_cnt++ > 0) return; exynos4x12_setup_clk(inst); exynos4x12_isol(inst, 0); exynos4x12_phy_pwr(inst, 1); } static int exynos4x12_power_on(struct 
samsung_usb2_phy_instance *inst) { struct samsung_usb2_phy_driver *drv = inst->drv; if (inst->ext_cnt++ > 0) return 0; if (inst->cfg->id == EXYNOS4x12_HOST) { regmap_update_bits(drv->reg_sys, EXYNOS_4x12_MODE_SWITCH_OFFSET, EXYNOS_4x12_MODE_SWITCH_MASK, EXYNOS_4x12_MODE_SWITCH_HOST); exynos4x12_power_on_int(&drv->instances[EXYNOS4x12_DEVICE]); } if (inst->cfg->id == EXYNOS4x12_DEVICE && drv->cfg->has_mode_switch) regmap_update_bits(drv->reg_sys, EXYNOS_4x12_MODE_SWITCH_OFFSET, EXYNOS_4x12_MODE_SWITCH_MASK, EXYNOS_4x12_MODE_SWITCH_DEVICE); if (inst->cfg->id == EXYNOS4x12_HSIC0 || inst->cfg->id == EXYNOS4x12_HSIC1) { exynos4x12_power_on_int(&drv->instances[EXYNOS4x12_DEVICE]); exynos4x12_power_on_int(&drv->instances[EXYNOS4x12_HOST]); } exynos4x12_power_on_int(inst); return 0; } static void exynos4x12_power_off_int(struct samsung_usb2_phy_instance *inst) { if (inst->int_cnt-- > 1) return; exynos4x12_isol(inst, 1); exynos4x12_phy_pwr(inst, 0); } static int exynos4x12_power_off(struct samsung_usb2_phy_instance *inst) { struct samsung_usb2_phy_driver *drv = inst->drv; if (inst->ext_cnt-- > 1) return 0; if (inst->cfg->id == EXYNOS4x12_DEVICE && drv->cfg->has_mode_switch) regmap_update_bits(drv->reg_sys, EXYNOS_4x12_MODE_SWITCH_OFFSET, EXYNOS_4x12_MODE_SWITCH_MASK, EXYNOS_4x12_MODE_SWITCH_HOST); if (inst->cfg->id == EXYNOS4x12_HOST) exynos4x12_power_off_int(&drv->instances[EXYNOS4x12_DEVICE]); if (inst->cfg->id == EXYNOS4x12_HSIC0 || inst->cfg->id == EXYNOS4x12_HSIC1) { exynos4x12_power_off_int(&drv->instances[EXYNOS4x12_DEVICE]); exynos4x12_power_off_int(&drv->instances[EXYNOS4x12_HOST]); } exynos4x12_power_off_int(inst); return 0; } static const struct samsung_usb2_common_phy exynos4x12_phys[] = { { .label = "device", .id = EXYNOS4x12_DEVICE, .power_on = exynos4x12_power_on, .power_off = exynos4x12_power_off, }, { .label = "host", .id = EXYNOS4x12_HOST, .power_on = exynos4x12_power_on, .power_off = exynos4x12_power_off, }, { .label = "hsic0", .id = EXYNOS4x12_HSIC0, 
.power_on = exynos4x12_power_on, .power_off = exynos4x12_power_off, }, { .label = "hsic1", .id = EXYNOS4x12_HSIC1, .power_on = exynos4x12_power_on, .power_off = exynos4x12_power_off, }, }; const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { .has_refclk_sel = 1, .num_phys = 1, .phys = exynos4x12_phys, .rate_to_clk = exynos4x12_rate_to_clk, }; const struct samsung_usb2_phy_config exynos4x12_usb2_phy_config = { .has_mode_switch = 1, .num_phys = EXYNOS4x12_NUM_PHYS, .phys = exynos4x12_phys, .rate_to_clk = exynos4x12_rate_to_clk, };
gpl-2.0
kofemann/linux-redpatch
arch/arm/mach-lh7a40x/arch-kev7a400.c
1621
2933
/* arch/arm/mach-lh7a40x/arch-kev7a400.c * * Copyright (C) 2004 Logic Product Development * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * */ #include <linux/tty.h> #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/mach/map.h> #include "common.h" /* This function calls the board specific IRQ initialization function. */ static struct map_desc kev7a400_io_desc[] __initdata = { { .virtual = IO_VIRT, .pfn = __phys_to_pfn(IO_PHYS), .length = IO_SIZE, .type = MT_DEVICE }, { .virtual = CPLD_VIRT, .pfn = __phys_to_pfn(CPLD_PHYS), .length = CPLD_SIZE, .type = MT_DEVICE } }; void __init kev7a400_map_io(void) { iotable_init (kev7a400_io_desc, ARRAY_SIZE (kev7a400_io_desc)); } static u16 CPLD_IRQ_mask; /* Mask for CPLD IRQs, 1 == unmasked */ static void kev7a400_ack_cpld_irq (u32 irq) { CPLD_CL_INT = 1 << (irq - IRQ_KEV7A400_CPLD); } static void kev7a400_mask_cpld_irq (u32 irq) { CPLD_IRQ_mask &= ~(1 << (irq - IRQ_KEV7A400_CPLD)); CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask; } static void kev7a400_unmask_cpld_irq (u32 irq) { CPLD_IRQ_mask |= 1 << (irq - IRQ_KEV7A400_CPLD); CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask; } static struct irq_chip kev7a400_cpld_chip = { .name = "CPLD", .ack = kev7a400_ack_cpld_irq, .mask = kev7a400_mask_cpld_irq, .unmask = kev7a400_unmask_cpld_irq, }; static void kev7a400_cpld_handler (unsigned int irq, struct irq_desc *desc) { u32 mask = CPLD_LATCHED_INTS; irq = IRQ_KEV7A400_CPLD; for (; mask; mask >>= 1, ++irq) if (mask & 1) generic_handle_irq(irq); } void __init lh7a40x_init_board_irq (void) { int irq; for (irq = IRQ_KEV7A400_CPLD; irq < IRQ_KEV7A400_CPLD + NR_IRQ_BOARD; ++irq) { set_irq_chip (irq, &kev7a400_cpld_chip); 
set_irq_handler (irq, handle_edge_irq); set_irq_flags (irq, IRQF_VALID); } set_irq_chained_handler (IRQ_CPLD, kev7a400_cpld_handler); /* Clear all CPLD interrupts */ CPLD_CL_INT = 0xff; /* CPLD_INTR_MMC_CD | CPLD_INTR_ETH_INT; */ GPIO_GPIOINTEN = 0; /* Disable all GPIO interrupts */ barrier(); #if 0 GPIO_INTTYPE1 = (GPIO_INTR_PCC1_CD | GPIO_INTR_PCC1_CD); /* Edge trig. */ GPIO_INTTYPE2 = 0; /* Falling edge & low-level */ GPIO_GPIOFEOI = 0xff; /* Clear all GPIO interrupts */ GPIO_GPIOINTEN = 0xff; /* Enable all GPIO interrupts */ init_FIQ(); #endif } MACHINE_START (KEV7A400, "Sharp KEV7a400") /* Maintainer: Marc Singer */ .phys_io = 0x80000000, .io_pg_offst = ((io_p2v (0x80000000))>>18) & 0xfffc, .boot_params = 0xc0000100, .map_io = kev7a400_map_io, .init_irq = lh7a400_init_irq, .timer = &lh7a40x_timer, MACHINE_END
gpl-2.0
HelllGuest/phoenix_kernel_lux
fs/fat/namei_msdos.c
2133
17126
/* * linux/fs/msdos/namei.c * * Written 1992,1993 by Werner Almesberger * Hidden files 1995 by Albert Cahalan <albert@ccs.neu.edu> <adc@coe.neu.edu> * Rewritten for constant inumbers 1999 by Al Viro */ #include <linux/module.h> #include <linux/time.h> #include <linux/buffer_head.h> #include "fat.h" /* Characters that are undesirable in an MS-DOS file name */ static unsigned char bad_chars[] = "*?<>|\""; static unsigned char bad_if_strict[] = "+=,; "; /***** Formats an MS-DOS file name. Rejects invalid names. */ static int msdos_format_name(const unsigned char *name, int len, unsigned char *res, struct fat_mount_options *opts) /* * name is the proposed name, len is its length, res is * the resulting name, opts->name_check is either (r)elaxed, * (n)ormal or (s)trict, opts->dotsOK allows dots at the * beginning of name (for hidden files) */ { unsigned char *walk; unsigned char c; int space; if (name[0] == '.') { /* dotfile because . and .. already done */ if (opts->dotsOK) { /* Get rid of dot - test for it elsewhere */ name++; len--; } else return -EINVAL; } /* * disallow names that _really_ start with a dot */ space = 1; c = 0; for (walk = res; len && walk - res < 8; walk++) { c = *name++; len--; if (opts->name_check != 'r' && strchr(bad_chars, c)) return -EINVAL; if (opts->name_check == 's' && strchr(bad_if_strict, c)) return -EINVAL; if (c >= 'A' && c <= 'Z' && opts->name_check == 's') return -EINVAL; if (c < ' ' || c == ':' || c == '\\') return -EINVAL; /* * 0xE5 is legal as a first character, but we must substitute * 0x05 because 0xE5 marks deleted files. Yes, DOS really * does this. * It seems that Microsoft hacked DOS to support non-US * characters after the 0xE5 character was already in use to * mark deleted files. */ if ((res == walk) && (c == 0xE5)) c = 0x05; if (c == '.') break; space = (c == ' '); *walk = (!opts->nocase && c >= 'a' && c <= 'z') ? 
c - 32 : c; } if (space) return -EINVAL; if (opts->name_check == 's' && len && c != '.') { c = *name++; len--; if (c != '.') return -EINVAL; } while (c != '.' && len--) c = *name++; if (c == '.') { while (walk - res < 8) *walk++ = ' '; while (len > 0 && walk - res < MSDOS_NAME) { c = *name++; len--; if (opts->name_check != 'r' && strchr(bad_chars, c)) return -EINVAL; if (opts->name_check == 's' && strchr(bad_if_strict, c)) return -EINVAL; if (c < ' ' || c == ':' || c == '\\') return -EINVAL; if (c == '.') { if (opts->name_check == 's') return -EINVAL; break; } if (c >= 'A' && c <= 'Z' && opts->name_check == 's') return -EINVAL; space = c == ' '; if (!opts->nocase && c >= 'a' && c <= 'z') *walk++ = c - 32; else *walk++ = c; } if (space) return -EINVAL; if (opts->name_check == 's' && len) return -EINVAL; } while (walk - res < MSDOS_NAME) *walk++ = ' '; return 0; } /***** Locates a directory entry. Uses unformatted name. */ static int msdos_find(struct inode *dir, const unsigned char *name, int len, struct fat_slot_info *sinfo) { struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); unsigned char msdos_name[MSDOS_NAME]; int err; err = msdos_format_name(name, len, msdos_name, &sbi->options); if (err) return -ENOENT; err = fat_scan(dir, msdos_name, sinfo); if (!err && sbi->options.dotsOK) { if (name[0] == '.') { if (!(sinfo->de->attr & ATTR_HIDDEN)) err = -ENOENT; } else { if (sinfo->de->attr & ATTR_HIDDEN) err = -ENOENT; } if (err) brelse(sinfo->bh); } return err; } /* * Compute the hash for the msdos name corresponding to the dentry. * Note: if the name is invalid, we leave the hash code unchanged so * that the existing dentry can be used. The msdos fs routines will * return ENOENT or EINVAL as appropriate. 
*/ static int msdos_hash(const struct dentry *dentry, const struct inode *inode, struct qstr *qstr) { struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options; unsigned char msdos_name[MSDOS_NAME]; int error; error = msdos_format_name(qstr->name, qstr->len, msdos_name, options); if (!error) qstr->hash = full_name_hash(msdos_name, MSDOS_NAME); return 0; } /* * Compare two msdos names. If either of the names are invalid, * we fall back to doing the standard name comparison. */ static int msdos_cmp(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { struct fat_mount_options *options = &MSDOS_SB(parent->d_sb)->options; unsigned char a_msdos_name[MSDOS_NAME], b_msdos_name[MSDOS_NAME]; int error; error = msdos_format_name(name->name, name->len, a_msdos_name, options); if (error) goto old_compare; error = msdos_format_name(str, len, b_msdos_name, options); if (error) goto old_compare; error = memcmp(a_msdos_name, b_msdos_name, MSDOS_NAME); out: return error; old_compare: error = 1; if (name->len == len) error = memcmp(name->name, str, len); goto out; } static const struct dentry_operations msdos_dentry_operations = { .d_hash = msdos_hash, .d_compare = msdos_cmp, }; /* * AV. Wrappers for FAT sb operations. Is it wise? 
*/ /***** Get inode using directory and name */ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; struct inode *inode; int err; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo); switch (err) { case -ENOENT: inode = NULL; break; case 0: inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(sinfo.bh); break; default: inode = ERR_PTR(err); } mutex_unlock(&MSDOS_SB(sb)->s_lock); return d_splice_alias(inode, dentry); } /***** Creates a directory entry (name is already formatted). */ static int msdos_add_entry(struct inode *dir, const unsigned char *name, int is_dir, int is_hid, int cluster, struct timespec *ts, struct fat_slot_info *sinfo) { struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); struct msdos_dir_entry de; __le16 time, date; int err; memcpy(de.name, name, MSDOS_NAME); de.attr = is_dir ? ATTR_DIR : ATTR_ARCH; if (is_hid) de.attr |= ATTR_HIDDEN; de.lcase = 0; fat_time_unix2fat(sbi, ts, &time, &date, NULL); de.cdate = de.adate = 0; de.ctime = 0; de.ctime_cs = 0; de.time = time; de.date = date; fat_set_start(&de, cluster); de.size = 0; err = fat_add_entries(dir, &de, 1, sinfo); if (err) return err; dir->i_ctime = dir->i_mtime = *ts; if (IS_DIRSYNC(dir)) (void)fat_sync_inode(dir); else mark_inode_dirty(dir); return 0; } /***** Create a file */ static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct super_block *sb = dir->i_sb; struct inode *inode = NULL; struct fat_slot_info sinfo; struct timespec ts; unsigned char msdos_name[MSDOS_NAME]; int err, is_hid; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_format_name(dentry->d_name.name, dentry->d_name.len, msdos_name, &MSDOS_SB(sb)->options); if (err) goto out; is_hid = (dentry->d_name.name[0] == '.') && (msdos_name[0] != '.'); /* Have to do it due to foo vs. 
.foo conflicts */ if (!fat_scan(dir, msdos_name, &sinfo)) { brelse(sinfo.bh); err = -EINVAL; goto out; } ts = CURRENT_TIME_SEC; err = msdos_add_entry(dir, msdos_name, 0, is_hid, 0, &ts, &sinfo); if (err) goto out; inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(sinfo.bh); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out; } inode->i_mtime = inode->i_atime = inode->i_ctime = ts; /* timestamp is already written, so mark_inode_dirty() is unneeded. */ d_instantiate(dentry, inode); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); if (!err) err = fat_flush_inodes(sb, dir, inode); return err; } /***** Remove a directory */ static int msdos_rmdir(struct inode *dir, struct dentry *dentry) { struct super_block *sb = dir->i_sb; struct inode *inode = dentry->d_inode; struct fat_slot_info sinfo; int err; mutex_lock(&MSDOS_SB(sb)->s_lock); /* * Check whether the directory is not in use, then check * whether it is empty. */ err = fat_dir_empty(inode); if (err) goto out; err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo); if (err) goto out; err = fat_remove_entries(dir, &sinfo); /* and releases bh */ if (err) goto out; drop_nlink(dir); clear_nlink(inode); inode->i_ctime = CURRENT_TIME_SEC; fat_detach(inode); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); if (!err) err = fat_flush_inodes(sb, dir, inode); return err; } /***** Make a directory */ static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; struct inode *inode; unsigned char msdos_name[MSDOS_NAME]; struct timespec ts; int err, is_hid, cluster; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_format_name(dentry->d_name.name, dentry->d_name.len, msdos_name, &MSDOS_SB(sb)->options); if (err) goto out; is_hid = (dentry->d_name.name[0] == '.') && (msdos_name[0] != '.'); /* foo vs .foo situation */ if (!fat_scan(dir, msdos_name, &sinfo)) { brelse(sinfo.bh); err = -EINVAL; goto out; } ts = CURRENT_TIME_SEC; cluster = 
fat_alloc_new_dir(dir, &ts); if (cluster < 0) { err = cluster; goto out; } err = msdos_add_entry(dir, msdos_name, 1, is_hid, cluster, &ts, &sinfo); if (err) goto out_free; inc_nlink(dir); inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(sinfo.bh); if (IS_ERR(inode)) { err = PTR_ERR(inode); /* the directory was completed, just return a error */ goto out; } set_nlink(inode, 2); inode->i_mtime = inode->i_atime = inode->i_ctime = ts; /* timestamp is already written, so mark_inode_dirty() is unneeded. */ d_instantiate(dentry, inode); mutex_unlock(&MSDOS_SB(sb)->s_lock); fat_flush_inodes(sb, dir, inode); return 0; out_free: fat_free_clusters(dir, cluster); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); return err; } /***** Unlink a file */ static int msdos_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; struct super_block *sb = inode->i_sb; struct fat_slot_info sinfo; int err; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo); if (err) goto out; err = fat_remove_entries(dir, &sinfo); /* and releases bh */ if (err) goto out; clear_nlink(inode); inode->i_ctime = CURRENT_TIME_SEC; fat_detach(inode); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); if (!err) err = fat_flush_inodes(sb, dir, inode); return err; } static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name, struct dentry *old_dentry, struct inode *new_dir, unsigned char *new_name, struct dentry *new_dentry, int is_hid) { struct buffer_head *dotdot_bh; struct msdos_dir_entry *dotdot_de; struct inode *old_inode, *new_inode; struct fat_slot_info old_sinfo, sinfo; struct timespec ts; loff_t new_i_pos; int err, old_attrs, is_dir, update_dotdot, corrupt = 0; old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; old_inode = old_dentry->d_inode; new_inode = new_dentry->d_inode; err = fat_scan(old_dir, old_name, &old_sinfo); if (err) { err = -EIO; goto out; } is_dir = S_ISDIR(old_inode->i_mode); update_dotdot = (is_dir && 
old_dir != new_dir); if (update_dotdot) { if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de)) { err = -EIO; goto out; } } old_attrs = MSDOS_I(old_inode)->i_attrs; err = fat_scan(new_dir, new_name, &sinfo); if (!err) { if (!new_inode) { /* "foo" -> ".foo" case. just change the ATTR_HIDDEN */ if (sinfo.de != old_sinfo.de) { err = -EINVAL; goto out; } if (is_hid) MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN; else MSDOS_I(old_inode)->i_attrs &= ~ATTR_HIDDEN; if (IS_DIRSYNC(old_dir)) { err = fat_sync_inode(old_inode); if (err) { MSDOS_I(old_inode)->i_attrs = old_attrs; goto out; } } else mark_inode_dirty(old_inode); old_dir->i_version++; old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC; if (IS_DIRSYNC(old_dir)) (void)fat_sync_inode(old_dir); else mark_inode_dirty(old_dir); goto out; } } ts = CURRENT_TIME_SEC; if (new_inode) { if (err) goto out; if (is_dir) { err = fat_dir_empty(new_inode); if (err) goto out; } new_i_pos = MSDOS_I(new_inode)->i_pos; fat_detach(new_inode); } else { err = msdos_add_entry(new_dir, new_name, is_dir, is_hid, 0, &ts, &sinfo); if (err) goto out; new_i_pos = sinfo.i_pos; } new_dir->i_version++; fat_detach(old_inode); fat_attach(old_inode, new_i_pos); if (is_hid) MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN; else MSDOS_I(old_inode)->i_attrs &= ~ATTR_HIDDEN; if (IS_DIRSYNC(new_dir)) { err = fat_sync_inode(old_inode); if (err) goto error_inode; } else mark_inode_dirty(old_inode); if (update_dotdot) { fat_set_start(dotdot_de, MSDOS_I(new_dir)->i_logstart); mark_buffer_dirty_inode(dotdot_bh, old_inode); if (IS_DIRSYNC(new_dir)) { err = sync_dirty_buffer(dotdot_bh); if (err) goto error_dotdot; } drop_nlink(old_dir); if (!new_inode) inc_nlink(new_dir); } err = fat_remove_entries(old_dir, &old_sinfo); /* and releases bh */ old_sinfo.bh = NULL; if (err) goto error_dotdot; old_dir->i_version++; old_dir->i_ctime = old_dir->i_mtime = ts; if (IS_DIRSYNC(old_dir)) (void)fat_sync_inode(old_dir); else mark_inode_dirty(old_dir); if (new_inode) { 
drop_nlink(new_inode); if (is_dir) drop_nlink(new_inode); new_inode->i_ctime = ts; } out: brelse(sinfo.bh); brelse(dotdot_bh); brelse(old_sinfo.bh); return err; error_dotdot: /* data cluster is shared, serious corruption */ corrupt = 1; if (update_dotdot) { fat_set_start(dotdot_de, MSDOS_I(old_dir)->i_logstart); mark_buffer_dirty_inode(dotdot_bh, old_inode); corrupt |= sync_dirty_buffer(dotdot_bh); } error_inode: fat_detach(old_inode); fat_attach(old_inode, old_sinfo.i_pos); MSDOS_I(old_inode)->i_attrs = old_attrs; if (new_inode) { fat_attach(new_inode, new_i_pos); if (corrupt) corrupt |= fat_sync_inode(new_inode); } else { /* * If new entry was not sharing the data cluster, it * shouldn't be serious corruption. */ int err2 = fat_remove_entries(new_dir, &sinfo); if (corrupt) corrupt |= err2; sinfo.bh = NULL; } if (corrupt < 0) { fat_fs_error(new_dir->i_sb, "%s: Filesystem corrupted (i_pos %lld)", __func__, sinfo.i_pos); } goto out; } /***** Rename, a wrapper for rename_same_dir & rename_diff_dir */ static int msdos_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct super_block *sb = old_dir->i_sb; unsigned char old_msdos_name[MSDOS_NAME], new_msdos_name[MSDOS_NAME]; int err, is_hid; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_format_name(old_dentry->d_name.name, old_dentry->d_name.len, old_msdos_name, &MSDOS_SB(old_dir->i_sb)->options); if (err) goto out; err = msdos_format_name(new_dentry->d_name.name, new_dentry->d_name.len, new_msdos_name, &MSDOS_SB(new_dir->i_sb)->options); if (err) goto out; is_hid = (new_dentry->d_name.name[0] == '.') && (new_msdos_name[0] != '.'); err = do_msdos_rename(old_dir, old_msdos_name, old_dentry, new_dir, new_msdos_name, new_dentry, is_hid); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); if (!err) err = fat_flush_inodes(sb, old_dir, new_dir); return err; } static const struct inode_operations msdos_dir_inode_operations = { .create = msdos_create, .lookup = msdos_lookup, 
.unlink = msdos_unlink, .mkdir = msdos_mkdir, .rmdir = msdos_rmdir, .rename = msdos_rename, .setattr = fat_setattr, .getattr = fat_getattr, }; static void setup(struct super_block *sb) { MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations; sb->s_d_op = &msdos_dentry_operations; sb->s_flags |= MS_NOATIME; } static int msdos_fill_super(struct super_block *sb, void *data, int silent) { return fat_fill_super(sb, data, silent, 0, setup); } static struct dentry *msdos_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, msdos_fill_super); } static struct file_system_type msdos_fs_type = { .owner = THIS_MODULE, .name = "msdos", .mount = msdos_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("msdos"); static int __init init_msdos_fs(void) { return register_filesystem(&msdos_fs_type); } static void __exit exit_msdos_fs(void) { unregister_filesystem(&msdos_fs_type); } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Werner Almesberger"); MODULE_DESCRIPTION("MS-DOS filesystem support"); module_init(init_msdos_fs) module_exit(exit_msdos_fs)
gpl-2.0
BenefitA3/android_kernel_ark_msm8916
drivers/ata/pata_cs5536.c
2645
7700
/* * pata_cs5536.c - CS5536 PATA for new ATA layer * (C) 2007 Martin K. Petersen <mkp@mkp.net> * (C) 2011 Bartlomiej Zolnierkiewicz * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Documentation: * Available from AMD web site. * * The IDE timing registers for the CS5536 live in the Geode Machine * Specific Register file and not PCI config space. Most BIOSes * virtualize the PCI registers so the chip looks like a standard IDE * controller. Unfortunately not all implementations get this right. * In particular some have problems with unaligned accesses to the * virtualized PCI registers. This driver always does full dword * writes to work around the issue. Also, in case of a bad BIOS this * driver can be loaded with the "msr=1" parameter which forces using * the Machine Specific Registers to configure the device. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/libata.h> #include <scsi/scsi_host.h> #include <linux/dmi.h> #ifdef CONFIG_X86_32 #include <asm/msr.h> static int use_msr; module_param_named(msr, use_msr, int, 0644); MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)"); #else #undef rdmsr /* avoid accidental MSR usage on, e.g. 
x86-64 */ #undef wrmsr #define rdmsr(x, y, z) do { } while (0) #define wrmsr(x, y, z) do { } while (0) #define use_msr 0 #endif #define DRV_NAME "pata_cs5536" #define DRV_VERSION "0.0.8" enum { MSR_IDE_CFG = 0x51300010, PCI_IDE_CFG = 0x40, CFG = 0, DTC = 2, CAST = 3, ETC = 4, IDE_CFG_CHANEN = (1 << 1), IDE_CFG_CABLE = (1 << 17) | (1 << 16), IDE_D0_SHIFT = 24, IDE_D1_SHIFT = 16, IDE_DRV_MASK = 0xff, IDE_CAST_D0_SHIFT = 6, IDE_CAST_D1_SHIFT = 4, IDE_CAST_DRV_MASK = 0x3, IDE_CAST_CMD_MASK = 0xff, IDE_CAST_CMD_SHIFT = 24, IDE_ETC_UDMA_MASK = 0xc0, }; /* Some Bachmann OT200 devices have a non working UDMA support due a * missing resistor. */ static const struct dmi_system_id udma_quirk_dmi_table[] = { { .ident = "Bachmann electronic OT200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Bachmann electronic"), DMI_MATCH(DMI_PRODUCT_NAME, "OT200"), DMI_MATCH(DMI_PRODUCT_VERSION, "1") }, }, { } }; static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val) { if (unlikely(use_msr)) { u32 dummy __maybe_unused; rdmsr(MSR_IDE_CFG + reg, *val, dummy); return 0; } return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val); } static int cs5536_write(struct pci_dev *pdev, int reg, int val) { if (unlikely(use_msr)) { wrmsr(MSR_IDE_CFG + reg, val, 0); return 0; } return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val); } static void cs5536_program_dtc(struct ata_device *adev, u8 tim) { struct pci_dev *pdev = to_pci_dev(adev->link->ap->host->dev); int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT; u32 dtc; cs5536_read(pdev, DTC, &dtc); dtc &= ~(IDE_DRV_MASK << dshift); dtc |= tim << dshift; cs5536_write(pdev, DTC, dtc); } /** * cs5536_cable_detect - detect cable type * @ap: Port to detect on * * Perform cable detection for ATA66 capable cable. * * Returns a cable type. 
*/ static int cs5536_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 cfg; cs5536_read(pdev, CFG, &cfg); if (cfg & IDE_CFG_CABLE) return ATA_CBL_PATA80; else return ATA_CBL_PATA40; } /** * cs5536_set_piomode - PIO setup * @ap: ATA interface * @adev: device on the interface */ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev) { static const u8 drv_timings[5] = { 0x98, 0x55, 0x32, 0x21, 0x20, }; static const u8 addr_timings[5] = { 0x2, 0x1, 0x0, 0x0, 0x0, }; static const u8 cmd_timings[5] = { 0x99, 0x92, 0x90, 0x22, 0x20, }; struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct ata_device *pair = ata_dev_pair(adev); int mode = adev->pio_mode - XFER_PIO_0; int cmdmode = mode; int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT; u32 cast; if (pair) cmdmode = min(mode, pair->pio_mode - XFER_PIO_0); cs5536_program_dtc(adev, drv_timings[mode]); cs5536_read(pdev, CAST, &cast); cast &= ~(IDE_CAST_DRV_MASK << cshift); cast |= addr_timings[mode] << cshift; cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT); cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT; cs5536_write(pdev, CAST, cast); } /** * cs5536_set_dmamode - DMA timing setup * @ap: ATA interface * @adev: Device being configured * */ static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev) { static const u8 udma_timings[6] = { 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6, }; static const u8 mwdma_timings[3] = { 0x67, 0x21, 0x20, }; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 etc; int mode = adev->dma_mode; int dshift = adev->devno ? 
IDE_D1_SHIFT : IDE_D0_SHIFT; cs5536_read(pdev, ETC, &etc); if (mode >= XFER_UDMA_0) { etc &= ~(IDE_DRV_MASK << dshift); etc |= udma_timings[mode - XFER_UDMA_0] << dshift; } else { /* MWDMA */ etc &= ~(IDE_ETC_UDMA_MASK << dshift); cs5536_program_dtc(adev, mwdma_timings[mode - XFER_MW_DMA_0]); } cs5536_write(pdev, ETC, etc); } static struct scsi_host_template cs5536_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations cs5536_port_ops = { .inherits = &ata_bmdma32_port_ops, .cable_detect = cs5536_cable_detect, .set_piomode = cs5536_set_piomode, .set_dmamode = cs5536_set_dmamode, }; /** * cs5536_init_one * @dev: PCI device * @id: Entry in match table * */ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &cs5536_port_ops, }; static const struct ata_port_info no_udma_info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .port_ops = &cs5536_port_ops, }; const struct ata_port_info *ppi[2]; u32 cfg; if (dmi_check_system(udma_quirk_dmi_table)) ppi[0] = &no_udma_info; else ppi[0] = &info; ppi[1] = &ata_dummy_port_info; if (use_msr) printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n"); cs5536_read(dev, CFG, &cfg); if ((cfg & IDE_CFG_CHANEN) == 0) { printk(KERN_ERR DRV_NAME ": disabled by BIOS\n"); return -ENODEV; } return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0); } static const struct pci_device_id cs5536[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), }, { }, }; static struct pci_driver cs5536_pci_driver = { .name = DRV_NAME, .id_table = cs5536, .probe = cs5536_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; module_pci_driver(cs5536_pci_driver); MODULE_AUTHOR("Martin K. 
Petersen"); MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, cs5536); MODULE_VERSION(DRV_VERSION);
gpl-2.0
Phreya/phreya_kernel_bacon_cm
arch/mips/sgi-ip22/ip22-reset.c
4693
5074
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1997, 1998, 2001, 03, 05, 06 by Ralf Baechle */ #include <linux/linkage.h> #include <linux/init.h> #include <linux/ds1286.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/notifier.h> #include <linux/pm.h> #include <linux/timer.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/reboot.h> #include <asm/sgialib.h> #include <asm/sgi/ioc.h> #include <asm/sgi/hpc3.h> #include <asm/sgi/mc.h> #include <asm/sgi/ip22.h> /* * Just powerdown if init hasn't done after POWERDOWN_TIMEOUT seconds. * I'm not sure if this feature is a good idea, for now it's here just to * make the power button make behave just like under IRIX. */ #define POWERDOWN_TIMEOUT 120 /* * Blink frequency during reboot grace period and when panicked. */ #define POWERDOWN_FREQ (HZ / 4) #define PANIC_FREQ (HZ / 8) static struct timer_list power_timer, blink_timer, debounce_timer; #define MACHINE_PANICED 1 #define MACHINE_SHUTTING_DOWN 2 static int machine_state; static void __noreturn sgi_machine_power_off(void) { unsigned int tmp; local_irq_disable(); /* Disable watchdog */ tmp = hpc3c0->rtcregs[RTC_CMD] & 0xff; hpc3c0->rtcregs[RTC_CMD] = tmp | RTC_WAM; hpc3c0->rtcregs[RTC_WSEC] = 0; hpc3c0->rtcregs[RTC_WHSEC] = 0; while (1) { sgioc->panel = ~SGIOC_PANEL_POWERON; /* Good bye cruel world ... */ /* If we're still running, we probably got sent an alarm interrupt. Read the flag to clear it. 
*/ tmp = hpc3c0->rtcregs[RTC_HOURS_ALARM]; } } static void __noreturn sgi_machine_restart(char *command) { if (machine_state & MACHINE_SHUTTING_DOWN) sgi_machine_power_off(); sgimc->cpuctrl0 |= SGIMC_CCTRL0_SYSINIT; while (1); } static void __noreturn sgi_machine_halt(void) { if (machine_state & MACHINE_SHUTTING_DOWN) sgi_machine_power_off(); ArcEnterInteractiveMode(); } static void power_timeout(unsigned long data) { sgi_machine_power_off(); } static void blink_timeout(unsigned long data) { /* XXX fix this for fullhouse */ sgi_ioc_reset ^= (SGIOC_RESET_LC0OFF|SGIOC_RESET_LC1OFF); sgioc->reset = sgi_ioc_reset; mod_timer(&blink_timer, jiffies + data); } static void debounce(unsigned long data) { del_timer(&debounce_timer); if (sgint->istat1 & SGINT_ISTAT1_PWR) { /* Interrupt still being sent. */ debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s */ add_timer(&debounce_timer); sgioc->panel = SGIOC_PANEL_POWERON | SGIOC_PANEL_POWERINTR | SGIOC_PANEL_VOLDNINTR | SGIOC_PANEL_VOLDNHOLD | SGIOC_PANEL_VOLUPINTR | SGIOC_PANEL_VOLUPHOLD; return; } if (machine_state & MACHINE_PANICED) sgimc->cpuctrl0 |= SGIMC_CCTRL0_SYSINIT; enable_irq(SGI_PANEL_IRQ); } static inline void power_button(void) { if (machine_state & MACHINE_PANICED) return; if ((machine_state & MACHINE_SHUTTING_DOWN) || kill_cad_pid(SIGINT, 1)) { /* No init process or button pressed twice. 
*/ sgi_machine_power_off(); } machine_state |= MACHINE_SHUTTING_DOWN; blink_timer.data = POWERDOWN_FREQ; blink_timeout(POWERDOWN_FREQ); init_timer(&power_timer); power_timer.function = power_timeout; power_timer.expires = jiffies + POWERDOWN_TIMEOUT * HZ; add_timer(&power_timer); } static irqreturn_t panel_int(int irq, void *dev_id) { unsigned int buttons; buttons = sgioc->panel; sgioc->panel = SGIOC_PANEL_POWERON | SGIOC_PANEL_POWERINTR; if (sgint->istat1 & SGINT_ISTAT1_PWR) { /* Wait until interrupt goes away */ disable_irq_nosync(SGI_PANEL_IRQ); init_timer(&debounce_timer); debounce_timer.function = debounce; debounce_timer.expires = jiffies + 5; add_timer(&debounce_timer); } /* Power button was pressed * ioc.ps page 22: "The Panel Register is called Power Control by Full * House. Only lowest 2 bits are used. Guiness uses upper four bits * for volume control". This is not true, all bits are pulled high * on fullhouse */ if (!(buttons & SGIOC_PANEL_POWERINTR)) power_button(); return IRQ_HANDLED; } static int panic_event(struct notifier_block *this, unsigned long event, void *ptr) { if (machine_state & MACHINE_PANICED) return NOTIFY_DONE; machine_state |= MACHINE_PANICED; blink_timer.data = PANIC_FREQ; blink_timeout(PANIC_FREQ); return NOTIFY_DONE; } static struct notifier_block panic_block = { .notifier_call = panic_event, }; static int __init reboot_setup(void) { int res; _machine_restart = sgi_machine_restart; _machine_halt = sgi_machine_halt; pm_power_off = sgi_machine_power_off; res = request_irq(SGI_PANEL_IRQ, panel_int, 0, "Front Panel", NULL); if (res) { printk(KERN_ERR "Allocation of front panel IRQ failed\n"); return res; } init_timer(&blink_timer); blink_timer.function = blink_timeout; atomic_notifier_chain_register(&panic_notifier_list, &panic_block); return 0; } subsys_initcall(reboot_setup);
gpl-2.0
smac0628/htc_gpe_51
arch/mips/dec/wbflush.c
4693
2110
/* * Setup the right wbflush routine for the different DECstations. * * Created with information from: * DECstation 3100 Desktop Workstation Functional Specification * DECstation 5000/200 KN02 System Module Functional Specification * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Harald Koerfgen * Copyright (C) 2002 Maciej W. Rozycki */ #include <linux/init.h> #include <asm/bootinfo.h> #include <asm/wbflush.h> #include <asm/barrier.h> static void wbflush_kn01(void); static void wbflush_kn210(void); static void wbflush_mips(void); void (*__wbflush) (void); void __init wbflush_setup(void) { switch (mips_machtype) { case MACH_DS23100: case MACH_DS5000_200: /* DS5000 3max */ __wbflush = wbflush_kn01; break; case MACH_DS5100: /* DS5100 MIPSMATE */ __wbflush = wbflush_kn210; break; case MACH_DS5000_1XX: /* DS5000/100 3min */ case MACH_DS5000_XX: /* Personal DS5000/2x */ case MACH_DS5000_2X0: /* DS5000/240 3max+ */ case MACH_DS5900: /* DS5900 bigmax */ default: __wbflush = wbflush_mips; break; } } /* * For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions * as part of Coprocessor 0. */ static void wbflush_kn01(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "1:\tbc0f\t1b\n\t" "nop\n\t" ".set\tpop"); } /* * For the DS5100 the writeback buffer seems to be a part of Coprocessor 3. * But CP3 has to enabled first. */ static void wbflush_kn210(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "mfc0\t$2,$12\n\t" "lui\t$3,0x8000\n\t" "or\t$3,$2,$3\n\t" "mtc0\t$3,$12\n\t" "nop\n" "1:\tbc3f\t1b\n\t" "nop\n\t" "mtc0\t$2,$12\n\t" "nop\n\t" ".set\tpop" : : : "$2", "$3"); } /* * I/O ASIC systems use a standard writeback buffer that gets flushed * upon an uncached read. 
*/ static void wbflush_mips(void) { __fast_iob(); } #include <linux/module.h> EXPORT_SYMBOL(__wbflush);
gpl-2.0
meredydd/capsicum-linux
drivers/media/video/cx88/cx88-alsa.c
4949
25403
/* * * Support for audio capture * PCI function #1 of the cx2388x. * * (c) 2007 Trent Piepho <xyzzy@speakeasy.org> * (c) 2005,2006 Ricardo Cerqueira <v4l@cerqueira.org> * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org> * Based on a dummy cx88 module by Gerd Knorr <kraxel@bytesex.org> * Based on dummy.c by Jaroslav Kysela <perex@perex.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/vmalloc.h> #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/slab.h> #include <asm/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/control.h> #include <sound/initval.h> #include <sound/tlv.h> #include <media/wm8775.h> #include "cx88.h" #include "cx88-reg.h" #define dprintk(level,fmt, arg...) if (debug >= level) \ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg) #define dprintk_core(level,fmt, arg...) 
if (debug >= level) \ printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg) /**************************************************************************** Data type declarations - Can be moded to a header file later ****************************************************************************/ struct cx88_audio_buffer { unsigned int bpl; struct btcx_riscmem risc; struct videobuf_dmabuf dma; }; struct cx88_audio_dev { struct cx88_core *core; struct cx88_dmaqueue q; /* pci i/o */ struct pci_dev *pci; /* audio controls */ int irq; struct snd_card *card; spinlock_t reg_lock; atomic_t count; unsigned int dma_size; unsigned int period_size; unsigned int num_periods; struct videobuf_dmabuf *dma_risc; struct cx88_audio_buffer *buf; struct snd_pcm_substream *substream; }; typedef struct cx88_audio_dev snd_cx88_card_t; /**************************************************************************** Module global static vars ****************************************************************************/ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1}; module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable cx88x soundcard. 
default enabled."); module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s)."); /**************************************************************************** Module macros ****************************************************************************/ MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards"); MODULE_AUTHOR("Ricardo Cerqueira"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX88_VERSION); MODULE_SUPPORTED_DEVICE("{{Conexant,23881}," "{{Conexant,23882}," "{{Conexant,23883}"); static unsigned int debug; module_param(debug,int,0644); MODULE_PARM_DESC(debug,"enable debug messages"); /**************************************************************************** Module specific funtions ****************************************************************************/ /* * BOARD Specific: Sets audio DMA */ static int _cx88_start_audio_dma(snd_cx88_card_t *chip) { struct cx88_audio_buffer *buf = chip->buf; struct cx88_core *core=chip->core; const struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25]; /* Make sure RISC/FIFO are off before changing FIFO/RISC settings */ cx_clear(MO_AUD_DMACNTRL, 0x11); /* setup fifo + format - out channel */ cx88_sram_channel_setup(chip->core, audio_ch, buf->bpl, buf->risc.dma); /* sets bpl size */ cx_write(MO_AUDD_LNGTH, buf->bpl); /* reset counter */ cx_write(MO_AUDD_GPCNTRL, GP_COUNT_CONTROL_RESET); atomic_set(&chip->count, 0); dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d " "byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start + 8)>>1, chip->num_periods, buf->bpl * chip->num_periods); /* Enables corresponding bits at AUD_INT_STAT */ cx_write(MO_AUD_INTMSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC | AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1); /* Clean any pending interrupt bits already set */ cx_write(MO_AUD_INTSTAT, ~0); /* enable audio irqs */ cx_set(MO_PCI_INTMSK, 
chip->core->pci_irqmask | PCI_INT_AUDINT); /* start dma */ cx_set(MO_DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */ cx_set(MO_AUD_DMACNTRL, 0x11); /* audio downstream FIFO and RISC enable */ if (debug) cx88_sram_channel_dump(chip->core, audio_ch); return 0; } /* * BOARD Specific: Resets audio DMA */ static int _cx88_stop_audio_dma(snd_cx88_card_t *chip) { struct cx88_core *core=chip->core; dprintk(1, "Stopping audio DMA\n"); /* stop dma */ cx_clear(MO_AUD_DMACNTRL, 0x11); /* disable irqs */ cx_clear(MO_PCI_INTMSK, PCI_INT_AUDINT); cx_clear(MO_AUD_INTMSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC | AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1); if (debug) cx88_sram_channel_dump(chip->core, &cx88_sram_channels[SRAM_CH25]); return 0; } #define MAX_IRQ_LOOP 50 /* * BOARD Specific: IRQ dma bits */ static const char *cx88_aud_irqs[32] = { "dn_risci1", "up_risci1", "rds_dn_risc1", /* 0-2 */ NULL, /* reserved */ "dn_risci2", "up_risci2", "rds_dn_risc2", /* 4-6 */ NULL, /* reserved */ "dnf_of", "upf_uf", "rds_dnf_uf", /* 8-10 */ NULL, /* reserved */ "dn_sync", "up_sync", "rds_dn_sync", /* 12-14 */ NULL, /* reserved */ "opc_err", "par_err", "rip_err", /* 16-18 */ "pci_abort", "ber_irq", "mchg_irq" /* 19-21 */ }; /* * BOARD Specific: Threats IRQ audio specific calls */ static void cx8801_aud_irq(snd_cx88_card_t *chip) { struct cx88_core *core = chip->core; u32 status, mask; status = cx_read(MO_AUD_INTSTAT); mask = cx_read(MO_AUD_INTMSK); if (0 == (status & mask)) return; cx_write(MO_AUD_INTSTAT, status); if (debug > 1 || (status & mask & ~0xff)) cx88_print_irqbits(core->name, "irq aud", cx88_aud_irqs, ARRAY_SIZE(cx88_aud_irqs), status, mask); /* risc op code error */ if (status & AUD_INT_OPC_ERR) { printk(KERN_WARNING "%s/1: Audio risc op code error\n",core->name); cx_clear(MO_AUD_DMACNTRL, 0x11); cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH25]); } if (status & AUD_INT_DN_SYNC) { dprintk(1, "Downstream sync error\n"); cx_write(MO_AUDD_GPCNTRL, GP_COUNT_CONTROL_RESET); return; 
} /* risc1 downstream */ if (status & AUD_INT_DN_RISCI1) { atomic_set(&chip->count, cx_read(MO_AUDD_GPCNT)); snd_pcm_period_elapsed(chip->substream); } /* FIXME: Any other status should deserve a special handling? */ } /* * BOARD Specific: Handles IRQ calls */ static irqreturn_t cx8801_irq(int irq, void *dev_id) { snd_cx88_card_t *chip = dev_id; struct cx88_core *core = chip->core; u32 status; int loop, handled = 0; for (loop = 0; loop < MAX_IRQ_LOOP; loop++) { status = cx_read(MO_PCI_INTSTAT) & (core->pci_irqmask | PCI_INT_AUDINT); if (0 == status) goto out; dprintk(3, "cx8801_irq loop %d/%d, status %x\n", loop, MAX_IRQ_LOOP, status); handled = 1; cx_write(MO_PCI_INTSTAT, status); if (status & core->pci_irqmask) cx88_core_irq(core, status); if (status & PCI_INT_AUDINT) cx8801_aud_irq(chip); } if (MAX_IRQ_LOOP == loop) { printk(KERN_ERR "%s/1: IRQ loop detected, disabling interrupts\n", core->name); cx_clear(MO_PCI_INTMSK, PCI_INT_AUDINT); } out: return IRQ_RETVAL(handled); } static int dsp_buffer_free(snd_cx88_card_t *chip) { BUG_ON(!chip->dma_size); dprintk(2,"Freeing buffer\n"); videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc); videobuf_dma_free(chip->dma_risc); btcx_riscmem_free(chip->pci,&chip->buf->risc); kfree(chip->buf); chip->dma_risc = NULL; chip->dma_size = 0; return 0; } /**************************************************************************** ALSA PCM Interface ****************************************************************************/ /* * Digital hardware definition */ #define DEFAULT_FIFO_SIZE 4096 static const struct snd_pcm_hardware snd_cx88_digital_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, /* Analog audio output will be full of clicks and pops if there are not exactly four lines in the SRAM FIFO buffer. 
*/ .period_bytes_min = DEFAULT_FIFO_SIZE/4, .period_bytes_max = DEFAULT_FIFO_SIZE/4, .periods_min = 1, .periods_max = 1024, .buffer_bytes_max = (1024*1024), }; /* * audio pcm capture open callback */ static int snd_cx88_pcm_open(struct snd_pcm_substream *substream) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; if (!chip) { printk(KERN_ERR "BUG: cx88 can't find device struct." " Can't proceed with open\n"); return -ENODEV; } err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) goto _error; chip->substream = substream; runtime->hw = snd_cx88_digital_hw; if (cx88_sram_channels[SRAM_CH25].fifo_size != DEFAULT_FIFO_SIZE) { unsigned int bpl = cx88_sram_channels[SRAM_CH25].fifo_size / 4; bpl &= ~7; /* must be multiple of 8 */ runtime->hw.period_bytes_min = bpl; runtime->hw.period_bytes_max = bpl; } return 0; _error: dprintk(1,"Error opening PCM!\n"); return err; } /* * audio close callback */ static int snd_cx88_close(struct snd_pcm_substream *substream) { return 0; } /* * hw_params callback */ static int snd_cx88_hw_params(struct snd_pcm_substream * substream, struct snd_pcm_hw_params * hw_params) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); struct videobuf_dmabuf *dma; struct cx88_audio_buffer *buf; int ret; if (substream->runtime->dma_area) { dsp_buffer_free(chip); substream->runtime->dma_area = NULL; } chip->period_size = params_period_bytes(hw_params); chip->num_periods = params_periods(hw_params); chip->dma_size = chip->period_size * params_periods(hw_params); BUG_ON(!chip->dma_size); BUG_ON(chip->num_periods & (chip->num_periods-1)); buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (NULL == buf) return -ENOMEM; buf->bpl = chip->period_size; dma = &buf->dma; videobuf_dma_init(dma); ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE, (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT)); if (ret < 0) goto error; ret = videobuf_dma_map(&chip->pci->dev, 
dma); if (ret < 0) goto error; ret = cx88_risc_databuffer(chip->pci, &buf->risc, dma->sglist, chip->period_size, chip->num_periods, 1); if (ret < 0) goto error; /* Loop back to start of program */ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma); chip->buf = buf; chip->dma_risc = dma; substream->runtime->dma_area = chip->dma_risc->vaddr; substream->runtime->dma_bytes = chip->dma_size; substream->runtime->dma_addr = 0; return 0; error: kfree(buf); return ret; } /* * hw free callback */ static int snd_cx88_hw_free(struct snd_pcm_substream * substream) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); if (substream->runtime->dma_area) { dsp_buffer_free(chip); substream->runtime->dma_area = NULL; } return 0; } /* * prepare callback */ static int snd_cx88_prepare(struct snd_pcm_substream *substream) { return 0; } /* * trigger callback */ static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); int err; /* Local interrupts are already disabled by ALSA */ spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: err=_cx88_start_audio_dma(chip); break; case SNDRV_PCM_TRIGGER_STOP: err=_cx88_stop_audio_dma(chip); break; default: err=-EINVAL; break; } spin_unlock(&chip->reg_lock); return err; } /* * pointer callback */ static snd_pcm_uframes_t snd_cx88_pointer(struct snd_pcm_substream *substream) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; u16 count; count = atomic_read(&chip->count); // dprintk(2, "%s - count %d (+%u), period %d, frame %lu\n", __func__, // count, new, count & (runtime->periods-1), // runtime->period_size * (count & (runtime->periods-1))); return runtime->period_size * (count & (runtime->periods-1)); } /* * page callback (needed for mmap) */ static struct page *snd_cx88_page(struct snd_pcm_substream *substream, unsigned 
long offset) { void *pageptr = substream->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } /* * operators */ static struct snd_pcm_ops snd_cx88_pcm_ops = { .open = snd_cx88_pcm_open, .close = snd_cx88_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cx88_hw_params, .hw_free = snd_cx88_hw_free, .prepare = snd_cx88_prepare, .trigger = snd_cx88_card_trigger, .pointer = snd_cx88_pointer, .page = snd_cx88_page, }; /* * create a PCM device */ static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name) { int err; struct snd_pcm *pcm; err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm); if (err < 0) return err; pcm->private_data = chip; strcpy(pcm->name, name); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cx88_pcm_ops); return 0; } /**************************************************************************** CONTROL INTERFACE ****************************************************************************/ static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0; info->value.integer.max = 0x3f; return 0; } static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core=chip->core; int vol = 0x3f - (cx_read(AUD_VOL_CTL) & 0x3f), bal = cx_read(AUD_BAL_CTL); value->value.integer.value[(bal & 0x40) ? 0 : 1] = vol; vol -= (bal & 0x3f); value->value.integer.value[(bal & 0x40) ? 1 : 0] = vol < 0 ? 
0 : vol; return 0; } static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; struct v4l2_control client_ctl; int left = value->value.integer.value[0]; int right = value->value.integer.value[1]; int v, b; memset(&client_ctl, 0, sizeof(client_ctl)); /* Pass volume & balance onto any WM8775 */ if (left >= right) { v = left << 10; b = left ? (0x8000 * right) / left : 0x8000; } else { v = right << 10; b = right ? 0xffff - (0x8000 * left) / right : 0x8000; } client_ctl.value = v; client_ctl.id = V4L2_CID_AUDIO_VOLUME; call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); client_ctl.value = b; client_ctl.id = V4L2_CID_AUDIO_BALANCE; call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); } /* OK - TODO: test it */ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core=chip->core; int left, right, v, b; int changed = 0; u32 old; if (core->board.audio_chip == V4L2_IDENT_WM8775) snd_cx88_wm8775_volume_put(kcontrol, value); left = value->value.integer.value[0] & 0x3f; right = value->value.integer.value[1] & 0x3f; b = right - left; if (b < 0) { v = 0x3f - left; b = (-b) | 0x40; } else { v = 0x3f - right; } /* Do we really know this will always be called with IRQs on? 
*/ spin_lock_irq(&chip->reg_lock); old = cx_read(AUD_VOL_CTL); if (v != (old & 0x3f)) { cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, (old & ~0x3f) | v); changed = 1; } if ((cx_read(AUD_BAL_CTL) & 0x7f) != b) { cx_write(AUD_BAL_CTL, b); changed = 1; } spin_unlock_irq(&chip->reg_lock); return changed; } static const DECLARE_TLV_DB_SCALE(snd_cx88_db_scale, -6300, 100, 0); static const struct snd_kcontrol_new snd_cx88_volume = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .name = "Analog-TV Volume", .info = snd_cx88_volume_info, .get = snd_cx88_volume_get, .put = snd_cx88_volume_put, .tlv.p = snd_cx88_db_scale, }; static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; u32 bit = kcontrol->private_value; value->value.integer.value[0] = !(cx_read(AUD_VOL_CTL) & bit); return 0; } static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; u32 bit = kcontrol->private_value; int ret = 0; u32 vol; spin_lock_irq(&chip->reg_lock); vol = cx_read(AUD_VOL_CTL); if (value->value.integer.value[0] != !(vol & bit)) { vol ^= bit; cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol); /* Pass mute onto any WM8775 */ if ((core->board.audio_chip == V4L2_IDENT_WM8775) && ((1<<6) == bit)) { struct v4l2_control client_ctl; memset(&client_ctl, 0, sizeof(client_ctl)); client_ctl.value = 0 != (vol & bit); client_ctl.id = V4L2_CID_AUDIO_MUTE; call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); } ret = 1; } spin_unlock_irq(&chip->reg_lock); return ret; } static const struct snd_kcontrol_new snd_cx88_dac_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Audio-Out Switch", .info = snd_ctl_boolean_mono_info, .get = snd_cx88_switch_get, .put = snd_cx88_switch_put, 
.private_value = (1<<8), }; static const struct snd_kcontrol_new snd_cx88_source_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog-TV Switch", .info = snd_ctl_boolean_mono_info, .get = snd_cx88_switch_get, .put = snd_cx88_switch_put, .private_value = (1<<6), }; static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; struct v4l2_control client_ctl; memset(&client_ctl, 0, sizeof(client_ctl)); client_ctl.id = V4L2_CID_AUDIO_LOUDNESS; call_hw(core, WM8775_GID, core, g_ctrl, &client_ctl); value->value.integer.value[0] = client_ctl.value ? 1 : 0; return 0; } static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; struct v4l2_control client_ctl; memset(&client_ctl, 0, sizeof(client_ctl)); client_ctl.value = 0 != value->value.integer.value[0]; client_ctl.id = V4L2_CID_AUDIO_LOUDNESS; call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); return 0; } static struct snd_kcontrol_new snd_cx88_alc_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line-In ALC Switch", .info = snd_ctl_boolean_mono_info, .get = snd_cx88_alc_get, .put = snd_cx88_alc_put, }; /**************************************************************************** Basic Flow for Sound Devices ****************************************************************************/ /* * PCI ID Table - 14f1:8801 and 14f1:8811 means function 1: Audio * Only boards with eeprom and byte 1 at eeprom=1 have it */ static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = { {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, {0, } }; MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl); /* * Chip-specific destructor */ static int snd_cx88_free(snd_cx88_card_t *chip) { if (chip->irq >= 0) free_irq(chip->irq, 
chip); cx88_core_put(chip->core,chip->pci); pci_disable_device(chip->pci); return 0; } /* * Component Destructor */ static void snd_cx88_dev_free(struct snd_card * card) { snd_cx88_card_t *chip = card->private_data; snd_cx88_free(chip); } /* * Alsa Constructor - Component probe */ static int devno; static int __devinit snd_cx88_create(struct snd_card *card, struct pci_dev *pci, snd_cx88_card_t **rchip, struct cx88_core **core_ptr) { snd_cx88_card_t *chip; struct cx88_core *core; int err; unsigned char pci_lat; *rchip = NULL; err = pci_enable_device(pci); if (err < 0) return err; pci_set_master(pci); chip = card->private_data; core = cx88_core_get(pci); if (NULL == core) { err = -EINVAL; return err; } if (!pci_dma_supported(pci,DMA_BIT_MASK(32))) { dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); err = -EIO; cx88_core_put(core, pci); return err; } /* pci init */ chip->card = card; chip->pci = pci; chip->irq = -1; spin_lock_init(&chip->reg_lock); chip->core = core; /* get irq */ err = request_irq(chip->pci->irq, cx8801_irq, IRQF_SHARED | IRQF_DISABLED, chip->core->name, chip); if (err < 0) { dprintk(0, "%s: can't get IRQ %d\n", chip->core->name, chip->pci->irq); return err; } /* print pci info */ pci_read_config_byte(pci, PCI_LATENCY_TIMER, &pci_lat); dprintk(1,"ALSA %s/%i: found at %s, rev: %d, irq: %d, " "latency: %d, mmio: 0x%llx\n", core->name, devno, pci_name(pci), pci->revision, pci->irq, pci_lat, (unsigned long long)pci_resource_start(pci,0)); chip->irq = pci->irq; synchronize_irq(chip->irq); snd_card_set_dev(card, &pci->dev); *rchip = chip; *core_ptr = core; return 0; } static int __devinit cx88_audio_initdev(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; snd_cx88_card_t *chip; struct cx88_core *core = NULL; int err; if (devno >= SNDRV_CARDS) return (-ENODEV); if (!enable[devno]) { ++devno; return (-ENOENT); } err = snd_card_create(index[devno], id[devno], THIS_MODULE, sizeof(snd_cx88_card_t), &card); if (err < 
0) return err; card->private_free = snd_cx88_dev_free; err = snd_cx88_create(card, pci, &chip, &core); if (err < 0) goto error; err = snd_cx88_pcm(chip, 0, "CX88 Digital"); if (err < 0) goto error; err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_volume, chip)); if (err < 0) goto error; err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_dac_switch, chip)); if (err < 0) goto error; err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_source_switch, chip)); if (err < 0) goto error; /* If there's a wm8775 then add a Line-In ALC switch */ if (core->board.audio_chip == V4L2_IDENT_WM8775) snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip)); strcpy (card->driver, "CX88x"); sprintf(card->shortname, "Conexant CX%x", pci->device); sprintf(card->longname, "%s at %#llx", card->shortname,(unsigned long long)pci_resource_start(pci, 0)); strcpy (card->mixername, "CX88"); dprintk (0, "%s/%i: ALSA support for cx2388x boards\n", card->driver,devno); err = snd_card_register(card); if (err < 0) goto error; pci_set_drvdata(pci,card); devno++; return 0; error: snd_card_free(card); return err; } /* * ALSA destructor */ static void __devexit cx88_audio_finidev(struct pci_dev *pci) { struct cx88_audio_dev *card = pci_get_drvdata(pci); snd_card_free((void *)card); pci_set_drvdata(pci, NULL); devno--; } /* * PCI driver definition */ static struct pci_driver cx88_audio_pci_driver = { .name = "cx88_audio", .id_table = cx88_audio_pci_tbl, .probe = cx88_audio_initdev, .remove = __devexit_p(cx88_audio_finidev), }; /**************************************************************************** LINUX MODULE INIT ****************************************************************************/ /* * module init */ static int __init cx88_audio_init(void) { printk(KERN_INFO "cx2388x alsa driver version %s loaded\n", CX88_VERSION); return pci_register_driver(&cx88_audio_pci_driver); } /* * module remove */ static void __exit cx88_audio_fini(void) { pci_unregister_driver(&cx88_audio_pci_driver); } 
module_init(cx88_audio_init); module_exit(cx88_audio_fini);
gpl-2.0
jrior001/android_kernel_oneplus_msm8974
arch/openrisc/lib/delay.c
5205
1383
/* * OpenRISC Linux * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation * * Precise Delay Loops */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <asm/delay.h> #include <asm/timex.h> #include <asm/processor.h> int __devinit read_current_timer(unsigned long *timer_value) { *timer_value = mfspr(SPR_TTCR); return 0; } void __delay(unsigned long cycles) { cycles_t target = get_cycles() + cycles; while (get_cycles() < target) cpu_relax(); } EXPORT_SYMBOL(__delay); inline void __const_udelay(unsigned long xloops) { unsigned long long loops; loops = xloops * loops_per_jiffy * HZ; __delay(loops >> 32); } EXPORT_SYMBOL(__const_udelay); void __udelay(unsigned long usecs) { __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */ } EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */ } EXPORT_SYMBOL(__ndelay);
gpl-2.0
zarboz/Evita_UL_422-JB
arch/blackfin/mach-common/pm.c
7509
5469
/* * Blackfin power management * * Copyright 2006-2009 Analog Devices Inc. * * Licensed under the GPL-2 * based on arm/mach-omap/pm.c * Copyright 2001, Cliff Brake <cbrake@accelent.com> and others */ #include <linux/suspend.h> #include <linux/sched.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/irq.h> #include <asm/cplb.h> #include <asm/gpio.h> #include <asm/dma.h> #include <asm/dpmc.h> void bfin_pm_suspend_standby_enter(void) { bfin_pm_standby_setup(); #ifdef CONFIG_PM_BFIN_SLEEP_DEEPER sleep_deeper(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]); #else sleep_mode(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]); #endif bfin_pm_standby_restore(); #ifdef SIC_IWR0 bfin_write_SIC_IWR0(IWR_DISABLE_ALL); # ifdef SIC_IWR1 /* BF52x system reset does not properly reset SIC_IWR1 which * will screw up the bootrom as it relies on MDMA0/1 waking it * up from IDLE instructions. See this report for more info: * http://blackfin.uclinux.org/gf/tracker/4323 */ if (ANOMALY_05000435) bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11)); else bfin_write_SIC_IWR1(IWR_DISABLE_ALL); # endif # ifdef SIC_IWR2 bfin_write_SIC_IWR2(IWR_DISABLE_ALL); # endif #else bfin_write_SIC_IWR(IWR_DISABLE_ALL); #endif } int bf53x_suspend_l1_mem(unsigned char *memptr) { dma_memcpy_nocache(memptr, (const void *) L1_CODE_START, L1_CODE_LENGTH); dma_memcpy_nocache(memptr + L1_CODE_LENGTH, (const void *) L1_DATA_A_START, L1_DATA_A_LENGTH); dma_memcpy_nocache(memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH, (const void *) L1_DATA_B_START, L1_DATA_B_LENGTH); memcpy(memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH + L1_DATA_B_LENGTH, (const void *) L1_SCRATCH_START, L1_SCRATCH_LENGTH); return 0; } int bf53x_resume_l1_mem(unsigned char *memptr) { dma_memcpy_nocache((void *) L1_CODE_START, memptr, L1_CODE_LENGTH); dma_memcpy_nocache((void *) L1_DATA_A_START, memptr + L1_CODE_LENGTH, L1_DATA_A_LENGTH); dma_memcpy_nocache((void *) L1_DATA_B_START, memptr + L1_CODE_LENGTH + 
L1_DATA_A_LENGTH, L1_DATA_B_LENGTH); memcpy((void *) L1_SCRATCH_START, memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH + L1_DATA_B_LENGTH, L1_SCRATCH_LENGTH); return 0; } #if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK) static void flushinv_all_dcache(void) { u32 way, bank, subbank, set; u32 status, addr; u32 dmem_ctl = bfin_read_DMEM_CONTROL(); for (bank = 0; bank < 2; ++bank) { if (!(dmem_ctl & (1 << (DMC1_P - bank)))) continue; for (way = 0; way < 2; ++way) for (subbank = 0; subbank < 4; ++subbank) for (set = 0; set < 64; ++set) { bfin_write_DTEST_COMMAND( way << 26 | bank << 23 | subbank << 16 | set << 5 ); CSYNC(); status = bfin_read_DTEST_DATA0(); /* only worry about valid/dirty entries */ if ((status & 0x3) != 0x3) continue; /* construct the address using the tag */ addr = (status & 0xFFFFC800) | (subbank << 12) | (set << 5); /* flush it */ __asm__ __volatile__("FLUSHINV[%0];" : : "a"(addr)); } } } #endif int bfin_pm_suspend_mem_enter(void) { int wakeup, ret; unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH + L1_DATA_B_LENGTH + L1_SCRATCH_LENGTH, GFP_KERNEL); if (memptr == NULL) { panic("bf53x_suspend_l1_mem malloc failed"); return -ENOMEM; } wakeup = bfin_read_VR_CTL() & ~FREQ; wakeup |= SCKELOW; #ifdef CONFIG_PM_BFIN_WAKE_PH6 wakeup |= PHYWE; #endif #ifdef CONFIG_PM_BFIN_WAKE_GP wakeup |= GPWE; #endif ret = blackfin_dma_suspend(); if (ret) { kfree(memptr); return ret; } bfin_gpio_pm_hibernate_suspend(); #if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK) flushinv_all_dcache(); #endif _disable_dcplb(); _disable_icplb(); bf53x_suspend_l1_mem(memptr); do_hibernate(wakeup | vr_wakeup); /* See you later! */ bf53x_resume_l1_mem(memptr); _enable_icplb(); _enable_dcplb(); bfin_gpio_pm_hibernate_restore(); blackfin_dma_resume(); kfree(memptr); return 0; } /* * bfin_pm_valid - Tell the PM core that we only support the standby sleep * state * @state: suspend state we're checking. 
* */ static int bfin_pm_valid(suspend_state_t state) { return (state == PM_SUSPEND_STANDBY #if !(defined(BF533_FAMILY) || defined(CONFIG_BF561)) /* * On BF533/2/1: * If we enter Hibernate the SCKE Pin is driven Low, * so that the SDRAM enters Self Refresh Mode. * However when the reset sequence that follows hibernate * state is executed, SCKE is driven High, taking the * SDRAM out of Self Refresh. * * If you reconfigure and access the SDRAM "very quickly", * you are likely to avoid errors, otherwise the SDRAM * start losing its contents. * An external HW workaround is possible using logic gates. */ || state == PM_SUSPEND_MEM #endif ); } /* * bfin_pm_enter - Actually enter a sleep state. * @state: State we're entering. * */ static int bfin_pm_enter(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: bfin_pm_suspend_standby_enter(); break; case PM_SUSPEND_MEM: bfin_pm_suspend_mem_enter(); break; default: return -EINVAL; } return 0; } static const struct platform_suspend_ops bfin_pm_ops = { .enter = bfin_pm_enter, .valid = bfin_pm_valid, }; static int __init bfin_pm_init(void) { suspend_set_ops(&bfin_pm_ops); return 0; } __initcall(bfin_pm_init);
gpl-2.0
shaqfu786/android_kernel_lge_omap4-common
drivers/gpu/drm/nouveau/nv50_fb.c
8021
7250
#include "drmP.h" #include "drm.h" #include "nouveau_drv.h" #include "nouveau_drm.h" struct nv50_fb_priv { struct page *r100c08_page; dma_addr_t r100c08; }; static void nv50_fb_destroy(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nv50_fb_priv *priv = pfb->priv; if (drm_mm_initialized(&pfb->tag_heap)) drm_mm_takedown(&pfb->tag_heap); if (priv->r100c08_page) { pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); __free_page(priv->r100c08_page); } kfree(priv); pfb->priv = NULL; } static int nv50_fb_create(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nv50_fb_priv *priv; u32 tagmem; int ret; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; pfb->priv = priv; priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!priv->r100c08_page) { nv50_fb_destroy(dev); return -ENOMEM; } priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) { nv50_fb_destroy(dev); return -EFAULT; } tagmem = nv_rd32(dev, 0x100320); NV_DEBUG(dev, "%d tags available\n", tagmem); ret = drm_mm_init(&pfb->tag_heap, 0, tagmem); if (ret) { nv50_fb_destroy(dev); return ret; } return 0; } int nv50_fb_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_fb_priv *priv; int ret; if (!dev_priv->engine.fb.priv) { ret = nv50_fb_create(dev); if (ret) return ret; } priv = dev_priv->engine.fb.priv; /* Not a clue what this is exactly. Without pointing it at a * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) * cause IOMMU "read from address 0" errors (rh#561267) */ nv_wr32(dev, 0x100c08, priv->r100c08 >> 8); /* This is needed to get meaningful information from 100c90 * on traps. No idea what these values mean exactly. 
*/ switch (dev_priv->chipset) { case 0x50: nv_wr32(dev, 0x100c90, 0x000707ff); break; case 0xa3: case 0xa5: case 0xa8: nv_wr32(dev, 0x100c90, 0x000d0fff); break; case 0xaf: nv_wr32(dev, 0x100c90, 0x089d1fff); break; default: nv_wr32(dev, 0x100c90, 0x001d07ff); break; } return 0; } void nv50_fb_takedown(struct drm_device *dev) { nv50_fb_destroy(dev); } static struct nouveau_enum vm_dispatch_subclients[] = { { 0x00000000, "GRCTX", NULL }, { 0x00000001, "NOTIFY", NULL }, { 0x00000002, "QUERY", NULL }, { 0x00000003, "COND", NULL }, { 0x00000004, "M2M_IN", NULL }, { 0x00000005, "M2M_OUT", NULL }, { 0x00000006, "M2M_NOTIFY", NULL }, {} }; static struct nouveau_enum vm_ccache_subclients[] = { { 0x00000000, "CB", NULL }, { 0x00000001, "TIC", NULL }, { 0x00000002, "TSC", NULL }, {} }; static struct nouveau_enum vm_prop_subclients[] = { { 0x00000000, "RT0", NULL }, { 0x00000001, "RT1", NULL }, { 0x00000002, "RT2", NULL }, { 0x00000003, "RT3", NULL }, { 0x00000004, "RT4", NULL }, { 0x00000005, "RT5", NULL }, { 0x00000006, "RT6", NULL }, { 0x00000007, "RT7", NULL }, { 0x00000008, "ZETA", NULL }, { 0x00000009, "LOCAL", NULL }, { 0x0000000a, "GLOBAL", NULL }, { 0x0000000b, "STACK", NULL }, { 0x0000000c, "DST2D", NULL }, {} }; static struct nouveau_enum vm_pfifo_subclients[] = { { 0x00000000, "PUSHBUF", NULL }, { 0x00000001, "SEMAPHORE", NULL }, {} }; static struct nouveau_enum vm_bar_subclients[] = { { 0x00000000, "FB", NULL }, { 0x00000001, "IN", NULL }, {} }; static struct nouveau_enum vm_client[] = { { 0x00000000, "STRMOUT", NULL }, { 0x00000003, "DISPATCH", vm_dispatch_subclients }, { 0x00000004, "PFIFO_WRITE", NULL }, { 0x00000005, "CCACHE", vm_ccache_subclients }, { 0x00000006, "PPPP", NULL }, { 0x00000007, "CLIPID", NULL }, { 0x00000008, "PFIFO_READ", NULL }, { 0x00000009, "VFETCH", NULL }, { 0x0000000a, "TEXTURE", NULL }, { 0x0000000b, "PROP", vm_prop_subclients }, { 0x0000000c, "PVP", NULL }, { 0x0000000d, "PBSP", NULL }, { 0x0000000e, "PCRYPT", NULL }, { 0x0000000f, 
"PCOUNTER", NULL }, { 0x00000011, "PDAEMON", NULL }, {} }; static struct nouveau_enum vm_engine[] = { { 0x00000000, "PGRAPH", NULL }, { 0x00000001, "PVP", NULL }, { 0x00000004, "PEEPHOLE", NULL }, { 0x00000005, "PFIFO", vm_pfifo_subclients }, { 0x00000006, "BAR", vm_bar_subclients }, { 0x00000008, "PPPP", NULL }, { 0x00000009, "PBSP", NULL }, { 0x0000000a, "PCRYPT", NULL }, { 0x0000000b, "PCOUNTER", NULL }, { 0x0000000c, "SEMAPHORE_BG", NULL }, { 0x0000000d, "PCOPY", NULL }, { 0x0000000e, "PDAEMON", NULL }, {} }; static struct nouveau_enum vm_fault[] = { { 0x00000000, "PT_NOT_PRESENT", NULL }, { 0x00000001, "PT_TOO_SHORT", NULL }, { 0x00000002, "PAGE_NOT_PRESENT", NULL }, { 0x00000003, "PAGE_SYSTEM_ONLY", NULL }, { 0x00000004, "PAGE_READ_ONLY", NULL }, { 0x00000006, "NULL_DMAOBJ", NULL }, { 0x00000007, "WRONG_MEMTYPE", NULL }, { 0x0000000b, "VRAM_LIMIT", NULL }, { 0x0000000f, "DMAOBJ_LIMIT", NULL }, {} }; void nv50_fb_vm_trap(struct drm_device *dev, int display) { struct drm_nouveau_private *dev_priv = dev->dev_private; const struct nouveau_enum *en, *cl; unsigned long flags; u32 trap[6], idx, chinst; u8 st0, st1, st2, st3; int i, ch; idx = nv_rd32(dev, 0x100c90); if (!(idx & 0x80000000)) return; idx &= 0x00ffffff; for (i = 0; i < 6; i++) { nv_wr32(dev, 0x100c90, idx | i << 24); trap[i] = nv_rd32(dev, 0x100c94); } nv_wr32(dev, 0x100c90, idx | 0x80000000); if (!display) return; /* lookup channel id */ chinst = (trap[2] << 16) | trap[1]; spin_lock_irqsave(&dev_priv->channels.lock, flags); for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { struct nouveau_channel *chan = dev_priv->channels.ptr[ch]; if (!chan || !chan->ramin) continue; if (chinst == chan->ramin->vinst >> 12) break; } spin_unlock_irqrestore(&dev_priv->channels.lock, flags); /* decode status bits into something more useful */ if (dev_priv->chipset < 0xa3 || dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) { st0 = (trap[0] & 0x0000000f) >> 0; st1 = (trap[0] & 0x000000f0) >> 4; st2 = (trap[0] 
& 0x00000f00) >> 8; st3 = (trap[0] & 0x0000f000) >> 12; } else { st0 = (trap[0] & 0x000000ff) >> 0; st1 = (trap[0] & 0x0000ff00) >> 8; st2 = (trap[0] & 0x00ff0000) >> 16; st3 = (trap[0] & 0xff000000) >> 24; } NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ", (trap[5] & 0x00000100) ? "read" : "write", trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst); en = nouveau_enum_find(vm_engine, st0); if (en) printk("%s/", en->name); else printk("%02x/", st0); cl = nouveau_enum_find(vm_client, st2); if (cl) printk("%s/", cl->name); else printk("%02x/", st2); if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3); else if (en && en->data) cl = nouveau_enum_find(en->data, st3); else cl = NULL; if (cl) printk("%s", cl->name); else printk("%02x", st3); printk(" reason: "); en = nouveau_enum_find(vm_fault, st1); if (en) printk("%s\n", en->name); else printk("0x%08x\n", st1); }
gpl-2.0
kernel-hut/android_kernel_xiaomi_cancro
drivers/media/common/tuners/mt2060.c
8021
10254
/* * Driver for Microtune MT2060 "Single chip dual conversion broadband tuner" * * Copyright (c) 2006 Olivier DANET <odanet@caramail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= */ /* In that file, frequencies are expressed in kiloHertz to avoid 32 bits overflows */ #include <linux/module.h> #include <linux/delay.h> #include <linux/dvb/frontend.h> #include <linux/i2c.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "mt2060.h" #include "mt2060_priv.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); #define dprintk(args...) 
do { if (debug) {printk(KERN_DEBUG "MT2060: " args); printk("\n"); }} while (0) // Reads a single register static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val) { struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, }; if (i2c_transfer(priv->i2c, msg, 2) != 2) { printk(KERN_WARNING "mt2060 I2C read failed\n"); return -EREMOTEIO; } return 0; } // Writes a single register static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val) { u8 buf[2] = { reg, val }; struct i2c_msg msg = { .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 }; if (i2c_transfer(priv->i2c, &msg, 1) != 1) { printk(KERN_WARNING "mt2060 I2C write failed\n"); return -EREMOTEIO; } return 0; } // Writes a set of consecutive registers static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) { struct i2c_msg msg = { .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = len }; if (i2c_transfer(priv->i2c, &msg, 1) != 1) { printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n",(int)len); return -EREMOTEIO; } return 0; } // Initialisation sequences // LNABAND=3, NUM1=0x3C, DIV1=0x74, NUM2=0x1080, DIV2=0x49 static u8 mt2060_config1[] = { REG_LO1C1, 0x3F, 0x74, 0x00, 0x08, 0x93 }; // FMCG=2, GP2=0, GP1=0 static u8 mt2060_config2[] = { REG_MISC_CTRL, 0x20, 0x1E, 0x30, 0xff, 0x80, 0xff, 0x00, 0x2c, 0x42 }; // VGAG=3, V1CSE=1 #ifdef MT2060_SPURCHECK /* The function below calculates the frequency offset between the output frequency if2 and the closer cross modulation subcarrier between lo1 and lo2 up to the tenth harmonic */ static int mt2060_spurcalc(u32 lo1,u32 lo2,u32 if2) { int I,J; int dia,diamin,diff; diamin=1000000; for (I = 1; I < 10; I++) { J = ((2*I*lo1)/lo2+1)/2; diff = I*(int)lo1-J*(int)lo2; if (diff < 0) diff=-diff; dia = (diff-(int)if2); if (dia < 0) dia=-dia; if (diamin > dia) diamin=dia; } return diamin; } #define 
BANDWIDTH 4000 // kHz /* Calculates the frequency offset to add to avoid spurs. Returns 0 if no offset is needed */ static int mt2060_spurcheck(u32 lo1,u32 lo2,u32 if2) { u32 Spur,Sp1,Sp2; int I,J; I=0; J=1000; Spur=mt2060_spurcalc(lo1,lo2,if2); if (Spur < BANDWIDTH) { /* Potential spurs detected */ dprintk("Spurs before : f_lo1: %d f_lo2: %d (kHz)", (int)lo1,(int)lo2); I=1000; Sp1 = mt2060_spurcalc(lo1+I,lo2+I,if2); Sp2 = mt2060_spurcalc(lo1-I,lo2-I,if2); if (Sp1 < Sp2) { J=-J; I=-I; Spur=Sp2; } else Spur=Sp1; while (Spur < BANDWIDTH) { I += J; Spur = mt2060_spurcalc(lo1+I,lo2+I,if2); } dprintk("Spurs after : f_lo1: %d f_lo2: %d (kHz)", (int)(lo1+I),(int)(lo2+I)); } return I; } #endif #define IF2 36150 // IF2 frequency = 36.150 MHz #define FREF 16000 // Quartz oscillator 16 MHz static int mt2060_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct mt2060_priv *priv; int ret=0; int i=0; u32 freq; u8 lnaband; u32 f_lo1,f_lo2; u32 div1,num1,div2,num2; u8 b[8]; u32 if1; priv = fe->tuner_priv; if1 = priv->if1_freq; b[0] = REG_LO1B1; b[1] = 0xFF; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ mt2060_writeregs(priv,b,2); freq = c->frequency / 1000; /* Hz -> kHz */ f_lo1 = freq + if1 * 1000; f_lo1 = (f_lo1 / 250) * 250; f_lo2 = f_lo1 - freq - IF2; // From the Comtech datasheet, the step used is 50kHz. 
The tuner chip could be more precise f_lo2 = ((f_lo2 + 25) / 50) * 50; priv->frequency = (f_lo1 - f_lo2 - IF2) * 1000, #ifdef MT2060_SPURCHECK // LO-related spurs detection and correction num1 = mt2060_spurcheck(f_lo1,f_lo2,IF2); f_lo1 += num1; f_lo2 += num1; #endif //Frequency LO1 = 16MHz * (DIV1 + NUM1/64 ) num1 = f_lo1 / (FREF / 64); div1 = num1 / 64; num1 &= 0x3f; // Frequency LO2 = 16MHz * (DIV2 + NUM2/8192 ) num2 = f_lo2 * 64 / (FREF / 128); div2 = num2 / 8192; num2 &= 0x1fff; if (freq <= 95000) lnaband = 0xB0; else if (freq <= 180000) lnaband = 0xA0; else if (freq <= 260000) lnaband = 0x90; else if (freq <= 335000) lnaband = 0x80; else if (freq <= 425000) lnaband = 0x70; else if (freq <= 480000) lnaband = 0x60; else if (freq <= 570000) lnaband = 0x50; else if (freq <= 645000) lnaband = 0x40; else if (freq <= 730000) lnaband = 0x30; else if (freq <= 810000) lnaband = 0x20; else lnaband = 0x10; b[0] = REG_LO1C1; b[1] = lnaband | ((num1 >>2) & 0x0F); b[2] = div1; b[3] = (num2 & 0x0F) | ((num1 & 3) << 4); b[4] = num2 >> 4; b[5] = ((num2 >>12) & 1) | (div2 << 1); dprintk("IF1: %dMHz",(int)if1); dprintk("PLL freq=%dkHz f_lo1=%dkHz f_lo2=%dkHz",(int)freq,(int)f_lo1,(int)f_lo2); dprintk("PLL div1=%d num1=%d div2=%d num2=%d",(int)div1,(int)num1,(int)div2,(int)num2); dprintk("PLL [1..5]: %2x %2x %2x %2x %2x",(int)b[1],(int)b[2],(int)b[3],(int)b[4],(int)b[5]); mt2060_writeregs(priv,b,6); //Waits for pll lock or timeout i = 0; do { mt2060_readreg(priv,REG_LO_STATUS,b); if ((b[0] & 0x88)==0x88) break; msleep(4); i++; } while (i<10); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return ret; } static void mt2060_calibrate(struct mt2060_priv *priv) { u8 b = 0; int i = 0; if (mt2060_writeregs(priv,mt2060_config1,sizeof(mt2060_config1))) return; if (mt2060_writeregs(priv,mt2060_config2,sizeof(mt2060_config2))) return; /* initialize the clock output */ mt2060_writereg(priv, REG_VGAG, (priv->cfg->clock_out << 6) | 0x30); do { b |= (1 << 6); // 
FM1SS; mt2060_writereg(priv, REG_LO2C1,b); msleep(20); if (i == 0) { b |= (1 << 7); // FM1CA; mt2060_writereg(priv, REG_LO2C1,b); b &= ~(1 << 7); // FM1CA; msleep(20); } b &= ~(1 << 6); // FM1SS mt2060_writereg(priv, REG_LO2C1,b); msleep(20); i++; } while (i < 9); i = 0; while (i++ < 10 && mt2060_readreg(priv, REG_MISC_STAT, &b) == 0 && (b & (1 << 6)) == 0) msleep(20); if (i <= 10) { mt2060_readreg(priv, REG_FM_FREQ, &priv->fmfreq); // now find out, what is fmreq used for :) dprintk("calibration was successful: %d", (int)priv->fmfreq); } else dprintk("FMCAL timed out"); } static int mt2060_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct mt2060_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static int mt2060_get_if_frequency(struct dvb_frontend *fe, u32 *frequency) { *frequency = IF2 * 1000; return 0; } static int mt2060_init(struct dvb_frontend *fe) { struct mt2060_priv *priv = fe->tuner_priv; int ret; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = mt2060_writereg(priv, REG_VGAG, (priv->cfg->clock_out << 6) | 0x33); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return ret; } static int mt2060_sleep(struct dvb_frontend *fe) { struct mt2060_priv *priv = fe->tuner_priv; int ret; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = mt2060_writereg(priv, REG_VGAG, (priv->cfg->clock_out << 6) | 0x30); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return ret; } static int mt2060_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static const struct dvb_tuner_ops mt2060_tuner_ops = { .info = { .name = "Microtune MT2060", .frequency_min = 48000000, .frequency_max = 860000000, .frequency_step = 50000, }, .release = mt2060_release, .init = mt2060_init, .sleep = mt2060_sleep, .set_params = mt2060_set_params, .get_frequency = mt2060_get_frequency, .get_if_frequency = 
mt2060_get_if_frequency, }; /* This functions tries to identify a MT2060 tuner by reading the PART/REV register. This is hasty. */ struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1) { struct mt2060_priv *priv = NULL; u8 id = 0; priv = kzalloc(sizeof(struct mt2060_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->cfg = cfg; priv->i2c = i2c; priv->if1_freq = if1; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ if (mt2060_readreg(priv,REG_PART_REV,&id) != 0) { kfree(priv); return NULL; } if (id != PART_REV) { kfree(priv); return NULL; } printk(KERN_INFO "MT2060: successfully identified (IF1 = %d)\n", if1); memcpy(&fe->ops.tuner_ops, &mt2060_tuner_ops, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = priv; mt2060_calibrate(priv); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return fe; } EXPORT_SYMBOL(mt2060_attach); MODULE_AUTHOR("Olivier DANET"); MODULE_DESCRIPTION("Microtune MT2060 silicon tuner driver"); MODULE_LICENSE("GPL");
gpl-2.0
tilaksidduram/Stock_kernel
fs/partitions/ibm.c
8277
6956
/* * File...........: linux/fs/partitions/ibm.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Volker Sameske <sameske@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 */ #include <linux/buffer_head.h> #include <linux/hdreg.h> #include <linux/slab.h> #include <asm/dasd.h> #include <asm/ebcdic.h> #include <asm/uaccess.h> #include <asm/vtoc.h> #include "check.h" #include "ibm.h" /* * compute the block number from a * cyl-cyl-head-head structure */ static sector_t cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) { sector_t cyl; __u16 head; /*decode cylinder and heads for large volumes */ cyl = ptr->hh & 0xFFF0; cyl <<= 12; cyl |= ptr->cc; head = ptr->hh & 0x000F; return cyl * geo->heads * geo->sectors + head * geo->sectors; } /* * compute the block number from a * cyl-cyl-head-head-block structure */ static sector_t cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) { sector_t cyl; __u16 head; /*decode cylinder and heads for large volumes */ cyl = ptr->hh & 0xFFF0; cyl <<= 12; cyl |= ptr->cc; head = ptr->hh & 0x000F; return cyl * geo->heads * geo->sectors + head * geo->sectors + ptr->b; } /* */ int ibm_partition(struct parsed_partitions *state) { struct block_device *bdev = state->bdev; int blocksize, res; loff_t i_size, offset, size, fmt_size; dasd_information2_t *info; struct hd_geometry *geo; char type[5] = {0,}; char name[7] = {0,}; union label_t { struct vtoc_volume_label_cdl vol; struct vtoc_volume_label_ldl lnx; struct vtoc_cms_label cms; } *label; unsigned char *data; Sector sect; sector_t labelsect; char tmp[64]; res = 0; blocksize = bdev_logical_block_size(bdev); if (blocksize <= 0) goto out_exit; i_size = i_size_read(bdev->bd_inode); if (i_size == 0) goto out_exit; info = kmalloc(sizeof(dasd_information2_t), GFP_KERNEL); if (info == NULL) goto out_exit; geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL); if (geo == NULL) goto out_nogeo; label = 
kmalloc(sizeof(union label_t), GFP_KERNEL); if (label == NULL) goto out_nolab; if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0 || ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0) goto out_freeall; /* * Special case for FBA disks: label sector does not depend on * blocksize. */ if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) || (info->cu_type == 0x3880 && info->dev_type == 0x3370)) labelsect = info->label_block; else labelsect = info->label_block * (blocksize >> 9); /* * Get volume label, extract name and type. */ data = read_part_sector(state, labelsect, &sect); if (data == NULL) goto out_readerr; memcpy(label, data, sizeof(union label_t)); put_dev_sector(sect); if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD"))) { strncpy(type, label->vol.vollbl, 4); strncpy(name, label->vol.volid, 6); } else { strncpy(type, label->lnx.vollbl, 4); strncpy(name, label->lnx.volid, 6); } EBCASC(type, 4); EBCASC(name, 6); res = 1; /* * Three different formats: LDL, CDL and unformated disk * * identified by info->format * * unformated disks we do not have to care about */ if (info->format == DASD_FORMAT_LDL) { if (strncmp(type, "CMS1", 4) == 0) { /* * VM style CMS1 labeled disk */ blocksize = label->cms.block_size; if (label->cms.disk_offset != 0) { snprintf(tmp, sizeof(tmp), "CMS1/%8s(MDSK):", name); strlcat(state->pp_buf, tmp, PAGE_SIZE); /* disk is reserved minidisk */ offset = label->cms.disk_offset; size = (label->cms.block_count - 1) * (blocksize >> 9); } else { snprintf(tmp, sizeof(tmp), "CMS1/%8s:", name); strlcat(state->pp_buf, tmp, PAGE_SIZE); offset = (info->label_block + 1); size = label->cms.block_count * (blocksize >> 9); } put_partition(state, 1, offset*(blocksize >> 9), size-offset*(blocksize >> 9)); } else { if (strncmp(type, "LNX1", 4) == 0) { snprintf(tmp, sizeof(tmp), "LNX1/%8s:", name); strlcat(state->pp_buf, tmp, PAGE_SIZE); if (label->lnx.ldl_version == 0xf2) { fmt_size = label->lnx.formatted_blocks * (blocksize >> 9); } 
else if (!strcmp(info->type, "ECKD")) { /* formated w/o large volume support */ fmt_size = geo->cylinders * geo->heads * geo->sectors * (blocksize >> 9); } else { /* old label and no usable disk geometry * (e.g. DIAG) */ fmt_size = i_size >> 9; } size = i_size >> 9; if (fmt_size < size) size = fmt_size; offset = (info->label_block + 1); } else { /* unlabeled disk */ strlcat(state->pp_buf, "(nonl)", PAGE_SIZE); size = i_size >> 9; offset = (info->label_block + 1); } put_partition(state, 1, offset*(blocksize >> 9), size-offset*(blocksize >> 9)); } } else if (info->format == DASD_FORMAT_CDL) { /* * New style CDL formatted disk */ sector_t blk; int counter; /* * check if VOL1 label is available * if not, something is wrong, skipping partition detection */ if (strncmp(type, "VOL1", 4) == 0) { snprintf(tmp, sizeof(tmp), "VOL1/%8s:", name); strlcat(state->pp_buf, tmp, PAGE_SIZE); /* * get block number and read then go through format1 * labels */ blk = cchhb2blk(&label->vol.vtoc, geo) + 1; counter = 0; data = read_part_sector(state, blk * (blocksize/512), &sect); while (data != NULL) { struct vtoc_format1_label f1; memcpy(&f1, data, sizeof(struct vtoc_format1_label)); put_dev_sector(sect); /* skip FMT4 / FMT5 / FMT7 labels */ if (f1.DS1FMTID == _ascebc['4'] || f1.DS1FMTID == _ascebc['5'] || f1.DS1FMTID == _ascebc['7'] || f1.DS1FMTID == _ascebc['9']) { blk++; data = read_part_sector(state, blk * (blocksize/512), &sect); continue; } /* only FMT1 and 8 labels valid at this point */ if (f1.DS1FMTID != _ascebc['1'] && f1.DS1FMTID != _ascebc['8']) break; /* OK, we got valid partition data */ offset = cchh2blk(&f1.DS1EXT1.llimit, geo); size = cchh2blk(&f1.DS1EXT1.ulimit, geo) - offset + geo->sectors; if (counter >= state->limit) break; put_partition(state, counter + 1, offset * (blocksize >> 9), size * (blocksize >> 9)); counter++; blk++; data = read_part_sector(state, blk * (blocksize/512), &sect); } if (!data) /* Are we not supposed to report this ? 
*/ goto out_readerr; } else printk(KERN_WARNING "Warning, expected Label VOL1 not " "found, treating as CDL formated Disk"); } strlcat(state->pp_buf, "\n", PAGE_SIZE); goto out_freeall; out_readerr: res = -1; out_freeall: kfree(label); out_nolab: kfree(geo); out_nogeo: kfree(info); out_exit: return res; }
gpl-2.0
Silverblade-nz/Alpha15Copy
drivers/isdn/gigaset/asyncdata.c
9557
17022
/* * Common data handling layer for ser_gigaset and usb_gigaset * * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>, * Hansjoerg Lipp <hjlipp@web.de>, * Stefan Eilers. * * ===================================================================== * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * ===================================================================== */ #include "gigaset.h" #include <linux/crc-ccitt.h> #include <linux/bitrev.h> #include <linux/export.h> /* check if byte must be stuffed/escaped * I'm not sure which data should be encoded. * Therefore I will go the hard way and encode every value * less than 0x20, the flag sequence and the control escape char. */ static inline int muststuff(unsigned char c) { if (c < PPP_TRANS) return 1; if (c == PPP_FLAG) return 1; if (c == PPP_ESCAPE) return 1; /* other possible candidates: */ /* 0x91: XON with parity set */ /* 0x93: XOFF with parity set */ return 0; } /* == data input =========================================================== */ /* process a block of received bytes in command mode * (mstate != MS_LOCKED && (inputstate & INS_command)) * Append received bytes to the command response buffer and forward them * line by line to the response handler. Exit whenever a mode/state change * might have occurred. * Note: Received lines may be terminated by CR, LF, or CR LF, which will be * removed before passing the line to the response handler. 
* Return value: * number of processed bytes */ static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf) { unsigned char *src = inbuf->data + inbuf->head; struct cardstate *cs = inbuf->cs; unsigned cbytes = cs->cbytes; unsigned procbytes = 0; unsigned char c; while (procbytes < numbytes) { c = *src++; procbytes++; switch (c) { case '\n': if (cbytes == 0 && cs->respdata[0] == '\r') { /* collapse LF with preceding CR */ cs->respdata[0] = 0; break; } /* --v-- fall through --v-- */ case '\r': /* end of message line, pass to response handler */ if (cbytes >= MAX_RESP_SIZE) { dev_warn(cs->dev, "response too large (%d)\n", cbytes); cbytes = MAX_RESP_SIZE; } cs->cbytes = cbytes; gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response", cbytes, cs->respdata); gigaset_handle_modem_response(cs); cbytes = 0; /* store EOL byte for CRLF collapsing */ cs->respdata[0] = c; /* cs->dle may have changed */ if (cs->dle && !(inbuf->inputstate & INS_DLE_command)) inbuf->inputstate &= ~INS_command; /* return for reevaluating state */ goto exit; case DLE_FLAG: if (inbuf->inputstate & INS_DLE_char) { /* quoted DLE: clear quote flag */ inbuf->inputstate &= ~INS_DLE_char; } else if (cs->dle || (inbuf->inputstate & INS_DLE_command)) { /* DLE escape, pass up for handling */ inbuf->inputstate |= INS_DLE_char; goto exit; } /* quoted or not in DLE mode: treat as regular data */ /* --v-- fall through --v-- */ default: /* append to line buffer if possible */ if (cbytes < MAX_RESP_SIZE) cs->respdata[cbytes] = c; cbytes++; } } exit: cs->cbytes = cbytes; return procbytes; } /* process a block of received bytes in lock mode * All received bytes are passed unmodified to the tty i/f. 
* Return value: * number of processed bytes */ static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf) { unsigned char *src = inbuf->data + inbuf->head; gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src); gigaset_if_receive(inbuf->cs, src, numbytes); return numbytes; } /* process a block of received bytes in HDLC data mode * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC) * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes. * When a frame is complete, check the FCS and pass valid frames to the LL. * If DLE is encountered, return immediately to let the caller handle it. * Return value: * number of processed bytes */ static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf) { struct cardstate *cs = inbuf->cs; struct bc_state *bcs = cs->bcs; int inputstate = bcs->inputstate; __u16 fcs = bcs->rx_fcs; struct sk_buff *skb = bcs->rx_skb; unsigned char *src = inbuf->data + inbuf->head; unsigned procbytes = 0; unsigned char c; if (inputstate & INS_byte_stuff) { if (!numbytes) return 0; inputstate &= ~INS_byte_stuff; goto byte_stuff; } while (procbytes < numbytes) { c = *src++; procbytes++; if (c == DLE_FLAG) { if (inputstate & INS_DLE_char) { /* quoted DLE: clear quote flag */ inputstate &= ~INS_DLE_char; } else if (cs->dle || (inputstate & INS_DLE_command)) { /* DLE escape, pass up for handling */ inputstate |= INS_DLE_char; break; } } if (c == PPP_ESCAPE) { /* byte stuffing indicator: pull in next byte */ if (procbytes >= numbytes) { /* end of buffer, save for later processing */ inputstate |= INS_byte_stuff; break; } byte_stuff: c = *src++; procbytes++; if (c == DLE_FLAG) { if (inputstate & INS_DLE_char) { /* quoted DLE: clear quote flag */ inputstate &= ~INS_DLE_char; } else if (cs->dle || (inputstate & INS_DLE_command)) { /* DLE escape, pass up for handling */ inputstate |= INS_DLE_char | INS_byte_stuff; break; } } c ^= PPP_TRANS; #ifdef CONFIG_GIGASET_DEBUG if (!muststuff(c)) 
gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c); #endif } else if (c == PPP_FLAG) { /* end of frame: process content if any */ if (inputstate & INS_have_data) { gig_dbg(DEBUG_HDLC, "7e----------------------------"); /* check and pass received frame */ if (!skb) { /* skipped frame */ gigaset_isdn_rcv_err(bcs); } else if (skb->len < 2) { /* frame too short for FCS */ dev_warn(cs->dev, "short frame (%d)\n", skb->len); gigaset_isdn_rcv_err(bcs); dev_kfree_skb_any(skb); } else if (fcs != PPP_GOODFCS) { /* frame check error */ dev_err(cs->dev, "Checksum failed, %u bytes corrupted!\n", skb->len); gigaset_isdn_rcv_err(bcs); dev_kfree_skb_any(skb); } else { /* good frame */ __skb_trim(skb, skb->len - 2); gigaset_skb_rcvd(bcs, skb); } /* prepare reception of next frame */ inputstate &= ~INS_have_data; skb = gigaset_new_rx_skb(bcs); } else { /* empty frame (7E 7E) */ #ifdef CONFIG_GIGASET_DEBUG ++bcs->emptycount; #endif if (!skb) { /* skipped (?) */ gigaset_isdn_rcv_err(bcs); skb = gigaset_new_rx_skb(bcs); } } fcs = PPP_INITFCS; continue; #ifdef CONFIG_GIGASET_DEBUG } else if (muststuff(c)) { /* Should not happen. Possible after ZDLE=1<CR><LF>. */ gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c); #endif } /* regular data byte, append to skb */ #ifdef CONFIG_GIGASET_DEBUG if (!(inputstate & INS_have_data)) { gig_dbg(DEBUG_HDLC, "7e (%d x) ================", bcs->emptycount); bcs->emptycount = 0; } #endif inputstate |= INS_have_data; if (skb) { if (skb->len >= bcs->rx_bufsize) { dev_warn(cs->dev, "received packet too long\n"); dev_kfree_skb_any(skb); /* skip remainder of packet */ bcs->rx_skb = skb = NULL; } else { *__skb_put(skb, 1) = c; fcs = crc_ccitt_byte(fcs, c); } } } bcs->inputstate = inputstate; bcs->rx_fcs = fcs; return procbytes; } /* process a block of received bytes in transparent data mode * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC) * Invert bytes, undoing byte stuffing and watching for DLE escapes. 
* If DLE is encountered, return immediately to let the caller handle it.
 * Return value:
 *	number of processed bytes
 */
static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
{
	struct cardstate *cs = inbuf->cs;
	struct bc_state *bcs = cs->bcs;
	int inputstate = bcs->inputstate;
	struct sk_buff *skb = bcs->rx_skb;
	unsigned char *src = inbuf->data + inbuf->head;
	unsigned procbytes = 0;
	unsigned char c;

	if (!skb) {
		/* no receive buffer available: discard this whole block,
		 * but allocate one so the next block can be collected */
		gigaset_new_rx_skb(bcs);
		return numbytes;
	}

	/* stop at end of input or when the current skb is full */
	while (procbytes < numbytes && skb->len < bcs->rx_bufsize) {
		c = *src++;
		procbytes++;

		if (c == DLE_FLAG) {
			if (inputstate & INS_DLE_char) {
				/* quoted DLE: clear quote flag */
				inputstate &= ~INS_DLE_char;
			} else if (cs->dle || (inputstate & INS_DLE_command)) {
				/* DLE escape, pass up for handling */
				inputstate |= INS_DLE_char;
				break;
			}
		}

		/* regular data byte: append to current skb,
		 * bit-reversed (transparent mode transmits LSB first) */
		inputstate |= INS_have_data;
		*__skb_put(skb, 1) = bitrev8(c);
	}

	/* pass any collected data up to the LL and start a fresh skb */
	if (inputstate & INS_have_data) {
		gigaset_skb_rcvd(bcs, skb);
		inputstate &= ~INS_have_data;
		gigaset_new_rx_skb(bcs);
	}

	bcs->inputstate = inputstate;
	return procbytes;
}

/* process DLE escapes
 * Called whenever a DLE sequence might be encountered in the input stream.
 * Either processes the entire DLE sequence or, if that isn't possible,
 * notes the fact that an initial DLE has been received in the INS_DLE_char
 * inputstate flag and resumes processing of the sequence on the next call.
*/ static void handle_dle(struct inbuf_t *inbuf) { struct cardstate *cs = inbuf->cs; if (cs->mstate == MS_LOCKED) return; /* no DLE processing in lock mode */ if (!(inbuf->inputstate & INS_DLE_char)) { /* no DLE pending */ if (inbuf->data[inbuf->head] == DLE_FLAG && (cs->dle || inbuf->inputstate & INS_DLE_command)) { /* start of DLE sequence */ inbuf->head++; if (inbuf->head == inbuf->tail || inbuf->head == RBUFSIZE) { /* end of buffer, save for later processing */ inbuf->inputstate |= INS_DLE_char; return; } } else { /* regular data byte */ return; } } /* consume pending DLE */ inbuf->inputstate &= ~INS_DLE_char; switch (inbuf->data[inbuf->head]) { case 'X': /* begin of event message */ if (inbuf->inputstate & INS_command) dev_notice(cs->dev, "received <DLE>X in command mode\n"); inbuf->inputstate |= INS_command | INS_DLE_command; inbuf->head++; /* byte consumed */ break; case '.': /* end of event message */ if (!(inbuf->inputstate & INS_DLE_command)) dev_notice(cs->dev, "received <DLE>. without <DLE>X\n"); inbuf->inputstate &= ~INS_DLE_command; /* return to data mode if in DLE mode */ if (cs->dle) inbuf->inputstate &= ~INS_command; inbuf->head++; /* byte consumed */ break; case DLE_FLAG: /* DLE in data stream */ /* mark as quoted */ inbuf->inputstate |= INS_DLE_char; if (!(cs->dle || inbuf->inputstate & INS_DLE_command)) dev_notice(cs->dev, "received <DLE><DLE> not in DLE mode\n"); break; /* quoted byte left in buffer */ default: dev_notice(cs->dev, "received <DLE><%02x>\n", inbuf->data[inbuf->head]); /* quoted byte left in buffer */ } } /** * gigaset_m10x_input() - process a block of data received from the device * @inbuf: received data and device descriptor structure. * * Called by hardware module {ser,usb}_gigaset with a block of received * bytes. Separates the bytes received over the serial data channel into * user data and command replies (locked/unlocked) according to the * current state of the interface. 
*/ void gigaset_m10x_input(struct inbuf_t *inbuf) { struct cardstate *cs = inbuf->cs; unsigned numbytes, procbytes; gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail); while (inbuf->head != inbuf->tail) { /* check for DLE escape */ handle_dle(inbuf); /* process a contiguous block of bytes */ numbytes = (inbuf->head > inbuf->tail ? RBUFSIZE : inbuf->tail) - inbuf->head; gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes); /* * numbytes may be 0 if handle_dle() ate the last byte. * This does no harm, *_loop() will just return 0 immediately. */ if (cs->mstate == MS_LOCKED) procbytes = lock_loop(numbytes, inbuf); else if (inbuf->inputstate & INS_command) procbytes = cmd_loop(numbytes, inbuf); else if (cs->bcs->proto2 == L2_HDLC) procbytes = hdlc_loop(numbytes, inbuf); else procbytes = iraw_loop(numbytes, inbuf); inbuf->head += procbytes; /* check for buffer wraparound */ if (inbuf->head >= RBUFSIZE) inbuf->head = 0; gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head); } } EXPORT_SYMBOL_GPL(gigaset_m10x_input); /* == data output ========================================================== */ /* * Encode a data packet into an octet stuffed HDLC frame with FCS, * opening and closing flags, preserving headroom data. 
* parameters: * skb skb containing original packet (freed upon return) * Return value: * pointer to newly allocated skb containing the result frame * and the original link layer header, NULL on error */ static struct sk_buff *HDLC_Encode(struct sk_buff *skb) { struct sk_buff *hdlc_skb; __u16 fcs; unsigned char c; unsigned char *cp; int len; unsigned int stuf_cnt; stuf_cnt = 0; fcs = PPP_INITFCS; cp = skb->data; len = skb->len; while (len--) { if (muststuff(*cp)) stuf_cnt++; fcs = crc_ccitt_byte(fcs, *cp++); } fcs ^= 0xffff; /* complement */ /* size of new buffer: original size + number of stuffing bytes * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes * + room for link layer header */ hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len); if (!hdlc_skb) { dev_kfree_skb_any(skb); return NULL; } /* Copy link layer header into new skb */ skb_reset_mac_header(hdlc_skb); skb_reserve(hdlc_skb, skb->mac_len); memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len); hdlc_skb->mac_len = skb->mac_len; /* Add flag sequence in front of everything.. */ *(skb_put(hdlc_skb, 1)) = PPP_FLAG; /* Perform byte stuffing while copying data. */ while (skb->len--) { if (muststuff(*skb->data)) { *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE; *(skb_put(hdlc_skb, 1)) = (*skb->data++) ^ PPP_TRANS; } else *(skb_put(hdlc_skb, 1)) = *skb->data++; } /* Finally add FCS (byte stuffed) and flag sequence */ c = (fcs & 0x00ff); /* least significant byte first */ if (muststuff(c)) { *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE; c ^= PPP_TRANS; } *(skb_put(hdlc_skb, 1)) = c; c = ((fcs >> 8) & 0x00ff); if (muststuff(c)) { *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE; c ^= PPP_TRANS; } *(skb_put(hdlc_skb, 1)) = c; *(skb_put(hdlc_skb, 1)) = PPP_FLAG; dev_kfree_skb_any(skb); return hdlc_skb; } /* * Encode a data packet into an octet stuffed raw bit inverted frame, * preserving headroom data. 
* parameters: * skb skb containing original packet (freed upon return) * Return value: * pointer to newly allocated skb containing the result frame * and the original link layer header, NULL on error */ static struct sk_buff *iraw_encode(struct sk_buff *skb) { struct sk_buff *iraw_skb; unsigned char c; unsigned char *cp; int len; /* size of new buffer (worst case = every byte must be stuffed): * 2 * original size + room for link layer header */ iraw_skb = dev_alloc_skb(2 * skb->len + skb->mac_len); if (!iraw_skb) { dev_kfree_skb_any(skb); return NULL; } /* copy link layer header into new skb */ skb_reset_mac_header(iraw_skb); skb_reserve(iraw_skb, skb->mac_len); memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len); iraw_skb->mac_len = skb->mac_len; /* copy and stuff data */ cp = skb->data; len = skb->len; while (len--) { c = bitrev8(*cp++); if (c == DLE_FLAG) *(skb_put(iraw_skb, 1)) = c; *(skb_put(iraw_skb, 1)) = c; } dev_kfree_skb_any(skb); return iraw_skb; } /** * gigaset_m10x_send_skb() - queue an skb for sending * @bcs: B channel descriptor structure. * @skb: data to send. * * Called by LL to encode and queue an skb for sending, and start * transmission if necessary. * Once the payload data has been transmitted completely, gigaset_skb_sent() * will be called with the skb's link layer header preserved. * * Return value: * number of bytes accepted for sending (skb->len) if ok, * error code < 0 (eg. 
-ENOMEM) on error */
int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
{
	struct cardstate *cs = bcs->cs;
	/* save the payload length now: the encoders below free the
	 * original skb and return a newly allocated one */
	unsigned len = skb->len;
	unsigned long flags;

	/* encode according to the channel's layer 2 protocol */
	if (bcs->proto2 == L2_HDLC)
		skb = HDLC_Encode(skb);
	else
		skb = iraw_encode(skb);
	if (!skb) {
		/* encoder already freed the original skb */
		dev_err(cs->dev,
			"unable to allocate memory for encoding!\n");
		return -ENOMEM;
	}

	skb_queue_tail(&bcs->squeue, skb);
	/* kick the write tasklet, but only while the device is connected;
	 * cs->lock protects cs->connected against disconnect */
	spin_lock_irqsave(&cs->lock, flags);
	if (cs->connected)
		tasklet_schedule(&cs->write_tasklet);
	spin_unlock_irqrestore(&cs->lock, flags);

	return len;	/* ok so far */
}
EXPORT_SYMBOL_GPL(gigaset_m10x_send_skb);
gpl-2.0
glenlee75/linux-at91
drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
10581
15773
/* ZD1211 USB-WLAN driver for Linux * * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de> * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/slab.h> #include "zd_rf.h" #include "zd_usb.h" #include "zd_chip.h" /* This RF programming code is based upon the code found in v2.16.0.0 of the * ZyDAS vendor driver. Unlike other RF's, Ubec publish full technical specs * for this RF on their website, so we're able to understand more than * usual as to what is going on. Thumbs up for Ubec for doing that. */ /* The 3-wire serial interface provides access to 8 write-only registers. * The data format is a 4 bit register address followed by a 20 bit value. */ #define UW2453_REGWRITE(reg, val) ((((reg) & 0xf) << 20) | ((val) & 0xfffff)) /* For channel tuning, we have to configure registers 1 (synthesizer), 2 (synth * fractional divide ratio) and 3 (VCO config). * * We configure the RF to produce an interrupt when the PLL is locked onto * the configured frequency. During initialization, we run through a variety * of different VCO configurations on channel 1 until we detect a PLL lock. * When this happens, we remember which VCO configuration produced the lock * and use it later. 
Actually, we use the configuration *after* the one that * produced the lock, which seems odd, but it works. * * If we do not see a PLL lock on any standard VCO config, we fall back on an * autocal configuration, which has a fixed (as opposed to per-channel) VCO * config and different synth values from the standard set (divide ratio * is still shared with the standard set). */ /* The per-channel synth values for all standard VCO configurations. These get * written to register 1. */ static const u8 uw2453_std_synth[] = { RF_CHANNEL( 1) = 0x47, RF_CHANNEL( 2) = 0x47, RF_CHANNEL( 3) = 0x67, RF_CHANNEL( 4) = 0x67, RF_CHANNEL( 5) = 0x67, RF_CHANNEL( 6) = 0x67, RF_CHANNEL( 7) = 0x57, RF_CHANNEL( 8) = 0x57, RF_CHANNEL( 9) = 0x57, RF_CHANNEL(10) = 0x57, RF_CHANNEL(11) = 0x77, RF_CHANNEL(12) = 0x77, RF_CHANNEL(13) = 0x77, RF_CHANNEL(14) = 0x4f, }; /* This table stores the synthesizer fractional divide ratio for *all* VCO * configurations (both standard and autocal). These get written to register 2. */ static const u16 uw2453_synth_divide[] = { RF_CHANNEL( 1) = 0x999, RF_CHANNEL( 2) = 0x99b, RF_CHANNEL( 3) = 0x998, RF_CHANNEL( 4) = 0x99a, RF_CHANNEL( 5) = 0x999, RF_CHANNEL( 6) = 0x99b, RF_CHANNEL( 7) = 0x998, RF_CHANNEL( 8) = 0x99a, RF_CHANNEL( 9) = 0x999, RF_CHANNEL(10) = 0x99b, RF_CHANNEL(11) = 0x998, RF_CHANNEL(12) = 0x99a, RF_CHANNEL(13) = 0x999, RF_CHANNEL(14) = 0xccc, }; /* Here is the data for all the standard VCO configurations. We shrink our * table a little by observing that both channels in a consecutive pair share * the same value. We also observe that the high 4 bits ([0:3] in the specs) * are all 'Reserved' and are always set to 0x4 - we chop them off in the data * below. 
*/ #define CHAN_TO_PAIRIDX(a) ((a - 1) / 2) #define RF_CHANPAIR(a,b) [CHAN_TO_PAIRIDX(a)] static const u16 uw2453_std_vco_cfg[][7] = { { /* table 1 */ RF_CHANPAIR( 1, 2) = 0x664d, RF_CHANPAIR( 3, 4) = 0x604d, RF_CHANPAIR( 5, 6) = 0x6675, RF_CHANPAIR( 7, 8) = 0x6475, RF_CHANPAIR( 9, 10) = 0x6655, RF_CHANPAIR(11, 12) = 0x6455, RF_CHANPAIR(13, 14) = 0x6665, }, { /* table 2 */ RF_CHANPAIR( 1, 2) = 0x666d, RF_CHANPAIR( 3, 4) = 0x606d, RF_CHANPAIR( 5, 6) = 0x664d, RF_CHANPAIR( 7, 8) = 0x644d, RF_CHANPAIR( 9, 10) = 0x6675, RF_CHANPAIR(11, 12) = 0x6475, RF_CHANPAIR(13, 14) = 0x6655, }, { /* table 3 */ RF_CHANPAIR( 1, 2) = 0x665d, RF_CHANPAIR( 3, 4) = 0x605d, RF_CHANPAIR( 5, 6) = 0x666d, RF_CHANPAIR( 7, 8) = 0x646d, RF_CHANPAIR( 9, 10) = 0x664d, RF_CHANPAIR(11, 12) = 0x644d, RF_CHANPAIR(13, 14) = 0x6675, }, { /* table 4 */ RF_CHANPAIR( 1, 2) = 0x667d, RF_CHANPAIR( 3, 4) = 0x607d, RF_CHANPAIR( 5, 6) = 0x665d, RF_CHANPAIR( 7, 8) = 0x645d, RF_CHANPAIR( 9, 10) = 0x666d, RF_CHANPAIR(11, 12) = 0x646d, RF_CHANPAIR(13, 14) = 0x664d, }, { /* table 5 */ RF_CHANPAIR( 1, 2) = 0x6643, RF_CHANPAIR( 3, 4) = 0x6043, RF_CHANPAIR( 5, 6) = 0x667d, RF_CHANPAIR( 7, 8) = 0x647d, RF_CHANPAIR( 9, 10) = 0x665d, RF_CHANPAIR(11, 12) = 0x645d, RF_CHANPAIR(13, 14) = 0x666d, }, { /* table 6 */ RF_CHANPAIR( 1, 2) = 0x6663, RF_CHANPAIR( 3, 4) = 0x6063, RF_CHANPAIR( 5, 6) = 0x6643, RF_CHANPAIR( 7, 8) = 0x6443, RF_CHANPAIR( 9, 10) = 0x667d, RF_CHANPAIR(11, 12) = 0x647d, RF_CHANPAIR(13, 14) = 0x665d, }, { /* table 7 */ RF_CHANPAIR( 1, 2) = 0x6653, RF_CHANPAIR( 3, 4) = 0x6053, RF_CHANPAIR( 5, 6) = 0x6663, RF_CHANPAIR( 7, 8) = 0x6463, RF_CHANPAIR( 9, 10) = 0x6643, RF_CHANPAIR(11, 12) = 0x6443, RF_CHANPAIR(13, 14) = 0x667d, }, { /* table 8 */ RF_CHANPAIR( 1, 2) = 0x6673, RF_CHANPAIR( 3, 4) = 0x6073, RF_CHANPAIR( 5, 6) = 0x6653, RF_CHANPAIR( 7, 8) = 0x6453, RF_CHANPAIR( 9, 10) = 0x6663, RF_CHANPAIR(11, 12) = 0x6463, RF_CHANPAIR(13, 14) = 0x6643, }, { /* table 9 */ RF_CHANPAIR( 1, 2) = 0x664b, RF_CHANPAIR( 3, 4) 
= 0x604b, RF_CHANPAIR( 5, 6) = 0x6673, RF_CHANPAIR( 7, 8) = 0x6473, RF_CHANPAIR( 9, 10) = 0x6653, RF_CHANPAIR(11, 12) = 0x6453, RF_CHANPAIR(13, 14) = 0x6663, }, { /* table 10 */ RF_CHANPAIR( 1, 2) = 0x666b, RF_CHANPAIR( 3, 4) = 0x606b, RF_CHANPAIR( 5, 6) = 0x664b, RF_CHANPAIR( 7, 8) = 0x644b, RF_CHANPAIR( 9, 10) = 0x6673, RF_CHANPAIR(11, 12) = 0x6473, RF_CHANPAIR(13, 14) = 0x6653, }, { /* table 11 */ RF_CHANPAIR( 1, 2) = 0x665b, RF_CHANPAIR( 3, 4) = 0x605b, RF_CHANPAIR( 5, 6) = 0x666b, RF_CHANPAIR( 7, 8) = 0x646b, RF_CHANPAIR( 9, 10) = 0x664b, RF_CHANPAIR(11, 12) = 0x644b, RF_CHANPAIR(13, 14) = 0x6673, }, }; /* The per-channel synth values for autocal. These get written to register 1. */ static const u16 uw2453_autocal_synth[] = { RF_CHANNEL( 1) = 0x6847, RF_CHANNEL( 2) = 0x6847, RF_CHANNEL( 3) = 0x6867, RF_CHANNEL( 4) = 0x6867, RF_CHANNEL( 5) = 0x6867, RF_CHANNEL( 6) = 0x6867, RF_CHANNEL( 7) = 0x6857, RF_CHANNEL( 8) = 0x6857, RF_CHANNEL( 9) = 0x6857, RF_CHANNEL(10) = 0x6857, RF_CHANNEL(11) = 0x6877, RF_CHANNEL(12) = 0x6877, RF_CHANNEL(13) = 0x6877, RF_CHANNEL(14) = 0x684f, }; /* The VCO configuration for autocal (all channels) */ static const u16 UW2453_AUTOCAL_VCO_CFG = 0x6662; /* TX gain settings. The array index corresponds to the TX power integration * values found in the EEPROM. The values get written to register 7. 
*/ static u32 uw2453_txgain[] = { [0x00] = 0x0e313, [0x01] = 0x0fb13, [0x02] = 0x0e093, [0x03] = 0x0f893, [0x04] = 0x0ea93, [0x05] = 0x1f093, [0x06] = 0x1f493, [0x07] = 0x1f693, [0x08] = 0x1f393, [0x09] = 0x1f35b, [0x0a] = 0x1e6db, [0x0b] = 0x1ff3f, [0x0c] = 0x1ffff, [0x0d] = 0x361d7, [0x0e] = 0x37fbf, [0x0f] = 0x3ff8b, [0x10] = 0x3ff33, [0x11] = 0x3fb3f, [0x12] = 0x3ffff, }; /* RF-specific structure */ struct uw2453_priv { /* index into synth/VCO config tables where PLL lock was found * -1 means autocal */ int config; }; #define UW2453_PRIV(rf) ((struct uw2453_priv *) (rf)->priv) static int uw2453_synth_set_channel(struct zd_chip *chip, int channel, bool autocal) { int r; int idx = channel - 1; u32 val; if (autocal) val = UW2453_REGWRITE(1, uw2453_autocal_synth[idx]); else val = UW2453_REGWRITE(1, uw2453_std_synth[idx]); r = zd_rfwrite_locked(chip, val, RF_RV_BITS); if (r) return r; return zd_rfwrite_locked(chip, UW2453_REGWRITE(2, uw2453_synth_divide[idx]), RF_RV_BITS); } static int uw2453_write_vco_cfg(struct zd_chip *chip, u16 value) { /* vendor driver always sets these upper bits even though the specs say * they are reserved */ u32 val = 0x40000 | value; return zd_rfwrite_locked(chip, UW2453_REGWRITE(3, val), RF_RV_BITS); } static int uw2453_init_mode(struct zd_chip *chip) { static const u32 rv[] = { UW2453_REGWRITE(0, 0x25f98), /* enter IDLE mode */ UW2453_REGWRITE(0, 0x25f9a), /* enter CAL_VCO mode */ UW2453_REGWRITE(0, 0x25f94), /* enter RX/TX mode */ UW2453_REGWRITE(0, 0x27fd4), /* power down RSSI circuit */ }; return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS); } static int uw2453_set_tx_gain_level(struct zd_chip *chip, int channel) { u8 int_value = chip->pwr_int_values[channel - 1]; if (int_value >= ARRAY_SIZE(uw2453_txgain)) { dev_dbg_f(zd_chip_dev(chip), "can't configure TX gain for " "int value %x on channel %d\n", int_value, channel); return 0; } return zd_rfwrite_locked(chip, UW2453_REGWRITE(7, uw2453_txgain[int_value]), RF_RV_BITS); } 
static int uw2453_init_hw(struct zd_rf *rf) { int i, r; int found_config = -1; u16 intr_status; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR10, 0x89 }, { ZD_CR15, 0x20 }, { ZD_CR17, 0x28 }, /* 6112 no change */ { ZD_CR23, 0x38 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x93 }, { ZD_CR27, 0x15 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 }, { ZD_CR33, 0x28 }, { ZD_CR34, 0x30 }, { ZD_CR35, 0x43 }, /* 6112 3e->43 */ { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 }, { ZD_CR46, 0x92 }, /* 6112 96->92 */ { ZD_CR47, 0x1e }, { ZD_CR48, 0x04 }, /* 5602 Roger */ { ZD_CR49, 0xfa }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 }, { ZD_CR91, 0x00 }, { ZD_CR92, 0x0a }, { ZD_CR98, 0x8d }, { ZD_CR99, 0x28 }, { ZD_CR100, 0x02 }, { ZD_CR101, 0x09 }, /* 6112 13->1f 6220 1f->13 6407 13->9 */ { ZD_CR102, 0x27 }, { ZD_CR106, 0x1c }, /* 5d07 5112 1f->1c 6220 1c->1f * 6221 1f->1c */ { ZD_CR107, 0x1c }, /* 6220 1c->1a 5221 1a->1c */ { ZD_CR109, 0x13 }, { ZD_CR110, 0x1f }, /* 6112 13->1f 6221 1f->13 6407 13->0x09 */ { ZD_CR111, 0x13 }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 }, { ZD_CR114, 0x23 }, /* 6221 27->23 */ { ZD_CR115, 0x24 }, /* 6112 24->1c 6220 1c->24 */ { ZD_CR116, 0x24 }, /* 6220 1c->24 */ { ZD_CR117, 0xfa }, /* 6112 fa->f8 6220 f8->f4 6220 f4->fa */ { ZD_CR118, 0xf0 }, /* 5d07 6112 f0->f2 6220 f2->f0 */ { ZD_CR119, 0x1a }, /* 6112 1a->10 6220 10->14 6220 14->1a */ { ZD_CR120, 0x4f }, { ZD_CR121, 0x1f }, /* 6220 4f->1f */ { ZD_CR122, 0xf0 }, { ZD_CR123, 0x57 }, { ZD_CR125, 0xad }, { ZD_CR126, 0x6c }, { ZD_CR127, 0x03 }, { ZD_CR128, 0x14 }, /* 6302 12->11 */ { ZD_CR129, 0x12 }, /* 6301 10->0f */ { ZD_CR130, 0x10 }, { ZD_CR137, 0x50 }, { ZD_CR138, 0xa8 }, { ZD_CR144, 0xac }, { ZD_CR146, 0x20 }, { ZD_CR252, 0xff }, { ZD_CR253, 0xff }, }; static const u32 rv[] = { UW2453_REGWRITE(4, 0x2b), /* configure receiver gain */ UW2453_REGWRITE(5, 0x19e4f), /* configure transmitter gain */ UW2453_REGWRITE(6, 0xf81ad), /* enable 
RX/TX filter tuning */ UW2453_REGWRITE(7, 0x3fffe), /* disable TX gain in test mode */ /* enter CAL_FIL mode, TX gain set by registers, RX gain set by pins, * RSSI circuit powered down, reduced RSSI range */ UW2453_REGWRITE(0, 0x25f9c), /* 5d01 cal_fil */ /* synthesizer configuration for channel 1 */ UW2453_REGWRITE(1, 0x47), UW2453_REGWRITE(2, 0x999), /* disable manual VCO band selection */ UW2453_REGWRITE(3, 0x7602), /* enable manual VCO band selection, configure current level */ UW2453_REGWRITE(3, 0x46063), }; r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); if (r) return r; r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS); if (r) return r; r = uw2453_init_mode(chip); if (r) return r; /* Try all standard VCO configuration settings on channel 1 */ for (i = 0; i < ARRAY_SIZE(uw2453_std_vco_cfg) - 1; i++) { /* Configure synthesizer for channel 1 */ r = uw2453_synth_set_channel(chip, 1, false); if (r) return r; /* Write VCO config */ r = uw2453_write_vco_cfg(chip, uw2453_std_vco_cfg[i][0]); if (r) return r; /* ack interrupt event */ r = zd_iowrite16_locked(chip, 0x0f, UW2453_INTR_REG); if (r) return r; /* check interrupt status */ r = zd_ioread16_locked(chip, &intr_status, UW2453_INTR_REG); if (r) return r; if (!(intr_status & 0xf)) { dev_dbg_f(zd_chip_dev(chip), "PLL locked on configuration %d\n", i); found_config = i; break; } } if (found_config == -1) { /* autocal */ dev_dbg_f(zd_chip_dev(chip), "PLL did not lock, using autocal\n"); r = uw2453_synth_set_channel(chip, 1, true); if (r) return r; r = uw2453_write_vco_cfg(chip, UW2453_AUTOCAL_VCO_CFG); if (r) return r; } /* To match the vendor driver behaviour, we use the configuration after * the one that produced a lock. 
*/ UW2453_PRIV(rf)->config = found_config + 1; return zd_iowrite16_locked(chip, 0x06, ZD_CR203); } static int uw2453_set_channel(struct zd_rf *rf, u8 channel) { int r; u16 vco_cfg; int config = UW2453_PRIV(rf)->config; bool autocal = (config == -1); struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR79, 0x58 }, { ZD_CR12, 0xf0 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x58 }, }; r = uw2453_synth_set_channel(chip, channel, autocal); if (r) return r; if (autocal) vco_cfg = UW2453_AUTOCAL_VCO_CFG; else vco_cfg = uw2453_std_vco_cfg[config][CHAN_TO_PAIRIDX(channel)]; r = uw2453_write_vco_cfg(chip, vco_cfg); if (r) return r; r = uw2453_init_mode(chip); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); if (r) return r; r = uw2453_set_tx_gain_level(chip, channel); if (r) return r; return zd_iowrite16_locked(chip, 0x06, ZD_CR203); } static int uw2453_switch_radio_on(struct zd_rf *rf) { int r; struct zd_chip *chip = zd_rf_to_chip(rf); struct zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x00 }, { ZD_CR251, 0x3f }, }; /* enter RXTX mode */ r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f94), RF_RV_BITS); if (r) return r; if (zd_chip_is_zd1211b(chip)) ioreqs[1].value = 0x7f; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int uw2453_switch_radio_off(struct zd_rf *rf) { int r; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x04 }, { ZD_CR251, 0x2f }, }; /* enter IDLE mode */ /* FIXME: shouldn't we go to SLEEP? 
sent email to zydas */ r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f90), RF_RV_BITS); if (r) return r; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static void uw2453_clear(struct zd_rf *rf) { kfree(rf->priv); } int zd_rf_init_uw2453(struct zd_rf *rf) { rf->init_hw = uw2453_init_hw; rf->set_channel = uw2453_set_channel; rf->switch_radio_on = uw2453_switch_radio_on; rf->switch_radio_off = uw2453_switch_radio_off; rf->patch_6m_band_edge = zd_rf_generic_patch_6m; rf->clear = uw2453_clear; /* we have our own TX integration code */ rf->update_channel_int = 0; rf->priv = kmalloc(sizeof(struct uw2453_priv), GFP_KERNEL); if (rf->priv == NULL) return -ENOMEM; return 0; }
gpl-2.0
amarchandole/capprobe_mptcp
arch/x86/kernel/i8237.c
12629
1211
/*
 * 8237A DMA controller suspend functions.
 *
 * Written by Pierre Ossman, 2005.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/init.h>
#include <linux/syscore_ops.h>

#include <asm/dma.h>

/*
 * This module just handles suspend/resume issues with the
 * 8237A DMA controller (used for ISA and LPC).
 * Allocation is handled in kernel/dma.c and normal usage is
 * in asm/dma.h.
 */

static void i8237A_resume(void)
{
	unsigned long lock_flags;
	int chan;

	lock_flags = claim_dma_lock();

	/* master reset on both controllers */
	dma_outb(0, DMA1_RESET_REG);
	dma_outb(0, DMA2_RESET_REG);

	for (chan = 0; chan < 8; chan++) {
		set_dma_addr(chan, 0x000000);
		/* DMA count is a bit weird so this is not 0 */
		set_dma_count(chan, 1);
	}

	/* Enable cascade DMA or channel 0-3 won't work */
	enable_dma(4);

	release_dma_lock(lock_flags);
}

static struct syscore_ops i8237_syscore_ops = {
	.resume		= i8237A_resume,
};

static int __init i8237A_init_ops(void)
{
	register_syscore_ops(&i8237_syscore_ops);
	return 0;
}
device_initcall(i8237A_init_ops);
gpl-2.0
jeffsf/daothanhduy_kernel_aries
arch/x86/math-emu/reg_add_sub.c
14421
8847
/*---------------------------------------------------------------------------+ | reg_add_sub.c | | | | Functions to add or subtract two registers and put the result in a third. | | | | Copyright (C) 1992,1993,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | For each function, the destination may be any FPU_REG, including one of | | the source FPU_REGs. | | Each function returns 0 if the answer is o.k., otherwise a non-zero | | value is returned, indicating either an exception condition or an | | internal error. | +---------------------------------------------------------------------------*/ #include "exception.h" #include "reg_constant.h" #include "fpu_emu.h" #include "control_w.h" #include "fpu_system.h" static int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa, FPU_REG const *b, u_char tagb, u_char signb, FPU_REG * dest, int deststnr, int control_w); /* Operates on st(0) and st(n), or on st(0) and temporary data. The destination must be one of the source st(x). */ int FPU_add(FPU_REG const *b, u_char tagb, int deststnr, int control_w) { FPU_REG *a = &st(0); FPU_REG *dest = &st(deststnr); u_char signb = getsign(b); u_char taga = FPU_gettag0(); u_char signa = getsign(a); u_char saved_sign = getsign(dest); int diff, tag, expa, expb; if (!(taga | tagb)) { expa = exponent(a); expb = exponent(b); valid_add: /* Both registers are valid */ if (!(signa ^ signb)) { /* signs are the same */ tag = FPU_u_add(a, b, dest, control_w, signa, expa, expb); } else { /* The signs are different, so do a subtraction */ diff = expa - expb; if (!diff) { diff = a->sigh - b->sigh; /* This works only if the ms bits are identical. 
*/ if (!diff) { diff = a->sigl > b->sigl; if (!diff) diff = -(a->sigl < b->sigl); } } if (diff > 0) { tag = FPU_u_sub(a, b, dest, control_w, signa, expa, expb); } else if (diff < 0) { tag = FPU_u_sub(b, a, dest, control_w, signb, expb, expa); } else { FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr); /* sign depends upon rounding mode */ setsign(dest, ((control_w & CW_RC) != RC_DOWN) ? SIGN_POS : SIGN_NEG); return TAG_Zero; } } if (tag < 0) { setsign(dest, saved_sign); return tag; } FPU_settagi(deststnr, tag); return tag; } if (taga == TAG_Special) taga = FPU_Special(a); if (tagb == TAG_Special) tagb = FPU_Special(b); if (((taga == TAG_Valid) && (tagb == TW_Denormal)) || ((taga == TW_Denormal) && (tagb == TAG_Valid)) || ((taga == TW_Denormal) && (tagb == TW_Denormal))) { FPU_REG x, y; if (denormal_operand() < 0) return FPU_Exception; FPU_to_exp16(a, &x); FPU_to_exp16(b, &y); a = &x; b = &y; expa = exponent16(a); expb = exponent16(b); goto valid_add; } if ((taga == TW_NaN) || (tagb == TW_NaN)) { if (deststnr == 0) return real_2op_NaN(b, tagb, deststnr, a); else return real_2op_NaN(a, taga, deststnr, a); } return add_sub_specials(a, taga, signa, b, tagb, signb, dest, deststnr, control_w); } /* Subtract b from a. 
(a-b) -> dest */ int FPU_sub(int flags, int rm, int control_w) { FPU_REG const *a, *b; FPU_REG *dest; u_char taga, tagb, signa, signb, saved_sign, sign; int diff, tag = 0, expa, expb, deststnr; a = &st(0); taga = FPU_gettag0(); deststnr = 0; if (flags & LOADED) { b = (FPU_REG *) rm; tagb = flags & 0x0f; } else { b = &st(rm); tagb = FPU_gettagi(rm); if (flags & DEST_RM) deststnr = rm; } signa = getsign(a); signb = getsign(b); if (flags & REV) { signa ^= SIGN_NEG; signb ^= SIGN_NEG; } dest = &st(deststnr); saved_sign = getsign(dest); if (!(taga | tagb)) { expa = exponent(a); expb = exponent(b); valid_subtract: /* Both registers are valid */ diff = expa - expb; if (!diff) { diff = a->sigh - b->sigh; /* Works only if ms bits are identical */ if (!diff) { diff = a->sigl > b->sigl; if (!diff) diff = -(a->sigl < b->sigl); } } switch ((((int)signa) * 2 + signb) / SIGN_NEG) { case 0: /* P - P */ case 3: /* N - N */ if (diff > 0) { /* |a| > |b| */ tag = FPU_u_sub(a, b, dest, control_w, signa, expa, expb); } else if (diff == 0) { FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr); /* sign depends upon rounding mode */ setsign(dest, ((control_w & CW_RC) != RC_DOWN) ? 
SIGN_POS : SIGN_NEG); return TAG_Zero; } else { sign = signa ^ SIGN_NEG; tag = FPU_u_sub(b, a, dest, control_w, sign, expb, expa); } break; case 1: /* P - N */ tag = FPU_u_add(a, b, dest, control_w, SIGN_POS, expa, expb); break; case 2: /* N - P */ tag = FPU_u_add(a, b, dest, control_w, SIGN_NEG, expa, expb); break; #ifdef PARANOID default: EXCEPTION(EX_INTERNAL | 0x111); return -1; #endif } if (tag < 0) { setsign(dest, saved_sign); return tag; } FPU_settagi(deststnr, tag); return tag; } if (taga == TAG_Special) taga = FPU_Special(a); if (tagb == TAG_Special) tagb = FPU_Special(b); if (((taga == TAG_Valid) && (tagb == TW_Denormal)) || ((taga == TW_Denormal) && (tagb == TAG_Valid)) || ((taga == TW_Denormal) && (tagb == TW_Denormal))) { FPU_REG x, y; if (denormal_operand() < 0) return FPU_Exception; FPU_to_exp16(a, &x); FPU_to_exp16(b, &y); a = &x; b = &y; expa = exponent16(a); expb = exponent16(b); goto valid_subtract; } if ((taga == TW_NaN) || (tagb == TW_NaN)) { FPU_REG const *d1, *d2; if (flags & REV) { d1 = b; d2 = a; } else { d1 = a; d2 = b; } if (flags & LOADED) return real_2op_NaN(b, tagb, deststnr, d1); if (flags & DEST_RM) return real_2op_NaN(a, taga, deststnr, d2); else return real_2op_NaN(b, tagb, deststnr, d2); } return add_sub_specials(a, taga, signa, b, tagb, signb ^ SIGN_NEG, dest, deststnr, control_w); } static int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa, FPU_REG const *b, u_char tagb, u_char signb, FPU_REG * dest, int deststnr, int control_w) { if (((taga == TW_Denormal) || (tagb == TW_Denormal)) && (denormal_operand() < 0)) return FPU_Exception; if (taga == TAG_Zero) { if (tagb == TAG_Zero) { /* Both are zero, result will be zero. */ u_char different_signs = signa ^ signb; FPU_copy_to_regi(a, TAG_Zero, deststnr); if (different_signs) { /* Signs are different. */ /* Sign of answer depends upon rounding mode. */ setsign(dest, ((control_w & CW_RC) != RC_DOWN) ? 
SIGN_POS : SIGN_NEG); } else setsign(dest, signa); /* signa may differ from the sign of a. */ return TAG_Zero; } else { reg_copy(b, dest); if ((tagb == TW_Denormal) && (b->sigh & 0x80000000)) { /* A pseudoDenormal, convert it. */ addexponent(dest, 1); tagb = TAG_Valid; } else if (tagb > TAG_Empty) tagb = TAG_Special; setsign(dest, signb); /* signb may differ from the sign of b. */ FPU_settagi(deststnr, tagb); return tagb; } } else if (tagb == TAG_Zero) { reg_copy(a, dest); if ((taga == TW_Denormal) && (a->sigh & 0x80000000)) { /* A pseudoDenormal */ addexponent(dest, 1); taga = TAG_Valid; } else if (taga > TAG_Empty) taga = TAG_Special; setsign(dest, signa); /* signa may differ from the sign of a. */ FPU_settagi(deststnr, taga); return taga; } else if (taga == TW_Infinity) { if ((tagb != TW_Infinity) || (signa == signb)) { FPU_copy_to_regi(a, TAG_Special, deststnr); setsign(dest, signa); /* signa may differ from the sign of a. */ return taga; } /* Infinity-Infinity is undefined. */ return arith_invalid(deststnr); } else if (tagb == TW_Infinity) { FPU_copy_to_regi(b, TAG_Special, deststnr); setsign(dest, signb); /* signb may differ from the sign of b. */ return tagb; } #ifdef PARANOID EXCEPTION(EX_INTERNAL | 0x101); #endif return FPU_Exception; }
gpl-2.0
jpihet/linux-omap
arch/x86/math-emu/reg_convert.c
14421
1629
/*---------------------------------------------------------------------------+
 |  reg_convert.c                                                            |
 |                                                                           |
 |  Convert register representation.                                         |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1996,1997                                    |
 |                  W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
 |                  E-mail   billm@suburbia.net                              |
 |                                                                           |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#include "exception.h"
#include "fpu_emu.h"

/*
 * Convert the extended-precision register *a into the internal working
 * format *x, which carries its exponent as a full 16-bit quantity.
 * Returns the sign of the source operand.
 */
int FPU_to_exp16(FPU_REG const *a, FPU_REG *x)
{
	int sign = getsign(a);

	/* Transfer the whole 64-bit significand in a single copy. */
	*(long long *)&(x->sigl) = *(const long long *)&(a->sigl);

	/* Widen the exponent into its 16-bit internal representation. */
	setexponent16(x, exponent(a));

	if (exponent16(x) == EXP_UNDER) {
		/*
		 * Denormal or pseudodenormal; only the significand and
		 * exponent matter here.  Both cases have the exponent
		 * bumped by one; a true denormal (msb of sigh clear) is
		 * additionally normalized.  Bumping a pseudodenormal is
		 * non-80486 behaviour, because the number loses its
		 * 'denormal' identity.
		 */
		addexponent(x, 1);
		if (!(x->sigh & 0x80000000))
			FPU_normalize_nuo(x);
	}

	/* The result must now be normalized; anything else is a bug. */
	if (!(x->sigh & 0x80000000))
		EXCEPTION(EX_INTERNAL | 0x180);

	return sign;
}
gpl-2.0
haydenbbickerton/dv7_4285dx-kernel
drivers/mtd/nand/nandsim.c
86
68438
/* * NAND flash simulator. * * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org> * * Copyright (C) 2004 Nokia Corporation * * Note: NS means "NAND Simulator". * Note: Input means input TO flash chip, output means output FROM chip. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any later * version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA */ #include <linux/init.h> #include <linux/types.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/vmalloc.h> #include <linux/math64.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_bch.h> #include <linux/mtd/partitions.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/seq_file.h> #include <linux/debugfs.h> /* Default simulator parameters values */ #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \ !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \ !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE) #define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98 #define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39 #define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */ #define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */ #endif #ifndef CONFIG_NANDSIM_ACCESS_DELAY #define 
CONFIG_NANDSIM_ACCESS_DELAY 25 #endif #ifndef CONFIG_NANDSIM_PROGRAMM_DELAY #define CONFIG_NANDSIM_PROGRAMM_DELAY 200 #endif #ifndef CONFIG_NANDSIM_ERASE_DELAY #define CONFIG_NANDSIM_ERASE_DELAY 2 #endif #ifndef CONFIG_NANDSIM_OUTPUT_CYCLE #define CONFIG_NANDSIM_OUTPUT_CYCLE 40 #endif #ifndef CONFIG_NANDSIM_INPUT_CYCLE #define CONFIG_NANDSIM_INPUT_CYCLE 50 #endif #ifndef CONFIG_NANDSIM_BUS_WIDTH #define CONFIG_NANDSIM_BUS_WIDTH 8 #endif #ifndef CONFIG_NANDSIM_DO_DELAYS #define CONFIG_NANDSIM_DO_DELAYS 0 #endif #ifndef CONFIG_NANDSIM_LOG #define CONFIG_NANDSIM_LOG 0 #endif #ifndef CONFIG_NANDSIM_DBG #define CONFIG_NANDSIM_DBG 0 #endif #ifndef CONFIG_NANDSIM_MAX_PARTS #define CONFIG_NANDSIM_MAX_PARTS 32 #endif static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE; static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE; static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE; static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE; static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY; static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY; static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY; static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE; static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE; static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH; static uint do_delays = CONFIG_NANDSIM_DO_DELAYS; static uint log = CONFIG_NANDSIM_LOG; static uint dbg = CONFIG_NANDSIM_DBG; static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS]; static unsigned int parts_num; static char *badblocks = NULL; static char *weakblocks = NULL; static char *weakpages = NULL; static unsigned int bitflips = 0; static char *gravepages = NULL; static unsigned int overridesize = 0; static char *cache_file = NULL; static unsigned int bbt; static unsigned int bch; module_param(first_id_byte, uint, 0400); module_param(second_id_byte, uint, 0400); module_param(third_id_byte, uint, 0400); module_param(fourth_id_byte, uint, 0400); module_param(access_delay, uint, 0400); 
module_param(programm_delay, uint, 0400); module_param(erase_delay, uint, 0400); module_param(output_cycle, uint, 0400); module_param(input_cycle, uint, 0400); module_param(bus_width, uint, 0400); module_param(do_delays, uint, 0400); module_param(log, uint, 0400); module_param(dbg, uint, 0400); module_param_array(parts, ulong, &parts_num, 0400); module_param(badblocks, charp, 0400); module_param(weakblocks, charp, 0400); module_param(weakpages, charp, 0400); module_param(bitflips, uint, 0400); module_param(gravepages, charp, 0400); module_param(overridesize, uint, 0400); module_param(cache_file, charp, 0400); module_param(bbt, uint, 0400); module_param(bch, uint, 0400); MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command"); MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command"); MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)"); MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)"); MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)"); MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)"); MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero"); MODULE_PARM_DESC(log, "Perform logging if not zero"); MODULE_PARM_DESC(dbg, "Output debug information if not zero"); MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas"); /* Page and erase block positions for the following parameters are independent of any partitions */ MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas"); 
MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]" " separated by commas e.g. 113:2 means eb 113" " can be erased only twice before failing"); MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]" " separated by commas e.g. 1401:2 means page 1401" " can be written only twice before failing"); MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)"); MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]" " separated by commas e.g. 1401:2 means page 1401" " can be read only twice before failing"); MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. " "The size is specified in erase blocks and as the exponent of a power of two" " e.g. 5 means a size of 32 erase blocks"); MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area"); MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " "be correctable in 512-byte blocks"); /* The largest possible page size */ #define NS_LARGEST_PAGE_SIZE 4096 /* The prefix for simulator output */ #define NS_OUTPUT_PREFIX "[nandsim]" /* Simulator's output macros (logging, debugging, warning, error) */ #define NS_LOG(args...) \ do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0) #define NS_DBG(args...) \ do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0) #define NS_WARN(args...) \ do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0) #define NS_ERR(args...) \ do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0) #define NS_INFO(args...) 
\ do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0) /* Busy-wait delay macros (microseconds, milliseconds) */ #define NS_UDELAY(us) \ do { if (do_delays) udelay(us); } while(0) #define NS_MDELAY(us) \ do { if (do_delays) mdelay(us); } while(0) /* Is the nandsim structure initialized ? */ #define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0) /* Good operation completion status */ #define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0))) /* Operation failed completion status */ #define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns)) /* Calculate the page offset in flash RAM image by (row, column) address */ #define NS_RAW_OFFSET(ns) \ (((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column) /* Calculate the OOB offset in flash RAM image by (row, column) address */ #define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz) /* After a command is input, the simulator goes to one of the following states */ #define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */ #define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */ #define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */ #define STATE_CMD_PAGEPROG 0x00000004 /* start page program */ #define STATE_CMD_READOOB 0x00000005 /* read OOB area */ #define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */ #define STATE_CMD_STATUS 0x00000007 /* read status */ #define STATE_CMD_SEQIN 0x00000009 /* sequential data input */ #define STATE_CMD_READID 0x0000000A /* read ID */ #define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */ #define STATE_CMD_RESET 0x0000000C /* reset */ #define STATE_CMD_RNDOUT 0x0000000D /* random output command */ #define STATE_CMD_RNDOUTSTART 0x0000000E /* random output start command */ #define STATE_CMD_MASK 0x0000000F /* command states mask */ /* After an address is input, the simulator goes to one of these states */ #define STATE_ADDR_PAGE 
0x00000010 /* full (row, column) address is accepted */ #define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */ #define STATE_ADDR_COLUMN 0x00000030 /* column address was accepted */ #define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */ #define STATE_ADDR_MASK 0x00000070 /* address states mask */ /* During data input/output the simulator is in these states */ #define STATE_DATAIN 0x00000100 /* waiting for data input */ #define STATE_DATAIN_MASK 0x00000100 /* data input states mask */ #define STATE_DATAOUT 0x00001000 /* waiting for page data output */ #define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */ #define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */ #define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */ #define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */ /* Previous operation is done, ready to accept new requests */ #define STATE_READY 0x00000000 /* This state is used to mark that the next state isn't known yet */ #define STATE_UNKNOWN 0x10000000 /* Simulator's actions bit masks */ #define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */ #define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */ #define ACTION_SECERASE 0x00300000 /* erase sector */ #define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */ #define ACTION_HALFOFF 0x00500000 /* add to address half of page */ #define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */ #define ACTION_MASK 0x00700000 /* action mask */ #define NS_OPER_NUM 13 /* Number of operations supported by the simulator */ #define NS_OPER_STATES 6 /* Maximum number of states in operation */ #define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */ #define OPT_PAGE512 0x00000002 /* 512-byte page chips */ #define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ #define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ #define OPT_PAGE512_8BIT 
0x00000040 /* 512-byte page chips with 8-bit bus width */ #define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ #define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ #define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */ /* Remove action bits from state */ #define NS_STATE(x) ((x) & ~ACTION_MASK) /* * Maximum previous states which need to be saved. Currently saving is * only needed for page program operation with preceded read command * (which is only valid for 512-byte pages). */ #define NS_MAX_PREVSTATES 1 /* Maximum page cache pages needed to read or write a NAND page to the cache_file */ #define NS_MAX_HELD_PAGES 16 struct nandsim_debug_info { struct dentry *dfs_root; struct dentry *dfs_wear_report; }; /* * A union to represent flash memory contents and flash buffer. */ union ns_mem { u_char *byte; /* for byte access */ uint16_t *word; /* for 16-bit word access */ }; /* * The structure which describes all the internal simulator data. */ struct nandsim { struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS]; unsigned int nbparts; uint busw; /* flash chip bus width (8 or 16) */ u_char ids[4]; /* chip's ID bytes */ uint32_t options; /* chip's characteristic bits */ uint32_t state; /* current chip state */ uint32_t nxstate; /* next expected state */ uint32_t *op; /* current operation, NULL operations isn't known yet */ uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */ uint16_t npstates; /* number of previous states saved */ uint16_t stateidx; /* current state index */ /* The simulated NAND flash pages array */ union ns_mem *pages; /* Slab allocator for nand pages */ struct kmem_cache *nand_pages_slab; /* Internal buffer of page + OOB size bytes */ union ns_mem buf; /* NAND flash "geometry" */ struct { uint64_t totsz; /* total flash size, bytes */ uint32_t secsz; /* flash sector (erase block) size, bytes */ uint pgsz; /* NAND flash page size, bytes */ uint oobsz; /* page OOB area size, bytes */ uint64_t 
totszoob; /* total flash size including OOB, bytes */ uint pgszoob; /* page size including OOB , bytes*/ uint secszoob; /* sector size including OOB, bytes */ uint pgnum; /* total number of pages */ uint pgsec; /* number of pages per sector */ uint secshift; /* bits number in sector size */ uint pgshift; /* bits number in page size */ uint pgaddrbytes; /* bytes per page address */ uint secaddrbytes; /* bytes per sector address */ uint idbytes; /* the number ID bytes that this chip outputs */ } geom; /* NAND flash internal registers */ struct { unsigned command; /* the command register */ u_char status; /* the status register */ uint row; /* the page number */ uint column; /* the offset within page */ uint count; /* internal counter */ uint num; /* number of bytes which must be processed */ uint off; /* fixed page offset */ } regs; /* NAND flash lines state */ struct { int ce; /* chip Enable */ int cle; /* command Latch Enable */ int ale; /* address Latch Enable */ int wp; /* write Protect */ } lines; /* Fields needed when using a cache file */ struct file *cfile; /* Open file */ unsigned long *pages_written; /* Which pages have been written */ void *file_buf; struct page *held_pages[NS_MAX_HELD_PAGES]; int held_cnt; struct nandsim_debug_info dbg; }; /* * Operations array. To perform any operation the simulator must pass * through the correspondent states chain. 
*/ static struct nandsim_operations { uint32_t reqopts; /* options which are required to perform the operation */ uint32_t states[NS_OPER_STATES]; /* operation's states */ } ops[NS_OPER_NUM] = { /* Read page + OOB from the beginning */ {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY, STATE_DATAOUT, STATE_READY}}, /* Read page + OOB from the second half */ {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY, STATE_DATAOUT, STATE_READY}}, /* Read OOB */ {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY, STATE_DATAOUT, STATE_READY}}, /* Program page starting from the beginning */ {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}}, /* Program page starting from the beginning */ {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE, STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}}, /* Program page starting from the second half */ {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE, STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}}, /* Program OOB */ {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE, STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}}, /* Erase sector */ {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}}, /* Read status */ {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}}, /* Read ID */ {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}}, /* Large page devices read page */ {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY, STATE_DATAOUT, STATE_READY}}, /* Large page devices random page read */ {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY, STATE_DATAOUT, STATE_READY}}, }; struct weak_block { struct list_head 
list; unsigned int erase_block_no; unsigned int max_erases; unsigned int erases_done; }; static LIST_HEAD(weak_blocks); struct weak_page { struct list_head list; unsigned int page_no; unsigned int max_writes; unsigned int writes_done; }; static LIST_HEAD(weak_pages); struct grave_page { struct list_head list; unsigned int page_no; unsigned int max_reads; unsigned int reads_done; }; static LIST_HEAD(grave_pages); static unsigned long *erase_block_wear = NULL; static unsigned int wear_eb_count = 0; static unsigned long total_wear = 0; /* MTD structure for NAND controller */ static struct mtd_info *nsmtd; static int nandsim_debugfs_show(struct seq_file *m, void *private) { unsigned long wmin = -1, wmax = 0, avg; unsigned long deciles[10], decile_max[10], tot = 0; unsigned int i; /* Calc wear stats */ for (i = 0; i < wear_eb_count; ++i) { unsigned long wear = erase_block_wear[i]; if (wear < wmin) wmin = wear; if (wear > wmax) wmax = wear; tot += wear; } for (i = 0; i < 9; ++i) { deciles[i] = 0; decile_max[i] = (wmax * (i + 1) + 5) / 10; } deciles[9] = 0; decile_max[9] = wmax; for (i = 0; i < wear_eb_count; ++i) { int d; unsigned long wear = erase_block_wear[i]; for (d = 0; d < 10; ++d) if (wear <= decile_max[d]) { deciles[d] += 1; break; } } avg = tot / wear_eb_count; /* Output wear report */ seq_printf(m, "Total numbers of erases: %lu\n", tot); seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count); seq_printf(m, "Average number of erases: %lu\n", avg); seq_printf(m, "Maximum number of erases: %lu\n", wmax); seq_printf(m, "Minimum number of erases: %lu\n", wmin); for (i = 0; i < 10; ++i) { unsigned long from = (i ? 
decile_max[i - 1] + 1 : 0); if (from > decile_max[i]) continue; seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n", from, decile_max[i], deciles[i]); } return 0; } static int nandsim_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, nandsim_debugfs_show, inode->i_private); } static const struct file_operations dfs_fops = { .open = nandsim_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /** * nandsim_debugfs_create - initialize debugfs * @dev: nandsim device description object * * This function creates all debugfs files for UBI device @ubi. Returns zero in * case of success and a negative error code in case of failure. */ static int nandsim_debugfs_create(struct nandsim *dev) { struct nandsim_debug_info *dbg = &dev->dbg; struct dentry *dent; int err; if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; dent = debugfs_create_dir("nandsim", NULL); if (IS_ERR_OR_NULL(dent)) { int err = dent ? -ENODEV : PTR_ERR(dent); NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n", err); return err; } dbg->dfs_root = dent; dent = debugfs_create_file("wear_report", S_IRUSR, dbg->dfs_root, dev, &dfs_fops); if (IS_ERR_OR_NULL(dent)) goto out_remove; dbg->dfs_wear_report = dent; return 0; out_remove: debugfs_remove_recursive(dbg->dfs_root); err = dent ? PTR_ERR(dent) : -ENODEV; return err; } /** * nandsim_debugfs_remove - destroy all debugfs files */ static void nandsim_debugfs_remove(struct nandsim *ns) { if (IS_ENABLED(CONFIG_DEBUG_FS)) debugfs_remove_recursive(ns->dbg.dfs_root); } /* * Allocate array of page pointers, create slab allocation for an array * and initialize the array by NULL pointers. * * RETURNS: 0 if success, -ENOMEM if memory alloc fails. 
*/ static int alloc_device(struct nandsim *ns) { struct file *cfile; int i, err; if (cache_file) { cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600); if (IS_ERR(cfile)) return PTR_ERR(cfile); if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) { NS_ERR("alloc_device: cache file not readable\n"); err = -EINVAL; goto err_close; } if (!cfile->f_op->write && !cfile->f_op->aio_write) { NS_ERR("alloc_device: cache file not writeable\n"); err = -EINVAL; goto err_close; } ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) * sizeof(unsigned long)); if (!ns->pages_written) { NS_ERR("alloc_device: unable to allocate pages written array\n"); err = -ENOMEM; goto err_close; } ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL); if (!ns->file_buf) { NS_ERR("alloc_device: unable to allocate file buf\n"); err = -ENOMEM; goto err_free; } ns->cfile = cfile; return 0; } ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem)); if (!ns->pages) { NS_ERR("alloc_device: unable to allocate page array\n"); return -ENOMEM; } for (i = 0; i < ns->geom.pgnum; i++) { ns->pages[i].byte = NULL; } ns->nand_pages_slab = kmem_cache_create("nandsim", ns->geom.pgszoob, 0, 0, NULL); if (!ns->nand_pages_slab) { NS_ERR("cache_create: unable to create kmem_cache\n"); return -ENOMEM; } return 0; err_free: vfree(ns->pages_written); err_close: filp_close(cfile, NULL); return err; } /* * Free any allocated pages, and free the array of page pointers. */ static void free_device(struct nandsim *ns) { int i; if (ns->cfile) { kfree(ns->file_buf); vfree(ns->pages_written); filp_close(ns->cfile, NULL); return; } if (ns->pages) { for (i = 0; i < ns->geom.pgnum; i++) { if (ns->pages[i].byte) kmem_cache_free(ns->nand_pages_slab, ns->pages[i].byte); } kmem_cache_destroy(ns->nand_pages_slab); vfree(ns->pages); } } static char *get_partition_name(int i) { return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i); } /* * Initialize the nandsim structure. 
* * RETURNS: 0 if success, -ERRNO if failure. */ static int init_nandsim(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct nandsim *ns = chip->priv; int i, ret = 0; uint64_t remains; uint64_t next_offset; if (NS_IS_INITIALIZED(ns)) { NS_ERR("init_nandsim: nandsim is already initialized\n"); return -EIO; } /* Force mtd to not do delays */ chip->chip_delay = 0; /* Initialize the NAND flash parameters */ ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8; ns->geom.totsz = mtd->size; ns->geom.pgsz = mtd->writesize; ns->geom.oobsz = mtd->oobsize; ns->geom.secsz = mtd->erasesize; ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz; ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz); ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz; ns->geom.secshift = ffs(ns->geom.secsz) - 1; ns->geom.pgshift = chip->page_shift; ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz; ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec; ns->options = 0; if (ns->geom.pgsz == 512) { ns->options |= OPT_PAGE512; if (ns->busw == 8) ns->options |= OPT_PAGE512_8BIT; } else if (ns->geom.pgsz == 2048) { ns->options |= OPT_PAGE2048; } else if (ns->geom.pgsz == 4096) { ns->options |= OPT_PAGE4096; } else { NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz); return -EIO; } if (ns->options & OPT_SMALLPAGE) { if (ns->geom.totsz <= (32 << 20)) { ns->geom.pgaddrbytes = 3; ns->geom.secaddrbytes = 2; } else { ns->geom.pgaddrbytes = 4; ns->geom.secaddrbytes = 3; } } else { if (ns->geom.totsz <= (128 << 20)) { ns->geom.pgaddrbytes = 4; ns->geom.secaddrbytes = 2; } else { ns->geom.pgaddrbytes = 5; ns->geom.secaddrbytes = 3; } } /* Fill the partition_info structure */ if (parts_num > ARRAY_SIZE(ns->partitions)) { NS_ERR("too many partitions.\n"); ret = -EINVAL; goto error; } remains = ns->geom.totsz; next_offset = 0; for (i = 0; i < parts_num; ++i) { uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz; if (!part_sz || part_sz > 
remains) { NS_ERR("bad partition size.\n"); ret = -EINVAL; goto error; } ns->partitions[i].name = get_partition_name(i); ns->partitions[i].offset = next_offset; ns->partitions[i].size = part_sz; next_offset += ns->partitions[i].size; remains -= ns->partitions[i].size; } ns->nbparts = parts_num; if (remains) { if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) { NS_ERR("too many partitions.\n"); ret = -EINVAL; goto error; } ns->partitions[i].name = get_partition_name(i); ns->partitions[i].offset = next_offset; ns->partitions[i].size = remains; ns->nbparts += 1; } if (ns->busw == 16) NS_WARN("16-bit flashes support wasn't tested\n"); printk("flash size: %llu MiB\n", (unsigned long long)ns->geom.totsz >> 20); printk("page size: %u bytes\n", ns->geom.pgsz); printk("OOB area size: %u bytes\n", ns->geom.oobsz); printk("sector size: %u KiB\n", ns->geom.secsz >> 10); printk("pages number: %u\n", ns->geom.pgnum); printk("pages per sector: %u\n", ns->geom.pgsec); printk("bus width: %u\n", ns->busw); printk("bits in sector size: %u\n", ns->geom.secshift); printk("bits in page size: %u\n", ns->geom.pgshift); printk("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1); printk("flash size with OOB: %llu KiB\n", (unsigned long long)ns->geom.totszoob >> 10); printk("page address bytes: %u\n", ns->geom.pgaddrbytes); printk("sector address bytes: %u\n", ns->geom.secaddrbytes); printk("options: %#x\n", ns->options); if ((ret = alloc_device(ns)) != 0) goto error; /* Allocate / initialize the internal buffer */ ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL); if (!ns->buf.byte) { NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n", ns->geom.pgszoob); ret = -ENOMEM; goto error; } memset(ns->buf.byte, 0xFF, ns->geom.pgszoob); return 0; error: free_device(ns); return ret; } /* * Free the nandsim structure. 
*/ static void free_nandsim(struct nandsim *ns) { kfree(ns->buf.byte); free_device(ns); return; } static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd) { char *w; int zero_ok; unsigned int erase_block_no; loff_t offset; if (!badblocks) return 0; w = badblocks; do { zero_ok = (*w == '0' ? 1 : 0); erase_block_no = simple_strtoul(w, &w, 0); if (!zero_ok && !erase_block_no) { NS_ERR("invalid badblocks.\n"); return -EINVAL; } offset = erase_block_no * ns->geom.secsz; if (mtd_block_markbad(mtd, offset)) { NS_ERR("invalid badblocks.\n"); return -EINVAL; } if (*w == ',') w += 1; } while (*w); return 0; } static int parse_weakblocks(void) { char *w; int zero_ok; unsigned int erase_block_no; unsigned int max_erases; struct weak_block *wb; if (!weakblocks) return 0; w = weakblocks; do { zero_ok = (*w == '0' ? 1 : 0); erase_block_no = simple_strtoul(w, &w, 0); if (!zero_ok && !erase_block_no) { NS_ERR("invalid weakblocks.\n"); return -EINVAL; } max_erases = 3; if (*w == ':') { w += 1; max_erases = simple_strtoul(w, &w, 0); } if (*w == ',') w += 1; wb = kzalloc(sizeof(*wb), GFP_KERNEL); if (!wb) { NS_ERR("unable to allocate memory.\n"); return -ENOMEM; } wb->erase_block_no = erase_block_no; wb->max_erases = max_erases; list_add(&wb->list, &weak_blocks); } while (*w); return 0; } static int erase_error(unsigned int erase_block_no) { struct weak_block *wb; list_for_each_entry(wb, &weak_blocks, list) if (wb->erase_block_no == erase_block_no) { if (wb->erases_done >= wb->max_erases) return 1; wb->erases_done += 1; return 0; } return 0; } static int parse_weakpages(void) { char *w; int zero_ok; unsigned int page_no; unsigned int max_writes; struct weak_page *wp; if (!weakpages) return 0; w = weakpages; do { zero_ok = (*w == '0' ? 
1 : 0); page_no = simple_strtoul(w, &w, 0); if (!zero_ok && !page_no) { NS_ERR("invalid weakpagess.\n"); return -EINVAL; } max_writes = 3; if (*w == ':') { w += 1; max_writes = simple_strtoul(w, &w, 0); } if (*w == ',') w += 1; wp = kzalloc(sizeof(*wp), GFP_KERNEL); if (!wp) { NS_ERR("unable to allocate memory.\n"); return -ENOMEM; } wp->page_no = page_no; wp->max_writes = max_writes; list_add(&wp->list, &weak_pages); } while (*w); return 0; } static int write_error(unsigned int page_no) { struct weak_page *wp; list_for_each_entry(wp, &weak_pages, list) if (wp->page_no == page_no) { if (wp->writes_done >= wp->max_writes) return 1; wp->writes_done += 1; return 0; } return 0; } static int parse_gravepages(void) { char *g; int zero_ok; unsigned int page_no; unsigned int max_reads; struct grave_page *gp; if (!gravepages) return 0; g = gravepages; do { zero_ok = (*g == '0' ? 1 : 0); page_no = simple_strtoul(g, &g, 0); if (!zero_ok && !page_no) { NS_ERR("invalid gravepagess.\n"); return -EINVAL; } max_reads = 3; if (*g == ':') { g += 1; max_reads = simple_strtoul(g, &g, 0); } if (*g == ',') g += 1; gp = kzalloc(sizeof(*gp), GFP_KERNEL); if (!gp) { NS_ERR("unable to allocate memory.\n"); return -ENOMEM; } gp->page_no = page_no; gp->max_reads = max_reads; list_add(&gp->list, &grave_pages); } while (*g); return 0; } static int read_error(unsigned int page_no) { struct grave_page *gp; list_for_each_entry(gp, &grave_pages, list) if (gp->page_no == page_no) { if (gp->reads_done >= gp->max_reads) return 1; gp->reads_done += 1; return 0; } return 0; } static void free_lists(void) { struct list_head *pos, *n; list_for_each_safe(pos, n, &weak_blocks) { list_del(pos); kfree(list_entry(pos, struct weak_block, list)); } list_for_each_safe(pos, n, &weak_pages) { list_del(pos); kfree(list_entry(pos, struct weak_page, list)); } list_for_each_safe(pos, n, &grave_pages) { list_del(pos); kfree(list_entry(pos, struct grave_page, list)); } kfree(erase_block_wear); } static int 
setup_wear_reporting(struct mtd_info *mtd) { size_t mem; wear_eb_count = div_u64(mtd->size, mtd->erasesize); mem = wear_eb_count * sizeof(unsigned long); if (mem / sizeof(unsigned long) != wear_eb_count) { NS_ERR("Too many erase blocks for wear reporting\n"); return -ENOMEM; } erase_block_wear = kzalloc(mem, GFP_KERNEL); if (!erase_block_wear) { NS_ERR("Too many erase blocks for wear reporting\n"); return -ENOMEM; } return 0; } static void update_wear(unsigned int erase_block_no) { if (!erase_block_wear) return; total_wear += 1; /* * TODO: Notify this through a debugfs entry, * instead of showing an error message. */ if (total_wear == 0) NS_ERR("Erase counter total overflow\n"); erase_block_wear[erase_block_no] += 1; if (erase_block_wear[erase_block_no] == 0) NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no); } /* * Returns the string representation of 'state' state. */ static char *get_state_name(uint32_t state) { switch (NS_STATE(state)) { case STATE_CMD_READ0: return "STATE_CMD_READ0"; case STATE_CMD_READ1: return "STATE_CMD_READ1"; case STATE_CMD_PAGEPROG: return "STATE_CMD_PAGEPROG"; case STATE_CMD_READOOB: return "STATE_CMD_READOOB"; case STATE_CMD_READSTART: return "STATE_CMD_READSTART"; case STATE_CMD_ERASE1: return "STATE_CMD_ERASE1"; case STATE_CMD_STATUS: return "STATE_CMD_STATUS"; case STATE_CMD_SEQIN: return "STATE_CMD_SEQIN"; case STATE_CMD_READID: return "STATE_CMD_READID"; case STATE_CMD_ERASE2: return "STATE_CMD_ERASE2"; case STATE_CMD_RESET: return "STATE_CMD_RESET"; case STATE_CMD_RNDOUT: return "STATE_CMD_RNDOUT"; case STATE_CMD_RNDOUTSTART: return "STATE_CMD_RNDOUTSTART"; case STATE_ADDR_PAGE: return "STATE_ADDR_PAGE"; case STATE_ADDR_SEC: return "STATE_ADDR_SEC"; case STATE_ADDR_ZERO: return "STATE_ADDR_ZERO"; case STATE_ADDR_COLUMN: return "STATE_ADDR_COLUMN"; case STATE_DATAIN: return "STATE_DATAIN"; case STATE_DATAOUT: return "STATE_DATAOUT"; case STATE_DATAOUT_ID: return "STATE_DATAOUT_ID"; case STATE_DATAOUT_STATUS: 
return "STATE_DATAOUT_STATUS"; case STATE_DATAOUT_STATUS_M: return "STATE_DATAOUT_STATUS_M"; case STATE_READY: return "STATE_READY"; case STATE_UNKNOWN: return "STATE_UNKNOWN"; } NS_ERR("get_state_name: unknown state, BUG\n"); return NULL; } /* * Check if command is valid. * * RETURNS: 1 if wrong command, 0 if right. */ static int check_command(int cmd) { switch (cmd) { case NAND_CMD_READ0: case NAND_CMD_READ1: case NAND_CMD_READSTART: case NAND_CMD_PAGEPROG: case NAND_CMD_READOOB: case NAND_CMD_ERASE1: case NAND_CMD_STATUS: case NAND_CMD_SEQIN: case NAND_CMD_READID: case NAND_CMD_ERASE2: case NAND_CMD_RESET: case NAND_CMD_RNDOUT: case NAND_CMD_RNDOUTSTART: return 0; default: return 1; } } /* * Returns state after command is accepted by command number. */ static uint32_t get_state_by_command(unsigned command) { switch (command) { case NAND_CMD_READ0: return STATE_CMD_READ0; case NAND_CMD_READ1: return STATE_CMD_READ1; case NAND_CMD_PAGEPROG: return STATE_CMD_PAGEPROG; case NAND_CMD_READSTART: return STATE_CMD_READSTART; case NAND_CMD_READOOB: return STATE_CMD_READOOB; case NAND_CMD_ERASE1: return STATE_CMD_ERASE1; case NAND_CMD_STATUS: return STATE_CMD_STATUS; case NAND_CMD_SEQIN: return STATE_CMD_SEQIN; case NAND_CMD_READID: return STATE_CMD_READID; case NAND_CMD_ERASE2: return STATE_CMD_ERASE2; case NAND_CMD_RESET: return STATE_CMD_RESET; case NAND_CMD_RNDOUT: return STATE_CMD_RNDOUT; case NAND_CMD_RNDOUTSTART: return STATE_CMD_RNDOUTSTART; } NS_ERR("get_state_by_command: unknown command, BUG\n"); return 0; } /* * Move an address byte to the correspondent internal register. */ static inline void accept_addr_byte(struct nandsim *ns, u_char bt) { uint byte = (uint)bt; if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) ns->regs.column |= (byte << 8 * ns->regs.count); else { ns->regs.row |= (byte << 8 * (ns->regs.count - ns->geom.pgaddrbytes + ns->geom.secaddrbytes)); } return; } /* * Switch to STATE_READY state. 
*/ static inline void switch_to_ready_state(struct nandsim *ns, u_char status) { NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY)); ns->state = STATE_READY; ns->nxstate = STATE_UNKNOWN; ns->op = NULL; ns->npstates = 0; ns->stateidx = 0; ns->regs.num = 0; ns->regs.count = 0; ns->regs.off = 0; ns->regs.row = 0; ns->regs.column = 0; ns->regs.status = status; } /* * If the operation isn't known yet, try to find it in the global array * of supported operations. * * Operation can be unknown because of the following. * 1. New command was accepted and this is the first call to find the * correspondent states chain. In this case ns->npstates = 0; * 2. There are several operations which begin with the same command(s) * (for example program from the second half and read from the * second half operations both begin with the READ1 command). In this * case the ns->pstates[] array contains previous states. * * Thus, the function tries to find operation containing the following * states (if the 'flag' parameter is 0): * ns->pstates[0], ... ns->pstates[ns->npstates], ns->state * * If (one and only one) matching operation is found, it is accepted ( * ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is * zeroed). * * If there are several matches, the current state is pushed to the * ns->pstates. * * The operation can be unknown only while commands are input to the chip. * As soon as address command is accepted, the operation must be known. * In such situation the function is called with 'flag' != 0, and the * operation is searched using the following pattern: * ns->pstates[0], ... ns->pstates[ns->npstates], <address input> * * It is supposed that this pattern must either match one operation or * none. There can't be ambiguity in that case. * * If no matches found, the function does the following: * 1. if there are saved states present, try to ignore them and search * again only using the last command. 
If nothing was found, switch * to the STATE_READY state. * 2. if there are no saved states, switch to the STATE_READY state. * * RETURNS: -2 - no matched operations found. * -1 - several matches. * 0 - operation is found. */ static int find_operation(struct nandsim *ns, uint32_t flag) { int opsfound = 0; int i, j, idx = 0; for (i = 0; i < NS_OPER_NUM; i++) { int found = 1; if (!(ns->options & ops[i].reqopts)) /* Ignore operations we can't perform */ continue; if (flag) { if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK)) continue; } else { if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates])) continue; } for (j = 0; j < ns->npstates; j++) if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j]) && (ns->options & ops[idx].reqopts)) { found = 0; break; } if (found) { idx = i; opsfound += 1; } } if (opsfound == 1) { /* Exact match */ ns->op = &ops[idx].states[0]; if (flag) { /* * In this case the find_operation function was * called when address has just began input. But it isn't * yet fully input and the current state must * not be one of STATE_ADDR_*, but the STATE_ADDR_* * state must be the next state (ns->nxstate). */ ns->stateidx = ns->npstates - 1; } else { ns->stateidx = ns->npstates; } ns->npstates = 0; ns->state = ns->op[ns->stateidx]; ns->nxstate = ns->op[ns->stateidx + 1]; NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n", idx, get_state_name(ns->state), get_state_name(ns->nxstate)); return 0; } if (opsfound == 0) { /* Nothing was found. 
Try to ignore previous commands (if any) and search again */ if (ns->npstates != 0) { NS_DBG("find_operation: no operation found, try again with state %s\n", get_state_name(ns->state)); ns->npstates = 0; return find_operation(ns, 0); } NS_DBG("find_operation: no operations found\n"); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return -2; } if (flag) { /* This shouldn't happen */ NS_DBG("find_operation: BUG, operation must be known if address is input\n"); return -2; } NS_DBG("find_operation: there is still ambiguity\n"); ns->pstates[ns->npstates++] = ns->state; return -1; } static void put_pages(struct nandsim *ns) { int i; for (i = 0; i < ns->held_cnt; i++) page_cache_release(ns->held_pages[i]); } /* Get page cache pages in advance to provide NOFS memory allocation */ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos) { pgoff_t index, start_index, end_index; struct page *page; struct address_space *mapping = file->f_mapping; start_index = pos >> PAGE_CACHE_SHIFT; end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT; if (end_index - start_index + 1 > NS_MAX_HELD_PAGES) return -EINVAL; ns->held_cnt = 0; for (index = start_index; index <= end_index; index++) { page = find_get_page(mapping, index); if (page == NULL) { page = find_or_create_page(mapping, index, GFP_NOFS); if (page == NULL) { write_inode_now(mapping->host, 1); page = find_or_create_page(mapping, index, GFP_NOFS); } if (page == NULL) { put_pages(ns); return -ENOMEM; } unlock_page(page); } ns->held_pages[ns->held_cnt++] = page; } return 0; } static int set_memalloc(void) { if (current->flags & PF_MEMALLOC) return 0; current->flags |= PF_MEMALLOC; return 1; } static void clear_memalloc(int memalloc) { if (memalloc) current->flags &= ~PF_MEMALLOC; } static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos) { ssize_t tx; int err, memalloc; err = get_pages(ns, file, count, pos); if (err) return err; memalloc = set_memalloc(); tx = 
kernel_read(file, pos, buf, count); clear_memalloc(memalloc); put_pages(ns); return tx; } static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos) { ssize_t tx; int err, memalloc; err = get_pages(ns, file, count, pos); if (err) return err; memalloc = set_memalloc(); tx = kernel_write(file, buf, count, pos); clear_memalloc(memalloc); put_pages(ns); return tx; } /* * Returns a pointer to the current page. */ static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns) { return &(ns->pages[ns->regs.row]); } /* * Retuns a pointer to the current byte, within the current page. */ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns) { return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off; } static int do_read_error(struct nandsim *ns, int num) { unsigned int page_no = ns->regs.row; if (read_error(page_no)) { prandom_bytes(ns->buf.byte, num); NS_WARN("simulating read error in page %u\n", page_no); return 1; } return 0; } static void do_bit_flips(struct nandsim *ns, int num) { if (bitflips && prandom_u32() < (1 << 22)) { int flips = 1; if (bitflips > 1) flips = (prandom_u32() % (int) bitflips) + 1; while (flips--) { int pos = prandom_u32() % (num * 8); ns->buf.byte[pos / 8] ^= (1 << (pos % 8)); NS_WARN("read_page: flipping bit %d in page %d " "reading from %d ecc: corrected=%u failed=%u\n", pos, ns->regs.row, ns->regs.column + ns->regs.off, nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed); } } } /* * Fill the NAND buffer with data read from the specified page. 
*/ static void read_page(struct nandsim *ns, int num) { union ns_mem *mypage; if (ns->cfile) { if (!test_bit(ns->regs.row, ns->pages_written)) { NS_DBG("read_page: page %d not written\n", ns->regs.row); memset(ns->buf.byte, 0xFF, num); } else { loff_t pos; ssize_t tx; NS_DBG("read_page: page %d written, reading from %d\n", ns->regs.row, ns->regs.column + ns->regs.off); if (do_read_error(ns, num)) return; pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off; tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos); if (tx != num) { NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); return; } do_bit_flips(ns, num); } return; } mypage = NS_GET_PAGE(ns); if (mypage->byte == NULL) { NS_DBG("read_page: page %d not allocated\n", ns->regs.row); memset(ns->buf.byte, 0xFF, num); } else { NS_DBG("read_page: page %d allocated, reading from %d\n", ns->regs.row, ns->regs.column + ns->regs.off); if (do_read_error(ns, num)) return; memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num); do_bit_flips(ns, num); } } /* * Erase all pages in the specified sector. */ static void erase_sector(struct nandsim *ns) { union ns_mem *mypage; int i; if (ns->cfile) { for (i = 0; i < ns->geom.pgsec; i++) if (__test_and_clear_bit(ns->regs.row + i, ns->pages_written)) { NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i); } return; } mypage = NS_GET_PAGE(ns); for (i = 0; i < ns->geom.pgsec; i++) { if (mypage->byte != NULL) { NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i); kmem_cache_free(ns->nand_pages_slab, mypage->byte); mypage->byte = NULL; } mypage++; } } /* * Program the specified page with the contents from the NAND buffer. 
*/ static int prog_page(struct nandsim *ns, int num) { int i; union ns_mem *mypage; u_char *pg_off; if (ns->cfile) { loff_t off; ssize_t tx; int all; NS_DBG("prog_page: writing page %d\n", ns->regs.row); pg_off = ns->file_buf + ns->regs.column + ns->regs.off; off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off; if (!test_bit(ns->regs.row, ns->pages_written)) { all = 1; memset(ns->file_buf, 0xff, ns->geom.pgszoob); } else { all = 0; tx = read_file(ns, ns->cfile, pg_off, num, off); if (tx != num) { NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; } } for (i = 0; i < num; i++) pg_off[i] &= ns->buf.byte[i]; if (all) { loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob; tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos); if (tx != ns->geom.pgszoob) { NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; } __set_bit(ns->regs.row, ns->pages_written); } else { tx = write_file(ns, ns->cfile, pg_off, num, off); if (tx != num) { NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; } } return 0; } mypage = NS_GET_PAGE(ns); if (mypage->byte == NULL) { NS_DBG("prog_page: allocating page %d\n", ns->regs.row); /* * We allocate memory with GFP_NOFS because a flash FS may * utilize this. If it is holding an FS lock, then gets here, * then kernel memory alloc runs writeback which goes to the FS * again and deadlocks. This was seen in practice. */ mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS); if (mypage->byte == NULL) { NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row); return -1; } memset(mypage->byte, 0xFF, ns->geom.pgszoob); } pg_off = NS_PAGE_BYTE_OFF(ns); for (i = 0; i < num; i++) pg_off[i] &= ns->buf.byte[i]; return 0; } /* * If state has any action bit, perform this action. * * RETURNS: 0 if success, -1 if error. 
*/ static int do_state_action(struct nandsim *ns, uint32_t action) { int num; int busdiv = ns->busw == 8 ? 1 : 2; unsigned int erase_block_no, page_no; action &= ACTION_MASK; /* Check that page address input is correct */ if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) { NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row); return -1; } switch (action) { case ACTION_CPY: /* * Copy page data to the internal buffer. */ /* Column shouldn't be very large */ if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) { NS_ERR("do_state_action: column number is too large\n"); break; } num = ns->geom.pgszoob - ns->regs.off - ns->regs.column; read_page(ns, num); NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n", num, NS_RAW_OFFSET(ns) + ns->regs.off); if (ns->regs.off == 0) NS_LOG("read page %d\n", ns->regs.row); else if (ns->regs.off < ns->geom.pgsz) NS_LOG("read page %d (second half)\n", ns->regs.row); else NS_LOG("read OOB of page %d\n", ns->regs.row); NS_UDELAY(access_delay); NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv); break; case ACTION_SECERASE: /* * Erase sector. 
*/ if (ns->lines.wp) { NS_ERR("do_state_action: device is write-protected, ignore sector erase\n"); return -1; } if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec || (ns->regs.row & ~(ns->geom.secsz - 1))) { NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row); return -1; } ns->regs.row = (ns->regs.row << 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column; ns->regs.column = 0; erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift); NS_DBG("do_state_action: erase sector at address %#x, off = %d\n", ns->regs.row, NS_RAW_OFFSET(ns)); NS_LOG("erase sector %u\n", erase_block_no); erase_sector(ns); NS_MDELAY(erase_delay); if (erase_block_wear) update_wear(erase_block_no); if (erase_error(erase_block_no)) { NS_WARN("simulating erase failure in erase block %u\n", erase_block_no); return -1; } break; case ACTION_PRGPAGE: /* * Program page - move internal buffer data to the page. */ if (ns->lines.wp) { NS_WARN("do_state_action: device is write-protected, programm\n"); return -1; } num = ns->geom.pgszoob - ns->regs.off - ns->regs.column; if (num != ns->regs.count) { NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n", ns->regs.count, num); return -1; } if (prog_page(ns, num) == -1) return -1; page_no = ns->regs.row; NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n", num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off); NS_LOG("programm page %d\n", ns->regs.row); NS_UDELAY(programm_delay); NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv); if (write_error(page_no)) { NS_WARN("simulating write failure in page %u\n", page_no); return -1; } break; case ACTION_ZEROOFF: NS_DBG("do_state_action: set internal offset to 0\n"); ns->regs.off = 0; break; case ACTION_HALFOFF: if (!(ns->options & OPT_PAGE512_8BIT)) { NS_ERR("do_state_action: BUG! 
can't skip half of page for non-512" "byte page size 8x chips\n"); return -1; } NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2); ns->regs.off = ns->geom.pgsz/2; break; case ACTION_OOBOFF: NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz); ns->regs.off = ns->geom.pgsz; break; default: NS_DBG("do_state_action: BUG! unknown action\n"); } return 0; } /* * Switch simulator's state. */ static void switch_state(struct nandsim *ns) { if (ns->op) { /* * The current operation have already been identified. * Just follow the states chain. */ ns->stateidx += 1; ns->state = ns->nxstate; ns->nxstate = ns->op[ns->stateidx + 1]; NS_DBG("switch_state: operation is known, switch to the next state, " "state: %s, nxstate: %s\n", get_state_name(ns->state), get_state_name(ns->nxstate)); /* See, whether we need to do some action */ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } } else { /* * We don't yet know which operation we perform. * Try to identify it. */ /* * The only event causing the switch_state function to * be called with yet unknown operation is new command. */ ns->state = get_state_by_command(ns->regs.command); NS_DBG("switch_state: operation is unknown, try to find it\n"); if (find_operation(ns, 0) != 0) return; if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } } /* For 16x devices column means the page offset in words */ if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) { NS_DBG("switch_state: double the column number for 16x device\n"); ns->regs.column <<= 1; } if (NS_STATE(ns->nxstate) == STATE_READY) { /* * The current state is the last. 
Return to STATE_READY */ u_char status = NS_STATUS_OK(ns); /* In case of data states, see if all bytes were input/output */ if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) && ns->regs.count != ns->regs.num) { NS_WARN("switch_state: not all bytes were processed, %d left\n", ns->regs.num - ns->regs.count); status = NS_STATUS_FAILED(ns); } NS_DBG("switch_state: operation complete, switch to STATE_READY state\n"); switch_to_ready_state(ns, status); return; } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) { /* * If the next state is data input/output, switch to it now */ ns->state = ns->nxstate; ns->nxstate = ns->op[++ns->stateidx + 1]; ns->regs.num = ns->regs.count = 0; NS_DBG("switch_state: the next state is data I/O, switch, " "state: %s, nxstate: %s\n", get_state_name(ns->state), get_state_name(ns->nxstate)); /* * Set the internal register to the count of bytes which * are expected to be input or output */ switch (NS_STATE(ns->state)) { case STATE_DATAIN: case STATE_DATAOUT: ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column; break; case STATE_DATAOUT_ID: ns->regs.num = ns->geom.idbytes; break; case STATE_DATAOUT_STATUS: case STATE_DATAOUT_STATUS_M: ns->regs.count = ns->regs.num = 0; break; default: NS_ERR("switch_state: BUG! unknown data state\n"); } } else if (ns->nxstate & STATE_ADDR_MASK) { /* * If the next state is address input, set the internal * register to the number of expected address bytes */ ns->regs.count = 0; switch (NS_STATE(ns->nxstate)) { case STATE_ADDR_PAGE: ns->regs.num = ns->geom.pgaddrbytes; break; case STATE_ADDR_SEC: ns->regs.num = ns->geom.secaddrbytes; break; case STATE_ADDR_ZERO: ns->regs.num = 1; break; case STATE_ADDR_COLUMN: /* Column address is always 2 bytes */ ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes; break; default: NS_ERR("switch_state: BUG! unknown address state\n"); } } else { /* * Just reset internal counters. 
*/ ns->regs.num = 0; ns->regs.count = 0; } } static u_char ns_nand_read_byte(struct mtd_info *mtd) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; u_char outb = 0x00; /* Sanity and correctness checks */ if (!ns->lines.ce) { NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb); return outb; } if (ns->lines.ale || ns->lines.cle) { NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb); return outb; } if (!(ns->state & STATE_DATAOUT_MASK)) { NS_WARN("read_byte: unexpected data output cycle, state is %s " "return %#x\n", get_state_name(ns->state), (uint)outb); return outb; } /* Status register may be read as many times as it is wanted */ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) { NS_DBG("read_byte: return %#x status\n", ns->regs.status); return ns->regs.status; } /* Check if there is any data in the internal buffer which may be read */ if (ns->regs.count == ns->regs.num) { NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb); return outb; } switch (NS_STATE(ns->state)) { case STATE_DATAOUT: if (ns->busw == 8) { outb = ns->buf.byte[ns->regs.count]; ns->regs.count += 1; } else { outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]); ns->regs.count += 2; } break; case STATE_DATAOUT_ID: NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num); outb = ns->ids[ns->regs.count]; ns->regs.count += 1; break; default: BUG(); } if (ns->regs.count == ns->regs.num) { NS_DBG("read_byte: all bytes were read\n"); if (NS_STATE(ns->nxstate) == STATE_READY) switch_state(ns); } return outb; } static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; /* Sanity and correctness checks */ if (!ns->lines.ce) { NS_ERR("write_byte: chip is disabled, ignore write\n"); return; } if (ns->lines.ale && ns->lines.cle) { NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n"); return; } if (ns->lines.cle == 1) { /* 
* The byte written is a command. */ if (byte == NAND_CMD_RESET) { NS_LOG("reset chip\n"); switch_to_ready_state(ns, NS_STATUS_OK(ns)); return; } /* Check that the command byte is correct */ if (check_command(byte)) { NS_ERR("write_byte: unknown command %#x\n", (uint)byte); return; } if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M || NS_STATE(ns->state) == STATE_DATAOUT) { int row = ns->regs.row; switch_state(ns); if (byte == NAND_CMD_RNDOUT) ns->regs.row = row; } /* Check if chip is expecting command */ if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) { /* Do not warn if only 2 id bytes are read */ if (!(ns->regs.command == NAND_CMD_READID && NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) { /* * We are in situation when something else (not command) * was expected but command was input. In this case ignore * previous command(s)/state(s) and accept the last one. */ NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, " "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate)); } switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); } NS_DBG("command byte corresponding to %s state accepted\n", get_state_name(get_state_by_command(byte))); ns->regs.command = byte; switch_state(ns); } else if (ns->lines.ale == 1) { /* * The byte written is an address. 
*/ if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) { NS_DBG("write_byte: operation isn't known yet, identify it\n"); if (find_operation(ns, 1) < 0) return; if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } ns->regs.count = 0; switch (NS_STATE(ns->nxstate)) { case STATE_ADDR_PAGE: ns->regs.num = ns->geom.pgaddrbytes; break; case STATE_ADDR_SEC: ns->regs.num = ns->geom.secaddrbytes; break; case STATE_ADDR_ZERO: ns->regs.num = 1; break; default: BUG(); } } /* Check that chip is expecting address */ if (!(ns->nxstate & STATE_ADDR_MASK)) { NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, " "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate)); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } /* Check if this is expected byte */ if (ns->regs.count == ns->regs.num) { NS_ERR("write_byte: no more address bytes expected\n"); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } accept_addr_byte(ns, byte); ns->regs.count += 1; NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n", (uint)byte, ns->regs.count, ns->regs.num); if (ns->regs.count == ns->regs.num) { NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column); switch_state(ns); } } else { /* * The byte written is an input data. 
*/ /* Check that chip is expecting data input */ if (!(ns->state & STATE_DATAIN_MASK)) { NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, " "switch to %s\n", (uint)byte, get_state_name(ns->state), get_state_name(STATE_READY)); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } /* Check if this is expected byte */ if (ns->regs.count == ns->regs.num) { NS_WARN("write_byte: %u input bytes has already been accepted, ignore write\n", ns->regs.num); return; } if (ns->busw == 8) { ns->buf.byte[ns->regs.count] = byte; ns->regs.count += 1; } else { ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte); ns->regs.count += 2; } } return; } static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; ns->lines.cle = bitmask & NAND_CLE ? 1 : 0; ns->lines.ale = bitmask & NAND_ALE ? 1 : 0; ns->lines.ce = bitmask & NAND_NCE ? 1 : 0; if (cmd != NAND_CMD_NONE) ns_nand_write_byte(mtd, cmd); } static int ns_device_ready(struct mtd_info *mtd) { NS_DBG("device_ready\n"); return 1; } static uint16_t ns_nand_read_word(struct mtd_info *mtd) { struct nand_chip *chip = (struct nand_chip *)mtd->priv; NS_DBG("read_word\n"); return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8); } static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; /* Check that chip is expecting data input */ if (!(ns->state & STATE_DATAIN_MASK)) { NS_ERR("write_buf: data input isn't expected, state is %s, " "switch to STATE_READY\n", get_state_name(ns->state)); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } /* Check if these are expected bytes */ if (ns->regs.count + len > ns->regs.num) { NS_ERR("write_buf: too many input bytes\n"); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } memcpy(ns->buf.byte + ns->regs.count, buf, len); ns->regs.count += len; if (ns->regs.count == ns->regs.num) { 
NS_DBG("write_buf: %d bytes were written\n", ns->regs.count); } } static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; /* Sanity and correctness checks */ if (!ns->lines.ce) { NS_ERR("read_buf: chip is disabled\n"); return; } if (ns->lines.ale || ns->lines.cle) { NS_ERR("read_buf: ALE or CLE pin is high\n"); return; } if (!(ns->state & STATE_DATAOUT_MASK)) { NS_WARN("read_buf: unexpected data output cycle, current state is %s\n", get_state_name(ns->state)); return; } if (NS_STATE(ns->state) != STATE_DATAOUT) { int i; for (i = 0; i < len; i++) buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd); return; } /* Check if these are expected bytes */ if (ns->regs.count + len > ns->regs.num) { NS_ERR("read_buf: too many bytes to read\n"); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } memcpy(buf, ns->buf.byte + ns->regs.count, len); ns->regs.count += len; if (ns->regs.count == ns->regs.num) { if (NS_STATE(ns->nxstate) == STATE_READY) switch_state(ns); } return; } /* * Module initialization function */ static int __init ns_init_module(void) { struct nand_chip *chip; struct nandsim *nand; int retval = -ENOMEM, i; if (bus_width != 8 && bus_width != 16) { NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width); return -EINVAL; } /* Allocate and initialize mtd_info, nand_chip and nandsim structures */ nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip) + sizeof(struct nandsim), GFP_KERNEL); if (!nsmtd) { NS_ERR("unable to allocate core structures.\n"); return -ENOMEM; } chip = (struct nand_chip *)(nsmtd + 1); nsmtd->priv = (void *)chip; nand = (struct nandsim *)(chip + 1); chip->priv = (void *)nand; /* * Register simulator's callbacks. 
*/ chip->cmd_ctrl = ns_hwcontrol; chip->read_byte = ns_nand_read_byte; chip->dev_ready = ns_device_ready; chip->write_buf = ns_nand_write_buf; chip->read_buf = ns_nand_read_buf; chip->read_word = ns_nand_read_word; chip->ecc.mode = NAND_ECC_SOFT; /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */ /* and 'badblocks' parameters to work */ chip->options |= NAND_SKIP_BBTSCAN; switch (bbt) { case 2: chip->bbt_options |= NAND_BBT_NO_OOB; case 1: chip->bbt_options |= NAND_BBT_USE_FLASH; case 0: break; default: NS_ERR("bbt has to be 0..2\n"); retval = -EINVAL; goto error; } /* * Perform minimum nandsim structure initialization to handle * the initial ID read command correctly */ if (third_id_byte != 0xFF || fourth_id_byte != 0xFF) nand->geom.idbytes = 4; else nand->geom.idbytes = 2; nand->regs.status = NS_STATUS_OK(nand); nand->nxstate = STATE_UNKNOWN; nand->options |= OPT_PAGE512; /* temporary value */ nand->ids[0] = first_id_byte; nand->ids[1] = second_id_byte; nand->ids[2] = third_id_byte; nand->ids[3] = fourth_id_byte; if (bus_width == 16) { nand->busw = 16; chip->options |= NAND_BUSWIDTH_16; } nsmtd->owner = THIS_MODULE; if ((retval = parse_weakblocks()) != 0) goto error; if ((retval = parse_weakpages()) != 0) goto error; if ((retval = parse_gravepages()) != 0) goto error; retval = nand_scan_ident(nsmtd, 1, NULL); if (retval) { NS_ERR("cannot scan NAND Simulator device\n"); if (retval > 0) retval = -ENXIO; goto error; } if (bch) { unsigned int eccsteps, eccbytes; if (!mtd_nand_has_bch()) { NS_ERR("BCH ECC support is disabled\n"); retval = -EINVAL; goto error; } /* use 512-byte ecc blocks */ eccsteps = nsmtd->writesize/512; eccbytes = (bch*13+7)/8; /* do not bother supporting small page devices */ if ((nsmtd->oobsize < 64) || !eccsteps) { NS_ERR("bch not available on small page devices\n"); retval = -EINVAL; goto error; } if ((eccbytes*eccsteps+2) > nsmtd->oobsize) { NS_ERR("invalid bch value %u\n", bch); retval = -EINVAL; goto error; } chip->ecc.mode = 
NAND_ECC_SOFT_BCH; chip->ecc.size = 512; chip->ecc.bytes = eccbytes; NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size); } retval = nand_scan_tail(nsmtd); if (retval) { NS_ERR("can't register NAND Simulator\n"); if (retval > 0) retval = -ENXIO; goto error; } if (overridesize) { uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize; if (new_size >> overridesize != nsmtd->erasesize) { NS_ERR("overridesize is too big\n"); retval = -EINVAL; goto err_exit; } /* N.B. This relies on nand_scan not doing anything with the size before we change it */ nsmtd->size = new_size; chip->chipsize = new_size; chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1; chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; } if ((retval = setup_wear_reporting(nsmtd)) != 0) goto err_exit; if ((retval = nandsim_debugfs_create(nand)) != 0) goto err_exit; if ((retval = init_nandsim(nsmtd)) != 0) goto err_exit; if ((retval = nand_default_bbt(nsmtd)) != 0) goto err_exit; if ((retval = parse_badblocks(nand, nsmtd)) != 0) goto err_exit; /* Register NAND partitions */ retval = mtd_device_register(nsmtd, &nand->partitions[0], nand->nbparts); if (retval != 0) goto err_exit; return 0; err_exit: free_nandsim(nand); nand_release(nsmtd); for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) kfree(nand->partitions[i].name); error: kfree(nsmtd); free_lists(); return retval; } module_init(ns_init_module); /* * Module clean-up function */ static void __exit ns_cleanup_module(void) { struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv; int i; nandsim_debugfs_remove(ns); free_nandsim(ns); /* Free nandsim private resources */ nand_release(nsmtd); /* Unregister driver */ for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) kfree(ns->partitions[i].name); kfree(nsmtd); /* Free other structures */ free_lists(); } module_exit(ns_cleanup_module); MODULE_LICENSE ("GPL"); MODULE_AUTHOR ("Artem B. Bityuckiy"); MODULE_DESCRIPTION ("The NAND flash simulator");
gpl-2.0
johnhubbard/pnotify-linux-3.12.20
drivers/mtd/nand/nandsim.c
86
68438
/*
 * NAND flash simulator.
 *
 * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
 *
 * Copyright (C) 2004 Nokia Corporation
 *
 * Note: NS means "NAND Simulator".
 * Note: Input means input TO flash chip, output means output FROM chip.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
 * Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

/* Default simulator parameters values */
/* If any ID byte is left unconfigured, fall back to a complete default ID. */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE)  || \
    !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
    !defined(CONFIG_NANDSIM_THIRD_ID_BYTE)  || \
    !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
#define CONFIG_NANDSIM_FIRST_ID_BYTE  0x98
#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
#define CONFIG_NANDSIM_THIRD_ID_BYTE  0xFF /* No byte */
#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
#endif

#ifndef CONFIG_NANDSIM_ACCESS_DELAY
#define CONFIG_NANDSIM_ACCESS_DELAY 25
#endif
#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
#endif
#ifndef CONFIG_NANDSIM_ERASE_DELAY
#define CONFIG_NANDSIM_ERASE_DELAY 2
#endif
#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
#endif
#ifndef CONFIG_NANDSIM_INPUT_CYCLE
#define CONFIG_NANDSIM_INPUT_CYCLE 50
#endif
#ifndef CONFIG_NANDSIM_BUS_WIDTH
#define CONFIG_NANDSIM_BUS_WIDTH 8
#endif
#ifndef CONFIG_NANDSIM_DO_DELAYS
#define CONFIG_NANDSIM_DO_DELAYS 0
#endif
#ifndef CONFIG_NANDSIM_LOG
#define CONFIG_NANDSIM_LOG 0
#endif
#ifndef CONFIG_NANDSIM_DBG
#define CONFIG_NANDSIM_DBG 0
#endif
#ifndef CONFIG_NANDSIM_MAX_PARTS
#define CONFIG_NANDSIM_MAX_PARTS 32
#endif

/*
 * Module parameters. Semantics of each one is given by the
 * MODULE_PARM_DESC() entries further below.
 */
static uint first_id_byte  = CONFIG_NANDSIM_FIRST_ID_BYTE;
static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
static uint third_id_byte  = CONFIG_NANDSIM_THIRD_ID_BYTE;
static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
static uint access_delay   = CONFIG_NANDSIM_ACCESS_DELAY;
static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
static uint erase_delay    = CONFIG_NANDSIM_ERASE_DELAY;
static uint output_cycle   = CONFIG_NANDSIM_OUTPUT_CYCLE;
static uint input_cycle    = CONFIG_NANDSIM_INPUT_CYCLE;
static uint bus_width      = CONFIG_NANDSIM_BUS_WIDTH;
static uint do_delays      = CONFIG_NANDSIM_DO_DELAYS;
static uint log            = CONFIG_NANDSIM_LOG;
static uint dbg            = CONFIG_NANDSIM_DBG;
static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
static unsigned int parts_num;
static char *badblocks = NULL;
static char *weakblocks = NULL;
static char *weakpages = NULL;
static unsigned int bitflips = 0;
static char *gravepages = NULL;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
static unsigned int bch;

module_param(first_id_byte,  uint, 0400);
module_param(second_id_byte, uint, 0400);
module_param(third_id_byte,  uint, 0400);
module_param(fourth_id_byte, uint, 0400);
module_param(access_delay,   uint, 0400);
module_param(programm_delay, uint, 0400); module_param(erase_delay, uint, 0400); module_param(output_cycle, uint, 0400); module_param(input_cycle, uint, 0400); module_param(bus_width, uint, 0400); module_param(do_delays, uint, 0400); module_param(log, uint, 0400); module_param(dbg, uint, 0400); module_param_array(parts, ulong, &parts_num, 0400); module_param(badblocks, charp, 0400); module_param(weakblocks, charp, 0400); module_param(weakpages, charp, 0400); module_param(bitflips, uint, 0400); module_param(gravepages, charp, 0400); module_param(overridesize, uint, 0400); module_param(cache_file, charp, 0400); module_param(bbt, uint, 0400); module_param(bch, uint, 0400); MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command"); MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command"); MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)"); MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)"); MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)"); MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)"); MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero"); MODULE_PARM_DESC(log, "Perform logging if not zero"); MODULE_PARM_DESC(dbg, "Output debug information if not zero"); MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas"); /* Page and erase block positions for the following parameters are independent of any partitions */ MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas"); 
MODULE_PARM_DESC(weakblocks,     "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
				 " separated by commas e.g. 113:2 means eb 113"
				 " can be erased only twice before failing");
MODULE_PARM_DESC(weakpages,      "Weak pages [: maximum writes (defaults to 3)]"
				 " separated by commas e.g. 1401:2 means page 1401"
				 " can be written only twice before failing");
MODULE_PARM_DESC(bitflips,       "Maximum number of random bit flips per page (zero by default)");
MODULE_PARM_DESC(gravepages,     "Pages that lose data [: maximum reads (defaults to 3)]"
				 " separated by commas e.g. 1401:2 means page 1401"
				 " can be read only twice before failing");
MODULE_PARM_DESC(overridesize,   "Specifies the NAND Flash size overriding the ID bytes. "
				 "The size is specified in erase blocks and as the exponent of a power of two"
				 " e.g. 5 means a size of 32 erase blocks");
MODULE_PARM_DESC(cache_file,     "File to use to cache nand pages instead of memory");
MODULE_PARM_DESC(bbt,		 "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
MODULE_PARM_DESC(bch,		 "Enable BCH ecc and set how many bits should "
				 "be correctable in 512-byte blocks");

/* The largest possible page size */
#define NS_LARGEST_PAGE_SIZE	4096

/* The prefix for simulator output */
#define NS_OUTPUT_PREFIX "[nandsim]"

/* Simulator's output macros (logging, debugging, warning, error) */
#define NS_LOG(args...) \
	do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
#define NS_DBG(args...) \
	do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
#define NS_WARN(args...) \
	do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
#define NS_ERR(args...) \
	do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
#define NS_INFO(args...) \
	do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)

/* Busy-wait delay macros (microseconds, milliseconds) */
#define NS_UDELAY(us) \
        do { if (do_delays) udelay(us); } while(0)
#define NS_MDELAY(us) \
        do { if (do_delays) mdelay(us); } while(0)

/* Is the nandsim structure initialized ? */
#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)

/* Good operation completion status */
#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))

/* Operation failed completion status */
#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))

/* Calculate the page offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET(ns) \
	(((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)

/* Calculate the OOB offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)

/* After a command is input, the simulator goes to one of the following states */
#define STATE_CMD_READ0        0x00000001 /* read data from the beginning of page */
#define STATE_CMD_READ1        0x00000002 /* read data from the second half of page */
#define STATE_CMD_READSTART    0x00000003 /* read data second command (large page devices) */
#define STATE_CMD_PAGEPROG     0x00000004 /* start page program */
#define STATE_CMD_READOOB      0x00000005 /* read OOB area */
#define STATE_CMD_ERASE1       0x00000006 /* sector erase first command */
#define STATE_CMD_STATUS       0x00000007 /* read status */
#define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
#define STATE_CMD_READID       0x0000000A /* read ID */
#define STATE_CMD_ERASE2       0x0000000B /* sector erase second command */
#define STATE_CMD_RESET        0x0000000C /* reset */
#define STATE_CMD_RNDOUT       0x0000000D /* random output command */
#define STATE_CMD_RNDOUTSTART  0x0000000E /* random output start command */
#define STATE_CMD_MASK         0x0000000F /* command states mask */

/* After an address is input, the simulator goes to one of these states */
#define STATE_ADDR_PAGE        0x00000010 /* full (row, column) address is accepted */
#define STATE_ADDR_SEC         0x00000020 /* sector address was accepted */
#define STATE_ADDR_COLUMN      0x00000030 /* column address was accepted */
#define STATE_ADDR_ZERO        0x00000040 /* one byte zero address was accepted */
#define STATE_ADDR_MASK        0x00000070 /* address states mask */

/* During data input/output the simulator is in these states */
#define STATE_DATAIN           0x00000100 /* waiting for data input */
#define STATE_DATAIN_MASK      0x00000100 /* data input states mask */

#define STATE_DATAOUT          0x00001000 /* waiting for page data output */
#define STATE_DATAOUT_ID       0x00002000 /* waiting for ID bytes output */
#define STATE_DATAOUT_STATUS   0x00003000 /* waiting for status output */
#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
#define STATE_DATAOUT_MASK     0x00007000 /* data output states mask */

/* Previous operation is done, ready to accept new requests */
#define STATE_READY            0x00000000

/* This state is used to mark that the next state isn't known yet */
#define STATE_UNKNOWN          0x10000000

/* Simulator's actions bit masks */
#define ACTION_CPY       0x00100000 /* copy page/OOB to the internal buffer */
#define ACTION_PRGPAGE   0x00200000 /* program the internal buffer to flash */
#define ACTION_SECERASE  0x00300000 /* erase sector */
#define ACTION_ZEROOFF   0x00400000 /* don't add any offset to address */
#define ACTION_HALFOFF   0x00500000 /* add to address half of page */
#define ACTION_OOBOFF    0x00600000 /* add to address OOB offset */
#define ACTION_MASK      0x00700000 /* action mask */

/*
 * NOTE(review): only 12 operations are actually listed in ops[] below;
 * the 13th entry is zero-initialized (reqopts == 0) and is therefore
 * never matched by the options check in find_operation(). Confirm whether
 * NS_OPER_NUM can be reduced to 12.
 */
#define NS_OPER_NUM      13 /* Number of operations supported by the simulator */
#define NS_OPER_STATES   6  /* Maximum number of states in operation */

#define OPT_ANY          0xFFFFFFFF /* any chip supports this operation */
#define OPT_PAGE512      0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
#define OPT_SMARTMEDIA   0x00000010 /* SmartMedia technology chips */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
#define OPT_SMALLPAGE    (OPT_PAGE512) /* 512-byte page chips */

/* Remove action bits from state */
#define NS_STATE(x) ((x) & ~ACTION_MASK)

/*
 * Maximum previous states which need to be saved. Currently saving is
 * only needed for page program operation with preceded read command
 * (which is only valid for 512-byte pages).
 */
#define NS_MAX_PREVSTATES 1

/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
#define NS_MAX_HELD_PAGES 16

/* Dentries of the files this module creates under debugfs */
struct nandsim_debug_info {
	struct dentry *dfs_root;
	struct dentry *dfs_wear_report;
};

/*
 * A union to represent flash memory contents and flash buffer.
 */
union ns_mem {
	u_char *byte;    /* for byte access */
	uint16_t *word;  /* for 16-bit word access */
};

/*
 * The structure which describes all the internal simulator data.
 */
struct nandsim {
	struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
	unsigned int nbparts;

	uint busw;              /* flash chip bus width (8 or 16) */
	u_char ids[4];          /* chip's ID bytes */
	uint32_t options;       /* chip's characteristic bits */
	uint32_t state;         /* current chip state */
	uint32_t nxstate;       /* next expected state */

	uint32_t *op;           /* current operation, NULL operations isn't known yet */
	uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
	uint16_t npstates;      /* number of previous states saved */
	uint16_t stateidx;      /* current state index */

	/* The simulated NAND flash pages array */
	union ns_mem *pages;

	/* Slab allocator for nand pages */
	struct kmem_cache *nand_pages_slab;

	/* Internal buffer of page + OOB size bytes */
	union ns_mem buf;

	/* NAND flash "geometry" */
	struct {
		uint64_t totsz;     /* total flash size, bytes */
		uint32_t secsz;     /* flash sector (erase block) size, bytes */
		uint pgsz;          /* NAND flash page size, bytes */
		uint oobsz;         /* page OOB area size, bytes */
		uint64_t totszoob;  /* total flash size including OOB, bytes */
		uint pgszoob;       /* page size including OOB , bytes*/
		uint secszoob;      /* sector size including OOB, bytes */
		uint pgnum;         /* total number of pages */
		uint pgsec;         /* number of pages per sector */
		uint secshift;      /* bits number in sector size */
		uint pgshift;       /* bits number in page size */
		uint pgaddrbytes;   /* bytes per page address */
		uint secaddrbytes;  /* bytes per sector address */
		uint idbytes;       /* the number ID bytes that this chip outputs */
	} geom;

	/* NAND flash internal registers */
	struct {
		unsigned command; /* the command register */
		u_char   status;  /* the status register */
		uint     row;     /* the page number */
		uint     column;  /* the offset within page */
		uint     count;   /* internal counter */
		uint     num;     /* number of bytes which must be processed */
		uint     off;     /* fixed page offset */
	} regs;

	/* NAND flash lines state */
	struct {
		int ce;  /* chip Enable */
		int cle; /* command Latch Enable */
		int ale; /* address Latch Enable */
		int wp;  /* write Protect */
	} lines;

	/* Fields needed when using a cache file */
	struct file *cfile; /* Open file */
	unsigned long *pages_written; /* Which pages have been written */
	void *file_buf;
	struct page *held_pages[NS_MAX_HELD_PAGES];
	int held_cnt;

	struct nandsim_debug_info dbg;
};

/*
 * Operations array. To perform any operation the simulator must pass
 * through the correspondent states chain.
 */
static struct nandsim_operations {
	uint32_t reqopts;  /* options which are required to perform the operation */
	uint32_t states[NS_OPER_STATES]; /* operation's states */
} ops[NS_OPER_NUM] = {
	/* Read page + OOB from the beginning */
	{OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
	/* Read page + OOB from the second half */
	{OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
	/* Read OOB */
	{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
	/* Program page starting from the beginning */
	{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
			STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
	/* Program page starting from the beginning */
	{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
			STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
	/* Program page starting from the second half */
	{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
			STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
	/* Program OOB */
	{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
			STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
	/* Erase sector */
	{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
	/* Read status */
	{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
	/* Read ID */
	{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
	/* Large page devices read page */
	{OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
	/* Large page devices random page read */
	{OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
};

/* An erase block that fails after a limited number of erase cycles */
struct weak_block {
	struct list_head
list; unsigned int erase_block_no; unsigned int max_erases; unsigned int erases_done; }; static LIST_HEAD(weak_blocks); struct weak_page { struct list_head list; unsigned int page_no; unsigned int max_writes; unsigned int writes_done; }; static LIST_HEAD(weak_pages); struct grave_page { struct list_head list; unsigned int page_no; unsigned int max_reads; unsigned int reads_done; }; static LIST_HEAD(grave_pages); static unsigned long *erase_block_wear = NULL; static unsigned int wear_eb_count = 0; static unsigned long total_wear = 0; /* MTD structure for NAND controller */ static struct mtd_info *nsmtd; static int nandsim_debugfs_show(struct seq_file *m, void *private) { unsigned long wmin = -1, wmax = 0, avg; unsigned long deciles[10], decile_max[10], tot = 0; unsigned int i; /* Calc wear stats */ for (i = 0; i < wear_eb_count; ++i) { unsigned long wear = erase_block_wear[i]; if (wear < wmin) wmin = wear; if (wear > wmax) wmax = wear; tot += wear; } for (i = 0; i < 9; ++i) { deciles[i] = 0; decile_max[i] = (wmax * (i + 1) + 5) / 10; } deciles[9] = 0; decile_max[9] = wmax; for (i = 0; i < wear_eb_count; ++i) { int d; unsigned long wear = erase_block_wear[i]; for (d = 0; d < 10; ++d) if (wear <= decile_max[d]) { deciles[d] += 1; break; } } avg = tot / wear_eb_count; /* Output wear report */ seq_printf(m, "Total numbers of erases: %lu\n", tot); seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count); seq_printf(m, "Average number of erases: %lu\n", avg); seq_printf(m, "Maximum number of erases: %lu\n", wmax); seq_printf(m, "Minimum number of erases: %lu\n", wmin); for (i = 0; i < 10; ++i) { unsigned long from = (i ? 
decile_max[i - 1] + 1 : 0); if (from > decile_max[i]) continue; seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n", from, decile_max[i], deciles[i]); } return 0; } static int nandsim_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, nandsim_debugfs_show, inode->i_private); } static const struct file_operations dfs_fops = { .open = nandsim_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /** * nandsim_debugfs_create - initialize debugfs * @dev: nandsim device description object * * This function creates all debugfs files for UBI device @ubi. Returns zero in * case of success and a negative error code in case of failure. */ static int nandsim_debugfs_create(struct nandsim *dev) { struct nandsim_debug_info *dbg = &dev->dbg; struct dentry *dent; int err; if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; dent = debugfs_create_dir("nandsim", NULL); if (IS_ERR_OR_NULL(dent)) { int err = dent ? -ENODEV : PTR_ERR(dent); NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n", err); return err; } dbg->dfs_root = dent; dent = debugfs_create_file("wear_report", S_IRUSR, dbg->dfs_root, dev, &dfs_fops); if (IS_ERR_OR_NULL(dent)) goto out_remove; dbg->dfs_wear_report = dent; return 0; out_remove: debugfs_remove_recursive(dbg->dfs_root); err = dent ? PTR_ERR(dent) : -ENODEV; return err; } /** * nandsim_debugfs_remove - destroy all debugfs files */ static void nandsim_debugfs_remove(struct nandsim *ns) { if (IS_ENABLED(CONFIG_DEBUG_FS)) debugfs_remove_recursive(ns->dbg.dfs_root); } /* * Allocate array of page pointers, create slab allocation for an array * and initialize the array by NULL pointers. * * RETURNS: 0 if success, -ENOMEM if memory alloc fails. 
*/ static int alloc_device(struct nandsim *ns) { struct file *cfile; int i, err; if (cache_file) { cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600); if (IS_ERR(cfile)) return PTR_ERR(cfile); if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) { NS_ERR("alloc_device: cache file not readable\n"); err = -EINVAL; goto err_close; } if (!cfile->f_op->write && !cfile->f_op->aio_write) { NS_ERR("alloc_device: cache file not writeable\n"); err = -EINVAL; goto err_close; } ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) * sizeof(unsigned long)); if (!ns->pages_written) { NS_ERR("alloc_device: unable to allocate pages written array\n"); err = -ENOMEM; goto err_close; } ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL); if (!ns->file_buf) { NS_ERR("alloc_device: unable to allocate file buf\n"); err = -ENOMEM; goto err_free; } ns->cfile = cfile; return 0; } ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem)); if (!ns->pages) { NS_ERR("alloc_device: unable to allocate page array\n"); return -ENOMEM; } for (i = 0; i < ns->geom.pgnum; i++) { ns->pages[i].byte = NULL; } ns->nand_pages_slab = kmem_cache_create("nandsim", ns->geom.pgszoob, 0, 0, NULL); if (!ns->nand_pages_slab) { NS_ERR("cache_create: unable to create kmem_cache\n"); return -ENOMEM; } return 0; err_free: vfree(ns->pages_written); err_close: filp_close(cfile, NULL); return err; } /* * Free any allocated pages, and free the array of page pointers. */ static void free_device(struct nandsim *ns) { int i; if (ns->cfile) { kfree(ns->file_buf); vfree(ns->pages_written); filp_close(ns->cfile, NULL); return; } if (ns->pages) { for (i = 0; i < ns->geom.pgnum; i++) { if (ns->pages[i].byte) kmem_cache_free(ns->nand_pages_slab, ns->pages[i].byte); } kmem_cache_destroy(ns->nand_pages_slab); vfree(ns->pages); } } static char *get_partition_name(int i) { return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i); } /* * Initialize the nandsim structure. 
 *
 * RETURNS: 0 if success, -ERRNO if failure.
 */
static int init_nandsim(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct nandsim   *ns   = chip->priv;
	int i, ret = 0;
	uint64_t remains;
	uint64_t next_offset;

	if (NS_IS_INITIALIZED(ns)) {
		NS_ERR("init_nandsim: nandsim is already initialized\n");
		return -EIO;
	}

	/* Force mtd to not do delays */
	chip->chip_delay = 0;

	/* Initialize the NAND flash parameters (derived from the mtd/chip info) */
	ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
	ns->geom.totsz    = mtd->size;
	ns->geom.pgsz     = mtd->writesize;
	ns->geom.oobsz    = mtd->oobsize;
	ns->geom.secsz    = mtd->erasesize;
	ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
	ns->geom.pgnum    = div_u64(ns->geom.totsz, ns->geom.pgsz);
	ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
	ns->geom.secshift = ffs(ns->geom.secsz) - 1;
	ns->geom.pgshift  = chip->page_shift;
	ns->geom.pgsec    = ns->geom.secsz / ns->geom.pgsz;
	ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
	ns->options = 0;

	/* Select the OPT_* capability bits matching the page size */
	if (ns->geom.pgsz == 512) {
		ns->options |= OPT_PAGE512;
		if (ns->busw == 8)
			ns->options |= OPT_PAGE512_8BIT;
	} else if (ns->geom.pgsz == 2048) {
		ns->options |= OPT_PAGE2048;
	} else if (ns->geom.pgsz == 4096) {
		ns->options |= OPT_PAGE4096;
	} else {
		NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
		return -EIO;
	}

	/* Number of address bytes depends on page type and total size */
	if (ns->options & OPT_SMALLPAGE) {
		if (ns->geom.totsz <= (32 << 20)) {
			ns->geom.pgaddrbytes  = 3;
			ns->geom.secaddrbytes = 2;
		} else {
			ns->geom.pgaddrbytes  = 4;
			ns->geom.secaddrbytes = 3;
		}
	} else {
		if (ns->geom.totsz <= (128 << 20)) {
			ns->geom.pgaddrbytes  = 4;
			ns->geom.secaddrbytes = 2;
		} else {
			ns->geom.pgaddrbytes  = 5;
			ns->geom.secaddrbytes = 3;
		}
	}

	/* Fill the partition_info structure */
	if (parts_num > ARRAY_SIZE(ns->partitions)) {
		NS_ERR("too many partitions.\n");
		ret = -EINVAL;
		goto error;
	}
	remains = ns->geom.totsz;
	next_offset = 0;
	for (i = 0; i < parts_num; ++i) {
		uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;

		if (!part_sz || part_sz > remains) {
			NS_ERR("bad partition size.\n");
			ret = -EINVAL;
			goto error;
		}
		ns->partitions[i].name   = get_partition_name(i);
		ns->partitions[i].offset = next_offset;
		ns->partitions[i].size   = part_sz;
		next_offset += ns->partitions[i].size;
		remains -= ns->partitions[i].size;
	}
	ns->nbparts = parts_num;
	/* Any leftover space becomes one extra partition */
	if (remains) {
		if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
			NS_ERR("too many partitions.\n");
			ret = -EINVAL;
			goto error;
		}
		ns->partitions[i].name   = get_partition_name(i);
		ns->partitions[i].offset = next_offset;
		ns->partitions[i].size   = remains;
		ns->nbparts += 1;
	}

	if (ns->busw == 16)
		NS_WARN("16-bit flashes support wasn't tested\n");

	/* Dump the computed geometry to the kernel log */
	printk("flash size: %llu MiB\n",
			(unsigned long long)ns->geom.totsz >> 20);
	printk("page size: %u bytes\n", ns->geom.pgsz);
	printk("OOB area size: %u bytes\n", ns->geom.oobsz);
	printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
	printk("pages number: %u\n", ns->geom.pgnum);
	printk("pages per sector: %u\n", ns->geom.pgsec);
	printk("bus width: %u\n", ns->busw);
	printk("bits in sector size: %u\n", ns->geom.secshift);
	printk("bits in page size: %u\n", ns->geom.pgshift);
	printk("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
	printk("flash size with OOB: %llu KiB\n",
			(unsigned long long)ns->geom.totszoob >> 10);
	printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
	printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
	printk("options: %#x\n", ns->options);

	if ((ret = alloc_device(ns)) != 0)
		goto error;

	/* Allocate / initialize the internal buffer */
	ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
	if (!ns->buf.byte) {
		NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
			ns->geom.pgszoob);
		ret = -ENOMEM;
		goto error;
	}
	memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);

	return 0;

error:
	free_device(ns);

	return ret;
}

/*
 * Free the nandsim structure.
*/ static void free_nandsim(struct nandsim *ns) { kfree(ns->buf.byte); free_device(ns); return; } static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd) { char *w; int zero_ok; unsigned int erase_block_no; loff_t offset; if (!badblocks) return 0; w = badblocks; do { zero_ok = (*w == '0' ? 1 : 0); erase_block_no = simple_strtoul(w, &w, 0); if (!zero_ok && !erase_block_no) { NS_ERR("invalid badblocks.\n"); return -EINVAL; } offset = erase_block_no * ns->geom.secsz; if (mtd_block_markbad(mtd, offset)) { NS_ERR("invalid badblocks.\n"); return -EINVAL; } if (*w == ',') w += 1; } while (*w); return 0; } static int parse_weakblocks(void) { char *w; int zero_ok; unsigned int erase_block_no; unsigned int max_erases; struct weak_block *wb; if (!weakblocks) return 0; w = weakblocks; do { zero_ok = (*w == '0' ? 1 : 0); erase_block_no = simple_strtoul(w, &w, 0); if (!zero_ok && !erase_block_no) { NS_ERR("invalid weakblocks.\n"); return -EINVAL; } max_erases = 3; if (*w == ':') { w += 1; max_erases = simple_strtoul(w, &w, 0); } if (*w == ',') w += 1; wb = kzalloc(sizeof(*wb), GFP_KERNEL); if (!wb) { NS_ERR("unable to allocate memory.\n"); return -ENOMEM; } wb->erase_block_no = erase_block_no; wb->max_erases = max_erases; list_add(&wb->list, &weak_blocks); } while (*w); return 0; } static int erase_error(unsigned int erase_block_no) { struct weak_block *wb; list_for_each_entry(wb, &weak_blocks, list) if (wb->erase_block_no == erase_block_no) { if (wb->erases_done >= wb->max_erases) return 1; wb->erases_done += 1; return 0; } return 0; } static int parse_weakpages(void) { char *w; int zero_ok; unsigned int page_no; unsigned int max_writes; struct weak_page *wp; if (!weakpages) return 0; w = weakpages; do { zero_ok = (*w == '0' ? 
1 : 0); page_no = simple_strtoul(w, &w, 0); if (!zero_ok && !page_no) { NS_ERR("invalid weakpagess.\n"); return -EINVAL; } max_writes = 3; if (*w == ':') { w += 1; max_writes = simple_strtoul(w, &w, 0); } if (*w == ',') w += 1; wp = kzalloc(sizeof(*wp), GFP_KERNEL); if (!wp) { NS_ERR("unable to allocate memory.\n"); return -ENOMEM; } wp->page_no = page_no; wp->max_writes = max_writes; list_add(&wp->list, &weak_pages); } while (*w); return 0; } static int write_error(unsigned int page_no) { struct weak_page *wp; list_for_each_entry(wp, &weak_pages, list) if (wp->page_no == page_no) { if (wp->writes_done >= wp->max_writes) return 1; wp->writes_done += 1; return 0; } return 0; } static int parse_gravepages(void) { char *g; int zero_ok; unsigned int page_no; unsigned int max_reads; struct grave_page *gp; if (!gravepages) return 0; g = gravepages; do { zero_ok = (*g == '0' ? 1 : 0); page_no = simple_strtoul(g, &g, 0); if (!zero_ok && !page_no) { NS_ERR("invalid gravepagess.\n"); return -EINVAL; } max_reads = 3; if (*g == ':') { g += 1; max_reads = simple_strtoul(g, &g, 0); } if (*g == ',') g += 1; gp = kzalloc(sizeof(*gp), GFP_KERNEL); if (!gp) { NS_ERR("unable to allocate memory.\n"); return -ENOMEM; } gp->page_no = page_no; gp->max_reads = max_reads; list_add(&gp->list, &grave_pages); } while (*g); return 0; } static int read_error(unsigned int page_no) { struct grave_page *gp; list_for_each_entry(gp, &grave_pages, list) if (gp->page_no == page_no) { if (gp->reads_done >= gp->max_reads) return 1; gp->reads_done += 1; return 0; } return 0; } static void free_lists(void) { struct list_head *pos, *n; list_for_each_safe(pos, n, &weak_blocks) { list_del(pos); kfree(list_entry(pos, struct weak_block, list)); } list_for_each_safe(pos, n, &weak_pages) { list_del(pos); kfree(list_entry(pos, struct weak_page, list)); } list_for_each_safe(pos, n, &grave_pages) { list_del(pos); kfree(list_entry(pos, struct grave_page, list)); } kfree(erase_block_wear); } static int 
setup_wear_reporting(struct mtd_info *mtd)
{
	size_t mem;

	wear_eb_count = div_u64(mtd->size, mtd->erasesize);
	mem = wear_eb_count * sizeof(unsigned long);
	/* Detect size_t overflow of the multiplication above */
	if (mem / sizeof(unsigned long) != wear_eb_count) {
		NS_ERR("Too many erase blocks for wear reporting\n");
		return -ENOMEM;
	}
	erase_block_wear = kzalloc(mem, GFP_KERNEL);
	if (!erase_block_wear) {
		NS_ERR("Too many erase blocks for wear reporting\n");
		return -ENOMEM;
	}
	return 0;
}

/* Bump the total and per-block erase counters (no-op if reporting is off) */
static void update_wear(unsigned int erase_block_no)
{
	if (!erase_block_wear)
		return;
	total_wear += 1;
	/*
	 * TODO: Notify this through a debugfs entry,
	 * instead of showing an error message.
	 */
	if (total_wear == 0)
		NS_ERR("Erase counter total overflow\n");
	erase_block_wear[erase_block_no] += 1;
	if (erase_block_wear[erase_block_no] == 0)
		NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
}

/*
 * Returns the string representation of 'state' state.
 */
static char *get_state_name(uint32_t state)
{
	/* Action bits are stripped so combined state|action values match too */
	switch (NS_STATE(state)) {
		case STATE_CMD_READ0:
			return "STATE_CMD_READ0";
		case STATE_CMD_READ1:
			return "STATE_CMD_READ1";
		case STATE_CMD_PAGEPROG:
			return "STATE_CMD_PAGEPROG";
		case STATE_CMD_READOOB:
			return "STATE_CMD_READOOB";
		case STATE_CMD_READSTART:
			return "STATE_CMD_READSTART";
		case STATE_CMD_ERASE1:
			return "STATE_CMD_ERASE1";
		case STATE_CMD_STATUS:
			return "STATE_CMD_STATUS";
		case STATE_CMD_SEQIN:
			return "STATE_CMD_SEQIN";
		case STATE_CMD_READID:
			return "STATE_CMD_READID";
		case STATE_CMD_ERASE2:
			return "STATE_CMD_ERASE2";
		case STATE_CMD_RESET:
			return "STATE_CMD_RESET";
		case STATE_CMD_RNDOUT:
			return "STATE_CMD_RNDOUT";
		case STATE_CMD_RNDOUTSTART:
			return "STATE_CMD_RNDOUTSTART";
		case STATE_ADDR_PAGE:
			return "STATE_ADDR_PAGE";
		case STATE_ADDR_SEC:
			return "STATE_ADDR_SEC";
		case STATE_ADDR_ZERO:
			return "STATE_ADDR_ZERO";
		case STATE_ADDR_COLUMN:
			return "STATE_ADDR_COLUMN";
		case STATE_DATAIN:
			return "STATE_DATAIN";
		case STATE_DATAOUT:
			return "STATE_DATAOUT";
		case STATE_DATAOUT_ID:
			return "STATE_DATAOUT_ID";
		case STATE_DATAOUT_STATUS:
			return "STATE_DATAOUT_STATUS";
		case STATE_DATAOUT_STATUS_M:
			return "STATE_DATAOUT_STATUS_M";
		case STATE_READY:
			return "STATE_READY";
		case STATE_UNKNOWN:
			return "STATE_UNKNOWN";
	}

	NS_ERR("get_state_name: unknown state, BUG\n");
	return NULL;
}

/*
 * Check if command is valid.
 *
 * RETURNS: 1 if wrong command, 0 if right.
 */
static int check_command(int cmd)
{
	switch (cmd) {

	case NAND_CMD_READ0:
	case NAND_CMD_READ1:
	case NAND_CMD_READSTART:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_READOOB:
	case NAND_CMD_ERASE1:
	case NAND_CMD_STATUS:
	case NAND_CMD_SEQIN:
	case NAND_CMD_READID:
	case NAND_CMD_ERASE2:
	case NAND_CMD_RESET:
	case NAND_CMD_RNDOUT:
	case NAND_CMD_RNDOUTSTART:
		return 0;

	default:
		return 1;
	}
}

/*
 * Returns state after command is accepted by command number.
 */
static uint32_t get_state_by_command(unsigned command)
{
	switch (command) {
		case NAND_CMD_READ0:
			return STATE_CMD_READ0;
		case NAND_CMD_READ1:
			return STATE_CMD_READ1;
		case NAND_CMD_PAGEPROG:
			return STATE_CMD_PAGEPROG;
		case NAND_CMD_READSTART:
			return STATE_CMD_READSTART;
		case NAND_CMD_READOOB:
			return STATE_CMD_READOOB;
		case NAND_CMD_ERASE1:
			return STATE_CMD_ERASE1;
		case NAND_CMD_STATUS:
			return STATE_CMD_STATUS;
		case NAND_CMD_SEQIN:
			return STATE_CMD_SEQIN;
		case NAND_CMD_READID:
			return STATE_CMD_READID;
		case NAND_CMD_ERASE2:
			return STATE_CMD_ERASE2;
		case NAND_CMD_RESET:
			return STATE_CMD_RESET;
		case NAND_CMD_RNDOUT:
			return STATE_CMD_RNDOUT;
		case NAND_CMD_RNDOUTSTART:
			return STATE_CMD_RNDOUTSTART;
	}

	NS_ERR("get_state_by_command: unknown command, BUG\n");
	return 0;
}

/*
 * Move an address byte to the correspondent internal register.
 * The first (pgaddrbytes - secaddrbytes) bytes build the column
 * (in-page offset); the remaining bytes build the row (page number).
 */
static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
{
	uint byte = (uint)bt;

	if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
		ns->regs.column |= (byte << 8 * ns->regs.count);
	else {
		ns->regs.row |= (byte << 8 * (ns->regs.count -
					      ns->geom.pgaddrbytes +
					      ns->geom.secaddrbytes));
	}

	return;
}

/*
 * Switch to STATE_READY state.
*/ static inline void switch_to_ready_state(struct nandsim *ns, u_char status) { NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY)); ns->state = STATE_READY; ns->nxstate = STATE_UNKNOWN; ns->op = NULL; ns->npstates = 0; ns->stateidx = 0; ns->regs.num = 0; ns->regs.count = 0; ns->regs.off = 0; ns->regs.row = 0; ns->regs.column = 0; ns->regs.status = status; } /* * If the operation isn't known yet, try to find it in the global array * of supported operations. * * Operation can be unknown because of the following. * 1. New command was accepted and this is the first call to find the * correspondent states chain. In this case ns->npstates = 0; * 2. There are several operations which begin with the same command(s) * (for example program from the second half and read from the * second half operations both begin with the READ1 command). In this * case the ns->pstates[] array contains previous states. * * Thus, the function tries to find operation containing the following * states (if the 'flag' parameter is 0): * ns->pstates[0], ... ns->pstates[ns->npstates], ns->state * * If (one and only one) matching operation is found, it is accepted ( * ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is * zeroed). * * If there are several matches, the current state is pushed to the * ns->pstates. * * The operation can be unknown only while commands are input to the chip. * As soon as address command is accepted, the operation must be known. * In such situation the function is called with 'flag' != 0, and the * operation is searched using the following pattern: * ns->pstates[0], ... ns->pstates[ns->npstates], <address input> * * It is supposed that this pattern must either match one operation or * none. There can't be ambiguity in that case. * * If no matches found, the function does the following: * 1. if there are saved states present, try to ignore them and search * again only using the last command. 
If nothing was found, switch * to the STATE_READY state. * 2. if there are no saved states, switch to the STATE_READY state. * * RETURNS: -2 - no matched operations found. * -1 - several matches. * 0 - operation is found. */ static int find_operation(struct nandsim *ns, uint32_t flag) { int opsfound = 0; int i, j, idx = 0; for (i = 0; i < NS_OPER_NUM; i++) { int found = 1; if (!(ns->options & ops[i].reqopts)) /* Ignore operations we can't perform */ continue; if (flag) { if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK)) continue; } else { if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates])) continue; } for (j = 0; j < ns->npstates; j++) if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j]) && (ns->options & ops[idx].reqopts)) { found = 0; break; } if (found) { idx = i; opsfound += 1; } } if (opsfound == 1) { /* Exact match */ ns->op = &ops[idx].states[0]; if (flag) { /* * In this case the find_operation function was * called when address has just began input. But it isn't * yet fully input and the current state must * not be one of STATE_ADDR_*, but the STATE_ADDR_* * state must be the next state (ns->nxstate). */ ns->stateidx = ns->npstates - 1; } else { ns->stateidx = ns->npstates; } ns->npstates = 0; ns->state = ns->op[ns->stateidx]; ns->nxstate = ns->op[ns->stateidx + 1]; NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n", idx, get_state_name(ns->state), get_state_name(ns->nxstate)); return 0; } if (opsfound == 0) { /* Nothing was found. 
Try to ignore previous commands (if any) and search again */ if (ns->npstates != 0) { NS_DBG("find_operation: no operation found, try again with state %s\n", get_state_name(ns->state)); ns->npstates = 0; return find_operation(ns, 0); } NS_DBG("find_operation: no operations found\n"); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return -2; } if (flag) { /* This shouldn't happen */ NS_DBG("find_operation: BUG, operation must be known if address is input\n"); return -2; } NS_DBG("find_operation: there is still ambiguity\n"); ns->pstates[ns->npstates++] = ns->state; return -1; } static void put_pages(struct nandsim *ns) { int i; for (i = 0; i < ns->held_cnt; i++) page_cache_release(ns->held_pages[i]); } /* Get page cache pages in advance to provide NOFS memory allocation */ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos) { pgoff_t index, start_index, end_index; struct page *page; struct address_space *mapping = file->f_mapping; start_index = pos >> PAGE_CACHE_SHIFT; end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT; if (end_index - start_index + 1 > NS_MAX_HELD_PAGES) return -EINVAL; ns->held_cnt = 0; for (index = start_index; index <= end_index; index++) { page = find_get_page(mapping, index); if (page == NULL) { page = find_or_create_page(mapping, index, GFP_NOFS); if (page == NULL) { write_inode_now(mapping->host, 1); page = find_or_create_page(mapping, index, GFP_NOFS); } if (page == NULL) { put_pages(ns); return -ENOMEM; } unlock_page(page); } ns->held_pages[ns->held_cnt++] = page; } return 0; } static int set_memalloc(void) { if (current->flags & PF_MEMALLOC) return 0; current->flags |= PF_MEMALLOC; return 1; } static void clear_memalloc(int memalloc) { if (memalloc) current->flags &= ~PF_MEMALLOC; } static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos) { ssize_t tx; int err, memalloc; err = get_pages(ns, file, count, pos); if (err) return err; memalloc = set_memalloc(); tx = 
kernel_read(file, pos, buf, count); clear_memalloc(memalloc); put_pages(ns); return tx; } static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos) { ssize_t tx; int err, memalloc; err = get_pages(ns, file, count, pos); if (err) return err; memalloc = set_memalloc(); tx = kernel_write(file, buf, count, pos); clear_memalloc(memalloc); put_pages(ns); return tx; } /* * Returns a pointer to the current page. */ static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns) { return &(ns->pages[ns->regs.row]); } /* * Retuns a pointer to the current byte, within the current page. */ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns) { return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off; } static int do_read_error(struct nandsim *ns, int num) { unsigned int page_no = ns->regs.row; if (read_error(page_no)) { prandom_bytes(ns->buf.byte, num); NS_WARN("simulating read error in page %u\n", page_no); return 1; } return 0; } static void do_bit_flips(struct nandsim *ns, int num) { if (bitflips && prandom_u32() < (1 << 22)) { int flips = 1; if (bitflips > 1) flips = (prandom_u32() % (int) bitflips) + 1; while (flips--) { int pos = prandom_u32() % (num * 8); ns->buf.byte[pos / 8] ^= (1 << (pos % 8)); NS_WARN("read_page: flipping bit %d in page %d " "reading from %d ecc: corrected=%u failed=%u\n", pos, ns->regs.row, ns->regs.column + ns->regs.off, nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed); } } } /* * Fill the NAND buffer with data read from the specified page. 
*/ static void read_page(struct nandsim *ns, int num) { union ns_mem *mypage; if (ns->cfile) { if (!test_bit(ns->regs.row, ns->pages_written)) { NS_DBG("read_page: page %d not written\n", ns->regs.row); memset(ns->buf.byte, 0xFF, num); } else { loff_t pos; ssize_t tx; NS_DBG("read_page: page %d written, reading from %d\n", ns->regs.row, ns->regs.column + ns->regs.off); if (do_read_error(ns, num)) return; pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off; tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos); if (tx != num) { NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); return; } do_bit_flips(ns, num); } return; } mypage = NS_GET_PAGE(ns); if (mypage->byte == NULL) { NS_DBG("read_page: page %d not allocated\n", ns->regs.row); memset(ns->buf.byte, 0xFF, num); } else { NS_DBG("read_page: page %d allocated, reading from %d\n", ns->regs.row, ns->regs.column + ns->regs.off); if (do_read_error(ns, num)) return; memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num); do_bit_flips(ns, num); } } /* * Erase all pages in the specified sector. */ static void erase_sector(struct nandsim *ns) { union ns_mem *mypage; int i; if (ns->cfile) { for (i = 0; i < ns->geom.pgsec; i++) if (__test_and_clear_bit(ns->regs.row + i, ns->pages_written)) { NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i); } return; } mypage = NS_GET_PAGE(ns); for (i = 0; i < ns->geom.pgsec; i++) { if (mypage->byte != NULL) { NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i); kmem_cache_free(ns->nand_pages_slab, mypage->byte); mypage->byte = NULL; } mypage++; } } /* * Program the specified page with the contents from the NAND buffer. 
*/ static int prog_page(struct nandsim *ns, int num) { int i; union ns_mem *mypage; u_char *pg_off; if (ns->cfile) { loff_t off; ssize_t tx; int all; NS_DBG("prog_page: writing page %d\n", ns->regs.row); pg_off = ns->file_buf + ns->regs.column + ns->regs.off; off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off; if (!test_bit(ns->regs.row, ns->pages_written)) { all = 1; memset(ns->file_buf, 0xff, ns->geom.pgszoob); } else { all = 0; tx = read_file(ns, ns->cfile, pg_off, num, off); if (tx != num) { NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; } } for (i = 0; i < num; i++) pg_off[i] &= ns->buf.byte[i]; if (all) { loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob; tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos); if (tx != ns->geom.pgszoob) { NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; } __set_bit(ns->regs.row, ns->pages_written); } else { tx = write_file(ns, ns->cfile, pg_off, num, off); if (tx != num) { NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; } } return 0; } mypage = NS_GET_PAGE(ns); if (mypage->byte == NULL) { NS_DBG("prog_page: allocating page %d\n", ns->regs.row); /* * We allocate memory with GFP_NOFS because a flash FS may * utilize this. If it is holding an FS lock, then gets here, * then kernel memory alloc runs writeback which goes to the FS * again and deadlocks. This was seen in practice. */ mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS); if (mypage->byte == NULL) { NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row); return -1; } memset(mypage->byte, 0xFF, ns->geom.pgszoob); } pg_off = NS_PAGE_BYTE_OFF(ns); for (i = 0; i < num; i++) pg_off[i] &= ns->buf.byte[i]; return 0; } /* * If state has any action bit, perform this action. * * RETURNS: 0 if success, -1 if error. 
*/ static int do_state_action(struct nandsim *ns, uint32_t action) { int num; int busdiv = ns->busw == 8 ? 1 : 2; unsigned int erase_block_no, page_no; action &= ACTION_MASK; /* Check that page address input is correct */ if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) { NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row); return -1; } switch (action) { case ACTION_CPY: /* * Copy page data to the internal buffer. */ /* Column shouldn't be very large */ if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) { NS_ERR("do_state_action: column number is too large\n"); break; } num = ns->geom.pgszoob - ns->regs.off - ns->regs.column; read_page(ns, num); NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n", num, NS_RAW_OFFSET(ns) + ns->regs.off); if (ns->regs.off == 0) NS_LOG("read page %d\n", ns->regs.row); else if (ns->regs.off < ns->geom.pgsz) NS_LOG("read page %d (second half)\n", ns->regs.row); else NS_LOG("read OOB of page %d\n", ns->regs.row); NS_UDELAY(access_delay); NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv); break; case ACTION_SECERASE: /* * Erase sector. 
*/ if (ns->lines.wp) { NS_ERR("do_state_action: device is write-protected, ignore sector erase\n"); return -1; } if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec || (ns->regs.row & ~(ns->geom.secsz - 1))) { NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row); return -1; } ns->regs.row = (ns->regs.row << 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column; ns->regs.column = 0; erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift); NS_DBG("do_state_action: erase sector at address %#x, off = %d\n", ns->regs.row, NS_RAW_OFFSET(ns)); NS_LOG("erase sector %u\n", erase_block_no); erase_sector(ns); NS_MDELAY(erase_delay); if (erase_block_wear) update_wear(erase_block_no); if (erase_error(erase_block_no)) { NS_WARN("simulating erase failure in erase block %u\n", erase_block_no); return -1; } break; case ACTION_PRGPAGE: /* * Program page - move internal buffer data to the page. */ if (ns->lines.wp) { NS_WARN("do_state_action: device is write-protected, programm\n"); return -1; } num = ns->geom.pgszoob - ns->regs.off - ns->regs.column; if (num != ns->regs.count) { NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n", ns->regs.count, num); return -1; } if (prog_page(ns, num) == -1) return -1; page_no = ns->regs.row; NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n", num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off); NS_LOG("programm page %d\n", ns->regs.row); NS_UDELAY(programm_delay); NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv); if (write_error(page_no)) { NS_WARN("simulating write failure in page %u\n", page_no); return -1; } break; case ACTION_ZEROOFF: NS_DBG("do_state_action: set internal offset to 0\n"); ns->regs.off = 0; break; case ACTION_HALFOFF: if (!(ns->options & OPT_PAGE512_8BIT)) { NS_ERR("do_state_action: BUG! 
can't skip half of page for non-512" "byte page size 8x chips\n"); return -1; } NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2); ns->regs.off = ns->geom.pgsz/2; break; case ACTION_OOBOFF: NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz); ns->regs.off = ns->geom.pgsz; break; default: NS_DBG("do_state_action: BUG! unknown action\n"); } return 0; } /* * Switch simulator's state. */ static void switch_state(struct nandsim *ns) { if (ns->op) { /* * The current operation have already been identified. * Just follow the states chain. */ ns->stateidx += 1; ns->state = ns->nxstate; ns->nxstate = ns->op[ns->stateidx + 1]; NS_DBG("switch_state: operation is known, switch to the next state, " "state: %s, nxstate: %s\n", get_state_name(ns->state), get_state_name(ns->nxstate)); /* See, whether we need to do some action */ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } } else { /* * We don't yet know which operation we perform. * Try to identify it. */ /* * The only event causing the switch_state function to * be called with yet unknown operation is new command. */ ns->state = get_state_by_command(ns->regs.command); NS_DBG("switch_state: operation is unknown, try to find it\n"); if (find_operation(ns, 0) != 0) return; if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } } /* For 16x devices column means the page offset in words */ if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) { NS_DBG("switch_state: double the column number for 16x device\n"); ns->regs.column <<= 1; } if (NS_STATE(ns->nxstate) == STATE_READY) { /* * The current state is the last. 
Return to STATE_READY */ u_char status = NS_STATUS_OK(ns); /* In case of data states, see if all bytes were input/output */ if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) && ns->regs.count != ns->regs.num) { NS_WARN("switch_state: not all bytes were processed, %d left\n", ns->regs.num - ns->regs.count); status = NS_STATUS_FAILED(ns); } NS_DBG("switch_state: operation complete, switch to STATE_READY state\n"); switch_to_ready_state(ns, status); return; } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) { /* * If the next state is data input/output, switch to it now */ ns->state = ns->nxstate; ns->nxstate = ns->op[++ns->stateidx + 1]; ns->regs.num = ns->regs.count = 0; NS_DBG("switch_state: the next state is data I/O, switch, " "state: %s, nxstate: %s\n", get_state_name(ns->state), get_state_name(ns->nxstate)); /* * Set the internal register to the count of bytes which * are expected to be input or output */ switch (NS_STATE(ns->state)) { case STATE_DATAIN: case STATE_DATAOUT: ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column; break; case STATE_DATAOUT_ID: ns->regs.num = ns->geom.idbytes; break; case STATE_DATAOUT_STATUS: case STATE_DATAOUT_STATUS_M: ns->regs.count = ns->regs.num = 0; break; default: NS_ERR("switch_state: BUG! unknown data state\n"); } } else if (ns->nxstate & STATE_ADDR_MASK) { /* * If the next state is address input, set the internal * register to the number of expected address bytes */ ns->regs.count = 0; switch (NS_STATE(ns->nxstate)) { case STATE_ADDR_PAGE: ns->regs.num = ns->geom.pgaddrbytes; break; case STATE_ADDR_SEC: ns->regs.num = ns->geom.secaddrbytes; break; case STATE_ADDR_ZERO: ns->regs.num = 1; break; case STATE_ADDR_COLUMN: /* Column address is always 2 bytes */ ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes; break; default: NS_ERR("switch_state: BUG! unknown address state\n"); } } else { /* * Just reset internal counters. 
*/ ns->regs.num = 0; ns->regs.count = 0; } } static u_char ns_nand_read_byte(struct mtd_info *mtd) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; u_char outb = 0x00; /* Sanity and correctness checks */ if (!ns->lines.ce) { NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb); return outb; } if (ns->lines.ale || ns->lines.cle) { NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb); return outb; } if (!(ns->state & STATE_DATAOUT_MASK)) { NS_WARN("read_byte: unexpected data output cycle, state is %s " "return %#x\n", get_state_name(ns->state), (uint)outb); return outb; } /* Status register may be read as many times as it is wanted */ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) { NS_DBG("read_byte: return %#x status\n", ns->regs.status); return ns->regs.status; } /* Check if there is any data in the internal buffer which may be read */ if (ns->regs.count == ns->regs.num) { NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb); return outb; } switch (NS_STATE(ns->state)) { case STATE_DATAOUT: if (ns->busw == 8) { outb = ns->buf.byte[ns->regs.count]; ns->regs.count += 1; } else { outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]); ns->regs.count += 2; } break; case STATE_DATAOUT_ID: NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num); outb = ns->ids[ns->regs.count]; ns->regs.count += 1; break; default: BUG(); } if (ns->regs.count == ns->regs.num) { NS_DBG("read_byte: all bytes were read\n"); if (NS_STATE(ns->nxstate) == STATE_READY) switch_state(ns); } return outb; } static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; /* Sanity and correctness checks */ if (!ns->lines.ce) { NS_ERR("write_byte: chip is disabled, ignore write\n"); return; } if (ns->lines.ale && ns->lines.cle) { NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n"); return; } if (ns->lines.cle == 1) { /* 
* The byte written is a command. */ if (byte == NAND_CMD_RESET) { NS_LOG("reset chip\n"); switch_to_ready_state(ns, NS_STATUS_OK(ns)); return; } /* Check that the command byte is correct */ if (check_command(byte)) { NS_ERR("write_byte: unknown command %#x\n", (uint)byte); return; } if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M || NS_STATE(ns->state) == STATE_DATAOUT) { int row = ns->regs.row; switch_state(ns); if (byte == NAND_CMD_RNDOUT) ns->regs.row = row; } /* Check if chip is expecting command */ if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) { /* Do not warn if only 2 id bytes are read */ if (!(ns->regs.command == NAND_CMD_READID && NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) { /* * We are in situation when something else (not command) * was expected but command was input. In this case ignore * previous command(s)/state(s) and accept the last one. */ NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, " "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate)); } switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); } NS_DBG("command byte corresponding to %s state accepted\n", get_state_name(get_state_by_command(byte))); ns->regs.command = byte; switch_state(ns); } else if (ns->lines.ale == 1) { /* * The byte written is an address. 
*/ if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) { NS_DBG("write_byte: operation isn't known yet, identify it\n"); if (find_operation(ns, 1) < 0) return; if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } ns->regs.count = 0; switch (NS_STATE(ns->nxstate)) { case STATE_ADDR_PAGE: ns->regs.num = ns->geom.pgaddrbytes; break; case STATE_ADDR_SEC: ns->regs.num = ns->geom.secaddrbytes; break; case STATE_ADDR_ZERO: ns->regs.num = 1; break; default: BUG(); } } /* Check that chip is expecting address */ if (!(ns->nxstate & STATE_ADDR_MASK)) { NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, " "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate)); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } /* Check if this is expected byte */ if (ns->regs.count == ns->regs.num) { NS_ERR("write_byte: no more address bytes expected\n"); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } accept_addr_byte(ns, byte); ns->regs.count += 1; NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n", (uint)byte, ns->regs.count, ns->regs.num); if (ns->regs.count == ns->regs.num) { NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column); switch_state(ns); } } else { /* * The byte written is an input data. 
*/ /* Check that chip is expecting data input */ if (!(ns->state & STATE_DATAIN_MASK)) { NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, " "switch to %s\n", (uint)byte, get_state_name(ns->state), get_state_name(STATE_READY)); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } /* Check if this is expected byte */ if (ns->regs.count == ns->regs.num) { NS_WARN("write_byte: %u input bytes has already been accepted, ignore write\n", ns->regs.num); return; } if (ns->busw == 8) { ns->buf.byte[ns->regs.count] = byte; ns->regs.count += 1; } else { ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte); ns->regs.count += 2; } } return; } static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; ns->lines.cle = bitmask & NAND_CLE ? 1 : 0; ns->lines.ale = bitmask & NAND_ALE ? 1 : 0; ns->lines.ce = bitmask & NAND_NCE ? 1 : 0; if (cmd != NAND_CMD_NONE) ns_nand_write_byte(mtd, cmd); } static int ns_device_ready(struct mtd_info *mtd) { NS_DBG("device_ready\n"); return 1; } static uint16_t ns_nand_read_word(struct mtd_info *mtd) { struct nand_chip *chip = (struct nand_chip *)mtd->priv; NS_DBG("read_word\n"); return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8); } static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; /* Check that chip is expecting data input */ if (!(ns->state & STATE_DATAIN_MASK)) { NS_ERR("write_buf: data input isn't expected, state is %s, " "switch to STATE_READY\n", get_state_name(ns->state)); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } /* Check if these are expected bytes */ if (ns->regs.count + len > ns->regs.num) { NS_ERR("write_buf: too many input bytes\n"); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } memcpy(ns->buf.byte + ns->regs.count, buf, len); ns->regs.count += len; if (ns->regs.count == ns->regs.num) { 
NS_DBG("write_buf: %d bytes were written\n", ns->regs.count); } } static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) { struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; /* Sanity and correctness checks */ if (!ns->lines.ce) { NS_ERR("read_buf: chip is disabled\n"); return; } if (ns->lines.ale || ns->lines.cle) { NS_ERR("read_buf: ALE or CLE pin is high\n"); return; } if (!(ns->state & STATE_DATAOUT_MASK)) { NS_WARN("read_buf: unexpected data output cycle, current state is %s\n", get_state_name(ns->state)); return; } if (NS_STATE(ns->state) != STATE_DATAOUT) { int i; for (i = 0; i < len; i++) buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd); return; } /* Check if these are expected bytes */ if (ns->regs.count + len > ns->regs.num) { NS_ERR("read_buf: too many bytes to read\n"); switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } memcpy(buf, ns->buf.byte + ns->regs.count, len); ns->regs.count += len; if (ns->regs.count == ns->regs.num) { if (NS_STATE(ns->nxstate) == STATE_READY) switch_state(ns); } return; } /* * Module initialization function */ static int __init ns_init_module(void) { struct nand_chip *chip; struct nandsim *nand; int retval = -ENOMEM, i; if (bus_width != 8 && bus_width != 16) { NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width); return -EINVAL; } /* Allocate and initialize mtd_info, nand_chip and nandsim structures */ nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip) + sizeof(struct nandsim), GFP_KERNEL); if (!nsmtd) { NS_ERR("unable to allocate core structures.\n"); return -ENOMEM; } chip = (struct nand_chip *)(nsmtd + 1); nsmtd->priv = (void *)chip; nand = (struct nandsim *)(chip + 1); chip->priv = (void *)nand; /* * Register simulator's callbacks. 
*/ chip->cmd_ctrl = ns_hwcontrol; chip->read_byte = ns_nand_read_byte; chip->dev_ready = ns_device_ready; chip->write_buf = ns_nand_write_buf; chip->read_buf = ns_nand_read_buf; chip->read_word = ns_nand_read_word; chip->ecc.mode = NAND_ECC_SOFT; /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */ /* and 'badblocks' parameters to work */ chip->options |= NAND_SKIP_BBTSCAN; switch (bbt) { case 2: chip->bbt_options |= NAND_BBT_NO_OOB; case 1: chip->bbt_options |= NAND_BBT_USE_FLASH; case 0: break; default: NS_ERR("bbt has to be 0..2\n"); retval = -EINVAL; goto error; } /* * Perform minimum nandsim structure initialization to handle * the initial ID read command correctly */ if (third_id_byte != 0xFF || fourth_id_byte != 0xFF) nand->geom.idbytes = 4; else nand->geom.idbytes = 2; nand->regs.status = NS_STATUS_OK(nand); nand->nxstate = STATE_UNKNOWN; nand->options |= OPT_PAGE512; /* temporary value */ nand->ids[0] = first_id_byte; nand->ids[1] = second_id_byte; nand->ids[2] = third_id_byte; nand->ids[3] = fourth_id_byte; if (bus_width == 16) { nand->busw = 16; chip->options |= NAND_BUSWIDTH_16; } nsmtd->owner = THIS_MODULE; if ((retval = parse_weakblocks()) != 0) goto error; if ((retval = parse_weakpages()) != 0) goto error; if ((retval = parse_gravepages()) != 0) goto error; retval = nand_scan_ident(nsmtd, 1, NULL); if (retval) { NS_ERR("cannot scan NAND Simulator device\n"); if (retval > 0) retval = -ENXIO; goto error; } if (bch) { unsigned int eccsteps, eccbytes; if (!mtd_nand_has_bch()) { NS_ERR("BCH ECC support is disabled\n"); retval = -EINVAL; goto error; } /* use 512-byte ecc blocks */ eccsteps = nsmtd->writesize/512; eccbytes = (bch*13+7)/8; /* do not bother supporting small page devices */ if ((nsmtd->oobsize < 64) || !eccsteps) { NS_ERR("bch not available on small page devices\n"); retval = -EINVAL; goto error; } if ((eccbytes*eccsteps+2) > nsmtd->oobsize) { NS_ERR("invalid bch value %u\n", bch); retval = -EINVAL; goto error; } chip->ecc.mode = 
NAND_ECC_SOFT_BCH; chip->ecc.size = 512; chip->ecc.bytes = eccbytes; NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size); } retval = nand_scan_tail(nsmtd); if (retval) { NS_ERR("can't register NAND Simulator\n"); if (retval > 0) retval = -ENXIO; goto error; } if (overridesize) { uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize; if (new_size >> overridesize != nsmtd->erasesize) { NS_ERR("overridesize is too big\n"); retval = -EINVAL; goto err_exit; } /* N.B. This relies on nand_scan not doing anything with the size before we change it */ nsmtd->size = new_size; chip->chipsize = new_size; chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1; chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; } if ((retval = setup_wear_reporting(nsmtd)) != 0) goto err_exit; if ((retval = nandsim_debugfs_create(nand)) != 0) goto err_exit; if ((retval = init_nandsim(nsmtd)) != 0) goto err_exit; if ((retval = nand_default_bbt(nsmtd)) != 0) goto err_exit; if ((retval = parse_badblocks(nand, nsmtd)) != 0) goto err_exit; /* Register NAND partitions */ retval = mtd_device_register(nsmtd, &nand->partitions[0], nand->nbparts); if (retval != 0) goto err_exit; return 0; err_exit: free_nandsim(nand); nand_release(nsmtd); for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) kfree(nand->partitions[i].name); error: kfree(nsmtd); free_lists(); return retval; } module_init(ns_init_module); /* * Module clean-up function */ static void __exit ns_cleanup_module(void) { struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv; int i; nandsim_debugfs_remove(ns); free_nandsim(ns); /* Free nandsim private resources */ nand_release(nsmtd); /* Unregister driver */ for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) kfree(ns->partitions[i].name); kfree(nsmtd); /* Free other structures */ free_lists(); } module_exit(ns_cleanup_module); MODULE_LICENSE ("GPL"); MODULE_AUTHOR ("Artem B. Bityuckiy"); MODULE_DESCRIPTION ("The NAND flash simulator");
gpl-2.0
Foxda-Tech/argo8-kernel
drivers/eisa/eisa-bus.c
598
10844
/* * EISA bus support functions for sysfs. * * (C) 2002, 2003 Marc Zyngier <maz@wild-wind.fr.eu.org> * * This code is released under the GPL version 2. */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/eisa.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/ioport.h> #include <asm/io.h> #define SLOT_ADDRESS(r,n) (r->bus_base_addr + (0x1000 * n)) #define EISA_DEVINFO(i,s) { .id = { .sig = i }, .name = s } struct eisa_device_info { struct eisa_device_id id; char name[50]; }; #ifdef CONFIG_EISA_NAMES static struct eisa_device_info __initdata eisa_table[] = { #include "devlist.h" }; #define EISA_INFOS (sizeof (eisa_table) / (sizeof (struct eisa_device_info))) #endif #define EISA_MAX_FORCED_DEV 16 static int enable_dev[EISA_MAX_FORCED_DEV]; static unsigned int enable_dev_count; static int disable_dev[EISA_MAX_FORCED_DEV]; static unsigned int disable_dev_count; static int is_forced_dev(int *forced_tab, int forced_count, struct eisa_root_device *root, struct eisa_device *edev) { int i, x; for (i = 0; i < forced_count; i++) { x = (root->bus_nr << 8) | edev->slot; if (forced_tab[i] == x) return 1; } return 0; } static void __init eisa_name_device(struct eisa_device *edev) { #ifdef CONFIG_EISA_NAMES int i; for (i = 0; i < EISA_INFOS; i++) { if (!strcmp(edev->id.sig, eisa_table[i].id.sig)) { strlcpy(edev->pretty_name, eisa_table[i].name, sizeof(edev->pretty_name)); return; } } /* No name was found */ sprintf(edev->pretty_name, "EISA device %.7s", edev->id.sig); #endif } static char __init *decode_eisa_sig(unsigned long addr) { static char sig_str[EISA_SIG_LEN]; u8 sig[4]; u16 rev; int i; for (i = 0; i < 4; i++) { #ifdef CONFIG_EISA_VLB_PRIMING /* * This ugly stuff is used to wake up VL-bus cards * (AHA-284x is the only known example), so we can * read the EISA id. * * Thankfully, this only exists on x86... 
*/ outb(0x80 + i, addr); #endif sig[i] = inb(addr + i); if (!i && (sig[0] & 0x80)) return NULL; } sig_str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1); sig_str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1); sig_str[2] = (sig[1] & 0x1f) + ('A' - 1); rev = (sig[2] << 8) | sig[3]; sprintf(sig_str + 3, "%04X", rev); return sig_str; } static int eisa_bus_match(struct device *dev, struct device_driver *drv) { struct eisa_device *edev = to_eisa_device(dev); struct eisa_driver *edrv = to_eisa_driver(drv); const struct eisa_device_id *eids = edrv->id_table; if (!eids) return 0; while (strlen(eids->sig)) { if (!strcmp(eids->sig, edev->id.sig) && edev->state & EISA_CONFIG_ENABLED) { edev->id.driver_data = eids->driver_data; return 1; } eids++; } return 0; } static int eisa_bus_uevent(struct device *dev, struct kobj_uevent_env *env) { struct eisa_device *edev = to_eisa_device(dev); add_uevent_var(env, "MODALIAS=" EISA_DEVICE_MODALIAS_FMT, edev->id.sig); return 0; } struct bus_type eisa_bus_type = { .name = "eisa", .match = eisa_bus_match, .uevent = eisa_bus_uevent, }; EXPORT_SYMBOL(eisa_bus_type); int eisa_driver_register(struct eisa_driver *edrv) { edrv->driver.bus = &eisa_bus_type; return driver_register(&edrv->driver); } EXPORT_SYMBOL(eisa_driver_register); void eisa_driver_unregister(struct eisa_driver *edrv) { driver_unregister(&edrv->driver); } EXPORT_SYMBOL(eisa_driver_unregister); static ssize_t eisa_show_sig(struct device *dev, struct device_attribute *attr, char *buf) { struct eisa_device *edev = to_eisa_device(dev); return sprintf(buf, "%s\n", edev->id.sig); } static DEVICE_ATTR(signature, S_IRUGO, eisa_show_sig, NULL); static ssize_t eisa_show_state(struct device *dev, struct device_attribute *attr, char *buf) { struct eisa_device *edev = to_eisa_device(dev); return sprintf(buf, "%d\n", edev->state & EISA_CONFIG_ENABLED); } static DEVICE_ATTR(enabled, S_IRUGO, eisa_show_state, NULL); static ssize_t eisa_show_modalias(struct device *dev, struct device_attribute 
*attr, char *buf)
{
	struct eisa_device *edev = to_eisa_device(dev);

	return sprintf(buf, EISA_DEVICE_MODALIAS_FMT "\n", edev->id.sig);
}
static DEVICE_ATTR(modalias, S_IRUGO, eisa_show_modalias, NULL);

/*
 * Fill in an eisa_device for the board found in @slot of @root: read and
 * decode the EISA signature, record base address/DMA mask, and wire up the
 * driver-model fields.  Returns 0 on success, -1 when no device answers in
 * the slot.  Does not register the device; see eisa_register_device().
 */
static int __init eisa_init_device(struct eisa_root_device *root,
				   struct eisa_device *edev, int slot)
{
	char *sig;
	unsigned long sig_addr;
	int i;

	sig_addr = SLOT_ADDRESS(root, slot) + EISA_VENDOR_ID_OFFSET;

	sig = decode_eisa_sig(sig_addr);
	if (!sig)
		return -1;	/* No EISA device here */

	memcpy(edev->id.sig, sig, EISA_SIG_LEN);
	edev->slot = slot;
	edev->state = inb(SLOT_ADDRESS(root, slot) + EISA_CONFIG_OFFSET)
		      & EISA_CONFIG_ENABLED;
	edev->base_addr = SLOT_ADDRESS(root, slot);
	edev->dma_mask = root->dma_mask; /* Default DMA mask */
	eisa_name_device(edev);
	edev->dev.parent = root->dev;
	edev->dev.bus = &eisa_bus_type;
	edev->dev.dma_mask = &edev->dma_mask;
	edev->dev.coherent_dma_mask = edev->dma_mask;
	dev_set_name(&edev->dev, "%02X:%02X", root->bus_nr, slot);

	for (i = 0; i < EISA_MAX_RESOURCES; i++) {
#ifdef CONFIG_EISA_NAMES
		edev->res[i].name = edev->pretty_name;
#else
		edev->res[i].name = edev->id.sig;
#endif
	}

	/* Command-line forcing overrides whatever the config register said. */
	if (is_forced_dev(enable_dev, enable_dev_count, root, edev))
		edev->state = EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED;

	if (is_forced_dev(disable_dev, disable_dev_count, root, edev))
		edev->state = EISA_CONFIG_FORCED;

	return 0;
}

/*
 * Register @edev with the driver model and create its sysfs attribute
 * files.  On any failure the previously-created files are removed and the
 * device is unregistered (goto-based unwind), so the caller only needs to
 * check the return value.
 */
static int __init eisa_register_device(struct eisa_device *edev)
{
	int rc = device_register(&edev->dev);
	if (rc)
		return rc;

	rc = device_create_file(&edev->dev, &dev_attr_signature);
	if (rc)
		goto err_devreg;
	rc = device_create_file(&edev->dev, &dev_attr_enabled);
	if (rc)
		goto err_sig;
	rc = device_create_file(&edev->dev, &dev_attr_modalias);
	if (rc)
		goto err_enab;

	return 0;

err_enab:
	device_remove_file(&edev->dev, &dev_attr_enabled);
err_sig:
	device_remove_file(&edev->dev, &dev_attr_signature);
err_devreg:
	device_unregister(&edev->dev);
	return rc;
}

/*
 * Claim the I/O port regions belonging to @edev under @root's resource.
 * Slot 0 (the mainboard) only claims the 4-byte vendor-id window; real
 * slots claim one 0x100-byte region per EISA function.  On failure all
 * regions claimed so far are released and -1 is returned.
 */
static int __init eisa_request_resources(struct eisa_root_device *root,
					 struct eisa_device *edev,
					 int slot)
{
	int i;

	for (i = 0; i < EISA_MAX_RESOURCES; i++) {
		/* Don't register resource for slot 0, since this is
		 * very likely to fail... :-( Instead, grab the EISA
		 * id, now we can display something in /proc/ioports.
		 */

		/* Only one region for mainboard */
		if (!slot && i > 0) {
			edev->res[i].start = edev->res[i].end = 0;
			continue;
		}

		if (slot) {
			edev->res[i].start = SLOT_ADDRESS(root, slot)
					     + (i * 0x400);
			edev->res[i].end = edev->res[i].start + 0xff;
			edev->res[i].flags = IORESOURCE_IO;
		} else {
			edev->res[i].start = SLOT_ADDRESS(root, slot)
					     + EISA_VENDOR_ID_OFFSET;
			edev->res[i].end = edev->res[i].start + 3;
			edev->res[i].flags = IORESOURCE_IO | IORESOURCE_BUSY;
		}

		dev_printk(KERN_DEBUG, &edev->dev, "%pR\n", &edev->res[i]);

		if (request_resource(root->res, &edev->res[i]))
			goto failed;
	}

	return 0;

failed:
	while (--i >= 0)
		release_resource(&edev->res[i]);

	return -1;
}

/* Release every region previously claimed by eisa_request_resources(). */
static void __init eisa_release_resources(struct eisa_device *edev)
{
	int i;

	for (i = 0; i < EISA_MAX_RESOURCES; i++)
		if (edev->res[i].start || edev->res[i].end)
			release_resource(&edev->res[i]);
}

/*
 * Enumerate an EISA bus: probe the mainboard (slot 0) first, then every
 * remaining slot.  Slot-0 failure aborts the scan unless
 * root->force_probe is set.  Per-slot failures are logged and skipped;
 * the function itself only fails on slot 0.
 */
static int __init eisa_probe(struct eisa_root_device *root)
{
	int i, c;
	struct eisa_device *edev;
	char *enabled_str;

	dev_info(root->dev, "Probing EISA bus %d\n", root->bus_nr);

	/* First try to get hold of slot 0. If there is no device
	 * here, simply fail, unless root->force_probe is set. */

	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev) {
		dev_err(root->dev, "EISA: Couldn't allocate mainboard slot\n");
		return -ENOMEM;
	}

	if (eisa_init_device(root, edev, 0)) {
		kfree(edev);
		if (!root->force_probe)
			return -ENODEV;
		goto force_probe;
	}

	if (eisa_request_resources(root, edev, 0)) {
		dev_warn(root->dev,
			 "EISA: Cannot allocate resource for mainboard\n");
		kfree(edev);
		if (!root->force_probe)
			return -EBUSY;
		goto force_probe;
	}

	dev_info(&edev->dev, "EISA: Mainboard %s detected\n", edev->id.sig);

	if (eisa_register_device(edev)) {
		dev_err(&edev->dev, "EISA: Failed to register %s\n",
			edev->id.sig);
		eisa_release_resources(edev);
		kfree(edev);
	}

force_probe:

	for (c = 0, i = 1; i <= root->slots; i++) {
		edev = kzalloc(sizeof(*edev), GFP_KERNEL);
		if (!edev) {
			dev_err(root->dev, "EISA: Out of memory for slot %d\n",
				i);
			continue;
		}

		if (eisa_init_device(root, edev, i)) {
			kfree(edev);
			continue;
		}

		if (eisa_request_resources(root, edev, i)) {
			dev_warn(root->dev,
				 "Cannot allocate resource for EISA slot %d\n",
				 i);
			kfree(edev);
			continue;
		}

		if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED))
			enabled_str = " (forced enabled)";
		else if (edev->state == EISA_CONFIG_FORCED)
			enabled_str = " (forced disabled)";
		else if (edev->state == 0)
			enabled_str = " (disabled)";
		else
			enabled_str = "";

		dev_info(&edev->dev, "EISA: slot %d: %s detected%s\n", i,
			 edev->id.sig, enabled_str);

		c++;

		if (eisa_register_device(edev)) {
			dev_err(&edev->dev, "EISA: Failed to register %s\n",
				edev->id.sig);
			eisa_release_resources(edev);
			kfree(edev);
		}
	}

	dev_info(root->dev, "EISA: Detected %d card%s\n", c, c == 1 ? "" : "s");

	return 0;
}

static struct resource eisa_root_res = {
	.name  = "EISA root resource",
	.start = 0,
	.end   = 0xffffffff,
	.flags = IORESOURCE_IO,
};

static int eisa_bus_count;

/*
 * Register an EISA root bridge and probe the bus behind it.  Returns a
 * negative errno when the bus range is already claimed or probing the
 * mainboard fails.
 */
int __init eisa_root_register(struct eisa_root_device *root)
{
	int err;

	/* Use our own resources to check if this bus base address has
	 * been already registered. This prevents the virtual root
	 * device from registering after the real one has, for
	 * example... */

	root->eisa_root_res.name  = eisa_root_res.name;
	root->eisa_root_res.start = root->res->start;
	root->eisa_root_res.end   = root->res->end;
	root->eisa_root_res.flags = IORESOURCE_BUSY;

	err = request_resource(&eisa_root_res, &root->eisa_root_res);
	if (err)
		return err;

	root->bus_nr = eisa_bus_count++;

	err = eisa_probe(root);
	if (err)
		release_resource(&root->eisa_root_res);

	return err;
}

/* Register the bus type early (postcore) so root bridges can attach. */
static int __init eisa_init(void)
{
	int r;

	r = bus_register(&eisa_bus_type);
	if (r)
		return r;

	printk(KERN_INFO "EISA bus registered\n");
	return 0;
}

/* eisa_bus.enable_dev= / disable_dev= force a slot's enabled state. */
module_param_array(enable_dev, int, &enable_dev_count, 0444);
module_param_array(disable_dev, int, &disable_dev_count, 0444);

postcore_initcall(eisa_init);

int EISA_bus;		/* for legacy drivers */
EXPORT_SYMBOL(EISA_bus);
gpl-2.0
chkohn/linux-xlnx
arch/x86/power/hibernate_64.c
1366
3612
/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

/* Temporary page tables used while the image is being restored. */
pgd_t *temp_level4_pgt __visible;

/* Safe-page copy of core_restore_code, so restore can't overwrite itself. */
void *relocated_restore_code __visible;

/* Page allocator callback for kernel_ident_mapping_init(): pages must come
 * from the "safe" pool that the image restore will not overwrite. */
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

/*
 * Build temporary page tables for the restore phase: reuse the original
 * kernel-text mapping and recreate the direct mapping of all mapped pfn
 * ranges from scratch.  Returns 0 or a negative errno.
 */
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.kernel_mapping = true,
	};
	unsigned long mstart, mend;
	int result;
	int i;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, temp_level4_pgt,
						   mstart, mend);

		if (result)
			return result;
	}

	return 0;
}

/*
 * Architecture entry point for resume: prepare temporary mappings, copy the
 * low-level restore code to a safe page, then jump into it (restore_image()
 * does not return to the boot kernel on success).
 */
int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;

	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	memcpy(relocated_restore_code, &core_restore_code,
	       &restore_registers - &core_restore_code);

	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

/* Arch-specific data saved in / restored from the image header. */
struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC	0x0123456789ABCDEFUL

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = restore_jump_address;
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	restore_cr3 = rdr->cr3;
	/* Reject images written by an incompatible kernel. */
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
gpl-2.0
FlorentRevest/linux-sunxi-cedrus
drivers/hwmon/emc1403.c
1878
15623
/* * emc1403.c - SMSC Thermal Driver * * Copyright (C) 2008 Intel Corp * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/mutex.h> #include <linux/regmap.h> #define THERMAL_PID_REG 0xfd #define THERMAL_SMSC_ID_REG 0xfe #define THERMAL_REVISION_REG 0xff enum emc1403_chip { emc1402, emc1403, emc1404 }; struct thermal_data { struct regmap *regmap; struct mutex mutex; const struct attribute_group *groups[4]; }; static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); struct thermal_data *data = dev_get_drvdata(dev); unsigned int val; int retval; retval = regmap_read(data->regmap, sda->index, &val); if (retval < 0) return retval; return sprintf(buf, "%d000\n", val); } static ssize_t show_bit(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr); struct thermal_data *data = dev_get_drvdata(dev); unsigned int val; int retval; retval = 
regmap_read(data->regmap, sda->nr, &val); if (retval < 0) return retval; return sprintf(buf, "%d\n", !!(val & sda->index)); } static ssize_t store_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); struct thermal_data *data = dev_get_drvdata(dev); unsigned long val; int retval; if (kstrtoul(buf, 10, &val)) return -EINVAL; retval = regmap_write(data->regmap, sda->index, DIV_ROUND_CLOSEST(val, 1000)); if (retval < 0) return retval; return count; } static ssize_t store_bit(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr); struct thermal_data *data = dev_get_drvdata(dev); unsigned long val; int retval; if (kstrtoul(buf, 10, &val)) return -EINVAL; retval = regmap_update_bits(data->regmap, sda->nr, sda->index, val ? sda->index : 0); if (retval < 0) return retval; return count; } static ssize_t show_hyst_common(struct device *dev, struct device_attribute *attr, char *buf, bool is_min) { struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); struct thermal_data *data = dev_get_drvdata(dev); struct regmap *regmap = data->regmap; unsigned int limit; unsigned int hyst; int retval; retval = regmap_read(regmap, sda->index, &limit); if (retval < 0) return retval; retval = regmap_read(regmap, 0x21, &hyst); if (retval < 0) return retval; return sprintf(buf, "%d000\n", is_min ? 
limit + hyst : limit - hyst); } static ssize_t show_hyst(struct device *dev, struct device_attribute *attr, char *buf) { return show_hyst_common(dev, attr, buf, false); } static ssize_t show_min_hyst(struct device *dev, struct device_attribute *attr, char *buf) { return show_hyst_common(dev, attr, buf, true); } static ssize_t store_hyst(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); struct thermal_data *data = dev_get_drvdata(dev); struct regmap *regmap = data->regmap; unsigned int limit; int retval; int hyst; unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&data->mutex); retval = regmap_read(regmap, sda->index, &limit); if (retval < 0) goto fail; hyst = limit * 1000 - val; hyst = clamp_val(DIV_ROUND_CLOSEST(hyst, 1000), 0, 255); retval = regmap_write(regmap, 0x21, hyst); if (retval == 0) retval = count; fail: mutex_unlock(&data->mutex); return retval; } /* * Sensors. We pass the actual i2c register to the methods. 
*/ static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x06); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x05); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x20); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00); static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x01); static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x01); static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x01); static SENSOR_DEVICE_ATTR(temp1_min_hyst, S_IRUGO, show_min_hyst, NULL, 0x06); static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_hyst, NULL, 0x05); static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR, show_hyst, store_hyst, 0x20); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x08); static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x07); static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x19); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01); static SENSOR_DEVICE_ATTR_2(temp2_fault, S_IRUGO, show_bit, NULL, 0x1b, 0x02); static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x02); static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x02); static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x02); static SENSOR_DEVICE_ATTR(temp2_min_hyst, S_IRUGO, show_min_hyst, NULL, 0x08); static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO, show_hyst, NULL, 0x07); static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_hyst, NULL, 0x19); static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x16); static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x15); static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x1A); static 
SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23); static SENSOR_DEVICE_ATTR_2(temp3_fault, S_IRUGO, show_bit, NULL, 0x1b, 0x04); static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x04); static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x04); static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x04); static SENSOR_DEVICE_ATTR(temp3_min_hyst, S_IRUGO, show_min_hyst, NULL, 0x16); static SENSOR_DEVICE_ATTR(temp3_max_hyst, S_IRUGO, show_hyst, NULL, 0x15); static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_hyst, NULL, 0x1A); static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x2D); static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x2C); static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO | S_IWUSR, show_temp, store_temp, 0x30); static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 0x2A); static SENSOR_DEVICE_ATTR_2(temp4_fault, S_IRUGO, show_bit, NULL, 0x1b, 0x08); static SENSOR_DEVICE_ATTR_2(temp4_min_alarm, S_IRUGO, show_bit, NULL, 0x36, 0x08); static SENSOR_DEVICE_ATTR_2(temp4_max_alarm, S_IRUGO, show_bit, NULL, 0x35, 0x08); static SENSOR_DEVICE_ATTR_2(temp4_crit_alarm, S_IRUGO, show_bit, NULL, 0x37, 0x08); static SENSOR_DEVICE_ATTR(temp4_min_hyst, S_IRUGO, show_min_hyst, NULL, 0x2D); static SENSOR_DEVICE_ATTR(temp4_max_hyst, S_IRUGO, show_hyst, NULL, 0x2C); static SENSOR_DEVICE_ATTR(temp4_crit_hyst, S_IRUGO, show_hyst, NULL, 0x30); static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR, show_bit, store_bit, 0x03, 0x40); static struct attribute *emc1402_attrs[] = { &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min_hyst.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 
&sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_crit.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_min_hyst.dev_attr.attr, &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, &sensor_dev_attr_power_state.dev_attr.attr, NULL }; static const struct attribute_group emc1402_group = { .attrs = emc1402_attrs, }; static struct attribute *emc1403_attrs[] = { &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_crit.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_fault.dev_attr.attr, &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp3_min_hyst.dev_attr.attr, &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, NULL }; static const struct attribute_group emc1403_group = { .attrs = emc1403_attrs, }; static struct attribute *emc1404_attrs[] = { &sensor_dev_attr_temp4_min.dev_attr.attr, &sensor_dev_attr_temp4_max.dev_attr.attr, &sensor_dev_attr_temp4_crit.dev_attr.attr, &sensor_dev_attr_temp4_input.dev_attr.attr, &sensor_dev_attr_temp4_fault.dev_attr.attr, &sensor_dev_attr_temp4_min_alarm.dev_attr.attr, &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp4_min_hyst.dev_attr.attr, &sensor_dev_attr_temp4_max_hyst.dev_attr.attr, &sensor_dev_attr_temp4_crit_hyst.dev_attr.attr, NULL }; static const struct 
attribute_group emc1404_group = { .attrs = emc1404_attrs, }; /* * EMC14x2 uses a different register and different bits to report alarm and * fault status. For simplicity, provide a separate attribute group for this * chip series. * Since we can not re-use the same attribute names, create a separate attribute * array. */ static struct sensor_device_attribute_2 emc1402_alarms[] = { SENSOR_ATTR_2(temp1_min_alarm, S_IRUGO, show_bit, NULL, 0x02, 0x20), SENSOR_ATTR_2(temp1_max_alarm, S_IRUGO, show_bit, NULL, 0x02, 0x40), SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_bit, NULL, 0x02, 0x01), SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_bit, NULL, 0x02, 0x04), SENSOR_ATTR_2(temp2_min_alarm, S_IRUGO, show_bit, NULL, 0x02, 0x08), SENSOR_ATTR_2(temp2_max_alarm, S_IRUGO, show_bit, NULL, 0x02, 0x10), SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_bit, NULL, 0x02, 0x02), }; static struct attribute *emc1402_alarm_attrs[] = { &emc1402_alarms[0].dev_attr.attr, &emc1402_alarms[1].dev_attr.attr, &emc1402_alarms[2].dev_attr.attr, &emc1402_alarms[3].dev_attr.attr, &emc1402_alarms[4].dev_attr.attr, &emc1402_alarms[5].dev_attr.attr, &emc1402_alarms[6].dev_attr.attr, NULL, }; static const struct attribute_group emc1402_alarm_group = { .attrs = emc1402_alarm_attrs, }; static int emc1403_detect(struct i2c_client *client, struct i2c_board_info *info) { int id; /* Check if thermal chip is SMSC and EMC1403 or EMC1423 */ id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG); if (id != 0x5d) return -ENODEV; id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG); switch (id) { case 0x20: strlcpy(info->type, "emc1402", I2C_NAME_SIZE); break; case 0x21: strlcpy(info->type, "emc1403", I2C_NAME_SIZE); break; case 0x22: strlcpy(info->type, "emc1422", I2C_NAME_SIZE); break; case 0x23: strlcpy(info->type, "emc1423", I2C_NAME_SIZE); break; case 0x25: strlcpy(info->type, "emc1404", I2C_NAME_SIZE); break; case 0x27: strlcpy(info->type, "emc1424", I2C_NAME_SIZE); break; default: return -ENODEV; } id = 
i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); if (id < 0x01 || id > 0x04) return -ENODEV; return 0; } static bool emc1403_regmap_is_volatile(struct device *dev, unsigned int reg) { switch (reg) { case 0x00: /* internal diode high byte */ case 0x01: /* external diode 1 high byte */ case 0x02: /* status */ case 0x10: /* external diode 1 low byte */ case 0x1b: /* external diode fault */ case 0x23: /* external diode 2 high byte */ case 0x24: /* external diode 2 low byte */ case 0x29: /* internal diode low byte */ case 0x2a: /* externl diode 3 high byte */ case 0x2b: /* external diode 3 low byte */ case 0x35: /* high limit status */ case 0x36: /* low limit status */ case 0x37: /* therm limit status */ return true; default: return false; } } static const struct regmap_config emc1403_regmap_config = { .reg_bits = 8, .val_bits = 8, .cache_type = REGCACHE_RBTREE, .volatile_reg = emc1403_regmap_is_volatile, }; static int emc1403_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct thermal_data *data; struct device *hwmon_dev; data = devm_kzalloc(&client->dev, sizeof(struct thermal_data), GFP_KERNEL); if (data == NULL) return -ENOMEM; data->regmap = devm_regmap_init_i2c(client, &emc1403_regmap_config); if (IS_ERR(data->regmap)) return PTR_ERR(data->regmap); mutex_init(&data->mutex); switch (id->driver_data) { case emc1404: data->groups[2] = &emc1404_group; case emc1403: data->groups[1] = &emc1403_group; case emc1402: data->groups[0] = &emc1402_group; } if (id->driver_data == emc1402) data->groups[1] = &emc1402_alarm_group; hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev, client->name, data, data->groups); if (IS_ERR(hwmon_dev)) return PTR_ERR(hwmon_dev); dev_info(&client->dev, "%s Thermal chip found\n", id->name); return 0; } static const unsigned short emc1403_address_list[] = { 0x18, 0x1c, 0x29, 0x4c, 0x4d, 0x5c, I2C_CLIENT_END }; /* Last digit of chip name indicates number of channels */ static const struct i2c_device_id 
emc1403_idtable[] = { { "emc1402", emc1402 }, { "emc1403", emc1403 }, { "emc1404", emc1404 }, { "emc1412", emc1402 }, { "emc1413", emc1403 }, { "emc1414", emc1404 }, { "emc1422", emc1402 }, { "emc1423", emc1403 }, { "emc1424", emc1404 }, { } }; MODULE_DEVICE_TABLE(i2c, emc1403_idtable); static struct i2c_driver sensor_emc1403 = { .class = I2C_CLASS_HWMON, .driver = { .name = "emc1403", }, .detect = emc1403_detect, .probe = emc1403_probe, .id_table = emc1403_idtable, .address_list = emc1403_address_list, }; module_i2c_driver(sensor_emc1403); MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); MODULE_DESCRIPTION("emc1403 Thermal Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
Solitarily/htc_kernel_rider_ics
drivers/scsi/bfa/bfa_fcs_fcpim.c
2646
20420
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/*
 *  fcpim.c - FCP initiator mode i-t nexus state machine
 */

#include "bfad_drv.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
#include "bfad_im.h"

BFA_TRC_FILE(FCS, FCPIM);

/*
 * forward declarations
 */
static void	bfa_fcs_itnim_timeout(void *arg);
static void	bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
static void	bfa_fcs_itnim_send_prli(void *itnim_cbarg,
					struct bfa_fcxp_s *fcxp_alloced);
static void	bfa_fcs_itnim_prli_response(void *fcsarg,
			 struct bfa_fcxp_s *fcxp, void *cbarg,
			    bfa_status_t req_status, u32 rsp_len,
			    u32 resid_len, struct fchs_s *rsp_fchs);

/*
 *  fcs_itnim_sm FCS itnim state machine events
 */
enum bfa_fcs_itnim_event {
	BFA_FCS_ITNIM_SM_ONLINE = 1,	/*  rport online event */
	BFA_FCS_ITNIM_SM_OFFLINE = 2,	/*  rport offline */
	BFA_FCS_ITNIM_SM_FRMSENT = 3,	/*  prli frame is sent */
	BFA_FCS_ITNIM_SM_RSP_OK = 4,	/*  good response */
	BFA_FCS_ITNIM_SM_RSP_ERROR = 5,	/*  error response */
	BFA_FCS_ITNIM_SM_TIMEOUT = 6,	/*  delay timeout */
	BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /*  BFA online callback */
	BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /*  BFA offline callback */
	BFA_FCS_ITNIM_SM_INITIATOR = 9,	/*  rport is initiator */
	BFA_FCS_ITNIM_SM_DELETE = 10,	/*  delete event from rport */
	BFA_FCS_ITNIM_SM_PRLO = 11,	/*  delete event from rport */
};

/* One handler per state; each consumes (itnim, event) and may transition. */
static void	bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
					 enum bfa_fcs_itnim_event event);
static void	bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
					   enum bfa_fcs_itnim_event event);
static void	bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
				      enum bfa_fcs_itnim_event event);
static void	bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
					    enum bfa_fcs_itnim_event event);
static void	bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
					    enum bfa_fcs_itnim_event event);
static void	bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
					enum bfa_fcs_itnim_event event);
static void	bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
					     enum bfa_fcs_itnim_event event);
static void	bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
					   enum bfa_fcs_itnim_event event);

/* Maps each SM handler to its externally-visible BFA_ITNIM_* state id. */
static struct bfa_sm_table_s itnim_sm_table[] = {
	{BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE},
	{BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND},
	{BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT},
	{BFA_SM(bfa_fcs_itnim_sm_prli_retry), BFA_ITNIM_PRLI_RETRY},
	{BFA_SM(bfa_fcs_itnim_sm_hcb_online), BFA_ITNIM_HCB_ONLINE},
	{BFA_SM(bfa_fcs_itnim_sm_online), BFA_ITNIM_ONLINE},
	{BFA_SM(bfa_fcs_itnim_sm_hcb_offline), BFA_ITNIM_HCB_OFFLINE},
	{BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
};

/*
 *  fcs_itnim_sm FCS itnim state machine
 */

/* Idle state: a rport-online event kicks off the PRLI exchange. */
static void
bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
		 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_ONLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
		itnim->prli_retries = 0;
		bfa_fcs_itnim_send_prli(itnim, NULL);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}

/* Waiting for an fcxp frame buffer; cancel the wait on exit events. */
static void
bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
		 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_FRMSENT:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}

/* PRLI sent, awaiting the response; discard the in-flight fcxp on exit. */
static void
bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
		 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_RSP_OK:
		if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) {
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		} else {
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
			bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
		}
		break;

	case BFA_FCS_ITNIM_SM_RSP_ERROR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry);
		bfa_timer_start(itnim->fcs->bfa, &itnim->timer,
				bfa_fcs_itnim_timeout, itnim,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_discard(itnim->fcxp);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_fcxp_discard(itnim->fcxp);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_discard(itnim->fcxp);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}

/* Retry-delay state: resend PRLI on timeout until the retry budget runs
 * out, then force the rport offline; stop the timer on exit events. */
static void
bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
			    enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_TIMEOUT:
		if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) {
			itnim->prli_retries++;
			bfa_trc(itnim->fcs, itnim->prli_retries);
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
			bfa_fcs_itnim_send_prli(itnim, NULL);
		} else {
			/* invoke target offline */
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
			bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
		}
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_timer_stop(&itnim->timer);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}

/* Waiting for the BFA hardware online callback before going fully online. */
static void
bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
			    enum bfa_fcs_itnim_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
	char	lpwwn_buf[BFA_STRING_32];
	char	rpwwn_buf[BFA_STRING_32];

	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_HCB_ONLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online);
		bfa_fcb_itnim_online(itnim->itnim_drv);
		wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
		wwn2str(rpwwn_buf, itnim->rport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"Target (WWN = %s) is online for initiator (WWN = %s)\n",
		rpwwn_buf, lpwwn_buf);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_itnim_offline(itnim->bfa_itnim);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}

/* Fully online; an offline event tears down both driver and BFA itnim and
 * logs whether connectivity was lost or the offline was initiated locally. */
static void
bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
		 enum bfa_fcs_itnim_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
	char	lpwwn_buf[BFA_STRING_32];
	char	rpwwn_buf[BFA_STRING_32];

	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
		bfa_fcb_itnim_offline(itnim->itnim_drv);
		bfa_itnim_offline(itnim->bfa_itnim);
		wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
		wwn2str(rpwwn_buf, itnim->rport->pwwn);
		if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"Target (WWN = %s) connectivity lost for "
			"initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
		else
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Target (WWN = %s) offlined by initiator (WWN = %s)\n",
			rpwwn_buf, lpwwn_buf);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}

/* Waiting for the BFA hardware offline callback before returning to idle. */
static void
bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
			     enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_HCB_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}

/*
 * This state is set when a discovered rport is also in intiator mode.
 * This ITN is marked as no_op and is not active and will not be truned into
 * online state.
 */
static void
bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
		 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	/* Stay put on these; an initiator-mode peer never goes online. */
	case BFA_FCS_ITNIM_SM_RSP_ERROR:
	case BFA_FCS_ITNIM_SM_ONLINE:
	case BFA_FCS_ITNIM_SM_INITIATOR:
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}

/*
 * Build and transmit a PRLI to the remote port.  If no fcxp buffer is
 * available, queue ourselves for one and return; the allocator calls back
 * into this function with the buffer.  Posts FRMSENT on success.
 */
static void
bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
	struct bfa_fcs_rport_s *rport = itnim->rport;
	struct bfa_fcs_lport_s *port = rport->port;
	struct fchs_s	fchs;
	struct bfa_fcxp_s *fcxp;
	int		len;

	bfa_trc(itnim->fcs, itnim->rport->pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		itnim->stats.fcxp_alloc_wait++;
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
					bfa_fcs_itnim_send_prli, itnim);
		return;
	}
	itnim->fcxp = fcxp;

	len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			    itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0);

	bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
		      BFA_FALSE, FC_CLASS_3, len, &fchs,
		      bfa_fcs_itnim_prli_response, (void *)itnim,
		      FC_MAX_PDUSZ, FC_ELS_TOV);

	itnim->stats.prli_sent++;
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
}

/*
 * PRLI response handler: classify the reply as accept (target or
 * initiator-only peer) or reject, update stats, and post RSP_OK/RSP_ERROR
 * to the state machine.
 */
static void
bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			    bfa_status_t req_status, u32 rsp_len,
			    u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
	struct fc_els_cmd_s *els_cmd;
	struct fc_prli_s *prli_resp;
	struct fc_ls_rjt_s *ls_rjt;
	struct fc_prli_params_s *sparams;

	bfa_trc(itnim->fcs, req_status);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		itnim->stats.prli_rsp_err++;
		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	if (els_cmd->els_code == FC_ELS_ACC) {
		prli_resp = (struct fc_prli_s *) els_cmd;

		if (fc_prli_rsp_parse(prli_resp, rsp_len) != FC_PARSE_OK) {
			bfa_trc(itnim->fcs, rsp_len);
			/*
			 * Check if this  r-port is also in Initiator mode.
			 * If so, we need to set this ITN as a no-op.
			 */
			if (prli_resp->parampage.servparams.initiator) {
				bfa_trc(itnim->fcs, prli_resp->parampage.type);
				itnim->rport->scsi_function =
						BFA_RPORT_INITIATOR;
				itnim->stats.prli_rsp_acc++;
				bfa_sm_send_event(itnim,
						  BFA_FCS_ITNIM_SM_RSP_OK);
				return;
			}

			itnim->stats.prli_rsp_parse_err++;
			return;
		}
		itnim->rport->scsi_function = BFA_RPORT_TARGET;

		sparams = &prli_resp->parampage.servparams;
		itnim->seq_rec	     = sparams->retry;
		itnim->rec_support   = sparams->rec_support;
		itnim->task_retry_id = sparams->task_retry_id;
		itnim->conf_comp     = sparams->confirm;

		itnim->stats.prli_rsp_acc++;
		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK);
	} else {
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(itnim->fcs, ls_rjt->reason_code);
		bfa_trc(itnim->fcs, ls_rjt->reason_code_expl);

		itnim->stats.prli_rsp_rjt++;
		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
	}
}

/* Retry-delay timer callback: posts TIMEOUT to the state machine. */
static void
bfa_fcs_itnim_timeout(void *arg)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg;

	itnim->stats.timeout++;
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
}

/* Release the BFA itnim and the driver-level itnim structure. */
static void
bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
{
	bfa_itnim_delete(itnim->bfa_itnim);
	bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv);
}

/*
 *  itnim_public FCS ITNIM public interfaces
 */

/*
 *	Called by rport when a new rport is created.
 *
 *	@param[in] rport	-  remote port.
*/ struct bfa_fcs_itnim_s * bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; struct bfa_fcs_itnim_s *itnim; struct bfad_itnim_s *itnim_drv; struct bfa_itnim_s *bfa_itnim; /* * call bfad to allocate the itnim */ bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); if (itnim == NULL) { bfa_trc(port->fcs, rport->pwwn); return NULL; } /* * Initialize itnim */ itnim->rport = rport; itnim->fcs = rport->fcs; itnim->itnim_drv = itnim_drv; /* * call BFA to create the itnim */ bfa_itnim = bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim); if (bfa_itnim == NULL) { bfa_trc(port->fcs, rport->pwwn); bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv); WARN_ON(1); return NULL; } itnim->bfa_itnim = bfa_itnim; itnim->seq_rec = BFA_FALSE; itnim->rec_support = BFA_FALSE; itnim->conf_comp = BFA_FALSE; itnim->task_retry_id = BFA_FALSE; /* * Set State machine */ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); return itnim; } /* * Called by rport to delete the instance of FCPIM. * * @param[in] rport - remote port. */ void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); } /* * Notification from rport that PLOGI is complete to initiate FC-4 session. */ void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim) { itnim->stats.onlines++; if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) { bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE); } else { /* * For well known addresses, we set the itnim to initiator * state */ itnim->stats.initiator++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); } } /* * Called by rport to handle a remote device offline. */ void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim) { itnim->stats.offlines++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); } /* * Called by rport when remote port is known to be an initiator from * PRLI received. 
*/ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); itnim->stats.initiator++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); } /* * Called by rport to check if the itnim is online. */ bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) { case BFA_ITNIM_ONLINE: case BFA_ITNIM_INITIATIOR: return BFA_STATUS_OK; default: return BFA_STATUS_NO_FCPIM_NEXUS; } } /* * BFA completion callback for bfa_itnim_online(). */ void bfa_cb_itnim_online(void *cbarg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); } /* * BFA completion callback for bfa_itnim_offline(). */ void bfa_cb_itnim_offline(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); } /* * Mark the beginning of PATH TOV handling. IO completion callbacks * are still pending. */ void bfa_cb_itnim_tov_begin(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; bfa_trc(itnim->fcs, itnim->rport->pwwn); } /* * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. */ void bfa_cb_itnim_tov(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; struct bfad_itnim_s *itnim_drv = itnim->itnim_drv; bfa_trc(itnim->fcs, itnim->rport->pwwn); itnim_drv->state = ITNIM_STATE_TIMEOUT; } /* * BFA notification to FCS/driver for second level error recovery. * * Atleast one I/O request has timedout and target is unresponsive to * repeated abort requests. Second level error recovery should be initiated * by starting implicit logout and recovery procedures. 
*/ void bfa_cb_itnim_sler(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; itnim->stats.sler++; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); } struct bfa_fcs_itnim_s * bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) { struct bfa_fcs_rport_s *rport; rport = bfa_fcs_rport_lookup(port, rpwwn); if (!rport) return NULL; WARN_ON(rport->itnim == NULL); return rport->itnim; } bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, struct bfa_itnim_attr_s *attr) { struct bfa_fcs_itnim_s *itnim = NULL; itnim = bfa_fcs_itnim_lookup(port, rpwwn); if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm); attr->retry = itnim->seq_rec; attr->rec_support = itnim->rec_support; attr->conf_comp = itnim->conf_comp; attr->task_retry_id = itnim->task_retry_id; return BFA_STATUS_OK; } bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, struct bfa_itnim_stats_s *stats) { struct bfa_fcs_itnim_s *itnim = NULL; WARN_ON(port == NULL); itnim = bfa_fcs_itnim_lookup(port, rpwwn); if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); return BFA_STATUS_OK; } bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn) { struct bfa_fcs_itnim_s *itnim = NULL; WARN_ON(port == NULL); itnim = bfa_fcs_itnim_lookup(port, rpwwn); if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); return BFA_STATUS_OK; } void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, u16 len) { struct fc_els_cmd_s *els_cmd; bfa_trc(itnim->fcs, fchs->type); if (fchs->type != FC_TYPE_ELS) return; els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_trc(itnim->fcs, els_cmd->els_code); switch (els_cmd->els_code) { case FC_ELS_PRLO: bfa_fcs_rport_prlo(itnim->rport, 
fchs->ox_id); break; default: WARN_ON(1); } }
gpl-2.0
Marvell-Semi/EBU_mainline_public
sound/core/oss/pcm_oss.c
3414
87106
/* * Digital Audio (PCM) abstract layer / OSS compatible * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #if 0 #define PLUGIN_DEBUG #endif #if 0 #define OSS_DEBUG #endif #include <linux/init.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/math64.h> #include <linux/string.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "pcm_plugin.h" #include <sound/info.h> #include <linux/soundcard.h> #include <sound/initval.h> #include <sound/mixer_oss.h> #define OSS_ALSAEMULVER _SIOR ('M', 249, int) static int dsp_map[SNDRV_CARDS]; static int adsp_map[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS-1)] = 1}; static bool nonblock_open = 1; MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Abramo Bagnara <abramo@alsa-project.org>"); MODULE_DESCRIPTION("PCM OSS emulation for ALSA."); MODULE_LICENSE("GPL"); module_param_array(dsp_map, int, NULL, 0444); MODULE_PARM_DESC(dsp_map, "PCM device number assigned to 1st OSS device."); module_param_array(adsp_map, int, NULL, 0444); MODULE_PARM_DESC(adsp_map, "PCM device number assigned to 2nd OSS device."); module_param(nonblock_open, bool, 0644); MODULE_PARM_DESC(nonblock_open, "Don't block opening busy PCM devices."); MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_PCM); MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_PCM1); static int snd_pcm_oss_get_rate(struct snd_pcm_oss_file *pcm_oss_file); static int snd_pcm_oss_get_channels(struct snd_pcm_oss_file *pcm_oss_file); static int snd_pcm_oss_get_format(struct snd_pcm_oss_file *pcm_oss_file); static inline mm_segment_t snd_enter_user(void) { mm_segment_t fs = get_fs(); set_fs(get_ds()); return fs; } static inline void snd_leave_user(mm_segment_t fs) { set_fs(fs); } /* * helper functions to process hw_params */ static int snd_interval_refine_min(struct snd_interval *i, unsigned int min, int openmin) { int changed = 0; if (i->min < min) { i->min = min; i->openmin = openmin; changed = 1; } else if (i->min == min && !i->openmin && openmin) { i->openmin = 1; changed = 1; } if (i->integer) { if (i->openmin) { i->min++; i->openmin = 0; } } if (snd_interval_checkempty(i)) { snd_interval_none(i); return -EINVAL; } return changed; } static int snd_interval_refine_max(struct snd_interval *i, unsigned int max, int openmax) { int changed = 0; if (i->max > max) { i->max = max; i->openmax = openmax; changed = 1; } else if (i->max == max && !i->openmax && openmax) { i->openmax = 1; changed = 1; } if (i->integer) { if (i->openmax) { i->max--; i->openmax = 0; } } if (snd_interval_checkempty(i)) { snd_interval_none(i); return -EINVAL; } return changed; } static int 
snd_interval_refine_set(struct snd_interval *i, unsigned int val) { struct snd_interval t; t.empty = 0; t.min = t.max = val; t.openmin = t.openmax = 0; t.integer = 1; return snd_interval_refine(i, &t); } /** * snd_pcm_hw_param_value_min * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or NULL * * Return the minimum value for field PAR. */ static unsigned int snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { if (hw_is_mask(var)) { if (dir) *dir = 0; return snd_mask_min(hw_param_mask_c(params, var)); } if (hw_is_interval(var)) { const struct snd_interval *i = hw_param_interval_c(params, var); if (dir) *dir = i->openmin; return snd_interval_min(i); } return -EINVAL; } /** * snd_pcm_hw_param_value_max * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or NULL * * Return the maximum value for field PAR. */ static unsigned int snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { if (hw_is_mask(var)) { if (dir) *dir = 0; return snd_mask_max(hw_param_mask_c(params, var)); } if (hw_is_interval(var)) { const struct snd_interval *i = hw_param_interval_c(params, var); if (dir) *dir = - (int) i->openmax; return snd_interval_max(i); } return -EINVAL; } static int _snd_pcm_hw_param_mask(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, const struct snd_mask *val) { int changed; changed = snd_mask_refine(hw_param_mask(params, var), val); if (changed) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } static int snd_pcm_hw_param_mask(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, const struct snd_mask *val) { int changed = _snd_pcm_hw_param_mask(params, var, val); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (err < 0) return err; } return 0; } 
static int _snd_pcm_hw_param_min(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, unsigned int val, int dir) { int changed; int open = 0; if (dir) { if (dir > 0) { open = 1; } else if (dir < 0) { if (val > 0) { open = 1; val--; } } } if (hw_is_mask(var)) changed = snd_mask_refine_min(hw_param_mask(params, var), val + !!open); else if (hw_is_interval(var)) changed = snd_interval_refine_min(hw_param_interval(params, var), val, open); else return -EINVAL; if (changed) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } /** * snd_pcm_hw_param_min * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @val: minimal value * @dir: pointer to the direction (-1,0,1) or NULL * * Inside configuration space defined by PARAMS remove from PAR all * values < VAL. Reduce configuration space accordingly. * Return new minimum or -EINVAL if the configuration space is empty */ static int snd_pcm_hw_param_min(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, unsigned int val, int *dir) { int changed = _snd_pcm_hw_param_min(params, var, val, dir ? 
*dir : 0); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (err < 0) return err; } return snd_pcm_hw_param_value_min(params, var, dir); } static int _snd_pcm_hw_param_max(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, unsigned int val, int dir) { int changed; int open = 0; if (dir) { if (dir < 0) { open = 1; } else if (dir > 0) { open = 1; val++; } } if (hw_is_mask(var)) { if (val == 0 && open) { snd_mask_none(hw_param_mask(params, var)); changed = -EINVAL; } else changed = snd_mask_refine_max(hw_param_mask(params, var), val - !!open); } else if (hw_is_interval(var)) changed = snd_interval_refine_max(hw_param_interval(params, var), val, open); else return -EINVAL; if (changed) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } /** * snd_pcm_hw_param_max * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @val: maximal value * @dir: pointer to the direction (-1,0,1) or NULL * * Inside configuration space defined by PARAMS remove from PAR all * values >= VAL + 1. Reduce configuration space accordingly. * Return new maximum or -EINVAL if the configuration space is empty */ static int snd_pcm_hw_param_max(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, unsigned int val, int *dir) { int changed = _snd_pcm_hw_param_max(params, var, val, dir ? *dir : 0); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (err < 0) return err; } return snd_pcm_hw_param_value_max(params, var, dir); } static int boundary_sub(int a, int adir, int b, int bdir, int *c, int *cdir) { adir = adir < 0 ? -1 : (adir > 0 ? 1 : 0); bdir = bdir < 0 ? -1 : (bdir > 0 ? 
1 : 0); *c = a - b; *cdir = adir - bdir; if (*cdir == -2) { (*c)--; } else if (*cdir == 2) { (*c)++; } return 0; } static int boundary_lt(unsigned int a, int adir, unsigned int b, int bdir) { if (adir < 0) { a--; adir = 1; } else if (adir > 0) adir = 1; if (bdir < 0) { b--; bdir = 1; } else if (bdir > 0) bdir = 1; return a < b || (a == b && adir < bdir); } /* Return 1 if min is nearer to best than max */ static int boundary_nearer(int min, int mindir, int best, int bestdir, int max, int maxdir) { int dmin, dmindir; int dmax, dmaxdir; boundary_sub(best, bestdir, min, mindir, &dmin, &dmindir); boundary_sub(max, maxdir, best, bestdir, &dmax, &dmaxdir); return boundary_lt(dmin, dmindir, dmax, dmaxdir); } /** * snd_pcm_hw_param_near * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @best: value to set * @dir: pointer to the direction (-1,0,1) or NULL * * Inside configuration space defined by PARAMS set PAR to the available value * nearest to VAL. Reduce configuration space accordingly. * This function cannot be called for SNDRV_PCM_HW_PARAM_ACCESS, * SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_SUBFORMAT. * Return the value found. */ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, unsigned int best, int *dir) { struct snd_pcm_hw_params *save = NULL; int v; unsigned int saved_min; int last = 0; int min, max; int mindir, maxdir; int valdir = dir ? 
*dir : 0; /* FIXME */ if (best > INT_MAX) best = INT_MAX; min = max = best; mindir = maxdir = valdir; if (maxdir > 0) maxdir = 0; else if (maxdir == 0) maxdir = -1; else { maxdir = 1; max--; } save = kmalloc(sizeof(*save), GFP_KERNEL); if (save == NULL) return -ENOMEM; *save = *params; saved_min = min; min = snd_pcm_hw_param_min(pcm, params, var, min, &mindir); if (min >= 0) { struct snd_pcm_hw_params *params1; if (max < 0) goto _end; if ((unsigned int)min == saved_min && mindir == valdir) goto _end; params1 = kmalloc(sizeof(*params1), GFP_KERNEL); if (params1 == NULL) { kfree(save); return -ENOMEM; } *params1 = *save; max = snd_pcm_hw_param_max(pcm, params1, var, max, &maxdir); if (max < 0) { kfree(params1); goto _end; } if (boundary_nearer(max, maxdir, best, valdir, min, mindir)) { *params = *params1; last = 1; } kfree(params1); } else { *params = *save; max = snd_pcm_hw_param_max(pcm, params, var, max, &maxdir); if (max < 0) { kfree(save); return max; } last = 1; } _end: kfree(save); if (last) v = snd_pcm_hw_param_last(pcm, params, var, dir); else v = snd_pcm_hw_param_first(pcm, params, var, dir); snd_BUG_ON(v < 0); return v; } static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, unsigned int val, int dir) { int changed; if (hw_is_mask(var)) { struct snd_mask *m = hw_param_mask(params, var); if (val == 0 && dir < 0) { changed = -EINVAL; snd_mask_none(m); } else { if (dir > 0) val++; else if (dir < 0) val--; changed = snd_mask_refine_set(hw_param_mask(params, var), val); } } else if (hw_is_interval(var)) { struct snd_interval *i = hw_param_interval(params, var); if (val == 0 && dir < 0) { changed = -EINVAL; snd_interval_none(i); } else if (dir == 0) changed = snd_interval_refine_set(i, val); else { struct snd_interval t; t.openmin = 1; t.openmax = 1; t.empty = 0; t.integer = 0; if (dir < 0) { t.min = val - 1; t.max = val; } else { t.min = val; t.max = val+1; } changed = snd_interval_refine(i, &t); } } else return -EINVAL; if 
(changed) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } /** * snd_pcm_hw_param_set * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @val: value to set * @dir: pointer to the direction (-1,0,1) or NULL * * Inside configuration space defined by PARAMS remove from PAR all * values != VAL. Reduce configuration space accordingly. * Return VAL or -EINVAL if the configuration space is empty */ static int snd_pcm_hw_param_set(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, unsigned int val, int dir) { int changed = _snd_pcm_hw_param_set(params, var, val, dir); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (err < 0) return err; } return snd_pcm_hw_param_value(params, var, NULL); } static int _snd_pcm_hw_param_setinteger(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { int changed; changed = snd_interval_setinteger(hw_param_interval(params, var)); if (changed) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } /* * plugin */ #ifdef CONFIG_SND_PCM_OSS_PLUGINS static int snd_pcm_oss_plugin_clear(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_plugin *plugin, *next; plugin = runtime->oss.plugin_first; while (plugin) { next = plugin->next; snd_pcm_plugin_free(plugin); plugin = next; } runtime->oss.plugin_first = runtime->oss.plugin_last = NULL; return 0; } static int snd_pcm_plugin_insert(struct snd_pcm_plugin *plugin) { struct snd_pcm_runtime *runtime = plugin->plug->runtime; plugin->next = runtime->oss.plugin_first; plugin->prev = NULL; if (runtime->oss.plugin_first) { runtime->oss.plugin_first->prev = plugin; runtime->oss.plugin_first = plugin; } else { runtime->oss.plugin_last = runtime->oss.plugin_first = plugin; } return 0; } int snd_pcm_plugin_append(struct snd_pcm_plugin *plugin) { struct snd_pcm_runtime *runtime = 
plugin->plug->runtime; plugin->next = NULL; plugin->prev = runtime->oss.plugin_last; if (runtime->oss.plugin_last) { runtime->oss.plugin_last->next = plugin; runtime->oss.plugin_last = plugin; } else { runtime->oss.plugin_last = runtime->oss.plugin_first = plugin; } return 0; } #endif /* CONFIG_SND_PCM_OSS_PLUGINS */ static long snd_pcm_oss_bytes(struct snd_pcm_substream *substream, long frames) { struct snd_pcm_runtime *runtime = substream->runtime; long buffer_size = snd_pcm_lib_buffer_bytes(substream); long bytes = frames_to_bytes(runtime, frames); if (buffer_size == runtime->oss.buffer_bytes) return bytes; #if BITS_PER_LONG >= 64 return runtime->oss.buffer_bytes * bytes / buffer_size; #else { u64 bsize = (u64)runtime->oss.buffer_bytes * (u64)bytes; return div_u64(bsize, buffer_size); } #endif } static long snd_pcm_alsa_frames(struct snd_pcm_substream *substream, long bytes) { struct snd_pcm_runtime *runtime = substream->runtime; long buffer_size = snd_pcm_lib_buffer_bytes(substream); if (buffer_size == runtime->oss.buffer_bytes) return bytes_to_frames(runtime, bytes); return bytes_to_frames(runtime, (buffer_size * bytes) / runtime->oss.buffer_bytes); } static inline snd_pcm_uframes_t get_hw_ptr_period(struct snd_pcm_runtime *runtime) { return runtime->hw_ptr_interrupt; } /* define extended formats in the recent OSS versions (if any) */ /* linear formats */ #define AFMT_S32_LE 0x00001000 #define AFMT_S32_BE 0x00002000 #define AFMT_S24_LE 0x00008000 #define AFMT_S24_BE 0x00010000 #define AFMT_S24_PACKED 0x00040000 /* other supported formats */ #define AFMT_FLOAT 0x00004000 #define AFMT_SPDIF_RAW 0x00020000 /* unsupported formats */ #define AFMT_AC3 0x00000400 #define AFMT_VORBIS 0x00000800 static snd_pcm_format_t snd_pcm_oss_format_from(int format) { switch (format) { case AFMT_MU_LAW: return SNDRV_PCM_FORMAT_MU_LAW; case AFMT_A_LAW: return SNDRV_PCM_FORMAT_A_LAW; case AFMT_IMA_ADPCM: return SNDRV_PCM_FORMAT_IMA_ADPCM; case AFMT_U8: return SNDRV_PCM_FORMAT_U8; 
case AFMT_S16_LE: return SNDRV_PCM_FORMAT_S16_LE; case AFMT_S16_BE: return SNDRV_PCM_FORMAT_S16_BE; case AFMT_S8: return SNDRV_PCM_FORMAT_S8; case AFMT_U16_LE: return SNDRV_PCM_FORMAT_U16_LE; case AFMT_U16_BE: return SNDRV_PCM_FORMAT_U16_BE; case AFMT_MPEG: return SNDRV_PCM_FORMAT_MPEG; case AFMT_S32_LE: return SNDRV_PCM_FORMAT_S32_LE; case AFMT_S32_BE: return SNDRV_PCM_FORMAT_S32_BE; case AFMT_S24_LE: return SNDRV_PCM_FORMAT_S24_LE; case AFMT_S24_BE: return SNDRV_PCM_FORMAT_S24_BE; case AFMT_S24_PACKED: return SNDRV_PCM_FORMAT_S24_3LE; case AFMT_FLOAT: return SNDRV_PCM_FORMAT_FLOAT; case AFMT_SPDIF_RAW: return SNDRV_PCM_FORMAT_IEC958_SUBFRAME; default: return SNDRV_PCM_FORMAT_U8; } } static int snd_pcm_oss_format_to(snd_pcm_format_t format) { switch (format) { case SNDRV_PCM_FORMAT_MU_LAW: return AFMT_MU_LAW; case SNDRV_PCM_FORMAT_A_LAW: return AFMT_A_LAW; case SNDRV_PCM_FORMAT_IMA_ADPCM: return AFMT_IMA_ADPCM; case SNDRV_PCM_FORMAT_U8: return AFMT_U8; case SNDRV_PCM_FORMAT_S16_LE: return AFMT_S16_LE; case SNDRV_PCM_FORMAT_S16_BE: return AFMT_S16_BE; case SNDRV_PCM_FORMAT_S8: return AFMT_S8; case SNDRV_PCM_FORMAT_U16_LE: return AFMT_U16_LE; case SNDRV_PCM_FORMAT_U16_BE: return AFMT_U16_BE; case SNDRV_PCM_FORMAT_MPEG: return AFMT_MPEG; case SNDRV_PCM_FORMAT_S32_LE: return AFMT_S32_LE; case SNDRV_PCM_FORMAT_S32_BE: return AFMT_S32_BE; case SNDRV_PCM_FORMAT_S24_LE: return AFMT_S24_LE; case SNDRV_PCM_FORMAT_S24_BE: return AFMT_S24_BE; case SNDRV_PCM_FORMAT_S24_3LE: return AFMT_S24_PACKED; case SNDRV_PCM_FORMAT_FLOAT: return AFMT_FLOAT; case SNDRV_PCM_FORMAT_IEC958_SUBFRAME: return AFMT_SPDIF_RAW; default: return -EINVAL; } } static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *oss_params, struct snd_pcm_hw_params *slave_params) { size_t s; size_t oss_buffer_size, oss_period_size, oss_periods; size_t min_period_size, max_period_size; struct snd_pcm_runtime *runtime = substream->runtime; size_t oss_frame_size; oss_frame_size 
= snd_pcm_format_physical_width(params_format(oss_params)) * params_channels(oss_params) / 8; oss_buffer_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size; oss_buffer_size = 1 << ld2(oss_buffer_size); if (atomic_read(&substream->mmap_count)) { if (oss_buffer_size > runtime->oss.mmap_bytes) oss_buffer_size = runtime->oss.mmap_bytes; } if (substream->oss.setup.period_size > 16) oss_period_size = substream->oss.setup.period_size; else if (runtime->oss.fragshift) { oss_period_size = 1 << runtime->oss.fragshift; if (oss_period_size > oss_buffer_size / 2) oss_period_size = oss_buffer_size / 2; } else { int sd; size_t bytes_per_sec = params_rate(oss_params) * snd_pcm_format_physical_width(params_format(oss_params)) * params_channels(oss_params) / 8; oss_period_size = oss_buffer_size; do { oss_period_size /= 2; } while (oss_period_size > bytes_per_sec); if (runtime->oss.subdivision == 0) { sd = 4; if (oss_period_size / sd > 4096) sd *= 2; if (oss_period_size / sd < 4096) sd = 1; } else sd = runtime->oss.subdivision; oss_period_size /= sd; if (oss_period_size < 16) oss_period_size = 16; } min_period_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); min_period_size *= oss_frame_size; min_period_size = 1 << (ld2(min_period_size - 1) + 1); if (oss_period_size < min_period_size) oss_period_size = min_period_size; max_period_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); max_period_size *= oss_frame_size; max_period_size = 1 << ld2(max_period_size); if (oss_period_size > max_period_size) oss_period_size = max_period_size; oss_periods = oss_buffer_size / oss_period_size; if (substream->oss.setup.periods > 1) oss_periods = substream->oss.setup.periods; s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL); if 
(runtime->oss.maxfrags && s > runtime->oss.maxfrags) s = runtime->oss.maxfrags; if (oss_periods > s) oss_periods = s; s = snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL); if (s < 2) s = 2; if (oss_periods < s) oss_periods = s; while (oss_period_size * oss_periods > oss_buffer_size) oss_period_size /= 2; if (oss_period_size < 16) return -EINVAL; runtime->oss.period_bytes = oss_period_size; runtime->oss.period_frames = 1; runtime->oss.periods = oss_periods; return 0; } static int choose_rate(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, unsigned int best_rate) { struct snd_interval *it; struct snd_pcm_hw_params *save; unsigned int rate, prev; save = kmalloc(sizeof(*save), GFP_KERNEL); if (save == NULL) return -ENOMEM; *save = *params; it = hw_param_interval(save, SNDRV_PCM_HW_PARAM_RATE); /* try multiples of the best rate */ rate = best_rate; for (;;) { if (it->max < rate || (it->max == rate && it->openmax)) break; if (it->min < rate || (it->min == rate && !it->openmin)) { int ret; ret = snd_pcm_hw_param_set(substream, params, SNDRV_PCM_HW_PARAM_RATE, rate, 0); if (ret == (int)rate) { kfree(save); return rate; } *params = *save; } prev = rate; rate += best_rate; if (rate <= prev) break; } /* not found, use the nearest rate */ kfree(save); return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL); } static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_hw_params *params, *sparams; struct snd_pcm_sw_params *sw_params; ssize_t oss_buffer_size, oss_period_size; size_t oss_frame_size; int err; int direct; snd_pcm_format_t format, sformat; int n; struct snd_mask sformat_mask; struct snd_mask mask; if (mutex_lock_interruptible(&runtime->oss.params_lock)) return -EINTR; sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL); params = kmalloc(sizeof(*params), GFP_KERNEL); sparams = kmalloc(sizeof(*sparams), 
GFP_KERNEL); if (!sw_params || !params || !sparams) { snd_printd("No memory\n"); err = -ENOMEM; goto failure; } if (atomic_read(&substream->mmap_count)) direct = 1; else direct = substream->oss.setup.direct; _snd_pcm_hw_params_any(sparams); _snd_pcm_hw_param_setinteger(sparams, SNDRV_PCM_HW_PARAM_PERIODS); _snd_pcm_hw_param_min(sparams, SNDRV_PCM_HW_PARAM_PERIODS, 2, 0); snd_mask_none(&mask); if (atomic_read(&substream->mmap_count)) snd_mask_set(&mask, (__force int)SNDRV_PCM_ACCESS_MMAP_INTERLEAVED); else { snd_mask_set(&mask, (__force int)SNDRV_PCM_ACCESS_RW_INTERLEAVED); if (!direct) snd_mask_set(&mask, (__force int)SNDRV_PCM_ACCESS_RW_NONINTERLEAVED); } err = snd_pcm_hw_param_mask(substream, sparams, SNDRV_PCM_HW_PARAM_ACCESS, &mask); if (err < 0) { snd_printd("No usable accesses\n"); err = -EINVAL; goto failure; } choose_rate(substream, sparams, runtime->oss.rate); snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL); format = snd_pcm_oss_format_from(runtime->oss.format); sformat_mask = *hw_param_mask(sparams, SNDRV_PCM_HW_PARAM_FORMAT); if (direct) sformat = format; else sformat = snd_pcm_plug_slave_format(format, &sformat_mask); if ((__force int)sformat < 0 || !snd_mask_test(&sformat_mask, (__force int)sformat)) { for (sformat = (__force snd_pcm_format_t)0; (__force int)sformat <= (__force int)SNDRV_PCM_FORMAT_LAST; sformat = (__force snd_pcm_format_t)((__force int)sformat + 1)) { if (snd_mask_test(&sformat_mask, (__force int)sformat) && snd_pcm_oss_format_to(sformat) >= 0) break; } if ((__force int)sformat > (__force int)SNDRV_PCM_FORMAT_LAST) { snd_printd("Cannot find a format!!!\n"); err = -EINVAL; goto failure; } } err = _snd_pcm_hw_param_set(sparams, SNDRV_PCM_HW_PARAM_FORMAT, (__force int)sformat, 0); if (err < 0) goto failure; if (direct) { memcpy(params, sparams, sizeof(*params)); } else { _snd_pcm_hw_params_any(params); _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS, (__force 
int)SNDRV_PCM_ACCESS_RW_INTERLEAVED, 0); _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT, (__force int)snd_pcm_oss_format_from(runtime->oss.format), 0); _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, 0); _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE, runtime->oss.rate, 0); pdprintf("client: access = %i, format = %i, channels = %i, rate = %i\n", params_access(params), params_format(params), params_channels(params), params_rate(params)); } pdprintf("slave: access = %i, format = %i, channels = %i, rate = %i\n", params_access(sparams), params_format(sparams), params_channels(sparams), params_rate(sparams)); oss_frame_size = snd_pcm_format_physical_width(params_format(params)) * params_channels(params) / 8; #ifdef CONFIG_SND_PCM_OSS_PLUGINS snd_pcm_oss_plugin_clear(substream); if (!direct) { /* add necessary plugins */ snd_pcm_oss_plugin_clear(substream); if ((err = snd_pcm_plug_format_plugins(substream, params, sparams)) < 0) { snd_printd("snd_pcm_plug_format_plugins failed: %i\n", err); snd_pcm_oss_plugin_clear(substream); goto failure; } if (runtime->oss.plugin_first) { struct snd_pcm_plugin *plugin; if ((err = snd_pcm_plugin_build_io(substream, sparams, &plugin)) < 0) { snd_printd("snd_pcm_plugin_build_io failed: %i\n", err); snd_pcm_oss_plugin_clear(substream); goto failure; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { err = snd_pcm_plugin_append(plugin); } else { err = snd_pcm_plugin_insert(plugin); } if (err < 0) { snd_pcm_oss_plugin_clear(substream); goto failure; } } } #endif err = snd_pcm_oss_period_size(substream, params, sparams); if (err < 0) goto failure; n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size); err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL); if (err < 0) goto failure; err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS, runtime->oss.periods, NULL); if (err < 0) goto failure; 
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) { snd_printd("HW_PARAMS failed: %i\n", err); goto failure; } memset(sw_params, 0, sizeof(*sw_params)); if (runtime->oss.trigger) { sw_params->start_threshold = 1; } else { sw_params->start_threshold = runtime->boundary; } if (atomic_read(&substream->mmap_count) || substream->stream == SNDRV_PCM_STREAM_CAPTURE) sw_params->stop_threshold = runtime->boundary; else sw_params->stop_threshold = runtime->buffer_size; sw_params->tstamp_mode = SNDRV_PCM_TSTAMP_NONE; sw_params->period_step = 1; sw_params->avail_min = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 1 : runtime->period_size; if (atomic_read(&substream->mmap_count) || substream->oss.setup.nosilence) { sw_params->silence_threshold = 0; sw_params->silence_size = 0; } else { snd_pcm_uframes_t frames; frames = runtime->period_size + 16; if (frames > runtime->buffer_size) frames = runtime->buffer_size; sw_params->silence_threshold = frames; sw_params->silence_size = frames; } if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_SW_PARAMS, sw_params)) < 0) { snd_printd("SW_PARAMS failed: %i\n", err); goto failure; } runtime->oss.periods = params_periods(sparams); oss_period_size = snd_pcm_plug_client_size(substream, params_period_size(sparams)); if (oss_period_size < 0) { err = -EINVAL; goto failure; } #ifdef CONFIG_SND_PCM_OSS_PLUGINS if (runtime->oss.plugin_first) { err = snd_pcm_plug_alloc(substream, oss_period_size); if (err < 0) goto failure; } #endif oss_period_size *= oss_frame_size; oss_buffer_size = oss_period_size * runtime->oss.periods; if (oss_buffer_size < 0) { err = -EINVAL; goto failure; } runtime->oss.period_bytes = oss_period_size; runtime->oss.buffer_bytes = oss_buffer_size; pdprintf("oss: period bytes = %i, buffer bytes = %i\n", runtime->oss.period_bytes, runtime->oss.buffer_bytes); pdprintf("slave: period_size = %i, buffer_size = %i\n", 
params_period_size(sparams), params_buffer_size(sparams));
	runtime->oss.format = snd_pcm_oss_format_to(params_format(params));
	runtime->oss.channels = params_channels(params);
	runtime->oss.rate = params_rate(params);
	/* (re)allocate the staging buffer: one OSS period's worth of bytes */
	vfree(runtime->oss.buffer);
	runtime->oss.buffer = vmalloc(runtime->oss.period_bytes);
	if (!runtime->oss.buffer) {
		err = -ENOMEM;
		goto failure;
	}
	runtime->oss.params = 0;	/* parameters are now up to date */
	runtime->oss.prepare = 1;	/* but the stream must be re-prepared */
	runtime->oss.buffer_used = 0;
	/* pre-fill the DMA area with silence so stale data is never played */
	if (runtime->dma_area)
		snd_pcm_format_set_silence(runtime->format, runtime->dma_area,
					   bytes_to_samples(runtime, runtime->dma_bytes));
	runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size);

	err = 0;
failure:
	kfree(sw_params);
	kfree(params);
	kfree(sparams);
	mutex_unlock(&runtime->oss.params_lock);
	return err;
}

/*
 * Return (via r_substream) the first open substream of this OSS file,
 * refreshing the hw/sw parameters of every substream whose OSS settings
 * changed since the last call.
 * Returns 0 on success, -EIO when neither direction is open, or the
 * error from snd_pcm_oss_change_params().
 */
static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_file,
					    struct snd_pcm_substream **r_substream)
{
	int idx, err;
	struct snd_pcm_substream *asubstream = NULL, *substream;

	for (idx = 0; idx < 2; idx++) {
		substream = pcm_oss_file->streams[idx];
		if (substream == NULL)
			continue;
		if (asubstream == NULL)
			asubstream = substream;
		if (substream->runtime->oss.params) {
			err = snd_pcm_oss_change_params(substream);
			if (err < 0)
				return err;
		}
	}
	if (!asubstream)
		return -EIO;
	if (r_substream)
		*r_substream = asubstream;
	return 0;
}

/*
 * Issue SNDRV_PCM_IOCTL_PREPARE and reset the OSS bookkeeping counters.
 * Returns 0 on success or the ioctl's negative error code.
 */
static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
{
	int err;
	struct snd_pcm_runtime *runtime = substream->runtime;

	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
	if (err < 0) {
		snd_printd("snd_pcm_oss_prepare: SNDRV_PCM_IOCTL_PREPARE failed\n");
		return err;
	}
	runtime->oss.prepare = 0;
	runtime->oss.prev_hw_ptr_period = 0;
	runtime->oss.period_ptr = 0;
	runtime->oss.buffer_used = 0;
	return 0;
}

/*
 * Make the substream ready for I/O: apply pending parameter changes and
 * prepare the stream if required.  A NULL substream is a no-op success.
 */
static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int err;

	if (substream == NULL)
		return 0;
	runtime = substream->runtime;
	if (runtime->oss.params) {
		err =
snd_pcm_oss_change_params(substream); if (err < 0) return err; } if (runtime->oss.prepare) { err = snd_pcm_oss_prepare(substream); if (err < 0) return err; } return 0; } static int snd_pcm_oss_capture_position_fixup(struct snd_pcm_substream *substream, snd_pcm_sframes_t *delay) { struct snd_pcm_runtime *runtime; snd_pcm_uframes_t frames; int err = 0; while (1) { err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, delay); if (err < 0) break; runtime = substream->runtime; if (*delay <= (snd_pcm_sframes_t)runtime->buffer_size) break; /* in case of overrun, skip whole periods like OSS/Linux driver does */ /* until avail(delay) <= buffer_size */ frames = (*delay - runtime->buffer_size) + runtime->period_size - 1; frames /= runtime->period_size; frames *= runtime->period_size; err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_FORWARD, &frames); if (err < 0) break; } return err; } snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const char *ptr, snd_pcm_uframes_t frames, int in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; int ret; while (1) { if (runtime->status->state == SNDRV_PCM_STATE_XRUN || runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { #ifdef OSS_DEBUG if (runtime->status->state == SNDRV_PCM_STATE_XRUN) printk(KERN_DEBUG "pcm_oss: write: " "recovering from XRUN\n"); else printk(KERN_DEBUG "pcm_oss: write: " "recovering from SUSPEND\n"); #endif ret = snd_pcm_oss_prepare(substream); if (ret < 0) break; } if (in_kernel) { mm_segment_t fs; fs = snd_enter_user(); ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames); snd_leave_user(fs); } else { ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames); } if (ret != -EPIPE && ret != -ESTRPIPE) break; /* test, if we can't store new data, because the stream */ /* has not been started */ if (runtime->status->state == SNDRV_PCM_STATE_PREPARED) return -EAGAIN; } return ret; } snd_pcm_sframes_t snd_pcm_oss_read3(struct 
snd_pcm_substream *substream, char *ptr, snd_pcm_uframes_t frames, int in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_sframes_t delay; int ret; while (1) { if (runtime->status->state == SNDRV_PCM_STATE_XRUN || runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { #ifdef OSS_DEBUG if (runtime->status->state == SNDRV_PCM_STATE_XRUN) printk(KERN_DEBUG "pcm_oss: read: " "recovering from XRUN\n"); else printk(KERN_DEBUG "pcm_oss: read: " "recovering from SUSPEND\n"); #endif ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL); if (ret < 0) break; } else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) { ret = snd_pcm_oss_prepare(substream); if (ret < 0) break; } ret = snd_pcm_oss_capture_position_fixup(substream, &delay); if (ret < 0) break; if (in_kernel) { mm_segment_t fs; fs = snd_enter_user(); ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames); snd_leave_user(fs); } else { ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames); } if (ret == -EPIPE) { if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) { ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); if (ret < 0) break; } continue; } if (ret != -ESTRPIPE) break; } return ret; } snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames, int in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; int ret; while (1) { if (runtime->status->state == SNDRV_PCM_STATE_XRUN || runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { #ifdef OSS_DEBUG if (runtime->status->state == SNDRV_PCM_STATE_XRUN) printk(KERN_DEBUG "pcm_oss: writev: " "recovering from XRUN\n"); else printk(KERN_DEBUG "pcm_oss: writev: " "recovering from SUSPEND\n"); #endif ret = snd_pcm_oss_prepare(substream); if (ret < 0) break; } if (in_kernel) { mm_segment_t fs; fs = snd_enter_user(); ret = snd_pcm_lib_writev(substream, (void __user **)bufs, frames); snd_leave_user(fs); } else { ret = 
snd_pcm_lib_writev(substream, (void __user **)bufs, frames); } if (ret != -EPIPE && ret != -ESTRPIPE) break; /* test, if we can't store new data, because the stream */ /* has not been started */ if (runtime->status->state == SNDRV_PCM_STATE_PREPARED) return -EAGAIN; } return ret; } snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames, int in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; int ret; while (1) { if (runtime->status->state == SNDRV_PCM_STATE_XRUN || runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { #ifdef OSS_DEBUG if (runtime->status->state == SNDRV_PCM_STATE_XRUN) printk(KERN_DEBUG "pcm_oss: readv: " "recovering from XRUN\n"); else printk(KERN_DEBUG "pcm_oss: readv: " "recovering from SUSPEND\n"); #endif ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL); if (ret < 0) break; } else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) { ret = snd_pcm_oss_prepare(substream); if (ret < 0) break; } if (in_kernel) { mm_segment_t fs; fs = snd_enter_user(); ret = snd_pcm_lib_readv(substream, (void __user **)bufs, frames); snd_leave_user(fs); } else { ret = snd_pcm_lib_readv(substream, (void __user **)bufs, frames); } if (ret != -EPIPE && ret != -ESTRPIPE) break; } return ret; } static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const char *buf, size_t bytes, int in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_sframes_t frames, frames1; #ifdef CONFIG_SND_PCM_OSS_PLUGINS if (runtime->oss.plugin_first) { struct snd_pcm_plugin_channel *channels; size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8; if (!in_kernel) { if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes)) return -EFAULT; buf = runtime->oss.buffer; } frames = bytes / oss_frame_bytes; frames1 = snd_pcm_plug_client_channels_buf(substream, (char *)buf, frames, 
&channels); if (frames1 < 0) return frames1; frames1 = snd_pcm_plug_write_transfer(substream, channels, frames1); if (frames1 <= 0) return frames1; bytes = frames1 * oss_frame_bytes; } else #endif { frames = bytes_to_frames(runtime, bytes); frames1 = snd_pcm_oss_write3(substream, buf, frames, in_kernel); if (frames1 <= 0) return frames1; bytes = frames_to_bytes(runtime, frames1); } return bytes; } static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes) { size_t xfer = 0; ssize_t tmp; struct snd_pcm_runtime *runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) return -ENXIO; if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) return tmp; mutex_lock(&runtime->oss.params_lock); while (bytes > 0) { if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { tmp = bytes; if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes) tmp = runtime->oss.period_bytes - runtime->oss.buffer_used; if (tmp > 0) { if (copy_from_user(runtime->oss.buffer + runtime->oss.buffer_used, buf, tmp)) { tmp = -EFAULT; goto err; } } runtime->oss.buffer_used += tmp; buf += tmp; bytes -= tmp; xfer += tmp; if (substream->oss.setup.partialfrag || runtime->oss.buffer_used == runtime->oss.period_bytes) { tmp = snd_pcm_oss_write2(substream, runtime->oss.buffer + runtime->oss.period_ptr, runtime->oss.buffer_used - runtime->oss.period_ptr, 1); if (tmp <= 0) goto err; runtime->oss.bytes += tmp; runtime->oss.period_ptr += tmp; runtime->oss.period_ptr %= runtime->oss.period_bytes; if (runtime->oss.period_ptr == 0 || runtime->oss.period_ptr == runtime->oss.buffer_used) runtime->oss.buffer_used = 0; else if ((substream->f_flags & O_NONBLOCK) != 0) { tmp = -EAGAIN; goto err; } } } else { tmp = snd_pcm_oss_write2(substream, (const char __force *)buf, runtime->oss.period_bytes, 0); if (tmp <= 0) goto err; runtime->oss.bytes += tmp; buf += tmp; bytes -= tmp; xfer += tmp; if ((substream->f_flags & O_NONBLOCK) != 0 && tmp != 
runtime->oss.period_bytes) break; } } mutex_unlock(&runtime->oss.params_lock); return xfer; err: mutex_unlock(&runtime->oss.params_lock); return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; } static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf, size_t bytes, int in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_sframes_t frames, frames1; #ifdef CONFIG_SND_PCM_OSS_PLUGINS char __user *final_dst = (char __force __user *)buf; if (runtime->oss.plugin_first) { struct snd_pcm_plugin_channel *channels; size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8; if (!in_kernel) buf = runtime->oss.buffer; frames = bytes / oss_frame_bytes; frames1 = snd_pcm_plug_client_channels_buf(substream, buf, frames, &channels); if (frames1 < 0) return frames1; frames1 = snd_pcm_plug_read_transfer(substream, channels, frames1); if (frames1 <= 0) return frames1; bytes = frames1 * oss_frame_bytes; if (!in_kernel && copy_to_user(final_dst, buf, bytes)) return -EFAULT; } else #endif { frames = bytes_to_frames(runtime, bytes); frames1 = snd_pcm_oss_read3(substream, buf, frames, in_kernel); if (frames1 <= 0) return frames1; bytes = frames_to_bytes(runtime, frames1); } return bytes; } static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes) { size_t xfer = 0; ssize_t tmp; struct snd_pcm_runtime *runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) return -ENXIO; if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) return tmp; mutex_lock(&runtime->oss.params_lock); while (bytes > 0) { if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { if (runtime->oss.buffer_used == 0) { tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1); if (tmp <= 0) goto err; runtime->oss.bytes += tmp; runtime->oss.period_ptr = tmp; runtime->oss.buffer_used = tmp; } tmp = bytes; if ((size_t) tmp > 
runtime->oss.buffer_used)
				tmp = runtime->oss.buffer_used;
			/* copy out the unread tail of the staging buffer */
			if (copy_to_user(buf,
					 runtime->oss.buffer + (runtime->oss.period_ptr - runtime->oss.buffer_used),
					 tmp)) {
				tmp = -EFAULT;
				goto err;
			}
			buf += tmp;
			bytes -= tmp;
			xfer += tmp;
			runtime->oss.buffer_used -= tmp;
		} else {
			/* a whole period fits: read straight into the user buffer */
			tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
						runtime->oss.period_bytes, 0);
			if (tmp <= 0)
				goto err;
			runtime->oss.bytes += tmp;
			buf += tmp;
			bytes -= tmp;
			xfer += tmp;
		}
	}
	mutex_unlock(&runtime->oss.params_lock);
	return xfer;
 err:
	mutex_unlock(&runtime->oss.params_lock);
	/* report partial progress if any data was transferred before the error */
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}

/*
 * SNDCTL_DSP_RESET: drop both streams and force a re-prepare plus a
 * bookkeeping reset on the next access.  Always returns 0.
 */
static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	int i;

	for (i = 0; i < 2; i++) {
		substream = pcm_oss_file->streams[i];
		if (!substream)
			continue;
		runtime = substream->runtime;
		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
		runtime->oss.prepare = 1;
		runtime->oss.buffer_used = 0;
		runtime->oss.prev_hw_ptr_period = 0;
		runtime->oss.period_ptr = 0;
	}
	return 0;
}

/*
 * SNDCTL_DSP_POST: start the playback stream so already-queued data
 * begins playing.  Start errors are deliberately ignored (see below).
 */
static int snd_pcm_oss_post(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	int err;

	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
	if (substream != NULL) {
		if ((err = snd_pcm_oss_make_ready(substream)) < 0)
			return err;
		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_START, NULL);
	}
	/* note: all errors from the start action are ignored */
	/* OSS apps do not know, how to handle them */
	return 0;
}

/*
 * Flush @size bytes from the OSS staging buffer into the stream,
 * sleeping on the runtime wait queue until the write succeeds, the
 * stream leaves the RUNNING state, a signal arrives, or a 10s timeout
 * expires (reported as a DMA timeout, -EIO).
 */
static int snd_pcm_oss_sync1(struct snd_pcm_substream *substream, size_t size)
{
	struct snd_pcm_runtime *runtime;
	ssize_t result = 0;
	snd_pcm_state_t state;
	long res;
	wait_queue_t wait;

	runtime = substream->runtime;
	init_waitqueue_entry(&wait, current);
	add_wait_queue(&runtime->sleep, &wait);
#ifdef OSS_DEBUG
	/* NOTE(review): %li with a size_t argument; %zu would be the portable specifier */
	printk(KERN_DEBUG "sync1: size = %li\n", size);
#endif
	while (1) {
		result = snd_pcm_oss_write2(substream, runtime->oss.buffer, size, 1);
		if (result > 0) {
runtime->oss.buffer_used = 0; result = 0; break; } if (result != 0 && result != -EAGAIN) break; result = 0; set_current_state(TASK_INTERRUPTIBLE); snd_pcm_stream_lock_irq(substream); state = runtime->status->state; snd_pcm_stream_unlock_irq(substream); if (state != SNDRV_PCM_STATE_RUNNING) { set_current_state(TASK_RUNNING); break; } res = schedule_timeout(10 * HZ); if (signal_pending(current)) { result = -ERESTARTSYS; break; } if (res == 0) { snd_printk(KERN_ERR "OSS sync error - DMA timeout\n"); result = -EIO; break; } } remove_wait_queue(&runtime->sleep, &wait); return result; } static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) { int err = 0; unsigned int saved_f_flags; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_format_t format; unsigned long width; size_t size; substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; if (substream != NULL) { runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) goto __direct; if ((err = snd_pcm_oss_make_ready(substream)) < 0) return err; format = snd_pcm_oss_format_from(runtime->oss.format); width = snd_pcm_format_physical_width(format); mutex_lock(&runtime->oss.params_lock); if (runtime->oss.buffer_used > 0) { #ifdef OSS_DEBUG printk(KERN_DEBUG "sync: buffer_used\n"); #endif size = (8 * (runtime->oss.period_bytes - runtime->oss.buffer_used) + 7) / width; snd_pcm_format_set_silence(format, runtime->oss.buffer + runtime->oss.buffer_used, size); err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes); if (err < 0) { mutex_unlock(&runtime->oss.params_lock); return err; } } else if (runtime->oss.period_ptr > 0) { #ifdef OSS_DEBUG printk(KERN_DEBUG "sync: period_ptr\n"); #endif size = runtime->oss.period_bytes - runtime->oss.period_ptr; snd_pcm_format_set_silence(format, runtime->oss.buffer, size * 8 / width); err = snd_pcm_oss_sync1(substream, size); if (err < 0) { mutex_unlock(&runtime->oss.params_lock); return err; } } /* * The ALSA's period might be a 
bit large than OSS one. * Fill the remain portion of ALSA period with zeros. */ size = runtime->control->appl_ptr % runtime->period_size; if (size > 0) { size = runtime->period_size - size; if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED) { size = (runtime->frame_bits * size) / 8; while (size > 0) { mm_segment_t fs; size_t size1 = size < runtime->oss.period_bytes ? size : runtime->oss.period_bytes; size -= size1; size1 *= 8; size1 /= runtime->sample_bits; snd_pcm_format_set_silence(runtime->format, runtime->oss.buffer, size1); size1 /= runtime->channels; /* frames */ fs = snd_enter_user(); snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1); snd_leave_user(fs); } } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) { void __user *buffers[runtime->channels]; memset(buffers, 0, runtime->channels * sizeof(void *)); snd_pcm_lib_writev(substream, buffers, size); } } mutex_unlock(&runtime->oss.params_lock); /* * finish sync: drain the buffer */ __direct: saved_f_flags = substream->f_flags; substream->f_flags &= ~O_NONBLOCK; err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL); substream->f_flags = saved_f_flags; if (err < 0) return err; runtime->oss.prepare = 1; } substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; if (substream != NULL) { if ((err = snd_pcm_oss_make_ready(substream)) < 0) return err; runtime = substream->runtime; err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); if (err < 0) return err; runtime->oss.buffer_used = 0; runtime->oss.prepare = 1; } return 0; } static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate) { int idx; for (idx = 1; idx >= 0; --idx) { struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; struct snd_pcm_runtime *runtime; if (substream == NULL) continue; runtime = substream->runtime; if (rate < 1000) rate = 1000; else if (rate > 192000) rate = 192000; if (runtime->oss.rate != rate) { runtime->oss.params = 1; 
runtime->oss.rate = rate; } } return snd_pcm_oss_get_rate(pcm_oss_file); } static int snd_pcm_oss_get_rate(struct snd_pcm_oss_file *pcm_oss_file) { struct snd_pcm_substream *substream; int err; if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0) return err; return substream->runtime->oss.rate; } static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsigned int channels) { int idx; if (channels < 1) channels = 1; if (channels > 128) return -EINVAL; for (idx = 1; idx >= 0; --idx) { struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; struct snd_pcm_runtime *runtime; if (substream == NULL) continue; runtime = substream->runtime; if (runtime->oss.channels != channels) { runtime->oss.params = 1; runtime->oss.channels = channels; } } return snd_pcm_oss_get_channels(pcm_oss_file); } static int snd_pcm_oss_get_channels(struct snd_pcm_oss_file *pcm_oss_file) { struct snd_pcm_substream *substream; int err; if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0) return err; return substream->runtime->oss.channels; } static int snd_pcm_oss_get_block_size(struct snd_pcm_oss_file *pcm_oss_file) { struct snd_pcm_substream *substream; int err; if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0) return err; return substream->runtime->oss.period_bytes; } static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) { struct snd_pcm_substream *substream; int err; int direct; struct snd_pcm_hw_params *params; unsigned int formats = 0; struct snd_mask format_mask; int fmt; if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0) return err; if (atomic_read(&substream->mmap_count)) direct = 1; else direct = substream->oss.setup.direct; if (!direct) return AFMT_MU_LAW | AFMT_U8 | AFMT_S16_LE | AFMT_S16_BE | AFMT_S8 | AFMT_U16_LE | AFMT_U16_BE | AFMT_S32_LE | AFMT_S32_BE | AFMT_S24_LE | AFMT_S24_BE | AFMT_S24_PACKED; params = kmalloc(sizeof(*params), 
GFP_KERNEL); if (!params) return -ENOMEM; _snd_pcm_hw_params_any(params); err = snd_pcm_hw_refine(substream, params); format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); kfree(params); if (err < 0) return err; for (fmt = 0; fmt < 32; ++fmt) { if (snd_mask_test(&format_mask, fmt)) { int f = snd_pcm_oss_format_to(fmt); if (f >= 0) formats |= f; } } return formats; } static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format) { int formats, idx; if (format != AFMT_QUERY) { formats = snd_pcm_oss_get_formats(pcm_oss_file); if (formats < 0) return formats; if (!(formats & format)) format = AFMT_U8; for (idx = 1; idx >= 0; --idx) { struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; struct snd_pcm_runtime *runtime; if (substream == NULL) continue; runtime = substream->runtime; if (runtime->oss.format != format) { runtime->oss.params = 1; runtime->oss.format = format; } } } return snd_pcm_oss_get_format(pcm_oss_file); } static int snd_pcm_oss_get_format(struct snd_pcm_oss_file *pcm_oss_file) { struct snd_pcm_substream *substream; int err; if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0) return err; return substream->runtime->oss.format; } static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int subdivide) { struct snd_pcm_runtime *runtime; if (substream == NULL) return 0; runtime = substream->runtime; if (subdivide == 0) { subdivide = runtime->oss.subdivision; if (subdivide == 0) subdivide = 1; return subdivide; } if (runtime->oss.subdivision || runtime->oss.fragshift) return -EINVAL; if (subdivide != 1 && subdivide != 2 && subdivide != 4 && subdivide != 8 && subdivide != 16) return -EINVAL; runtime->oss.subdivision = subdivide; runtime->oss.params = 1; return subdivide; } static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int subdivide) { int err = -EINVAL, idx; for (idx = 1; idx >= 0; --idx) { struct snd_pcm_substream *substream = 
pcm_oss_file->streams[idx]; if (substream == NULL) continue; if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0) return err; } return err; } static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val) { struct snd_pcm_runtime *runtime; if (substream == NULL) return 0; runtime = substream->runtime; if (runtime->oss.subdivision || runtime->oss.fragshift) return -EINVAL; runtime->oss.fragshift = val & 0xffff; runtime->oss.maxfrags = (val >> 16) & 0xffff; if (runtime->oss.fragshift < 4) /* < 16 */ runtime->oss.fragshift = 4; if (runtime->oss.maxfrags < 2) runtime->oss.maxfrags = 2; runtime->oss.params = 1; return 0; } static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsigned int val) { int err = -EINVAL, idx; for (idx = 1; idx >= 0; --idx) { struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; if (substream == NULL) continue; if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0) return err; } return err; } static int snd_pcm_oss_nonblock(struct file * file) { spin_lock(&file->f_lock); file->f_flags |= O_NONBLOCK; spin_unlock(&file->f_lock); return 0; } static int snd_pcm_oss_get_caps1(struct snd_pcm_substream *substream, int res) { if (substream == NULL) { res &= ~DSP_CAP_DUPLEX; return res; } #ifdef DSP_CAP_MULTI if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) if (substream->pstr->substream_count > 1) res |= DSP_CAP_MULTI; #endif /* DSP_CAP_REALTIME is set all times: */ /* all ALSA drivers can return actual pointer in ring buffer */ #if defined(DSP_CAP_REALTIME) && 0 { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->info & (SNDRV_PCM_INFO_BLOCK_TRANSFER|SNDRV_PCM_INFO_BATCH)) res &= ~DSP_CAP_REALTIME; } #endif return res; } static int snd_pcm_oss_get_caps(struct snd_pcm_oss_file *pcm_oss_file) { int result, idx; result = DSP_CAP_TRIGGER | DSP_CAP_MMAP | DSP_CAP_DUPLEX | DSP_CAP_REALTIME; for (idx = 0; idx < 2; idx++) { struct snd_pcm_substream *substream = 
pcm_oss_file->streams[idx]; result = snd_pcm_oss_get_caps1(substream, result); } result |= 0x0001; /* revision - same as SB AWE 64 */ return result; } static void snd_pcm_oss_simulate_fill(struct snd_pcm_substream *substream, snd_pcm_uframes_t hw_ptr) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t appl_ptr; appl_ptr = hw_ptr + runtime->buffer_size; appl_ptr %= runtime->boundary; runtime->control->appl_ptr = appl_ptr; } static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int trigger) { struct snd_pcm_runtime *runtime; struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL; int err, cmd; #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: trigger = 0x%x\n", trigger); #endif psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; csubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; if (psubstream) { if ((err = snd_pcm_oss_make_ready(psubstream)) < 0) return err; } if (csubstream) { if ((err = snd_pcm_oss_make_ready(csubstream)) < 0) return err; } if (psubstream) { runtime = psubstream->runtime; if (trigger & PCM_ENABLE_OUTPUT) { if (runtime->oss.trigger) goto _skip1; if (atomic_read(&psubstream->mmap_count)) snd_pcm_oss_simulate_fill(psubstream, get_hw_ptr_period(runtime)); runtime->oss.trigger = 1; runtime->start_threshold = 1; cmd = SNDRV_PCM_IOCTL_START; } else { if (!runtime->oss.trigger) goto _skip1; runtime->oss.trigger = 0; runtime->start_threshold = runtime->boundary; cmd = SNDRV_PCM_IOCTL_DROP; runtime->oss.prepare = 1; } err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL); if (err < 0) return err; } _skip1: if (csubstream) { runtime = csubstream->runtime; if (trigger & PCM_ENABLE_INPUT) { if (runtime->oss.trigger) goto _skip2; runtime->oss.trigger = 1; runtime->start_threshold = 1; cmd = SNDRV_PCM_IOCTL_START; } else { if (!runtime->oss.trigger) goto _skip2; runtime->oss.trigger = 0; runtime->start_threshold = runtime->boundary; cmd = SNDRV_PCM_IOCTL_DROP; runtime->oss.prepare = 1; } err = 
snd_pcm_kernel_ioctl(csubstream, cmd, NULL); if (err < 0) return err; } _skip2: return 0; } static int snd_pcm_oss_get_trigger(struct snd_pcm_oss_file *pcm_oss_file) { struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL; int result = 0; psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; csubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; if (psubstream && psubstream->runtime && psubstream->runtime->oss.trigger) result |= PCM_ENABLE_OUTPUT; if (csubstream && csubstream->runtime && csubstream->runtime->oss.trigger) result |= PCM_ENABLE_INPUT; return result; } static int snd_pcm_oss_get_odelay(struct snd_pcm_oss_file *pcm_oss_file) { struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t delay; int err; substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; if (substream == NULL) return -EINVAL; if ((err = snd_pcm_oss_make_ready(substream)) < 0) return err; runtime = substream->runtime; if (runtime->oss.params || runtime->oss.prepare) return 0; err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, &delay); if (err == -EPIPE) delay = 0; /* hack for broken OSS applications */ else if (err < 0) return err; return snd_pcm_oss_bytes(substream, delay); } static int snd_pcm_oss_get_ptr(struct snd_pcm_oss_file *pcm_oss_file, int stream, struct count_info __user * _info) { struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t delay; int fixup; struct count_info info; int err; if (_info == NULL) return -EFAULT; substream = pcm_oss_file->streams[stream]; if (substream == NULL) return -EINVAL; if ((err = snd_pcm_oss_make_ready(substream)) < 0) return err; runtime = substream->runtime; if (runtime->oss.params || runtime->oss.prepare) { memset(&info, 0, sizeof(info)); if (copy_to_user(_info, &info, sizeof(info))) return -EFAULT; return 0; } if (stream == SNDRV_PCM_STREAM_PLAYBACK) { err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, &delay); if (err == -EPIPE 
|| err == -ESTRPIPE || (! err && delay < 0)) { err = 0; delay = 0; fixup = 0; } else { fixup = runtime->oss.buffer_used; } } else { err = snd_pcm_oss_capture_position_fixup(substream, &delay); fixup = -runtime->oss.buffer_used; } if (err < 0) return err; info.ptr = snd_pcm_oss_bytes(substream, runtime->status->hw_ptr % runtime->buffer_size); if (atomic_read(&substream->mmap_count)) { snd_pcm_sframes_t n; delay = get_hw_ptr_period(runtime); n = delay - runtime->oss.prev_hw_ptr_period; if (n < 0) n += runtime->boundary; info.blocks = n / runtime->period_size; runtime->oss.prev_hw_ptr_period = delay; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) snd_pcm_oss_simulate_fill(substream, delay); info.bytes = snd_pcm_oss_bytes(substream, runtime->status->hw_ptr) & INT_MAX; } else { delay = snd_pcm_oss_bytes(substream, delay); if (stream == SNDRV_PCM_STREAM_PLAYBACK) { if (substream->oss.setup.buggyptr) info.blocks = (runtime->oss.buffer_bytes - delay - fixup) / runtime->oss.period_bytes; else info.blocks = (delay + fixup) / runtime->oss.period_bytes; info.bytes = (runtime->oss.bytes - delay) & INT_MAX; } else { delay += fixup; info.blocks = delay / runtime->oss.period_bytes; info.bytes = (runtime->oss.bytes + delay) & INT_MAX; } } if (copy_to_user(_info, &info, sizeof(info))) return -EFAULT; return 0; } static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stream, struct audio_buf_info __user *_info) { struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t avail; int fixup; struct audio_buf_info info; int err; if (_info == NULL) return -EFAULT; substream = pcm_oss_file->streams[stream]; if (substream == NULL) return -EINVAL; runtime = substream->runtime; if (runtime->oss.params && (err = snd_pcm_oss_change_params(substream)) < 0) return err; info.fragsize = runtime->oss.period_bytes; info.fragstotal = runtime->periods; if (runtime->oss.prepare) { if (stream == SNDRV_PCM_STREAM_PLAYBACK) { info.bytes = 
runtime->oss.period_bytes * runtime->oss.periods; info.fragments = runtime->oss.periods; } else { info.bytes = 0; info.fragments = 0; } } else { if (stream == SNDRV_PCM_STREAM_PLAYBACK) { err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, &avail); if (err == -EPIPE || err == -ESTRPIPE || (! err && avail < 0)) { avail = runtime->buffer_size; err = 0; fixup = 0; } else { avail = runtime->buffer_size - avail; fixup = -runtime->oss.buffer_used; } } else { err = snd_pcm_oss_capture_position_fixup(substream, &avail); fixup = runtime->oss.buffer_used; } if (err < 0) return err; info.bytes = snd_pcm_oss_bytes(substream, avail) + fixup; info.fragments = info.bytes / runtime->oss.period_bytes; } #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: space: bytes = %i, fragments = %i, " "fragstotal = %i, fragsize = %i\n", info.bytes, info.fragments, info.fragstotal, info.fragsize); #endif if (copy_to_user(_info, &info, sizeof(info))) return -EFAULT; return 0; } static int snd_pcm_oss_get_mapbuf(struct snd_pcm_oss_file *pcm_oss_file, int stream, struct buffmem_desc __user * _info) { // it won't be probably implemented // snd_printd("TODO: snd_pcm_oss_get_mapbuf\n"); return -EINVAL; } static const char *strip_task_path(const char *path) { const char *ptr, *ptrl = NULL; for (ptr = path; *ptr; ptr++) { if (*ptr == '/') ptrl = ptr + 1; } return ptrl; } static void snd_pcm_oss_look_for_setup(struct snd_pcm *pcm, int stream, const char *task_name, struct snd_pcm_oss_setup *rsetup) { struct snd_pcm_oss_setup *setup; mutex_lock(&pcm->streams[stream].oss.setup_mutex); do { for (setup = pcm->streams[stream].oss.setup_list; setup; setup = setup->next) { if (!strcmp(setup->task_name, task_name)) goto out; } } while ((task_name = strip_task_path(task_name)) != NULL); out: if (setup) *rsetup = *setup; mutex_unlock(&pcm->streams[stream].oss.setup_mutex); } static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; runtime = 
substream->runtime; vfree(runtime->oss.buffer); runtime->oss.buffer = NULL; #ifdef CONFIG_SND_PCM_OSS_PLUGINS snd_pcm_oss_plugin_clear(substream); #endif substream->oss.oss = 0; } static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream, struct snd_pcm_oss_setup *setup, int minor) { struct snd_pcm_runtime *runtime; substream->oss.oss = 1; substream->oss.setup = *setup; if (setup->nonblock) substream->f_flags |= O_NONBLOCK; else if (setup->block) substream->f_flags &= ~O_NONBLOCK; runtime = substream->runtime; runtime->oss.params = 1; runtime->oss.trigger = 1; runtime->oss.rate = 8000; mutex_init(&runtime->oss.params_lock); switch (SNDRV_MINOR_OSS_DEVICE(minor)) { case SNDRV_MINOR_OSS_PCM_8: runtime->oss.format = AFMT_U8; break; case SNDRV_MINOR_OSS_PCM_16: runtime->oss.format = AFMT_S16_LE; break; default: runtime->oss.format = AFMT_MU_LAW; } runtime->oss.channels = 1; runtime->oss.fragshift = 0; runtime->oss.maxfrags = 0; runtime->oss.subdivision = 0; substream->pcm_release = snd_pcm_oss_release_substream; } static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file) { int cidx; if (!pcm_oss_file) return 0; for (cidx = 0; cidx < 2; ++cidx) { struct snd_pcm_substream *substream = pcm_oss_file->streams[cidx]; if (substream) snd_pcm_release_substream(substream); } kfree(pcm_oss_file); return 0; } static int snd_pcm_oss_open_file(struct file *file, struct snd_pcm *pcm, struct snd_pcm_oss_file **rpcm_oss_file, int minor, struct snd_pcm_oss_setup *setup) { int idx, err; struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_substream *substream; fmode_t f_mode = file->f_mode; if (rpcm_oss_file) *rpcm_oss_file = NULL; pcm_oss_file = kzalloc(sizeof(*pcm_oss_file), GFP_KERNEL); if (pcm_oss_file == NULL) return -ENOMEM; if ((f_mode & (FMODE_WRITE|FMODE_READ)) == (FMODE_WRITE|FMODE_READ) && (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX)) f_mode = FMODE_WRITE; file->f_flags &= ~O_APPEND; for (idx = 0; idx < 2; idx++) { if (setup[idx].disable) 
continue; if (! pcm->streams[idx].substream_count) continue; /* no matching substream */ if (idx == SNDRV_PCM_STREAM_PLAYBACK) { if (! (f_mode & FMODE_WRITE)) continue; } else { if (! (f_mode & FMODE_READ)) continue; } err = snd_pcm_open_substream(pcm, idx, file, &substream); if (err < 0) { snd_pcm_oss_release_file(pcm_oss_file); return err; } pcm_oss_file->streams[idx] = substream; substream->file = pcm_oss_file; snd_pcm_oss_init_substream(substream, &setup[idx], minor); } if (!pcm_oss_file->streams[0] && !pcm_oss_file->streams[1]) { snd_pcm_oss_release_file(pcm_oss_file); return -EINVAL; } file->private_data = pcm_oss_file; if (rpcm_oss_file) *rpcm_oss_file = pcm_oss_file; return 0; } static int snd_task_name(struct task_struct *task, char *name, size_t size) { unsigned int idx; if (snd_BUG_ON(!task || !name || size < 2)) return -EINVAL; for (idx = 0; idx < sizeof(task->comm) && idx + 1 < size; idx++) name[idx] = task->comm[idx]; name[idx] = '\0'; return 0; } static int snd_pcm_oss_open(struct inode *inode, struct file *file) { int err; char task_name[32]; struct snd_pcm *pcm; struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_oss_setup setup[2]; int nonblock; wait_queue_t wait; err = nonseekable_open(inode, file); if (err < 0) return err; pcm = snd_lookup_oss_minor_data(iminor(inode), SNDRV_OSS_DEVICE_TYPE_PCM); if (pcm == NULL) { err = -ENODEV; goto __error1; } err = snd_card_file_add(pcm->card, file); if (err < 0) goto __error1; if (!try_module_get(pcm->card->module)) { err = -EFAULT; goto __error2; } if (snd_task_name(current, task_name, sizeof(task_name)) < 0) { err = -EFAULT; goto __error; } memset(setup, 0, sizeof(setup)); if (file->f_mode & FMODE_WRITE) snd_pcm_oss_look_for_setup(pcm, SNDRV_PCM_STREAM_PLAYBACK, task_name, &setup[0]); if (file->f_mode & FMODE_READ) snd_pcm_oss_look_for_setup(pcm, SNDRV_PCM_STREAM_CAPTURE, task_name, &setup[1]); nonblock = !!(file->f_flags & O_NONBLOCK); if (!nonblock) nonblock = nonblock_open; 
init_waitqueue_entry(&wait, current); add_wait_queue(&pcm->open_wait, &wait); mutex_lock(&pcm->open_mutex); while (1) { err = snd_pcm_oss_open_file(file, pcm, &pcm_oss_file, iminor(inode), setup); if (err >= 0) break; if (err == -EAGAIN) { if (nonblock) { err = -EBUSY; break; } } else break; set_current_state(TASK_INTERRUPTIBLE); mutex_unlock(&pcm->open_mutex); schedule(); mutex_lock(&pcm->open_mutex); if (signal_pending(current)) { err = -ERESTARTSYS; break; } } remove_wait_queue(&pcm->open_wait, &wait); mutex_unlock(&pcm->open_mutex); if (err < 0) goto __error; return err; __error: module_put(pcm->card->module); __error2: snd_card_file_remove(pcm->card, file); __error1: return err; } static int snd_pcm_oss_release(struct inode *inode, struct file *file) { struct snd_pcm *pcm; struct snd_pcm_substream *substream; struct snd_pcm_oss_file *pcm_oss_file; pcm_oss_file = file->private_data; substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; if (substream == NULL) substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; if (snd_BUG_ON(!substream)) return -ENXIO; pcm = substream->pcm; if (!pcm->card->shutdown) snd_pcm_oss_sync(pcm_oss_file); mutex_lock(&pcm->open_mutex); snd_pcm_oss_release_file(pcm_oss_file); mutex_unlock(&pcm->open_mutex); wake_up(&pcm->open_wait); module_put(pcm->card->module); snd_card_file_remove(pcm->card, file); return 0; } static long snd_pcm_oss_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_pcm_oss_file *pcm_oss_file; int __user *p = (int __user *)arg; int res; pcm_oss_file = file->private_data; if (cmd == OSS_GETVERSION) return put_user(SNDRV_OSS_VERSION, p); if (cmd == OSS_ALSAEMULVER) return put_user(1, p); #if defined(CONFIG_SND_MIXER_OSS) || (defined(MODULE) && defined(CONFIG_SND_MIXER_OSS_MODULE)) if (((cmd >> 8) & 0xff) == 'M') { /* mixer ioctl - for OSS compatibility */ struct snd_pcm_substream *substream; int idx; for (idx = 0; idx < 2; ++idx) { substream = pcm_oss_file->streams[idx]; if 
(substream != NULL) break; } if (snd_BUG_ON(idx >= 2)) return -ENXIO; return snd_mixer_oss_ioctl_card(substream->pcm->card, cmd, arg); } #endif if (((cmd >> 8) & 0xff) != 'P') return -EINVAL; #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: ioctl = 0x%x\n", cmd); #endif switch (cmd) { case SNDCTL_DSP_RESET: return snd_pcm_oss_reset(pcm_oss_file); case SNDCTL_DSP_SYNC: return snd_pcm_oss_sync(pcm_oss_file); case SNDCTL_DSP_SPEED: if (get_user(res, p)) return -EFAULT; if ((res = snd_pcm_oss_set_rate(pcm_oss_file, res))<0) return res; return put_user(res, p); case SOUND_PCM_READ_RATE: res = snd_pcm_oss_get_rate(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_STEREO: if (get_user(res, p)) return -EFAULT; res = res > 0 ? 2 : 1; if ((res = snd_pcm_oss_set_channels(pcm_oss_file, res)) < 0) return res; return put_user(--res, p); case SNDCTL_DSP_GETBLKSIZE: res = snd_pcm_oss_get_block_size(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_SETFMT: if (get_user(res, p)) return -EFAULT; res = snd_pcm_oss_set_format(pcm_oss_file, res); if (res < 0) return res; return put_user(res, p); case SOUND_PCM_READ_BITS: res = snd_pcm_oss_get_format(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_CHANNELS: if (get_user(res, p)) return -EFAULT; res = snd_pcm_oss_set_channels(pcm_oss_file, res); if (res < 0) return res; return put_user(res, p); case SOUND_PCM_READ_CHANNELS: res = snd_pcm_oss_get_channels(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SOUND_PCM_WRITE_FILTER: case SOUND_PCM_READ_FILTER: return -EIO; case SNDCTL_DSP_POST: return snd_pcm_oss_post(pcm_oss_file); case SNDCTL_DSP_SUBDIVIDE: if (get_user(res, p)) return -EFAULT; res = snd_pcm_oss_set_subdivide(pcm_oss_file, res); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_SETFRAGMENT: if (get_user(res, p)) return -EFAULT; return snd_pcm_oss_set_fragment(pcm_oss_file, res); case 
SNDCTL_DSP_GETFMTS: res = snd_pcm_oss_get_formats(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_GETOSPACE: case SNDCTL_DSP_GETISPACE: return snd_pcm_oss_get_space(pcm_oss_file, cmd == SNDCTL_DSP_GETISPACE ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK, (struct audio_buf_info __user *) arg); case SNDCTL_DSP_NONBLOCK: return snd_pcm_oss_nonblock(file); case SNDCTL_DSP_GETCAPS: res = snd_pcm_oss_get_caps(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_GETTRIGGER: res = snd_pcm_oss_get_trigger(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_SETTRIGGER: if (get_user(res, p)) return -EFAULT; return snd_pcm_oss_set_trigger(pcm_oss_file, res); case SNDCTL_DSP_GETIPTR: case SNDCTL_DSP_GETOPTR: return snd_pcm_oss_get_ptr(pcm_oss_file, cmd == SNDCTL_DSP_GETIPTR ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK, (struct count_info __user *) arg); case SNDCTL_DSP_MAPINBUF: case SNDCTL_DSP_MAPOUTBUF: return snd_pcm_oss_get_mapbuf(pcm_oss_file, cmd == SNDCTL_DSP_MAPINBUF ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK, (struct buffmem_desc __user *) arg); case SNDCTL_DSP_SETSYNCRO: /* stop DMA now.. 
*/ return 0; case SNDCTL_DSP_SETDUPLEX: if (snd_pcm_oss_get_caps(pcm_oss_file) & DSP_CAP_DUPLEX) return 0; return -EIO; case SNDCTL_DSP_GETODELAY: res = snd_pcm_oss_get_odelay(pcm_oss_file); if (res < 0) { /* it's for sure, some broken apps don't check for error codes */ put_user(0, p); return res; } return put_user(res, p); case SNDCTL_DSP_PROFILE: return 0; /* silently ignore */ default: snd_printd("pcm_oss: unknown command = 0x%x\n", cmd); } return -EINVAL; } #ifdef CONFIG_COMPAT /* all compatible */ #define snd_pcm_oss_ioctl_compat snd_pcm_oss_ioctl #else #define snd_pcm_oss_ioctl_compat NULL #endif static ssize_t snd_pcm_oss_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_substream *substream; pcm_oss_file = file->private_data; substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; if (substream == NULL) return -ENXIO; substream->f_flags = file->f_flags & O_NONBLOCK; #ifndef OSS_DEBUG return snd_pcm_oss_read1(substream, buf, count); #else { ssize_t res = snd_pcm_oss_read1(substream, buf, count); printk(KERN_DEBUG "pcm_oss: read %li bytes " "(returned %li bytes)\n", (long)count, (long)res); return res; } #endif } static ssize_t snd_pcm_oss_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_substream *substream; long result; pcm_oss_file = file->private_data; substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; if (substream == NULL) return -ENXIO; substream->f_flags = file->f_flags & O_NONBLOCK; result = snd_pcm_oss_write1(substream, buf, count); #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: write %li bytes (wrote %li bytes)\n", (long)count, (long)result); #endif return result; } static int snd_pcm_oss_playback_ready(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) return runtime->oss.prev_hw_ptr_period 
!= get_hw_ptr_period(runtime); else return snd_pcm_playback_avail(runtime) >= runtime->oss.period_frames; } static int snd_pcm_oss_capture_ready(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) return runtime->oss.prev_hw_ptr_period != get_hw_ptr_period(runtime); else return snd_pcm_capture_avail(runtime) >= runtime->oss.period_frames; } static unsigned int snd_pcm_oss_poll(struct file *file, poll_table * wait) { struct snd_pcm_oss_file *pcm_oss_file; unsigned int mask; struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL; pcm_oss_file = file->private_data; psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; csubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; mask = 0; if (psubstream != NULL) { struct snd_pcm_runtime *runtime = psubstream->runtime; poll_wait(file, &runtime->sleep, wait); snd_pcm_stream_lock_irq(psubstream); if (runtime->status->state != SNDRV_PCM_STATE_DRAINING && (runtime->status->state != SNDRV_PCM_STATE_RUNNING || snd_pcm_oss_playback_ready(psubstream))) mask |= POLLOUT | POLLWRNORM; snd_pcm_stream_unlock_irq(psubstream); } if (csubstream != NULL) { struct snd_pcm_runtime *runtime = csubstream->runtime; snd_pcm_state_t ostate; poll_wait(file, &runtime->sleep, wait); snd_pcm_stream_lock_irq(csubstream); if ((ostate = runtime->status->state) != SNDRV_PCM_STATE_RUNNING || snd_pcm_oss_capture_ready(csubstream)) mask |= POLLIN | POLLRDNORM; snd_pcm_stream_unlock_irq(csubstream); if (ostate != SNDRV_PCM_STATE_RUNNING && runtime->oss.trigger) { struct snd_pcm_oss_file ofile; memset(&ofile, 0, sizeof(ofile)); ofile.streams[SNDRV_PCM_STREAM_CAPTURE] = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; runtime->oss.trigger = 0; snd_pcm_oss_set_trigger(&ofile, PCM_ENABLE_INPUT); } } return mask; } static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area) { struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_substream 
*substream = NULL; struct snd_pcm_runtime *runtime; int err; #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: mmap begin\n"); #endif pcm_oss_file = file->private_data; switch ((area->vm_flags & (VM_READ | VM_WRITE))) { case VM_READ | VM_WRITE: substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; if (substream) break; /* Fall through */ case VM_READ: substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; break; case VM_WRITE: substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; break; default: return -EINVAL; } /* set VM_READ access as well to fix memset() routines that do reads before writes (to improve performance) */ area->vm_flags |= VM_READ; if (substream == NULL) return -ENXIO; runtime = substream->runtime; if (!(runtime->info & SNDRV_PCM_INFO_MMAP_VALID)) return -EIO; if (runtime->info & SNDRV_PCM_INFO_INTERLEAVED) runtime->access = SNDRV_PCM_ACCESS_MMAP_INTERLEAVED; else return -EIO; if (runtime->oss.params) { if ((err = snd_pcm_oss_change_params(substream)) < 0) return err; } #ifdef CONFIG_SND_PCM_OSS_PLUGINS if (runtime->oss.plugin_first != NULL) return -EIO; #endif if (area->vm_pgoff != 0) return -EINVAL; err = snd_pcm_mmap_data(substream, file, area); if (err < 0) return err; runtime->oss.mmap_bytes = area->vm_end - area->vm_start; runtime->silence_threshold = 0; runtime->silence_size = 0; #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: mmap ok, bytes = 0x%x\n", runtime->oss.mmap_bytes); #endif /* In mmap mode we never stop */ runtime->stop_threshold = runtime->boundary; return 0; } #ifdef CONFIG_SND_VERBOSE_PROCFS /* * /proc interface */ static void snd_pcm_oss_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_str *pstr = entry->private_data; struct snd_pcm_oss_setup *setup = pstr->oss.setup_list; mutex_lock(&pstr->oss.setup_mutex); while (setup) { snd_iprintf(buffer, "%s %u %u%s%s%s%s%s%s\n", setup->task_name, setup->periods, setup->period_size, setup->disable ? " disable" : "", setup->direct ? 
" direct" : "", setup->block ? " block" : "", setup->nonblock ? " non-block" : "", setup->partialfrag ? " partial-frag" : "", setup->nosilence ? " no-silence" : ""); setup = setup->next; } mutex_unlock(&pstr->oss.setup_mutex); } static void snd_pcm_oss_proc_free_setup_list(struct snd_pcm_str * pstr) { struct snd_pcm_oss_setup *setup, *setupn; for (setup = pstr->oss.setup_list, pstr->oss.setup_list = NULL; setup; setup = setupn) { setupn = setup->next; kfree(setup->task_name); kfree(setup); } pstr->oss.setup_list = NULL; } static void snd_pcm_oss_proc_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_str *pstr = entry->private_data; char line[128], str[32], task_name[32]; const char *ptr; int idx1; struct snd_pcm_oss_setup *setup, *setup1, template; while (!snd_info_get_line(buffer, line, sizeof(line))) { mutex_lock(&pstr->oss.setup_mutex); memset(&template, 0, sizeof(template)); ptr = snd_info_get_str(task_name, line, sizeof(task_name)); if (!strcmp(task_name, "clear") || !strcmp(task_name, "erase")) { snd_pcm_oss_proc_free_setup_list(pstr); mutex_unlock(&pstr->oss.setup_mutex); continue; } for (setup = pstr->oss.setup_list; setup; setup = setup->next) { if (!strcmp(setup->task_name, task_name)) { template = *setup; break; } } ptr = snd_info_get_str(str, ptr, sizeof(str)); template.periods = simple_strtoul(str, NULL, 10); ptr = snd_info_get_str(str, ptr, sizeof(str)); template.period_size = simple_strtoul(str, NULL, 10); for (idx1 = 31; idx1 >= 0; idx1--) if (template.period_size & (1 << idx1)) break; for (idx1--; idx1 >= 0; idx1--) template.period_size &= ~(1 << idx1); do { ptr = snd_info_get_str(str, ptr, sizeof(str)); if (!strcmp(str, "disable")) { template.disable = 1; } else if (!strcmp(str, "direct")) { template.direct = 1; } else if (!strcmp(str, "block")) { template.block = 1; } else if (!strcmp(str, "non-block")) { template.nonblock = 1; } else if (!strcmp(str, "partial-frag")) { template.partialfrag = 1; } else if 
(!strcmp(str, "no-silence")) { template.nosilence = 1; } else if (!strcmp(str, "buggy-ptr")) { template.buggyptr = 1; } } while (*str); if (setup == NULL) { setup = kmalloc(sizeof(*setup), GFP_KERNEL); if (! setup) { buffer->error = -ENOMEM; mutex_unlock(&pstr->oss.setup_mutex); return; } if (pstr->oss.setup_list == NULL) pstr->oss.setup_list = setup; else { for (setup1 = pstr->oss.setup_list; setup1->next; setup1 = setup1->next); setup1->next = setup; } template.task_name = kstrdup(task_name, GFP_KERNEL); if (! template.task_name) { kfree(setup); buffer->error = -ENOMEM; mutex_unlock(&pstr->oss.setup_mutex); return; } } *setup = template; mutex_unlock(&pstr->oss.setup_mutex); } } static void snd_pcm_oss_proc_init(struct snd_pcm *pcm) { int stream; for (stream = 0; stream < 2; ++stream) { struct snd_info_entry *entry; struct snd_pcm_str *pstr = &pcm->streams[stream]; if (pstr->substream_count == 0) continue; if ((entry = snd_info_create_card_entry(pcm->card, "oss", pstr->proc_root)) != NULL) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->mode = S_IFREG | S_IRUGO | S_IWUSR; entry->c.text.read = snd_pcm_oss_proc_read; entry->c.text.write = snd_pcm_oss_proc_write; entry->private_data = pstr; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } pstr->oss.proc_entry = entry; } } static void snd_pcm_oss_proc_done(struct snd_pcm *pcm) { int stream; for (stream = 0; stream < 2; ++stream) { struct snd_pcm_str *pstr = &pcm->streams[stream]; snd_info_free_entry(pstr->oss.proc_entry); pstr->oss.proc_entry = NULL; snd_pcm_oss_proc_free_setup_list(pstr); } } #else /* !CONFIG_SND_VERBOSE_PROCFS */ #define snd_pcm_oss_proc_init(pcm) #define snd_pcm_oss_proc_done(pcm) #endif /* CONFIG_SND_VERBOSE_PROCFS */ /* * ENTRY functions */ static const struct file_operations snd_pcm_oss_f_reg = { .owner = THIS_MODULE, .read = snd_pcm_oss_read, .write = snd_pcm_oss_write, .open = snd_pcm_oss_open, .release = snd_pcm_oss_release, .llseek = no_llseek, .poll = 
snd_pcm_oss_poll, .unlocked_ioctl = snd_pcm_oss_ioctl, .compat_ioctl = snd_pcm_oss_ioctl_compat, .mmap = snd_pcm_oss_mmap, }; static void register_oss_dsp(struct snd_pcm *pcm, int index) { char name[128]; sprintf(name, "dsp%i%i", pcm->card->number, pcm->device); if (snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_PCM, pcm->card, index, &snd_pcm_oss_f_reg, pcm, name) < 0) { snd_printk(KERN_ERR "unable to register OSS PCM device %i:%i\n", pcm->card->number, pcm->device); } } static int snd_pcm_oss_register_minor(struct snd_pcm *pcm) { pcm->oss.reg = 0; if (dsp_map[pcm->card->number] == (int)pcm->device) { char name[128]; int duplex; register_oss_dsp(pcm, 0); duplex = (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream_count > 0 && pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream_count && !(pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX)); sprintf(name, "%s%s", pcm->name, duplex ? " (DUPLEX)" : ""); #ifdef SNDRV_OSS_INFO_DEV_AUDIO snd_oss_info_register(SNDRV_OSS_INFO_DEV_AUDIO, pcm->card->number, name); #endif pcm->oss.reg++; pcm->oss.reg_mask |= 1; } if (adsp_map[pcm->card->number] == (int)pcm->device) { register_oss_dsp(pcm, 1); pcm->oss.reg++; pcm->oss.reg_mask |= 2; } if (pcm->oss.reg) snd_pcm_oss_proc_init(pcm); return 0; } static int snd_pcm_oss_disconnect_minor(struct snd_pcm *pcm) { if (pcm->oss.reg) { if (pcm->oss.reg_mask & 1) { pcm->oss.reg_mask &= ~1; snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_PCM, pcm->card, 0); } if (pcm->oss.reg_mask & 2) { pcm->oss.reg_mask &= ~2; snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_PCM, pcm->card, 1); } if (dsp_map[pcm->card->number] == (int)pcm->device) { #ifdef SNDRV_OSS_INFO_DEV_AUDIO snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_AUDIO, pcm->card->number); #endif } pcm->oss.reg = 0; } return 0; } static int snd_pcm_oss_unregister_minor(struct snd_pcm *pcm) { snd_pcm_oss_disconnect_minor(pcm); snd_pcm_oss_proc_done(pcm); return 0; } static struct snd_pcm_notify snd_pcm_oss_notify = { .n_register = 
snd_pcm_oss_register_minor, .n_disconnect = snd_pcm_oss_disconnect_minor, .n_unregister = snd_pcm_oss_unregister_minor, }; static int __init alsa_pcm_oss_init(void) { int i; int err; /* check device map table */ for (i = 0; i < SNDRV_CARDS; i++) { if (dsp_map[i] < 0 || dsp_map[i] >= SNDRV_PCM_DEVICES) { snd_printk(KERN_ERR "invalid dsp_map[%d] = %d\n", i, dsp_map[i]); dsp_map[i] = 0; } if (adsp_map[i] < 0 || adsp_map[i] >= SNDRV_PCM_DEVICES) { snd_printk(KERN_ERR "invalid adsp_map[%d] = %d\n", i, adsp_map[i]); adsp_map[i] = 1; } } if ((err = snd_pcm_notify(&snd_pcm_oss_notify, 0)) < 0) return err; return 0; } static void __exit alsa_pcm_oss_exit(void) { snd_pcm_notify(&snd_pcm_oss_notify, 1); } module_init(alsa_pcm_oss_init) module_exit(alsa_pcm_oss_exit)
gpl-2.0
pablohaylan/I9500_Stock_Kernel_KK_4.4.2
drivers/rtc/rtc-isl1208.c
3414
17752
/* * Intersil ISL1208 rtc class driver * * Copyright 2005,2006 Hebert Valerio Riedel <hvr@gnu.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/bcd.h> #include <linux/rtc.h> #define DRV_VERSION "0.3" /* Register map */ /* rtc section */ #define ISL1208_REG_SC 0x00 #define ISL1208_REG_MN 0x01 #define ISL1208_REG_HR 0x02 #define ISL1208_REG_HR_MIL (1<<7) /* 24h/12h mode */ #define ISL1208_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */ #define ISL1208_REG_DT 0x03 #define ISL1208_REG_MO 0x04 #define ISL1208_REG_YR 0x05 #define ISL1208_REG_DW 0x06 #define ISL1208_RTC_SECTION_LEN 7 /* control/status section */ #define ISL1208_REG_SR 0x07 #define ISL1208_REG_SR_ARST (1<<7) /* auto reset */ #define ISL1208_REG_SR_XTOSCB (1<<6) /* crystal oscillator */ #define ISL1208_REG_SR_WRTC (1<<4) /* write rtc */ #define ISL1208_REG_SR_ALM (1<<2) /* alarm */ #define ISL1208_REG_SR_BAT (1<<1) /* battery */ #define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */ #define ISL1208_REG_INT 0x08 #define ISL1208_REG_INT_ALME (1<<6) /* alarm enable */ #define ISL1208_REG_INT_IM (1<<7) /* interrupt/alarm mode */ #define ISL1208_REG_09 0x09 /* reserved */ #define ISL1208_REG_ATR 0x0a #define ISL1208_REG_DTR 0x0b /* alarm section */ #define ISL1208_REG_SCA 0x0c #define ISL1208_REG_MNA 0x0d #define ISL1208_REG_HRA 0x0e #define ISL1208_REG_DTA 0x0f #define ISL1208_REG_MOA 0x10 #define ISL1208_REG_DWA 0x11 #define ISL1208_ALARM_SECTION_LEN 6 /* user section */ #define ISL1208_REG_USR1 0x12 #define ISL1208_REG_USR2 0x13 #define ISL1208_USR_SECTION_LEN 2 static struct i2c_driver isl1208_driver; /* block read */ static int isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[], unsigned len) { u8 reg_addr[1] = { reg }; struct 
i2c_msg msgs[2] = { {client->addr, 0, sizeof(reg_addr), reg_addr} , {client->addr, I2C_M_RD, len, buf} }; int ret; BUG_ON(reg > ISL1208_REG_USR2); BUG_ON(reg + len > ISL1208_REG_USR2 + 1); ret = i2c_transfer(client->adapter, msgs, 2); if (ret > 0) ret = 0; return ret; } /* block write */ static int isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[], unsigned len) { u8 i2c_buf[ISL1208_REG_USR2 + 2]; struct i2c_msg msgs[1] = { {client->addr, 0, len + 1, i2c_buf} }; int ret; BUG_ON(reg > ISL1208_REG_USR2); BUG_ON(reg + len > ISL1208_REG_USR2 + 1); i2c_buf[0] = reg; memcpy(&i2c_buf[1], &buf[0], len); ret = i2c_transfer(client->adapter, msgs, 1); if (ret > 0) ret = 0; return ret; } /* simple check to see wether we have a isl1208 */ static int isl1208_i2c_validate_client(struct i2c_client *client) { u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; u8 zero_mask[ISL1208_RTC_SECTION_LEN] = { 0x80, 0x80, 0x40, 0xc0, 0xe0, 0x00, 0xf8 }; int i; int ret; ret = isl1208_i2c_read_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN); if (ret < 0) return ret; for (i = 0; i < ISL1208_RTC_SECTION_LEN; ++i) { if (regs[i] & zero_mask[i]) /* check if bits are cleared */ return -ENODEV; } return 0; } static int isl1208_i2c_get_sr(struct i2c_client *client) { int sr = i2c_smbus_read_byte_data(client, ISL1208_REG_SR); if (sr < 0) return -EIO; return sr; } static int isl1208_i2c_get_atr(struct i2c_client *client) { int atr = i2c_smbus_read_byte_data(client, ISL1208_REG_ATR); if (atr < 0) return atr; /* The 6bit value in the ATR register controls the load * capacitance C_load * in steps of 0.25pF * * bit (1<<5) of the ATR register is inverted * * C_load(ATR=0x20) = 4.50pF * C_load(ATR=0x00) = 12.50pF * C_load(ATR=0x1f) = 20.25pF * */ atr &= 0x3f; /* mask out lsb */ atr ^= 1 << 5; /* invert 6th bit */ atr += 2 * 9; /* add offset of 4.5pF; unit[atr] = 0.25pF */ return atr; } static int isl1208_i2c_get_dtr(struct i2c_client *client) { int dtr = i2c_smbus_read_byte_data(client, 
ISL1208_REG_DTR); if (dtr < 0) return -EIO; /* dtr encodes adjustments of {-60,-40,-20,0,20,40,60} ppm */ dtr = ((dtr & 0x3) * 20) * (dtr & (1 << 2) ? -1 : 1); return dtr; } static int isl1208_i2c_get_usr(struct i2c_client *client) { u8 buf[ISL1208_USR_SECTION_LEN] = { 0, }; int ret; ret = isl1208_i2c_read_regs(client, ISL1208_REG_USR1, buf, ISL1208_USR_SECTION_LEN); if (ret < 0) return ret; return (buf[1] << 8) | buf[0]; } static int isl1208_i2c_set_usr(struct i2c_client *client, u16 usr) { u8 buf[ISL1208_USR_SECTION_LEN]; buf[0] = usr & 0xff; buf[1] = (usr >> 8) & 0xff; return isl1208_i2c_set_regs(client, ISL1208_REG_USR1, buf, ISL1208_USR_SECTION_LEN); } static int isl1208_rtc_toggle_alarm(struct i2c_client *client, int enable) { int icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT); if (icr < 0) { dev_err(&client->dev, "%s: reading INT failed\n", __func__); return icr; } if (enable) icr |= ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM; else icr &= ~(ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM); icr = i2c_smbus_write_byte_data(client, ISL1208_REG_INT, icr); if (icr < 0) { dev_err(&client->dev, "%s: writing INT failed\n", __func__); return icr; } return 0; } static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq) { struct i2c_client *const client = to_i2c_client(dev); int sr, dtr, atr, usr; sr = isl1208_i2c_get_sr(client); if (sr < 0) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return sr; } seq_printf(seq, "status_reg\t:%s%s%s%s%s%s (0x%.2x)\n", (sr & ISL1208_REG_SR_RTCF) ? " RTCF" : "", (sr & ISL1208_REG_SR_BAT) ? " BAT" : "", (sr & ISL1208_REG_SR_ALM) ? " ALM" : "", (sr & ISL1208_REG_SR_WRTC) ? " WRTC" : "", (sr & ISL1208_REG_SR_XTOSCB) ? " XTOSCB" : "", (sr & ISL1208_REG_SR_ARST) ? " ARST" : "", sr); seq_printf(seq, "batt_status\t: %s\n", (sr & ISL1208_REG_SR_RTCF) ? 
"bad" : "okay"); dtr = isl1208_i2c_get_dtr(client); if (dtr >= 0 - 1) seq_printf(seq, "digital_trim\t: %d ppm\n", dtr); atr = isl1208_i2c_get_atr(client); if (atr >= 0) seq_printf(seq, "analog_trim\t: %d.%.2d pF\n", atr >> 2, (atr & 0x3) * 25); usr = isl1208_i2c_get_usr(client); if (usr >= 0) seq_printf(seq, "user_data\t: 0x%.4x\n", usr); return 0; } static int isl1208_i2c_read_time(struct i2c_client *client, struct rtc_time *tm) { int sr; u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; sr = isl1208_i2c_get_sr(client); if (sr < 0) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return -EIO; } sr = isl1208_i2c_read_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN); if (sr < 0) { dev_err(&client->dev, "%s: reading RTC section failed\n", __func__); return sr; } tm->tm_sec = bcd2bin(regs[ISL1208_REG_SC]); tm->tm_min = bcd2bin(regs[ISL1208_REG_MN]); /* HR field has a more complex interpretation */ { const u8 _hr = regs[ISL1208_REG_HR]; if (_hr & ISL1208_REG_HR_MIL) /* 24h format */ tm->tm_hour = bcd2bin(_hr & 0x3f); else { /* 12h format */ tm->tm_hour = bcd2bin(_hr & 0x1f); if (_hr & ISL1208_REG_HR_PM) /* PM flag set */ tm->tm_hour += 12; } } tm->tm_mday = bcd2bin(regs[ISL1208_REG_DT]); tm->tm_mon = bcd2bin(regs[ISL1208_REG_MO]) - 1; /* rtc starts at 1 */ tm->tm_year = bcd2bin(regs[ISL1208_REG_YR]) + 100; tm->tm_wday = bcd2bin(regs[ISL1208_REG_DW]); return 0; } static int isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm) { struct rtc_time *const tm = &alarm->time; u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, }; int icr, yr, sr = isl1208_i2c_get_sr(client); if (sr < 0) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return sr; } sr = isl1208_i2c_read_regs(client, ISL1208_REG_SCA, regs, ISL1208_ALARM_SECTION_LEN); if (sr < 0) { dev_err(&client->dev, "%s: reading alarm section failed\n", __func__); return sr; } /* MSB of each alarm register is an enable bit */ tm->tm_sec = bcd2bin(regs[ISL1208_REG_SCA - ISL1208_REG_SCA] & 0x7f); 
tm->tm_min = bcd2bin(regs[ISL1208_REG_MNA - ISL1208_REG_SCA] & 0x7f); tm->tm_hour = bcd2bin(regs[ISL1208_REG_HRA - ISL1208_REG_SCA] & 0x3f); tm->tm_mday = bcd2bin(regs[ISL1208_REG_DTA - ISL1208_REG_SCA] & 0x3f); tm->tm_mon = bcd2bin(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1; tm->tm_wday = bcd2bin(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03); /* The alarm doesn't store the year so get it from the rtc section */ yr = i2c_smbus_read_byte_data(client, ISL1208_REG_YR); if (yr < 0) { dev_err(&client->dev, "%s: reading RTC YR failed\n", __func__); return yr; } tm->tm_year = bcd2bin(yr) + 100; icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT); if (icr < 0) { dev_err(&client->dev, "%s: reading INT failed\n", __func__); return icr; } alarm->enabled = !!(icr & ISL1208_REG_INT_ALME); return 0; } static int isl1208_i2c_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm) { struct rtc_time *alarm_tm = &alarm->time; u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, }; const int offs = ISL1208_REG_SCA; unsigned long rtc_secs, alarm_secs; struct rtc_time rtc_tm; int err, enable; err = isl1208_i2c_read_time(client, &rtc_tm); if (err) return err; err = rtc_tm_to_time(&rtc_tm, &rtc_secs); if (err) return err; err = rtc_tm_to_time(alarm_tm, &alarm_secs); if (err) return err; /* If the alarm time is before the current time disable the alarm */ if (!alarm->enabled || alarm_secs <= rtc_secs) enable = 0x00; else enable = 0x80; /* Program the alarm and enable it for each setting */ regs[ISL1208_REG_SCA - offs] = bin2bcd(alarm_tm->tm_sec) | enable; regs[ISL1208_REG_MNA - offs] = bin2bcd(alarm_tm->tm_min) | enable; regs[ISL1208_REG_HRA - offs] = bin2bcd(alarm_tm->tm_hour) | ISL1208_REG_HR_MIL | enable; regs[ISL1208_REG_DTA - offs] = bin2bcd(alarm_tm->tm_mday) | enable; regs[ISL1208_REG_MOA - offs] = bin2bcd(alarm_tm->tm_mon + 1) | enable; regs[ISL1208_REG_DWA - offs] = bin2bcd(alarm_tm->tm_wday & 7) | enable; /* write ALARM registers */ err = isl1208_i2c_set_regs(client, 
offs, regs, ISL1208_ALARM_SECTION_LEN); if (err < 0) { dev_err(&client->dev, "%s: writing ALARM section failed\n", __func__); return err; } err = isl1208_rtc_toggle_alarm(client, enable); if (err) return err; return 0; } static int isl1208_rtc_read_time(struct device *dev, struct rtc_time *tm) { return isl1208_i2c_read_time(to_i2c_client(dev), tm); } static int isl1208_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm) { int sr; u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; /* The clock has an 8 bit wide bcd-coded register (they never learn) * for the year. tm_year is an offset from 1900 and we are interested * in the 2000-2099 range, so any value less than 100 is invalid. */ if (tm->tm_year < 100) return -EINVAL; regs[ISL1208_REG_SC] = bin2bcd(tm->tm_sec); regs[ISL1208_REG_MN] = bin2bcd(tm->tm_min); regs[ISL1208_REG_HR] = bin2bcd(tm->tm_hour) | ISL1208_REG_HR_MIL; regs[ISL1208_REG_DT] = bin2bcd(tm->tm_mday); regs[ISL1208_REG_MO] = bin2bcd(tm->tm_mon + 1); regs[ISL1208_REG_YR] = bin2bcd(tm->tm_year - 100); regs[ISL1208_REG_DW] = bin2bcd(tm->tm_wday & 7); sr = isl1208_i2c_get_sr(client); if (sr < 0) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return sr; } /* set WRTC */ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr | ISL1208_REG_SR_WRTC); if (sr < 0) { dev_err(&client->dev, "%s: writing SR failed\n", __func__); return sr; } /* write RTC registers */ sr = isl1208_i2c_set_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN); if (sr < 0) { dev_err(&client->dev, "%s: writing RTC section failed\n", __func__); return sr; } /* clear WRTC again */ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr & ~ISL1208_REG_SR_WRTC); if (sr < 0) { dev_err(&client->dev, "%s: writing SR failed\n", __func__); return sr; } return 0; } static int isl1208_rtc_set_time(struct device *dev, struct rtc_time *tm) { return isl1208_i2c_set_time(to_i2c_client(dev), tm); } static int isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { 
return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm); } static int isl1208_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { return isl1208_i2c_set_alarm(to_i2c_client(dev), alarm); } static irqreturn_t isl1208_rtc_interrupt(int irq, void *data) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); struct i2c_client *client = data; int handled = 0, sr, err; /* * I2C reads get NAK'ed if we read straight away after an interrupt? * Using a mdelay/msleep didn't seem to help either, so we work around * this by continually trying to read the register for a short time. */ while (1) { sr = isl1208_i2c_get_sr(client); if (sr >= 0) break; if (time_after(jiffies, timeout)) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return sr; } } if (sr & ISL1208_REG_SR_ALM) { dev_dbg(&client->dev, "alarm!\n"); /* Clear the alarm */ sr &= ~ISL1208_REG_SR_ALM; sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr); if (sr < 0) dev_err(&client->dev, "%s: writing SR failed\n", __func__); else handled = 1; /* Disable the alarm */ err = isl1208_rtc_toggle_alarm(client, 0); if (err) return err; } return handled ? 
IRQ_HANDLED : IRQ_NONE; } static const struct rtc_class_ops isl1208_rtc_ops = { .proc = isl1208_rtc_proc, .read_time = isl1208_rtc_read_time, .set_time = isl1208_rtc_set_time, .read_alarm = isl1208_rtc_read_alarm, .set_alarm = isl1208_rtc_set_alarm, }; /* sysfs interface */ static ssize_t isl1208_sysfs_show_atrim(struct device *dev, struct device_attribute *attr, char *buf) { int atr = isl1208_i2c_get_atr(to_i2c_client(dev)); if (atr < 0) return atr; return sprintf(buf, "%d.%.2d pF\n", atr >> 2, (atr & 0x3) * 25); } static DEVICE_ATTR(atrim, S_IRUGO, isl1208_sysfs_show_atrim, NULL); static ssize_t isl1208_sysfs_show_dtrim(struct device *dev, struct device_attribute *attr, char *buf) { int dtr = isl1208_i2c_get_dtr(to_i2c_client(dev)); if (dtr < 0) return dtr; return sprintf(buf, "%d ppm\n", dtr); } static DEVICE_ATTR(dtrim, S_IRUGO, isl1208_sysfs_show_dtrim, NULL); static ssize_t isl1208_sysfs_show_usr(struct device *dev, struct device_attribute *attr, char *buf) { int usr = isl1208_i2c_get_usr(to_i2c_client(dev)); if (usr < 0) return usr; return sprintf(buf, "0x%.4x\n", usr); } static ssize_t isl1208_sysfs_store_usr(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int usr = -1; if (buf[0] == '0' && (buf[1] == 'x' || buf[1] == 'X')) { if (sscanf(buf, "%x", &usr) != 1) return -EINVAL; } else { if (sscanf(buf, "%d", &usr) != 1) return -EINVAL; } if (usr < 0 || usr > 0xffff) return -EINVAL; return isl1208_i2c_set_usr(to_i2c_client(dev), usr) ? 
-EIO : count; } static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr, isl1208_sysfs_store_usr); static struct attribute *isl1208_rtc_attrs[] = { &dev_attr_atrim.attr, &dev_attr_dtrim.attr, &dev_attr_usr.attr, NULL }; static const struct attribute_group isl1208_rtc_sysfs_files = { .attrs = isl1208_rtc_attrs, }; static int isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; struct rtc_device *rtc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; if (isl1208_i2c_validate_client(client) < 0) return -ENODEV; dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); if (client->irq > 0) { rc = request_threaded_irq(client->irq, NULL, isl1208_rtc_interrupt, IRQF_SHARED, isl1208_driver.driver.name, client); if (!rc) { device_init_wakeup(&client->dev, 1); enable_irq_wake(client->irq); } else { dev_err(&client->dev, "Unable to request irq %d, no alarm support\n", client->irq); client->irq = 0; } } rtc = rtc_device_register(isl1208_driver.driver.name, &client->dev, &isl1208_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { rc = PTR_ERR(rtc); goto exit_free_irq; } i2c_set_clientdata(client, rtc); rc = isl1208_i2c_get_sr(client); if (rc < 0) { dev_err(&client->dev, "reading status failed\n"); goto exit_unregister; } if (rc & ISL1208_REG_SR_RTCF) dev_warn(&client->dev, "rtc power failure detected, " "please set clock.\n"); rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); if (rc) goto exit_unregister; return 0; exit_unregister: rtc_device_unregister(rtc); exit_free_irq: if (client->irq) free_irq(client->irq, client); return rc; } static int isl1208_remove(struct i2c_client *client) { struct rtc_device *rtc = i2c_get_clientdata(client); sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); rtc_device_unregister(rtc); if (client->irq) free_irq(client->irq, client); return 0; } static const struct i2c_device_id isl1208_id[] = { { "isl1208", 0 }, { } }; 
MODULE_DEVICE_TABLE(i2c, isl1208_id); static struct i2c_driver isl1208_driver = { .driver = { .name = "rtc-isl1208", }, .probe = isl1208_probe, .remove = isl1208_remove, .id_table = isl1208_id, }; module_i2c_driver(isl1208_driver); MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>"); MODULE_DESCRIPTION("Intersil ISL1208 RTC driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
yoAeroA00/android_kernel_nokia_msm8610
sound/ppc/powermac.c
4950
5271
/* * Driver for PowerMac AWACS * Copyright (c) 2001 by Takashi Iwai <tiwai@suse.de> * based on dmasound.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include "pmac.h" #include "awacs.h" #include "burgundy.h" #define CHIP_NAME "PMac" MODULE_DESCRIPTION("PowerMac"); MODULE_SUPPORTED_DEVICE("{{Apple,PowerMac}}"); MODULE_LICENSE("GPL"); static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ static bool enable_beep = 1; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for " CHIP_NAME " soundchip."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for " CHIP_NAME " soundchip."); module_param(enable_beep, bool, 0444); MODULE_PARM_DESC(enable_beep, "Enable beep using PCM."); static struct platform_device *device; /* */ static int __devinit snd_pmac_probe(struct platform_device *devptr) { struct snd_card *card; struct snd_pmac *chip; char *name_ext; int err; err = snd_card_create(index, id, THIS_MODULE, 0, &card); if (err < 0) return err; if ((err = snd_pmac_new(card, &chip)) < 0) goto __error; card->private_data = chip; switch (chip->model) { case PMAC_BURGUNDY: 
strcpy(card->driver, "PMac Burgundy"); strcpy(card->shortname, "PowerMac Burgundy"); sprintf(card->longname, "%s (Dev %d) Sub-frame %d", card->shortname, chip->device_id, chip->subframe); if ((err = snd_pmac_burgundy_init(chip)) < 0) goto __error; break; case PMAC_DACA: strcpy(card->driver, "PMac DACA"); strcpy(card->shortname, "PowerMac DACA"); sprintf(card->longname, "%s (Dev %d) Sub-frame %d", card->shortname, chip->device_id, chip->subframe); if ((err = snd_pmac_daca_init(chip)) < 0) goto __error; break; case PMAC_TUMBLER: case PMAC_SNAPPER: name_ext = chip->model == PMAC_TUMBLER ? "Tumbler" : "Snapper"; sprintf(card->driver, "PMac %s", name_ext); sprintf(card->shortname, "PowerMac %s", name_ext); sprintf(card->longname, "%s (Dev %d) Sub-frame %d", card->shortname, chip->device_id, chip->subframe); if ( snd_pmac_tumbler_init(chip) < 0 || snd_pmac_tumbler_post_init() < 0) goto __error; break; case PMAC_AWACS: case PMAC_SCREAMER: name_ext = chip->model == PMAC_SCREAMER ? "Screamer" : "AWACS"; sprintf(card->driver, "PMac %s", name_ext); sprintf(card->shortname, "PowerMac %s", name_ext); if (chip->is_pbook_3400) name_ext = " [PB3400]"; else if (chip->is_pbook_G3) name_ext = " [PBG3]"; else name_ext = ""; sprintf(card->longname, "%s%s Rev %d", card->shortname, name_ext, chip->revision); if ((err = snd_pmac_awacs_init(chip)) < 0) goto __error; break; default: snd_printk(KERN_ERR "unsupported hardware %d\n", chip->model); err = -EINVAL; goto __error; } if ((err = snd_pmac_pcm_new(chip)) < 0) goto __error; chip->initialized = 1; if (enable_beep) snd_pmac_attach_beep(chip); snd_card_set_dev(card, &devptr->dev); if ((err = snd_card_register(card)) < 0) goto __error; platform_set_drvdata(devptr, card); return 0; __error: snd_card_free(card); return err; } static int __devexit snd_pmac_remove(struct platform_device *devptr) { snd_card_free(platform_get_drvdata(devptr)); platform_set_drvdata(devptr, NULL); return 0; } #ifdef CONFIG_PM static int 
snd_pmac_driver_suspend(struct platform_device *devptr, pm_message_t state) { struct snd_card *card = platform_get_drvdata(devptr); snd_pmac_suspend(card->private_data); return 0; } static int snd_pmac_driver_resume(struct platform_device *devptr) { struct snd_card *card = platform_get_drvdata(devptr); snd_pmac_resume(card->private_data); return 0; } #endif #define SND_PMAC_DRIVER "snd_powermac" static struct platform_driver snd_pmac_driver = { .probe = snd_pmac_probe, .remove = __devexit_p(snd_pmac_remove), #ifdef CONFIG_PM .suspend = snd_pmac_driver_suspend, .resume = snd_pmac_driver_resume, #endif .driver = { .name = SND_PMAC_DRIVER }, }; static int __init alsa_card_pmac_init(void) { int err; if ((err = platform_driver_register(&snd_pmac_driver)) < 0) return err; device = platform_device_register_simple(SND_PMAC_DRIVER, -1, NULL, 0); return 0; } static void __exit alsa_card_pmac_exit(void) { if (!IS_ERR(device)) platform_device_unregister(device); platform_driver_unregister(&snd_pmac_driver); } module_init(alsa_card_pmac_init) module_exit(alsa_card_pmac_exit)
gpl-2.0
Arc-Team/android_kernel_htc_a11
drivers/media/video/gspca/m5602/m5602_core.c
4950
10244
/* * USB Driver for ALi m5602 based webcams * * Copyright (C) 2008 Erik Andrén * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project. * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br> * * Portions of code to USB interface and ALi driver software, * Copyright (c) 2006 Willem Duinker * v4l2 interface modeled after the V4L2 driver * for SN9C10x PC Camera Controllers * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "m5602_ov9650.h" #include "m5602_ov7660.h" #include "m5602_mt9m111.h" #include "m5602_po1030.h" #include "m5602_s5k83a.h" #include "m5602_s5k4aa.h" /* Kernel module parameters */ int force_sensor; static bool dump_bridge; bool dump_sensor; static const struct usb_device_id m5602_table[] = { {USB_DEVICE(0x0402, 0x5602)}, {} }; MODULE_DEVICE_TABLE(usb, m5602_table); /* Reads a byte from the m5602 */ int m5602_read_bridge(struct sd *sd, const u8 address, u8 *i2c_data) { int err; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x04, 0xc0, 0x14, 0x8100 + address, buf, 1, M5602_URB_MSG_TIMEOUT); *i2c_data = buf[0]; PDEBUG(D_CONF, "Reading bridge register 0x%x containing 0x%x", address, *i2c_data); /* usb_control_msg(...) returns the number of bytes sent upon success, mask that and return zero instead*/ return (err < 0) ? 
err : 0; } /* Writes a byte to the m5602 */ int m5602_write_bridge(struct sd *sd, const u8 address, const u8 i2c_data) { int err; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; PDEBUG(D_CONF, "Writing bridge register 0x%x with 0x%x", address, i2c_data); memcpy(buf, bridge_urb_skeleton, sizeof(bridge_urb_skeleton)); buf[1] = address; buf[3] = i2c_data; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x19, 0x0000, buf, 4, M5602_URB_MSG_TIMEOUT); /* usb_control_msg(...) returns the number of bytes sent upon success, mask that and return zero instead */ return (err < 0) ? err : 0; } static int m5602_wait_for_i2c(struct sd *sd) { int err; u8 data; do { err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, &data); } while ((data & I2C_BUSY) && !err); return err; } int m5602_read_sensor(struct sd *sd, const u8 address, u8 *i2c_data, const u8 len) { int err, i; if (!len || len > sd->sensor->i2c_regW) return -EINVAL; err = m5602_wait_for_i2c(sd); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_I2C_DEV_ADDR, sd->sensor->i2c_slave_id); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_I2C_REG_ADDR, address); if (err < 0) return err; /* Sensors with registers that are of only one byte width are differently read */ /* FIXME: This works with the ov9650, but has issues with the po1030 */ if (sd->sensor->i2c_regW == 1) { err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 1); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x08); } else { err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x18 + len); } for (i = 0; (i < len) && !err; i++) { err = m5602_wait_for_i2c(sd); if (err < 0) return err; err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i])); PDEBUG(D_CONF, "Reading sensor register " "0x%x containing 0x%x ", address, *i2c_data); } return err; } int m5602_write_sensor(struct sd *sd, const u8 address, u8 *i2c_data, const u8 len) { int err, i; u8 *p; struct usb_device *udev = 
sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; /* No sensor with a data width larger than 16 bits has yet been seen */ if (len > sd->sensor->i2c_regW || !len) return -EINVAL; memcpy(buf, sensor_urb_skeleton, sizeof(sensor_urb_skeleton)); buf[11] = sd->sensor->i2c_slave_id; buf[15] = address; /* Special case larger sensor writes */ p = buf + 16; /* Copy a four byte write sequence for each byte to be written to */ for (i = 0; i < len; i++) { memcpy(p, sensor_urb_skeleton + 16, 4); p[3] = i2c_data[i]; p += 4; PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x", address, i2c_data[i]); } /* Copy the tailer */ memcpy(p, sensor_urb_skeleton + 20, 4); /* Set the total length */ p[3] = 0x10 + len; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x19, 0x0000, buf, 20 + len * 4, M5602_URB_MSG_TIMEOUT); return (err < 0) ? err : 0; } /* Dump all the registers of the m5602 bridge, unfortunately this breaks the camera until it's power cycled */ static void m5602_dump_bridge(struct sd *sd) { int i; for (i = 0; i < 0x80; i++) { unsigned char val = 0; m5602_read_bridge(sd, i, &val); pr_info("ALi m5602 address 0x%x contains 0x%x\n", i, val); } pr_info("Warning: The ALi m5602 webcam probably won't work until it's power cycled\n"); } static int m5602_probe_sensor(struct sd *sd) { /* Try the po1030 */ sd->sensor = &po1030; if (!sd->sensor->probe(sd)) return 0; /* Try the mt9m111 sensor */ sd->sensor = &mt9m111; if (!sd->sensor->probe(sd)) return 0; /* Try the s5k4aa */ sd->sensor = &s5k4aa; if (!sd->sensor->probe(sd)) return 0; /* Try the ov9650 */ sd->sensor = &ov9650; if (!sd->sensor->probe(sd)) return 0; /* Try the ov7660 */ sd->sensor = &ov7660; if (!sd->sensor->probe(sd)) return 0; /* Try the s5k83a */ sd->sensor = &s5k83a; if (!sd->sensor->probe(sd)) return 0; /* More sensor probe function goes here */ pr_info("Failed to find a sensor\n"); sd->sensor = NULL; return -ENODEV; } static int m5602_configure(struct gspca_dev *gspca_dev, const struct 
usb_device_id *id); static int m5602_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int err; PDEBUG(D_CONF, "Initializing ALi m5602 webcam"); /* Run the init sequence */ err = sd->sensor->init(sd); return err; } static int m5602_start_transfer(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; __u8 *buf = sd->gspca_dev.usb_buf; int err; /* Send start command to the camera */ const u8 buffer[4] = {0x13, 0xf9, 0x0f, 0x01}; if (sd->sensor->start) sd->sensor->start(sd); memcpy(buf, buffer, sizeof(buffer)); err = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x04, 0x40, 0x19, 0x0000, buf, sizeof(buffer), M5602_URB_MSG_TIMEOUT); PDEBUG(D_STREAM, "Transfer started"); return (err < 0) ? err : 0; } static void m5602_urb_complete(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; if (len < 6) { PDEBUG(D_PACK, "Packet is less than 6 bytes"); return; } /* Frame delimiter: ff xx xx xx ff ff */ if (data[0] == 0xff && data[4] == 0xff && data[5] == 0xff && data[2] != sd->frame_id) { PDEBUG(D_FRAM, "Frame delimiter detected"); sd->frame_id = data[2]; /* Remove the extra fluff appended on each header */ data += 6; len -= 6; /* Complete the last frame (if any) */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); sd->frame_count++; /* Create a new frame */ gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); PDEBUG(D_FRAM, "Starting new frame %d", sd->frame_count); } else { int cur_frame_len; cur_frame_len = gspca_dev->image_len; /* Remove urb header */ data += 4; len -= 4; if (cur_frame_len + len <= gspca_dev->frsz) { PDEBUG(D_FRAM, "Continuing frame %d copying %d bytes", sd->frame_count, len); gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } else { /* Add the remaining data up to frame size */ gspca_frame_add(gspca_dev, INTER_PACKET, data, gspca_dev->frsz - cur_frame_len); } } } static void m5602_stop_transfer(struct gspca_dev *gspca_dev) { struct sd *sd = (struct 
sd *) gspca_dev; /* Run the sensor specific end transfer sequence */ if (sd->sensor->stop) sd->sensor->stop(sd); } /* sub-driver description, the ctrl and nctrl is filled at probe time */ static struct sd_desc sd_desc = { .name = MODULE_NAME, .config = m5602_configure, .init = m5602_init, .start = m5602_start_transfer, .stopN = m5602_stop_transfer, .pkt_scan = m5602_urb_complete }; /* this function is called at probe time */ static int m5602_configure(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; int err; cam = &gspca_dev->cam; sd->desc = &sd_desc; if (dump_bridge) m5602_dump_bridge(sd); /* Probe sensor */ err = m5602_probe_sensor(sd); if (err) goto fail; return 0; fail: PDEBUG(D_ERR, "ALi m5602 webcam failed"); cam->cam_mode = NULL; cam->nmodes = 0; return err; } static int m5602_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static void m5602_disconnect(struct usb_interface *intf) { struct gspca_dev *gspca_dev = usb_get_intfdata(intf); struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor->disconnect) sd->sensor->disconnect(sd); gspca_disconnect(intf); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = m5602_table, .probe = m5602_probe, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif .disconnect = m5602_disconnect }; module_usb_driver(sd_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(force_sensor, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(force_sensor, "forces detection of a sensor, " "1 = OV9650, 2 = S5K83A, 3 = S5K4AA, " "4 = MT9M111, 5 = PO1030, 6 = OV7660"); module_param(dump_bridge, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup"); module_param(dump_sensor, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dump_sensor, "Dumps all usb 
sensor registers " "at startup providing a sensor is found");
gpl-2.0