repo_name
string
path
string
copies
string
size
string
content
string
license
string
Jovy23/N930TUVU1APGC_Kernel
drivers/acpi/processor_driver.c
995
8356
/* * processor_driver.c - ACPI Processor Driver * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * - Added processor hotplug support * Copyright (C) 2013, Intel Corporation * Rafael J. Wysocki <rafael.j.wysocki@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/acpi.h>

#include <acpi/processor.h>

#include "internal.h"

/* ACPI Notify() values the firmware sends for processor objects. */
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING	0x82

#define _COMPONENT	ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_driver");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_start(struct device *dev);
static int acpi_processor_stop(struct device *dev);

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, 0},
	{ACPI_PROCESSOR_DEVICE_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

/* Bound to CPU devices on the cpu subsys bus, matched via ACPI IDs above. */
static struct device_driver acpi_processor_driver = {
	.name = "processor",
	.bus = &cpu_subsys,
	.acpi_match_table = processor_device_ids,
	.probe = acpi_processor_start,
	.remove = acpi_processor_stop,
};

/*
 * acpi_processor_notify - handle firmware notifications for one processor.
 * @handle: ACPI handle the notification arrived on
 * @event:  one of the ACPI_PROCESSOR_NOTIFY_* values
 * @data:   the struct acpi_device this handler was installed for
 *
 * Re-evaluates the affected state (_PPC performance limit, _CST C-states,
 * or T-states) and forwards the event to user space over netlink.
 */
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *device = data;
	struct acpi_processor *pr;
	int saved;

	if (device->handle != handle)
		return;

	pr = acpi_driver_data(device);
	if (!pr)
		return;

	switch (event) {
	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
		saved = pr->performance_platform_limit;
		acpi_processor_ppc_has_changed(pr, 1);
		/* Only notify user space if the platform limit changed. */
		if (saved == pr->performance_platform_limit)
			break;
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						dev_name(&device->dev), event,
						pr->performance_platform_limit);
		break;
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						dev_name(&device->dev), event, 0);
		break;
	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
		acpi_processor_tstate_has_changed(pr);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						dev_name(&device->dev), event, 0);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

static int __acpi_processor_start(struct acpi_device *device);

/*
 * acpi_cpu_soft_notify - CPU hotplug notifier callback.
 *
 * On CPU_ONLINE, either performs first-time initialization for a
 * physically hotplugged CPU (need_hotplug_init set) or re-evaluates
 * P-/C-/T-state data for a soft online.  On CPU_DEAD, re-evaluates
 * the throttling state so the offline CPU's flag is invalidated.
 */
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);
	struct acpi_device *device;
	action &= ~CPU_TASKS_FROZEN;

	/*
	 * CPU_STARTING and CPU_DYING must not sleep. Return here since
	 * acpi_bus_get_device() may sleep.
	 */
	if (action == CPU_STARTING || action == CPU_DYING)
		return NOTIFY_DONE;

	if (!pr || acpi_bus_get_device(pr->handle, &device))
		return NOTIFY_DONE;

	if (action == CPU_ONLINE) {
		/*
		 * CPU got physically hotplugged and onlined for the first time:
		 * Initialize missing things.
		 */
		if (pr->flags.need_hotplug_init) {
			int ret;

			pr_info("Will online and init hotplugged CPU: %d\n",
				pr->id);
			pr->flags.need_hotplug_init = 0;
			ret = __acpi_processor_start(device);
			WARN(ret, "Failed to start CPU: %d\n", pr->id);
		} else {
			/* Normal CPU soft online event. */
			acpi_processor_ppc_has_changed(pr, 0);
			acpi_processor_hotplug(pr);
			acpi_processor_reevaluate_tstate(pr, action);
			acpi_processor_tstate_has_changed(pr);
		}
	} else if (action == CPU_DEAD) {
		/* Invalidate flag.throttling after the CPU is offline. */
		acpi_processor_reevaluate_tstate(pr, action);
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata acpi_cpu_notifier = {
	.notifier_call = acpi_cpu_soft_notify,
};

/*
 * __acpi_processor_start - bring one processor device fully online.
 *
 * Initializes P-state (cpufreq), throttling and C-state handling, then
 * registers the thermal cooling device, the two sysfs cross-links and
 * the ACPI notify handler.  On failure each step is unwound in reverse
 * order via the goto-cleanup labels.  Returns 0 on success, negative
 * errno otherwise.
 */
static int __acpi_processor_start(struct acpi_device *device)
{
	struct acpi_processor *pr = acpi_driver_data(device);
	acpi_status status;
	int result = 0;

	if (!pr)
		return -ENODEV;

	/* Deferred: finished later from the CPU hotplug notifier. */
	if (pr->flags.need_hotplug_init)
		return 0;

#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr, 0);
#endif
	acpi_processor_get_throttling_info(pr);

	if (pr->flags.throttling)
		pr->flags.limit = 1;

	/* Only take over idle handling if no other cpuidle driver is active. */
	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
		acpi_processor_power_init(pr);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						   &processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto err_power_exit;
	}

	dev_dbg(&device->dev, "registered as cooling_device%d\n",
		pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result) {
		dev_err(&device->dev,
			"Failed to create sysfs link 'thermal_cooling'\n");
		goto err_thermal_unregister;
	}
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result) {
		dev_err(&pr->cdev->device,
			"Failed to create sysfs link 'device'\n");
		goto err_remove_sysfs_thermal;
	}

	status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
					     acpi_processor_notify, device);
	if (ACPI_SUCCESS(status))
		return 0;

	sysfs_remove_link(&pr->cdev->device.kobj, "device");
 err_remove_sysfs_thermal:
	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
 err_thermal_unregister:
	thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
	acpi_processor_power_exit(pr);

	return result;
}

/* driver_register() .probe callback: resolve the ACPI companion and start. */
static int acpi_processor_start(struct device *dev)
{
	struct acpi_device *device = ACPI_COMPANION(dev);

	if (!device)
		return -ENODEV;

	return __acpi_processor_start(device);
}

/* driver_register() .remove callback: undo everything done at start. */
static int acpi_processor_stop(struct device *dev)
{
	struct acpi_device *device = ACPI_COMPANION(dev);
	struct acpi_processor *pr;

	if (!device)
		return 0;

	acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
				   acpi_processor_notify);

	pr = acpi_driver_data(device);
	if (!pr)
		return 0;
	acpi_processor_power_exit(pr);

	if (pr->cdev) {
		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
		sysfs_remove_link(&pr->cdev->device.kobj, "device");
		thermal_cooling_device_unregister(pr->cdev);
		pr->cdev = NULL;
	}
	return 0;
}

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, that works even without
 * ACPI, but needs symbols from this driver
 */
static int __init acpi_processor_driver_init(void)
{
	int result = 0;

	if (acpi_disabled)
		return 0;

	result = driver_register(&acpi_processor_driver);
	if (result < 0)
		return result;

	acpi_processor_syscore_init();
	register_hotcpu_notifier(&acpi_cpu_notifier);
	acpi_thermal_cpufreq_init();
	acpi_processor_ppc_init();
	acpi_processor_throttling_init();
	return 0;
}

static void __exit acpi_processor_driver_exit(void)
{
	if (acpi_disabled)
		return;

	/* Tear down in reverse order of acpi_processor_driver_init(). */
	acpi_processor_ppc_exit();
	acpi_thermal_cpufreq_exit();
	unregister_hotcpu_notifier(&acpi_cpu_notifier);
	acpi_processor_syscore_exit();
	driver_unregister(&acpi_processor_driver);
}

module_init(acpi_processor_driver_init);
module_exit(acpi_processor_driver_exit);

MODULE_ALIAS("processor");
gpl-2.0
alianmohammad/linux-kernel-4.2.6-hacks
ipc/mq_sysctl.c
2019
2985
/*
 * Copyright (C) 2007 IBM Corporation
 *
 * Author: Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sysctl.h>

#ifdef CONFIG_PROC_SYSCTL
/*
 * Translate a ctl_table ->data pointer, which refers to a field of the
 * global init_ipc_ns, into the matching field of the calling task's IPC
 * namespace: the byte offset of the field within init_ipc_ns is applied
 * to current->nsproxy->ipc_ns.
 */
static void *get_mq(struct ctl_table *table)
{
	char *which = table->data;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns;
	return which;
}

/*
 * proc handler for the unbounded integer knobs: redirect ->data to the
 * per-namespace field via a stack copy of the table, then delegate.
 */
static int proc_mq_dointvec(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table mq_table;
	memcpy(&mq_table, table, sizeof(mq_table));
	mq_table.data = get_mq(table);

	return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
}

/*
 * Same redirection as proc_mq_dointvec(), but values are clamped to the
 * [extra1, extra2] range declared in the table entry.
 */
static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct ctl_table mq_table;
	memcpy(&mq_table, table, sizeof(mq_table));
	mq_table.data = get_mq(table);

	return proc_dointvec_minmax(&mq_table, write, buffer,
				    lenp, ppos);
}
#else
/* Without procfs sysctl support these handlers are never invoked. */
#define proc_mq_dointvec NULL
#define proc_mq_dointvec_minmax NULL
#endif

/* Range limits used by the _minmax entries below. */
static int msg_max_limit_min = MIN_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;

static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;

/* The tunables exposed under /proc/sys/fs/mqueue/. */
static struct ctl_table mq_sysctls[] = {
	{
		.procname	= "queues_max",
		.data		= &init_ipc_ns.mq_queues_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_mq_dointvec,
	},
	{
		.procname	= "msg_max",
		.data		= &init_ipc_ns.mq_msg_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_mq_dointvec_minmax,
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.procname	= "msgsize_max",
		.data		= &init_ipc_ns.mq_msgsize_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_mq_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{
		.procname	= "msg_default",
		.data		= &init_ipc_ns.mq_msg_default,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_mq_dointvec_minmax,
		/* defaults share the msg_max bounds */
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.procname	= "msgsize_default",
		.data		= &init_ipc_ns.mq_msgsize_default,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_mq_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{}
};

/* Directory node: fs/mqueue */
static struct ctl_table mq_sysctl_dir[] = {
	{
		.procname	= "mqueue",
		.mode		= 0555,
		.child		= mq_sysctls,
	},
	{}
};

/* Root node: fs */
static struct ctl_table mq_sysctl_root[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= mq_sysctl_dir,
	},
	{}
};

/* Register the fs/mqueue sysctl tree; caller owns the returned header. */
struct ctl_table_header *mq_register_sysctl_table(void)
{
	return register_sysctl_table(mq_sysctl_root);
}
gpl-2.0
TaichiN/android_kernel_samsung_tuna
drivers/ata/sata_uli.c
2531
6987
/* * sata_uli.c - ULi Electronics SATA * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Hardware documentation available under NDA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "sata_uli" #define DRV_VERSION "1.3" enum { uli_5289 = 0, uli_5287 = 1, uli_5281 = 2, uli_max_ports = 4, /* PCI configuration registers */ ULI5287_BASE = 0x90, /* sata0 phy SCR registers */ ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */ ULI5281_BASE = 0x60, /* sata0 phy SCR registers */ ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */ }; struct uli_priv { unsigned int scr_cfg_addr[uli_max_ports]; }; static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static const struct pci_device_id uli_pci_tbl[] = { { PCI_VDEVICE(AL, 0x5289), uli_5289 }, { PCI_VDEVICE(AL, 0x5287), uli_5287 }, { PCI_VDEVICE(AL, 
0x5281), uli_5281 }, { } /* terminate list */ }; static struct pci_driver uli_pci_driver = { .name = DRV_NAME, .id_table = uli_pci_tbl, .probe = uli_init_one, .remove = ata_pci_remove_one, }; static struct scsi_host_template uli_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations uli_ops = { .inherits = &ata_bmdma_port_ops, .scr_read = uli_scr_read, .scr_write = uli_scr_write, .hardreset = ATA_OP_NULL, }; static const struct ata_port_info uli_port_info = { .flags = ATA_FLAG_SATA | ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &uli_ops, }; MODULE_AUTHOR("Peer Chen"); MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, uli_pci_tbl); MODULE_VERSION(DRV_VERSION); static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) { struct uli_priv *hpriv = ap->host->private_data; return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg); } static u32 uli_scr_cfg_read(struct ata_link *link, unsigned int sc_reg) { struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg); u32 val; pci_read_config_dword(pdev, cfg_addr, &val); return val; } static void uli_scr_cfg_write(struct ata_link *link, unsigned int scr, u32 val) { struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr); pci_write_config_dword(pdev, cfg_addr, val); } static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) return -EINVAL; *val = uli_scr_cfg_read(link, sc_reg); return 0; } static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 return -EINVAL; uli_scr_cfg_write(link, sc_reg, val); return 0; } static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; const struct 
ata_port_info *ppi[] = { &uli_port_info, NULL }; unsigned int board_idx = (unsigned int) ent->driver_data; struct ata_host *host; struct uli_priv *hpriv; void __iomem * const *iomap; struct ata_ioports *ioaddr; int n_ports, rc; if (!printed_version++) dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); rc = pcim_enable_device(pdev); if (rc) return rc; n_ports = 2; if (board_idx == uli_5287) n_ports = 4; /* allocate the host */ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); if (!host) return -ENOMEM; hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) return -ENOMEM; host->private_data = hpriv; /* the first two ports are standard SFF */ rc = ata_pci_sff_init_host(host); if (rc) return rc; ata_pci_bmdma_init(host); iomap = host->iomap; switch (board_idx) { case uli_5287: /* If there are four, the last two live right after * the standard SFF ports. */ hpriv->scr_cfg_addr[0] = ULI5287_BASE; hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS; ioaddr = &host->ports[2]->ioaddr; ioaddr->cmd_addr = iomap[0] + 8; ioaddr->altstatus_addr = ioaddr->ctl_addr = (void __iomem *) ((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4; ioaddr->bmdma_addr = iomap[4] + 16; hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4; ata_sff_std_ports(ioaddr); ata_port_desc(host->ports[2], "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", (unsigned long long)pci_resource_start(pdev, 0) + 8, ((unsigned long long)pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4, (unsigned long long)pci_resource_start(pdev, 4) + 16); ioaddr = &host->ports[3]->ioaddr; ioaddr->cmd_addr = iomap[2] + 8; ioaddr->altstatus_addr = ioaddr->ctl_addr = (void __iomem *) ((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4; ioaddr->bmdma_addr = iomap[4] + 24; hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5; ata_sff_std_ports(ioaddr); ata_port_desc(host->ports[2], "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", (unsigned long long)pci_resource_start(pdev, 2) + 9, ((unsigned long 
long)pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4, (unsigned long long)pci_resource_start(pdev, 4) + 24); break; case uli_5289: hpriv->scr_cfg_addr[0] = ULI5287_BASE; hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS; break; case uli_5281: hpriv->scr_cfg_addr[0] = ULI5281_BASE; hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS; break; default: BUG(); break; } pci_set_master(pdev); pci_intx(pdev, 1); return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &uli_sht); } static int __init uli_init(void) { return pci_register_driver(&uli_pci_driver); } static void __exit uli_exit(void) { pci_unregister_driver(&uli_pci_driver); } module_init(uli_init); module_exit(uli_exit);
gpl-2.0
JijonHyuni/HyperKernel-JB
virt/drivers/staging/et131x/et131x_netdev.c
2531
20330
/* * Agere Systems Inc. * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs * * Copyright © 2005 Agere Systems Inc. * All rights reserved. * http://www.agere.com * *------------------------------------------------------------------------------ * * et131x_netdev.c - Routines and data required by all Linux network devices. * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright © 2005 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. 
OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include "et131x_version.h" #include "et131x_defs.h" #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/bitops.h> #include <linux/pci.h> #include <asm/system.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include "et1310_phy.h" #include "et1310_tx.h" #include "et131x_adapter.h" #include "et131x.h" struct net_device_stats *et131x_stats(struct net_device *netdev); int et131x_open(struct net_device *netdev); int et131x_close(struct net_device *netdev); int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd); void et131x_multicast(struct net_device *netdev); int et131x_tx(struct sk_buff *skb, struct net_device *netdev); void et131x_tx_timeout(struct net_device *netdev); int et131x_change_mtu(struct net_device *netdev, int new_mtu); int et131x_set_mac_addr(struct net_device *netdev, void *new_mac); void et131x_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); void et131x_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); void et131x_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); static const struct 
net_device_ops et131x_netdev_ops = { .ndo_open = et131x_open, .ndo_stop = et131x_close, .ndo_start_xmit = et131x_tx, .ndo_set_multicast_list = et131x_multicast, .ndo_tx_timeout = et131x_tx_timeout, .ndo_change_mtu = et131x_change_mtu, .ndo_set_mac_address = et131x_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_get_stats = et131x_stats, .ndo_do_ioctl = et131x_ioctl, }; /** * et131x_device_alloc * * Returns pointer to the allocated and initialized net_device struct for * this device. * * Create instances of net_device and wl_private for the new adapter and * register the device's entry points in the net_device structure. */ struct net_device *et131x_device_alloc(void) { struct net_device *netdev; /* Alloc net_device and adapter structs */ netdev = alloc_etherdev(sizeof(struct et131x_adapter)); if (netdev == NULL) { printk(KERN_ERR "et131x: Alloc of net_device struct failed\n"); return NULL; } /* Setup the function registration table (and other data) for a * net_device */ /* netdev->init = &et131x_init; */ /* netdev->set_config = &et131x_config; */ netdev->watchdog_timeo = ET131X_TX_TIMEOUT; netdev->netdev_ops = &et131x_netdev_ops; /* netdev->ethtool_ops = &et131x_ethtool_ops; */ /* Poll? */ /* netdev->poll = &et131x_poll; */ /* netdev->poll_controller = &et131x_poll_controller; */ return netdev; } /** * et131x_stats - Return the current device statistics. 
* @netdev: device whose stats are being queried * * Returns 0 on success, errno on failure (as defined in errno.h) */ struct net_device_stats *et131x_stats(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); struct net_device_stats *stats = &adapter->net_stats; CE_STATS_t *devstat = &adapter->Stats; stats->rx_packets = devstat->ipackets; stats->tx_packets = devstat->opackets; stats->rx_errors = devstat->length_err + devstat->alignment_err + devstat->crc_err + devstat->code_violations + devstat->other_errors; stats->tx_errors = devstat->max_pkt_error; stats->multicast = devstat->multircv; stats->collisions = devstat->collisions; stats->rx_length_errors = devstat->length_err; stats->rx_over_errors = devstat->rx_ov_flow; stats->rx_crc_errors = devstat->crc_err; /* NOTE: These stats don't have corresponding values in CE_STATS, * so we're going to have to update these directly from within the * TX/RX code */ /* stats->rx_bytes = 20; devstat->; */ /* stats->tx_bytes = 20; devstat->; */ /* stats->rx_dropped = devstat->; */ /* stats->tx_dropped = devstat->; */ /* NOTE: Not used, can't find analogous statistics */ /* stats->rx_frame_errors = devstat->; */ /* stats->rx_fifo_errors = devstat->; */ /* stats->rx_missed_errors = devstat->; */ /* stats->tx_aborted_errors = devstat->; */ /* stats->tx_carrier_errors = devstat->; */ /* stats->tx_fifo_errors = devstat->; */ /* stats->tx_heartbeat_errors = devstat->; */ /* stats->tx_window_errors = devstat->; */ return stats; } /** * et131x_open - Open the device for use. 
* @netdev: device to be opened * * Returns 0 on success, errno on failure (as defined in errno.h) */ int et131x_open(struct net_device *netdev) { int result = 0; struct et131x_adapter *adapter = netdev_priv(netdev); /* Start the timer to track NIC errors */ add_timer(&adapter->ErrorTimer); /* Register our IRQ */ result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED, netdev->name, netdev); if (result) { dev_err(&adapter->pdev->dev, "c ould not register IRQ %d\n", netdev->irq); return result; } /* Enable the Tx and Rx DMA engines (if not already enabled) */ et131x_rx_dma_enable(adapter); et131x_tx_dma_enable(adapter); /* Enable device interrupts */ et131x_enable_interrupts(adapter); adapter->Flags |= fMP_ADAPTER_INTERRUPT_IN_USE; /* We're ready to move some data, so start the queue */ netif_start_queue(netdev); return result; } /** * et131x_close - Close the device * @netdev: device to be closed * * Returns 0 on success, errno on failure (as defined in errno.h) */ int et131x_close(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); /* First thing is to stop the queue */ netif_stop_queue(netdev); /* Stop the Tx and Rx DMA engines */ et131x_rx_dma_disable(adapter); et131x_tx_dma_disable(adapter); /* Disable device interrupts */ et131x_disable_interrupts(adapter); /* Deregistering ISR */ adapter->Flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE; free_irq(netdev->irq, netdev); /* Stop the error timer */ del_timer_sync(&adapter->ErrorTimer); return 0; } /** * et131x_ioctl_mii - The function which handles MII IOCTLs * @netdev: device on which the query is being made * @reqbuf: the request-specific data buffer * @cmd: the command request code * * Returns 0 on success, errno on failure (as defined in errno.h) */ int et131x_ioctl_mii(struct net_device *netdev, struct ifreq *reqbuf, int cmd) { int status = 0; struct et131x_adapter *etdev = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(reqbuf); switch (cmd) { case SIOCGMIIPHY: data->phy_id = 
etdev->Stats.xcvr_addr; break; case SIOCGMIIREG: if (!capable(CAP_NET_ADMIN)) status = -EPERM; else status = MiRead(etdev, data->reg_num, &data->val_out); break; case SIOCSMIIREG: if (!capable(CAP_NET_ADMIN)) status = -EPERM; else status = MiWrite(etdev, data->reg_num, data->val_in); break; default: status = -EOPNOTSUPP; } return status; } /** * et131x_ioctl - The I/O Control handler for the driver * @netdev: device on which the control request is being made * @reqbuf: a pointer to the IOCTL request buffer * @cmd: the IOCTL command code * * Returns 0 on success, errno on failure (as defined in errno.h) */ int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) { int status = 0; switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: status = et131x_ioctl_mii(netdev, reqbuf, cmd); break; default: status = -EOPNOTSUPP; } return status; } /** * et131x_set_packet_filter - Configures the Rx Packet filtering on the device * @adapter: pointer to our private adapter structure * * FIXME: lot of dups with MAC code * * Returns 0 on success, errno on failure */ int et131x_set_packet_filter(struct et131x_adapter *adapter) { int status = 0; uint32_t filter = adapter->PacketFilter; u32 ctrl; u32 pf_ctrl; ctrl = readl(&adapter->regs->rxmac.ctrl); pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); /* Default to disabled packet filtering. Enable it in the individual * case statements that require the device to filter something */ ctrl |= 0x04; /* Set us to be in promiscuous mode so we receive everything, this * is also true when we get a packet filter of 0 */ if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) pf_ctrl &= ~7; /* Clear filter bits */ else { /* * Set us up with Multicast packet filtering. Three cases are * possible - (1) we have a multi-cast list, (2) we receive ALL * multicast entries or (3) we receive none. 
*/ if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) pf_ctrl &= ~2; /* Multicast filter bit */ else { SetupDeviceForMulticast(adapter); pf_ctrl |= 2; ctrl &= ~0x04; } /* Set us up with Unicast packet filtering */ if (filter & ET131X_PACKET_TYPE_DIRECTED) { SetupDeviceForUnicast(adapter); pf_ctrl |= 4; ctrl &= ~0x04; } /* Set us up with Broadcast packet filtering */ if (filter & ET131X_PACKET_TYPE_BROADCAST) { pf_ctrl |= 1; /* Broadcast filter bit */ ctrl &= ~0x04; } else pf_ctrl &= ~1; /* Setup the receive mac configuration registers - Packet * Filter control + the enable / disable for packet filter * in the control reg. */ writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); writel(ctrl, &adapter->regs->rxmac.ctrl); } return status; } /** * et131x_multicast - The handler to configure multicasting on the interface * @netdev: a pointer to a net_device struct representing the device */ void et131x_multicast(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); uint32_t PacketFilter = 0; unsigned long flags; struct netdev_hw_addr *ha; int i; spin_lock_irqsave(&adapter->Lock, flags); /* Before we modify the platform-independent filter flags, store them * locally. This allows us to determine if anything's changed and if * we even need to bother the hardware */ PacketFilter = adapter->PacketFilter; /* Clear the 'multicast' flag locally; because we only have a single * flag to check multicast, and multiple multicast addresses can be * set, this is the easiest way to determine if more than one * multicast address is being set. 
*/ PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST; /* Check the net_device flags and set the device independent flags * accordingly */ if (netdev->flags & IFF_PROMISC) adapter->PacketFilter |= ET131X_PACKET_TYPE_PROMISCUOUS; else adapter->PacketFilter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; if (netdev->flags & IFF_ALLMULTI) adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST; if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST; if (netdev_mc_count(netdev) < 1) { adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST; } else adapter->PacketFilter |= ET131X_PACKET_TYPE_MULTICAST; /* Set values in the private adapter struct */ i = 0; netdev_for_each_mc_addr(ha, netdev) { if (i == NIC_MAX_MCAST_LIST) break; memcpy(adapter->MCList[i++], ha->addr, ETH_ALEN); } adapter->MCAddressCount = i; /* Are the new flags different from the previous ones? If not, then no * action is required * * NOTE - This block will always update the MCList with the hardware, * even if the addresses aren't the same. 
*/ if (PacketFilter != adapter->PacketFilter) { /* Call the device's filter function */ et131x_set_packet_filter(adapter); } spin_unlock_irqrestore(&adapter->Lock, flags); } /** * et131x_tx - The handler to tx a packet on the device * @skb: data to be Tx'd * @netdev: device on which data is to be Tx'd * * Returns 0 on success, errno on failure (as defined in errno.h) */ int et131x_tx(struct sk_buff *skb, struct net_device *netdev) { int status = 0; /* Save the timestamp for the TX timeout watchdog */ netdev->trans_start = jiffies; /* Call the device-specific data Tx routine */ status = et131x_send_packets(skb, netdev); /* Check status and manage the netif queue if necessary */ if (status != 0) { if (status == -ENOMEM) { /* Put the queue to sleep until resources are * available */ netif_stop_queue(netdev); status = NETDEV_TX_BUSY; } else { status = NETDEV_TX_OK; } } return status; } /** * et131x_tx_timeout - Timeout handler * @netdev: a pointer to a net_device struct representing the device * * The handler called when a Tx request times out. The timeout period is * specified by the 'tx_timeo" element in the net_device structure (see * et131x_alloc_device() to see how this value is set). */ void et131x_tx_timeout(struct net_device *netdev) { struct et131x_adapter *etdev = netdev_priv(netdev); struct tcb *tcb; unsigned long flags; /* Just skip this part if the adapter is doing link detection */ if (etdev->Flags & fMP_ADAPTER_LINK_DETECTION) return; /* Any nonrecoverable hardware error? * Checks adapter->flags for any failure in phy reading */ if (etdev->Flags & fMP_ADAPTER_NON_RECOVER_ERROR) return; /* Hardware failure? */ if (etdev->Flags & fMP_ADAPTER_HARDWARE_ERROR) { dev_err(&etdev->pdev->dev, "hardware error - reset\n"); return; } /* Is send stuck? 
*/ spin_lock_irqsave(&etdev->TCBSendQLock, flags); tcb = etdev->tx_ring.send_head; if (tcb != NULL) { tcb->count++; if (tcb->count > NIC_SEND_HANG_THRESHOLD) { spin_unlock_irqrestore(&etdev->TCBSendQLock, flags); dev_warn(&etdev->pdev->dev, "Send stuck - reset. tcb->WrIndex %x, Flags 0x%08x\n", tcb->index, tcb->flags); et131x_close(netdev); et131x_open(netdev); return; } } spin_unlock_irqrestore(&etdev->TCBSendQLock, flags); } /** * et131x_change_mtu - The handler called to change the MTU for the device * @netdev: device whose MTU is to be changed * @new_mtu: the desired MTU * * Returns 0 on success, errno on failure (as defined in errno.h) */ int et131x_change_mtu(struct net_device *netdev, int new_mtu) { int result = 0; struct et131x_adapter *adapter = netdev_priv(netdev); /* Make sure the requested MTU is valid */ if (new_mtu < 64 || new_mtu > 9216) return -EINVAL; /* Stop the netif queue */ netif_stop_queue(netdev); /* Stop the Tx and Rx DMA engines */ et131x_rx_dma_disable(adapter); et131x_tx_dma_disable(adapter); /* Disable device interrupts */ et131x_disable_interrupts(adapter); et131x_handle_send_interrupt(adapter); et131x_handle_recv_interrupt(adapter); /* Set the new MTU */ netdev->mtu = new_mtu; /* Free Rx DMA memory */ et131x_adapter_memory_free(adapter); /* Set the config parameter for Jumbo Packet support */ adapter->RegistryJumboPacket = new_mtu + 14; et131x_soft_reset(adapter); /* Alloc and init Rx DMA memory */ result = et131x_adapter_memory_alloc(adapter); if (result != 0) { dev_warn(&adapter->pdev->dev, "Change MTU failed; couldn't re-alloc DMA memory\n"); return result; } et131x_init_send(adapter); et131x_hwaddr_init(adapter); memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); /* Init the device with the new settings */ et131x_adapter_setup(adapter); /* Enable interrupts */ if (adapter->Flags & fMP_ADAPTER_INTERRUPT_IN_USE) et131x_enable_interrupts(adapter); /* Restart the Tx and Rx DMA engines */ et131x_rx_dma_enable(adapter); 
et131x_tx_dma_enable(adapter); /* Restart the netif queue */ netif_wake_queue(netdev); return result; } /** * et131x_set_mac_addr - handler to change the MAC address for the device * @netdev: device whose MAC is to be changed * @new_mac: the desired MAC address * * Returns 0 on success, errno on failure (as defined in errno.h) * * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14 */ int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) { int result = 0; struct et131x_adapter *adapter = netdev_priv(netdev); struct sockaddr *address = new_mac; /* begin blux */ if (adapter == NULL) return -ENODEV; /* Make sure the requested MAC is valid */ if (!is_valid_ether_addr(address->sa_data)) return -EINVAL; /* Stop the netif queue */ netif_stop_queue(netdev); /* Stop the Tx and Rx DMA engines */ et131x_rx_dma_disable(adapter); et131x_tx_dma_disable(adapter); /* Disable device interrupts */ et131x_disable_interrupts(adapter); et131x_handle_send_interrupt(adapter); et131x_handle_recv_interrupt(adapter); /* Set the new MAC */ /* netdev->set_mac_address = &new_mac; */ /* netdev->mtu = new_mtu; */ memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); printk(KERN_INFO "%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr); /* Free Rx DMA memory */ et131x_adapter_memory_free(adapter); /* Set the config parameter for Jumbo Packet support */ /* adapter->RegistryJumboPacket = new_mtu + 14; */ /* blux: not needet here, we'll change the MAC */ et131x_soft_reset(adapter); /* Alloc and init Rx DMA memory */ result = et131x_adapter_memory_alloc(adapter); if (result != 0) { dev_err(&adapter->pdev->dev, "Change MAC failed; couldn't re-alloc DMA memory\n"); return result; } et131x_init_send(adapter); et131x_hwaddr_init(adapter); /* Init the device with the new settings */ et131x_adapter_setup(adapter); /* Enable interrupts */ if (adapter->Flags & fMP_ADAPTER_INTERRUPT_IN_USE) et131x_enable_interrupts(adapter); /* Restart the Tx and Rx DMA engines */ 
et131x_rx_dma_enable(adapter); et131x_tx_dma_enable(adapter); /* Restart the netif queue */ netif_wake_queue(netdev); return result; }
gpl-2.0
Luquidtester/DirtyKernel-3.0.101
drivers/media/dvb/mantis/hopper_cards.c
2787
6996
/* Hopper PCI bridge driver Copyright (C) Manu Abraham (abraham.manu@gmail.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/slab.h> #include <asm/irq.h> #include <linux/interrupt.h> #include "dmxdev.h" #include "dvbdev.h" #include "dvb_demux.h" #include "dvb_frontend.h" #include "dvb_net.h" #include "mantis_common.h" #include "hopper_vp3028.h" #include "mantis_dma.h" #include "mantis_dvb.h" #include "mantis_uart.h" #include "mantis_ioc.h" #include "mantis_pci.h" #include "mantis_i2c.h" #include "mantis_reg.h" static unsigned int verbose; module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, "verbose startup messages, default is 0 (no)"); #define DRIVER_NAME "Hopper" static char *label[10] = { "DMA", "IRQ-0", "IRQ-1", "OCERR", "PABRT", "RIPRR", "PPERR", "FTRGT", "RISCI", "RACK" }; static int devs; static irqreturn_t hopper_irq_handler(int irq, void *dev_id) { u32 stat = 0, mask = 0, lstat = 0, mstat = 0; u32 rst_stat = 0, rst_mask = 0; struct mantis_pci *mantis; struct mantis_ca *ca; mantis = (struct mantis_pci *) dev_id; if (unlikely(mantis == NULL)) { dprintk(MANTIS_ERROR, 1, "Mantis == NULL"); return IRQ_NONE; } ca = mantis->mantis_ca; stat = mmread(MANTIS_INT_STAT); mask = mmread(MANTIS_INT_MASK); mstat = lstat = stat & 
~MANTIS_INT_RISCSTAT; if (!(stat & mask)) return IRQ_NONE; rst_mask = MANTIS_GPIF_WRACK | MANTIS_GPIF_OTHERR | MANTIS_SBUF_WSTO | MANTIS_GPIF_EXTIRQ; rst_stat = mmread(MANTIS_GPIF_STATUS); rst_stat &= rst_mask; mmwrite(rst_stat, MANTIS_GPIF_STATUS); mantis->mantis_int_stat = stat; mantis->mantis_int_mask = mask; dprintk(MANTIS_DEBUG, 0, "\n-- Stat=<%02x> Mask=<%02x> --", stat, mask); if (stat & MANTIS_INT_RISCEN) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]); } if (stat & MANTIS_INT_IRQ0) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[1]); mantis->gpif_status = rst_stat; wake_up(&ca->hif_write_wq); schedule_work(&ca->hif_evm_work); } if (stat & MANTIS_INT_IRQ1) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[2]); schedule_work(&mantis->uart_work); } if (stat & MANTIS_INT_OCERR) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[3]); } if (stat & MANTIS_INT_PABORT) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[4]); } if (stat & MANTIS_INT_RIPERR) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[5]); } if (stat & MANTIS_INT_PPERR) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[6]); } if (stat & MANTIS_INT_FTRGT) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[7]); } if (stat & MANTIS_INT_RISCI) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]); mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28; tasklet_schedule(&mantis->tasklet); } if (stat & MANTIS_INT_I2CDONE) { dprintk(MANTIS_DEBUG, 0, "<%s>", label[9]); wake_up(&mantis->i2c_wq); } mmwrite(stat, MANTIS_INT_STAT); stat &= ~(MANTIS_INT_RISCEN | MANTIS_INT_I2CDONE | MANTIS_INT_I2CRACK | MANTIS_INT_PCMCIA7 | MANTIS_INT_PCMCIA6 | MANTIS_INT_PCMCIA5 | MANTIS_INT_PCMCIA4 | MANTIS_INT_PCMCIA3 | MANTIS_INT_PCMCIA2 | MANTIS_INT_PCMCIA1 | MANTIS_INT_PCMCIA0 | MANTIS_INT_IRQ1 | MANTIS_INT_IRQ0 | MANTIS_INT_OCERR | MANTIS_INT_PABORT | MANTIS_INT_RIPERR | MANTIS_INT_PPERR | MANTIS_INT_FTRGT | MANTIS_INT_RISCI); if (stat) dprintk(MANTIS_DEBUG, 0, "<Unknown> Stat=<%02x> Mask=<%02x>", stat, mask); dprintk(MANTIS_DEBUG, 0, "\n"); return IRQ_HANDLED; } static int __devinit 
hopper_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct mantis_pci *mantis; struct mantis_hwconfig *config; int err = 0; mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL); if (mantis == NULL) { printk(KERN_ERR "%s ERROR: Out of memory\n", __func__); err = -ENOMEM; goto fail0; } mantis->num = devs; mantis->verbose = verbose; mantis->pdev = pdev; config = (struct mantis_hwconfig *) pci_id->driver_data; config->irq_handler = &hopper_irq_handler; mantis->hwconfig = config; err = mantis_pci_init(mantis); if (err) { dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI initialization failed <%d>", err); goto fail1; } err = mantis_stream_control(mantis, STREAM_TO_HIF); if (err < 0) { dprintk(MANTIS_ERROR, 1, "ERROR: Mantis stream control failed <%d>", err); goto fail1; } err = mantis_i2c_init(mantis); if (err < 0) { dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C initialization failed <%d>", err); goto fail2; } err = mantis_get_mac(mantis); if (err < 0) { dprintk(MANTIS_ERROR, 1, "ERROR: Mantis MAC address read failed <%d>", err); goto fail2; } err = mantis_dma_init(mantis); if (err < 0) { dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA initialization failed <%d>", err); goto fail3; } err = mantis_dvb_init(mantis); if (err < 0) { dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB initialization failed <%d>", err); goto fail4; } devs++; return err; fail4: dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA exit! <%d>", err); mantis_dma_exit(mantis); fail3: dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C exit! <%d>", err); mantis_i2c_exit(mantis); fail2: dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI exit! <%d>", err); mantis_pci_exit(mantis); fail1: dprintk(MANTIS_ERROR, 1, "ERROR: Mantis free! 
<%d>", err); kfree(mantis); fail0: return err; } static void __devexit hopper_pci_remove(struct pci_dev *pdev) { struct mantis_pci *mantis = pci_get_drvdata(pdev); if (mantis) { mantis_dvb_exit(mantis); mantis_dma_exit(mantis); mantis_i2c_exit(mantis); mantis_pci_exit(mantis); kfree(mantis); } return; } static struct pci_device_id hopper_pci_table[] = { MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3028_DVB_T, &vp3028_config), { } }; MODULE_DEVICE_TABLE(pci, hopper_pci_table); static struct pci_driver hopper_pci_driver = { .name = DRIVER_NAME, .id_table = hopper_pci_table, .probe = hopper_pci_probe, .remove = hopper_pci_remove, }; static int __devinit hopper_init(void) { return pci_register_driver(&hopper_pci_driver); } static void __devexit hopper_exit(void) { return pci_unregister_driver(&hopper_pci_driver); } module_init(hopper_init); module_exit(hopper_exit); MODULE_DESCRIPTION("HOPPER driver"); MODULE_AUTHOR("Manu Abraham"); MODULE_LICENSE("GPL");
gpl-2.0
anoane/ville-4.2.2-sense5-evitaul_porting
fs/ext3/hash.c
3811
4397
/*
 * linux/fs/ext3/hash.c
 *
 * Copyright (C) 2002 by Theodore Ts'o
 *
 * This file is released under the GPL v2.
 *
 * This file may be redistributed under the terms of the GNU Public
 * License.
 */

/*
 * NOTE(review): these hash functions define the on-disk htree directory
 * index format.  Any change to the arithmetic (including its deliberate
 * reliance on 32-bit unsigned wraparound) silently corrupts existing
 * directories, so the bodies must stay bit-for-bit identical.
 */

#include "ext3.h"
#include <linux/cryptohash.h>

#define DELTA 0x9E3779B9

/*
 * One 16-round TEA (Tiny Encryption Algorithm) mix of the 128-bit input
 * block in[] into the running 64-bit state buf[0..1].  buf[2..3] are the
 * key halves c/d and are left unmodified.
 */
static void TEA_transform(__u32 buf[4], __u32 const in[])
{
	__u32	sum = 0;
	__u32	b0 = buf[0], b1 = buf[1];
	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
	int	n = 16;

	do {
		sum += DELTA;
		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
	} while(--n);

	buf[0] += b0;
	buf[1] += b1;
}

/* The old legacy hash */
/*
 * Unsigned-char variant of the legacy hash.  The signed/unsigned pair
 * exists because the original code's use of plain `char' made the result
 * depend on the platform's char signedness; both variants are kept so
 * either flavor of existing on-disk directories can still be read.
 */
static __u32 dx_hack_hash_unsigned(const char *name, int len)
{
	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const unsigned char *ucp = (const unsigned char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));

		/* Keep the value in 31 bits (historic quirk, on-disk). */
		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

/* Signed-char variant of the legacy hash; see note above. */
static __u32 dx_hack_hash_signed(const char *name, int len)
{
	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const signed char *scp = (const signed char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));

		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

/*
 * Pack up to num*4 bytes of msg into the __u32 buffer buf[0..num-1],
 * big-endian within each word, sign-extending each byte.  Unused space
 * is filled with a pad word derived from len.
 */
static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
{
	__u32	pad, val;
	int	i;
	const signed char *scp = (const signed char *) msg;

	pad = (__u32)len | ((__u32)len << 8);
	pad |= pad << 16;

	val = pad;
	if (len > num*4)
		len = num * 4;
	for (i = 0; i < len; i++) {
		if ((i % 4) == 0)
			val = pad;
		val = ((int) scp[i]) + (val << 8);
		if ((i % 4) == 3) {
			*buf++ = val;
			val = pad;
			num--;
		}
	}
	if (--num >= 0)
		*buf++ = val;
	while (--num >= 0)
		*buf++ = pad;
}

/* As str2hashbuf_signed(), but bytes are zero-extended. */
static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
{
	__u32	pad, val;
	int	i;
	const unsigned char *ucp = (const unsigned char *) msg;

	pad = (__u32)len | ((__u32)len << 8);
	pad |= pad << 16;

	val = pad;
	if (len > num*4)
		len = num * 4;
	for (i=0; i < len; i++) {
		if ((i % 4) == 0)
			val = pad;
		val = ((int) ucp[i]) + (val << 8);
		if ((i % 4) == 3) {
			*buf++ = val;
			val = pad;
			num--;
		}
	}
	if (--num >= 0)
		*buf++ = val;
	while (--num >= 0)
		*buf++ = pad;
}

/*
 * Returns the hash of a filename.  If len is 0 and name is NULL, then
 * this function can be used to test whether or not a hash version is
 * supported.
 *
 * The seed is an 4 longword (32 bits) "secret" which can be used to
 * uniquify a hash.  If the seed is all zero's, then some default seed
 * may be used.
 *
 * A particular hash version specifies whether or not the seed is
 * represented, and whether or not the returned hash is 32 bits or 64
 * bits.  32 bit hashes will return 0 for the minor hash.
 *
 * On success returns 0 with hinfo->hash (and hinfo->minor_hash for
 * 64-bit versions) filled in; returns -1 for an unknown hash_version.
 */
int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
{
	__u32	hash;
	__u32	minor_hash = 0;
	const char	*p;
	int		i;
	__u32		in[8], buf[4];
	void		(*str2hashbuf)(const char *, int, __u32 *, int) =
				str2hashbuf_signed;

	/* Initialize the default seed for the hash checksum functions */
	buf[0] = 0x67452301;
	buf[1] = 0xefcdab89;
	buf[2] = 0x98badcfe;
	buf[3] = 0x10325476;

	/* Check to see if the seed is all zero's */
	if (hinfo->seed) {
		for (i=0; i < 4; i++) {
			if (hinfo->seed[i])
				break;
		}
		/* A non-zero seed overrides the defaults above. */
		if (i < 4)
			memcpy(buf, hinfo->seed, sizeof(buf));
	}

	switch (hinfo->hash_version) {
	case DX_HASH_LEGACY_UNSIGNED:
		hash = dx_hack_hash_unsigned(name, len);
		break;
	case DX_HASH_LEGACY:
		hash = dx_hack_hash_signed(name, len);
		break;
	case DX_HASH_HALF_MD4_UNSIGNED:
		str2hashbuf = str2hashbuf_unsigned;
		/* fall through -- only the byte packer differs */
	case DX_HASH_HALF_MD4:
		p = name;
		while (len > 0) {
			(*str2hashbuf)(p, len, in, 8);
			half_md4_transform(buf, in);
			len -= 32;
			p += 32;
		}
		minor_hash = buf[2];
		hash = buf[1];
		break;
	case DX_HASH_TEA_UNSIGNED:
		str2hashbuf = str2hashbuf_unsigned;
		/* fall through -- only the byte packer differs */
	case DX_HASH_TEA:
		p = name;
		while (len > 0) {
			(*str2hashbuf)(p, len, in, 4);
			TEA_transform(buf, in);
			len -= 16;
			p += 16;
		}
		hash = buf[0];
		minor_hash = buf[1];
		break;
	default:
		hinfo->hash = 0;
		return -1;
	}
	/* Clear bit 0; odd values are reserved (EOF marker below). */
	hash = hash & ~1;
	if (hash == (EXT3_HTREE_EOF << 1))
		hash = (EXT3_HTREE_EOF-1) << 1;
	hinfo->hash = hash;
	hinfo->minor_hash = minor_hash;
	return 0;
}
gpl-2.0
arkas/Samsung-GT-I5510-Kernel
arch/ia64/hp/sim/boot/fw-emu.c
4835
11574
/*
 * PAL & SAL emulation.
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * NOTE(review): this is simulator boot firmware.  It runs before any
 * relocation, so the code deliberately avoids constructs (e.g. switch
 * jump tables) that are not self-relocatable -- do not "clean up" the
 * if/else chains below.
 */

#ifdef CONFIG_PCI
# include <linux/pci.h>
#endif

#include <linux/efi.h>
#include <asm/io.h>
#include <asm/pal.h>
#include <asm/sal.h>

#include "ssc.h"

#define MB	(1024*1024UL)

#define SIMPLE_MEMMAP	1

#if SIMPLE_MEMMAP
# define NUM_MEM_DESCS	4
#else
# define NUM_MEM_DESCS	16
#endif

/*
 * Static arena carved up by sys_fw_init() to hold every firmware table
 * (EFI system table, runtime services, config table, SAL systab, memory
 * map, boot param block) plus up to 1023 bytes of command line.
 */
static char fw_mem[(  sizeof(struct ia64_boot_param)
		    + sizeof(efi_system_table_t)
		    + sizeof(efi_runtime_services_t)
		    + 1*sizeof(efi_config_table_t)
		    + sizeof(struct ia64_sal_systab)
		    + sizeof(struct ia64_sal_desc_entry_point)
		    + NUM_MEM_DESCS*(sizeof(efi_memory_desc_t))
		    + 1024)] __attribute__ ((aligned (8)));

#define SECS_PER_HOUR   (60 * 60)
#define SECS_PER_DAY    (SECS_PER_HOUR * 24)

/* Compute the `struct tm' representation of *T,
   offset OFFSET seconds east of UTC,
   and store year, yday, mon, mday, wday, hour, min, sec into *TP.
   Return nonzero if successful.  */
int
offtime (unsigned long t, efi_time_t *tp)
{
	const unsigned short int __mon_yday[2][13] =
	{
		/* Normal years.  */
		{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
		/* Leap years.  */
		{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
	};
	long int days, rem, y;
	const unsigned short int *ip;

	days = t / SECS_PER_DAY;
	rem = t % SECS_PER_DAY;
	/* Normalize rem into [0, SECS_PER_DAY). */
	while (rem < 0) {
		rem += SECS_PER_DAY;
		--days;
	}
	while (rem >= SECS_PER_DAY) {
		rem -= SECS_PER_DAY;
		++days;
	}
	tp->hour = rem / SECS_PER_HOUR;
	rem %= SECS_PER_HOUR;
	tp->minute = rem / 60;
	tp->second = rem % 60;

	/* January 1, 1970 was a Thursday.  */
	y = 1970;

#	define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
#	define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
#	define __isleap(year) \
	  ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))

	while (days < 0 || days >= (__isleap (y) ? 366 : 365)) {
		/* Guess a corrected year, assuming 365 days per year.  */
		long int yg = y + days / 365 - (days % 365 < 0);

		/* Adjust DAYS and Y to match the guessed year.  */
		days -= ((yg - y) * 365
			 + LEAPS_THRU_END_OF (yg - 1)
			 - LEAPS_THRU_END_OF (y - 1));
		y = yg;
	}
	tp->year = y;

	ip = __mon_yday[__isleap(y)];
	for (y = 11; days < (long int) ip[y]; --y)
		continue;
	days -= ip[y];
	tp->month = y + 1;
	tp->day = days + 1;
	return 1;
}

extern void pal_emulator_static (void);

/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */

#define BUILD_CMD(addr)		((0x80000000 | (addr)) & ~3)

#define REG_OFFSET(addr)	(0x00000000000000FF & (addr))
#define DEVICE_FUNCTION(addr)	(0x000000000000FF00 & (addr))
#define BUS_NUMBER(addr)	(0x0000000000FF0000 & (addr))

/*
 * EFI get_time emulation: fetch the time-of-day from the simulator via
 * an SSC call and convert it with offtime().  Capabilities (if asked
 * for) are reported as all-zero.
 */
static efi_status_t
fw_efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
{
#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
	struct {
		int tv_sec;	/* must be 32bits to work */
		int tv_usec;
	} tv32bits;

	ssc((unsigned long) &tv32bits, 0, 0, 0, SSC_GET_TOD);

	memset(tm, 0, sizeof(*tm));
	offtime(tv32bits.tv_sec, tm);

	if (tc)
		memset(tc, 0, sizeof(*tc));
#else
#	error Not implemented yet...
#endif
	return EFI_SUCCESS;
}

/* EFI reset emulation: simply exit the simulator. */
static void
efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data)
{
#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
	ssc(status, 0, 0, 0, SSC_EXIT);
#else
#	error Not implemented yet...
#endif
}

/* Stub installed for every EFI runtime service we do not emulate. */
static efi_status_t
efi_unimplemented (void)
{
	return EFI_UNSUPPORTED;
}

/*
 * Minimal SAL procedure emulation.  Returns the standard four-register
 * SAL result {status, r9, r10, r11}.  Most indices are accepted as
 * harmless no-ops; SAL_FREQ_BASE and (with CONFIG_PCI) config-space
 * reads/writes are actually implemented.
 */
static struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2,
	      unsigned long in3, unsigned long in4, unsigned long in5,
	      unsigned long in6, unsigned long in7)
{
	long r9  = 0;
	long r10 = 0;
	long r11 = 0;
	long status;

	/*
	 * Don't do a "switch" here since that gives us code that
	 * isn't self-relocatable.
	 */
	status = 0;
	if (index == SAL_FREQ_BASE) {
		switch (in1) {
		      case SAL_FREQ_BASE_PLATFORM:
			r9 = 200000000;
			break;

		      case SAL_FREQ_BASE_INTERVAL_TIMER:
			/*
			 * Is this supposed to be the cr.itc frequency
			 * or something platform specific?  The SAL
			 * doc ain't exactly clear on this...
			 */
			r9 = 700000000;
			break;

		      case SAL_FREQ_BASE_REALTIME_CLOCK:
			r9 = 1;
			break;

		      default:
			status = -1;
			break;
		}
	} else if (index == SAL_SET_VECTORS) {
		;
	} else if (index == SAL_GET_STATE_INFO) {
		;
	} else if (index == SAL_GET_STATE_INFO_SIZE) {
		;
	} else if (index == SAL_CLEAR_STATE_INFO) {
		;
	} else if (index == SAL_MC_RENDEZ) {
		;
	} else if (index == SAL_MC_SET_PARAMS) {
		;
	} else if (index == SAL_CACHE_FLUSH) {
		;
	} else if (index == SAL_CACHE_INIT) {
		;
#ifdef CONFIG_PCI
	} else if (index == SAL_PCI_CONFIG_READ) {
		/*
		 * in1 contains the PCI configuration address and in2
		 * the size of the read.  The value that is read is
		 * returned via the general register r9.
		 */
		outl(BUILD_CMD(in1), 0xCF8);

		if (in2 == 1)				/* Reading byte  */
			r9 = inb(0xCFC + ((REG_OFFSET(in1) & 3)));
		else if (in2 == 2)			/* Reading word  */
			r9 = inw(0xCFC + ((REG_OFFSET(in1) & 2)));
		else					/* Reading dword */
			r9 = inl(0xCFC);
		status = PCIBIOS_SUCCESSFUL;
	} else if (index == SAL_PCI_CONFIG_WRITE) {
		/*
		 * in1 contains the PCI configuration address, in2 the
		 * size of the write, and in3 the actual value to be
		 * written out.
		 */
		outl(BUILD_CMD(in1), 0xCF8);

		if (in2 == 1)				/* Writing byte  */
			outb(in3, 0xCFC + ((REG_OFFSET(in1) & 3)));
		else if (in2 == 2)			/* Writing word  */
			outw(in3, 0xCFC + ((REG_OFFSET(in1) & 2)));
		else					/* Writing dword */
			outl(in3, 0xCFC);
		status = PCIBIOS_SUCCESSFUL;
#endif /* CONFIG_PCI */
	} else if (index == SAL_UPDATE_PAL) {
		;
	} else {
		status = -1;
	}
	return ((struct sal_ret_values) {status, r9, r10, r11});
}

/*
 * Build the complete fake firmware environment inside fw_mem: EFI
 * system/runtime/config tables, SAL system table and entry point,
 * memory map and boot parameter block.  The carve-up order below also
 * defines the SAL checksum range (sal_systab .. efi_memmap), so the
 * statement order is load-bearing.
 *
 * Returns the boot parameter block handed to the kernel.
 */
struct ia64_boot_param *
sys_fw_init (const char *args, int arglen)
{
	efi_system_table_t *efi_systab;
	efi_runtime_services_t *efi_runtime;
	efi_config_table_t *efi_tables;
	struct ia64_sal_systab *sal_systab;
	efi_memory_desc_t *efi_memmap, *md;
	unsigned long *pal_desc, *sal_desc;
	struct ia64_sal_desc_entry_point *sal_ed;
	struct ia64_boot_param *bp;
	unsigned char checksum = 0;
	char *cp, *cmd_line;
	int i = 0;

	/* Append one memory descriptor of the given type/attribute. */
#	define MAKE_MD(typ, attr, start, end)		\
	do {						\
		md = efi_memmap + i++;			\
		md->type = typ;				\
		md->pad = 0;				\
		md->phys_addr = start;			\
		md->virt_addr = 0;			\
		md->num_pages = (end - start) >> 12;	\
		md->attribute = attr;			\
	} while (0)

	memset(fw_mem, 0, sizeof(fw_mem));

	pal_desc = (unsigned long *) &pal_emulator_static;
	sal_desc = (unsigned long *) &sal_emulator;

	/* Carve fw_mem into the individual tables (order matters). */
	cp = fw_mem;
	efi_systab  = (void *) cp; cp += sizeof(*efi_systab);
	efi_runtime = (void *) cp; cp += sizeof(*efi_runtime);
	efi_tables  = (void *) cp; cp += sizeof(*efi_tables);
	sal_systab  = (void *) cp; cp += sizeof(*sal_systab);
	sal_ed      = (void *) cp; cp += sizeof(*sal_ed);
	efi_memmap  = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap);
	bp	    = (void *) cp; cp += sizeof(*bp);
	cmd_line    = (void *) cp;

	if (args) {
		if (arglen >= 1024)
			arglen = 1023;
		memcpy(cmd_line, args, arglen);
	} else {
		arglen = 0;
	}
	cmd_line[arglen] = '\0';

	memset(efi_systab, 0, sizeof(*efi_systab));
	efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
	efi_systab->hdr.revision  = ((1 << 16) | 00);
	efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
	/* UCS-2 vendor string, built byte-by-byte with embedded NULs. */
	efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0");
	efi_systab->fw_revision = 1;
	efi_systab->runtime = (void *) __pa(efi_runtime);
	efi_systab->nr_tables = 1;
	efi_systab->tables = __pa(efi_tables);

	efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
	efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
	efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
	efi_runtime->get_time = __pa(&fw_efi_get_time);
	efi_runtime->set_time = __pa(&efi_unimplemented);
	efi_runtime->get_wakeup_time = __pa(&efi_unimplemented);
	efi_runtime->set_wakeup_time = __pa(&efi_unimplemented);
	efi_runtime->set_virtual_address_map = __pa(&efi_unimplemented);
	efi_runtime->get_variable = __pa(&efi_unimplemented);
	efi_runtime->get_next_variable = __pa(&efi_unimplemented);
	efi_runtime->set_variable = __pa(&efi_unimplemented);
	efi_runtime->get_next_high_mono_count = __pa(&efi_unimplemented);
	efi_runtime->reset_system = __pa(&efi_reset_system);

	efi_tables->guid = SAL_SYSTEM_TABLE_GUID;
	efi_tables->table = __pa(sal_systab);

	/* fill in the SAL system table: */
	memcpy(sal_systab->signature, "SST_", 4);
	sal_systab->size = sizeof(*sal_systab);
	sal_systab->sal_rev_minor = 1;
	sal_systab->sal_rev_major = 0;
	sal_systab->entry_count = 1;

#ifdef CONFIG_IA64_GENERIC
	strcpy(sal_systab->oem_id, "Generic");
	strcpy(sal_systab->product_id, "IA-64 system");
#endif

#ifdef CONFIG_IA64_HP_SIM
	strcpy(sal_systab->oem_id, "Hewlett-Packard");
	strcpy(sal_systab->product_id, "HP-simulator");
#endif

	/* fill in an entry point: */
	sal_ed->type = SAL_DESC_ENTRY_POINT;
	sal_ed->pal_proc = __pa(pal_desc[0]);
	sal_ed->sal_proc = __pa(sal_desc[0]);
	sal_ed->gp = __pa(sal_desc[1]);

	/* Checksum covers sal_systab up to (not including) efi_memmap. */
	for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp)
		checksum += *cp;

	sal_systab->checksum = -checksum;

#if SIMPLE_MEMMAP
	/* simulate free memory at physical address zero */
	MAKE_MD(EFI_BOOT_SERVICES_DATA, EFI_MEMORY_WB, 0*MB, 1*MB);
	MAKE_MD(EFI_PAL_CODE, EFI_MEMORY_WB, 1*MB, 2*MB);
	MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 2*MB, 130*MB);
	MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 4096*MB, 4128*MB);
#else
	MAKE_MD( 4, 0x9, 0x0000000000000000, 0x0000000000001000);
	MAKE_MD( 7, 0x9, 0x0000000000001000, 0x000000000008a000);
	MAKE_MD( 4, 0x9, 0x000000000008a000, 0x00000000000a0000);
	MAKE_MD( 5, 0x8000000000000009, 0x00000000000c0000, 0x0000000000100000);
	MAKE_MD( 7, 0x9, 0x0000000000100000, 0x0000000004400000);
	MAKE_MD( 2, 0x9, 0x0000000004400000, 0x0000000004be5000);
	MAKE_MD( 7, 0x9, 0x0000000004be5000, 0x000000007f77e000);
	MAKE_MD( 6, 0x8000000000000009, 0x000000007f77e000, 0x000000007fb94000);
	MAKE_MD( 6, 0x8000000000000009, 0x000000007fb94000, 0x000000007fb95000);
	MAKE_MD( 6, 0x8000000000000009, 0x000000007fb95000, 0x000000007fc00000);
	MAKE_MD(13, 0x8000000000000009, 0x000000007fc00000, 0x000000007fc3a000);
	MAKE_MD( 7, 0x9, 0x000000007fc3a000, 0x000000007fea0000);
	MAKE_MD( 5, 0x8000000000000009, 0x000000007fea0000, 0x000000007fea8000);
	MAKE_MD( 7, 0x9, 0x000000007fea8000, 0x000000007feab000);
	MAKE_MD( 5, 0x8000000000000009, 0x000000007feab000, 0x000000007ffff000);
	MAKE_MD( 7, 0x9, 0x00000000ff400000, 0x0000000104000000);
#endif

	bp->efi_systab = __pa(&fw_mem);
	bp->efi_memmap = __pa(efi_memmap);
	bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t);
	bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
	bp->efi_memdesc_version = 1;
	bp->command_line = __pa(cmd_line);
	bp->console_info.num_cols = 80;
	bp->console_info.num_rows = 25;
	bp->console_info.orig_x = 0;
	bp->console_info.orig_y = 24;
	bp->fpswa = 0;

	return bp;
}
gpl-2.0
vuanhduy/odroidxu-3.4.y
net/wireless/wext-sme.c
4835
9469
/*
 * cfg80211 wext compat for managed mode.
 *
 * Copyright 2009	Johannes Berg <johannes@sipsolutions.net>
 * Copyright (C) 2009   Intel Corporation. All rights reserved.
 */

/*
 * NOTE(review): every handler below takes locks in the fixed order
 * cfg80211_lock_rdev -> rdev->devlist_mtx -> wdev_lock.  Keep that
 * ordering when modifying this file.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <net/cfg80211.h>
#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "nl80211.h"

/*
 * Kick off a connect attempt using the parameters accumulated in
 * wdev->wext (SSID, channel, keys, IEs).  A no-op (returning 0) when
 * the interface is down or no SSID has been set yet.  On failure the
 * duplicated key material is freed here; on success ownership of it
 * passes to __cfg80211_connect().
 */
int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
			      struct wireless_dev *wdev)
{
	struct cfg80211_cached_keys *ck = NULL;
	const u8 *prev_bssid = NULL;
	int err, i;

	ASSERT_RDEV_LOCK(rdev);
	ASSERT_WDEV_LOCK(wdev);

	if (!netif_running(wdev->netdev))
		return 0;

	wdev->wext.connect.ie = wdev->wext.ie;
	wdev->wext.connect.ie_len = wdev->wext.ie_len;

	/* Use default background scan period */
	wdev->wext.connect.bg_scan_period = -1;

	if (wdev->wext.keys) {
		wdev->wext.keys->def = wdev->wext.default_key;
		wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
		if (wdev->wext.default_key != -1)
			wdev->wext.connect.privacy = true;
	}

	if (!wdev->wext.connect.ssid_len)
		return 0;

	if (wdev->wext.keys) {
		ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
		if (!ck)
			return -ENOMEM;
		/* Re-point the copied key params at the copy's own data. */
		for (i = 0; i < 6; i++)
			ck->params[i].key = ck->data[i];
	}

	if (wdev->wext.prev_bssid_valid)
		prev_bssid = wdev->wext.prev_bssid;

	err = __cfg80211_connect(rdev, wdev->netdev,
				 &wdev->wext.connect, ck, prev_bssid);
	if (err)
		kfree(ck);

	return err;
}

/*
 * SIOCSIWFREQ: set the operating channel.  If we are associated (or
 * associating) we disconnect first; if only the channel (no SSID) is
 * configured we just switch channels, otherwise we re-connect.
 */
int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
			      struct iw_request_info *info,
			      struct iw_freq *wextfreq, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
	struct ieee80211_channel *chan = NULL;
	int err, freq;

	/* call only for station! */
	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
		return -EINVAL;

	freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
	if (freq < 0)
		return freq;

	if (freq) {
		chan = ieee80211_get_channel(wdev->wiphy, freq);
		if (!chan)
			return -EINVAL;
		if (chan->flags & IEEE80211_CHAN_DISABLED)
			return -EINVAL;
	}

	cfg80211_lock_rdev(rdev);
	mutex_lock(&rdev->devlist_mtx);
	wdev_lock(wdev);

	if (wdev->sme_state != CFG80211_SME_IDLE) {
		bool event = true;

		/* Already on the requested channel: nothing to do. */
		if (wdev->wext.connect.channel == chan) {
			err = 0;
			goto out;
		}

		/* if SSID set, we'll try right again, avoid event */
		if (wdev->wext.connect.ssid_len)
			event = false;
		err = __cfg80211_disconnect(rdev, dev,
					    WLAN_REASON_DEAUTH_LEAVING, event);
		if (err)
			goto out;
	}

	wdev->wext.connect.channel = chan;

	/* SSID is not set, we just want to switch channel */
	if (chan && !wdev->wext.connect.ssid_len) {
		err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
		goto out;
	}

	err = cfg80211_mgd_wext_connect(rdev, wdev);
 out:
	wdev_unlock(wdev);
	mutex_unlock(&rdev->devlist_mtx);
	cfg80211_unlock_rdev(rdev);
	return err;
}

/*
 * SIOCGIWFREQ: report the current (or configured) channel, or -EINVAL
 * when neither associated nor configured.
 */
int cfg80211_mgd_wext_giwfreq(struct net_device *dev,
			      struct iw_request_info *info,
			      struct iw_freq *freq, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct ieee80211_channel *chan = NULL;

	/* call only for station! */
	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
		return -EINVAL;

	wdev_lock(wdev);
	if (wdev->current_bss)
		chan = wdev->current_bss->pub.channel;
	else if (wdev->wext.connect.channel)
		chan = wdev->wext.connect.channel;
	wdev_unlock(wdev);

	if (chan) {
		freq->m = chan->center_freq;
		freq->e = 6;
		return 0;
	}

	/* no channel if not joining */
	return -EINVAL;
}

/*
 * SIOCSIWESSID: set the SSID and (re-)connect.  An unchanged SSID while
 * connecting/connected is a no-op; otherwise we disconnect first.
 */
int cfg80211_mgd_wext_siwessid(struct net_device *dev,
			       struct iw_request_info *info,
			       struct iw_point *data, char *ssid)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
	size_t len = data->length;
	int err;

	/* call only for station! */
	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
		return -EINVAL;

	if (!data->flags)
		len = 0;

	/* iwconfig uses nul termination in SSID.. */
	if (len > 0 && ssid[len - 1] == '\0')
		len--;

	cfg80211_lock_rdev(rdev);
	mutex_lock(&rdev->devlist_mtx);
	wdev_lock(wdev);

	err = 0;

	if (wdev->sme_state != CFG80211_SME_IDLE) {
		bool event = true;

		/* Same non-empty SSID as before: nothing to do. */
		if (wdev->wext.connect.ssid && len &&
		    len == wdev->wext.connect.ssid_len &&
		    memcmp(wdev->wext.connect.ssid, ssid, len) == 0)
			goto out;

		/* if SSID set now, we'll try to connect, avoid event */
		if (len)
			event = false;
		err = __cfg80211_disconnect(rdev, dev,
					    WLAN_REASON_DEAUTH_LEAVING, event);
		if (err)
			goto out;
	}

	wdev->wext.prev_bssid_valid = false;
	wdev->wext.connect.ssid = wdev->wext.ssid;
	memcpy(wdev->wext.ssid, ssid, len);
	wdev->wext.connect.ssid_len = len;

	wdev->wext.connect.crypto.control_port = false;
	wdev->wext.connect.crypto.control_port_ethertype =
					cpu_to_be16(ETH_P_PAE);

	err = cfg80211_mgd_wext_connect(rdev, wdev);
 out:
	wdev_unlock(wdev);
	mutex_unlock(&rdev->devlist_mtx);
	cfg80211_unlock_rdev(rdev);
	return err;
}

/*
 * SIOCGIWESSID: report the SSID of the current BSS (taken from its SSID
 * IE) or, if not associated, the configured SSID.  data->flags is 1
 * when an SSID is reported, 0 otherwise.
 */
int cfg80211_mgd_wext_giwessid(struct net_device *dev,
			       struct iw_request_info *info,
			       struct iw_point *data, char *ssid)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;

	/* call only for station! */
	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
		return -EINVAL;

	data->flags = 0;

	wdev_lock(wdev);
	if (wdev->current_bss) {
		const u8 *ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
						    WLAN_EID_SSID);
		if (ie) {
			data->flags = 1;
			data->length = ie[1];
			memcpy(ssid, ie + 2, data->length);
		}
	} else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
		data->flags = 1;
		data->length = wdev->wext.connect.ssid_len;
		memcpy(ssid, wdev->wext.connect.ssid, data->length);
	}
	wdev_unlock(wdev);

	return 0;
}

/*
 * SIOCSIWAP: fix (or clear) the desired BSSID and (re-)connect.
 * A zero or broadcast address selects automatic BSS choice.
 */
int cfg80211_mgd_wext_siwap(struct net_device *dev,
			    struct iw_request_info *info,
			    struct sockaddr *ap_addr, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
	u8 *bssid = ap_addr->sa_data;
	int err;

	/* call only for station! */
	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
		return -EINVAL;

	if (ap_addr->sa_family != ARPHRD_ETHER)
		return -EINVAL;

	/* automatic mode */
	if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
		bssid = NULL;

	cfg80211_lock_rdev(rdev);
	mutex_lock(&rdev->devlist_mtx);
	wdev_lock(wdev);

	if (wdev->sme_state != CFG80211_SME_IDLE) {
		err = 0;
		/* both automatic */
		if (!bssid && !wdev->wext.connect.bssid)
			goto out;

		/* fixed already - and no change */
		if (wdev->wext.connect.bssid && bssid &&
		    compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0)
			goto out;

		err = __cfg80211_disconnect(rdev, dev,
					    WLAN_REASON_DEAUTH_LEAVING, false);
		if (err)
			goto out;
	}

	if (bssid) {
		memcpy(wdev->wext.bssid, bssid, ETH_ALEN);
		wdev->wext.connect.bssid = wdev->wext.bssid;
	} else
		wdev->wext.connect.bssid = NULL;

	err = cfg80211_mgd_wext_connect(rdev, wdev);
 out:
	wdev_unlock(wdev);
	mutex_unlock(&rdev->devlist_mtx);
	cfg80211_unlock_rdev(rdev);
	return err;
}

/*
 * SIOCGIWAP: report the BSSID of the current BSS, or all-zeros when
 * not associated.
 */
int cfg80211_mgd_wext_giwap(struct net_device *dev,
			    struct iw_request_info *info,
			    struct sockaddr *ap_addr, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;

	/* call only for station! */
	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
		return -EINVAL;

	ap_addr->sa_family = ARPHRD_ETHER;

	wdev_lock(wdev);
	if (wdev->current_bss)
		memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
	else
		memset(ap_addr->sa_data, 0, ETH_ALEN);
	wdev_unlock(wdev);

	return 0;
}

/*
 * SIOCSIWGENIE: replace the extra IEs used for (re)association.  When
 * already connecting/connected we disconnect; userspace is expected to
 * trigger the reconnect itself.
 */
int cfg80211_wext_siwgenie(struct net_device *dev,
			   struct iw_request_info *info,
			   struct iw_point *data, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
	u8 *ie = extra;
	int ie_len = data->length, err;

	if (wdev->iftype != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	if (!ie_len)
		ie = NULL;

	wdev_lock(wdev);

	/* no change */
	err = 0;
	if (wdev->wext.ie_len == ie_len &&
	    memcmp(wdev->wext.ie, ie, ie_len) == 0)
		goto out;

	if (ie_len) {
		ie = kmemdup(extra, ie_len, GFP_KERNEL);
		if (!ie) {
			err = -ENOMEM;
			goto out;
		}
	} else
		ie = NULL;

	kfree(wdev->wext.ie);
	wdev->wext.ie = ie;
	wdev->wext.ie_len = ie_len;

	if (wdev->sme_state != CFG80211_SME_IDLE) {
		err = __cfg80211_disconnect(rdev, dev,
					    WLAN_REASON_DEAUTH_LEAVING, false);
		if (err)
			goto out;
	}

	/* userspace better not think we'll reconnect */
	err = 0;
 out:
	wdev_unlock(wdev);
	return err;
}

/*
 * SIOCSIWMLME: handle deauth/disassoc requests from userspace by
 * disconnecting with the supplied reason code.
 */
int cfg80211_wext_siwmlme(struct net_device *dev,
			  struct iw_request_info *info,
			  struct iw_point *data, char *extra)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct iw_mlme *mlme = (struct iw_mlme *)extra;
	struct cfg80211_registered_device *rdev;
	int err;

	if (!wdev)
		return -EOPNOTSUPP;

	rdev = wiphy_to_dev(wdev->wiphy);

	if (wdev->iftype != NL80211_IFTYPE_STATION)
		return -EINVAL;

	if (mlme->addr.sa_family != ARPHRD_ETHER)
		return -EINVAL;

	wdev_lock(wdev);
	switch (mlme->cmd) {
	case IW_MLME_DEAUTH:
	case IW_MLME_DISASSOC:
		err = __cfg80211_disconnect(rdev, dev, mlme->reason_code,
					    true);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	wdev_unlock(wdev);

	return err;
}
gpl-2.0
rogrady/lin_imx6
net/sched/ematch.c
7651
14816
/* * net/sched/ematch.c Extended Match API * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> * * ========================================================================== * * An extended match (ematch) is a small classification tool not worth * writing a full classifier for. Ematches can be interconnected to form * a logic expression and get attached to classifiers to extend their * functionatlity. * * The userspace part transforms the logic expressions into an array * consisting of multiple sequences of interconnected ematches separated * by markers. Precedence is implemented by a special ematch kind * referencing a sequence beyond the marker of the current sequence * causing the current position in the sequence to be pushed onto a stack * to allow the current position to be overwritten by the position referenced * in the special ematch. Matching continues in the new sequence until a * marker is reached causing the position to be restored from the stack. * * Example: * A AND (B1 OR B2) AND C AND D * * ------->-PUSH------- * -->-- / -->-- \ -->-- * / \ / / \ \ / \ * +-------+-------+-------+-------+-------+--------+ * | A AND | B AND | C AND | D END | B1 OR | B2 END | * +-------+-------+-------+-------+-------+--------+ * \ / * --------<-POP--------- * * where B is a virtual ematch referencing to sequence starting with B1. * * ========================================================================== * * How to write an ematch in 60 seconds * ------------------------------------ * * 1) Provide a matcher function: * static int my_match(struct sk_buff *skb, struct tcf_ematch *m, * struct tcf_pkt_info *info) * { * struct mydata *d = (struct mydata *) m->data; * * if (...matching goes here...) 
* return 1; * else * return 0; * } * * 2) Fill out a struct tcf_ematch_ops: * static struct tcf_ematch_ops my_ops = { * .kind = unique id, * .datalen = sizeof(struct mydata), * .match = my_match, * .owner = THIS_MODULE, * }; * * 3) Register/Unregister your ematch: * static int __init init_my_ematch(void) * { * return tcf_em_register(&my_ops); * } * * static void __exit exit_my_ematch(void) * { * tcf_em_unregister(&my_ops); * } * * module_init(init_my_ematch); * module_exit(exit_my_ematch); * * 4) By now you should have two more seconds left, barely enough to * open up a beer to watch the compilation going. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <net/pkt_cls.h> static LIST_HEAD(ematch_ops); static DEFINE_RWLOCK(ematch_mod_lock); static struct tcf_ematch_ops *tcf_em_lookup(u16 kind) { struct tcf_ematch_ops *e = NULL; read_lock(&ematch_mod_lock); list_for_each_entry(e, &ematch_ops, link) { if (kind == e->kind) { if (!try_module_get(e->owner)) e = NULL; read_unlock(&ematch_mod_lock); return e; } } read_unlock(&ematch_mod_lock); return NULL; } /** * tcf_em_register - register an extended match * * @ops: ematch operations lookup table * * This function must be called by ematches to announce their presence. * The given @ops must have kind set to a unique identifier and the * callback match() must be implemented. All other callbacks are optional * and a fallback implementation is used instead. * * Returns -EEXISTS if an ematch of the same kind has already registered. 
*/ int tcf_em_register(struct tcf_ematch_ops *ops) { int err = -EEXIST; struct tcf_ematch_ops *e; if (ops->match == NULL) return -EINVAL; write_lock(&ematch_mod_lock); list_for_each_entry(e, &ematch_ops, link) if (ops->kind == e->kind) goto errout; list_add_tail(&ops->link, &ematch_ops); err = 0; errout: write_unlock(&ematch_mod_lock); return err; } EXPORT_SYMBOL(tcf_em_register); /** * tcf_em_unregister - unregster and extended match * * @ops: ematch operations lookup table * * This function must be called by ematches to announce their disappearance * for examples when the module gets unloaded. The @ops parameter must be * the same as the one used for registration. * * Returns -ENOENT if no matching ematch was found. */ void tcf_em_unregister(struct tcf_ematch_ops *ops) { write_lock(&ematch_mod_lock); list_del(&ops->link); write_unlock(&ematch_mod_lock); } EXPORT_SYMBOL(tcf_em_unregister); static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree, int index) { return &tree->matches[index]; } static int tcf_em_validate(struct tcf_proto *tp, struct tcf_ematch_tree_hdr *tree_hdr, struct tcf_ematch *em, struct nlattr *nla, int idx) { int err = -EINVAL; struct tcf_ematch_hdr *em_hdr = nla_data(nla); int data_len = nla_len(nla) - sizeof(*em_hdr); void *data = (void *) em_hdr + sizeof(*em_hdr); if (!TCF_EM_REL_VALID(em_hdr->flags)) goto errout; if (em_hdr->kind == TCF_EM_CONTAINER) { /* Special ematch called "container", carries an index * referencing an external ematch sequence. */ u32 ref; if (data_len < sizeof(ref)) goto errout; ref = *(u32 *) data; if (ref >= tree_hdr->nmatches) goto errout; /* We do not allow backward jumps to avoid loops and jumps * to our own position are of course illegal. */ if (ref <= idx) goto errout; em->data = ref; } else { /* Note: This lookup will increase the module refcnt * of the ematch module referenced. 
In case of a failure, * a destroy function is called by the underlying layer * which automatically releases the reference again, therefore * the module MUST not be given back under any circumstances * here. Be aware, the destroy function assumes that the * module is held if the ops field is non zero. */ em->ops = tcf_em_lookup(em_hdr->kind); if (em->ops == NULL) { err = -ENOENT; #ifdef CONFIG_MODULES __rtnl_unlock(); request_module("ematch-kind-%u", em_hdr->kind); rtnl_lock(); em->ops = tcf_em_lookup(em_hdr->kind); if (em->ops) { /* We dropped the RTNL mutex in order to * perform the module load. Tell the caller * to replay the request. */ module_put(em->ops->owner); err = -EAGAIN; } #endif goto errout; } /* ematch module provides expected length of data, so we * can do a basic sanity check. */ if (em->ops->datalen && data_len < em->ops->datalen) goto errout; if (em->ops->change) { err = em->ops->change(tp, data, data_len, em); if (err < 0) goto errout; } else if (data_len > 0) { /* ematch module doesn't provide an own change * procedure and expects us to allocate and copy * the ematch data. * * TCF_EM_SIMPLE may be specified stating that the * data only consists of a u32 integer and the module * does not expected a memory reference but rather * the value carried. 
*/ if (em_hdr->flags & TCF_EM_SIMPLE) { if (data_len < sizeof(u32)) goto errout; em->data = *(u32 *) data; } else { void *v = kmemdup(data, data_len, GFP_KERNEL); if (v == NULL) { err = -ENOBUFS; goto errout; } em->data = (unsigned long) v; } } } em->matchid = em_hdr->matchid; em->flags = em_hdr->flags; em->datalen = data_len; err = 0; errout: return err; } static const struct nla_policy em_policy[TCA_EMATCH_TREE_MAX + 1] = { [TCA_EMATCH_TREE_HDR] = { .len = sizeof(struct tcf_ematch_tree_hdr) }, [TCA_EMATCH_TREE_LIST] = { .type = NLA_NESTED }, }; /** * tcf_em_tree_validate - validate ematch config TLV and build ematch tree * * @tp: classifier kind handle * @nla: ematch tree configuration TLV * @tree: destination ematch tree variable to store the resulting * ematch tree. * * This function validates the given configuration TLV @nla and builds an * ematch tree in @tree. The resulting tree must later be copied into * the private classifier data using tcf_em_tree_change(). You MUST NOT * provide the ematch tree variable of the private classifier data directly, * the changes would not be locked properly. * * Returns a negative error code if the configuration TLV contains errors. 
*/ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, struct tcf_ematch_tree *tree) { int idx, list_len, matches_len, err; struct nlattr *tb[TCA_EMATCH_TREE_MAX + 1]; struct nlattr *rt_match, *rt_hdr, *rt_list; struct tcf_ematch_tree_hdr *tree_hdr; struct tcf_ematch *em; memset(tree, 0, sizeof(*tree)); if (!nla) return 0; err = nla_parse_nested(tb, TCA_EMATCH_TREE_MAX, nla, em_policy); if (err < 0) goto errout; err = -EINVAL; rt_hdr = tb[TCA_EMATCH_TREE_HDR]; rt_list = tb[TCA_EMATCH_TREE_LIST]; if (rt_hdr == NULL || rt_list == NULL) goto errout; tree_hdr = nla_data(rt_hdr); memcpy(&tree->hdr, tree_hdr, sizeof(*tree_hdr)); rt_match = nla_data(rt_list); list_len = nla_len(rt_list); matches_len = tree_hdr->nmatches * sizeof(*em); tree->matches = kzalloc(matches_len, GFP_KERNEL); if (tree->matches == NULL) goto errout; /* We do not use nla_parse_nested here because the maximum * number of attributes is unknown. This saves us the allocation * for a tb buffer which would serve no purpose at all. * * The array of rt attributes is parsed in the order as they are * provided, their type must be incremental from 1 to n. Even * if it does not serve any real purpose, a failure of sticking * to this policy will result in parsing failure. */ for (idx = 0; nla_ok(rt_match, list_len); idx++) { err = -EINVAL; if (rt_match->nla_type != (idx + 1)) goto errout_abort; if (idx >= tree_hdr->nmatches) goto errout_abort; if (nla_len(rt_match) < sizeof(struct tcf_ematch_hdr)) goto errout_abort; em = tcf_em_get_match(tree, idx); err = tcf_em_validate(tp, tree_hdr, em, rt_match, idx); if (err < 0) goto errout_abort; rt_match = nla_next(rt_match, &list_len); } /* Check if the number of matches provided by userspace actually * complies with the array of matches. The number was used for * the validation of references and a mismatch could lead to * undefined references during the matching process. 
*/ if (idx != tree_hdr->nmatches) { err = -EINVAL; goto errout_abort; } err = 0; errout: return err; errout_abort: tcf_em_tree_destroy(tp, tree); return err; } EXPORT_SYMBOL(tcf_em_tree_validate); /** * tcf_em_tree_destroy - destroy an ematch tree * * @tp: classifier kind handle * @tree: ematch tree to be deleted * * This functions destroys an ematch tree previously created by * tcf_em_tree_validate()/tcf_em_tree_change(). You must ensure that * the ematch tree is not in use before calling this function. */ void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree) { int i; if (tree->matches == NULL) return; for (i = 0; i < tree->hdr.nmatches; i++) { struct tcf_ematch *em = tcf_em_get_match(tree, i); if (em->ops) { if (em->ops->destroy) em->ops->destroy(tp, em); else if (!tcf_em_is_simple(em)) kfree((void *) em->data); module_put(em->ops->owner); } } tree->hdr.nmatches = 0; kfree(tree->matches); tree->matches = NULL; } EXPORT_SYMBOL(tcf_em_tree_destroy); /** * tcf_em_tree_dump - dump ematch tree into a rtnl message * * @skb: skb holding the rtnl message * @t: ematch tree to be dumped * @tlv: TLV type to be used to encapsulate the tree * * This function dumps a ematch tree into a rtnl message. It is valid to * call this function while the ematch tree is in use. * * Returns -1 if the skb tailroom is insufficient. */ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) { int i; u8 *tail; struct nlattr *top_start; struct nlattr *list_start; top_start = nla_nest_start(skb, tlv); if (top_start == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr); list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST); if (list_start == NULL) goto nla_put_failure; tail = skb_tail_pointer(skb); for (i = 0; i < tree->hdr.nmatches; i++) { struct nlattr *match_start = (struct nlattr *)tail; struct tcf_ematch *em = tcf_em_get_match(tree, i); struct tcf_ematch_hdr em_hdr = { .kind = em->ops ? 
em->ops->kind : TCF_EM_CONTAINER, .matchid = em->matchid, .flags = em->flags }; NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); if (em->ops && em->ops->dump) { if (em->ops->dump(skb, em) < 0) goto nla_put_failure; } else if (tcf_em_is_container(em) || tcf_em_is_simple(em)) { u32 u = em->data; nla_put_nohdr(skb, sizeof(u), &u); } else if (em->datalen > 0) nla_put_nohdr(skb, em->datalen, (void *) em->data); tail = skb_tail_pointer(skb); match_start->nla_len = tail - (u8 *)match_start; } nla_nest_end(skb, list_start); nla_nest_end(skb, top_start); return 0; nla_put_failure: return -1; } EXPORT_SYMBOL(tcf_em_tree_dump); static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em, struct tcf_pkt_info *info) { int r = em->ops->match(skb, em, info); return tcf_em_is_inverted(em) ? !r : r; } /* Do not use this function directly, use tcf_em_tree_match instead */ int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree, struct tcf_pkt_info *info) { int stackp = 0, match_idx = 0, res = 0; struct tcf_ematch *cur_match; int stack[CONFIG_NET_EMATCH_STACK]; proceed: while (match_idx < tree->hdr.nmatches) { cur_match = tcf_em_get_match(tree, match_idx); if (tcf_em_is_container(cur_match)) { if (unlikely(stackp >= CONFIG_NET_EMATCH_STACK)) goto stack_overflow; stack[stackp++] = match_idx; match_idx = cur_match->data; goto proceed; } res = tcf_em_match(skb, cur_match, info); if (tcf_em_early_end(cur_match, res)) break; match_idx++; } pop_stack: if (stackp > 0) { match_idx = stack[--stackp]; cur_match = tcf_em_get_match(tree, match_idx); if (tcf_em_early_end(cur_match, res)) goto pop_stack; else { match_idx++; goto proceed; } } return res; stack_overflow: if (net_ratelimit()) pr_warning("tc ematch: local stack overflow," " increase NET_EMATCH_STACK\n"); return -1; } EXPORT_SYMBOL(__tcf_em_tree_match);
gpl-2.0
GustavoRD78/78Kernel-ZL-230
net/rds/iw_sysctl.c
8163
4338
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

/*
 * RDS-over-iWARP tunables, exposed under /proc/sys/net/rds/iw/.
 * The non-static variables below are read by the rest of the RDS iWARP
 * transport (declared in "iw.h"); the static _min/_max pairs only feed
 * proc_doulongvec_minmax range checking.
 */
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>

#include "iw.h"

/* handle returned by register_sysctl_paths(); NULL until init succeeds */
static struct ctl_table_header *rds_iw_sysctl_hdr;

/* max send/recv work requests per queue pair */
unsigned long rds_iw_sysctl_max_send_wr = RDS_IW_DEFAULT_SEND_WR;
unsigned long rds_iw_sysctl_max_recv_wr = RDS_IW_DEFAULT_RECV_WR;

/* cap on total receive-buffer allocation, expressed in fragments */
unsigned long rds_iw_sysctl_max_recv_allocation = (128 * 1024 * 1024) / RDS_FRAG_SIZE;

static unsigned long rds_iw_sysctl_max_wr_min = 1;
/* hardware will fail CQ creation long before this */
static unsigned long rds_iw_sysctl_max_wr_max = (u32)~0;

/* how many sends may be posted unsignaled before forcing a completion */
unsigned long rds_iw_sysctl_max_unsig_wrs = 16;
static unsigned long rds_iw_sysctl_max_unsig_wr_min = 1;
static unsigned long rds_iw_sysctl_max_unsig_wr_max = 64;

/* byte budget for unsignaled sends before forcing a completion */
unsigned long rds_iw_sysctl_max_unsig_bytes = (16 << 20);
static unsigned long rds_iw_sysctl_max_unsig_bytes_min = 1;
static unsigned long rds_iw_sysctl_max_unsig_bytes_max = ~0UL;

/* non-zero enables IB/iWARP flow control credits */
unsigned int rds_iw_sysctl_flow_control = 1;

static ctl_table rds_iw_sysctl_table[] = {
	{
		.procname	= "max_send_wr",
		.data		= &rds_iw_sysctl_max_send_wr,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &rds_iw_sysctl_max_wr_min,
		.extra2		= &rds_iw_sysctl_max_wr_max,
	},
	{
		.procname	= "max_recv_wr",
		.data		= &rds_iw_sysctl_max_recv_wr,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &rds_iw_sysctl_max_wr_min,
		.extra2		= &rds_iw_sysctl_max_wr_max,
	},
	{
		.procname	= "max_unsignaled_wr",
		.data		= &rds_iw_sysctl_max_unsig_wrs,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &rds_iw_sysctl_max_unsig_wr_min,
		.extra2		= &rds_iw_sysctl_max_unsig_wr_max,
	},
	{
		.procname	= "max_unsignaled_bytes",
		.data		= &rds_iw_sysctl_max_unsig_bytes,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &rds_iw_sysctl_max_unsig_bytes_min,
		.extra2		= &rds_iw_sysctl_max_unsig_bytes_max,
	},
	{
		/* no extra1/extra2: unbounded, plain ulong handler */
		.procname	= "max_recv_allocation",
		.data		= &rds_iw_sysctl_max_recv_allocation,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		/* NOTE(review): data is unsigned int but the handler is
		 * proc_dointvec (signed) — maxlen matches, so this works in
		 * practice; proc_douintvec did not exist in this kernel. */
		.procname	= "flow_control",
		.data		= &rds_iw_sysctl_flow_control,
		.maxlen		= sizeof(rds_iw_sysctl_flow_control),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

/* directory path net/rds/iw under /proc/sys */
static struct ctl_path rds_iw_sysctl_path[] = {
	{ .procname = "net", },
	{ .procname = "rds", },
	{ .procname = "iw", },
	{ }
};

/*
 * Tear down the sysctl registration; safe to call even if
 * rds_iw_sysctl_init() failed (hdr is then NULL).
 */
void rds_iw_sysctl_exit(void)
{
	if (rds_iw_sysctl_hdr)
		unregister_sysctl_table(rds_iw_sysctl_hdr);
}

/*
 * Register the table under /proc/sys/net/rds/iw/.
 * Returns 0 on success, -ENOMEM if registration failed.
 */
int rds_iw_sysctl_init(void)
{
	rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table);
	if (!rds_iw_sysctl_hdr)
		return -ENOMEM;
	return 0;
}
gpl-2.0
b8e5n/KTG-kernel_es209ra
arch/xtensa/kernel/xtensa_ksyms.c
9443
2236
/* * arch/xtensa/kernel/xtensa_ksyms.c * * Export Xtensa-specific functions for loadable modules. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. * * Joe Taylor <joe@tensilica.com> */ #include <linux/module.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <asm/irq.h> #include <linux/in6.h> #include <asm/uaccess.h> #include <asm/checksum.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/page.h> #include <asm/pgalloc.h> #ifdef CONFIG_BLK_DEV_FD #include <asm/floppy.h> #endif #ifdef CONFIG_NET #include <net/checksum.h> #endif /* CONFIG_NET */ /* * String functions */ EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(kernel_thread); /* * gcc internal math functions */ extern long long __ashrdi3(long long, int); extern long long __ashldi3(long long, int); extern long long __lshrdi3(long long, int); extern int __divsi3(int, int); extern int __modsi3(int, int); extern long long __muldi3(long long, long long); extern int __mulsi3(int, int); extern unsigned int __udivsi3(unsigned int, unsigned int); extern unsigned int __umodsi3(unsigned int, unsigned int); extern unsigned long long __umoddi3(unsigned long long, unsigned long long); extern unsigned long long __udivdi3(unsigned long long, unsigned long long); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__muldi3); EXPORT_SYMBOL(__mulsi3); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__umodsi3); EXPORT_SYMBOL(__udivdi3); EXPORT_SYMBOL(__umoddi3); #ifdef CONFIG_NET /* * Networking support */ EXPORT_SYMBOL(csum_partial_copy_generic); #endif /* CONFIG_NET */ /* * Architecture-specific symbols */ EXPORT_SYMBOL(__xtensa_copy_user); /* * Kernel hacking ... 
*/ #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) // FIXME EXPORT_SYMBOL(screen_info); #endif EXPORT_SYMBOL(outsb); EXPORT_SYMBOL(outsw); EXPORT_SYMBOL(outsl); EXPORT_SYMBOL(insb); EXPORT_SYMBOL(insw); EXPORT_SYMBOL(insl);
gpl-2.0
CyanogenMod/android_kernel_nvidia_shieldtablet
drivers/net/ethernet/marvell/mvneta.c
228
76704
/* * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. * * Copyright (C) 2012 Marvell * * Rami Rosen <rosenr@marvell.com> * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/inetdevice.h> #include <linux/mbus.h> #include <linux/module.h> #include <linux/interrupt.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_address.h> #include <linux/phy.h> #include <linux/clk.h> /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) #define MVNETA_RXQ_HW_BUF_ALLOC BIT(1) #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16) #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2)) #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2)) #define MVNETA_RXQ_BUF_SIZE_SHIFT 19 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19) #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2)) #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 #define MVNETA_PORT_RX_RESET 0x1cc0 #define MVNETA_PORT_RX_DMA_RESET BIT(0) #define MVNETA_PHY_ADDR 0x2000 #define MVNETA_PHY_ADDR_MASK 0x1f #define MVNETA_MBUS_RETRY 0x2010 #define MVNETA_UNIT_INTR_CAUSE 0x2080 #define MVNETA_UNIT_CONTROL 0x20B0 #define MVNETA_PHY_POLLING_ENABLE BIT(1) #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3)) #define MVNETA_WIN_SIZE(w) 
(0x2204 + ((w) << 3)) #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) #define MVNETA_BASE_ADDR_ENABLE 0x2290 #define MVNETA_PORT_CONFIG 0x2400 #define MVNETA_UNI_PROMISC_MODE BIT(0) #define MVNETA_DEF_RXQ(q) ((q) << 1) #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4) #define MVNETA_TX_UNSET_ERR_SUM BIT(12) #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16) #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19) #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22) #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25) #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \ MVNETA_DEF_RXQ_ARP(q) | \ MVNETA_DEF_RXQ_TCP(q) | \ MVNETA_DEF_RXQ_UDP(q) | \ MVNETA_DEF_RXQ_BPDU(q) | \ MVNETA_TX_UNSET_ERR_SUM | \ MVNETA_RX_CSUM_WITH_PSEUDO_HDR) #define MVNETA_PORT_CONFIG_EXTEND 0x2404 #define MVNETA_MAC_ADDR_LOW 0x2414 #define MVNETA_MAC_ADDR_HIGH 0x2418 #define MVNETA_SDMA_CONFIG 0x241c #define MVNETA_SDMA_BRST_SIZE_16 4 #define MVNETA_NO_DESC_SWAP 0x0 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) #define MVNETA_RX_NO_DATA_SWAP BIT(4) #define MVNETA_TX_NO_DATA_SWAP BIT(5) #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) #define MVNETA_PORT_STATUS 0x2444 #define MVNETA_TX_IN_PRGRS BIT(1) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c #define MVNETA_TYPE_PRIO 0x24bc #define MVNETA_FORCE_UNI BIT(21) #define MVNETA_TXQ_CMD_1 0x24e4 #define MVNETA_TXQ_CMD 0x2448 #define MVNETA_TXQ_DISABLE_SHIFT 8 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff #define MVNETA_ACC_MODE 0x2500 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) #define MVNETA_INTR_NEW_CAUSE 0x25a0 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) #define MVNETA_INTR_NEW_MASK 0x25a4 #define MVNETA_INTR_OLD_CAUSE 0x25a8 #define MVNETA_INTR_OLD_MASK 0x25ac #define MVNETA_INTR_MISC_CAUSE 0x25b0 #define MVNETA_INTR_MISC_MASK 0x25b4 #define 
MVNETA_INTR_ENABLE 0x25b8 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 #define MVNETA_RXQ_CMD 0x2680 #define MVNETA_RXQ_DISABLE_SHIFT 8 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4)) #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4)) #define MVNETA_GMAC_CTRL_0 0x2c00 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc #define MVNETA_GMAC0_PORT_ENABLE BIT(0) #define MVNETA_GMAC_CTRL_2 0x2c08 #define MVNETA_GMAC2_PSC_ENABLE BIT(3) #define MVNETA_GMAC2_PORT_RGMII BIT(4) #define MVNETA_GMAC2_PORT_RESET BIT(6) #define MVNETA_GMAC_STATUS 0x2c10 #define MVNETA_GMAC_LINK_UP BIT(0) #define MVNETA_GMAC_SPEED_1000 BIT(1) #define MVNETA_GMAC_SPEED_100 BIT(2) #define MVNETA_GMAC_FULL_DUPLEX BIT(3) #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4) #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVNETA_GMAC_AN_SPEED_EN BIT(7) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) #define MVNETA_MIB_COUNTERS_BASE 0x3080 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 #define MVNETA_DA_FILT_OTH_MCAST 0x3500 #define MVNETA_DA_FILT_UCAST_BASE 0x3600 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2)) #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2)) #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) #define MVNETA_TXQ_DEC_SENT_SHIFT 16 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) #define MVNETA_TXQ_SENT_DESC_SHIFT 16 #define 
MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 #define MVNETA_PORT_TX_RESET 0x3cf0 #define MVNETA_PORT_TX_DMA_RESET BIT(0) #define MVNETA_TX_MTU 0x3e0c #define MVNETA_TX_TOKEN_SIZE 0x3e14 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff /* Descriptor ring Macros */ #define MVNETA_QUEUE_NEXT_DESC(q, index) \ (((index) < (q)->last_desc) ? ((index) + 1) : 0) /* Various constants */ /* Coalescing */ #define MVNETA_TXDONE_COAL_PKTS 16 #define MVNETA_RX_COAL_PKTS 32 #define MVNETA_RX_COAL_USEC 100 /* Timer */ #define MVNETA_TX_DONE_TIMER_PERIOD 10 /* Napi polling weight */ #define MVNETA_RX_POLL_WEIGHT 64 /* The two bytes Marvell header. Either contains a special value used * by Marvell switches when a specific hardware mode is enabled (not * supported by this driver) or is filled automatically by zeroes on * the RX side. Those two bytes being at the front of the Ethernet * header, they allow to have the IP header aligned on a 4 bytes * boundary automatically: the hardware skips those two bytes on its * own. 
*/ #define MVNETA_MH_SIZE 2 #define MVNETA_VLAN_TAG_LEN 4 #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 #define MVNETA_TX_CSUM_MAX_SIZE 9800 #define MVNETA_ACC_MODE_EXT 1 /* Timeout constants */ #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000 #define MVNETA_TX_MTU_MAX 0x3ffff /* Max number of Rx descriptors */ #define MVNETA_MAX_RXD 128 /* Max number of Tx descriptors */ #define MVNETA_MAX_TXD 532 /* descriptor aligned size */ #define MVNETA_DESC_ALIGNED_SIZE 32 #define MVNETA_RX_PKT_SIZE(mtu) \ ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ ETH_HLEN + ETH_FCS_LEN, \ MVNETA_CPU_D_CACHE_LINE_SIZE) #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) struct mvneta_stats { struct u64_stats_sync syncp; u64 packets; u64 bytes; }; struct mvneta_port { int pkt_size; void __iomem *base; struct mvneta_rx_queue *rxqs; struct mvneta_tx_queue *txqs; struct timer_list tx_done_timer; struct net_device *dev; u32 cause_rx_tx; struct napi_struct napi; /* Flags */ unsigned long flags; #define MVNETA_F_TX_DONE_TIMER_BIT 0 /* Napi weight */ int weight; /* Core clock */ struct clk *clk; u8 mcast_count[256]; u16 tx_ring_size; u16 rx_ring_size; struct mvneta_stats tx_stats; struct mvneta_stats rx_stats; struct mii_bus *mii_bus; struct phy_device *phy_dev; phy_interface_t phy_interface; struct device_node *phy_node; unsigned int link; unsigned int duplex; unsigned int speed; }; /* The mvneta_tx_desc and mvneta_rx_desc structures describe the * layout of the transmit and reception DMA descriptors, and their * layout is therefore defined by the hardware design */ struct mvneta_tx_desc { u32 command; /* Options used by HW for packet transmitting.*/ #define MVNETA_TX_L3_OFF_SHIFT 0 #define MVNETA_TX_IP_HLEN_SHIFT 8 #define MVNETA_TX_L4_UDP BIT(16) #define MVNETA_TX_L3_IP6 BIT(17) #define MVNETA_TXD_IP_CSUM BIT(18) #define MVNETA_TXD_Z_PAD BIT(19) #define MVNETA_TXD_L_DESC BIT(20) #define 
MVNETA_TXD_F_DESC BIT(21) #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \ MVNETA_TXD_L_DESC | \ MVNETA_TXD_F_DESC) #define MVNETA_TX_L4_CSUM_FULL BIT(30) #define MVNETA_TX_L4_CSUM_NOT BIT(31) u16 reserverd1; /* csum_l4 (for future use) */ u16 data_size; /* Data size of transmitted packet in bytes */ u32 buf_phys_addr; /* Physical addr of transmitted buffer */ u32 reserved2; /* hw_cmd - (for future use, PMT) */ u32 reserved3[4]; /* Reserved - (for future use) */ }; struct mvneta_rx_desc { u32 status; /* Info about received packet */ #define MVNETA_RXD_ERR_CRC 0x0 #define MVNETA_RXD_ERR_SUMMARY BIT(16) #define MVNETA_RXD_ERR_OVERRUN BIT(17) #define MVNETA_RXD_ERR_LEN BIT(18) #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) #define MVNETA_RXD_L3_IP4 BIT(25) #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) #define MVNETA_RXD_L4_CSUM_OK BIT(30) u16 reserved1; /* pnc_info - (for future use, PnC) */ u16 data_size; /* Size of received packet in bytes */ u32 buf_phys_addr; /* Physical address of the buffer */ u32 reserved2; /* pnc_flow_id (for future use, PnC) */ u32 buf_cookie; /* cookie for access to RX buffer in rx path */ u16 reserved3; /* prefetch_cmd, for future use */ u16 reserved4; /* csum_l4 - (for future use, PnC) */ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ }; struct mvneta_tx_queue { /* Number of this TX queue, in the range 0-7 */ u8 id; /* Number of TX DMA descriptors in the descriptor ring */ int size; /* Number of currently used TX DMA descriptor in the * descriptor ring */ int count; /* Array of transmitted skb */ struct sk_buff **tx_skb; /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; /* Index of the TX DMA descriptor to be cleaned up */ int txq_get_index; u32 done_pkts_coal; /* Virtual address of the TX DMA descriptors array */ struct mvneta_tx_desc *descs; /* DMA address of the TX DMA 
descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static int rxq_number = 8;
static int txq_number = 8;

/* Default RX queue used for non-classified traffic */
static int rxq_def;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter (wraps at ring size) */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter (wraps at ring size) */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters; presumably the
	 * counters are clear-on-read, so the values are discarded.
	 * NOTE(review): confirm against the NETA datasheet.
	 */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Get System Network Statistics.
 * Fills @stats from the driver's u64 sync-protected counters plus the
 * netdev error/drop counters; returns @stats.
 */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));

	/* Retry loops give a consistent 64-bit snapshot on 32-bit CPUs */
	do {
		start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
		stats->rx_packets = pp->rx_stats.packets;
		stats->rx_bytes = pp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
		stats->tx_packets = pp->tx_stats.packets;
		stats->tx_bytes = pp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the given RX descriptor is both the first and the
 * last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
{
	return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
*/
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	/* Fast path: both counts fit in their 8-bit register fields */
	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW, and
 * advance the ring's software index.
 */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port.
 */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	/* HW field is in units of 2 bytes, excluding the Marvell header */
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes (hence the >> 3) */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once ; Assume caller
	 * process TX desriptors in quanta less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW,
 * and advance the ring's software index.
 */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	/* Buffer size field is in units of 8 bytes */
	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	if (enable)
		val |= MVNETA_GMAC2_PORT_RGMII;
	else
		val &= ~MVNETA_GMAC2_PORT_RGMII;

	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}

/* Config SGMII port */
static void mvneta_port_sgmii_config(struct mvneta_port *pp)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val |= MVNETA_GMAC2_PSC_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs.
*/ q_map = 0; for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; if (rxq->descs != NULL) q_map |= (1 << queue); } mvreg_write(pp, MVNETA_RXQ_CMD, q_map); } /* Stop the Ethernet port activity */ static void mvneta_port_down(struct mvneta_port *pp) { u32 val; int count; /* Stop Rx port activity. Check port Rx activity. */ val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; /* Issue stop command for active channels only */ if (val != 0) mvreg_write(pp, MVNETA_RXQ_CMD, val << MVNETA_RXQ_DISABLE_SHIFT); /* Wait for all Rx activity to terminate. */ count = 0; do { if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { netdev_warn(pp->dev, "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n", val); break; } mdelay(1); val = mvreg_read(pp, MVNETA_RXQ_CMD); } while (val & 0xff); /* Stop Tx port activity. Check port Tx activity. Issue stop * command for active channels only */ val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; if (val != 0) mvreg_write(pp, MVNETA_TXQ_CMD, (val << MVNETA_TXQ_DISABLE_SHIFT)); /* Wait for all Tx activity to terminate. 
*/ count = 0; do { if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) { netdev_warn(pp->dev, "TIMEOUT for TX stopped status=0x%08x\n", val); break; } mdelay(1); /* Check TX Command reg that all Txqs are stopped */ val = mvreg_read(pp, MVNETA_TXQ_CMD); } while (val & 0xff); /* Double check to verify that TX FIFO is empty */ count = 0; do { if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { netdev_warn(pp->dev, "TX FIFO empty timeout status=0x08%x\n", val); break; } mdelay(1); val = mvreg_read(pp, MVNETA_PORT_STATUS); } while (!(val & MVNETA_TX_FIFO_EMPTY) && (val & MVNETA_TX_IN_PRGRS)); udelay(200); } /* Enable the port by setting the port enable bit of the MAC control register */ static void mvneta_port_enable(struct mvneta_port *pp) { u32 val; /* Enable port */ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); val |= MVNETA_GMAC0_PORT_ENABLE; mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); } /* Disable the port and wait for about 200 usec before retuning */ static void mvneta_port_disable(struct mvneta_port *pp) { u32 val; /* Reset the Enable bit in the Serial Control Register */ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); val &= ~MVNETA_GMAC0_PORT_ENABLE; mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); udelay(200); } /* Multicast tables methods */ /* Set all entries in Unicast MAC Table; queue==-1 means reject all */ static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) { int offset; u32 val; if (queue == -1) { val = 0; } else { val = 0x1 | (queue << 1); val |= (val << 24) | (val << 16) | (val << 8); } for (offset = 0; offset <= 0xc; offset += 4) mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); } /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) { int offset; u32 val; if (queue == -1) { val = 0; } else { val = 0x1 | (queue << 1); val |= (val << 24) | (val << 16) | (val << 8); } for (offset = 0; offset <= 0xfc; offset += 4) mvreg_write(pp, 
MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		/* Rejecting all also resets the per-CRC8 reference counts */
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);

	val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
		MVNETA_NO_DESC_SWAP);

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	/* Reject all unicast/multicast until the filters are programmed */
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)

{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger that MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg  */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
*/
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	/* Convert usec to core clock cycles */
	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs.
 * The flag bit guarantees at most one pending timer.
 */
static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
{
	if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
		pp->tx_done_timer.expires = jiffies +
			msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
		add_timer(&pp->tx_done_timer);
	}
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 *  The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	/* NOTE(review): l3_proto is compared against a byte-swapped
	 * constant, i.e. callers pass a network-order protocol value.
	 */
	if (l3_proto == swab16(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |=  MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   rx_desc->status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload: trust HW only for IPv4 frames whose L4
 * checksum the HW validated.
 */
static void mvneta_rx_csum(struct mvneta_port *pp,
			   struct mvneta_rx_desc *rx_desc,
			   struct sk_buff *skb)
{
	if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
	    (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		/* NULL entries mark mid-fragment descriptors: only the
		 * descriptor holding the skb triggers the free.
		 */
		if (!skb)
			continue;

		dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
				 tx_desc->data_size, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

/* Handle end of transmission */
static int mvneta_txq_done(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (tx_done == 0)
		return tx_done;
	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	/* Wake the queue once enough descriptors fit a worst-case skb */
	if (netif_tx_queue_stopped(nq)) {
		if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
	}

	return tx_done;
}

/* Refill processing: allocate and DMA-map a new RX buffer for @rx_desc.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)

{
	dma_addr_t phys_addr;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
	if (!skb)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);

	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
				skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value
 */
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
						u32 cause)
{
	int queue = fls(cause >> 8) - 1;

	return (queue < 0 || queue >= rxq_number) ?
NULL : &pp->rxqs[queue]; } /* Drop packets received by the RXQ and free buffers */ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { int rx_done, i; rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); for (i = 0; i < rxq->size; i++) { struct mvneta_rx_desc *rx_desc = rxq->descs + i; struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie; dev_kfree_skb_any(skb); dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, rx_desc->data_size, DMA_FROM_DEVICE); } if (rx_done) mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); } /* Main rx processing */ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, struct mvneta_rx_queue *rxq) { struct net_device *dev = pp->dev; int rx_done, rx_filled; /* Get number of received packets */ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); if (rx_todo > rx_done) rx_todo = rx_done; rx_done = 0; rx_filled = 0; /* Fairness NAPI loop */ while (rx_done < rx_todo) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); struct sk_buff *skb; u32 rx_status; int rx_bytes, err; prefetch(rx_desc); rx_done++; rx_filled++; rx_status = rx_desc->status; skb = (struct sk_buff *)rx_desc->buf_cookie; if (!mvneta_rxq_desc_is_first_last(rx_desc) || (rx_status & MVNETA_RXD_ERR_SUMMARY)) { dev->stats.rx_errors++; mvneta_rx_error(pp, rx_desc); mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr, (u32)skb); continue; } dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, rx_desc->data_size, DMA_FROM_DEVICE); rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); u64_stats_update_begin(&pp->rx_stats.syncp); pp->rx_stats.packets++; pp->rx_stats.bytes += rx_bytes; u64_stats_update_end(&pp->rx_stats.syncp); /* Linux processing */ skb_reserve(skb, MVNETA_MH_SIZE); skb_put(skb, rx_bytes); skb->protocol = eth_type_trans(skb, dev); mvneta_rx_csum(pp, rx_desc, skb); napi_gro_receive(&pp->napi, skb); /* Refill processing */ err = mvneta_rx_refill(pp, rx_desc); if (err) { 
netdev_err(pp->dev, "Linux processing - Can't refill\n"); rxq->missed++; rx_filled--; } } /* Update rxq management counters */ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); return rx_done; } /* Handle tx fragmentation processing */ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, struct mvneta_tx_queue *txq) { struct mvneta_tx_desc *tx_desc; int i; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; void *addr = page_address(frag->page.p) + frag->page_offset; tx_desc = mvneta_txq_next_desc_get(txq); tx_desc->data_size = frag->size; tx_desc->buf_phys_addr = dma_map_single(pp->dev->dev.parent, addr, tx_desc->data_size, DMA_TO_DEVICE); if (dma_mapping_error(pp->dev->dev.parent, tx_desc->buf_phys_addr)) { mvneta_txq_desc_put(txq); goto error; } if (i == (skb_shinfo(skb)->nr_frags - 1)) { /* Last descriptor */ tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; txq->tx_skb[txq->txq_put_index] = skb; mvneta_txq_inc_put(txq); } else { /* Descriptor in the middle: Not First, Not Last */ tx_desc->command = 0; txq->tx_skb[txq->txq_put_index] = NULL; mvneta_txq_inc_put(txq); } } return 0; error: /* Release all descriptors that were used to map fragments of * this packet, as well as the corresponding DMA mappings */ for (i = i - 1; i >= 0; i--) { tx_desc = txq->descs + i; dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr, tx_desc->data_size, DMA_TO_DEVICE); mvneta_txq_desc_put(txq); } return -ENOMEM; } /* Main tx processing */ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; struct mvneta_tx_desc *tx_desc; struct netdev_queue *nq; int frags = 0; u32 tx_cmd; if (!netif_running(dev)) goto out; frags = skb_shinfo(skb)->nr_frags + 1; nq = netdev_get_tx_queue(dev, txq_id); /* Get a descriptor for the first part of the packet */ 
tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

	txq->count += frags;
	mvneta_txq_pend_desc_add(pp, txq, frags);

	/* Stop the queue if it can no longer hold a worst-case skb */
	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

out:
	if (frags > 0) {
		u64_stats_update_begin(&pp->tx_stats.syncp);
		pp->tx_stats.packets++;
		pp->tx_stats.bytes += skb->len;
		u64_stats_update_end(&pp->tx_stats.syncp);

	} else {
		/* Dropped: mapping failed or interface not running */
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
		mvneta_txq_done(pp, txq);

	/* If after calling mvneta_txq_done, count equals
	 * frags, we need to set the timer
	 */
	if (txq->count == frags && frags > 0)
		mvneta_add_tx_done_timer(pp);

	return NETDEV_TX_OK;
}

/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)

{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* handle tx done - called from tx done timer callback */
static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
			      int *tx_todo)
{
	struct mvneta_tx_queue *txq;
	u32 tx_done = 0;
	struct netdev_queue *nq;

	*tx_todo = 0;

	/* Walk the cause bitmap from highest set queue down */
	while (cause_tx_done != 0) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);
		if (!txq)
			break;

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count) {
			tx_done += mvneta_txq_done(pp, txq);
			*tx_todo += txq->count;
		}

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}

	return tx_done;
}

/* Compute crc8 of the specified address, using a unique algorithm ,
 * according to hw spec, different than generic crc8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method set the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base    */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 of the MAC address is used as an index into the Other
 * Multicast Table entries in the DA-Filter table.
 * The caller supplies the precomputed CRC-8 (see mvneta_addr_crc());
 * this helper only updates the matching table entry: queue == -1
 * clears the entry, otherwise the entry is set to accept frames and
 * steer them to the given RX queue.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Four 8-bit entries are packed per 32-bit register: the byte
	 * offset of the register is crc8 rounded down to a multiple of
	 * four, and the byte lane inside it is crc8 % 4.
	 */
	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4; /* Entry offset within the above reg */

	/* Read-modify-write only the addressed byte lane */
	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		/* Accept-frame bit (0x01) plus the RX queue in bits [3:1] */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}

/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
/* Add (queue >= 0) or delete (queue == -1) one multicast MAC address
 * in the hardware filter.  Addresses matching 01:00:5e:00:00:xx go
 * straight into the Special Multicast Table, indexed by their last
 * byte; every other multicast address is hashed with CRC-8 into the
 * Other Multicast Table.  Several addresses can collide on the same
 * CRC, so pp->mcast_count[] reference-counts each entry and the entry
 * is only cleared once the last user is removed.
 * Returns 0 on success, -EINVAL on a delete that cannot (yet) clear
 * the shared entry.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	/* Special range: DA[7:0] is the table index directly, no CRC */
	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			/* Delete of an address that was never added */
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			/* Other addresses still share this CRC entry, so it
			 * must stay programmed.
			 * NOTE(review): returning -EINVAL here is arguably
			 * not an error condition -- confirm callers ignore it.
			 */
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	/* Program (or, with queue == -1 and refcount now 0, clear) the entry */
	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}

/* Configure the filtering mode of the Ethernet port: enable or
 * disable unicast promiscuous mode (UPM).
 */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		/* Wildcard the station MAC address registers */
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses (ndo_set_rx_mode callback) */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, rxq_def);
		mvneta_set_special_mcast_table(pp, rxq_def);
		mvneta_set_other_mcast_table(pp, rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		/* Re-install only the device's own station address */
		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, rxq_def);
			mvneta_set_other_mcast_table(pp, rxq_def);
		} else {
			/* Accept only initialized multicast: clear both
			 * tables, then re-add each subscribed address.
			 */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      rxq_def);
				}
			}
		}
	}
}

/* Interrupt handling - the callback for request_irq().
 * Masks all port interrupts and hands further processing to NAPI;
 * the mask is re-opened when polling completes (see mvneta_poll()).
 */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);

	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that are transmitted
 * packets on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 -15 of the cause Rx Tx register indicate that are received
 * packets on the corresponding RXQ (Bit 8 is for RX queue 0).
* Each CPU has its own causeRxTx register */ static int mvneta_poll(struct napi_struct *napi, int budget) { int rx_done = 0; u32 cause_rx_tx; unsigned long flags; struct mvneta_port *pp = netdev_priv(napi->dev); if (!netif_running(pp->dev)) { napi_complete(napi); return rx_done; } /* Read cause register */ cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) & MVNETA_RX_INTR_MASK(rxq_number); /* For the case where the last mvneta_poll did not process all * RX packets */ cause_rx_tx |= pp->cause_rx_tx; if (rxq_number > 1) { while ((cause_rx_tx != 0) && (budget > 0)) { int count; struct mvneta_rx_queue *rxq; /* get rx queue number from cause_rx_tx */ rxq = mvneta_rx_policy(pp, cause_rx_tx); if (!rxq) break; /* process the packet in that rx queue */ count = mvneta_rx(pp, budget, rxq); rx_done += count; budget -= count; if (budget > 0) { /* set off the rx bit of the * corresponding bit in the cause rx * tx register, so that next iteration * will find the next rx queue where * packets are received on */ cause_rx_tx &= ~((1 << rxq->id) << 8); } } } else { rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]); budget -= rx_done; } if (budget > 0) { cause_rx_tx = 0; napi_complete(napi); local_irq_save(flags); mvreg_write(pp, MVNETA_INTR_NEW_MASK, MVNETA_RX_INTR_MASK(rxq_number)); local_irq_restore(flags); } pp->cause_rx_tx = cause_rx_tx; return rx_done; } /* tx done timer callback */ static void mvneta_tx_done_timer_callback(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct mvneta_port *pp = netdev_priv(dev); int tx_done = 0, tx_todo = 0; if (!netif_running(dev)) return ; clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags); tx_done = mvneta_tx_done_gbe(pp, (((1 << txq_number) - 1) & MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK), &tx_todo); if (tx_todo > 0) mvneta_add_tx_done_timer(pp); } /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int num) { 
struct net_device *dev = pp->dev; int i; for (i = 0; i < num; i++) { struct sk_buff *skb; struct mvneta_rx_desc *rx_desc; unsigned long phys_addr; skb = dev_alloc_skb(pp->pkt_size); if (!skb) { netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n", __func__, rxq->id, i, num); break; } rx_desc = rxq->descs + i; memset(rx_desc, 0, sizeof(struct mvneta_rx_desc)); phys_addr = dma_map_single(dev->dev.parent, skb->head, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) { dev_kfree_skb(skb); break; } mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb); } /* Add this number of RX descriptors as non occupied (ready to * get packets) */ mvneta_rxq_non_occup_desc_add(pp, rxq, i); return i; } /* Free all packets pending transmit from all TXQs and reset TX port */ static void mvneta_tx_reset(struct mvneta_port *pp) { int queue; /* free the skb's in the hal tx ring */ for (queue = 0; queue < txq_number; queue++) mvneta_txq_done_force(pp, &pp->txqs[queue]); mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); } static void mvneta_rx_reset(struct mvneta_port *pp) { mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); } /* Rx/Tx queue initialization/cleanup methods */ /* Create a specified RX queue */ static int mvneta_rxq_init(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { rxq->size = pp->rx_ring_size; /* Allocate memory for RX descriptors */ rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, &rxq->descs_phys, GFP_KERNEL); if (rxq->descs == NULL) return -ENOMEM; BUG_ON(rxq->descs != PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); rxq->last_desc = rxq->size - 1; /* Set Rx descriptors queue starting address */ mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); /* Set Offset */ 
mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD); /* Set coalescing pkts and time */ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); /* Fill RXQ with buffers from RX pool */ mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); mvneta_rxq_bm_disable(pp, rxq); mvneta_rxq_fill(pp, rxq, rxq->size); return 0; } /* Cleanup Rx queue */ static void mvneta_rxq_deinit(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { mvneta_rxq_drop_pkts(pp, rxq); if (rxq->descs) dma_free_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, rxq->descs, rxq->descs_phys); rxq->descs = NULL; rxq->last_desc = 0; rxq->next_desc_to_proc = 0; rxq->descs_phys = 0; } /* Create and initialize a tx queue */ static int mvneta_txq_init(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { txq->size = pp->tx_ring_size; /* Allocate memory for TX descriptors */ txq->descs = dma_alloc_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, &txq->descs_phys, GFP_KERNEL); if (txq->descs == NULL) return -ENOMEM; /* Make sure descriptor address is cache line size aligned */ BUG_ON(txq->descs != PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); txq->last_desc = txq->size - 1; /* Set maximum bandwidth for enabled TXQs */ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); /* Set Tx descriptors queue starting address */ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL); if (txq->tx_skb == NULL) { dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); return -ENOMEM; } mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); return 0; } /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ static void 
mvneta_txq_deinit(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { kfree(txq->tx_skb); if (txq->descs) dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; txq->descs_phys = 0; /* Set minimum bandwidth for disabled TXQs */ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); /* Set Tx descriptors queue starting address and size */ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); } /* Cleanup all Tx queues */ static void mvneta_cleanup_txqs(struct mvneta_port *pp) { int queue; for (queue = 0; queue < txq_number; queue++) mvneta_txq_deinit(pp, &pp->txqs[queue]); } /* Cleanup all Rx queues */ static void mvneta_cleanup_rxqs(struct mvneta_port *pp) { int queue; for (queue = 0; queue < rxq_number; queue++) mvneta_rxq_deinit(pp, &pp->rxqs[queue]); } /* Init all Rx queues */ static int mvneta_setup_rxqs(struct mvneta_port *pp) { int queue; for (queue = 0; queue < rxq_number; queue++) { int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); if (err) { netdev_err(pp->dev, "%s: can't create rxq=%d\n", __func__, queue); mvneta_cleanup_rxqs(pp); return err; } } return 0; } /* Init all tx queues */ static int mvneta_setup_txqs(struct mvneta_port *pp) { int queue; for (queue = 0; queue < txq_number; queue++) { int err = mvneta_txq_init(pp, &pp->txqs[queue]); if (err) { netdev_err(pp->dev, "%s: can't create txq=%d\n", __func__, queue); mvneta_cleanup_txqs(pp); return err; } } return 0; } static void mvneta_start_dev(struct mvneta_port *pp) { mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); /* start the Rx/Tx activity */ mvneta_port_enable(pp); /* Enable polling on the port */ napi_enable(&pp->napi); /* Unmask interrupts */ mvreg_write(pp, MVNETA_INTR_NEW_MASK, MVNETA_RX_INTR_MASK(rxq_number)); 
phy_start(pp->phy_dev); netif_tx_start_all_queues(pp->dev); } static void mvneta_stop_dev(struct mvneta_port *pp) { phy_stop(pp->phy_dev); napi_disable(&pp->napi); netif_carrier_off(pp->dev); mvneta_port_down(pp); netif_tx_stop_all_queues(pp->dev); /* Stop the port activity */ mvneta_port_disable(pp); /* Clear all ethernet port interrupts */ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); /* Mask all ethernet port interrupts */ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); mvneta_tx_reset(pp); mvneta_rx_reset(pp); } /* tx timeout callback - display a message and stop/start the network device */ static void mvneta_tx_timeout(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); netdev_info(dev, "tx timeout\n"); mvneta_stop_dev(pp); mvneta_start_dev(pp); } /* Return positive if MTU is valid */ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu) { if (mtu < 68) { netdev_err(dev, "cannot change mtu to less than 68\n"); return -EINVAL; } /* 9676 == 9700 - 20 and rounding to 8 */ if (mtu > 9676) { netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu); mtu = 9676; } if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); } return mtu; } /* Change the device mtu */ static int mvneta_change_mtu(struct net_device *dev, int mtu) { struct mvneta_port *pp = netdev_priv(dev); int ret; mtu = mvneta_check_mtu_valid(dev, mtu); if (mtu < 0) return -EINVAL; dev->mtu = mtu; if (!netif_running(dev)) return 0; /* The interface is running, so we have to force a * reallocation of the RXQs */ mvneta_stop_dev(pp); mvneta_cleanup_txqs(pp); mvneta_cleanup_rxqs(pp); pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); ret = mvneta_setup_rxqs(pp); if (ret) { netdev_err(pp->dev, "unable to setup rxqs after MTU 
change\n"); return ret; } mvneta_setup_txqs(pp); mvneta_start_dev(pp); mvneta_port_up(pp); return 0; } /* Handle setting mac address */ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) { struct mvneta_port *pp = netdev_priv(dev); u8 *mac = addr + 2; int i; if (netif_running(dev)) return -EBUSY; /* Remove previous address table entry */ mvneta_mac_addr_set(pp, dev->dev_addr, -1); /* Set new addr in hw */ mvneta_mac_addr_set(pp, mac, rxq_def); /* Set addr in the device */ for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = mac[i]; return 0; } static void mvneta_adjust_link(struct net_device *ndev) { struct mvneta_port *pp = netdev_priv(ndev); struct phy_device *phydev = pp->phy_dev; int status_change = 0; if (phydev->link) { if ((pp->speed != phydev->speed) || (pp->duplex != phydev->duplex)) { u32 val; val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | MVNETA_GMAC_CONFIG_FULL_DUPLEX | MVNETA_GMAC_AN_SPEED_EN | MVNETA_GMAC_AN_DUPLEX_EN); if (phydev->duplex) val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; if (phydev->speed == SPEED_1000) val |= MVNETA_GMAC_CONFIG_GMII_SPEED; else val |= MVNETA_GMAC_CONFIG_MII_SPEED; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); pp->duplex = phydev->duplex; pp->speed = phydev->speed; } } if (phydev->link != pp->link) { if (!phydev->link) { pp->duplex = -1; pp->speed = 0; } pp->link = phydev->link; status_change = 1; } if (status_change) { if (phydev->link) { u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val |= (MVNETA_GMAC_FORCE_LINK_PASS | MVNETA_GMAC_FORCE_LINK_DOWN); mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); mvneta_port_up(pp); netdev_info(pp->dev, "link up\n"); } else { mvneta_port_down(pp); netdev_info(pp->dev, "link down\n"); } } } static int mvneta_mdio_probe(struct mvneta_port *pp) { struct phy_device *phy_dev; phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, pp->phy_interface); if (!phy_dev) { 
netdev_err(pp->dev, "could not find the PHY\n"); return -ENODEV; } phy_dev->supported &= PHY_GBIT_FEATURES; phy_dev->advertising = phy_dev->supported; pp->phy_dev = phy_dev; pp->link = 0; pp->duplex = 0; pp->speed = 0; return 0; } static void mvneta_mdio_remove(struct mvneta_port *pp) { phy_disconnect(pp->phy_dev); pp->phy_dev = NULL; } static int mvneta_open(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); int ret; mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); ret = mvneta_setup_rxqs(pp); if (ret) return ret; ret = mvneta_setup_txqs(pp); if (ret) goto err_cleanup_rxqs; /* Connect to port interrupt line */ ret = request_irq(pp->dev->irq, mvneta_isr, 0, MVNETA_DRIVER_NAME, pp); if (ret) { netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); goto err_cleanup_txqs; } /* In default link is down */ netif_carrier_off(pp->dev); ret = mvneta_mdio_probe(pp); if (ret < 0) { netdev_err(dev, "cannot probe MDIO bus\n"); goto err_free_irq; } mvneta_start_dev(pp); return 0; err_free_irq: free_irq(pp->dev->irq, pp); err_cleanup_txqs: mvneta_cleanup_txqs(pp); err_cleanup_rxqs: mvneta_cleanup_rxqs(pp); return ret; } /* Stop the port, free port interrupt line */ static int mvneta_stop(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); mvneta_stop_dev(pp); mvneta_mdio_remove(pp); free_irq(dev->irq, pp); mvneta_cleanup_rxqs(pp); mvneta_cleanup_txqs(pp); del_timer(&pp->tx_done_timer); clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags); return 0; } /* Ethtool methods */ /* Get settings (phy address, speed) for ethtools */ int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mvneta_port *pp = netdev_priv(dev); if (!pp->phy_dev) return -ENODEV; return phy_ethtool_gset(pp->phy_dev, cmd); } /* Set settings (phy address, speed) for ethtools */ int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mvneta_port *pp = 
netdev_priv(dev); if (!pp->phy_dev) return -ENODEV; return phy_ethtool_sset(pp->phy_dev, cmd); } /* Set interrupt coalescing for ethtools */ static int mvneta_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { struct mvneta_port *pp = netdev_priv(dev); int queue; for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; rxq->time_coal = c->rx_coalesce_usecs; rxq->pkts_coal = c->rx_max_coalesced_frames; mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); } for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; txq->done_pkts_coal = c->tx_max_coalesced_frames; mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); } return 0; } /* get coalescing for ethtools */ static int mvneta_ethtool_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { struct mvneta_port *pp = netdev_priv(dev); c->rx_coalesce_usecs = pp->rxqs[0].time_coal; c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; return 0; } static void mvneta_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, dev_name(&dev->dev), sizeof(drvinfo->bus_info)); } static void mvneta_ethtool_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct mvneta_port *pp = netdev_priv(netdev); ring->rx_max_pending = MVNETA_MAX_RXD; ring->tx_max_pending = MVNETA_MAX_TXD; ring->rx_pending = pp->rx_ring_size; ring->tx_pending = pp->tx_ring_size; } static int mvneta_ethtool_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct mvneta_port *pp = netdev_priv(dev); if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) return -EINVAL; pp->rx_ring_size = ring->rx_pending 
< MVNETA_MAX_RXD ? ring->rx_pending : MVNETA_MAX_RXD; pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ? ring->tx_pending : MVNETA_MAX_TXD; if (netif_running(dev)) { mvneta_stop(dev); if (mvneta_open(dev)) { netdev_err(dev, "error on opening device after ring param change\n"); return -ENOMEM; } } return 0; } static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, .ndo_start_xmit = mvneta_tx, .ndo_set_rx_mode = mvneta_set_rx_mode, .ndo_set_mac_address = mvneta_set_mac_addr, .ndo_change_mtu = mvneta_change_mtu, .ndo_tx_timeout = mvneta_tx_timeout, .ndo_get_stats64 = mvneta_get_stats64, }; const struct ethtool_ops mvneta_eth_tool_ops = { .get_link = ethtool_op_get_link, .get_settings = mvneta_ethtool_get_settings, .set_settings = mvneta_ethtool_set_settings, .set_coalesce = mvneta_ethtool_set_coalesce, .get_coalesce = mvneta_ethtool_get_coalesce, .get_drvinfo = mvneta_ethtool_get_drvinfo, .get_ringparam = mvneta_ethtool_get_ringparam, .set_ringparam = mvneta_ethtool_set_ringparam, }; /* Initialize hw */ static int mvneta_init(struct mvneta_port *pp, int phy_addr) { int queue; /* Disable port */ mvneta_port_disable(pp); /* Set port default values */ mvneta_defaults_set(pp); pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue), GFP_KERNEL); if (!pp->txqs) return -ENOMEM; /* Initialize TX descriptor rings */ for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; txq->id = queue; txq->size = pp->tx_ring_size; txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; } pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue), GFP_KERNEL); if (!pp->rxqs) { kfree(pp->txqs); return -ENOMEM; } /* Create Rx descriptor rings */ for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; rxq->id = queue; rxq->size = pp->rx_ring_size; rxq->pkts_coal = MVNETA_RX_COAL_PKTS; rxq->time_coal = MVNETA_RX_COAL_USEC; } return 0; } static void 
/* Free the queue bookkeeping arrays allocated by mvneta_init() */
mvneta_deinit(struct mvneta_port *pp)
{
	kfree(pp->txqs);
	kfree(pp->rxqs);
}

/* platform glue : initialize decoding windows.
 * Clears all six address-decoding windows, then programs one window
 * per DRAM chip-select so the NETA DMA engine can reach system memory.
 */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		/* Only windows 0-3 have a remap register */
		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	/* All windows disabled (bit set == disabled); enable per CS below */
	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		/* Full read/write access (2 bits per window) */
		win_protect |= 3 << (2 * i);
	}

	/* NOTE(review): win_protect is computed but never written to any
	 * register in this version; mainline writes it to
	 * MVNETA_ACCESS_PROTECT_ENABLE -- confirm whether that write was
	 * dropped here intentionally.
	 */
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}

/* Power up the port: clear pending unit interrupts, configure the
 * MAC for the given PHY interface mode and release the port reset.
 */
static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 val;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
		mvneta_port_sgmii_config(pp);

	mvneta_gmac_rgmii_set(pp, 1);

	/* Cancel Port Reset */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

	/* Busy-wait until the hardware clears the reset bit.
	 * NOTE(review): no timeout -- a hung port would spin forever.
	 */
	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;
}

/* Device initialization routine: parse DT resources (IRQ, PHY,
 * MAC address, registers, clock), allocate the net_device, set up
 * queues/NAPI and register with the networking core.
 */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	u32 phy_addr;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *mac_addr;
	int phy_mode;
	int err;

	/* Our multiqueue support is not complete, so for now, only
	 * allow the usage of the first RX queue
	 */
	if (rxq_def != 0) {
		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
		return -EINVAL;
	}

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "no associated PHY\n");
		err = -ENODEV;
		goto err_free_irq;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	/* Use the DT-provided MAC address if valid, random otherwise */
	mac_addr = of_get_mac_address(dn);

	if (!mac_addr || !is_valid_ether_addr(mac_addr))
		eth_hw_addr_random(dev);
	else
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);

	pp = netdev_priv(dev);

	/* TX-done housekeeping timer; .data is filled in further below */
	pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
	init_timer(&pp->tx_done_timer);
	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	pp->weight = MVNETA_RX_POLL_WEIGHT;
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	pp->base = of_iomap(dn, 0);
	if (pp->base == NULL) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_unmap;
	}

	clk_prepare_enable(pp->clk);

	pp->tx_done_timer.data = (unsigned long)dev;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* NOTE(review): phy_addr is never assigned before this call;
	 * mvneta_init() ignores its second argument, so this is harmless
	 * in practice but still reads an uninitialized variable.
	 */
	err = mvneta_init(pp, phy_addr);
	if (err < 0) {
		dev_err(&pdev->dev, "can't init eth hal\n");
		goto err_clk;
	}
	mvneta_port_power_up(pp, phy_mode);

	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_deinit;
	}

	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

/* Error unwinding in strict reverse order of acquisition */
err_deinit:
	mvneta_deinit(pp);
err_clk:
	clk_disable_unprepare(pp->clk);
err_unmap:
	iounmap(pp->base);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine: tear down everything acquired in probe,
 * in reverse order.
 */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	mvneta_deinit(pp);
	clk_disable_unprepare(pp->clk);
	iounmap(pp->base);
	irq_dispose_mapping(dev->irq);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

/* Read-only module parameters: queue counts and the default RX queue */
module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);
gpl-2.0
raininja/android_kernel_asus_a500cg
arch/arm/mach-kirkwood/board-dnskw.c
2276
1225
/* * Copyright 2012 (C), Jamie Lentin <jm@lentin.co.uk> * * arch/arm/mach-kirkwood/board-dnskw.c * * D-link DNS-320 & DNS-325 NAS Init for drivers not converted to * flattened device tree yet. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mv643xx_eth.h> #include <linux/gpio.h> #include "common.h" static struct mv643xx_eth_platform_data dnskw_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; /* Register any GPIO for output and set the value */ static void __init dnskw_gpio_register(unsigned gpio, char *name, int def) { if (gpio_request(gpio, name) == 0 && gpio_direction_output(gpio, 0) == 0) { gpio_set_value(gpio, def); if (gpio_export(gpio, 0) != 0) pr_err("dnskw: Failed to export GPIO %s\n", name); } else pr_err("dnskw: Failed to register %s\n", name); } void __init dnskw_init(void) { kirkwood_ge00_init(&dnskw_ge00_data); /* Set NAS to turn back on after a power failure */ dnskw_gpio_register(37, "dnskw:power:recover", 1); }
gpl-2.0
javilonas/Ptah-GT-I9300_OLD
drivers/hwmon/w83791d.c
2532
51462
/* w83791d.c - Part of lm_sensors, Linux kernel modules for hardware monitoring Copyright (C) 2006-2007 Charles Spirakis <bezaur@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports following chips: Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA w83791d 10 5 5 3 0x71 0x5ca3 yes no The w83791d chip appears to be part way between the 83781d and the 83792d. Thus, this file is derived from both the w83792d.c and w83781d.c files. The w83791g chip is the same as the w83791d but lead-free. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #define NUMBER_OF_VIN 10 #define NUMBER_OF_FANIN 5 #define NUMBER_OF_TEMPIN 3 #define NUMBER_OF_PWM 5 /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); MODULE_PARM_DESC(force_subclients, "List of subclient addresses: " "{bus, clientaddr, subclientaddr1, subclientaddr2}"); static int reset; module_param(reset, bool, 0); MODULE_PARM_DESC(reset, "Set to one to force a hardware chip reset"); static int init; module_param(init, bool, 0); MODULE_PARM_DESC(init, "Set to one to force extra software initialization"); /* The W83791D registers */ static const u8 W83791D_REG_IN[NUMBER_OF_VIN] = { 0x20, /* VCOREA in DataSheet */ 0x21, /* VINR0 in DataSheet */ 0x22, /* +3.3VIN in DataSheet */ 0x23, /* VDD5V in DataSheet */ 0x24, /* +12VIN in DataSheet */ 0x25, /* -12VIN in DataSheet */ 0x26, /* -5VIN in DataSheet */ 0xB0, /* 5VSB in DataSheet */ 0xB1, /* VBAT in DataSheet */ 0xB2 /* VINR1 in DataSheet */ }; static const u8 W83791D_REG_IN_MAX[NUMBER_OF_VIN] = { 0x2B, /* VCOREA High Limit in DataSheet */ 0x2D, /* VINR0 High Limit in DataSheet */ 0x2F, /* +3.3VIN High Limit in DataSheet */ 0x31, /* VDD5V High Limit in DataSheet */ 0x33, /* +12VIN High Limit in DataSheet */ 0x35, /* -12VIN High Limit in DataSheet */ 0x37, /* -5VIN High Limit in DataSheet */ 0xB4, /* 5VSB High Limit in DataSheet */ 0xB6, /* VBAT High Limit in DataSheet */ 0xB8 /* VINR1 High Limit in DataSheet */ }; static const u8 W83791D_REG_IN_MIN[NUMBER_OF_VIN] = { 0x2C, /* VCOREA Low Limit in DataSheet */ 0x2E, /* VINR0 Low Limit in DataSheet */ 0x30, /* +3.3VIN Low Limit in DataSheet */ 0x32, /* VDD5V Low 
Limit in DataSheet */ 0x34, /* +12VIN Low Limit in DataSheet */ 0x36, /* -12VIN Low Limit in DataSheet */ 0x38, /* -5VIN Low Limit in DataSheet */ 0xB5, /* 5VSB Low Limit in DataSheet */ 0xB7, /* VBAT Low Limit in DataSheet */ 0xB9 /* VINR1 Low Limit in DataSheet */ }; static const u8 W83791D_REG_FAN[NUMBER_OF_FANIN] = { 0x28, /* FAN 1 Count in DataSheet */ 0x29, /* FAN 2 Count in DataSheet */ 0x2A, /* FAN 3 Count in DataSheet */ 0xBA, /* FAN 4 Count in DataSheet */ 0xBB, /* FAN 5 Count in DataSheet */ }; static const u8 W83791D_REG_FAN_MIN[NUMBER_OF_FANIN] = { 0x3B, /* FAN 1 Count Low Limit in DataSheet */ 0x3C, /* FAN 2 Count Low Limit in DataSheet */ 0x3D, /* FAN 3 Count Low Limit in DataSheet */ 0xBC, /* FAN 4 Count Low Limit in DataSheet */ 0xBD, /* FAN 5 Count Low Limit in DataSheet */ }; static const u8 W83791D_REG_PWM[NUMBER_OF_PWM] = { 0x81, /* PWM 1 duty cycle register in DataSheet */ 0x83, /* PWM 2 duty cycle register in DataSheet */ 0x94, /* PWM 3 duty cycle register in DataSheet */ 0xA0, /* PWM 4 duty cycle register in DataSheet */ 0xA1, /* PWM 5 duty cycle register in DataSheet */ }; static const u8 W83791D_REG_TEMP_TARGET[3] = { 0x85, /* PWM 1 target temperature for temp 1 */ 0x86, /* PWM 2 target temperature for temp 2 */ 0x96, /* PWM 3 target temperature for temp 3 */ }; static const u8 W83791D_REG_TEMP_TOL[2] = { 0x87, /* PWM 1/2 temperature tolerance */ 0x97, /* PWM 3 temperature tolerance */ }; static const u8 W83791D_REG_FAN_CFG[2] = { 0x84, /* FAN 1/2 configuration */ 0x95, /* FAN 3 configuration */ }; static const u8 W83791D_REG_FAN_DIV[3] = { 0x47, /* contains FAN1 and FAN2 Divisor */ 0x4b, /* contains FAN3 Divisor */ 0x5C, /* contains FAN4 and FAN5 Divisor */ }; #define W83791D_REG_BANK 0x4E #define W83791D_REG_TEMP2_CONFIG 0xC2 #define W83791D_REG_TEMP3_CONFIG 0xCA static const u8 W83791D_REG_TEMP1[3] = { 0x27, /* TEMP 1 in DataSheet */ 0x39, /* TEMP 1 Over in DataSheet */ 0x3A, /* TEMP 1 Hyst in DataSheet */ }; static const u8 
W83791D_REG_TEMP_ADD[2][6] = { {0xC0, /* TEMP 2 in DataSheet */ 0xC1, /* TEMP 2(0.5 deg) in DataSheet */ 0xC5, /* TEMP 2 Over High part in DataSheet */ 0xC6, /* TEMP 2 Over Low part in DataSheet */ 0xC3, /* TEMP 2 Thyst High part in DataSheet */ 0xC4}, /* TEMP 2 Thyst Low part in DataSheet */ {0xC8, /* TEMP 3 in DataSheet */ 0xC9, /* TEMP 3(0.5 deg) in DataSheet */ 0xCD, /* TEMP 3 Over High part in DataSheet */ 0xCE, /* TEMP 3 Over Low part in DataSheet */ 0xCB, /* TEMP 3 Thyst High part in DataSheet */ 0xCC} /* TEMP 3 Thyst Low part in DataSheet */ }; #define W83791D_REG_BEEP_CONFIG 0x4D static const u8 W83791D_REG_BEEP_CTRL[3] = { 0x56, /* BEEP Control Register 1 */ 0x57, /* BEEP Control Register 2 */ 0xA3, /* BEEP Control Register 3 */ }; #define W83791D_REG_GPIO 0x15 #define W83791D_REG_CONFIG 0x40 #define W83791D_REG_VID_FANDIV 0x47 #define W83791D_REG_DID_VID4 0x49 #define W83791D_REG_WCHIPID 0x58 #define W83791D_REG_CHIPMAN 0x4F #define W83791D_REG_PIN 0x4B #define W83791D_REG_I2C_SUBADDR 0x4A #define W83791D_REG_ALARM1 0xA9 /* realtime status register1 */ #define W83791D_REG_ALARM2 0xAA /* realtime status register2 */ #define W83791D_REG_ALARM3 0xAB /* realtime status register3 */ #define W83791D_REG_VBAT 0x5D #define W83791D_REG_I2C_ADDR 0x48 /* The SMBus locks itself. The Winbond W83791D has a bank select register (index 0x4e), but the driver only accesses registers in bank 0. Since we don't switch banks, we don't need any special code to handle locking access between bank switches */ static inline int w83791d_read(struct i2c_client *client, u8 reg) { return i2c_smbus_read_byte_data(client, reg); } static inline int w83791d_write(struct i2c_client *client, u8 reg, u8 value) { return i2c_smbus_write_byte_data(client, reg, value); } /* The analog voltage inputs have 16mV LSB. Since the sysfs output is in mV as would be measured on the chip input pin, need to just multiply/divide by 16 to translate from/to register values. 
*/ #define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8) / 16), 0, 255)) #define IN_FROM_REG(val) ((val) * 16) static u8 fan_to_reg(long rpm, int div) { if (rpm == 0) return 255; rpm = SENSORS_LIMIT(rpm, 1, 1000000); return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); } #define FAN_FROM_REG(val,div) ((val) == 0 ? -1 : \ ((val) == 255 ? 0 : \ 1350000 / ((val) * (div)))) /* for temp1 which is 8-bit resolution, LSB = 1 degree Celsius */ #define TEMP1_FROM_REG(val) ((val) * 1000) #define TEMP1_TO_REG(val) ((val) <= -128000 ? -128 : \ (val) >= 127000 ? 127 : \ (val) < 0 ? ((val) - 500) / 1000 : \ ((val) + 500) / 1000) /* for temp2 and temp3 which are 9-bit resolution, LSB = 0.5 degree Celsius Assumes the top 8 bits are the integral amount and the bottom 8 bits are the fractional amount. Since we only have 0.5 degree resolution, the bottom 7 bits will always be zero */ #define TEMP23_FROM_REG(val) ((val) / 128 * 500) #define TEMP23_TO_REG(val) ((val) <= -128000 ? 0x8000 : \ (val) >= 127500 ? 0x7F80 : \ (val) < 0 ? ((val) - 250) / 500 * 128 : \ ((val) + 250) / 500 * 128) /* for thermal cruise target temp, 7-bits, LSB = 1 degree Celsius */ #define TARGET_TEMP_TO_REG(val) ((val) < 0 ? 0 : \ (val) >= 127000 ? 127 : \ ((val) + 500) / 1000) /* for thermal cruise temp tolerance, 4-bits, LSB = 1 degree Celsius */ #define TOL_TEMP_TO_REG(val) ((val) < 0 ? 0 : \ (val) >= 15000 ? 
15 : \ ((val) + 500) / 1000) #define BEEP_MASK_TO_REG(val) ((val) & 0xffffff) #define BEEP_MASK_FROM_REG(val) ((val) & 0xffffff) #define DIV_FROM_REG(val) (1 << (val)) static u8 div_to_reg(int nr, long val) { int i; /* fan divisors max out at 128 */ val = SENSORS_LIMIT(val, 1, 128) >> 1; for (i = 0; i < 7; i++) { if (val == 0) break; val >>= 1; } return (u8) i; } struct w83791d_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ /* array of 2 pointers to subclients */ struct i2c_client *lm75[2]; /* volts */ u8 in[NUMBER_OF_VIN]; /* Register value */ u8 in_max[NUMBER_OF_VIN]; /* Register value */ u8 in_min[NUMBER_OF_VIN]; /* Register value */ /* fans */ u8 fan[NUMBER_OF_FANIN]; /* Register value */ u8 fan_min[NUMBER_OF_FANIN]; /* Register value */ u8 fan_div[NUMBER_OF_FANIN]; /* Register encoding, shifted right */ /* Temperature sensors */ s8 temp1[3]; /* current, over, thyst */ s16 temp_add[2][3]; /* fixed point value. Top 8 bits are the integral part, bottom 8 bits are the fractional part. We only use the top 9 bits as the resolution is only to the 0.5 degree C... 
static int w83791d_probe(struct i2c_client *client,
			 const struct i2c_device_id *id);
static int w83791d_detect(struct i2c_client *client,
			  struct i2c_board_info *info);
static int w83791d_remove(struct i2c_client *client);

/*
 * 'register' is a reserved C keyword and cannot be used as a parameter
 * name; use 'reg' to match the inline definitions above.
 */
static int w83791d_read(struct i2c_client *client, u8 reg);
static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);

static struct w83791d_data *w83791d_update_device(struct device *dev);

#ifdef DEBUG
static void w83791d_print_debug(struct w83791d_data *data, struct device *dev);
#endif

static void w83791d_init_client(struct i2c_client *client);
*dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ struct i2c_client *client = to_i2c_client(dev); \ struct w83791d_data *data = i2c_get_clientdata(client); \ unsigned long val = simple_strtoul(buf, NULL, 10); \ int nr = sensor_attr->index; \ \ mutex_lock(&data->update_lock); \ data->in_##reg[nr] = IN_TO_REG(val); \ w83791d_write(client, W83791D_REG_IN_##REG[nr], data->in_##reg[nr]); \ mutex_unlock(&data->update_lock); \ \ return count; \ } store_in_reg(MIN, min); store_in_reg(MAX, max); static struct sensor_device_attribute sda_in_input[] = { SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0), SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1), SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2), SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3), SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4), SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5), SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6), SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7), SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8), SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9), }; static struct sensor_device_attribute sda_in_min[] = { SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0), SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1), SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2), SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3), SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4), SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5), SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6), SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7), SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8), SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9), }; static struct sensor_device_attribute sda_in_max[] = { 
SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0), SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1), SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2), SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3), SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4), SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5), SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6), SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7), SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8), SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9), }; static ssize_t show_beep(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct w83791d_data *data = w83791d_update_device(dev); int bitnr = sensor_attr->index; return sprintf(buf, "%d\n", (data->beep_mask >> bitnr) & 1); } static ssize_t store_beep(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int bitnr = sensor_attr->index; int bytenr = bitnr / 8; long val = simple_strtol(buf, NULL, 10) ? 
1 : 0; mutex_lock(&data->update_lock); data->beep_mask &= ~(0xff << (bytenr * 8)); data->beep_mask |= w83791d_read(client, W83791D_REG_BEEP_CTRL[bytenr]) << (bytenr * 8); data->beep_mask &= ~(1 << bitnr); data->beep_mask |= val << bitnr; w83791d_write(client, W83791D_REG_BEEP_CTRL[bytenr], (data->beep_mask >> (bytenr * 8)) & 0xff); mutex_unlock(&data->update_lock); return count; } static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct w83791d_data *data = w83791d_update_device(dev); int bitnr = sensor_attr->index; return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1); } /* Note: The bitmask for the beep enable/disable is different than the bitmask for the alarm. */ static struct sensor_device_attribute sda_in_beep[] = { SENSOR_ATTR(in0_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 0), SENSOR_ATTR(in1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 13), SENSOR_ATTR(in2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 2), SENSOR_ATTR(in3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 3), SENSOR_ATTR(in4_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 8), SENSOR_ATTR(in5_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 9), SENSOR_ATTR(in6_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 10), SENSOR_ATTR(in7_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 16), SENSOR_ATTR(in8_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 17), SENSOR_ATTR(in9_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 14), }; static struct sensor_device_attribute sda_in_alarm[] = { SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0), SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1), SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2), SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3), SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8), SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9), SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10), SENSOR_ATTR(in7_alarm, 
S_IRUGO, show_alarm, NULL, 19), SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 20), SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 14), }; #define show_fan_reg(reg) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ struct w83791d_data *data = w83791d_update_device(dev); \ int nr = sensor_attr->index; \ return sprintf(buf,"%d\n", \ FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \ } show_fan_reg(fan); show_fan_reg(fan_min); static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); int nr = sensor_attr->index; mutex_lock(&data->update_lock); data->fan_min[nr] = fan_to_reg(val, DIV_FROM_REG(data->fan_div[nr])); w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%u\n", DIV_FROM_REG(data->fan_div[nr])); } /* Note: we save and restore the fan minimum here, because its value is determined in part by the fan divisor. This follows the principle of least surprise; the user doesn't expect the fan minimum to change just because the divisor changed. 
/*
 * Set the fan clock divisor.  The divisor bits are scattered across the
 * three FAN_DIV registers (and, for fans 1-3, bit 2 of the divisor lives
 * in the VBAT register), so each fan needs its own register index, shift
 * and keep-mask.  The fan minimum is saved and restored around the change
 * because its register encoding depends on the divisor.
 */
static ssize_t store_fan_div(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int nr = sensor_attr->index;
	unsigned long min;
	u8 tmp_fan_div;
	u8 fan_div_reg;
	u8 vbat_reg;
	int indx = 0;
	u8 keep_mask = 0;
	u8 new_shift = 0;

	/* Save fan_min */
	min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));

	mutex_lock(&data->update_lock);
	data->fan_div[nr] = div_to_reg(nr, simple_strtoul(buf, NULL, 10));

	/* Per-fan location of the divisor bits in the FAN_DIV registers */
	switch (nr) {
	case 0:
		indx = 0;
		keep_mask = 0xcf;
		new_shift = 4;
		break;
	case 1:
		indx = 0;
		keep_mask = 0x3f;
		new_shift = 6;
		break;
	case 2:
		indx = 1;
		keep_mask = 0x3f;
		new_shift = 6;
		break;
	case 3:
		indx = 2;
		keep_mask = 0xf8;
		new_shift = 0;
		break;
	case 4:
		indx = 2;
		keep_mask = 0x8f;
		new_shift = 4;
		break;
#ifdef DEBUG
	default:
		dev_warn(dev, "store_fan_div: Unexpected nr seen: %d\n", nr);
		count = -EINVAL;
		goto err_exit;
#endif
	}

	/* Read-modify-write: keep the other fans' bits intact */
	fan_div_reg = w83791d_read(client, W83791D_REG_FAN_DIV[indx])
			& keep_mask;
	tmp_fan_div = (data->fan_div[nr] << new_shift) & ~keep_mask;

	w83791d_write(client, W83791D_REG_FAN_DIV[indx],
			fan_div_reg | tmp_fan_div);

	/* Bit 2 of fans 0-2 is stored in the vbat register (bits 5-7) */
	if (nr < 3) {
		keep_mask = ~(1 << (nr + 5));
		vbat_reg = w83791d_read(client, W83791D_REG_VBAT)
				& keep_mask;
		tmp_fan_div = (data->fan_div[nr] << (3 + nr)) & ~keep_mask;
		w83791d_write(client, W83791D_REG_VBAT,
				vbat_reg | tmp_fan_div);
	}

	/* Restore fan_min */
	data->fan_min[nr] = fan_to_reg(min, DIV_FROM_REG(data->fan_div[nr]));
	w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]);

#ifdef DEBUG
err_exit:
#endif
	mutex_unlock(&data->update_lock);

	return count;
}
S_IRUGO, show_fan, NULL, 2), SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3), SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4), }; static struct sensor_device_attribute sda_fan_min[] = { SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 0), SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 1), SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 2), SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 3), SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 4), }; static struct sensor_device_attribute sda_fan_div[] = { SENSOR_ATTR(fan1_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 0), SENSOR_ATTR(fan2_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 1), SENSOR_ATTR(fan3_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 2), SENSOR_ATTR(fan4_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 3), SENSOR_ATTR(fan5_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 4), }; static struct sensor_device_attribute sda_fan_beep[] = { SENSOR_ATTR(fan1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 6), SENSOR_ATTR(fan2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 7), SENSOR_ATTR(fan3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 11), SENSOR_ATTR(fan4_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 21), SENSOR_ATTR(fan5_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 22), }; static struct sensor_device_attribute sda_fan_alarm[] = { SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6), SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7), SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11), SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 21), SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22), }; /* read/write PWMs */ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct w83791d_data *data = 
w83791d_update_device(dev); return sprintf(buf, "%u\n", data->pwm[nr]); } static ssize_t store_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int nr = sensor_attr->index; unsigned long val; if (strict_strtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&data->update_lock); data->pwm[nr] = SENSORS_LIMIT(val, 0, 255); w83791d_write(client, W83791D_REG_PWM[nr], data->pwm[nr]); mutex_unlock(&data->update_lock); return count; } static struct sensor_device_attribute sda_pwm[] = { SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0), SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1), SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2), SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3), SENSOR_ATTR(pwm5, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 4), }; static ssize_t show_pwmenable(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%u\n", data->pwm_enable[nr] + 1); } static ssize_t store_pwmenable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int nr = sensor_attr->index; unsigned long val; u8 reg_cfg_tmp; u8 reg_idx = 0; u8 val_shift = 0; u8 keep_mask = 0; int ret = strict_strtoul(buf, 10, &val); if (ret || val < 1 || val > 3) return -EINVAL; mutex_lock(&data->update_lock); data->pwm_enable[nr] = val - 1; switch (nr) { case 0: reg_idx = 0; val_shift = 2; keep_mask = 0xf3; break; case 1: reg_idx = 0; val_shift = 4; keep_mask = 
/*
 * Set the Thermal Cruise target temperature (millidegrees C in, 7-bit
 * degrees C in the register).  Bit 7 of the target register is preserved
 * across the write.
 */
static ssize_t store_temp_target(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int nr = sensor_attr->index;
	unsigned long val;
	u8 target_mask;

	if (strict_strtoul(buf, 10, &val))
		return -EINVAL;

	mutex_lock(&data->update_lock);
	data->temp_target[nr] = TARGET_TEMP_TO_REG(val);
	/* keep the register's top bit untouched */
	target_mask = w83791d_read(client,
				W83791D_REG_TEMP_TARGET[nr]) & 0x80;
	w83791d_write(client, W83791D_REG_TEMP_TARGET[nr],
				data->temp_target[nr] | target_mask);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
 * Set the Thermal Cruise temperature tolerance (millidegrees C in, 4-bit
 * degrees C in the register).  Tolerances for PWM 1 and 2 share one
 * register (low/high nibble); PWM 3 has its own, hence the per-channel
 * register index, shift and keep-mask.
 */
static ssize_t store_temp_tolerance(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int nr = sensor_attr->index;
	unsigned long val;
	u8 target_mask;
	u8 reg_idx = 0;
	u8 val_shift = 0;
	u8 keep_mask = 0;

	if (strict_strtoul(buf, 10, &val))
		return -EINVAL;

	switch (nr) {
	case 0:
		reg_idx = 0;
		val_shift = 0;
		keep_mask = 0xf0;
		break;
	case 1:
		reg_idx = 0;
		val_shift = 4;
		keep_mask = 0x0f;
		break;
	case 2:
		reg_idx = 1;
		val_shift = 0;
		keep_mask = 0xf0;
		break;
	}

	mutex_lock(&data->update_lock);
	data->temp_tolerance[nr] = TOL_TEMP_TO_REG(val);
	/* preserve the other channel's nibble */
	target_mask = w83791d_read(client,
				W83791D_REG_TEMP_TOL[reg_idx]) & keep_mask;
	w83791d_write(client, W83791D_REG_TEMP_TOL[reg_idx],
			(data->temp_tolerance[nr] << val_shift) | target_mask);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
 * Write a temp2/temp3 limit.  The 9-bit value is split across two
 * registers: the high byte holds the integer part, and only bit 7 of the
 * low byte is significant (the 0.5 degree bit) at this resolution, hence
 * the & 0x80 on the low-byte write.
 */
static ssize_t store_temp23(struct device *dev,
		struct device_attribute *devattr, const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	long val = simple_strtol(buf, NULL, 10);
	int nr = attr->nr;
	int index = attr->index;

	mutex_lock(&data->update_lock);
	data->temp_add[nr][index] = TEMP23_TO_REG(val);
	w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2],
				data->temp_add[nr][index] >> 8);
	w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2 + 1],
				data->temp_add[nr][index] & 0x80);
	mutex_unlock(&data->update_lock);
	return count;
}
SENSOR_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp1, store_temp1, 0, 1), SENSOR_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 0, 1), SENSOR_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 1, 1), }; static struct sensor_device_attribute_2 sda_temp_max_hyst[] = { SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1, store_temp1, 0, 2), SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 0, 2), SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 1, 2), }; /* Note: The bitmask for the beep enable/disable is different than the bitmask for the alarm. */ static struct sensor_device_attribute sda_temp_beep[] = { SENSOR_ATTR(temp1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 4), SENSOR_ATTR(temp2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 5), SENSOR_ATTR(temp3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 1), }; static struct sensor_device_attribute sda_temp_alarm[] = { SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4), SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5), SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13), }; /* get reatime status of all sensors items: voltage, temp, fan */ static ssize_t show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%u\n", data->alarms); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL); /* Beep control */ #define GLOBAL_BEEP_ENABLE_SHIFT 15 #define GLOBAL_BEEP_ENABLE_MASK (1 << GLOBAL_BEEP_ENABLE_SHIFT) static ssize_t show_beep_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%d\n", data->beep_enable); } static ssize_t show_beep_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%d\n", 
/*
 * Set the 24-bit beep mask.  The user-supplied mask is written to the
 * three beep control registers, low byte first, with the global enable
 * bit forced to the currently stored beep_enable state.
 */
static ssize_t store_beep_mask(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	long val = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&data->update_lock);

	/* The beep_enable state overrides any enabling request from
	   the masks */
	data->beep_mask = BEEP_MASK_TO_REG(val) & ~GLOBAL_BEEP_ENABLE_MASK;
	data->beep_mask |= (data->beep_enable << GLOBAL_BEEP_ENABLE_SHIFT);

	val = data->beep_mask;

	/* push the mask out one byte at a time, low byte first */
	for (i = 0; i < 3; i++) {
		w83791d_write(client, W83791D_REG_BEEP_CTRL[i], (val & 0xff));
		val >>= 8;
	}

	mutex_unlock(&data->update_lock);

	return count;
}
/*
 * Set the VRM version used by vid_from_reg() to decode the VID pins.
 * This is driver-internal state, not a chip register.
 */
static ssize_t store_vrm_reg(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct w83791d_data *data = dev_get_drvdata(dev);

	/* No lock needed as vrm is internal to the driver
	   (not read from a chip register) and so is not
	   updated in w83791d_update_device() */
	data->vrm = simple_strtoul(buf, NULL, 10);

	return count;
}
&sda_temp_input[X].dev_attr.attr, \ &sda_temp_max[X].dev_attr.attr, \ &sda_temp_max_hyst[X].dev_attr.attr, \ &sda_temp_beep[X].dev_attr.attr, \ &sda_temp_alarm[X].dev_attr.attr static struct attribute *w83791d_attributes[] = { IN_UNIT_ATTRS(0), IN_UNIT_ATTRS(1), IN_UNIT_ATTRS(2), IN_UNIT_ATTRS(3), IN_UNIT_ATTRS(4), IN_UNIT_ATTRS(5), IN_UNIT_ATTRS(6), IN_UNIT_ATTRS(7), IN_UNIT_ATTRS(8), IN_UNIT_ATTRS(9), FAN_UNIT_ATTRS(0), FAN_UNIT_ATTRS(1), FAN_UNIT_ATTRS(2), TEMP_UNIT_ATTRS(0), TEMP_UNIT_ATTRS(1), TEMP_UNIT_ATTRS(2), &dev_attr_alarms.attr, &sda_beep_ctrl[0].dev_attr.attr, &sda_beep_ctrl[1].dev_attr.attr, &dev_attr_cpu0_vid.attr, &dev_attr_vrm.attr, &sda_pwm[0].dev_attr.attr, &sda_pwm[1].dev_attr.attr, &sda_pwm[2].dev_attr.attr, &sda_pwmenable[0].dev_attr.attr, &sda_pwmenable[1].dev_attr.attr, &sda_pwmenable[2].dev_attr.attr, &sda_temp_target[0].dev_attr.attr, &sda_temp_target[1].dev_attr.attr, &sda_temp_target[2].dev_attr.attr, &sda_temp_tolerance[0].dev_attr.attr, &sda_temp_tolerance[1].dev_attr.attr, &sda_temp_tolerance[2].dev_attr.attr, NULL }; static const struct attribute_group w83791d_group = { .attrs = w83791d_attributes, }; /* Separate group of attributes for fan/pwm 4-5. 
Their pins can also be in use for GPIO in which case their sysfs-interface should not be made available */ static struct attribute *w83791d_attributes_fanpwm45[] = { FAN_UNIT_ATTRS(3), FAN_UNIT_ATTRS(4), &sda_pwm[3].dev_attr.attr, &sda_pwm[4].dev_attr.attr, NULL }; static const struct attribute_group w83791d_group_fanpwm45 = { .attrs = w83791d_attributes_fanpwm45, }; static int w83791d_detect_subclients(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct w83791d_data *data = i2c_get_clientdata(client); int address = client->addr; int i, id, err; u8 val; id = i2c_adapter_id(adapter); if (force_subclients[0] == id && force_subclients[1] == address) { for (i = 2; i <= 3; i++) { if (force_subclients[i] < 0x48 || force_subclients[i] > 0x4f) { dev_err(&client->dev, "invalid subclient " "address %d; must be 0x48-0x4f\n", force_subclients[i]); err = -ENODEV; goto error_sc_0; } } w83791d_write(client, W83791D_REG_I2C_SUBADDR, (force_subclients[2] & 0x07) | ((force_subclients[3] & 0x07) << 4)); } val = w83791d_read(client, W83791D_REG_I2C_SUBADDR); if (!(val & 0x08)) { data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7)); } if (!(val & 0x80)) { if ((data->lm75[0] != NULL) && ((val & 0x7) == ((val >> 4) & 0x7))) { dev_err(&client->dev, "duplicate addresses 0x%x, " "use force_subclient\n", data->lm75[0]->addr); err = -ENODEV; goto error_sc_1; } data->lm75[1] = i2c_new_dummy(adapter, 0x48 + ((val >> 4) & 0x7)); } return 0; /* Undo inits in case of errors */ error_sc_1: if (data->lm75[0] != NULL) i2c_unregister_device(data->lm75[0]); error_sc_0: return err; } /* Return 0 if detection is successful, -ENODEV otherwise */ static int w83791d_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int val1, val2; unsigned short address = client->addr; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { return -ENODEV; } if (w83791d_read(client, W83791D_REG_CONFIG) & 0x80) 
return -ENODEV; val1 = w83791d_read(client, W83791D_REG_BANK); val2 = w83791d_read(client, W83791D_REG_CHIPMAN); /* Check for Winbond ID if in bank 0 */ if (!(val1 & 0x07)) { if ((!(val1 & 0x80) && val2 != 0xa3) || ( (val1 & 0x80) && val2 != 0x5c)) { return -ENODEV; } } /* If Winbond chip, address of chip and W83791D_REG_I2C_ADDR should match */ if (w83791d_read(client, W83791D_REG_I2C_ADDR) != address) return -ENODEV; /* We want bank 0 and Vendor ID high byte */ val1 = w83791d_read(client, W83791D_REG_BANK) & 0x78; w83791d_write(client, W83791D_REG_BANK, val1 | 0x80); /* Verify it is a Winbond w83791d */ val1 = w83791d_read(client, W83791D_REG_WCHIPID); val2 = w83791d_read(client, W83791D_REG_CHIPMAN); if (val1 != 0x71 || val2 != 0x5c) return -ENODEV; strlcpy(info->type, "w83791d", I2C_NAME_SIZE); return 0; } static int w83791d_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct w83791d_data *data; struct device *dev = &client->dev; int i, err; u8 has_fanpwm45; #ifdef DEBUG int val1; val1 = w83791d_read(client, W83791D_REG_DID_VID4); dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n", (val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1); #endif data = kzalloc(sizeof(struct w83791d_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto error0; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); err = w83791d_detect_subclients(client); if (err) goto error1; /* Initialize the chip */ w83791d_init_client(client); /* If the fan_div is changed, make sure there is a rational fan_min in place */ for (i = 0; i < NUMBER_OF_FANIN; i++) { data->fan_min[i] = w83791d_read(client, W83791D_REG_FAN_MIN[i]); } /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &w83791d_group))) goto error3; /* Check if pins of fan/pwm 4-5 are in use as GPIO */ has_fanpwm45 = w83791d_read(client, W83791D_REG_GPIO) & 0x10; if (has_fanpwm45) { err = sysfs_create_group(&client->dev.kobj, &w83791d_group_fanpwm45); if (err) goto error4; } /* 
Everything is ready, now register the working device */ data->hwmon_dev = hwmon_device_register(dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto error5; } return 0; error5: if (has_fanpwm45) sysfs_remove_group(&client->dev.kobj, &w83791d_group_fanpwm45); error4: sysfs_remove_group(&client->dev.kobj, &w83791d_group); error3: if (data->lm75[0] != NULL) i2c_unregister_device(data->lm75[0]); if (data->lm75[1] != NULL) i2c_unregister_device(data->lm75[1]); error1: kfree(data); error0: return err; } static int w83791d_remove(struct i2c_client *client) { struct w83791d_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &w83791d_group); if (data->lm75[0] != NULL) i2c_unregister_device(data->lm75[0]); if (data->lm75[1] != NULL) i2c_unregister_device(data->lm75[1]); kfree(data); return 0; } static void w83791d_init_client(struct i2c_client *client) { struct w83791d_data *data = i2c_get_clientdata(client); u8 tmp; u8 old_beep; /* The difference between reset and init is that reset does a hard reset of the chip via index 0x40, bit 7, but init simply forces certain registers to have "sane" values. The hope is that the BIOS has done the right thing (which is why the default is reset=0, init=0), but if not, reset is the hard hammer and init is the soft mallet both of which are trying to whack things into place... NOTE: The data sheet makes a distinction between "power on defaults" and "reset by MR". As far as I can tell, the hard reset puts everything into a power-on state so I'm not sure what "reset by MR" means or how it can happen. */ if (reset || init) { /* keep some BIOS settings when we... */ old_beep = w83791d_read(client, W83791D_REG_BEEP_CONFIG); if (reset) { /* ... reset the chip and ... */ w83791d_write(client, W83791D_REG_CONFIG, 0x80); } /* ... 
disable power-on abnormal beep */ w83791d_write(client, W83791D_REG_BEEP_CONFIG, old_beep | 0x80); /* disable the global beep (not done by hard reset) */ tmp = w83791d_read(client, W83791D_REG_BEEP_CTRL[1]); w83791d_write(client, W83791D_REG_BEEP_CTRL[1], tmp & 0xef); if (init) { /* Make sure monitoring is turned on for add-ons */ tmp = w83791d_read(client, W83791D_REG_TEMP2_CONFIG); if (tmp & 1) { w83791d_write(client, W83791D_REG_TEMP2_CONFIG, tmp & 0xfe); } tmp = w83791d_read(client, W83791D_REG_TEMP3_CONFIG); if (tmp & 1) { w83791d_write(client, W83791D_REG_TEMP3_CONFIG, tmp & 0xfe); } /* Start monitoring */ tmp = w83791d_read(client, W83791D_REG_CONFIG) & 0xf7; w83791d_write(client, W83791D_REG_CONFIG, tmp | 0x01); } } data->vrm = vid_which_vrm(); } static struct w83791d_data *w83791d_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int i, j; u8 reg_array_tmp[3]; u8 vbat_reg; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + (HZ * 3)) || !data->valid) { dev_dbg(dev, "Starting w83791d device update\n"); /* Update the voltages measured value and limits */ for (i = 0; i < NUMBER_OF_VIN; i++) { data->in[i] = w83791d_read(client, W83791D_REG_IN[i]); data->in_max[i] = w83791d_read(client, W83791D_REG_IN_MAX[i]); data->in_min[i] = w83791d_read(client, W83791D_REG_IN_MIN[i]); } /* Update the fan counts and limits */ for (i = 0; i < NUMBER_OF_FANIN; i++) { /* Update the Fan measured value and limits */ data->fan[i] = w83791d_read(client, W83791D_REG_FAN[i]); data->fan_min[i] = w83791d_read(client, W83791D_REG_FAN_MIN[i]); } /* Update the fan divisor */ for (i = 0; i < 3; i++) { reg_array_tmp[i] = w83791d_read(client, W83791D_REG_FAN_DIV[i]); } data->fan_div[0] = (reg_array_tmp[0] >> 4) & 0x03; data->fan_div[1] = (reg_array_tmp[0] >> 6) & 0x03; data->fan_div[2] = (reg_array_tmp[1] >> 6) & 0x03; data->fan_div[3] = reg_array_tmp[2] & 0x07; data->fan_div[4] 
= (reg_array_tmp[2] >> 4) & 0x07; /* The fan divisor for fans 0-2 get bit 2 from bits 5-7 respectively of vbat register */ vbat_reg = w83791d_read(client, W83791D_REG_VBAT); for (i = 0; i < 3; i++) data->fan_div[i] |= (vbat_reg >> (3 + i)) & 0x04; /* Update PWM duty cycle */ for (i = 0; i < NUMBER_OF_PWM; i++) { data->pwm[i] = w83791d_read(client, W83791D_REG_PWM[i]); } /* Update PWM enable status */ for (i = 0; i < 2; i++) { reg_array_tmp[i] = w83791d_read(client, W83791D_REG_FAN_CFG[i]); } data->pwm_enable[0] = (reg_array_tmp[0] >> 2) & 0x03; data->pwm_enable[1] = (reg_array_tmp[0] >> 4) & 0x03; data->pwm_enable[2] = (reg_array_tmp[1] >> 2) & 0x03; /* Update PWM target temperature */ for (i = 0; i < 3; i++) { data->temp_target[i] = w83791d_read(client, W83791D_REG_TEMP_TARGET[i]) & 0x7f; } /* Update PWM temperature tolerance */ for (i = 0; i < 2; i++) { reg_array_tmp[i] = w83791d_read(client, W83791D_REG_TEMP_TOL[i]); } data->temp_tolerance[0] = reg_array_tmp[0] & 0x0f; data->temp_tolerance[1] = (reg_array_tmp[0] >> 4) & 0x0f; data->temp_tolerance[2] = reg_array_tmp[1] & 0x0f; /* Update the first temperature sensor */ for (i = 0; i < 3; i++) { data->temp1[i] = w83791d_read(client, W83791D_REG_TEMP1[i]); } /* Update the rest of the temperature sensors */ for (i = 0; i < 2; i++) { for (j = 0; j < 3; j++) { data->temp_add[i][j] = (w83791d_read(client, W83791D_REG_TEMP_ADD[i][j * 2]) << 8) | w83791d_read(client, W83791D_REG_TEMP_ADD[i][j * 2 + 1]); } } /* Update the realtime status */ data->alarms = w83791d_read(client, W83791D_REG_ALARM1) + (w83791d_read(client, W83791D_REG_ALARM2) << 8) + (w83791d_read(client, W83791D_REG_ALARM3) << 16); /* Update the beep configuration information */ data->beep_mask = w83791d_read(client, W83791D_REG_BEEP_CTRL[0]) + (w83791d_read(client, W83791D_REG_BEEP_CTRL[1]) << 8) + (w83791d_read(client, W83791D_REG_BEEP_CTRL[2]) << 16); /* Extract global beep enable flag */ data->beep_enable = (data->beep_mask >> GLOBAL_BEEP_ENABLE_SHIFT) & 
0x01; /* Update the cpu voltage information */ i = w83791d_read(client, W83791D_REG_VID_FANDIV); data->vid = i & 0x0f; data->vid |= (w83791d_read(client, W83791D_REG_DID_VID4) & 0x01) << 4; data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); #ifdef DEBUG w83791d_print_debug(data, dev); #endif return data; } #ifdef DEBUG static void w83791d_print_debug(struct w83791d_data *data, struct device *dev) { int i = 0, j = 0; dev_dbg(dev, "======Start of w83791d debug values======\n"); dev_dbg(dev, "%d set of Voltages: ===>\n", NUMBER_OF_VIN); for (i = 0; i < NUMBER_OF_VIN; i++) { dev_dbg(dev, "vin[%d] is: 0x%02x\n", i, data->in[i]); dev_dbg(dev, "vin[%d] min is: 0x%02x\n", i, data->in_min[i]); dev_dbg(dev, "vin[%d] max is: 0x%02x\n", i, data->in_max[i]); } dev_dbg(dev, "%d set of Fan Counts/Divisors: ===>\n", NUMBER_OF_FANIN); for (i = 0; i < NUMBER_OF_FANIN; i++) { dev_dbg(dev, "fan[%d] is: 0x%02x\n", i, data->fan[i]); dev_dbg(dev, "fan[%d] min is: 0x%02x\n", i, data->fan_min[i]); dev_dbg(dev, "fan_div[%d] is: 0x%02x\n", i, data->fan_div[i]); } /* temperature math is signed, but only print out the bits that matter */ dev_dbg(dev, "%d set of Temperatures: ===>\n", NUMBER_OF_TEMPIN); for (i = 0; i < 3; i++) { dev_dbg(dev, "temp1[%d] is: 0x%02x\n", i, (u8) data->temp1[i]); } for (i = 0; i < 2; i++) { for (j = 0; j < 3; j++) { dev_dbg(dev, "temp_add[%d][%d] is: 0x%04x\n", i, j, (u16) data->temp_add[i][j]); } } dev_dbg(dev, "Misc Information: ===>\n"); dev_dbg(dev, "alarm is: 0x%08x\n", data->alarms); dev_dbg(dev, "beep_mask is: 0x%08x\n", data->beep_mask); dev_dbg(dev, "beep_enable is: %d\n", data->beep_enable); dev_dbg(dev, "vid is: 0x%02x\n", data->vid); dev_dbg(dev, "vrm is: 0x%02x\n", data->vrm); dev_dbg(dev, "=======End of w83791d debug values========\n"); dev_dbg(dev, "\n"); } #endif static int __init sensors_w83791d_init(void) { return i2c_add_driver(&w83791d_driver); } static void __exit sensors_w83791d_exit(void) { 
i2c_del_driver(&w83791d_driver); } MODULE_AUTHOR("Charles Spirakis <bezaur@gmail.com>"); MODULE_DESCRIPTION("W83791D driver"); MODULE_LICENSE("GPL"); module_init(sensors_w83791d_init); module_exit(sensors_w83791d_exit);
gpl-2.0
javilonas/SGS3-Sourcedrops
drivers/char/ipmi/ipmi_kcs_sm.c
4324
13751
/* * ipmi_kcs_sm.c * * State machine for handling IPMI KCS interfaces. * * Author: MontaVista Software, Inc. * Corey Minyard <minyard@mvista.com> * source@mvista.com * * Copyright 2002 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * This state machine is taken from the state machine in the IPMI spec, * pretty much verbatim. If you have questions about the states, see * that document. */ #include <linux/kernel.h> /* For printk. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/jiffies.h> #include <linux/ipmi_msgdefs.h> /* for completion codes */ #include "ipmi_si_sm.h" /* kcs_debug is a bit-field * KCS_DEBUG_ENABLE - turned on for now * KCS_DEBUG_MSG - commands and their responses * KCS_DEBUG_STATES - state machine */ #define KCS_DEBUG_STATES 4 #define KCS_DEBUG_MSG 2 #define KCS_DEBUG_ENABLE 1 static int kcs_debug; module_param(kcs_debug, int, 0644); MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); /* The states the KCS driver may be in. */ enum kcs_states { /* The KCS interface is currently doing nothing. */ KCS_IDLE, /* * We are starting an operation. The data is in the output * buffer, but nothing has been done to the interface yet. This * was added to the state machine in the spec to wait for the * initial IBF. */ KCS_START_OP, /* We have written a write cmd to the interface. */ KCS_WAIT_WRITE_START, /* We are writing bytes to the interface. */ KCS_WAIT_WRITE, /* * We have written the write end cmd to the interface, and * still need to write the last byte. */ KCS_WAIT_WRITE_END, /* We are waiting to read data from the interface. */ KCS_WAIT_READ, /* * State to transition to the error handler, this was added to * the state machine in the spec to be sure IBF was there. */ KCS_ERROR0, /* * First stage error handler, wait for the interface to * respond. */ KCS_ERROR1, /* * The abort cmd has been written, wait for the interface to * respond. */ KCS_ERROR2, /* * We wrote some data to the interface, wait for it to switch * to read mode. */ KCS_ERROR3, /* The hardware failed to follow the state machine. */ KCS_HOSED }; #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH #define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH /* Timeouts in microseconds. 
*/ #define IBF_RETRY_TIMEOUT 1000000 #define OBF_RETRY_TIMEOUT 1000000 #define MAX_ERROR_RETRIES 10 #define ERROR0_OBF_WAIT_JIFFIES (2*HZ) struct si_sm_data { enum kcs_states state; struct si_sm_io *io; unsigned char write_data[MAX_KCS_WRITE_SIZE]; int write_pos; int write_count; int orig_write_count; unsigned char read_data[MAX_KCS_READ_SIZE]; int read_pos; int truncated; unsigned int error_retries; long ibf_timeout; long obf_timeout; unsigned long error0_timeout; }; static unsigned int init_kcs_data(struct si_sm_data *kcs, struct si_sm_io *io) { kcs->state = KCS_IDLE; kcs->io = io; kcs->write_pos = 0; kcs->write_count = 0; kcs->orig_write_count = 0; kcs->read_pos = 0; kcs->error_retries = 0; kcs->truncated = 0; kcs->ibf_timeout = IBF_RETRY_TIMEOUT; kcs->obf_timeout = OBF_RETRY_TIMEOUT; /* Reserve 2 I/O bytes. */ return 2; } static inline unsigned char read_status(struct si_sm_data *kcs) { return kcs->io->inputb(kcs->io, 1); } static inline unsigned char read_data(struct si_sm_data *kcs) { return kcs->io->inputb(kcs->io, 0); } static inline void write_cmd(struct si_sm_data *kcs, unsigned char data) { kcs->io->outputb(kcs->io, 1, data); } static inline void write_data(struct si_sm_data *kcs, unsigned char data) { kcs->io->outputb(kcs->io, 0, data); } /* Control codes. */ #define KCS_GET_STATUS_ABORT 0x60 #define KCS_WRITE_START 0x61 #define KCS_WRITE_END 0x62 #define KCS_READ_BYTE 0x68 /* Status bits. 
*/ #define GET_STATUS_STATE(status) (((status) >> 6) & 0x03) #define KCS_IDLE_STATE 0 #define KCS_READ_STATE 1 #define KCS_WRITE_STATE 2 #define KCS_ERROR_STATE 3 #define GET_STATUS_ATN(status) ((status) & 0x04) #define GET_STATUS_IBF(status) ((status) & 0x02) #define GET_STATUS_OBF(status) ((status) & 0x01) static inline void write_next_byte(struct si_sm_data *kcs) { write_data(kcs, kcs->write_data[kcs->write_pos]); (kcs->write_pos)++; (kcs->write_count)--; } static inline void start_error_recovery(struct si_sm_data *kcs, char *reason) { (kcs->error_retries)++; if (kcs->error_retries > MAX_ERROR_RETRIES) { if (kcs_debug & KCS_DEBUG_ENABLE) printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason); kcs->state = KCS_HOSED; } else { kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; kcs->state = KCS_ERROR0; } } static inline void read_next_byte(struct si_sm_data *kcs) { if (kcs->read_pos >= MAX_KCS_READ_SIZE) { /* Throw the data away and mark it truncated. */ read_data(kcs); kcs->truncated = 1; } else { kcs->read_data[kcs->read_pos] = read_data(kcs); (kcs->read_pos)++; } write_data(kcs, KCS_READ_BYTE); } static inline int check_ibf(struct si_sm_data *kcs, unsigned char status, long time) { if (GET_STATUS_IBF(status)) { kcs->ibf_timeout -= time; if (kcs->ibf_timeout < 0) { start_error_recovery(kcs, "IBF not ready in time"); kcs->ibf_timeout = IBF_RETRY_TIMEOUT; return 1; } return 0; } kcs->ibf_timeout = IBF_RETRY_TIMEOUT; return 1; } static inline int check_obf(struct si_sm_data *kcs, unsigned char status, long time) { if (!GET_STATUS_OBF(status)) { kcs->obf_timeout -= time; if (kcs->obf_timeout < 0) { start_error_recovery(kcs, "OBF not ready in time"); return 1; } return 0; } kcs->obf_timeout = OBF_RETRY_TIMEOUT; return 1; } static void clear_obf(struct si_sm_data *kcs, unsigned char status) { if (GET_STATUS_OBF(status)) read_data(kcs); } static void restart_kcs_transaction(struct si_sm_data *kcs) { kcs->write_count = kcs->orig_write_count; kcs->write_pos = 0; 
kcs->read_pos = 0; kcs->state = KCS_WAIT_WRITE_START; kcs->ibf_timeout = IBF_RETRY_TIMEOUT; kcs->obf_timeout = OBF_RETRY_TIMEOUT; write_cmd(kcs, KCS_WRITE_START); } static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, unsigned int size) { unsigned int i; if (size < 2) return IPMI_REQ_LEN_INVALID_ERR; if (size > MAX_KCS_WRITE_SIZE) return IPMI_REQ_LEN_EXCEEDED_ERR; if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) return IPMI_NOT_IN_MY_STATE_ERR; if (kcs_debug & KCS_DEBUG_MSG) { printk(KERN_DEBUG "start_kcs_transaction -"); for (i = 0; i < size; i++) printk(" %02x", (unsigned char) (data [i])); printk("\n"); } kcs->error_retries = 0; memcpy(kcs->write_data, data, size); kcs->write_count = size; kcs->orig_write_count = size; kcs->write_pos = 0; kcs->read_pos = 0; kcs->state = KCS_START_OP; kcs->ibf_timeout = IBF_RETRY_TIMEOUT; kcs->obf_timeout = OBF_RETRY_TIMEOUT; return 0; } static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data, unsigned int length) { if (length < kcs->read_pos) { kcs->read_pos = length; kcs->truncated = 1; } memcpy(data, kcs->read_data, kcs->read_pos); if ((length >= 3) && (kcs->read_pos < 3)) { /* Guarantee that we return at least 3 bytes, with an error in the third byte if it is too short. */ data[2] = IPMI_ERR_UNSPECIFIED; kcs->read_pos = 3; } if (kcs->truncated) { /* * Report a truncated error. We might overwrite * another error, but that's too bad, the user needs * to know it was truncated. */ data[2] = IPMI_ERR_MSG_TRUNCATED; kcs->truncated = 0; } return kcs->read_pos; } /* * This implements the state machine defined in the IPMI manual, see * that for details on how this works. Divide that flowchart into * sections delimited by "Wait for IBF" and this will become clear. 
*/ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) { unsigned char status; unsigned char state; status = read_status(kcs); if (kcs_debug & KCS_DEBUG_STATES) printk(KERN_DEBUG "KCS: State = %d, %x\n", kcs->state, status); /* All states wait for ibf, so just do it here. */ if (!check_ibf(kcs, status, time)) return SI_SM_CALL_WITH_DELAY; /* Just about everything looks at the KCS state, so grab that, too. */ state = GET_STATUS_STATE(status); switch (kcs->state) { case KCS_IDLE: /* If there's and interrupt source, turn it off. */ clear_obf(kcs, status); if (GET_STATUS_ATN(status)) return SI_SM_ATTN; else return SI_SM_IDLE; case KCS_START_OP: if (state != KCS_IDLE_STATE) { start_error_recovery(kcs, "State machine not idle at start"); break; } clear_obf(kcs, status); write_cmd(kcs, KCS_WRITE_START); kcs->state = KCS_WAIT_WRITE_START; break; case KCS_WAIT_WRITE_START: if (state != KCS_WRITE_STATE) { start_error_recovery( kcs, "Not in write state at write start"); break; } read_data(kcs); if (kcs->write_count == 1) { write_cmd(kcs, KCS_WRITE_END); kcs->state = KCS_WAIT_WRITE_END; } else { write_next_byte(kcs); kcs->state = KCS_WAIT_WRITE; } break; case KCS_WAIT_WRITE: if (state != KCS_WRITE_STATE) { start_error_recovery(kcs, "Not in write state for write"); break; } clear_obf(kcs, status); if (kcs->write_count == 1) { write_cmd(kcs, KCS_WRITE_END); kcs->state = KCS_WAIT_WRITE_END; } else { write_next_byte(kcs); } break; case KCS_WAIT_WRITE_END: if (state != KCS_WRITE_STATE) { start_error_recovery(kcs, "Not in write state" " for write end"); break; } clear_obf(kcs, status); write_next_byte(kcs); kcs->state = KCS_WAIT_READ; break; case KCS_WAIT_READ: if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) { start_error_recovery( kcs, "Not in read or idle in read state"); break; } if (state == KCS_READ_STATE) { if (!check_obf(kcs, status, time)) return SI_SM_CALL_WITH_DELAY; read_next_byte(kcs); } else { /* * We don't implement this exactly like the 
state * machine in the spec. Some broken hardware * does not write the final dummy byte to the * read register. Thus obf will never go high * here. We just go straight to idle, and we * handle clearing out obf in idle state if it * happens to come in. */ clear_obf(kcs, status); kcs->orig_write_count = 0; kcs->state = KCS_IDLE; return SI_SM_TRANSACTION_COMPLETE; } break; case KCS_ERROR0: clear_obf(kcs, status); status = read_status(kcs); if (GET_STATUS_OBF(status)) /* controller isn't responding */ if (time_before(jiffies, kcs->error0_timeout)) return SI_SM_CALL_WITH_TICK_DELAY; write_cmd(kcs, KCS_GET_STATUS_ABORT); kcs->state = KCS_ERROR1; break; case KCS_ERROR1: clear_obf(kcs, status); write_data(kcs, 0); kcs->state = KCS_ERROR2; break; case KCS_ERROR2: if (state != KCS_READ_STATE) { start_error_recovery(kcs, "Not in read state for error2"); break; } if (!check_obf(kcs, status, time)) return SI_SM_CALL_WITH_DELAY; clear_obf(kcs, status); write_data(kcs, KCS_READ_BYTE); kcs->state = KCS_ERROR3; break; case KCS_ERROR3: if (state != KCS_IDLE_STATE) { start_error_recovery(kcs, "Not in idle state for error3"); break; } if (!check_obf(kcs, status, time)) return SI_SM_CALL_WITH_DELAY; clear_obf(kcs, status); if (kcs->orig_write_count) { restart_kcs_transaction(kcs); } else { kcs->state = KCS_IDLE; return SI_SM_TRANSACTION_COMPLETE; } break; case KCS_HOSED: break; } if (kcs->state == KCS_HOSED) { init_kcs_data(kcs, kcs->io); return SI_SM_HOSED; } return SI_SM_CALL_WITHOUT_DELAY; } static int kcs_size(void) { return sizeof(struct si_sm_data); } static int kcs_detect(struct si_sm_data *kcs) { /* * It's impossible for the KCS status register to be all 1's, * (assuming a properly functioning, self-initialized BMC) * but that's what you get from reading a bogus address, so we * test that first. 
*/ if (read_status(kcs) == 0xff) return 1; return 0; } static void kcs_cleanup(struct si_sm_data *kcs) { } struct si_sm_handlers kcs_smi_handlers = { .init_data = init_kcs_data, .start_transaction = start_kcs_transaction, .get_result = get_kcs_result, .event = kcs_event, .detect = kcs_detect, .cleanup = kcs_cleanup, .size = kcs_size, };
gpl-2.0
crdroid-devices/android_kernel_lge_hammerhead
drivers/tty/serial/msm_serial_debugger.c
4836
9578
/* * drivers/serial/msm_serial_debuger.c * * Serial Debugger Interface for MSM7K * * Copyright (C) 2008 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <stdarg.h> #include <linux/module.h> #include <linux/io.h> #include <linux/console.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/kernel_debugger.h> #include <linux/kernel_stat.h> #include <linux/irq.h> #include <linux/delay.h> #include <mach/system.h> #include <mach/fiq.h> #include "msm_serial.h" static unsigned int debug_port_base; static int debug_signal_irq; static struct clk *debug_clk; static int debug_enable; static int debugger_enable; static struct { unsigned int base; int irq; struct device *clk_device; int signal_irq; } init_data; static inline void msm_write(unsigned int val, unsigned int off) { __raw_writel(val, debug_port_base + off); } static inline unsigned int msm_read(unsigned int off) { return __raw_readl(debug_port_base + off); } static void debug_port_init(void) { /* reset everything */ msm_write(UART_CR_CMD_RESET_RX, UART_CR); msm_write(UART_CR_CMD_RESET_TX, UART_CR); msm_write(UART_CR_CMD_RESET_ERR, UART_CR); msm_write(UART_CR_CMD_RESET_BREAK_INT, UART_CR); msm_write(UART_CR_CMD_RESET_CTS, UART_CR); msm_write(UART_CR_CMD_SET_RFR, UART_CR); /* setup clock dividers */ if (clk_get_rate(debug_clk) == 19200000) { /* clock is TCXO (19.2MHz) */ msm_write(0x06, UART_MREG); msm_write(0xF1, UART_NREG); msm_write(0x0F, UART_DREG); msm_write(0x1A, UART_MNDREG); } else { /* clock must be TCXO/4 */ msm_write(0x18, UART_MREG); 
msm_write(0xF6, UART_NREG); msm_write(0x0F, UART_DREG); msm_write(0x0A, UART_MNDREG); } msm_write(UART_CSR_115200, UART_CSR); /* rx interrupt on every character -- keep it simple */ msm_write(0, UART_RFWR); /* enable TX and RX */ msm_write(0x05, UART_CR); /* enable RX interrupt */ msm_write(UART_IMR_RXLEV, UART_IMR); } static inline int debug_getc(void) { if (msm_read(UART_SR) & UART_SR_RX_READY) { return msm_read(UART_RF); } else { return -1; } } static inline void debug_putc(unsigned int c) { while (!(msm_read(UART_SR) & UART_SR_TX_READY)) ; msm_write(c, UART_TF); } static inline void debug_flush(void) { while (!(msm_read(UART_SR) & UART_SR_TX_EMPTY)) ; } static void debug_puts(char *s) { unsigned c; while ((c = *s++)) { if (c == '\n') debug_putc('\r'); debug_putc(c); } } static void debug_prompt(void) { debug_puts("debug> "); } int log_buf_copy(char *dest, int idx, int len); static void dump_kernel_log(void) { char buf[1024]; int idx = 0; int ret; int saved_oip; /* setting oops_in_progress prevents log_buf_copy() * from trying to take a spinlock which will make it * very unhappy in some cases... */ saved_oip = oops_in_progress; oops_in_progress = 1; for (;;) { ret = log_buf_copy(buf, idx, 1023); if (ret <= 0) break; buf[ret] = 0; debug_puts(buf); idx += ret; } oops_in_progress = saved_oip; } static char *mode_name(unsigned cpsr) { switch (cpsr & MODE_MASK) { case USR_MODE: return "USR"; case FIQ_MODE: return "FIQ"; case IRQ_MODE: return "IRQ"; case SVC_MODE: return "SVC"; case ABT_MODE: return "ABT"; case UND_MODE: return "UND"; case SYSTEM_MODE: return "SYS"; default: return "???"; } } #define DEBUG_MAX 64 static char debug_cmd[DEBUG_MAX]; static int debug_busy; static int debug_abort; static int debug_printf(void *cookie, const char *fmt, ...) 
{ char buf[256]; va_list ap; va_start(ap, fmt); vsnprintf(buf, 128, fmt, ap); va_end(ap); debug_puts(buf); return debug_abort; } /* Safe outside fiq context */ static int debug_printf_nfiq(void *cookie, const char *fmt, ...) { char buf[256]; va_list ap; unsigned long irq_flags; va_start(ap, fmt); vsnprintf(buf, 128, fmt, ap); va_end(ap); local_irq_save(irq_flags); debug_puts(buf); debug_flush(); local_irq_restore(irq_flags); return debug_abort; } #define dprintf(fmt...) debug_printf(0, fmt) unsigned int last_irqs[NR_IRQS]; static void dump_irqs(void) { int n; dprintf("irqnr total since-last status name\n"); for (n = 1; n < NR_IRQS; n++) { struct irqaction *act = irq_desc[n].action; if (!act && !kstat_cpu(0).irqs[n]) continue; dprintf("%5d: %10u %11u %8x %s\n", n, kstat_cpu(0).irqs[n], kstat_cpu(0).irqs[n] - last_irqs[n], irq_desc[n].status, (act && act->name) ? act->name : "???"); last_irqs[n] = kstat_cpu(0).irqs[n]; } } static void debug_exec(const char *cmd, unsigned *regs) { if (!strcmp(cmd, "pc")) { dprintf(" pc %08x cpsr %08x mode %s\n", regs[15], regs[16], mode_name(regs[16])); } else if (!strcmp(cmd, "regs")) { dprintf(" r0 %08x r1 %08x r2 %08x r3 %08x\n", regs[0], regs[1], regs[2], regs[3]); dprintf(" r4 %08x r5 %08x r6 %08x r7 %08x\n", regs[4], regs[5], regs[6], regs[7]); dprintf(" r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n", regs[8], regs[9], regs[10], regs[11], mode_name(regs[16])); dprintf(" ip %08x sp %08x lr %08x pc %08x cpsr %08x\n", regs[10], regs[13], regs[14], regs[15], regs[16]); } else if (!strcmp(cmd, "reboot")) { if (msm_hw_reset_hook) msm_hw_reset_hook(); } else if (!strcmp(cmd, "irqs")) { dump_irqs(); } else if (!strcmp(cmd, "kmsg")) { dump_kernel_log(); } else if (!strcmp(cmd, "version")) { dprintf("%s\n", linux_banner); } else { if (debug_busy) { dprintf("command processor busy. 
trying to abort.\n"); debug_abort = -1; } else { strcpy(debug_cmd, cmd); debug_busy = 1; } msm_trigger_irq(debug_signal_irq); return; } debug_prompt(); } static irqreturn_t debug_irq(int irq, void *dev) { if (debug_busy) { struct kdbg_ctxt ctxt; ctxt.printf = debug_printf_nfiq; kernel_debugger(&ctxt, debug_cmd); debug_prompt(); debug_busy = 0; } return IRQ_HANDLED; } static char debug_buf[DEBUG_MAX]; static int debug_count; static void debug_fiq(void *data, void *regs) { int c; static int last_c; while ((c = debug_getc()) != -1) { if (!debug_enable) { if ((c == 13) || (c == 10)) { debug_enable = true; debug_count = 0; debug_prompt(); } } else if ((c >= ' ') && (c < 127)) { if (debug_count < (DEBUG_MAX - 1)) { debug_buf[debug_count++] = c; debug_putc(c); } } else if ((c == 8) || (c == 127)) { if (debug_count > 0) { debug_count--; debug_putc(8); debug_putc(' '); debug_putc(8); } } else if ((c == 13) || (c == 10)) { if (c == '\r' || (c == '\n' && last_c != '\r')) { debug_putc('\r'); debug_putc('\n'); } if (debug_count) { debug_buf[debug_count] = 0; debug_count = 0; debug_exec(debug_buf, regs); } else { debug_prompt(); } } last_c = c; } debug_flush(); } #if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE) static void debug_console_write(struct console *co, const char *s, unsigned int count) { unsigned long irq_flags; /* disable irq's while TXing outside of FIQ context */ local_irq_save(irq_flags); while (count--) { if (*s == '\n') debug_putc('\r'); debug_putc(*s++); } debug_flush(); local_irq_restore(irq_flags); } static struct console msm_serial_debug_console = { .name = "debug_console", .write = debug_console_write, .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED, }; #endif void msm_serial_debug_enable(int enable) { debug_enable = enable; } void msm_serial_debug_init(unsigned int base, int irq, struct device *clk_device, int signal_irq) { int ret; void *port; debug_clk = clk_get(clk_device, "uart_clk"); if (debug_clk) clk_enable(debug_clk); port = ioremap(base, 
4096); if (!port) return; init_data.base = base; init_data.irq = irq; init_data.clk_device = clk_device; init_data.signal_irq = signal_irq; debug_port_base = (unsigned int) port; debug_signal_irq = signal_irq; debug_port_init(); debug_prompt(); msm_fiq_select(irq); msm_fiq_set_handler(debug_fiq, 0); msm_fiq_enable(irq); ret = request_irq(signal_irq, debug_irq, IRQF_TRIGGER_RISING, "debug", 0); if (ret) printk(KERN_ERR "serial_debugger: could not install signal_irq"); #if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE) register_console(&msm_serial_debug_console); #endif debugger_enable = 1; } static int msm_serial_debug_remove(const char *val, struct kernel_param *kp) { int ret; static int pre_stat = 1; ret = param_set_bool(val, kp); if (ret) return ret; if (pre_stat == *(int *)kp->arg) return 0; pre_stat = *(int *)kp->arg; if (*(int *)kp->arg) { msm_serial_debug_init(init_data.base, init_data.irq, init_data.clk_device, init_data.signal_irq); printk(KERN_INFO "enable FIQ serial debugger\n"); return 0; } #if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE) unregister_console(&msm_serial_debug_console); #endif free_irq(init_data.signal_irq, 0); msm_fiq_set_handler(NULL, 0); msm_fiq_disable(init_data.irq); msm_fiq_unselect(init_data.irq); clk_disable(debug_clk); printk(KERN_INFO "disable FIQ serial debugger\n"); return 0; } module_param_call(enable, msm_serial_debug_remove, param_get_bool, &debugger_enable, S_IWUSR | S_IRUGO);
gpl-2.0
nychitman1/android_kernel_lge_hammerhead
net/llc/llc_conn.c
8932
27806
/* * llc_conn.c - Driver routines for connection component. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/init.h> #include <linux/slab.h> #include <net/llc_sap.h> #include <net/llc_conn.h> #include <net/sock.h> #include <net/tcp_states.h> #include <net/llc_c_ev.h> #include <net/llc_c_ac.h> #include <net/llc_c_st.h> #include <net/llc_pdu.h> #if 0 #define dprintk(args...) printk(KERN_DEBUG args) #else #define dprintk(args...) #endif static int llc_find_offset(int state, int ev_type); static void llc_conn_send_pdus(struct sock *sk); static int llc_conn_service(struct sock *sk, struct sk_buff *skb); static int llc_exec_conn_trans_actions(struct sock *sk, struct llc_conn_state_trans *trans, struct sk_buff *ev); static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, struct sk_buff *skb); /* Offset table on connection states transition diagram */ static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV]; int sysctl_llc2_ack_timeout = LLC2_ACK_TIME * HZ; int sysctl_llc2_p_timeout = LLC2_P_TIME * HZ; int sysctl_llc2_rej_timeout = LLC2_REJ_TIME * HZ; int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ; /** * llc_conn_state_process - sends event to connection state machine * @sk: connection * @skb: occurred event * * Sends an event to connection state machine. After processing event * (executing it's actions and changing state), upper layer will be * indicated or confirmed, if needed. Returns 0 for success, 1 for * failure. The socket lock has to be held before calling this function. 
*/ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(skb->sk); struct llc_conn_state_ev *ev = llc_conn_ev(skb); /* * We have to hold the skb, because llc_conn_service will kfree it in * the sending path and we need to look at the skb->cb, where we encode * llc_conn_state_ev. */ skb_get(skb); ev->ind_prim = ev->cfm_prim = 0; /* * Send event to state machine */ rc = llc_conn_service(skb->sk, skb); if (unlikely(rc != 0)) { printk(KERN_ERR "%s: llc_conn_service failed\n", __func__); goto out_kfree_skb; } if (unlikely(!ev->ind_prim && !ev->cfm_prim)) { /* indicate or confirm not required */ if (!skb->next) goto out_kfree_skb; goto out_skb_put; } if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */ skb_get(skb); switch (ev->ind_prim) { case LLC_DATA_PRIM: llc_save_primitive(sk, skb, LLC_DATA_PRIM); if (unlikely(sock_queue_rcv_skb(sk, skb))) { /* * shouldn't happen */ printk(KERN_ERR "%s: sock_queue_rcv_skb failed!\n", __func__); kfree_skb(skb); } break; case LLC_CONN_PRIM: /* * Can't be sock_queue_rcv_skb, because we have to leave the * skb->sk pointing to the newly created struct sock in * llc_conn_handler. 
-acme */ skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_state_change(sk); break; case LLC_DISC_PRIM: sock_hold(sk); if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_ESTABLISHED) { sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_socket->state = SS_UNCONNECTED; sk->sk_state = TCP_CLOSE; if (!sock_flag(sk, SOCK_DEAD)) { sock_set_flag(sk, SOCK_DEAD); sk->sk_state_change(sk); } } kfree_skb(skb); sock_put(sk); break; case LLC_RESET_PRIM: /* * FIXME: * RESET is not being notified to upper layers for now */ printk(KERN_INFO "%s: received a reset ind!\n", __func__); kfree_skb(skb); break; default: if (ev->ind_prim) { printk(KERN_INFO "%s: received unknown %d prim!\n", __func__, ev->ind_prim); kfree_skb(skb); } /* No indication */ break; } switch (ev->cfm_prim) { case LLC_DATA_PRIM: if (!llc_data_accept_state(llc->state)) sk->sk_write_space(sk); else rc = llc->failed_data_req = 1; break; case LLC_CONN_PRIM: if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_SYN_SENT) { if (ev->status) { sk->sk_socket->state = SS_UNCONNECTED; sk->sk_state = TCP_CLOSE; } else { sk->sk_socket->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; } sk->sk_state_change(sk); } break; case LLC_DISC_PRIM: sock_hold(sk); if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSING) { sk->sk_socket->state = SS_UNCONNECTED; sk->sk_state = TCP_CLOSE; sk->sk_state_change(sk); } sock_put(sk); break; case LLC_RESET_PRIM: /* * FIXME: * RESET is not being notified to upper layers for now */ printk(KERN_INFO "%s: received a reset conf!\n", __func__); break; default: if (ev->cfm_prim) { printk(KERN_INFO "%s: received unknown %d prim!\n", __func__, ev->cfm_prim); break; } goto out_skb_put; /* No confirmation */ } out_kfree_skb: kfree_skb(skb); out_skb_put: kfree_skb(skb); return rc; } void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) { /* queue PDU to send to MAC layer */ skb_queue_tail(&sk->sk_write_queue, skb); llc_conn_send_pdus(sk); } /** * llc_conn_rtn_pdu - sends received data pdu to 
upper layer * @sk: Active connection * @skb: Received data frame * * Sends received data pdu to upper layer (by using indicate function). * Prepares service parameters (prim and prim_data). calling indication * function will be done in llc_conn_state_process. */ void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->ind_prim = LLC_DATA_PRIM; } /** * llc_conn_resend_i_pdu_as_cmd - resend all all unacknowledged I PDUs * @sk: active connection * @nr: NR * @first_p_bit: p_bit value of first pdu * * Resend all unacknowledged I PDUs, starting with the NR; send first as * command PDU with P bit equal first_p_bit; if more than one send * subsequent as command PDUs with P bit equal zero (0). */ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit) { struct sk_buff *skb; struct llc_pdu_sn *pdu; u16 nbr_unack_pdus; struct llc_sock *llc; u8 howmany_resend = 0; llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus); if (!nbr_unack_pdus) goto out; /* * Process unack PDUs only if unack queue is not empty; remove * appropriate PDUs, fix them up, and put them on mac_pdu_q. */ llc = llc_sk(sk); while ((skb = skb_dequeue(&llc->pdu_unack_q)) != NULL) { pdu = llc_pdu_sn_hdr(skb); llc_pdu_set_cmd_rsp(skb, LLC_PDU_CMD); llc_pdu_set_pf_bit(skb, first_p_bit); skb_queue_tail(&sk->sk_write_queue, skb); first_p_bit = 0; llc->vS = LLC_I_GET_NS(pdu); howmany_resend++; } if (howmany_resend > 0) llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; /* any PDUs to re-send are queued up; start sending to MAC */ llc_conn_send_pdus(sk); out:; } /** * llc_conn_resend_i_pdu_as_rsp - Resend all unacknowledged I PDUs * @sk: active connection. * @nr: NR * @first_f_bit: f_bit value of first pdu. * * Resend all unacknowledged I PDUs, starting with the NR; send first as * response PDU with F bit equal first_f_bit; if more than one send * subsequent as response PDUs with F bit equal zero (0). 
*/ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit) { struct sk_buff *skb; u16 nbr_unack_pdus; struct llc_sock *llc = llc_sk(sk); u8 howmany_resend = 0; llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus); if (!nbr_unack_pdus) goto out; /* * Process unack PDUs only if unack queue is not empty; remove * appropriate PDUs, fix them up, and put them on mac_pdu_q */ while ((skb = skb_dequeue(&llc->pdu_unack_q)) != NULL) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_set_cmd_rsp(skb, LLC_PDU_RSP); llc_pdu_set_pf_bit(skb, first_f_bit); skb_queue_tail(&sk->sk_write_queue, skb); first_f_bit = 0; llc->vS = LLC_I_GET_NS(pdu); howmany_resend++; } if (howmany_resend > 0) llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; /* any PDUs to re-send are queued up; start sending to MAC */ llc_conn_send_pdus(sk); out:; } /** * llc_conn_remove_acked_pdus - Removes acknowledged pdus from tx queue * @sk: active connection * nr: NR * how_many_unacked: size of pdu_unack_q after removing acked pdus * * Removes acknowledged pdus from transmit queue (pdu_unack_q). Returns * the number of pdus that removed from queue. */ int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked) { int pdu_pos, i; struct sk_buff *skb; struct llc_pdu_sn *pdu; int nbr_acked = 0; struct llc_sock *llc = llc_sk(sk); int q_len = skb_queue_len(&llc->pdu_unack_q); if (!q_len) goto out; skb = skb_peek(&llc->pdu_unack_q); pdu = llc_pdu_sn_hdr(skb); /* finding position of last acked pdu in queue */ pdu_pos = ((int)LLC_2_SEQ_NBR_MODULO + (int)nr - (int)LLC_I_GET_NS(pdu)) % LLC_2_SEQ_NBR_MODULO; for (i = 0; i < pdu_pos && i < q_len; i++) { skb = skb_dequeue(&llc->pdu_unack_q); kfree_skb(skb); nbr_acked++; } out: *how_many_unacked = skb_queue_len(&llc->pdu_unack_q); return nbr_acked; } /** * llc_conn_send_pdus - Sends queued PDUs * @sk: active connection * * Sends queued pdus to MAC layer for transmission. 
*/ static void llc_conn_send_pdus(struct sock *sk) { struct sk_buff *skb; while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); if (LLC_PDU_TYPE_IS_I(pdu) && !(skb->dev->flags & IFF_LOOPBACK)) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb); if (!skb2) break; skb = skb2; } dev_queue_xmit(skb); } } /** * llc_conn_service - finds transition and changes state of connection * @sk: connection * @skb: happened event * * This function finds transition that matches with happened event, then * executes related actions and finally changes state of connection. * Returns 0 for success, 1 for failure. */ static int llc_conn_service(struct sock *sk, struct sk_buff *skb) { int rc = 1; struct llc_sock *llc = llc_sk(sk); struct llc_conn_state_trans *trans; if (llc->state > NBR_CONN_STATES) goto out; rc = 0; trans = llc_qualify_conn_ev(sk, skb); if (trans) { rc = llc_exec_conn_trans_actions(sk, trans, skb); if (!rc && trans->next_state != NO_STATE_CHANGE) { llc->state = trans->next_state; if (!llc_data_accept_state(llc->state)) sk->sk_state_change(sk); } } out: return rc; } /** * llc_qualify_conn_ev - finds transition for event * @sk: connection * @skb: happened event * * This function finds transition that matches with happened event. * Returns pointer to found transition on success, %NULL otherwise. 
*/ static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_trans **next_trans; llc_conn_ev_qfyr_t *next_qualifier; struct llc_conn_state_ev *ev = llc_conn_ev(skb); struct llc_sock *llc = llc_sk(sk); struct llc_conn_state *curr_state = &llc_conn_state_table[llc->state - 1]; /* search thru events for this state until * list exhausted or until no more */ for (next_trans = curr_state->transitions + llc_find_offset(llc->state - 1, ev->type); (*next_trans)->ev; next_trans++) { if (!((*next_trans)->ev)(sk, skb)) { /* got POSSIBLE event match; the event may require * qualification based on the values of a number of * state flags; if all qualifications are met (i.e., * if all qualifying functions return success, or 0, * then this is THE event we're looking for */ for (next_qualifier = (*next_trans)->ev_qualifiers; next_qualifier && *next_qualifier && !(*next_qualifier)(sk, skb); next_qualifier++) /* nothing */; if (!next_qualifier || !*next_qualifier) /* all qualifiers executed successfully; this is * our transition; return it so we can perform * the associated actions & change the state */ return *next_trans; } } return NULL; } /** * llc_exec_conn_trans_actions - executes related actions * @sk: connection * @trans: transition that it's actions must be performed * @skb: event * * Executes actions that is related to happened event. Returns 0 for * success, 1 to indicate failure of at least one action. 
*/ static int llc_exec_conn_trans_actions(struct sock *sk, struct llc_conn_state_trans *trans, struct sk_buff *skb) { int rc = 0; llc_conn_action_t *next_action; for (next_action = trans->ev_actions; next_action && *next_action; next_action++) { int rc2 = (*next_action)(sk, skb); if (rc2 == 2) { rc = rc2; break; } else if (rc2) rc = 1; } return rc; } static inline bool llc_estab_match(const struct llc_sap *sap, const struct llc_addr *daddr, const struct llc_addr *laddr, const struct sock *sk) { struct llc_sock *llc = llc_sk(sk); return llc->laddr.lsap == laddr->lsap && llc->daddr.lsap == daddr->lsap && llc_mac_match(llc->laddr.mac, laddr->mac) && llc_mac_match(llc->daddr.mac, daddr->mac); } /** * __llc_lookup_established - Finds connection for the remote/local sap/mac * @sap: SAP * @daddr: address of remote LLC (MAC + SAP) * @laddr: address of local LLC (MAC + SAP) * * Search connection list of the SAP and finds connection using the remote * mac, remote sap, local mac, and local sap. Returns pointer for * connection found, %NULL otherwise. * Caller has to make sure local_bh is disabled. */ static struct sock *__llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr, struct llc_addr *laddr) { struct sock *rc; struct hlist_nulls_node *node; int slot = llc_sk_laddr_hashfn(sap, laddr); struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot]; rcu_read_lock(); again: sk_nulls_for_each_rcu(rc, node, laddr_hb) { if (llc_estab_match(sap, daddr, laddr, rc)) { /* Extra checks required by SLAB_DESTROY_BY_RCU */ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt))) goto again; if (unlikely(llc_sk(rc)->sap != sap || !llc_estab_match(sap, daddr, laddr, rc))) { sock_put(rc); continue; } goto found; } } rc = NULL; /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. 
*/ if (unlikely(get_nulls_value(node) != slot)) goto again; found: rcu_read_unlock(); return rc; } struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr, struct llc_addr *laddr) { struct sock *sk; local_bh_disable(); sk = __llc_lookup_established(sap, daddr, laddr); local_bh_enable(); return sk; } static inline bool llc_listener_match(const struct llc_sap *sap, const struct llc_addr *laddr, const struct sock *sk) { struct llc_sock *llc = llc_sk(sk); return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN && llc->laddr.lsap == laddr->lsap && llc_mac_match(llc->laddr.mac, laddr->mac); } static struct sock *__llc_lookup_listener(struct llc_sap *sap, struct llc_addr *laddr) { struct sock *rc; struct hlist_nulls_node *node; int slot = llc_sk_laddr_hashfn(sap, laddr); struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot]; rcu_read_lock(); again: sk_nulls_for_each_rcu(rc, node, laddr_hb) { if (llc_listener_match(sap, laddr, rc)) { /* Extra checks required by SLAB_DESTROY_BY_RCU */ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt))) goto again; if (unlikely(llc_sk(rc)->sap != sap || !llc_listener_match(sap, laddr, rc))) { sock_put(rc); continue; } goto found; } } rc = NULL; /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (unlikely(get_nulls_value(node) != slot)) goto again; found: rcu_read_unlock(); return rc; } /** * llc_lookup_listener - Finds listener for local MAC + SAP * @sap: SAP * @laddr: address of local LLC (MAC + SAP) * * Search connection list of the SAP and finds connection listening on * local mac, and local sap. Returns pointer for parent socket found, * %NULL otherwise. * Caller has to make sure local_bh is disabled. 
*/ static struct sock *llc_lookup_listener(struct llc_sap *sap, struct llc_addr *laddr) { static struct llc_addr null_addr; struct sock *rc = __llc_lookup_listener(sap, laddr); if (!rc) rc = __llc_lookup_listener(sap, &null_addr); return rc; } static struct sock *__llc_lookup(struct llc_sap *sap, struct llc_addr *daddr, struct llc_addr *laddr) { struct sock *sk = __llc_lookup_established(sap, daddr, laddr); return sk ? : llc_lookup_listener(sap, laddr); } /** * llc_data_accept_state - designates if in this state data can be sent. * @state: state of connection. * * Returns 0 if data can be sent, 1 otherwise. */ u8 llc_data_accept_state(u8 state) { return state != LLC_CONN_STATE_NORMAL && state != LLC_CONN_STATE_BUSY && state != LLC_CONN_STATE_REJ; } /** * llc_find_next_offset - finds offset for next category of transitions * @state: state table. * @offset: start offset. * * Finds offset of next category of transitions in transition table. * Returns the start index of next category. */ static u16 __init llc_find_next_offset(struct llc_conn_state *state, u16 offset) { u16 cnt = 0; struct llc_conn_state_trans **next_trans; for (next_trans = state->transitions + offset; (*next_trans)->ev; next_trans++) ++cnt; return cnt; } /** * llc_build_offset_table - builds offset table of connection * * Fills offset table of connection state transition table * (llc_offset_table). */ void __init llc_build_offset_table(void) { struct llc_conn_state *curr_state; int state, ev_type, next_offset; for (state = 0; state < NBR_CONN_STATES; state++) { curr_state = &llc_conn_state_table[state]; next_offset = 0; for (ev_type = 0; ev_type < NBR_CONN_EV; ev_type++) { llc_offset_table[state][ev_type] = next_offset; next_offset += llc_find_next_offset(curr_state, next_offset) + 1; } } } /** * llc_find_offset - finds start offset of category of transitions * @state: state of connection * @ev_type: type of happened event * * Finds start offset of desired category of transitions. 
Returns the * desired start offset. */ static int llc_find_offset(int state, int ev_type) { int rc = 0; /* at this stage, llc_offset_table[..][2] is not important. it is for * init_pf_cycle and I don't know what is it. */ switch (ev_type) { case LLC_CONN_EV_TYPE_PRIM: rc = llc_offset_table[state][0]; break; case LLC_CONN_EV_TYPE_PDU: rc = llc_offset_table[state][4]; break; case LLC_CONN_EV_TYPE_SIMPLE: rc = llc_offset_table[state][1]; break; case LLC_CONN_EV_TYPE_P_TMR: case LLC_CONN_EV_TYPE_ACK_TMR: case LLC_CONN_EV_TYPE_REJ_TMR: case LLC_CONN_EV_TYPE_BUSY_TMR: rc = llc_offset_table[state][3]; break; } return rc; } /** * llc_sap_add_socket - adds a socket to a SAP * @sap: SAP * @sk: socket * * This function adds a socket to the hash tables of a SAP. */ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk) { struct llc_sock *llc = llc_sk(sk); struct hlist_head *dev_hb = llc_sk_dev_hash(sap, llc->dev->ifindex); struct hlist_nulls_head *laddr_hb = llc_sk_laddr_hash(sap, &llc->laddr); llc_sap_hold(sap); llc_sk(sk)->sap = sap; spin_lock_bh(&sap->sk_lock); sap->sk_count++; sk_nulls_add_node_rcu(sk, laddr_hb); hlist_add_head(&llc->dev_hash_node, dev_hb); spin_unlock_bh(&sap->sk_lock); } /** * llc_sap_remove_socket - removes a socket from SAP * @sap: SAP * @sk: socket * * This function removes a connection from the hash tables of a SAP if * the connection was in this list. */ void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk) { struct llc_sock *llc = llc_sk(sk); spin_lock_bh(&sap->sk_lock); sk_nulls_del_node_init_rcu(sk); hlist_del(&llc->dev_hash_node); sap->sk_count--; spin_unlock_bh(&sap->sk_lock); llc_sap_put(sap); } /** * llc_conn_rcv - sends received pdus to the connection state machine * @sk: current connection structure. * @skb: received frame. * * Sends received pdus to the connection state machine. 
*/ static int llc_conn_rcv(struct sock* sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->type = LLC_CONN_EV_TYPE_PDU; ev->reason = 0; return llc_conn_state_process(sk, skb); } static struct sock *llc_create_incoming_sock(struct sock *sk, struct net_device *dev, struct llc_addr *saddr, struct llc_addr *daddr) { struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC, sk->sk_prot); struct llc_sock *newllc, *llc = llc_sk(sk); if (!newsk) goto out; newllc = llc_sk(newsk); memcpy(&newllc->laddr, daddr, sizeof(newllc->laddr)); memcpy(&newllc->daddr, saddr, sizeof(newllc->daddr)); newllc->dev = dev; dev_hold(dev); llc_sap_add_socket(llc->sap, newsk); llc_sap_hold(llc->sap); out: return newsk; } void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) { struct llc_addr saddr, daddr; struct sock *sk; llc_pdu_decode_sa(skb, saddr.mac); llc_pdu_decode_ssap(skb, &saddr.lsap); llc_pdu_decode_da(skb, daddr.mac); llc_pdu_decode_dsap(skb, &daddr.lsap); sk = __llc_lookup(sap, &saddr, &daddr); if (!sk) goto drop; bh_lock_sock(sk); /* * This has to be done here and not at the upper layer ->accept * method because of the way the PROCOM state machine works: * it needs to set several state variables (see, for instance, * llc_adm_actions_2 in net/llc/llc_c_st.c) and send a packet to * the originator of the new connection, and this state has to be * in the newly created struct sock private area. -acme */ if (unlikely(sk->sk_state == TCP_LISTEN)) { struct sock *newsk = llc_create_incoming_sock(sk, skb->dev, &saddr, &daddr); if (!newsk) goto drop_unlock; skb_set_owner_r(skb, newsk); } else { /* * Can't be skb_set_owner_r, this will be done at the * llc_conn_state_process function, later on, when we will use * skb_queue_rcv_skb to send it to upper layers, this is * another trick required to cope with how the PROCOM state * machine works. 
-acme */ skb->sk = sk; } if (!sock_owned_by_user(sk)) llc_conn_rcv(sk, skb); else { dprintk("%s: adding to backlog...\n", __func__); llc_set_backlog_type(skb, LLC_PACKET); if (sk_add_backlog(sk, skb)) goto drop_unlock; } out: bh_unlock_sock(sk); sock_put(sk); return; drop: kfree_skb(skb); return; drop_unlock: kfree_skb(skb); goto out; } #undef LLC_REFCNT_DEBUG #ifdef LLC_REFCNT_DEBUG static atomic_t llc_sock_nr; #endif /** * llc_backlog_rcv - Processes rx frames and expired timers. * @sk: LLC sock (p8022 connection) * @skb: queued rx frame or event * * This function processes frames that has received and timers that has * expired during sending an I pdu (refer to data_req_handler). frames * queue by llc_rcv function (llc_mac.c) and timers queue by timer * callback functions(llc_c_ac.c). */ static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb) { int rc = 0; struct llc_sock *llc = llc_sk(sk); if (likely(llc_backlog_type(skb) == LLC_PACKET)) { if (likely(llc->state > 1)) /* not closed */ rc = llc_conn_rcv(sk, skb); else goto out_kfree_skb; } else if (llc_backlog_type(skb) == LLC_EVENT) { /* timer expiration event */ if (likely(llc->state > 1)) /* not closed */ rc = llc_conn_state_process(sk, skb); else goto out_kfree_skb; } else { printk(KERN_ERR "%s: invalid skb in backlog\n", __func__); goto out_kfree_skb; } out: return rc; out_kfree_skb: kfree_skb(skb); goto out; } /** * llc_sk_init - Initializes a socket with default llc values. * @sk: socket to initialize. * * Initializes a socket with default llc values. 
*/ static void llc_sk_init(struct sock* sk) { struct llc_sock *llc = llc_sk(sk); llc->state = LLC_CONN_STATE_ADM; llc->inc_cntr = llc->dec_cntr = 2; llc->dec_step = llc->connect_step = 1; setup_timer(&llc->ack_timer.timer, llc_conn_ack_tmr_cb, (unsigned long)sk); llc->ack_timer.expire = sysctl_llc2_ack_timeout; setup_timer(&llc->pf_cycle_timer.timer, llc_conn_pf_cycle_tmr_cb, (unsigned long)sk); llc->pf_cycle_timer.expire = sysctl_llc2_p_timeout; setup_timer(&llc->rej_sent_timer.timer, llc_conn_rej_tmr_cb, (unsigned long)sk); llc->rej_sent_timer.expire = sysctl_llc2_rej_timeout; setup_timer(&llc->busy_state_timer.timer, llc_conn_busy_tmr_cb, (unsigned long)sk); llc->busy_state_timer.expire = sysctl_llc2_busy_timeout; llc->n2 = 2; /* max retransmit */ llc->k = 2; /* tx win size, will adjust dynam */ llc->rw = 128; /* rx win size (opt and equal to * tx_win of remote LLC) */ skb_queue_head_init(&llc->pdu_unack_q); sk->sk_backlog_rcv = llc_backlog_rcv; } /** * llc_sk_alloc - Allocates LLC sock * @family: upper layer protocol family * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) * * Allocates a LLC sock and initializes it. 
Returns the new LLC sock * or %NULL if there's no memory available for one */ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot) { struct sock *sk = sk_alloc(net, family, priority, prot); if (!sk) goto out; llc_sk_init(sk); sock_init_data(NULL, sk); #ifdef LLC_REFCNT_DEBUG atomic_inc(&llc_sock_nr); printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n", sk, __func__, atomic_read(&llc_sock_nr)); #endif out: return sk; } /** * llc_sk_free - Frees a LLC socket * @sk - socket to free * * Frees a LLC socket */ void llc_sk_free(struct sock *sk) { struct llc_sock *llc = llc_sk(sk); llc->state = LLC_CONN_OUT_OF_SVC; /* Stop all (possibly) running timers */ llc_conn_ac_stop_all_timers(sk, NULL); #ifdef DEBUG_LLC_CONN_ALLOC printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__, skb_queue_len(&llc->pdu_unack_q), skb_queue_len(&sk->sk_write_queue)); #endif skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); skb_queue_purge(&llc->pdu_unack_q); #ifdef LLC_REFCNT_DEBUG if (atomic_read(&sk->sk_refcnt) != 1) { printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n", sk, __func__, atomic_read(&sk->sk_refcnt)); printk(KERN_DEBUG "%d LLC sockets are still alive\n", atomic_read(&llc_sock_nr)); } else { atomic_dec(&llc_sock_nr); printk(KERN_DEBUG "LLC socket %p released in %s, %d are still alive\n", sk, __func__, atomic_read(&llc_sock_nr)); } #endif sock_put(sk); } /** * llc_sk_reset - resets a connection * @sk: LLC socket to reset * * Resets a connection to the out of service state. Stops its timers * and frees any frames in the queues of the connection. 
*/ void llc_sk_reset(struct sock *sk) { struct llc_sock *llc = llc_sk(sk); llc_conn_ac_stop_all_timers(sk, NULL); skb_queue_purge(&sk->sk_write_queue); skb_queue_purge(&llc->pdu_unack_q); llc->remote_busy_flag = 0; llc->cause_flag = 0; llc->retry_count = 0; llc_conn_set_p_flag(sk, 0); llc->f_flag = 0; llc->s_flag = 0; llc->ack_pf = 0; llc->first_pdu_Ns = 0; llc->ack_must_be_send = 0; llc->dec_step = 1; llc->inc_cntr = 2; llc->dec_cntr = 2; llc->X = 0; llc->failed_data_req = 0 ; llc->last_nr = 0; }
gpl-2.0
smac0628/htc_gpe_51
drivers/video/omap/lcdc.c
9188
19816
/* * OMAP1 internal LCD controller * * Copyright (C) 2004 Nokia Corporation * Author: Imre Deak <imre.deak@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/err.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/dma-mapping.h> #include <linux/vmalloc.h> #include <linux/clk.h> #include <linux/gfp.h> #include <mach/lcdc.h> #include <plat/dma.h> #include <asm/mach-types.h> #include "omapfb.h" #include "lcdc.h" #define MODULE_NAME "lcdc" #define MAX_PALETTE_SIZE PAGE_SIZE enum lcdc_load_mode { OMAP_LCDC_LOAD_PALETTE, OMAP_LCDC_LOAD_FRAME, OMAP_LCDC_LOAD_PALETTE_AND_FRAME }; static struct omap_lcd_controller { enum omapfb_update_mode update_mode; int ext_mode; unsigned long frame_offset; int screen_width; int xres; int yres; enum omapfb_color_format color_mode; int bpp; void *palette_virt; dma_addr_t palette_phys; int palette_code; int palette_size; unsigned int irq_mask; struct completion last_frame_complete; struct completion palette_load_complete; struct clk *lcd_ck; struct omapfb_device *fbdev; void (*dma_callback)(void *data); void *dma_callback_data; int fbmem_allocated; dma_addr_t vram_phys; void *vram_virt; unsigned long vram_size; } lcdc; static void inline enable_irqs(int 
mask) { lcdc.irq_mask |= mask; } static void inline disable_irqs(int mask) { lcdc.irq_mask &= ~mask; } static void set_load_mode(enum lcdc_load_mode mode) { u32 l; l = omap_readl(OMAP_LCDC_CONTROL); l &= ~(3 << 20); switch (mode) { case OMAP_LCDC_LOAD_PALETTE: l |= 1 << 20; break; case OMAP_LCDC_LOAD_FRAME: l |= 2 << 20; break; case OMAP_LCDC_LOAD_PALETTE_AND_FRAME: break; default: BUG(); } omap_writel(l, OMAP_LCDC_CONTROL); } static void enable_controller(void) { u32 l; l = omap_readl(OMAP_LCDC_CONTROL); l |= OMAP_LCDC_CTRL_LCD_EN; l &= ~OMAP_LCDC_IRQ_MASK; l |= lcdc.irq_mask | OMAP_LCDC_IRQ_DONE; /* enabled IRQs */ omap_writel(l, OMAP_LCDC_CONTROL); } static void disable_controller_async(void) { u32 l; u32 mask; l = omap_readl(OMAP_LCDC_CONTROL); mask = OMAP_LCDC_CTRL_LCD_EN | OMAP_LCDC_IRQ_MASK; /* * Preserve the DONE mask, since we still want to get the * final DONE irq. It will be disabled in the IRQ handler. */ mask &= ~OMAP_LCDC_IRQ_DONE; l &= ~mask; omap_writel(l, OMAP_LCDC_CONTROL); } static void disable_controller(void) { init_completion(&lcdc.last_frame_complete); disable_controller_async(); if (!wait_for_completion_timeout(&lcdc.last_frame_complete, msecs_to_jiffies(500))) dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n"); } static void reset_controller(u32 status) { static unsigned long reset_count; static unsigned long last_jiffies; disable_controller_async(); reset_count++; if (reset_count == 1 || time_after(jiffies, last_jiffies + HZ)) { dev_err(lcdc.fbdev->dev, "resetting (status %#010x,reset count %lu)\n", status, reset_count); last_jiffies = jiffies; } if (reset_count < 100) { enable_controller(); } else { reset_count = 0; dev_err(lcdc.fbdev->dev, "too many reset attempts, giving up.\n"); } } /* * Configure the LCD DMA according to the current mode specified by parameters * in lcdc.fbdev and fbdev->var. 
*/ static void setup_lcd_dma(void) { static const int dma_elem_type[] = { 0, OMAP_DMA_DATA_TYPE_S8, OMAP_DMA_DATA_TYPE_S16, 0, OMAP_DMA_DATA_TYPE_S32, }; struct omapfb_plane_struct *plane = lcdc.fbdev->fb_info[0]->par; struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var; unsigned long src; int esize, xelem, yelem; src = lcdc.vram_phys + lcdc.frame_offset; switch (var->rotate) { case 0: if (plane->info.mirror || (src & 3) || lcdc.color_mode == OMAPFB_COLOR_YUV420 || (lcdc.xres & 1)) esize = 2; else esize = 4; xelem = lcdc.xres * lcdc.bpp / 8 / esize; yelem = lcdc.yres; break; case 90: case 180: case 270: if (cpu_is_omap15xx()) { BUG(); } esize = 2; xelem = lcdc.yres * lcdc.bpp / 16; yelem = lcdc.xres; break; default: BUG(); return; } #ifdef VERBOSE dev_dbg(lcdc.fbdev->dev, "setup_dma: src %#010lx esize %d xelem %d yelem %d\n", src, esize, xelem, yelem); #endif omap_set_lcd_dma_b1(src, xelem, yelem, dma_elem_type[esize]); if (!cpu_is_omap15xx()) { int bpp = lcdc.bpp; /* * YUV support is only for external mode when we have the * YUV window embedded in a 16bpp frame buffer. */ if (lcdc.color_mode == OMAPFB_COLOR_YUV420) bpp = 16; /* Set virtual xres elem size */ omap_set_lcd_dma_b1_vxres( lcdc.screen_width * bpp / 8 / esize); /* Setup transformations */ omap_set_lcd_dma_b1_rotation(var->rotate); omap_set_lcd_dma_b1_mirror(plane->info.mirror); } omap_setup_lcd_dma(); } static irqreturn_t lcdc_irq_handler(int irq, void *dev_id) { u32 status; status = omap_readl(OMAP_LCDC_STATUS); if (status & (OMAP_LCDC_STAT_FUF | OMAP_LCDC_STAT_SYNC_LOST)) reset_controller(status); else { if (status & OMAP_LCDC_STAT_DONE) { u32 l; /* * Disable IRQ_DONE. The status bit will be cleared * only when the controller is reenabled and we don't * want to get more interrupts. 
*/ l = omap_readl(OMAP_LCDC_CONTROL); l &= ~OMAP_LCDC_IRQ_DONE; omap_writel(l, OMAP_LCDC_CONTROL); complete(&lcdc.last_frame_complete); } if (status & OMAP_LCDC_STAT_LOADED_PALETTE) { disable_controller_async(); complete(&lcdc.palette_load_complete); } } /* * Clear these interrupt status bits. * Sync_lost, FUF bits were cleared by disabling the LCD controller * LOADED_PALETTE can be cleared this way only in palette only * load mode. In other load modes it's cleared by disabling the * controller. */ status &= ~(OMAP_LCDC_STAT_VSYNC | OMAP_LCDC_STAT_LOADED_PALETTE | OMAP_LCDC_STAT_ABC | OMAP_LCDC_STAT_LINE_INT); omap_writel(status, OMAP_LCDC_STATUS); return IRQ_HANDLED; } /* * Change to a new video mode. We defer this to a later time to avoid any * flicker and not to mess up the current LCD DMA context. For this we disable * the LCD controller, which will generate a DONE irq after the last frame has * been transferred. Then it'll be safe to reconfigure both the LCD controller * as well as the LCD DMA. 
*/ static int omap_lcdc_setup_plane(int plane, int channel_out, unsigned long offset, int screen_width, int pos_x, int pos_y, int width, int height, int color_mode) { struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var; struct lcd_panel *panel = lcdc.fbdev->panel; int rot_x, rot_y; if (var->rotate == 0) { rot_x = panel->x_res; rot_y = panel->y_res; } else { rot_x = panel->y_res; rot_y = panel->x_res; } if (plane != 0 || channel_out != 0 || pos_x != 0 || pos_y != 0 || width > rot_x || height > rot_y) { #ifdef VERBOSE dev_dbg(lcdc.fbdev->dev, "invalid plane params plane %d pos_x %d pos_y %d " "w %d h %d\n", plane, pos_x, pos_y, width, height); #endif return -EINVAL; } lcdc.frame_offset = offset; lcdc.xres = width; lcdc.yres = height; lcdc.screen_width = screen_width; lcdc.color_mode = color_mode; switch (color_mode) { case OMAPFB_COLOR_CLUT_8BPP: lcdc.bpp = 8; lcdc.palette_code = 0x3000; lcdc.palette_size = 512; break; case OMAPFB_COLOR_RGB565: lcdc.bpp = 16; lcdc.palette_code = 0x4000; lcdc.palette_size = 32; break; case OMAPFB_COLOR_RGB444: lcdc.bpp = 16; lcdc.palette_code = 0x4000; lcdc.palette_size = 32; break; case OMAPFB_COLOR_YUV420: if (lcdc.ext_mode) { lcdc.bpp = 12; break; } /* fallthrough */ case OMAPFB_COLOR_YUV422: if (lcdc.ext_mode) { lcdc.bpp = 16; break; } /* fallthrough */ default: /* FIXME: other BPPs. 
* bpp1: code 0, size 256 * bpp2: code 0x1000 size 256 * bpp4: code 0x2000 size 256 * bpp12: code 0x4000 size 32 */ dev_dbg(lcdc.fbdev->dev, "invalid color mode %d\n", color_mode); BUG(); return -1; } if (lcdc.ext_mode) { setup_lcd_dma(); return 0; } if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) { disable_controller(); omap_stop_lcd_dma(); setup_lcd_dma(); enable_controller(); } return 0; } static int omap_lcdc_enable_plane(int plane, int enable) { dev_dbg(lcdc.fbdev->dev, "plane %d enable %d update_mode %d ext_mode %d\n", plane, enable, lcdc.update_mode, lcdc.ext_mode); if (plane != OMAPFB_PLANE_GFX) return -EINVAL; return 0; } /* * Configure the LCD DMA for a palette load operation and do the palette * downloading synchronously. We don't use the frame+palette load mode of * the controller, since the palette can always be downloaded separately. */ static void load_palette(void) { u16 *palette; palette = (u16 *)lcdc.palette_virt; *(u16 *)palette &= 0x0fff; *(u16 *)palette |= lcdc.palette_code; omap_set_lcd_dma_b1(lcdc.palette_phys, lcdc.palette_size / 4 + 1, 1, OMAP_DMA_DATA_TYPE_S32); omap_set_lcd_dma_single_transfer(1); omap_setup_lcd_dma(); init_completion(&lcdc.palette_load_complete); enable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE); set_load_mode(OMAP_LCDC_LOAD_PALETTE); enable_controller(); if (!wait_for_completion_timeout(&lcdc.palette_load_complete, msecs_to_jiffies(500))) dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n"); /* The controller gets disabled in the irq handler */ disable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE); omap_stop_lcd_dma(); omap_set_lcd_dma_single_transfer(lcdc.ext_mode); } /* Used only in internal controller mode */ static int omap_lcdc_setcolreg(u_int regno, u16 red, u16 green, u16 blue, u16 transp, int update_hw_pal) { u16 *palette; if (lcdc.color_mode != OMAPFB_COLOR_CLUT_8BPP || regno > 255) return -EINVAL; palette = (u16 *)lcdc.palette_virt; palette[regno] &= ~0x0fff; palette[regno] |= ((red >> 12) << 8) | ((green >> 12) << 4 ) | 
(blue >> 12); if (update_hw_pal) { disable_controller(); omap_stop_lcd_dma(); load_palette(); setup_lcd_dma(); set_load_mode(OMAP_LCDC_LOAD_FRAME); enable_controller(); } return 0; } static void calc_ck_div(int is_tft, int pck, int *pck_div) { unsigned long lck; pck = max(1, pck); lck = clk_get_rate(lcdc.lcd_ck); *pck_div = (lck + pck - 1) / pck; if (is_tft) *pck_div = max(2, *pck_div); else *pck_div = max(3, *pck_div); if (*pck_div > 255) { /* FIXME: try to adjust logic clock divider as well */ *pck_div = 255; dev_warn(lcdc.fbdev->dev, "pixclock %d kHz too low.\n", pck / 1000); } } static void inline setup_regs(void) { u32 l; struct lcd_panel *panel = lcdc.fbdev->panel; int is_tft = panel->config & OMAP_LCDC_PANEL_TFT; unsigned long lck; int pcd; l = omap_readl(OMAP_LCDC_CONTROL); l &= ~OMAP_LCDC_CTRL_LCD_TFT; l |= is_tft ? OMAP_LCDC_CTRL_LCD_TFT : 0; #ifdef CONFIG_MACH_OMAP_PALMTE /* FIXME:if (machine_is_omap_palmte()) { */ /* PalmTE uses alternate TFT setting in 8BPP mode */ l |= (is_tft && panel->bpp == 8) ? 
0x810000 : 0; /* } */ #endif omap_writel(l, OMAP_LCDC_CONTROL); l = omap_readl(OMAP_LCDC_TIMING2); l &= ~(((1 << 6) - 1) << 20); l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 20; omap_writel(l, OMAP_LCDC_TIMING2); l = panel->x_res - 1; l |= (panel->hsw - 1) << 10; l |= (panel->hfp - 1) << 16; l |= (panel->hbp - 1) << 24; omap_writel(l, OMAP_LCDC_TIMING0); l = panel->y_res - 1; l |= (panel->vsw - 1) << 10; l |= panel->vfp << 16; l |= panel->vbp << 24; omap_writel(l, OMAP_LCDC_TIMING1); l = omap_readl(OMAP_LCDC_TIMING2); l &= ~0xff; lck = clk_get_rate(lcdc.lcd_ck); if (!panel->pcd) calc_ck_div(is_tft, panel->pixel_clock * 1000, &pcd); else { dev_warn(lcdc.fbdev->dev, "Pixel clock divider value is obsolete.\n" "Try to set pixel_clock to %lu and pcd to 0 " "in drivers/video/omap/lcd_%s.c and submit a patch.\n", lck / panel->pcd / 1000, panel->name); pcd = panel->pcd; } l |= pcd & 0xff; l |= panel->acb << 8; omap_writel(l, OMAP_LCDC_TIMING2); /* update panel info with the exact clock */ panel->pixel_clock = lck / pcd / 1000; } /* * Configure the LCD controller, download the color palette and start a looped * DMA transfer of the frame image data. Called only in internal * controller mode. 
*/ static int omap_lcdc_set_update_mode(enum omapfb_update_mode mode) { int r = 0; if (mode != lcdc.update_mode) { switch (mode) { case OMAPFB_AUTO_UPDATE: setup_regs(); load_palette(); /* Setup and start LCD DMA */ setup_lcd_dma(); set_load_mode(OMAP_LCDC_LOAD_FRAME); enable_irqs(OMAP_LCDC_IRQ_DONE); /* This will start the actual DMA transfer */ enable_controller(); lcdc.update_mode = mode; break; case OMAPFB_UPDATE_DISABLED: disable_controller(); omap_stop_lcd_dma(); lcdc.update_mode = mode; break; default: r = -EINVAL; } } return r; } static enum omapfb_update_mode omap_lcdc_get_update_mode(void) { return lcdc.update_mode; } /* PM code called only in internal controller mode */ static void omap_lcdc_suspend(void) { omap_lcdc_set_update_mode(OMAPFB_UPDATE_DISABLED); } static void omap_lcdc_resume(void) { omap_lcdc_set_update_mode(OMAPFB_AUTO_UPDATE); } static void omap_lcdc_get_caps(int plane, struct omapfb_caps *caps) { return; } int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data) { BUG_ON(callback == NULL); if (lcdc.dma_callback) return -EBUSY; else { lcdc.dma_callback = callback; lcdc.dma_callback_data = data; } return 0; } EXPORT_SYMBOL(omap_lcdc_set_dma_callback); void omap_lcdc_free_dma_callback(void) { lcdc.dma_callback = NULL; } EXPORT_SYMBOL(omap_lcdc_free_dma_callback); static void lcdc_dma_handler(u16 status, void *data) { if (lcdc.dma_callback) lcdc.dma_callback(lcdc.dma_callback_data); } static int mmap_kern(void) { struct vm_struct *kvma; struct vm_area_struct vma; pgprot_t pgprot; unsigned long vaddr; kvma = get_vm_area(lcdc.vram_size, VM_IOREMAP); if (kvma == NULL) { dev_err(lcdc.fbdev->dev, "can't get kernel vm area\n"); return -ENOMEM; } vma.vm_mm = &init_mm; vaddr = (unsigned long)kvma->addr; vma.vm_start = vaddr; vma.vm_end = vaddr + lcdc.vram_size; pgprot = pgprot_writecombine(pgprot_kernel); if (io_remap_pfn_range(&vma, vaddr, lcdc.vram_phys >> PAGE_SHIFT, lcdc.vram_size, pgprot) < 0) { dev_err(lcdc.fbdev->dev, "kernel 
mmap for FB memory failed\n"); return -EAGAIN; } lcdc.vram_virt = (void *)vaddr; return 0; } static void unmap_kern(void) { vunmap(lcdc.vram_virt); } static int alloc_palette_ram(void) { lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL); if (lcdc.palette_virt == NULL) { dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n"); return -ENOMEM; } memset(lcdc.palette_virt, 0, MAX_PALETTE_SIZE); return 0; } static void free_palette_ram(void) { dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE, lcdc.palette_virt, lcdc.palette_phys); } static int alloc_fbmem(struct omapfb_mem_region *region) { int bpp; int frame_size; struct lcd_panel *panel = lcdc.fbdev->panel; bpp = panel->bpp; if (bpp == 12) bpp = 16; frame_size = PAGE_ALIGN(panel->x_res * bpp / 8 * panel->y_res); if (region->size > frame_size) frame_size = region->size; lcdc.vram_size = frame_size; lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev, lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL); if (lcdc.vram_virt == NULL) { dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n"); return -ENOMEM; } region->size = frame_size; region->paddr = lcdc.vram_phys; region->vaddr = lcdc.vram_virt; region->alloc = 1; memset(lcdc.vram_virt, 0, lcdc.vram_size); return 0; } static void free_fbmem(void) { dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size, lcdc.vram_virt, lcdc.vram_phys); } static int setup_fbmem(struct omapfb_mem_desc *req_md) { int r; if (!req_md->region_cnt) { dev_err(lcdc.fbdev->dev, "no memory regions defined\n"); return -EINVAL; } if (req_md->region_cnt > 1) { dev_err(lcdc.fbdev->dev, "only one plane is supported\n"); req_md->region_cnt = 1; } if (req_md->region[0].paddr == 0) { lcdc.fbmem_allocated = 1; if ((r = alloc_fbmem(&req_md->region[0])) < 0) return r; return 0; } lcdc.vram_phys = req_md->region[0].paddr; lcdc.vram_size = req_md->region[0].size; if ((r = mmap_kern()) < 0) return r; dev_dbg(lcdc.fbdev->dev, "vram at 
%08x size %08lx mapped to 0x%p\n", lcdc.vram_phys, lcdc.vram_size, lcdc.vram_virt); return 0; } static void cleanup_fbmem(void) { if (lcdc.fbmem_allocated) free_fbmem(); else unmap_kern(); } static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode, struct omapfb_mem_desc *req_vram) { int r; u32 l; int rate; struct clk *tc_ck; lcdc.irq_mask = 0; lcdc.fbdev = fbdev; lcdc.ext_mode = ext_mode; l = 0; omap_writel(l, OMAP_LCDC_CONTROL); /* FIXME: * According to errata some platforms have a clock rate limitiation */ lcdc.lcd_ck = clk_get(fbdev->dev, "lcd_ck"); if (IS_ERR(lcdc.lcd_ck)) { dev_err(fbdev->dev, "unable to access LCD clock\n"); r = PTR_ERR(lcdc.lcd_ck); goto fail0; } tc_ck = clk_get(fbdev->dev, "tc_ck"); if (IS_ERR(tc_ck)) { dev_err(fbdev->dev, "unable to access TC clock\n"); r = PTR_ERR(tc_ck); goto fail1; } rate = clk_get_rate(tc_ck); clk_put(tc_ck); if (machine_is_ams_delta()) rate /= 4; if (machine_is_omap_h3()) rate /= 3; r = clk_set_rate(lcdc.lcd_ck, rate); if (r) { dev_err(fbdev->dev, "failed to adjust LCD rate\n"); goto fail1; } clk_enable(lcdc.lcd_ck); r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev); if (r) { dev_err(fbdev->dev, "unable to get IRQ\n"); goto fail2; } r = omap_request_lcd_dma(lcdc_dma_handler, NULL); if (r) { dev_err(fbdev->dev, "unable to get LCD DMA\n"); goto fail3; } omap_set_lcd_dma_single_transfer(ext_mode); omap_set_lcd_dma_ext_controller(ext_mode); if (!ext_mode) if ((r = alloc_palette_ram()) < 0) goto fail4; if ((r = setup_fbmem(req_vram)) < 0) goto fail5; pr_info("omapfb: LCDC initialized\n"); return 0; fail5: if (!ext_mode) free_palette_ram(); fail4: omap_free_lcd_dma(); fail3: free_irq(OMAP_LCDC_IRQ, lcdc.fbdev); fail2: clk_disable(lcdc.lcd_ck); fail1: clk_put(lcdc.lcd_ck); fail0: return r; } static void omap_lcdc_cleanup(void) { if (!lcdc.ext_mode) free_palette_ram(); cleanup_fbmem(); omap_free_lcd_dma(); free_irq(OMAP_LCDC_IRQ, lcdc.fbdev); clk_disable(lcdc.lcd_ck); clk_put(lcdc.lcd_ck); } 
const struct lcd_ctrl omap1_int_ctrl = { .name = "internal", .init = omap_lcdc_init, .cleanup = omap_lcdc_cleanup, .get_caps = omap_lcdc_get_caps, .set_update_mode = omap_lcdc_set_update_mode, .get_update_mode = omap_lcdc_get_update_mode, .update_window = NULL, .suspend = omap_lcdc_suspend, .resume = omap_lcdc_resume, .setup_plane = omap_lcdc_setup_plane, .enable_plane = omap_lcdc_enable_plane, .setcolreg = omap_lcdc_setcolreg, };
gpl-2.0
Stefan-Schmidt/linux-2.6
fs/afs/callback.c
9956
11654
/* * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved. * * This software may be freely redistributed under the terms of the * GNU General Public License. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Authors: David Woodhouse <dwmw2@infradead.org> * David Howells <dhowells@redhat.com> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/circ_buf.h> #include <linux/sched.h> #include "internal.h" #if 0 unsigned afs_vnode_update_timeout = 10; #endif /* 0 */ #define afs_breakring_space(server) \ CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail, \ ARRAY_SIZE((server)->cb_break)) //static void afs_callback_updater(struct work_struct *); static struct workqueue_struct *afs_callback_update_worker; /* * allow the fileserver to request callback state (re-)initialisation */ void afs_init_callback_state(struct afs_server *server) { struct afs_vnode *vnode; _enter("{%p}", server); spin_lock(&server->cb_lock); /* kill all the promises on record from this server */ while (!RB_EMPTY_ROOT(&server->cb_promises)) { vnode = rb_entry(server->cb_promises.rb_node, struct afs_vnode, cb_promise); _debug("UNPROMISE { vid=%x:%u uq=%u}", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; } spin_unlock(&server->cb_lock); _leave(""); } /* * handle the data invalidation side of a callback being broken */ void afs_broken_callback_work(struct work_struct *work) { struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_broken_work); _enter(""); if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) return; /* we're only interested in dealing with a broken callback on *this* * vnode and only if no-one else has dealt with it yet */ if (!mutex_trylock(&vnode->validate_lock)) return; /* someone else is 
dealing with it */ if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) { if (S_ISDIR(vnode->vfs_inode.i_mode)) afs_clear_permits(vnode); if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0) goto out; if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) goto out; /* if the vnode's data version number changed then its contents * are different */ if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) afs_zap_data(vnode); } out: mutex_unlock(&vnode->validate_lock); /* avoid the potential race whereby the mutex_trylock() in this * function happens again between the clear_bit() and the * mutex_unlock() */ if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) { _debug("requeue"); queue_work(afs_callback_update_worker, &vnode->cb_broken_work); } _leave(""); } /* * actually break a callback */ static void afs_break_callback(struct afs_server *server, struct afs_vnode *vnode) { _enter(""); set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); if (vnode->cb_promised) { spin_lock(&vnode->lock); _debug("break callback"); spin_lock(&server->cb_lock); if (vnode->cb_promised) { rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; } spin_unlock(&server->cb_lock); queue_work(afs_callback_update_worker, &vnode->cb_broken_work); if (list_empty(&vnode->granted_locks) && !list_empty(&vnode->pending_locks)) afs_lock_may_be_available(vnode); spin_unlock(&vnode->lock); } } /* * allow the fileserver to explicitly break one callback * - happens when * - the backing file is changed * - a lock is released */ static void afs_break_one_callback(struct afs_server *server, struct afs_fid *fid) { struct afs_vnode *vnode; struct rb_node *p; _debug("find"); spin_lock(&server->fs_lock); p = server->fs_vnodes.rb_node; while (p) { vnode = rb_entry(p, struct afs_vnode, server_rb); if (fid->vid < vnode->fid.vid) p = p->rb_left; else if (fid->vid > vnode->fid.vid) p = p->rb_right; else if (fid->vnode < vnode->fid.vnode) p = p->rb_left; else if (fid->vnode > vnode->fid.vnode) p = p->rb_right; else 
if (fid->unique < vnode->fid.unique) p = p->rb_left; else if (fid->unique > vnode->fid.unique) p = p->rb_right; else goto found; } /* not found so we just ignore it (it may have moved to another * server) */ not_available: _debug("not avail"); spin_unlock(&server->fs_lock); _leave(""); return; found: _debug("found"); ASSERTCMP(server, ==, vnode->server); if (!igrab(AFS_VNODE_TO_I(vnode))) goto not_available; spin_unlock(&server->fs_lock); afs_break_callback(server, vnode); iput(&vnode->vfs_inode); _leave(""); } /* * allow the fileserver to break callback promises */ void afs_break_callbacks(struct afs_server *server, size_t count, struct afs_callback callbacks[]) { _enter("%p,%zu,", server, count); ASSERT(server != NULL); ASSERTCMP(count, <=, AFSCBMAX); for (; count > 0; callbacks++, count--) { _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", callbacks->fid.vid, callbacks->fid.vnode, callbacks->fid.unique, callbacks->version, callbacks->expiry, callbacks->type ); afs_break_one_callback(server, &callbacks->fid); } _leave(""); return; } /* * record the callback for breaking * - the caller must hold server->cb_lock */ static void afs_do_give_up_callback(struct afs_server *server, struct afs_vnode *vnode) { struct afs_callback *cb; _enter("%p,%p", server, vnode); cb = &server->cb_break[server->cb_break_head]; cb->fid = vnode->fid; cb->version = vnode->cb_version; cb->expiry = vnode->cb_expiry; cb->type = vnode->cb_type; smp_wmb(); server->cb_break_head = (server->cb_break_head + 1) & (ARRAY_SIZE(server->cb_break) - 1); /* defer the breaking of callbacks to try and collect as many as * possible to ship in one operation */ switch (atomic_inc_return(&server->cb_break_n)) { case 1 ... 
AFSCBMAX - 1: queue_delayed_work(afs_callback_update_worker, &server->cb_break_work, HZ * 2); break; case AFSCBMAX: afs_flush_callback_breaks(server); break; default: break; } ASSERT(server->cb_promises.rb_node != NULL); rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; _leave(""); } /* * discard the callback on a deleted item */ void afs_discard_callback_on_delete(struct afs_vnode *vnode) { struct afs_server *server = vnode->server; _enter("%d", vnode->cb_promised); if (!vnode->cb_promised) { _leave(" [not promised]"); return; } ASSERT(server != NULL); spin_lock(&server->cb_lock); if (vnode->cb_promised) { ASSERT(server->cb_promises.rb_node != NULL); rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; } spin_unlock(&server->cb_lock); _leave(""); } /* * give up the callback registered for a vnode on the file server when the * inode is being cleared */ void afs_give_up_callback(struct afs_vnode *vnode) { struct afs_server *server = vnode->server; DECLARE_WAITQUEUE(myself, current); _enter("%d", vnode->cb_promised); _debug("GIVE UP INODE %p", &vnode->vfs_inode); if (!vnode->cb_promised) { _leave(" [not promised]"); return; } ASSERT(server != NULL); spin_lock(&server->cb_lock); if (vnode->cb_promised && afs_breakring_space(server) == 0) { add_wait_queue(&server->cb_break_waitq, &myself); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (!vnode->cb_promised || afs_breakring_space(server) != 0) break; spin_unlock(&server->cb_lock); schedule(); spin_lock(&server->cb_lock); } remove_wait_queue(&server->cb_break_waitq, &myself); __set_current_state(TASK_RUNNING); } /* of course, it's always possible for the server to break this vnode's * callback first... 
*/ if (vnode->cb_promised) afs_do_give_up_callback(server, vnode); spin_unlock(&server->cb_lock); _leave(""); } /* * dispatch a deferred give up callbacks operation */ void afs_dispatch_give_up_callbacks(struct work_struct *work) { struct afs_server *server = container_of(work, struct afs_server, cb_break_work.work); _enter(""); /* tell the fileserver to discard the callback promises it has * - in the event of ENOMEM or some other error, we just forget that we * had callbacks entirely, and the server will call us later to break * them */ afs_fs_give_up_callbacks(server, &afs_async_call); } /* * flush the outstanding callback breaks on a server */ void afs_flush_callback_breaks(struct afs_server *server) { cancel_delayed_work(&server->cb_break_work); queue_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0); } #if 0 /* * update a bunch of callbacks */ static void afs_callback_updater(struct work_struct *work) { struct afs_server *server; struct afs_vnode *vnode, *xvnode; time_t now; long timeout; int ret; server = container_of(work, struct afs_server, updater); _enter(""); now = get_seconds(); /* find the first vnode to update */ spin_lock(&server->cb_lock); for (;;) { if (RB_EMPTY_ROOT(&server->cb_promises)) { spin_unlock(&server->cb_lock); _leave(" [nothing]"); return; } vnode = rb_entry(rb_first(&server->cb_promises), struct afs_vnode, cb_promise); if (atomic_read(&vnode->usage) > 0) break; rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; } timeout = vnode->update_at - now; if (timeout > 0) { queue_delayed_work(afs_vnode_update_worker, &afs_vnode_update, timeout * HZ); spin_unlock(&server->cb_lock); _leave(" [nothing]"); return; } list_del_init(&vnode->update); atomic_inc(&vnode->usage); spin_unlock(&server->cb_lock); /* we can now perform the update */ _debug("update %s", vnode->vldb.name); vnode->state = AFS_VL_UPDATING; vnode->upd_rej_cnt = 0; vnode->upd_busy_cnt = 0; ret = afs_vnode_update_record(vl, &vldb); 
switch (ret) { case 0: afs_vnode_apply_update(vl, &vldb); vnode->state = AFS_VL_UPDATING; break; case -ENOMEDIUM: vnode->state = AFS_VL_VOLUME_DELETED; break; default: vnode->state = AFS_VL_UNCERTAIN; break; } /* and then reschedule */ _debug("reschedule"); vnode->update_at = get_seconds() + afs_vnode_update_timeout; spin_lock(&server->cb_lock); if (!list_empty(&server->cb_promises)) { /* next update in 10 minutes, but wait at least 1 second more * than the newest record already queued so that we don't spam * the VL server suddenly with lots of requests */ xvnode = list_entry(server->cb_promises.prev, struct afs_vnode, update); if (vnode->update_at <= xvnode->update_at) vnode->update_at = xvnode->update_at + 1; xvnode = list_entry(server->cb_promises.next, struct afs_vnode, update); timeout = xvnode->update_at - now; if (timeout < 0) timeout = 0; } else { timeout = afs_vnode_update_timeout; } list_add_tail(&vnode->update, &server->cb_promises); _debug("timeout %ld", timeout); queue_delayed_work(afs_vnode_update_worker, &afs_vnode_update, timeout * HZ); spin_unlock(&server->cb_lock); afs_put_vnode(vl); } #endif /* * initialise the callback update process */ int __init afs_callback_update_init(void) { afs_callback_update_worker = create_singlethread_workqueue("kafs_callbackd"); return afs_callback_update_worker ? 0 : -ENOMEM; } /* * shut down the callback update process */ void afs_callback_update_kill(void) { destroy_workqueue(afs_callback_update_worker); }
gpl-2.0
tusharbehera/linux
fs/afs/cache.c
12772
11040
/* AFS caching stuff * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include "internal.h" static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen); static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static enum fscache_checkaux afs_vlocation_cache_check_aux( void *cookie_netfs_data, const void *buffer, uint16_t buflen); static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static void afs_vnode_cache_get_attr(const void *cookie_netfs_data, uint64_t *size); static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen); static void afs_vnode_cache_now_uncached(void *cookie_netfs_data); struct fscache_netfs afs_cache_netfs = { .name = "afs", .version = 0, }; struct fscache_cookie_def afs_cell_cache_index_def = { .name = "AFS.cell", .type = FSCACHE_COOKIE_TYPE_INDEX, .get_key = afs_cell_cache_get_key, .get_aux = afs_cell_cache_get_aux, .check_aux = afs_cell_cache_check_aux, }; struct fscache_cookie_def 
afs_vlocation_cache_index_def = { .name = "AFS.vldb", .type = FSCACHE_COOKIE_TYPE_INDEX, .get_key = afs_vlocation_cache_get_key, .get_aux = afs_vlocation_cache_get_aux, .check_aux = afs_vlocation_cache_check_aux, }; struct fscache_cookie_def afs_volume_cache_index_def = { .name = "AFS.volume", .type = FSCACHE_COOKIE_TYPE_INDEX, .get_key = afs_volume_cache_get_key, }; struct fscache_cookie_def afs_vnode_cache_index_def = { .name = "AFS.vnode", .type = FSCACHE_COOKIE_TYPE_DATAFILE, .get_key = afs_vnode_cache_get_key, .get_attr = afs_vnode_cache_get_attr, .get_aux = afs_vnode_cache_get_aux, .check_aux = afs_vnode_cache_check_aux, .now_uncached = afs_vnode_cache_now_uncached, }; /* * set the key for the index entry */ static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_cell *cell = cookie_netfs_data; uint16_t klen; _enter("%p,%p,%u", cell, buffer, bufmax); klen = strlen(cell->name); if (klen > bufmax) return 0; memcpy(buffer, cell->name, klen); return klen; } /* * provide new auxiliary cache data */ static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_cell *cell = cookie_netfs_data; uint16_t dlen; _enter("%p,%p,%u", cell, buffer, bufmax); dlen = cell->vl_naddrs * sizeof(cell->vl_addrs[0]); dlen = min(dlen, bufmax); dlen &= ~(sizeof(cell->vl_addrs[0]) - 1); memcpy(buffer, cell->vl_addrs, dlen); return dlen; } /* * check that the auxiliary data indicates that the entry is still valid */ static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen) { _leave(" = OKAY"); return FSCACHE_CHECKAUX_OKAY; } /*****************************************************************************/ /* * set the key for the index entry */ static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_vlocation *vlocation = cookie_netfs_data; 
/*
 * NOTE(review): this chunk is the tail of the AFS fscache netfs operations.
 * The head of afs_vlocation_cache_get_key() — its signature and the
 * declaration of "vlocation" — lies above this view; the statements below
 * finish emitting the volume-location name as the cache index key.
 */
	uint16_t klen;

	_enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax);

	klen = strnlen(vlocation->vldb.name, sizeof(vlocation->vldb.name));
	/* refuse to emit a key that would not fit the caller's buffer */
	if (klen > bufmax)
		return 0;

	memcpy(buffer, vlocation->vldb.name, klen);
	_leave(" = %u", klen);
	return klen;
}

/*
 * provide new auxiliary cache data
 * (the VLDB record body from "nservers" onward, used by fscache to judge
 * whether a cached entry is still current)
 */
static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
					    void *buffer, uint16_t bufmax)
{
	const struct afs_vlocation *vlocation = cookie_netfs_data;
	uint16_t dlen;

	_enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax);

	/* aux data is everything in the record past the "nservers" field */
	dlen = sizeof(struct afs_cache_vlocation);
	dlen -= offsetof(struct afs_cache_vlocation, nservers);
	if (dlen > bufmax)
		return 0;

	memcpy(buffer, (uint8_t *)&vlocation->vldb.nservers, dlen);
	_leave(" = %u", dlen);
	return dlen;
}

/*
 * check that the auxiliary data indicates that the entry is still valid
 */
static enum fscache_checkaux afs_vlocation_cache_check_aux(
	void *cookie_netfs_data, const void *buffer, uint16_t buflen)
{
	const struct afs_cache_vlocation *cvldb;
	struct afs_vlocation *vlocation = cookie_netfs_data;
	uint16_t dlen;

	_enter("{%s},%p,%u", vlocation->vldb.name, buffer, buflen);

	/* check the size of the data is what we're expecting */
	dlen = sizeof(struct afs_cache_vlocation);
	dlen -= offsetof(struct afs_cache_vlocation, nservers);
	if (dlen != buflen)
		return FSCACHE_CHECKAUX_OBSOLETE;

	cvldb = container_of(buffer, struct afs_cache_vlocation, nservers);

	/* if what's on disk is more valid than what's in memory, then use the
	 * VL record from the cache */
	if (!vlocation->valid || vlocation->vldb.rtime == cvldb->rtime) {
		memcpy((uint8_t *)&vlocation->vldb.nservers, buffer, dlen);
		vlocation->valid = 1;
		_leave(" = SUCCESS [c->m]");
		return FSCACHE_CHECKAUX_OKAY;
	}

	/* need to update the cache if the cached info differs */
	if (memcmp(&vlocation->vldb, buffer, dlen) != 0) {
		/* delete if the volume IDs for this name differ */
		if (memcmp(&vlocation->vldb.vid, &cvldb->vid,
			   sizeof(cvldb->vid)) != 0) {
			_leave(" = OBSOLETE");
			return FSCACHE_CHECKAUX_OBSOLETE;
		}
		_leave(" = UPDATE");
		return FSCACHE_CHECKAUX_NEEDS_UPDATE;
	}

	_leave(" = OKAY");
	return FSCACHE_CHECKAUX_OKAY;
}

/*****************************************************************************/
/*
 * set the key for the volume index entry
 */
static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data,
					 void *buffer, uint16_t bufmax)
{
	const struct afs_volume *volume = cookie_netfs_data;
	uint16_t klen;

	_enter("{%u},%p,%u", volume->type, buffer, bufmax);

	/* the volume type alone distinguishes entries at this index level */
	klen = sizeof(volume->type);
	if (klen > bufmax)
		return 0;

	memcpy(buffer, &volume->type, sizeof(volume->type));
	_leave(" = %u", klen);
	return klen;
}

/*****************************************************************************/
/*
 * set the key for the index entry
 */
static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data,
					void *buffer, uint16_t bufmax)
{
	const struct afs_vnode *vnode = cookie_netfs_data;
	uint16_t klen;

	_enter("{%x,%x,%llx},%p,%u",
	       vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
	       buffer, bufmax);

	/* only the vnode number forms the key; the uniquifier and data
	 * version travel in the aux data (afs_vnode_cache_get_aux()) */
	klen = sizeof(vnode->fid.vnode);
	if (klen > bufmax)
		return 0;

	memcpy(buffer, &vnode->fid.vnode, sizeof(vnode->fid.vnode));
	_leave(" = %u", klen);
	return klen;
}

/*
 * provide updated file attributes
 */
static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
				     uint64_t *size)
{
	const struct afs_vnode *vnode = cookie_netfs_data;

	_enter("{%x,%x,%llx},",
	       vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version);

	*size = vnode->status.size;
}

/*
 * provide new auxiliary cache data
 * (uniquifier followed immediately by the data version)
 */
static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
					void *buffer, uint16_t bufmax)
{
	const struct afs_vnode *vnode = cookie_netfs_data;
	uint16_t dlen;

	_enter("{%x,%x,%Lx},%p,%u",
	       vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
	       buffer, bufmax);

	dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version);
	if (dlen > bufmax)
		return 0;

	memcpy(buffer, &vnode->fid.unique, sizeof(vnode->fid.unique));
	buffer += sizeof(vnode->fid.unique);
	memcpy(buffer, &vnode->status.data_version,
	       sizeof(vnode->status.data_version));
	_leave(" = %u", dlen);
	return dlen;
}

/*
 * check that the auxiliary data indicates that the entry is still valid
 */
static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
						       const void *buffer,
						       uint16_t buflen)
{
	struct afs_vnode *vnode = cookie_netfs_data;
	uint16_t dlen;

	_enter("{%x,%x,%llx},%p,%u",
	       vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
	       buffer, buflen);

	/* check the size of the data is what we're expecting */
	dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version);
	if (dlen != buflen) {
		_leave(" = OBSOLETE [len %hx != %hx]", dlen, buflen);
		return FSCACHE_CHECKAUX_OBSOLETE;
	}

	/* a uniquifier mismatch means the cached entry belongs to a previous
	 * incarnation of this vnode number */
	if (memcmp(buffer,
		   &vnode->fid.unique,
		   sizeof(vnode->fid.unique)) != 0) {
		unsigned unique;

		memcpy(&unique, buffer, sizeof(unique));
		_leave(" = OBSOLETE [uniq %x != %x]",
		       unique, vnode->fid.unique);
		return FSCACHE_CHECKAUX_OBSOLETE;
	}

	/* a data version mismatch means the cached pages are out of date */
	if (memcmp(buffer + sizeof(vnode->fid.unique),
		   &vnode->status.data_version,
		   sizeof(vnode->status.data_version)) != 0) {
		afs_dataversion_t version;

		memcpy(&version, buffer + sizeof(vnode->fid.unique),
		       sizeof(version));
		_leave(" = OBSOLETE [vers %llx != %llx]",
		       version, vnode->status.data_version);
		return FSCACHE_CHECKAUX_OBSOLETE;
	}

	_leave(" = SUCCESS");
	return FSCACHE_CHECKAUX_OKAY;
}

/*
 * indication that the cookie is no longer cached
 * - this function is called when the backing store currently caching a cookie
 *   is removed
 * - the netfs should use this to clean up any markers indicating cached pages
 * - this is mandatory for any object that may have data
 */
static void afs_vnode_cache_now_uncached(void *cookie_netfs_data)
{
	struct afs_vnode *vnode = cookie_netfs_data;
	struct pagevec pvec;
	pgoff_t first;
	int loop, nr_pages;

	_enter("{%x,%x,%Lx}",
	       vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version);

	pagevec_init(&pvec, 0);
	first = 0;

	for (;;) {
		/* grab a bunch of pages to clean */
		nr_pages = pagevec_lookup(&pvec, vnode->vfs_inode.i_mapping,
					  first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));
		if (!nr_pages)
			break;

		for (loop = 0; loop < nr_pages; loop++)
			ClearPageFsCache(pvec.pages[loop]);

		/* resume the scan after the last page seen in this batch */
		first = pvec.pages[nr_pages - 1]->index + 1;

		pvec.nr = nr_pages;
		pagevec_release(&pvec);
		cond_resched();
	}

	_leave("");
}
gpl-2.0
zhenyw/linux
sound/drivers/opl4/yrw801.c
15588
56838
/* * Information about the Yamaha YRW801 wavetable ROM chip * * Copyright (c) 2003 by Clemens Ladisch <clemens@ladisch.de> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/

#include "opl4_local.h"

/*
 * Probe for a YRW801 wavetable ROM attached to the OPL4.
 *
 * Reads 15 bytes at ROM address 0x001200 and requires the literal string
 * "CopyrightYAMAHA"; then reads the two bytes at 0x1ffffe and requires the
 * first to be 0x01 (NOTE(review): presumably a ROM major-version check —
 * confirm against the YRW801 data sheet).
 *
 * Returns 0 if the ROM is present, -ENODEV otherwise.
 */
int snd_yrw801_detect(struct snd_opl4 *opl4)
{
	char buf[15];

	snd_opl4_read_memory(opl4, buf, 0x001200, 15);
	if (memcmp(buf, "CopyrightYAMAHA", 15))
		return -ENODEV;
	snd_opl4_read_memory(opl4, buf, 0x1ffffe, 2);
	if (buf[0] != 0x01)
		return -ENODEV;
	snd_printdd("YRW801 ROM version %02x.%02x\n", buf[0], buf[1]);
	return 0;
}

/*
 * The instrument definitions are stored statically because, in practice, the
 * OPL4 is always coupled with a YRW801. Dynamic instrument loading would be
 * required if downloading sample data to external SRAM was actually supported
 * by this driver.
 */
/* NOTE(review): each entry appears to be { low MIDI key, high MIDI key,
 * packed voice parameters } — the field meanings are fixed by
 * struct opl4_region in opl4_local.h; confirm against that header. */
static const struct opl4_region regions_00[] = {	/* Acoustic Grand Piano */
	{0x14, 0x27, {0x12c,7474,100, 0,0,0x00,0xc8,0x20,0xf2,0x13,0x08,0x0}},
	{0x28, 0x2d, {0x12d,6816,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x2e, 0x33, {0x12e,5899,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x34, 0x39, {0x12f,5290,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x3a, 0x3f, {0x130,4260,100, 0,0,0x0a,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x40, 0x45, {0x131,3625,100, 0,0,0x0a,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x46, 0x4b, {0x132,3116,100, 0,0,0x04,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x4c, 0x52, {0x133,2081,100, 0,0,0x03,0xc8,0x20,0xf2,0x14,0x18,0x0}},
	{0x53, 0x58, {0x134,1444,100, 0,0,0x07,0xc8,0x20,0xf3,0x14,0x18,0x0}},
	{0x59, 0x6d, {0x135,1915,100, 0,0,0x00,0xc8,0x20,0xf4,0x15,0x08,0x0}}
};
static const struct opl4_region regions_01[] = {	/* Bright Acoustic Piano */
	{0x14, 0x2d, {0x12c,7474,100, 0,0,0x00,0xc8,0x20,0xf2,0x13,0x08,0x0}},
	{0x2e, 0x33, {0x12d,6816,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x34, 0x39, {0x12e,5899,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x3a, 0x3f, {0x12f,5290,100, 0,0,0x00,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x40, 0x45, {0x130,4260,100, 0,0,0x0a,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x46, 0x4b, {0x131,3625,100, 0,0,0x0a,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x4c, 0x52, {0x132,3116,100, 0,0,0x04,0xc8,0x20,0xf2,0x14,0x08,0x0}},
	{0x53, 0x58, {0x133,2081,100,
0,0,0x07,0xc8,0x20,0xf2,0x14,0x18,0x0}}, {0x59, 0x5e, {0x134,1444,100, 0,0,0x0a,0xc8,0x20,0xf3,0x14,0x18,0x0}}, {0x5f, 0x6d, {0x135,1915,100, 0,0,0x00,0xc8,0x20,0xf4,0x15,0x08,0x0}} }; static const struct opl4_region regions_02[] = { /* Electric Grand Piano */ {0x14, 0x2d, {0x12c,7476,100, 1,0,0x00,0xae,0x20,0xf2,0x13,0x07,0x0}}, {0x2e, 0x33, {0x12d,6818,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x34, 0x39, {0x12e,5901,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x3a, 0x3f, {0x12f,5292,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x40, 0x45, {0x130,4262,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x46, 0x4b, {0x131,3627,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x4c, 0x52, {0x132,3118,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x07,0x0}}, {0x53, 0x58, {0x133,2083,100, 1,0,0x00,0xae,0x20,0xf2,0x14,0x17,0x0}}, {0x59, 0x5e, {0x134,1446,100, 1,0,0x00,0xae,0x20,0xf3,0x14,0x17,0x0}}, {0x5f, 0x6d, {0x135,1917,100, 1,0,0x00,0xae,0x20,0xf4,0x15,0x07,0x0}}, {0x00, 0x7f, {0x06c,6375,100,-1,0,0x00,0xc2,0x28,0xf4,0x23,0x18,0x0}} }; static const struct opl4_region regions_03[] = { /* Honky-Tonk Piano */ {0x14, 0x27, {0x12c,7474,100, 0,0,0x00,0xb4,0x20,0xf2,0x13,0x08,0x0}}, {0x28, 0x2d, {0x12d,6816,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x2e, 0x33, {0x12e,5899,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12f,5290,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x130,4260,100, 0,0,0x0a,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x131,3625,100, 0,0,0x0a,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x132,3116,100, 0,0,0x04,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x133,2081,100, 0,0,0x03,0xb4,0x20,0xf2,0x14,0x18,0x0}}, {0x53, 0x58, {0x134,1444,100, 0,0,0x07,0xb4,0x20,0xf3,0x14,0x18,0x0}}, {0x59, 0x6d, {0x135,1915,100, 0,0,0x00,0xb4,0x20,0xf4,0x15,0x08,0x0}}, {0x14, 0x27, {0x12c,7486,100, 0,0,0x00,0xb4,0x20,0xf2,0x13,0x08,0x0}}, {0x28, 0x2d, {0x12d,6803,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x2e, 0x33, {0x12e,5912,100, 
0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12f,5275,100, 0,0,0x00,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x130,4274,100, 0,0,0x0a,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x131,3611,100, 0,0,0x0a,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x132,3129,100, 0,0,0x04,0xb4,0x20,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x133,2074,100, 0,0,0x07,0xb4,0x20,0xf2,0x14,0x18,0x0}}, {0x53, 0x58, {0x134,1457,100, 0,0,0x01,0xb4,0x20,0xf3,0x14,0x18,0x0}}, {0x59, 0x6d, {0x135,1903,100, 0,0,0x00,0xb4,0x20,0xf4,0x15,0x08,0x0}} }; static const struct opl4_region regions_04[] = { /* Electric Piano 1 */ {0x15, 0x6c, {0x00b,6570,100, 0,0,0x00,0x28,0x38,0xf0,0x00,0x0c,0x0}}, {0x00, 0x7f, {0x06c,6375,100, 0,2,0x00,0xb0,0x22,0xf4,0x23,0x19,0x0}} }; static const struct opl4_region regions_05[] = { /* Electric Piano 2 */ {0x14, 0x27, {0x12c,7476,100, 0,3,0x00,0xa2,0x1b,0xf2,0x13,0x08,0x0}}, {0x28, 0x2d, {0x12d,6818,100, 0,3,0x00,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x2e, 0x33, {0x12e,5901,100, 0,3,0x00,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12f,5292,100, 0,3,0x00,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x130,4262,100, 0,3,0x0a,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x131,3627,100, 0,3,0x0a,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x132,3118,100, 0,3,0x04,0xa2,0x1b,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x133,2083,100, 0,3,0x03,0xa2,0x1b,0xf2,0x14,0x18,0x0}}, {0x53, 0x58, {0x134,1446,100, 0,3,0x07,0xa2,0x1b,0xf3,0x14,0x18,0x0}}, {0x59, 0x6d, {0x135,1917,100, 0,3,0x00,0xa2,0x1b,0xf4,0x15,0x08,0x0}}, {0x14, 0x2d, {0x12c,7472,100, 0,0,0x00,0xa2,0x18,0xf2,0x13,0x08,0x0}}, {0x2e, 0x33, {0x12d,6814,100, 0,0,0x00,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x34, 0x39, {0x12e,5897,100, 0,0,0x00,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x3a, 0x3f, {0x12f,5288,100, 0,0,0x00,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x40, 0x45, {0x130,4258,100, 0,0,0x0a,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x46, 0x4b, {0x131,3623,100, 0,0,0x0a,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x4c, 0x52, {0x132,3114,100, 
0,0,0x04,0xa2,0x18,0xf2,0x14,0x08,0x0}}, {0x53, 0x58, {0x133,2079,100, 0,0,0x07,0xa2,0x18,0xf2,0x14,0x18,0x0}}, {0x59, 0x5e, {0x134,1442,100, 0,0,0x0a,0xa2,0x18,0xf3,0x14,0x18,0x0}}, {0x5f, 0x6d, {0x135,1913,100, 0,0,0x00,0xa2,0x18,0xf4,0x15,0x08,0x0}} }; static const struct opl4_region regions_06[] = { /* Harpsichord */ {0x15, 0x39, {0x080,5158,100, 0,0,0x00,0xb2,0x20,0xf5,0x24,0x19,0x0}}, {0x3a, 0x3f, {0x081,4408,100, 0,0,0x00,0xb2,0x20,0xf5,0x25,0x09,0x0}}, {0x40, 0x45, {0x082,3622,100, 0,0,0x00,0xb2,0x20,0xf5,0x25,0x09,0x0}}, {0x46, 0x4d, {0x083,2843,100, 0,0,0x00,0xb2,0x20,0xf5,0x25,0x19,0x0}}, {0x4e, 0x6c, {0x084,1307,100, 0,0,0x00,0xb2,0x20,0xf5,0x25,0x29,0x0}} }; static const struct opl4_region regions_07[] = { /* Clavinet */ {0x15, 0x51, {0x027,5009,100, 0,0,0x00,0xd2,0x28,0xf5,0x13,0x2b,0x0}}, {0x52, 0x6c, {0x028,3495,100, 0,0,0x00,0xd2,0x28,0xf5,0x13,0x3b,0x0}} }; static const struct opl4_region regions_08[] = { /* Celesta */ {0x15, 0x6c, {0x02b,3267,100, 0,0,0x00,0xdc,0x20,0xf4,0x15,0x07,0x3}} }; static const struct opl4_region regions_09[] = { /* Glockenspiel */ {0x15, 0x78, {0x0f3, 285,100, 0,0,0x00,0xc2,0x28,0xf6,0x25,0x25,0x0}} }; static const struct opl4_region regions_0a[] = { /* Music Box */ {0x15, 0x6c, {0x0f3,3362,100, 0,0,0x00,0xb6,0x20,0xa6,0x25,0x25,0x0}}, {0x15, 0x6c, {0x101,4773,100, 0,0,0x00,0xaa,0x20,0xd4,0x14,0x16,0x0}} }; static const struct opl4_region regions_0b[] = { /* Vibraphone */ {0x15, 0x6c, {0x101,4778,100, 0,0,0x00,0xc0,0x28,0xf4,0x14,0x16,0x4}} }; static const struct opl4_region regions_0c[] = { /* Marimba */ {0x15, 0x3f, {0x0f4,4778,100, 0,0,0x00,0xc4,0x38,0xf7,0x47,0x08,0x0}}, {0x40, 0x4c, {0x0f5,3217,100, 0,0,0x00,0xc4,0x38,0xf7,0x47,0x08,0x0}}, {0x4d, 0x5a, {0x0f5,3217,100, 0,0,0x00,0xc4,0x38,0xf7,0x48,0x08,0x0}}, {0x5b, 0x7f, {0x0f5,3218,100, 0,0,0x00,0xc4,0x38,0xf7,0x48,0x18,0x0}} }; static const struct opl4_region regions_0d[] = { /* Xylophone */ {0x00, 0x7f, {0x136,1729,100, 0,0,0x00,0xd2,0x38,0xf0,0x06,0x36,0x0}} }; 
static const struct opl4_region regions_0e[] = { /* Tubular Bell */ {0x01, 0x7f, {0x0ff,3999,100, 0,1,0x00,0x90,0x21,0xf4,0xa3,0x25,0x1}} }; static const struct opl4_region regions_0f[] = { /* Dulcimer */ {0x00, 0x7f, {0x03f,4236,100, 0,1,0x00,0xbc,0x29,0xf5,0x16,0x07,0x0}}, {0x00, 0x7f, {0x040,4236,100, 0,2,0x0e,0x94,0x2a,0xf5,0x16,0x07,0x0}} }; static const struct opl4_region regions_10[] = { /* Drawbar Organ */ {0x01, 0x7f, {0x08e,4394,100, 0,2,0x14,0xc2,0x3a,0xf0,0x00,0x0a,0x0}} }; static const struct opl4_region regions_11[] = { /* Percussive Organ */ {0x15, 0x3b, {0x08c,6062,100, 0,3,0x00,0xbe,0x3b,0xf0,0x00,0x09,0x0}}, {0x3c, 0x6c, {0x08d,2984,100, 0,3,0x00,0xbe,0x3b,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_12[] = { /* Rock Organ */ {0x15, 0x30, {0x128,6574,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}}, {0x31, 0x3c, {0x129,5040,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}}, {0x3d, 0x48, {0x12a,3498,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}}, {0x49, 0x54, {0x12b,1957,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}}, {0x55, 0x6c, {0x127, 423,100, 0,1,0x00,0xcc,0x39,0xf0,0x00,0x0a,0x0}} }; static const struct opl4_region regions_13[] = { /* Church Organ */ {0x15, 0x29, {0x087,7466,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}}, {0x2a, 0x30, {0x088,6456,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}}, {0x31, 0x38, {0x089,5428,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}}, {0x39, 0x41, {0x08a,4408,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}}, {0x42, 0x6c, {0x08b,3406,100, 0,1,0x00,0xc4,0x11,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_14[] = { /* Reed Organ */ {0x00, 0x53, {0x0ac,5570,100, 0,0,0x06,0xc0,0x38,0xf0,0x00,0x09,0x1}}, {0x54, 0x7f, {0x0ad,2497,100, 0,0,0x00,0xc0,0x38,0xf0,0x00,0x09,0x1}} }; static const struct opl4_region regions_15[] = { /* Accordion */ {0x15, 0x4c, {0x006,4261,100, 0,2,0x00,0xa4,0x22,0x90,0x00,0x09,0x0}}, {0x4d, 0x6c, {0x007,1530,100, 0,2,0x00,0xa4,0x22,0x90,0x00,0x09,0x0}}, {0x15, 0x6c, 
{0x070,4391,100, 0,3,0x00,0x8a,0x23,0xa0,0x00,0x09,0x0}} }; static const struct opl4_region regions_16[] = { /* Harmonica */ {0x15, 0x6c, {0x070,4408,100, 0,0,0x00,0xae,0x30,0xa0,0x00,0x09,0x2}} }; static const struct opl4_region regions_17[] = { /* Tango Accordion */ {0x00, 0x53, {0x0ac,5573,100, 0,0,0x00,0xae,0x38,0xf0,0x00,0x09,0x0}}, {0x54, 0x7f, {0x0ad,2500,100, 0,0,0x00,0xae,0x38,0xf0,0x00,0x09,0x0}}, {0x15, 0x6c, {0x041,8479,100, 0,2,0x00,0x6a,0x3a,0x75,0x20,0x0a,0x0}} }; static const struct opl4_region regions_18[] = { /* Nylon Guitar */ {0x15, 0x2f, {0x0b3,6964,100, 0,0,0x05,0xca,0x28,0xf5,0x34,0x09,0x0}}, {0x30, 0x36, {0x0b7,5567,100, 0,0,0x0c,0xca,0x28,0xf5,0x34,0x09,0x0}}, {0x37, 0x3c, {0x0b5,4653,100, 0,0,0x00,0xca,0x28,0xf6,0x34,0x09,0x0}}, {0x3d, 0x43, {0x0b4,3892,100, 0,0,0x00,0xca,0x28,0xf6,0x35,0x09,0x0}}, {0x44, 0x60, {0x0b6,2723,100, 0,0,0x00,0xca,0x28,0xf6,0x35,0x19,0x0}} }; static const struct opl4_region regions_19[] = { /* Steel Guitar */ {0x15, 0x31, {0x00c,6937,100, 0,0,0x00,0xbc,0x28,0xf0,0x04,0x19,0x0}}, {0x32, 0x38, {0x00d,5410,100, 0,0,0x00,0xbc,0x28,0xf0,0x05,0x09,0x0}}, {0x39, 0x47, {0x00e,4379,100, 0,0,0x00,0xbc,0x28,0xf5,0x94,0x09,0x0}}, {0x48, 0x6c, {0x00f,2843,100, 0,0,0x00,0xbc,0x28,0xf6,0x95,0x09,0x0}} }; static const struct opl4_region regions_1a[] = { /* Jazz Guitar */ {0x15, 0x31, {0x05a,6832,100, 0,0,0x00,0xca,0x28,0xf6,0x34,0x09,0x0}}, {0x32, 0x3f, {0x05b,4897,100, 0,0,0x00,0xca,0x28,0xf6,0x34,0x09,0x0}}, {0x40, 0x6c, {0x05c,3218,100, 0,0,0x00,0xca,0x28,0xf6,0x34,0x09,0x0}} }; static const struct opl4_region regions_1b[] = { /* Clean Guitar */ {0x15, 0x2c, {0x061,7053,100, 0,1,0x00,0xb4,0x29,0xf5,0x54,0x0a,0x0}}, {0x2d, 0x31, {0x060,6434,100, 0,1,0x00,0xb4,0x29,0xf5,0x54,0x0a,0x0}}, {0x32, 0x38, {0x063,5764,100, 0,1,0x00,0xbe,0x29,0xf5,0x55,0x0a,0x0}}, {0x39, 0x3f, {0x062,4627,100, 0,1,0x00,0xb4,0x29,0xf5,0x55,0x0a,0x0}}, {0x40, 0x44, {0x065,3963,100, 0,1,0x00,0xb4,0x29,0xf5,0x55,0x1a,0x0}}, {0x45, 0x4b, {0x064,3313,100, 
0,1,0x00,0xb4,0x29,0xf5,0x55,0x1a,0x0}}, {0x4c, 0x54, {0x066,2462,100, 0,1,0x00,0xb4,0x29,0xf5,0x55,0x2a,0x0}}, {0x55, 0x6c, {0x067,1307,100, 0,1,0x00,0xb4,0x29,0xf6,0x56,0x0a,0x0}} }; static const struct opl4_region regions_1c[] = { /* Muted Guitar */ {0x01, 0x7f, {0x068,4408,100, 0,0,0x00,0xcc,0x28,0xf6,0x15,0x09,0x0}} }; static const struct opl4_region regions_1d[] = { /* Overdriven Guitar */ {0x00, 0x40, {0x0a5,6589,100, 0,1,0x00,0xc0,0x29,0xf2,0x11,0x09,0x0}}, {0x41, 0x7f, {0x0a6,5428,100, 0,1,0x00,0xc0,0x29,0xf2,0x11,0x09,0x0}} }; static const struct opl4_region regions_1e[] = { /* Distortion Guitar */ {0x15, 0x2a, {0x051,6928,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x2b, 0x2e, {0x052,6433,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x2f, 0x32, {0x053,5944,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x33, 0x36, {0x054,5391,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x37, 0x3a, {0x055,4897,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x3b, 0x3e, {0x056,4408,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x3f, 0x42, {0x057,3892,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x43, 0x46, {0x058,3361,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}}, {0x47, 0x6c, {0x059,2784,100, 0,1,0x00,0xbc,0x21,0xa2,0x12,0x0a,0x0}} }; static const struct opl4_region regions_1f[] = { /* Guitar Harmonics */ {0x15, 0x44, {0x05e,5499,100, 0,0,0x00,0xce,0x28,0xf4,0x24,0x09,0x0}}, {0x45, 0x49, {0x05d,4850,100, 0,0,0x00,0xe2,0x28,0xf4,0x24,0x09,0x0}}, {0x4a, 0x6c, {0x05f,4259,100, 0,0,0x00,0xce,0x28,0xf4,0x24,0x09,0x0}} }; static const struct opl4_region regions_20[] = { /* Acoustic Bass */ {0x15, 0x30, {0x004,8053,100, 0,0,0x00,0xe2,0x18,0xf5,0x15,0x09,0x0}}, {0x31, 0x6c, {0x005,4754,100, 0,0,0x00,0xe2,0x18,0xf5,0x15,0x09,0x0}} }; static const struct opl4_region regions_21[] = { /* Fingered Bass */ {0x01, 0x20, {0x04a,8762,100, 0,0,0x00,0xde,0x18,0xf6,0x14,0x09,0x0}}, {0x21, 0x25, {0x04b,8114,100, 0,0,0x00,0xde,0x18,0xf6,0x14,0x09,0x0}}, {0x26, 0x2a, {0x04c,7475,100, 
0,0,0x00,0xde,0x18,0xf6,0x14,0x09,0x0}}, {0x2b, 0x7f, {0x04d,6841,100, 0,0,0x00,0xde,0x18,0xf6,0x14,0x09,0x0}} }; static const struct opl4_region regions_22[] = { /* Picked Bass */ {0x15, 0x23, {0x04f,7954,100, 0,0,0x00,0xcc,0x18,0xf3,0x90,0x0a,0x0}}, {0x24, 0x2a, {0x050,7318,100, 0,0,0x05,0xcc,0x18,0xf3,0x90,0x1a,0x0}}, {0x2b, 0x2f, {0x06b,6654,100, 0,0,0x00,0xcc,0x18,0xf3,0x90,0x2a,0x0}}, {0x30, 0x47, {0x069,6031,100, 0,0,0x00,0xcc,0x18,0xf5,0xb0,0x0a,0x0}}, {0x48, 0x6c, {0x06a,5393,100, 0,0,0x00,0xcc,0x18,0xf5,0xb0,0x0a,0x0}} }; static const struct opl4_region regions_23[] = { /* Fretless Bass */ {0x01, 0x7f, {0x04e,5297,100, 0,0,0x00,0xd2,0x10,0xf3,0x63,0x19,0x0}} }; static const struct opl4_region regions_24[] = { /* Slap Bass 1 */ {0x15, 0x6c, {0x0a3,7606,100, 0,1,0x00,0xde,0x19,0xf5,0x32,0x1a,0x0}} }; static const struct opl4_region regions_25[] = { /* Slap Bass 2 */ {0x01, 0x7f, {0x0a2,6694,100, 0,0,0x00,0xda,0x20,0xb0,0x02,0x09,0x0}} }; static const struct opl4_region regions_26[] = { /* Synth Bass 1 */ {0x15, 0x6c, {0x0be,7466,100, 0,1,0x00,0xb8,0x39,0xf4,0x14,0x09,0x0}} }; static const struct opl4_region regions_27[] = { /* Synth Bass 2 */ {0x00, 0x7f, {0x117,8103,100, 0,1,0x00,0xca,0x39,0xf3,0x50,0x08,0x0}} }; static const struct opl4_region regions_28[] = { /* Violin */ {0x15, 0x3a, {0x105,5158,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x3b, 0x3f, {0x102,4754,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x40, 0x41, {0x106,4132,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x42, 0x44, {0x107,4033,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x45, 0x47, {0x108,3580,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x48, 0x4a, {0x10a,2957,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x4b, 0x4c, {0x10b,2724,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x4d, 0x4e, {0x10c,2530,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x4f, 0x51, {0x10d,2166,100, 0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}}, {0x52, 0x6c, {0x109,1825,100, 
0,3,0x00,0xcc,0x3b,0xf3,0x20,0x09,0x0}} }; static const struct opl4_region regions_29[] = { /* Viola */ {0x15, 0x32, {0x103,5780,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x33, 0x35, {0x104,5534,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x36, 0x38, {0x105,5158,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x39, 0x3d, {0x102,4754,100, 0,3,0x00,0xca,0x3b,0xa3,0x20,0x09,0x0}}, {0x3e, 0x3f, {0x106,4132,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x40, 0x42, {0x107,4033,100, 0,3,0x00,0xc4,0x3b,0xa3,0x20,0x09,0x0}}, {0x43, 0x45, {0x108,3580,100, 0,3,0x00,0xd0,0x3b,0xa3,0x20,0x09,0x0}}, {0x46, 0x48, {0x10a,2957,100, 0,3,0x00,0xca,0x3b,0xa3,0x20,0x09,0x0}}, {0x49, 0x4a, {0x10b,2724,100, 0,3,0x00,0xd0,0x3b,0xa3,0x20,0x09,0x0}}, {0x4b, 0x4c, {0x10c,2530,100, 0,3,0x00,0xca,0x3b,0xa3,0x20,0x09,0x0}}, {0x4d, 0x4f, {0x10d,2166,100, 0,3,0x00,0xd0,0x3b,0xa3,0x20,0x09,0x0}}, {0x50, 0x6c, {0x109,1825,100, 0,3,0x00,0xd0,0x3b,0xa3,0x20,0x09,0x0}} }; static const struct opl4_region regions_2a[] = { /* Cello */ {0x15, 0x2d, {0x112,6545,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x08,0x0}}, {0x2e, 0x37, {0x113,5764,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x08,0x0}}, {0x38, 0x3e, {0x115,4378,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x18,0x0}}, {0x3f, 0x44, {0x116,3998,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x18,0x0}}, {0x45, 0x6c, {0x114,3218,100, 0,3,0x00,0xc0,0x33,0xa0,0x00,0x18,0x0}} }; static const struct opl4_region regions_2b[] = { /* Contrabass */ {0x15, 0x29, {0x110,7713,100, 0,1,0x00,0xc2,0x19,0x90,0x00,0x09,0x0}}, {0x2a, 0x6c, {0x111,6162,100, 0,1,0x00,0xc2,0x19,0x90,0x00,0x09,0x0}} }; static const struct opl4_region regions_2c[] = { /* Tremolo Strings */ {0x15, 0x3b, {0x0b0,4810,100, 0,0,0x0a,0xde,0x38,0xf0,0x00,0x07,0x6}}, {0x3c, 0x41, {0x035,4035,100, 0,0,0x05,0xde,0x38,0xf0,0x00,0x07,0x6}}, {0x42, 0x47, {0x033,3129,100, 0,0,0x05,0xde,0x38,0xf0,0x00,0x07,0x6}}, {0x48, 0x52, {0x034,2625,100, 0,0,0x05,0xde,0x38,0xf0,0x00,0x07,0x6}}, {0x53, 0x6c, {0x0af, 936,100, 
0,0,0x00,0xde,0x38,0xf0,0x00,0x07,0x6}} }; static const struct opl4_region regions_2d[] = { /* Pizzicato Strings */ {0x15, 0x32, {0x0b8,6186,100, 0,0,0x00,0xbc,0x28,0xf0,0x00,0x05,0x0}}, {0x33, 0x3b, {0x0b9,5031,100, 0,0,0x00,0xbc,0x28,0xf0,0x00,0x05,0x0}}, {0x3c, 0x42, {0x0bb,4146,100, 0,0,0x00,0xbc,0x28,0xf0,0x00,0x05,0x0}}, {0x43, 0x48, {0x0ba,3245,100, 0,0,0x00,0xc2,0x28,0xf0,0x00,0x05,0x0}}, {0x49, 0x6c, {0x0bc,2352,100, 0,0,0x00,0xbc,0x28,0xf0,0x00,0x05,0x0}} }; static const struct opl4_region regions_2e[] = { /* Harp */ {0x15, 0x46, {0x07e,3740,100, 0,1,0x00,0xd2,0x29,0xf5,0x25,0x07,0x0}}, {0x47, 0x6c, {0x07f,2319,100, 0,1,0x00,0xd2,0x29,0xf5,0x25,0x07,0x0}} }; static const struct opl4_region regions_2f[] = { /* Timpani */ {0x15, 0x6c, {0x100,6570,100, 0,0,0x00,0xf8,0x28,0xf0,0x05,0x16,0x0}} }; static const struct opl4_region regions_30[] = { /* Strings */ {0x15, 0x3b, {0x13c,4806,100, 0,0,0x00,0xc8,0x20,0x80,0x00,0x07,0x0}}, {0x3c, 0x41, {0x13e,4035,100, 0,0,0x00,0xc8,0x20,0x80,0x00,0x07,0x0}}, {0x42, 0x47, {0x13d,3122,100, 0,0,0x00,0xc8,0x20,0x80,0x00,0x07,0x0}}, {0x48, 0x52, {0x13f,2629,100, 0,0,0x00,0xbe,0x20,0x80,0x00,0x07,0x0}}, {0x53, 0x6c, {0x140, 950,100, 0,0,0x00,0xbe,0x20,0x80,0x00,0x07,0x0}} }; static const struct opl4_region regions_31[] = { /* Slow Strings */ {0x15, 0x3b, {0x0b0,4810,100, 0,1,0x0a,0xbe,0x19,0xf0,0x00,0x07,0x0}}, {0x3c, 0x41, {0x035,4035,100, 0,1,0x05,0xbe,0x19,0xf0,0x00,0x07,0x0}}, {0x42, 0x47, {0x033,3129,100, 0,1,0x05,0xbe,0x19,0xf0,0x00,0x07,0x0}}, {0x48, 0x52, {0x034,2625,100, 0,1,0x05,0xbe,0x19,0xf0,0x00,0x07,0x0}}, {0x53, 0x6c, {0x0af, 936,100, 0,1,0x00,0xbe,0x19,0xf0,0x00,0x07,0x0}} }; static const struct opl4_region regions_32[] = { /* Synth Strings 1 */ {0x05, 0x71, {0x002,6045,100,-2,0,0x00,0xa6,0x20,0x93,0x22,0x06,0x0}}, {0x15, 0x6c, {0x0ae,3261,100, 2,0,0x00,0xc6,0x20,0x70,0x01,0x06,0x0}} }; static const struct opl4_region regions_33[] = { /* Synth Strings 2 */ {0x15, 0x6c, {0x002,4513,100, 
5,1,0x00,0xb4,0x19,0x70,0x00,0x06,0x0}}, {0x15, 0x6c, {0x002,4501,100,-5,1,0x00,0xb4,0x19,0x70,0x00,0x06,0x0}} }; static const struct opl4_region regions_34[] = { /* Choir Aahs */ {0x15, 0x3a, {0x018,5010,100, 0,2,0x00,0xc2,0x1a,0x70,0x00,0x08,0x0}}, {0x3b, 0x40, {0x019,4370,100, 0,2,0x00,0xc2,0x1a,0x70,0x00,0x08,0x0}}, {0x41, 0x47, {0x01a,3478,100, 0,2,0x00,0xc2,0x1a,0x70,0x00,0x08,0x0}}, {0x48, 0x6c, {0x01b,2197,100, 0,2,0x00,0xc2,0x1a,0x70,0x00,0x08,0x0}} }; static const struct opl4_region regions_35[] = { /* Voice Oohs */ {0x15, 0x6c, {0x029,3596,100, 0,0,0x00,0xe6,0x20,0xf7,0x20,0x08,0x0}} }; static const struct opl4_region regions_36[] = { /* Synth Voice */ {0x15, 0x6c, {0x02a,3482,100, 0,1,0x00,0xc2,0x19,0x85,0x21,0x07,0x0}} }; static const struct opl4_region regions_37[] = { /* Orchestra Hit */ {0x15, 0x6c, {0x049,4394,100, 0,0,0x00,0xfe,0x30,0x80,0x05,0x05,0x0}} }; static const struct opl4_region regions_38[] = { /* Trumpet */ {0x15, 0x3c, {0x0f6,4706,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x3d, 0x43, {0x0f8,3894,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x44, 0x48, {0x0f7,3118,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x49, 0x4e, {0x0fa,2322,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x4f, 0x55, {0x0f9,1634,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}}, {0x56, 0x6c, {0x0fb, 786,100, 0,2,0x00,0xd6,0x32,0xf3,0x20,0x0a,0x0}} }; static const struct opl4_region regions_39[] = { /* Trombone */ {0x15, 0x3a, {0x0f0,5053,100, 0,1,0x00,0xd6,0x21,0xf0,0x00,0x09,0x0}}, {0x3b, 0x3f, {0x0f1,4290,100, 0,1,0x00,0xd6,0x21,0xf0,0x00,0x09,0x0}}, {0x40, 0x6c, {0x0f2,3580,100, 0,1,0x00,0xd6,0x21,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_3a[] = { /* Tuba */ {0x15, 0x2d, {0x085,7096,100, 0,1,0x00,0xde,0x21,0xf5,0x10,0x09,0x0}}, {0x2e, 0x6c, {0x086,6014,100, 0,1,0x00,0xde,0x21,0xf5,0x10,0x09,0x0}} }; static const struct opl4_region regions_3b[] = { /* Muted Trumpet */ {0x15, 0x45, {0x0b1,4135,100, 
0,0,0x00,0xcc,0x28,0xf3,0x10,0x0a,0x1}}, {0x46, 0x6c, {0x0b2,2599,100, 0,0,0x00,0xcc,0x28,0x83,0x10,0x0a,0x1}} }; static const struct opl4_region regions_3c[] = { /* French Horns */ {0x15, 0x49, {0x07c,3624,100, 0,2,0x00,0xd0,0x1a,0xf0,0x00,0x09,0x0}}, {0x4a, 0x6c, {0x07d,2664,100, 0,2,0x00,0xd0,0x1a,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_3d[] = { /* Brass Section */ {0x15, 0x42, {0x0fc,4375,100, 0,0,0x00,0xd6,0x28,0xf0,0x00,0x0a,0x0}}, {0x43, 0x6c, {0x0fd,2854,100, 0,0,0x00,0xd6,0x28,0xf0,0x00,0x0a,0x0}} }; static const struct opl4_region regions_3e[] = { /* Synth Brass 1 */ {0x01, 0x27, {0x0d3,9094,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x28, 0x2d, {0x0da,8335,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x2e, 0x33, {0x0d4,7558,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x34, 0x39, {0x0db,6785,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x3a, 0x3f, {0x0d5,6042,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x40, 0x45, {0x0dc,5257,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x46, 0x4b, {0x0d6,4493,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x4c, 0x51, {0x0dd,3741,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x52, 0x57, {0x0d7,3012,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x58, 0x5d, {0x0de,2167,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x5e, 0x63, {0x0d8,1421,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x64, 0x7f, {0x0d9,-115,100,-1,0,0x00,0xbe,0x18,0xa5,0x11,0x08,0x0}}, {0x01, 0x27, {0x118,9103,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x28, 0x2d, {0x119,8340,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x2e, 0x33, {0x11a,7565,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x34, 0x39, {0x11b,6804,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x3a, 0x3f, {0x11c,6042,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x40, 0x45, {0x11d,5277,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x46, 0x4b, {0x11e,4520,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x4c, 0x51, {0x11f,3741,100, 
1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x52, 0x57, {0x120,3012,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x58, 0x5d, {0x121,2166,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x5e, 0x64, {0x122,1421,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}}, {0x65, 0x7f, {0x123,-115,100, 1,1,0x00,0xbe,0x19,0x85,0x23,0x08,0x0}} }; static const struct opl4_region regions_3f[] = { /* Synth Brass 2 */ {0x01, 0x27, {0x118,9113,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x28, 0x2d, {0x119,8350,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x2e, 0x33, {0x11a,7575,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x34, 0x39, {0x11b,6814,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x3a, 0x3f, {0x11c,6052,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x40, 0x45, {0x11d,5287,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x46, 0x4b, {0x11e,4530,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x4c, 0x51, {0x11f,3751,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x52, 0x57, {0x120,3022,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x58, 0x5d, {0x121,2176,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x5e, 0x64, {0x122,1431,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x65, 0x7f, {0x123,-105,100, 3,6,0x00,0xae,0x26,0x85,0x23,0x08,0x0}}, {0x00, 0x7f, {0x124,4034,100,-3,2,0x00,0xea,0x22,0x85,0x23,0x08,0x0}} }; static const struct opl4_region regions_40[] = { /* Soprano Sax */ {0x15, 0x3f, {0x0e3,4228,100, 0,1,0x00,0xc8,0x21,0xf5,0x20,0x0a,0x0}}, {0x40, 0x45, {0x0e4,3495,100, 0,1,0x00,0xc8,0x21,0xf5,0x20,0x0a,0x0}}, {0x46, 0x4b, {0x0e5,2660,100, 0,1,0x00,0xd6,0x21,0xf5,0x20,0x0a,0x0}}, {0x4c, 0x51, {0x0e6,2002,100, 0,1,0x00,0xd6,0x21,0xf5,0x20,0x0a,0x0}}, {0x52, 0x59, {0x0e7,1186,100, 0,1,0x00,0xd6,0x21,0xf5,0x20,0x0a,0x0}}, {0x59, 0x6c, {0x0e8,1730,100, 0,1,0x00,0xc8,0x21,0xf5,0x20,0x0a,0x0}} }; static const struct opl4_region regions_41[] = { /* Alto Sax */ {0x15, 0x32, {0x092,6204,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x33, 0x35, {0x096,5812,100, 
0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x36, 0x3a, {0x099,5318,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x3b, 0x3b, {0x08f,5076,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x3c, 0x3e, {0x093,4706,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x3f, 0x41, {0x097,4321,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x42, 0x44, {0x09a,3893,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x45, 0x47, {0x090,3497,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x48, 0x4a, {0x094,3119,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x4b, 0x4d, {0x098,2726,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x4e, 0x50, {0x09b,2393,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x51, 0x53, {0x091,2088,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}}, {0x54, 0x6c, {0x095,1732,100, 0,1,0x00,0xbe,0x19,0xf5,0x20,0x0b,0x0}} }; static const struct opl4_region regions_42[] = { /* Tenor Sax */ {0x24, 0x30, {0x0e9,6301,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x31, 0x34, {0x0ea,5781,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x35, 0x3a, {0x0eb,5053,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x3b, 0x41, {0x0ed,4165,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x42, 0x47, {0x0ec,3218,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x48, 0x51, {0x0ee,2462,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}}, {0x52, 0x6c, {0x0ef,1421,100, 0,1,0x00,0xbc,0x19,0xf4,0x10,0x0b,0x0}} }; static const struct opl4_region regions_43[] = { /* Baritone Sax */ {0x15, 0x2d, {0x0df,6714,100, 0,1,0x00,0xce,0x19,0xf0,0x00,0x0a,0x0}}, {0x2e, 0x34, {0x0e1,5552,100, 0,1,0x00,0xce,0x19,0xf0,0x00,0x0a,0x0}}, {0x35, 0x39, {0x0e2,5178,100, 0,1,0x00,0xce,0x19,0xf0,0x00,0x0a,0x0}}, {0x3a, 0x6c, {0x0e0,4437,100, 0,1,0x00,0xce,0x19,0xf0,0x00,0x0a,0x0}} }; static const struct opl4_region regions_44[] = { /* Oboe */ {0x15, 0x3c, {0x042,4493,100, 0,1,0x00,0xe6,0x39,0xf4,0x10,0x0a,0x0}}, {0x3d, 0x43, {0x044,3702,100, 0,1,0x00,0xdc,0x39,0xf4,0x10,0x0a,0x0}}, {0x44, 0x49, {0x043,2956,100, 
0,1,0x00,0xdc,0x39,0xf4,0x10,0x0a,0x0}}, {0x4a, 0x4f, {0x046,2166,100, 0,1,0x00,0xdc,0x39,0xf4,0x10,0x0a,0x0}}, {0x50, 0x55, {0x045,1420,100, 0,1,0x00,0xdc,0x39,0xf4,0x10,0x0a,0x0}}, {0x56, 0x6c, {0x047, 630,100, 0,1,0x00,0xe6,0x39,0xf4,0x10,0x0a,0x0}} }; static const struct opl4_region regions_45[] = { /* English Horn */ {0x15, 0x38, {0x03c,5098,100, 0,1,0x00,0xc4,0x31,0xf0,0x00,0x09,0x0}}, {0x39, 0x3e, {0x03b,4291,100, 0,1,0x00,0xc4,0x31,0xf0,0x00,0x09,0x0}}, {0x3f, 0x6c, {0x03d,3540,100, 0,1,0x00,0xc4,0x31,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_46[] = { /* Bassoon */ {0x15, 0x22, {0x038,7833,100, 0,1,0x00,0xc6,0x31,0xf0,0x00,0x0b,0x0}}, {0x23, 0x2e, {0x03a,7070,100, 0,1,0x00,0xc6,0x31,0xf0,0x00,0x0b,0x0}}, {0x2f, 0x6c, {0x039,6302,100, 0,1,0x00,0xc6,0x31,0xf0,0x00,0x0b,0x0}} }; static const struct opl4_region regions_47[] = { /* Clarinet */ {0x15, 0x3b, {0x09e,5900,100, 0,1,0x00,0xc8,0x29,0xf3,0x20,0x0a,0x0}}, {0x3c, 0x41, {0x0a0,5158,100, 0,1,0x00,0xc8,0x29,0xf3,0x20,0x0a,0x0}}, {0x42, 0x4a, {0x09f,4260,100, 0,1,0x00,0xc8,0x29,0xf3,0x20,0x0a,0x0}}, {0x4b, 0x6c, {0x0a1,2957,100, 0,1,0x00,0xc8,0x29,0xf3,0x20,0x0a,0x0}} }; static const struct opl4_region regions_48[] = { /* Piccolo */ {0x15, 0x40, {0x071,4803,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}}, {0x41, 0x4d, {0x072,3314,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}}, {0x4e, 0x53, {0x073,1731,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}}, {0x54, 0x5f, {0x074,2085,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}}, {0x60, 0x6c, {0x075,1421,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}} }; static const struct opl4_region regions_49[] = { /* Flute */ {0x15, 0x40, {0x071,4803,100, 0,0,0x00,0xdc,0x38,0xf0,0x00,0x0a,0x2}}, {0x41, 0x4d, {0x072,3314,100, 0,0,0x00,0xdc,0x38,0xf0,0x00,0x0a,0x2}}, {0x4e, 0x6c, {0x073,1731,100, 0,0,0x00,0xe6,0x38,0xf0,0x00,0x0a,0x2}} }; static const struct opl4_region regions_4a[] = { /* Recorder */ {0x15, 0x6f, {0x0bd,4897,100, 0,0,0x00,0xec,0x30,0x70,0x00,0x09,0x1}} }; 
static const struct opl4_region regions_4b[] = { /* Pan Flute */ {0x15, 0x6c, {0x077,2359,100, 0,0,0x00,0xde,0x38,0xf0,0x00,0x09,0x3}} }; static const struct opl4_region regions_4c[] = { /* Bottle Blow */ {0x15, 0x6c, {0x077,2359,100, 0,0,0x00,0xc8,0x38,0xf0,0x00,0x09,0x1}}, {0x01, 0x7f, {0x125,7372,100, 0,0,0x1e,0x80,0x00,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_4d[] = { /* Shakuhachi */ {0x00, 0x7f, {0x0ab,4548,100, 0,0,0x00,0xd6,0x30,0xf0,0x00,0x0a,0x3}}, {0x15, 0x6c, {0x076,3716,100, 0,0,0x00,0xa2,0x28,0x70,0x00,0x09,0x2}} }; static const struct opl4_region regions_4e[] = { /* Whistle */ {0x00, 0x7f, {0x0aa,1731,100, 0,4,0x00,0xd2,0x2c,0x70,0x00,0x0a,0x0}} }; static const struct opl4_region regions_4f[] = { /* Ocarina */ {0x00, 0x7f, {0x0aa,1731,100, 0,1,0x00,0xce,0x29,0x90,0x00,0x0a,0x1}} }; static const struct opl4_region regions_50[] = { /* Square Lead */ {0x01, 0x2a, {0x0cc,9853,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x2b, 0x36, {0x0cd,6785,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x37, 0x42, {0x0ca,5248,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x43, 0x4e, {0x0cf,3713,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x4f, 0x5a, {0x0ce,2176,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x5b, 0x7f, {0x0cb, 640,100, 3,0,0x00,0xac,0x38,0xc6,0x21,0x09,0x0}}, {0x01, 0x2a, {0x0cc,9844,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x2b, 0x36, {0x0cd,6776,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x37, 0x42, {0x0ca,5239,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x43, 0x4e, {0x0cf,3704,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x4f, 0x5a, {0x0ce,2167,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}}, {0x5b, 0x7f, {0x0cb, 631,100,-3,0,0x00,0xac,0x08,0xc6,0x21,0x09,0x0}} }; static const struct opl4_region regions_51[] = { /* Sawtooth Lead */ {0x01, 0x27, {0x118,9108,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x28, 0x2d, {0x119,8345,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x2e, 0x33, {0x11a,7570,100, 
0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x34, 0x39, {0x11b,6809,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x3a, 0x3f, {0x11c,6047,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x40, 0x45, {0x11d,5282,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x46, 0x4b, {0x11e,4525,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x4c, 0x51, {0x11f,3746,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x52, 0x57, {0x120,3017,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x58, 0x5d, {0x121,2171,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x5e, 0x66, {0x122,1426,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x67, 0x7f, {0x123,-110,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x01, 0x27, {0x118,9098,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x28, 0x2d, {0x119,8335,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x2e, 0x33, {0x11a,7560,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x34, 0x39, {0x11b,6799,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x3a, 0x3f, {0x11c,6037,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x40, 0x45, {0x11d,5272,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x46, 0x4b, {0x11e,4515,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x4c, 0x51, {0x11f,3736,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x52, 0x57, {0x120,3007,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x58, 0x5d, {0x121,2161,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x5e, 0x66, {0x122,1416,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}}, {0x67, 0x7f, {0x123,-120,100, 0,0,0x00,0xc8,0x30,0xf2,0x22,0x0a,0x0}} }; static const struct opl4_region regions_52[] = { /* Calliope Lead */ {0x00, 0x7f, {0x0aa,1731,100, 0,0,0x00,0xc2,0x28,0x90,0x00,0x0a,0x2}}, {0x15, 0x6c, {0x076,3716,100, 0,0,0x00,0xb6,0x28,0xb0,0x00,0x09,0x2}} }; static const struct opl4_region regions_53[] = { /* Chiffer Lead */ {0x00, 0x7f, {0x13a,3665,100, 0,2,0x00,0xcc,0x2a,0xf0,0x10,0x09,0x1}}, {0x01, 0x7f, {0x0fe,3660,100, 0,0,0x00,0xbe,0x28,0xf3,0x10,0x17,0x0}} }; static const struct opl4_region 
regions_54[] = { /* Charang Lead */ {0x00, 0x40, {0x0a5,6594,100, 0,3,0x00,0xba,0x33,0xf2,0x11,0x09,0x0}}, {0x41, 0x7f, {0x0a6,5433,100, 0,3,0x00,0xba,0x33,0xf2,0x11,0x09,0x0}}, {0x01, 0x27, {0x118,9098,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x28, 0x2d, {0x119,8335,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x2e, 0x33, {0x11a,7560,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x34, 0x39, {0x11b,6799,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x3a, 0x3f, {0x11c,6037,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x40, 0x45, {0x11d,5272,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x46, 0x4b, {0x11e,4515,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x4c, 0x51, {0x11f,3736,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x52, 0x57, {0x120,3007,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x58, 0x5d, {0x121,2161,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x5e, 0x66, {0x122,1416,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}}, {0x67, 0x7f, {0x123,-120,100, 0,2,0x00,0xa4,0x2a,0xf2,0x22,0x0e,0x0}} }; static const struct opl4_region regions_55[] = { /* Voice Lead */ {0x00, 0x7f, {0x0aa,1739,100, 0,6,0x00,0x8c,0x2e,0x90,0x00,0x0a,0x0}}, {0x15, 0x6c, {0x02a,3474,100, 0,1,0x00,0xd8,0x29,0xf0,0x05,0x0a,0x0}} }; static const struct opl4_region regions_56[] = { /* 5ths Lead */ {0x01, 0x27, {0x118,8468,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x28, 0x2d, {0x119,7705,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x2e, 0x33, {0x11a,6930,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x34, 0x39, {0x11b,6169,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x3a, 0x3f, {0x11c,5407,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x40, 0x45, {0x11d,4642,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x46, 0x4b, {0x11e,3885,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x4c, 0x51, {0x11f,3106,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x52, 0x57, {0x120,2377,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x58, 0x5d, {0x121,1531,100, 
0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x5e, 0x64, {0x122, 786,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x65, 0x7f, {0x123,-750,100, 0,2,0x00,0xd0,0x32,0xf5,0x20,0x08,0x0}}, {0x05, 0x71, {0x002,4503,100, 0,1,0x00,0xb8,0x31,0xb3,0x20,0x0b,0x0}} }; static const struct opl4_region regions_57[] = { /* Bass & Lead */ {0x00, 0x7f, {0x117,8109,100, 0,1,0x00,0xbc,0x29,0xf3,0x50,0x08,0x0}}, {0x01, 0x27, {0x118,9097,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x28, 0x2d, {0x119,8334,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x2e, 0x33, {0x11a,7559,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x34, 0x39, {0x11b,6798,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x3a, 0x3f, {0x11c,6036,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x40, 0x45, {0x11d,5271,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x46, 0x4b, {0x11e,4514,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x4c, 0x51, {0x11f,3735,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x52, 0x57, {0x120,3006,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x58, 0x5d, {0x121,2160,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x5e, 0x66, {0x122,1415,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}}, {0x67, 0x7f, {0x123,-121,100, 0,2,0x00,0xbc,0x2a,0xf2,0x20,0x0a,0x0}} }; static const struct opl4_region regions_58[] = { /* New Age Pad */ {0x15, 0x6c, {0x002,4501,100, 0,4,0x00,0xa4,0x24,0x80,0x01,0x05,0x0}}, {0x15, 0x6c, {0x0f3,4253,100, 0,3,0x00,0x8c,0x23,0xa2,0x14,0x06,0x1}} }; static const struct opl4_region regions_59[] = { /* Warm Pad */ {0x15, 0x6c, {0x04e,5306,100, 2,2,0x00,0x92,0x2a,0x34,0x23,0x05,0x2}}, {0x15, 0x6c, {0x029,3575,100,-2,2,0x00,0xbe,0x22,0x31,0x23,0x06,0x0}} }; static const struct opl4_region regions_5a[] = { /* Polysynth Pad */ {0x01, 0x27, {0x118,9111,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x28, 0x2d, {0x119,8348,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x2e, 0x33, {0x11a,7573,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x34, 0x39, {0x11b,6812,100, 
0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x3a, 0x3f, {0x11c,6050,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x40, 0x45, {0x11d,5285,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x46, 0x4b, {0x11e,4528,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x4c, 0x51, {0x11f,3749,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x52, 0x57, {0x120,3020,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x58, 0x5d, {0x121,2174,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x5e, 0x66, {0x122,1429,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x67, 0x7f, {0x123,-107,100, 0,3,0x00,0xae,0x23,0xf2,0x20,0x07,0x1}}, {0x00, 0x7f, {0x124,4024,100, 0,2,0x00,0xae,0x22,0xe5,0x20,0x08,0x0}} }; static const struct opl4_region regions_5b[] = { /* Choir Pad */ {0x15, 0x3a, {0x018,5010,100, 0,5,0x00,0xb0,0x25,0x70,0x00,0x06,0x0}}, {0x3b, 0x40, {0x019,4370,100, 0,5,0x00,0xb0,0x25,0x70,0x00,0x06,0x0}}, {0x41, 0x47, {0x01a,3478,100, 0,5,0x00,0xb0,0x25,0x70,0x00,0x06,0x0}}, {0x48, 0x6c, {0x01b,2197,100, 0,5,0x00,0xb0,0x25,0x70,0x00,0x06,0x0}}, {0x15, 0x6c, {0x02a,3482,100, 0,4,0x00,0x98,0x24,0x65,0x21,0x06,0x0}} }; static const struct opl4_region regions_5c[] = { /* Bowed Pad */ {0x15, 0x6c, {0x101,4790,100,-1,1,0x00,0xbe,0x19,0x44,0x14,0x16,0x0}}, {0x00, 0x7f, {0x0aa,1720,100, 1,1,0x00,0x94,0x19,0x40,0x00,0x06,0x0}} }; static const struct opl4_region regions_5d[] = { /* Metallic Pad */ {0x15, 0x31, {0x00c,6943,100, 0,2,0x00,0xa0,0x0a,0x60,0x03,0x06,0x0}}, {0x32, 0x38, {0x00d,5416,100, 0,2,0x00,0xa0,0x0a,0x60,0x03,0x06,0x0}}, {0x39, 0x47, {0x00e,4385,100, 0,2,0x00,0xa0,0x0a,0x60,0x03,0x06,0x0}}, {0x48, 0x6c, {0x00f,2849,100, 0,2,0x00,0xa0,0x0a,0x60,0x03,0x06,0x0}}, {0x00, 0x7f, {0x03f,4224,100, 0,1,0x00,0x9c,0x31,0x65,0x16,0x07,0x0}} }; static const struct opl4_region regions_5e[] = { /* Halo Pad */ {0x00, 0x7f, {0x124,4038,100, 0,2,0x00,0xa6,0x1a,0x85,0x23,0x08,0x0}}, {0x15, 0x6c, {0x02a,3471,100, 0,3,0x00,0xc0,0x1b,0xc0,0x05,0x06,0x0}} }; static const struct opl4_region regions_5f[] = { /* 
Sweep Pad */ {0x01, 0x27, {0x0d3,9100,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x28, 0x2d, {0x0da,8341,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x2e, 0x33, {0x0d4,7564,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x34, 0x39, {0x0db,6791,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x3a, 0x3f, {0x0d5,6048,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x40, 0x45, {0x0dc,5263,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x46, 0x4b, {0x0d6,4499,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x4c, 0x51, {0x0dd,3747,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x52, 0x57, {0x0d7,3018,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x58, 0x5d, {0x0de,2173,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x5e, 0x63, {0x0d8,1427,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x64, 0x7f, {0x0d9,-109,100, 0,1,0x00,0xce,0x19,0x13,0x11,0x06,0x0}}, {0x01, 0x27, {0x0d3,9088,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x28, 0x2d, {0x0da,8329,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x2e, 0x33, {0x0d4,7552,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x34, 0x39, {0x0db,6779,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x3a, 0x3f, {0x0d5,6036,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x40, 0x45, {0x0dc,5251,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x46, 0x4b, {0x0d6,4487,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x4c, 0x51, {0x0dd,3735,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x52, 0x57, {0x0d7,3006,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x58, 0x5d, {0x0de,2161,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x5e, 0x63, {0x0d8,1415,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}}, {0x64, 0x7f, {0x0d9,-121,100, 0,0,0x00,0xce,0x18,0x13,0x11,0x06,0x0}} }; static const struct opl4_region regions_60[] = { /* Ice Rain */ {0x01, 0x7f, {0x04e,9345,100, 0,2,0x00,0xcc,0x22,0xa3,0x63,0x17,0x0}}, {0x00, 0x7f, {0x143,5586, 20, 0,2,0x00,0x6e,0x2a,0xf0,0x05,0x05,0x0}} }; static const struct opl4_region regions_61[] = { /* Soundtrack */ {0x15, 
0x6c, {0x002,4501,100, 0,2,0x00,0xb6,0x2a,0x60,0x01,0x05,0x0}}, {0x15, 0x6c, {0x0f3,1160,100, 0,5,0x00,0xa8,0x2d,0x52,0x14,0x06,0x2}} }; static const struct opl4_region regions_62[] = { /* Crystal */ {0x15, 0x6c, {0x0f3,1826,100, 0,3,0x00,0xb8,0x33,0xf6,0x25,0x25,0x0}}, {0x15, 0x2c, {0x06d,7454,100, 0,3,0x00,0xac,0x3b,0x85,0x24,0x06,0x0}}, {0x2d, 0x36, {0x06e,5925,100, 0,3,0x00,0xac,0x3b,0x85,0x24,0x06,0x0}}, {0x37, 0x6c, {0x06f,4403,100, 0,3,0x09,0xac,0x3b,0x85,0x24,0x06,0x0}} }; static const struct opl4_region regions_63[] = { /* Atmosphere */ {0x05, 0x71, {0x002,4509,100, 0,2,0x00,0xc8,0x32,0x73,0x22,0x06,0x1}}, {0x15, 0x2f, {0x0b3,6964,100, 0,2,0x05,0xc2,0x32,0xf5,0x34,0x07,0x2}}, {0x30, 0x36, {0x0b7,5567,100, 0,2,0x0c,0xc2,0x32,0xf5,0x34,0x07,0x2}}, {0x37, 0x3c, {0x0b5,4653,100, 0,2,0x00,0xc2,0x32,0xf6,0x34,0x07,0x2}}, {0x3d, 0x43, {0x0b4,3892,100, 0,2,0x00,0xc2,0x32,0xf6,0x35,0x07,0x2}}, {0x44, 0x60, {0x0b6,2723,100, 0,2,0x00,0xc2,0x32,0xf6,0x35,0x17,0x2}} }; static const struct opl4_region regions_64[] = { /* Brightness */ {0x00, 0x7f, {0x137,5285,100, 0,2,0x00,0xbe,0x2a,0xa5,0x18,0x08,0x0}}, {0x15, 0x6c, {0x02a,3481,100, 0,1,0x00,0xc8,0x29,0x80,0x05,0x05,0x0}} }; static const struct opl4_region regions_65[] = { /* Goblins */ {0x15, 0x6c, {0x002,4501,100,-1,2,0x00,0xca,0x2a,0x40,0x01,0x05,0x0}}, {0x15, 0x6c, {0x009,9679, 20, 1,4,0x00,0x3c,0x0c,0x22,0x11,0x06,0x0}} }; static const struct opl4_region regions_66[] = { /* Echoes */ {0x15, 0x6c, {0x02a,3487,100, 0,3,0x00,0xae,0x2b,0xf5,0x21,0x06,0x0}}, {0x00, 0x7f, {0x124,4027,100, 0,3,0x00,0xae,0x2b,0x85,0x23,0x07,0x0}} }; static const struct opl4_region regions_67[] = { /* Sci-Fi */ {0x15, 0x31, {0x00c,6940,100, 0,3,0x00,0xc8,0x2b,0x90,0x05,0x06,0x3}}, {0x32, 0x38, {0x00d,5413,100, 0,3,0x00,0xc8,0x2b,0x90,0x05,0x06,0x3}}, {0x39, 0x47, {0x00e,4382,100, 0,3,0x00,0xc8,0x2b,0x90,0x05,0x06,0x3}}, {0x48, 0x6c, {0x00f,2846,100, 0,3,0x00,0xc8,0x2b,0x90,0x05,0x06,0x3}}, {0x15, 0x6c, {0x002,4498,100, 
0,2,0x00,0xd4,0x22,0x80,0x01,0x05,0x0}} }; static const struct opl4_region regions_68[] = { /* Sitar */ {0x00, 0x7f, {0x10f,4408,100, 0,2,0x00,0xc4,0x32,0xf4,0x15,0x16,0x1}} }; static const struct opl4_region regions_69[] = { /* Banjo */ {0x15, 0x34, {0x013,5685,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x35, 0x38, {0x014,5009,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x39, 0x3c, {0x012,4520,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x3d, 0x44, {0x015,3622,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x45, 0x4c, {0x017,2661,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}}, {0x4d, 0x6d, {0x016,1632,100, 0,0,0x00,0xdc,0x38,0xf6,0x15,0x09,0x0}} }; static const struct opl4_region regions_6a[] = { /* Shamisen */ {0x15, 0x6c, {0x10e,3273,100, 0,0,0x00,0xc0,0x28,0xf7,0x76,0x08,0x0}} }; static const struct opl4_region regions_6b[] = { /* Koto */ {0x00, 0x7f, {0x0a9,4033,100, 0,0,0x00,0xc6,0x20,0xf0,0x06,0x07,0x0}} }; static const struct opl4_region regions_6c[] = { /* Kalimba */ {0x00, 0x7f, {0x137,3749,100, 0,0,0x00,0xce,0x38,0xf5,0x18,0x08,0x0}} }; static const struct opl4_region regions_6d[] = { /* Bagpipe */ {0x15, 0x39, {0x0a4,7683,100, 0,4,0x00,0xc0,0x1c,0xf0,0x00,0x09,0x0}}, {0x15, 0x39, {0x0a7,7680,100, 0,1,0x00,0xaa,0x19,0xf0,0x00,0x09,0x0}}, {0x3a, 0x6c, {0x0a8,3697,100, 0,1,0x00,0xaa,0x19,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_6e[] = { /* Fiddle */ {0x15, 0x3a, {0x105,5158,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x3b, 0x3f, {0x102,4754,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x40, 0x41, {0x106,4132,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x42, 0x44, {0x107,4033,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x45, 0x47, {0x108,3580,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x48, 0x4a, {0x10a,2957,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x4b, 0x4c, {0x10b,2724,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x4d, 0x4e, {0x10c,2530,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x4f, 0x51, 
{0x10d,2166,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}}, {0x52, 0x6c, {0x109,1825,100, 0,1,0x00,0xca,0x31,0xf3,0x20,0x09,0x0}} }; static const struct opl4_region regions_6f[] = { /* Shanai */ {0x15, 0x6c, {0x041,6946,100, 0,1,0x00,0xc4,0x31,0x95,0x20,0x09,0x0}} }; static const struct opl4_region regions_70[] = { /* Tinkle Bell */ {0x15, 0x73, {0x0f3,1821,100, 0,3,0x00,0xc8,0x3b,0xd6,0x25,0x25,0x0}}, {0x00, 0x7f, {0x137,5669,100, 0,3,0x00,0x66,0x3b,0xf5,0x18,0x08,0x0}} }; static const struct opl4_region regions_71[] = { /* Agogo */ {0x15, 0x74, {0x00b,2474,100, 0,0,0x00,0xd2,0x38,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_72[] = { /* Steel Drums */ {0x01, 0x7f, {0x0fe,3670,100, 0,0,0x00,0xca,0x38,0xf3,0x06,0x17,0x1}}, {0x15, 0x6c, {0x100,9602,100, 0,0,0x00,0x54,0x38,0xb0,0x05,0x16,0x1}} }; static const struct opl4_region regions_73[] = { /* Woodblock */ {0x15, 0x6c, {0x02c,2963, 50, 0,0,0x07,0xd4,0x00,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_74[] = { /* Taiko Drum */ {0x13, 0x6c, {0x03e,1194, 50, 0,0,0x00,0xaa,0x38,0xf0,0x04,0x04,0x0}} }; static const struct opl4_region regions_75[] = { /* Melodic Tom */ {0x15, 0x6c, {0x0c7,6418, 50, 0,0,0x00,0xe4,0x38,0xf0,0x05,0x01,0x0}} }; static const struct opl4_region regions_76[] = { /* Synth Drum */ {0x15, 0x6c, {0x026,3898, 50, 0,0,0x00,0xd0,0x38,0xf0,0x04,0x04,0x0}} }; static const struct opl4_region regions_77[] = { /* Reverse Cymbal */ {0x15, 0x6c, {0x031,4138, 50, 0,0,0x00,0xfe,0x38,0x3a,0xf0,0x09,0x0}} }; static const struct opl4_region regions_78[] = { /* Guitar Fret Noise */ {0x15, 0x6c, {0x138,5266,100, 0,0,0x00,0xa0,0x38,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_79[] = { /* Breath Noise */ {0x01, 0x7f, {0x125,4269,100, 0,0,0x1e,0xd0,0x38,0xf0,0x00,0x09,0x0}} }; static const struct opl4_region regions_7a[] = { /* Seashore */ {0x15, 0x6c, {0x008,2965, 20,-2,0,0x00,0xfe,0x00,0x20,0x03,0x04,0x0}}, {0x01, 0x7f, {0x037,4394, 20, 
2,0,0x14,0xfe,0x00,0x20,0x04,0x05,0x0}} }; static const struct opl4_region regions_7b[] = { /* Bird Tweet */ {0x15, 0x6c, {0x009,8078, 5,-4,7,0x00,0xc2,0x0f,0x22,0x12,0x07,0x0}}, {0x15, 0x6c, {0x009,3583, 5, 4,5,0x00,0xae,0x15,0x72,0x12,0x07,0x0}} }; static const struct opl4_region regions_7c[] = { /* Telephone Ring */ {0x15, 0x6c, {0x003,3602, 10, 0,0,0x00,0xce,0x00,0xf0,0x00,0x0f,0x0}} }; static const struct opl4_region regions_7d[] = { /* Helicopter */ {0x0c, 0x7f, {0x001,2965, 10,-2,0,0x00,0xe0,0x08,0x30,0x01,0x07,0x0}}, {0x01, 0x7f, {0x037,4394, 10, 2,0,0x44,0x76,0x00,0x30,0x01,0x07,0x0}} }; static const struct opl4_region regions_7e[] = { /* Applause */ {0x15, 0x6c, {0x036,8273, 20,-6,7,0x00,0xc4,0x0f,0x70,0x01,0x05,0x0}}, {0x15, 0x6c, {0x036,8115, 5, 6,7,0x00,0xc6,0x07,0x70,0x01,0x05,0x0}} }; static const struct opl4_region regions_7f[] = { /* Gun Shot */ {0x15, 0x6c, {0x139,2858, 20, 0,0,0x00,0xbe,0x38,0xf0,0x03,0x00,0x0}} }; static const struct opl4_region regions_drums[] = { {0x18, 0x18, {0x0cb,6397,100, 3,0,0x00,0xf4,0x38,0xc9,0x1c,0x0c,0x0}}, {0x19, 0x19, {0x0c4,3714,100, 0,0,0x00,0xe0,0x00,0x97,0x19,0x09,0x0}}, {0x1a, 0x1a, {0x0c4,3519,100, 0,0,0x00,0xea,0x00,0x61,0x01,0x07,0x0}}, {0x1b, 0x1b, {0x0c4,3586,100, 0,0,0x00,0xea,0x00,0xf7,0x19,0x09,0x0}}, {0x1c, 0x1c, {0x0c4,3586,100, 0,0,0x00,0xea,0x00,0x81,0x01,0x07,0x0}}, {0x1e, 0x1e, {0x0c3,4783,100, 0,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x1f, 0x1f, {0x0d1,4042,100, 0,0,0x00,0xd6,0x00,0xf0,0x05,0x05,0x0}}, {0x20, 0x20, {0x0d2,5943,100, 0,0,0x00,0xcc,0x00,0xf0,0x00,0x09,0x0}}, {0x21, 0x21, {0x011,3842,100, 0,0,0x00,0xea,0x00,0xf0,0x16,0x06,0x0}}, {0x23, 0x23, {0x011,4098,100, 0,0,0x00,0xea,0x00,0xf0,0x16,0x06,0x0}}, {0x24, 0x24, {0x011,4370,100, 0,0,0x00,0xea,0x00,0xf0,0x00,0x06,0x0}}, {0x25, 0x25, {0x0d2,4404,100, 0,0,0x00,0xd6,0x00,0xf0,0x00,0x06,0x0}}, {0x26, 0x26, {0x0d1,4298,100, 0,0,0x00,0xd6,0x00,0xf0,0x05,0x05,0x0}}, {0x27, 0x27, {0x00a,4403,100,-1,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, 
{0x28, 0x28, {0x0d1,4554,100, 0,0,0x00,0xdc,0x00,0xf0,0x07,0x07,0x0}}, {0x29, 0x29, {0x0c8,4242,100,-4,0,0x00,0xd6,0x00,0xf6,0x16,0x06,0x0}}, {0x2a, 0x2a, {0x079,6160,100, 2,0,0x00,0xe0,0x00,0xf5,0x19,0x09,0x0}}, {0x2b, 0x2b, {0x0c8,4626,100,-3,0,0x00,0xd6,0x00,0xf6,0x16,0x06,0x0}}, {0x2c, 0x2c, {0x07b,6039,100, 2,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, {0x2d, 0x2d, {0x0c8,5394,100,-2,0,0x00,0xd6,0x00,0xf6,0x16,0x06,0x0}}, {0x2e, 0x2e, {0x07a,5690,100, 2,0,0x00,0xd6,0x00,0xf0,0x00,0x05,0x0}}, {0x2f, 0x2f, {0x0c7,5185,100, 2,0,0x00,0xe0,0x00,0xf6,0x17,0x07,0x0}}, {0x30, 0x30, {0x0c7,5650,100, 3,0,0x00,0xe0,0x00,0xf6,0x17,0x07,0x0}}, {0x31, 0x31, {0x031,4395,100, 2,0,0x00,0xea,0x00,0xf0,0x05,0x05,0x0}}, {0x32, 0x32, {0x0c7,6162,100, 4,0,0x00,0xe0,0x00,0xf6,0x17,0x07,0x0}}, {0x33, 0x33, {0x02e,4391,100,-2,0,0x00,0xea,0x00,0xf0,0x05,0x05,0x0}}, {0x34, 0x34, {0x07a,3009,100,-2,0,0x00,0xea,0x00,0xf2,0x15,0x05,0x0}}, {0x35, 0x35, {0x021,4522,100,-3,0,0x00,0xd6,0x00,0xf0,0x05,0x05,0x0}}, {0x36, 0x36, {0x025,5163,100, 1,0,0x00,0xe0,0x00,0xf0,0x00,0x09,0x0}}, {0x37, 0x37, {0x031,5287,100,-1,0,0x00,0xea,0x00,0xf5,0x16,0x06,0x0}}, {0x38, 0x38, {0x01d,4395,100, 2,0,0x00,0xe0,0x00,0xf0,0x00,0x09,0x0}}, {0x39, 0x39, {0x031,4647,100,-2,0,0x00,0xea,0x00,0xf4,0x16,0x06,0x0}}, {0x3a, 0x3a, {0x09d,4426,100,-4,0,0x00,0xe0,0x00,0xf4,0x17,0x07,0x0}}, {0x3b, 0x3b, {0x02e,4659,100,-2,0,0x00,0xea,0x00,0xf0,0x06,0x06,0x0}}, {0x3c, 0x3c, {0x01c,4769,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x3d, 0x3d, {0x01c,4611,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x3e, 0x3e, {0x01e,4402,100,-3,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x3f, 0x3f, {0x01f,4387,100,-3,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x40, 0x40, {0x01f,3983,100,-2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x41, 0x41, {0x09c,4526,100, 2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x42, 0x42, {0x09c,4016,100, 2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x43, 0x43, {0x00b,4739,100,-4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x44, 
0x44, {0x00b,4179,100,-4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x45, 0x45, {0x02f,4787,100,-4,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, {0x46, 0x46, {0x030,4665,100,-4,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, {0x47, 0x47, {0x144,4519,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x0b,0x0}}, {0x48, 0x48, {0x144,4111,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x0b,0x0}}, {0x49, 0x49, {0x024,6408,100, 3,0,0x00,0xe0,0x00,0xf0,0x00,0x09,0x0}}, {0x4a, 0x4a, {0x024,4144,100, 3,0,0x00,0xcc,0x00,0xf0,0x00,0x09,0x0}}, {0x4b, 0x4b, {0x020,4001,100, 2,0,0x00,0xe0,0x00,0xf0,0x00,0x09,0x0}}, {0x4c, 0x4c, {0x02c,4402,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x4d, 0x4d, {0x02c,3612,100, 4,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x4e, 0x4e, {0x022,4129,100,-2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x4f, 0x4f, {0x023,4147,100,-2,0,0x00,0xea,0x00,0xf0,0x00,0x09,0x0}}, {0x50, 0x50, {0x032,4412,100,-4,0,0x00,0xd6,0x00,0xf0,0x08,0x09,0x0}}, {0x51, 0x51, {0x032,4385,100,-4,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}}, {0x52, 0x52, {0x02f,5935,100,-1,0,0x00,0xd6,0x00,0xf0,0x00,0x09,0x0}} }; #define REGION(num) { ARRAY_SIZE(regions ## num), regions ## num } const struct opl4_region_ptr snd_yrw801_regions[0x81] = { REGION(_00), REGION(_01), REGION(_02), REGION(_03), REGION(_04), REGION(_05), REGION(_06), REGION(_07), REGION(_08), REGION(_09), REGION(_0a), REGION(_0b), REGION(_0c), REGION(_0d), REGION(_0e), REGION(_0f), REGION(_10), REGION(_11), REGION(_12), REGION(_13), REGION(_14), REGION(_15), REGION(_16), REGION(_17), REGION(_18), REGION(_19), REGION(_1a), REGION(_1b), REGION(_1c), REGION(_1d), REGION(_1e), REGION(_1f), REGION(_20), REGION(_21), REGION(_22), REGION(_23), REGION(_24), REGION(_25), REGION(_26), REGION(_27), REGION(_28), REGION(_29), REGION(_2a), REGION(_2b), REGION(_2c), REGION(_2d), REGION(_2e), REGION(_2f), REGION(_30), REGION(_31), REGION(_32), REGION(_33), REGION(_34), REGION(_35), REGION(_36), REGION(_37), REGION(_38), REGION(_39), REGION(_3a), REGION(_3b), REGION(_3c), REGION(_3d), 
REGION(_3e), REGION(_3f), REGION(_40), REGION(_41), REGION(_42), REGION(_43), REGION(_44), REGION(_45), REGION(_46), REGION(_47), REGION(_48), REGION(_49), REGION(_4a), REGION(_4b), REGION(_4c), REGION(_4d), REGION(_4e), REGION(_4f), REGION(_50), REGION(_51), REGION(_52), REGION(_53), REGION(_54), REGION(_55), REGION(_56), REGION(_57), REGION(_58), REGION(_59), REGION(_5a), REGION(_5b), REGION(_5c), REGION(_5d), REGION(_5e), REGION(_5f), REGION(_60), REGION(_61), REGION(_62), REGION(_63), REGION(_64), REGION(_65), REGION(_66), REGION(_67), REGION(_68), REGION(_69), REGION(_6a), REGION(_6b), REGION(_6c), REGION(_6d), REGION(_6e), REGION(_6f), REGION(_70), REGION(_71), REGION(_72), REGION(_73), REGION(_74), REGION(_75), REGION(_76), REGION(_77), REGION(_78), REGION(_79), REGION(_7a), REGION(_7b), REGION(_7c), REGION(_7d), REGION(_7e), REGION(_7f), REGION(_drums) };
gpl-2.0
kendling/android_kernel_google_dragon
arch/powerpc/mm/mmap.c
229
2768
/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole on 32bit applications.
 *
 * On 64bit applications we randomise the stack by 1GB so we need to
 * space our mmap start address by a further 1GB, otherwise there is a
 * chance the mmap area will end up closer to the stack than our ulimit
 * requires.
 */
#define MIN_GAP32	(128*1024*1024)
#define MIN_GAP64	((128 + 1024)*1024*1024UL)
/* Minimum stack-to-mmap gap, chosen by task word size at run time. */
#define MIN_GAP		((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
/* Never let the gap eat more than 5/6 of the whole address space. */
#define MAX_GAP		(TASK_SIZE/6*5)

/*
 * mmap_is_legacy - decide whether this process must use the legacy
 * (bottom-up) mmap layout.
 *
 * Returns non-zero when the ADDR_COMPAT_LAYOUT personality bit is set,
 * when the stack rlimit is unlimited (a top-down mmap base cannot be
 * placed below an unbounded stack), or when the legacy layout has been
 * requested globally via the sysctl.
 */
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * mmap_rnd - pick the random offset applied to the mmap base.
 *
 * Returns 0 unless the task has PF_RANDOMIZE set.  The offset is drawn
 * in whole pages and scaled back to bytes by the final shift:
 * up to 8MB worth of pages for 32bit tasks, up to 1GB for 64bit tasks
 * (2^23 and 2^30 bytes respectively, divided into PAGE_SHIFT pages).
 */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	if (current->flags & PF_RANDOMIZE) {
		/* 8MB for 32bit, 1GB for 64bit */
		if (is_32bit_task())
			rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
		else
			rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
	}
	return rnd << PAGE_SHIFT;
}

/*
 * mmap_base - compute the top-down mmap base address.
 *
 * Takes the current stack rlimit as the gap to leave below TASK_SIZE,
 * clamps it to [MIN_GAP, MAX_GAP], subtracts the randomisation offset,
 * and page-aligns the result.
 */
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 * either the legacy bottom-up layout starting at TASK_UNMAPPED_BASE,
 * or the flexible top-down layout starting just below the stack gap.
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
gpl-2.0
itsmerajit/kernel_otus
arch/sparc/mm/fault_32.c
485
14927
/* * fault.c: Page fault handlers for the Sparc. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <asm/head.h> #include <linux/string.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/threads.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/perf_event.h> #include <linux/interrupt.h> #include <linux/kdebug.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/memreg.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/smp.h> #include <asm/traps.h> #include <asm/uaccess.h> extern int prom_node_root; int show_unhandled_signals = 1; /* At boot time we determine these two values necessary for setting * up the segment maps and page table entries (pte's). */ int num_segmaps, num_contexts; int invalid_segment; /* various Virtual Address Cache parameters we find at boot time... */ int vac_size, vac_linesize, vac_do_hw_vac_flushes; int vac_entries_per_context, vac_entries_per_segment; int vac_entries_per_page; /* Return how much physical memory we have. */ unsigned long probe_memory(void) { unsigned long total = 0; int i; for (i = 0; sp_banks[i].num_bytes; i++) total += sp_banks[i].num_bytes; return total; } extern void sun4c_complete_all_stores(void); /* Whee, a level 15 NMI interrupt memory error. Let's have fun... 
*/ asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr, unsigned long svaddr, unsigned long aerr, unsigned long avaddr) { sun4c_complete_all_stores(); printk("FAULT: NMI received\n"); printk("SREGS: Synchronous Error %08lx\n", serr); printk(" Synchronous Vaddr %08lx\n", svaddr); printk(" Asynchronous Error %08lx\n", aerr); printk(" Asynchronous Vaddr %08lx\n", avaddr); if (sun4c_memerr_reg) printk(" Memory Parity Error %08lx\n", *sun4c_memerr_reg); printk("REGISTER DUMP:\n"); show_regs(regs); prom_halt(); } static void unhandled_fault(unsigned long, struct task_struct *, struct pt_regs *) __attribute__ ((noreturn)); static void unhandled_fault(unsigned long address, struct task_struct *tsk, struct pt_regs *regs) { if((unsigned long) address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference\n"); } else { printk(KERN_ALERT "Unable to handle kernel paging request " "at virtual address %08lx\n", address); } printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n", (tsk->mm ? tsk->mm->context : tsk->active_mm->context)); printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n", (tsk->mm ? 
(unsigned long) tsk->mm->pgd : (unsigned long) tsk->active_mm->pgd)); die_if_kernel("Oops", regs); } asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc, unsigned long address) { struct pt_regs regs; unsigned long g2; unsigned int insn; int i; i = search_extables_range(ret_pc, &g2); switch (i) { case 3: /* load & store will be handled by fixup */ return 3; case 1: /* store will be handled by fixup, load will bump out */ /* for _to_ macros */ insn = *((unsigned int *) pc); if ((insn >> 21) & 1) return 1; break; case 2: /* load will be handled by fixup, store will bump out */ /* for _from_ macros */ insn = *((unsigned int *) pc); if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15) return 2; break; default: break; } memset(&regs, 0, sizeof (regs)); regs.pc = pc; regs.npc = pc + 4; __asm__ __volatile__( "rd %%psr, %0\n\t" "nop\n\t" "nop\n\t" "nop\n" : "=r" (regs.psr)); unhandled_fault(address, current, &regs); /* Not reached */ return 0; } static inline void show_signal_msg(struct pt_regs *regs, int sig, int code, unsigned long address, struct task_struct *tsk) { if (!unhandled_signal(tsk, sig)) return; if (!printk_ratelimit()) return; printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x", task_pid_nr(tsk) > 1 ? 
KERN_INFO : KERN_EMERG, tsk->comm, task_pid_nr(tsk), address, (void *)regs->pc, (void *)regs->u_regs[UREG_I7], (void *)regs->u_regs[UREG_FP], code); print_vma_addr(KERN_CONT " in ", regs->pc); printk(KERN_CONT "\n"); } static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs, unsigned long addr) { siginfo_t info; info.si_signo = sig; info.si_code = code; info.si_errno = 0; info.si_addr = (void __user *) addr; info.si_trapno = 0; if (unlikely(show_unhandled_signals)) show_signal_msg(regs, sig, info.si_code, addr, current); force_sig_info (sig, &info, current); } extern unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int); static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) { unsigned int insn; if (text_fault) return regs->pc; if (regs->psr & PSR_PS) { insn = *(unsigned int *) regs->pc; } else { __get_user(insn, (unsigned int *) regs->pc); } return safe_compute_effective_address(regs, insn); } static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, int text_fault) { unsigned long addr = compute_si_addr(regs, text_fault); __do_fault_siginfo(code, sig, regs, addr); } asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, unsigned long address) { struct vm_area_struct *vma; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; unsigned int fixup; unsigned long g2; int from_user = !(regs->psr & PSR_PS); int fault, code; unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | (write ? FAULT_FLAG_WRITE : 0)); if(text_fault) address = regs->pc; /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more. 
*/ code = SEGV_MAPERR; if (!ARCH_SUN4C && address >= TASK_SIZE) goto vmalloc_fault; /* * If we're in an interrupt or have no user * context, we must not take the fault.. */ if (in_atomic() || !mm) goto no_context; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: down_read(&mm->mmap_sem); /* * The kernel referencing a bad kernel pointer can lock up * a sun4c machine completely, so we must attempt recovery. */ if(!from_user && address >= PAGE_OFFSET) goto bad_area; vma = find_vma(mm, address); if(!vma) goto bad_area; if(vma->vm_start <= address) goto good_area; if(!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if(expand_stack(vma, address)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ good_area: code = SEGV_ACCERR; if(write) { if(!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { /* Allow reads even for write-only mappings */ if(!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { current->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { current->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; /* No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ goto retry; } } up_read(&mm->mmap_sem); return; /* * Something tried to access memory that isn't in our memory map.. 
* Fix it, but check if it's kernel or user first.. */ bad_area: up_read(&mm->mmap_sem); bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (from_user) { do_fault_siginfo(code, SIGSEGV, regs, text_fault); return; } /* Is this in ex_table? */ no_context: g2 = regs->u_regs[UREG_G2]; if (!from_user) { fixup = search_extables_range(regs->pc, &g2); if (fixup > 10) { /* Values below are reserved for other things */ extern const unsigned __memset_start[]; extern const unsigned __memset_end[]; extern const unsigned __csum_partial_copy_start[]; extern const unsigned __csum_partial_copy_end[]; #ifdef DEBUG_EXCEPTIONS printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address); printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n", regs->pc, fixup, g2); #endif if ((regs->pc >= (unsigned long)__memset_start && regs->pc < (unsigned long)__memset_end) || (regs->pc >= (unsigned long)__csum_partial_copy_start && regs->pc < (unsigned long)__csum_partial_copy_end)) { regs->u_regs[UREG_I4] = address; regs->u_regs[UREG_I5] = regs->pc; } regs->u_regs[UREG_G2] = g2; regs->pc = fixup; regs->npc = regs->pc + 4; return; } } unhandled_fault (address, tsk, regs); do_exit(SIGKILL); /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: up_read(&mm->mmap_sem); if (from_user) { pagefault_out_of_memory(); return; } goto no_context; do_sigbus: up_read(&mm->mmap_sem); do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault); if (!from_user) goto no_context; vmalloc_fault: { /* * Synchronize this task's top level page-table * with the 'reference' page table. 
*/ int offset = pgd_index(address); pgd_t *pgd, *pgd_k; pmd_t *pmd, *pmd_k; pgd = tsk->active_mm->pgd + offset; pgd_k = init_mm.pgd + offset; if (!pgd_present(*pgd)) { if (!pgd_present(*pgd_k)) goto bad_area_nosemaphore; pgd_val(*pgd) = pgd_val(*pgd_k); return; } pmd = pmd_offset(pgd, address); pmd_k = pmd_offset(pgd_k, address); if (pmd_present(*pmd) || !pmd_present(*pmd_k)) goto bad_area_nosemaphore; *pmd = *pmd_k; return; } } asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write, unsigned long address) { extern void sun4c_update_mmu_cache(struct vm_area_struct *, unsigned long,pte_t *); extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long); struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; pgd_t *pgdp; pte_t *ptep; if (text_fault) { address = regs->pc; } else if (!write && !(regs->psr & PSR_PS)) { unsigned int insn, __user *ip; ip = (unsigned int __user *)regs->pc; if (!get_user(insn, ip)) { if ((insn & 0xc1680000) == 0xc0680000) write = 1; } } if (!mm) { /* We are oopsing. 
*/ do_sparc_fault(regs, text_fault, write, address); BUG(); /* P3 Oops already, you bitch */ } pgdp = pgd_offset(mm, address); ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address); if (pgd_val(*pgdp)) { if (write) { if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) { unsigned long flags; *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_MODIFIED | _SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY); local_irq_save(flags); if (sun4c_get_segmap(address) != invalid_segment) { sun4c_put_pte(address, pte_val(*ptep)); local_irq_restore(flags); return; } local_irq_restore(flags); } } else { if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) { unsigned long flags; *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_VALID); local_irq_save(flags); if (sun4c_get_segmap(address) != invalid_segment) { sun4c_put_pte(address, pte_val(*ptep)); local_irq_restore(flags); return; } local_irq_restore(flags); } } } /* This conditional is 'interesting'. */ if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE)) && (pte_val(*ptep) & _SUN4C_PAGE_VALID)) /* Note: It is safe to not grab the MMAP semaphore here because * we know that update_mmu_cache() will not sleep for * any reason (at least not in the current implementation) * and therefore there is no danger of another thread getting * on the CPU and doing a shrink_mmap() on this vma. */ sun4c_update_mmu_cache (find_vma(current->mm, address), address, ptep); else do_sparc_fault(regs, text_fault, write, address); } /* This always deals with user addresses. 
*/ static void force_user_fault(unsigned long address, int write) { struct vm_area_struct *vma; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; int code; code = SEGV_MAPERR; down_read(&mm->mmap_sem); vma = find_vma(mm, address); if(!vma) goto bad_area; if(vma->vm_start <= address) goto good_area; if(!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if(expand_stack(vma, address)) goto bad_area; good_area: code = SEGV_ACCERR; if(write) { if(!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { if(!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) { case VM_FAULT_SIGBUS: case VM_FAULT_OOM: goto do_sigbus; } up_read(&mm->mmap_sem); return; bad_area: up_read(&mm->mmap_sem); __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address); return; do_sigbus: up_read(&mm->mmap_sem); __do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address); } static void check_stack_aligned(unsigned long sp) { if (sp & 0x7UL) force_sig(SIGILL, current); } void window_overflow_fault(void) { unsigned long sp; sp = current_thread_info()->rwbuf_stkptrs[0]; if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) force_user_fault(sp + 0x38, 1); force_user_fault(sp, 1); check_stack_aligned(sp); } void window_underflow_fault(unsigned long sp) { if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) force_user_fault(sp + 0x38, 0); force_user_fault(sp, 0); check_stack_aligned(sp); } void window_ret_fault(struct pt_regs *regs) { unsigned long sp; sp = regs->u_regs[UREG_FP]; if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) force_user_fault(sp + 0x38, 0); force_user_fault(sp, 0); check_stack_aligned(sp); }
gpl-2.0
olegsvs/android_kernel_ark_benefit_m7_mm
drivers/pwm/pwm-atmel.c
741
9890
/* * Driver for Atmel Pulse Width Modulation Controller * * Copyright (C) 2013 Atmel Corporation * Bo Shen <voice.shen@atmel.com> * * Licensed under GPLv2. */ #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> /* The following is global registers for PWM controller */ #define PWM_ENA 0x04 #define PWM_DIS 0x08 #define PWM_SR 0x0C /* Bit field in SR */ #define PWM_SR_ALL_CH_ON 0x0F /* The following register is PWM channel related registers */ #define PWM_CH_REG_OFFSET 0x200 #define PWM_CH_REG_SIZE 0x20 #define PWM_CMR 0x0 /* Bit field in CMR */ #define PWM_CMR_CPOL (1 << 9) #define PWM_CMR_UPD_CDTY (1 << 10) #define PWM_CMR_CPRE_MSK 0xF /* The following registers for PWM v1 */ #define PWMV1_CDTY 0x04 #define PWMV1_CPRD 0x08 #define PWMV1_CUPD 0x10 /* The following registers for PWM v2 */ #define PWMV2_CDTY 0x04 #define PWMV2_CDTYUPD 0x08 #define PWMV2_CPRD 0x0C #define PWMV2_CPRDUPD 0x10 /* * Max value for duty and period * * Although the duty and period register is 32 bit, * however only the LSB 16 bits are significant. 
*/ #define PWM_MAX_DTY 0xFFFF #define PWM_MAX_PRD 0xFFFF #define PRD_MAX_PRES 10 struct atmel_pwm_chip { struct pwm_chip chip; struct clk *clk; void __iomem *base; void (*config)(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long dty, unsigned long prd); }; static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip) { return container_of(chip, struct atmel_pwm_chip, chip); } static inline u32 atmel_pwm_readl(struct atmel_pwm_chip *chip, unsigned long offset) { return readl_relaxed(chip->base + offset); } static inline void atmel_pwm_writel(struct atmel_pwm_chip *chip, unsigned long offset, unsigned long val) { writel_relaxed(val, chip->base + offset); } static inline u32 atmel_pwm_ch_readl(struct atmel_pwm_chip *chip, unsigned int ch, unsigned long offset) { unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE; return readl_relaxed(chip->base + base + offset); } static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip, unsigned int ch, unsigned long offset, unsigned long val) { unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE; writel_relaxed(val, chip->base + base + offset); } static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); unsigned long prd, dty; unsigned long long div; unsigned int pres = 0; u32 val; int ret; if (test_bit(PWMF_ENABLED, &pwm->flags) && (period_ns != pwm->period)) { dev_err(chip->dev, "cannot change PWM period while enabled\n"); return -EBUSY; } /* Calculate the period cycles and prescale value */ div = (unsigned long long)clk_get_rate(atmel_pwm->clk) * period_ns; do_div(div, NSEC_PER_SEC); while (div > PWM_MAX_PRD) { div >>= 1; pres++; } if (pres > PRD_MAX_PRES) { dev_err(chip->dev, "pres exceeds the maximum value\n"); return -EINVAL; } /* Calculate the duty cycles */ prd = div; div *= duty_ns; do_div(div, period_ns); dty = prd - div; ret = clk_enable(atmel_pwm->clk); if 
(ret) { dev_err(chip->dev, "failed to enable PWM clock\n"); return ret; } /* It is necessary to preserve CPOL, inside CMR */ val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); val = (val & ~PWM_CMR_CPRE_MSK) | (pres & PWM_CMR_CPRE_MSK); atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); atmel_pwm->config(chip, pwm, dty, prd); clk_disable(atmel_pwm->clk); return ret; } static void atmel_pwm_config_v1(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long dty, unsigned long prd) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); unsigned int val; if (test_bit(PWMF_ENABLED, &pwm->flags)) { /* * If the PWM channel is enabled, using the update register, * it needs to set bit 10 of CMR to 0 */ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CUPD, dty); val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); val &= ~PWM_CMR_UPD_CDTY; atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); } else { /* * If the PWM channel is disabled, write value to duty and * period registers directly. */ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CDTY, dty); atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CPRD, prd); } } static void atmel_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long dty, unsigned long prd) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); if (test_bit(PWMF_ENABLED, &pwm->flags)) { /* * If the PWM channel is enabled, using the duty update register * to update the value. */ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTYUPD, dty); } else { /* * If the PWM channel is disabled, write value to duty and * period registers directly. 
*/ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTY, dty); atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CPRD, prd); } } static int atmel_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); u32 val; int ret; val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); if (polarity == PWM_POLARITY_NORMAL) val &= ~PWM_CMR_CPOL; else val |= PWM_CMR_CPOL; ret = clk_enable(atmel_pwm->clk); if (ret) { dev_err(chip->dev, "failed to enable PWM clock\n"); return ret; } atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); clk_disable(atmel_pwm->clk); return 0; } static int atmel_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); int ret; ret = clk_enable(atmel_pwm->clk); if (ret) { dev_err(chip->dev, "failed to enable PWM clock\n"); return ret; } atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm); return 0; } static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm); clk_disable(atmel_pwm->clk); } static const struct pwm_ops atmel_pwm_ops = { .config = atmel_pwm_config, .set_polarity = atmel_pwm_set_polarity, .enable = atmel_pwm_enable, .disable = atmel_pwm_disable, .owner = THIS_MODULE, }; struct atmel_pwm_data { void (*config)(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long dty, unsigned long prd); }; static const struct atmel_pwm_data atmel_pwm_data_v1 = { .config = atmel_pwm_config_v1, }; static const struct atmel_pwm_data atmel_pwm_data_v2 = { .config = atmel_pwm_config_v2, }; static const struct platform_device_id atmel_pwm_devtypes[] = { { .name = "at91sam9rl-pwm", .driver_data = (kernel_ulong_t)&atmel_pwm_data_v1, }, { .name = "sama5d3-pwm", .driver_data = (kernel_ulong_t)&atmel_pwm_data_v2, }, { /* sentinel */ }, }; 
MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes); static const struct of_device_id atmel_pwm_dt_ids[] = { { .compatible = "atmel,at91sam9rl-pwm", .data = &atmel_pwm_data_v1, }, { .compatible = "atmel,sama5d3-pwm", .data = &atmel_pwm_data_v2, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids); static inline const struct atmel_pwm_data * atmel_pwm_get_driver_data(struct platform_device *pdev) { if (pdev->dev.of_node) { const struct of_device_id *match; match = of_match_device(atmel_pwm_dt_ids, &pdev->dev); if (!match) return NULL; return match->data; } else { const struct platform_device_id *id; id = platform_get_device_id(pdev); return (struct atmel_pwm_data *)id->driver_data; } } static int atmel_pwm_probe(struct platform_device *pdev) { const struct atmel_pwm_data *data; struct atmel_pwm_chip *atmel_pwm; struct resource *res; int ret; data = atmel_pwm_get_driver_data(pdev); if (!data) return -ENODEV; atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL); if (!atmel_pwm) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); atmel_pwm->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(atmel_pwm->base)) return PTR_ERR(atmel_pwm->base); atmel_pwm->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(atmel_pwm->clk)) return PTR_ERR(atmel_pwm->clk); ret = clk_prepare(atmel_pwm->clk); if (ret) { dev_err(&pdev->dev, "failed to prepare PWM clock\n"); return ret; } atmel_pwm->chip.dev = &pdev->dev; atmel_pwm->chip.ops = &atmel_pwm_ops; if (pdev->dev.of_node) { atmel_pwm->chip.of_xlate = of_pwm_xlate_with_flags; atmel_pwm->chip.of_pwm_n_cells = 3; } atmel_pwm->chip.base = -1; atmel_pwm->chip.npwm = 4; atmel_pwm->chip.can_sleep = true; atmel_pwm->config = data->config; ret = pwmchip_add(&atmel_pwm->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to add PWM chip %d\n", ret); goto unprepare_clk; } platform_set_drvdata(pdev, atmel_pwm); return ret; unprepare_clk: clk_unprepare(atmel_pwm->clk); return ret; } static int 
atmel_pwm_remove(struct platform_device *pdev) { struct atmel_pwm_chip *atmel_pwm = platform_get_drvdata(pdev); clk_unprepare(atmel_pwm->clk); return pwmchip_remove(&atmel_pwm->chip); } static struct platform_driver atmel_pwm_driver = { .driver = { .name = "atmel-pwm", .of_match_table = of_match_ptr(atmel_pwm_dt_ids), }, .id_table = atmel_pwm_devtypes, .probe = atmel_pwm_probe, .remove = atmel_pwm_remove, }; module_platform_driver(atmel_pwm_driver); MODULE_ALIAS("platform:atmel-pwm"); MODULE_AUTHOR("Bo Shen <voice.shen@atmel.com>"); MODULE_DESCRIPTION("Atmel PWM driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
Note-2/android_kernel_samsung_smdk4412
arch/arm/mach-tegra/board-harmony.c
2277
5514
/* * arch/arm/mach-tegra/board-harmony.c * * Copyright (C) 2010 Google, Inc. * Copyright (C) 2011 NVIDIA, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/pda_power.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/i2c-tegra.h> #include <sound/wm8903.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/setup.h> #include <mach/tegra_wm8903_pdata.h> #include <mach/iomap.h> #include <mach/irqs.h> #include <mach/sdhci.h> #include "board.h" #include "board-harmony.h" #include "clock.h" #include "devices.h" #include "gpio-names.h" static struct plat_serial8250_port debug_uart_platform_data[] = { { .membase = IO_ADDRESS(TEGRA_UARTD_BASE), .mapbase = TEGRA_UARTD_BASE, .irq = INT_UARTD, .flags = UPF_BOOT_AUTOCONF, .iotype = UPIO_MEM, .regshift = 2, .uartclk = 216000000, }, { .flags = 0 } }; static struct platform_device debug_uart = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = debug_uart_platform_data, }, }; static struct tegra_wm8903_platform_data harmony_audio_pdata = { .gpio_spkr_en = TEGRA_GPIO_SPKR_EN, .gpio_hp_det = TEGRA_GPIO_HP_DET, .gpio_hp_mute = -1, .gpio_int_mic_en = TEGRA_GPIO_INT_MIC_EN, .gpio_ext_mic_en = TEGRA_GPIO_EXT_MIC_EN, }; static struct platform_device harmony_audio_device = { .name = "tegra-snd-wm8903", .id = 0, .dev = { .platform_data = 
&harmony_audio_pdata, }, }; static struct tegra_i2c_platform_data harmony_i2c1_platform_data = { .bus_clk_rate = 400000, }; static struct tegra_i2c_platform_data harmony_i2c2_platform_data = { .bus_clk_rate = 400000, }; static struct tegra_i2c_platform_data harmony_i2c3_platform_data = { .bus_clk_rate = 400000, }; static struct tegra_i2c_platform_data harmony_dvc_platform_data = { .bus_clk_rate = 400000, }; static struct wm8903_platform_data harmony_wm8903_pdata = { .irq_active_low = 0, .micdet_cfg = 0, .micdet_delay = 100, .gpio_base = HARMONY_GPIO_WM8903(0), .gpio_cfg = { WM8903_GPIO_NO_CONFIG, WM8903_GPIO_NO_CONFIG, 0, WM8903_GPIO_NO_CONFIG, WM8903_GPIO_NO_CONFIG, }, }; static struct i2c_board_info __initdata wm8903_board_info = { I2C_BOARD_INFO("wm8903", 0x1a), .platform_data = &harmony_wm8903_pdata, .irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_CDC_IRQ), }; static void __init harmony_i2c_init(void) { tegra_i2c_device1.dev.platform_data = &harmony_i2c1_platform_data; tegra_i2c_device2.dev.platform_data = &harmony_i2c2_platform_data; tegra_i2c_device3.dev.platform_data = &harmony_i2c3_platform_data; tegra_i2c_device4.dev.platform_data = &harmony_dvc_platform_data; platform_device_register(&tegra_i2c_device1); platform_device_register(&tegra_i2c_device2); platform_device_register(&tegra_i2c_device3); platform_device_register(&tegra_i2c_device4); i2c_register_board_info(0, &wm8903_board_info, 1); } static struct platform_device *harmony_devices[] __initdata = { &debug_uart, &tegra_sdhci_device1, &tegra_sdhci_device2, &tegra_sdhci_device4, &tegra_i2s_device1, &tegra_das_device, &tegra_pcm_device, &harmony_audio_device, }; static void __init tegra_harmony_fixup(struct machine_desc *desc, struct tag *tags, char **cmdline, struct meminfo *mi) { mi->nr_banks = 2; mi->bank[0].start = PHYS_OFFSET; mi->bank[0].size = 448 * SZ_1M; mi->bank[1].start = SZ_512M; mi->bank[1].size = SZ_512M; } static __initdata struct tegra_clk_init_table harmony_clk_init_table[] = { /* name parent rate 
enabled */ { "uartd", "pll_p", 216000000, true }, { "pll_a", "pll_p_out1", 56448000, true }, { "pll_a_out0", "pll_a", 11289600, true }, { "cdev1", NULL, 0, true }, { "i2s1", "pll_a_out0", 11289600, false}, { NULL, NULL, 0, 0}, }; static struct tegra_sdhci_platform_data sdhci_pdata1 = { .cd_gpio = -1, .wp_gpio = -1, .power_gpio = -1, }; static struct tegra_sdhci_platform_data sdhci_pdata2 = { .cd_gpio = TEGRA_GPIO_SD2_CD, .wp_gpio = TEGRA_GPIO_SD2_WP, .power_gpio = TEGRA_GPIO_SD2_POWER, }; static struct tegra_sdhci_platform_data sdhci_pdata4 = { .cd_gpio = TEGRA_GPIO_SD4_CD, .wp_gpio = TEGRA_GPIO_SD4_WP, .power_gpio = TEGRA_GPIO_SD4_POWER, .is_8bit = 1, }; static void __init tegra_harmony_init(void) { tegra_clk_init_from_table(harmony_clk_init_table); harmony_pinmux_init(); tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1; tegra_sdhci_device2.dev.platform_data = &sdhci_pdata2; tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4; platform_add_devices(harmony_devices, ARRAY_SIZE(harmony_devices)); harmony_i2c_init(); harmony_regulator_init(); } MACHINE_START(HARMONY, "harmony") .boot_params = 0x00000100, .fixup = tegra_harmony_fixup, .map_io = tegra_map_common_io, .init_early = tegra_init_early, .init_irq = tegra_init_irq, .timer = &tegra_timer, .init_machine = tegra_harmony_init, MACHINE_END
gpl-2.0
Troj80/T.J.T-Kernel-vivo
drivers/scsi/bfa/bfa_fcs.c
2533
33940
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * bfa_fcs.c BFA FCS main */ #include "bfad_drv.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" BFA_TRC_FILE(FCS, FCS); /* * FCS sub-modules */ struct bfa_fcs_mod_s { void (*attach) (struct bfa_fcs_s *fcs); void (*modinit) (struct bfa_fcs_s *fcs); void (*modexit) (struct bfa_fcs_s *fcs); }; #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } static struct bfa_fcs_mod_s fcs_modules[] = { { bfa_fcs_port_attach, NULL, NULL }, { bfa_fcs_uf_attach, NULL, NULL }, { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, bfa_fcs_fabric_modexit }, }; /* * fcs_api BFA FCS API */ static void bfa_fcs_exit_comp(void *fcs_cbarg) { struct bfa_fcs_s *fcs = fcs_cbarg; struct bfad_s *bfad = fcs->bfad; complete(&bfad->comp); } /* * fcs_api BFA FCS API */ /* * fcs attach -- called once to initialize data structures at driver attach time */ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, bfa_boolean_t min_cfg) { int i; struct bfa_fcs_mod_s *mod; fcs->bfa = bfa; fcs->bfad = bfad; fcs->min_cfg = min_cfg; bfa->fcs = BFA_TRUE; fcbuild_init(); for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; if (mod->attach) mod->attach(fcs); } } /* * fcs initialization, called once after bfa initialization is complete */ void bfa_fcs_init(struct bfa_fcs_s *fcs) { int i, npbc_vports; struct bfa_fcs_mod_s *mod; struct 
bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; if (mod->modinit) mod->modinit(fcs); } /* Initialize pbc vports */ if (!fcs->min_cfg) { npbc_vports = bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); for (i = 0; i < npbc_vports; i++) bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); } } /* * brief * FCS driver details initialization. * * param[in] fcs FCS instance * param[in] driver_info Driver Details * * return None */ void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, struct bfa_fcs_driver_info_s *driver_info) { fcs->driver_info = *driver_info; bfa_fcs_fabric_psymb_init(&fcs->fabric); } /* * brief * FCS instance cleanup and exit. * * param[in] fcs FCS instance * return None */ void bfa_fcs_exit(struct bfa_fcs_s *fcs) { struct bfa_fcs_mod_s *mod; int nmods, i; bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]); for (i = 0; i < nmods; i++) { mod = &fcs_modules[i]; if (mod->modexit) { bfa_wc_up(&fcs->wc); mod->modexit(fcs); } } bfa_wc_wait(&fcs->wc); } /* * Fabric module implementation. 
*/ #define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ #define bfa_fcs_fabric_set_opertype(__fabric) do { \ if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ == BFA_PORT_TOPOLOGY_P2P) \ (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \ else \ (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \ } while (0) /* * forward declarations */ static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_delay(void *cbarg); static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_delete_comp(void *cbarg); static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len); static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len); static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t status, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs); static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s 
*fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); /* * Beginning state before fabric creation. */ static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CREATE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); bfa_fcs_fabric_init(fabric); bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg); break; case BFA_FCS_FABRIC_SM_LINK_UP: case BFA_FCS_FABRIC_SM_LINK_DOWN: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Beginning state before fabric creation. */ static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_START: if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); } else bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); break; case BFA_FCS_FABRIC_SM_LINK_UP: case BFA_FCS_FABRIC_SM_LINK_DOWN: break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_wc_down(&fabric->fcs->wc); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Link is down, awaiting LINK UP event from port. This is also the * first state at fabric creation. 
*/ static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_UP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; case BFA_FCS_FABRIC_SM_RETRY_OP: break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * FLOGI is in progress, awaiting FLOGI reply. */ static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; if (fabric->auth_reqd && fabric->is_auth) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth); bfa_trc(fabric->fcs, event); } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); } break; case BFA_FCS_FABRIC_SM_RETRY_OP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry); bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer, bfa_fcs_fabric_delay, fabric, BFA_FCS_FABRIC_RETRY_DELAY); break; case BFA_FCS_FABRIC_SM_LOOPBACK: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_set_opertype(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); bfa_fcs_fabric_notify_online(fabric); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, 
bfa_fcs_fabric_sm_deleting); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_DELAYED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_timer_stop(&fabric->delay_timer); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_timer_stop(&fabric->delay_timer); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication is in progress, awaiting authentication results. */ static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_AUTH_FAILED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); break; case BFA_FCS_FABRIC_SM_PERF_EVFP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication failed */ void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 
bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Port is in loopback mode. */ void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * There is no attached fabric - private loop or NPort-to-NPort topology. */ static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: bfa_trc(fabric->fcs, fabric->bb_credit); bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Fabric is online - normal operating state. 
*/ void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_AUTH_FAILED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Exchanging virtual fabric parameters. */ static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done); break; case BFA_FCS_FABRIC_SM_ISOLATE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * EVFP exchange complete and VFT tagging is enabled. */ static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); } /* * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). */ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; char pwwn_ptr[BFA_STRING_32]; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Port is isolated due to VF_ID mismatch. 
" "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.", pwwn_ptr, fabric->fcs->port_vfid, fabric->event_arg.swp_vfid); } /* * Fabric is being deleted, awaiting vport delete completions. */ static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_DELCOMP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_wc_down(&fabric->fcs->wc); break; case BFA_FCS_FABRIC_SM_LINK_UP: break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_fcs_fabric_notify_offline(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * fcs_fabric_private fabric private functions */ static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; port_cfg->roles = BFA_LPORT_ROLE_FCP_IM; port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn; port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn; } /* * Port Symbolic Name Creation for base port. 
*/ void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); /* Model name/number */ strncpy((char *)&port_cfg->sym_name, model, BFA_FCS_PORT_SYMBNAME_MODEL_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Driver Version */ strncat((char *)&port_cfg->sym_name, (char *)driver_info->version, BFA_FCS_PORT_SYMBNAME_VERSION_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Host machine name */ strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_machine_name, BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* * Host OS Info : * If OS Patch Info is not there, do not truncate any bytes from the * OS name string and instead copy the entire OS info string (64 bytes). 
*/ if (driver_info->host_os_patch[0] == '\0') { strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_os_name, BFA_FCS_OS_STR_LEN); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); } else { strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_os_name, BFA_FCS_PORT_SYMBNAME_OSINFO_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Append host OS Patch Info */ strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_os_patch, BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ); } /* null terminate */ port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; } /* * bfa lps login completion callback */ void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) { struct bfa_fcs_fabric_s *fabric = uarg; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, status); switch (status) { case BFA_STATUS_OK: fabric->stats.flogi_accepts++; break; case BFA_STATUS_INVALID_MAC: /* Only for CNA */ fabric->stats.flogi_acc_err++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; case BFA_STATUS_EPROTOCOL: switch (fabric->lps->ext_status) { case BFA_EPROTO_BAD_ACCEPT: fabric->stats.flogi_acc_err++; break; case BFA_EPROTO_UNKNOWN_RSP: fabric->stats.flogi_unknown_rsp++; break; default: break; } bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; case BFA_STATUS_FABRIC_RJT: fabric->stats.flogi_rejects++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; default: fabric->stats.flogi_rsp_err++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; } fabric->bb_credit = fabric->lps->pr_bbcred; bfa_trc(fabric->fcs, fabric->bb_credit); if (!(fabric->lps->brcd_switch)) fabric->fabric_name = fabric->lps->pr_nwwn; /* * Check port type. It should be 1 = F-port. 
*/ if (fabric->lps->fport) { fabric->bport.pid = fabric->lps->lp_pid; fabric->is_npiv = fabric->lps->npiv_en; fabric->is_auth = fabric->lps->auth_req; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); } else { /* * Nport-2-Nport direct attached */ fabric->bport.port_topo.pn2n.rem_port_wwn = fabric->lps->pr_pwwn; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); } bfa_trc(fabric->fcs, fabric->bport.pid); bfa_trc(fabric->fcs, fabric->is_npiv); bfa_trc(fabric->fcs, fabric->is_auth); } /* * Allocate and send FLOGI. */ static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) { struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; u8 alpa = 0; if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) alpa = bfa_fcport_get_myalpa(bfa); bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); fabric->stats.flogi_sent++; } static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_trc(fabric->fcs, fabric->fabric_name); bfa_fcs_fabric_set_opertype(fabric); fabric->stats.fabric_onlines++; /* * notify online event to base and then virtual ports */ bfa_fcs_lport_online(&fabric->bport); list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_online(vport); } } static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_trc(fabric->fcs, fabric->fabric_name); fabric->stats.fabric_offlines++; /* * notify offline event first to vports and then base port. 
*/ list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_offline(vport); } bfa_fcs_lport_offline(&fabric->bport); fabric->fabric_name = 0; fabric->fabric_ip_addr[0] = 0; } static void bfa_fcs_fabric_delay(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); } /* * Delete all vports and wait for vport delete completions. */ static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_fcs_delete(vport); } bfa_fcs_lport_delete(&fabric->bport); bfa_wc_wait(&fabric->wc); } static void bfa_fcs_fabric_delete_comp(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); } /* * fcs_fabric_public fabric public functions */ /* * Attach time initialization. */ void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; fabric = &fcs->fabric; memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); /* * Initialize base fabric. */ fabric->fcs = fcs; INIT_LIST_HEAD(&fabric->vport_q); INIT_LIST_HEAD(&fabric->vf_q); fabric->lps = bfa_lps_alloc(fcs->bfa); WARN_ON(!fabric->lps); /* * Initialize fabric delete completion handler. Fabric deletion is * complete when the last vport delete is complete. */ bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); bfa_wc_up(&fabric->wc); /* For the base port */ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); } void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) { bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); bfa_trc(fcs, 0); } /* * Module cleanup */ void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, 0); /* * Cleanup base fabric. 
*/ fabric = &fcs->fabric; bfa_lps_delete(fabric->lps); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); } /* * Fabric module start -- kick starts FCS actions */ void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, 0); fabric = &fcs->fabric; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); } /* * Link up notification from BFA physical port module. */ void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); } /* * Link down notification from BFA physical port module. */ void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); } /* * A child vport is being created in the fabric. * * Call from vport module at vport creation. A list of base port and vports * belonging to a fabric is maintained to propagate link events. * * param[in] fabric - Fabric instance. This can be a base fabric or vf. * param[in] vport - Vport being created. * * @return None (always succeeds) */ void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, struct bfa_fcs_vport_s *vport) { /* * - add vport to fabric's vport_q */ bfa_trc(fabric->fcs, fabric->vf_id); list_add_tail(&vport->qe, &fabric->vport_q); fabric->num_vports++; bfa_wc_up(&fabric->wc); } /* * A child vport is being deleted from fabric. * * Vport is being deleted. 
*/ void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, struct bfa_fcs_vport_s *vport) { list_del(&vport->qe); fabric->num_vports--; bfa_wc_down(&fabric->wc); } /* * Lookup for a vport within a fabric given its pwwn */ struct bfa_fcs_vport_s * bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) { struct bfa_fcs_vport_s *vport; struct list_head *qe; list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn) return vport; } return NULL; } /* * Get OUI of the attached switch. * * Note : Use of this function should be avoided as much as possible. * This function should be used only if there is any requirement * to check for FOS version below 6.3. * To check if the attached fabric is a brocade fabric, use * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3 * or above only. */ u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric) { wwn_t fab_nwwn; u8 *tmp; u16 oui; fab_nwwn = fabric->lps->pr_nwwn; tmp = (u8 *)&fab_nwwn; oui = (tmp[3] << 8) | tmp[4]; return oui; } /* * Unsolicited frame receive handling. */ void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { u32 pid = fchs->d_id; struct bfa_fcs_vport_s *vport; struct list_head *qe; struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd; bfa_trc(fabric->fcs, len); bfa_trc(fabric->fcs, pid); /* * Look for our own FLOGI frames being looped back. This means an * external loopback cable is in place. Our own FLOGI frames are * sometimes looped back when switch port gets temporarily bypassed. */ if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) && (els_cmd->els_code == FC_ELS_FLOGI) && (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) { bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); return; } /* * FLOGI/EVFP exchanges should be consumed by base fabric. 
*/ if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) { bfa_trc(fabric->fcs, pid); bfa_fcs_fabric_process_uf(fabric, fchs, len); return; } if (fabric->bport.pid == pid) { /* * All authentication frames should be routed to auth */ bfa_trc(fabric->fcs, els_cmd->els_code); if (els_cmd->els_code == FC_ELS_AUTH) { bfa_trc(fabric->fcs, els_cmd->els_code); return; } bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs)); bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); return; } /* * look for a matching local port ID */ list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (vport->lport.pid == pid) { bfa_fcs_lport_uf_recv(&vport->lport, fchs, len); return; } } bfa_trc(fabric->fcs, els_cmd->els_code); bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); } /* * Unsolicited frames to be processed by fabric. */ static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_trc(fabric->fcs, els_cmd->els_code); switch (els_cmd->els_code) { case FC_ELS_FLOGI: bfa_fcs_fabric_process_flogi(fabric, fchs, len); break; default: /* * need to generate a LS_RJT */ break; } } /* * Process incoming FLOGI */ static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1); struct bfa_fcs_lport_s *bport = &fabric->bport; bfa_trc(fabric->fcs, fchs->s_id); fabric->stats.flogi_rcvd++; /* * Check port type. It should be 0 = n-port. 
*/ if (flogi->csp.port_type) { /* * @todo: may need to send a LS_RJT */ bfa_trc(fabric->fcs, flogi->port_name); fabric->stats.flogi_rejected++; return; } fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; bport->port_topo.pn2n.reply_oxid = fchs->ox_id; /* * Send a Flogi Acc */ bfa_fcs_fabric_send_flogi_acc(fabric); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); } static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n; struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_fcxp_s *fcxp; u16 reqlen; struct fchs_s fchs; fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); /* * Do not expect this failure -- expect remote node to retry */ if (!fcxp) return; reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_hton3b(FC_FABRIC_PORT), n2n_port->reply_oxid, pcfg->pwwn, pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa), bfa_fcport_get_rx_bbcredit(bfa)); bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag, BFA_FALSE, FC_CLASS_3, reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, FC_MAX_PDUSZ, 0); } /* * Flogi Acc completion callback. */ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t status, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_trc(fabric->fcs, status); } /* * * @param[in] fabric - fabric * @param[in] wwn_t - new fabric name * * @return - none */ void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, wwn_t fabric_name) { struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; char pwwn_ptr[BFA_STRING_32]; char fwwn_ptr[BFA_STRING_32]; bfa_trc(fabric->fcs, fabric_name); if (fabric->fabric_name == 0) { /* * With BRCD switches, we don't get Fabric Name in FLOGI. * Don't generate a fabric name change event in this case. 
*/ fabric->fabric_name = fabric_name; } else { fabric->fabric_name = fabric_name; wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport)); wwn2str(fwwn_ptr, bfa_fcs_lport_get_fabric_name(&fabric->bport)); BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Base port WWN = %s Fabric WWN = %s\n", pwwn_ptr, fwwn_ptr); } } /* * Returns FCS vf structure for a given vf_id. * * param[in] vf_id - VF_ID * * return * If lookup succeeds, retuns fcs vf object, otherwise returns NULL */ bfa_fcs_vf_t * bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) { bfa_trc(fcs, vf_id); if (vf_id == FC_VF_ID_NULL) return &fcs->fabric; return NULL; } /* * BFA FCS PPORT ( physical port) */ static void bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) { struct bfa_fcs_s *fcs = cbarg; bfa_trc(fcs, event); switch (event) { case BFA_PORT_LINKUP: bfa_fcs_fabric_link_up(&fcs->fabric); break; case BFA_PORT_LINKDOWN: bfa_fcs_fabric_link_down(&fcs->fabric); break; default: WARN_ON(1); } } void bfa_fcs_port_attach(struct bfa_fcs_s *fcs) { bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); } /* * BFA FCS UF ( Unsolicited Frames) */ /* * BFA callback for unsolicited frame receive handler. 
* * @param[in] cbarg callback arg for receive handler * @param[in] uf unsolicited frame descriptor * * @return None */ static void bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) { struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg; struct fchs_s *fchs = bfa_uf_get_frmbuf(uf); u16 len = bfa_uf_get_frmlen(uf); struct fc_vft_s *vft; struct bfa_fcs_fabric_s *fabric; /* * check for VFT header */ if (fchs->routing == FC_RTG_EXT_HDR && fchs->cat_info == FC_CAT_VFT_HDR) { bfa_stats(fcs, uf.tagged); vft = bfa_uf_get_frmbuf(uf); if (fcs->port_vfid == vft->vf_id) fabric = &fcs->fabric; else fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); /* * drop frame if vfid is unknown */ if (!fabric) { WARN_ON(1); bfa_stats(fcs, uf.vfid_unknown); bfa_uf_free(uf); return; } /* * skip vft header */ fchs = (struct fchs_s *) (vft + 1); len -= sizeof(struct fc_vft_s); bfa_trc(fcs, vft->vf_id); } else { bfa_stats(fcs, uf.untagged); fabric = &fcs->fabric; } bfa_trc(fcs, ((u32 *) fchs)[0]); bfa_trc(fcs, ((u32 *) fchs)[1]); bfa_trc(fcs, ((u32 *) fchs)[2]); bfa_trc(fcs, ((u32 *) fchs)[3]); bfa_trc(fcs, ((u32 *) fchs)[4]); bfa_trc(fcs, ((u32 *) fchs)[5]); bfa_trc(fcs, len); bfa_fcs_fabric_uf_recv(fabric, fchs, len); bfa_uf_free(uf); } void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs) { bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); }
gpl-2.0
mwoodward/Amaze-ics-sense
security/integrity/ima/ima_main.c
2533
5853
/* * Copyright (C) 2005,2006,2007,2008 IBM Corporation * * Authors: * Reiner Sailer <sailer@watson.ibm.com> * Serge Hallyn <serue@us.ibm.com> * Kylene Hall <kylene@us.ibm.com> * Mimi Zohar <zohar@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * File: ima_main.c * implements the IMA hooks: ima_bprm_check, ima_file_mmap, * and ima_file_check. */ #include <linux/module.h> #include <linux/file.h> #include <linux/binfmts.h> #include <linux/mount.h> #include <linux/mman.h> #include <linux/slab.h> #include "ima.h" int ima_initialized; char *ima_hash = "sha1"; static int __init hash_setup(char *str) { if (strncmp(str, "md5", 3) == 0) ima_hash = "md5"; return 1; } __setup("ima_hash=", hash_setup); /* * ima_rdwr_violation_check * * Only invalidate the PCR for measured files: * - Opening a file for write when already open for read, * results in a time of measure, time of use (ToMToU) error. * - Opening a file for read when already open for write, * could result in a file measurement error. 
* */ static void ima_rdwr_violation_check(struct file *file) { struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; fmode_t mode = file->f_mode; int rc; bool send_tomtou = false, send_writers = false; if (!S_ISREG(inode->i_mode) || !ima_initialized) return; mutex_lock(&inode->i_mutex); /* file metadata: permissions, xattr */ if (mode & FMODE_WRITE) { if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) send_tomtou = true; goto out; } rc = ima_must_measure(inode, MAY_READ, FILE_CHECK); if (rc < 0) goto out; if (atomic_read(&inode->i_writecount) > 0) send_writers = true; out: mutex_unlock(&inode->i_mutex); if (send_tomtou) ima_add_violation(inode, dentry->d_name.name, "invalid_pcr", "ToMToU"); if (send_writers) ima_add_violation(inode, dentry->d_name.name, "invalid_pcr", "open_writers"); } static void ima_check_last_writer(struct ima_iint_cache *iint, struct inode *inode, struct file *file) { mode_t mode = file->f_mode; mutex_lock(&iint->mutex); if (mode & FMODE_WRITE && atomic_read(&inode->i_writecount) == 1 && iint->version != inode->i_version) iint->flags &= ~IMA_MEASURED; mutex_unlock(&iint->mutex); } /** * ima_file_free - called on __fput() * @file: pointer to file structure being freed * * Flag files that changed, based on i_version */ void ima_file_free(struct file *file) { struct inode *inode = file->f_dentry->d_inode; struct ima_iint_cache *iint; if (!iint_initialized || !S_ISREG(inode->i_mode)) return; iint = ima_iint_find(inode); if (!iint) return; ima_check_last_writer(iint, inode, file); } static int process_measurement(struct file *file, const unsigned char *filename, int mask, int function) { struct inode *inode = file->f_dentry->d_inode; struct ima_iint_cache *iint; int rc = 0; if (!ima_initialized || !S_ISREG(inode->i_mode)) return 0; rc = ima_must_measure(inode, mask, function); if (rc != 0) return rc; retry: iint = ima_iint_find(inode); if (!iint) { rc = ima_inode_alloc(inode); if (!rc || rc == -EEXIST) goto retry; 
return rc; } mutex_lock(&iint->mutex); rc = iint->flags & IMA_MEASURED ? 1 : 0; if (rc != 0) goto out; rc = ima_collect_measurement(iint, file); if (!rc) ima_store_measurement(iint, file, filename); out: mutex_unlock(&iint->mutex); return rc; } /** * ima_file_mmap - based on policy, collect/store measurement. * @file: pointer to the file to be measured (May be NULL) * @prot: contains the protection that will be applied by the kernel. * * Measure files being mmapped executable based on the ima_must_measure() * policy decision. * * Return 0 on success, an error code on failure. * (Based on the results of appraise_measurement().) */ int ima_file_mmap(struct file *file, unsigned long prot) { int rc; if (!file) return 0; if (prot & PROT_EXEC) rc = process_measurement(file, file->f_dentry->d_name.name, MAY_EXEC, FILE_MMAP); return 0; } /** * ima_bprm_check - based on policy, collect/store measurement. * @bprm: contains the linux_binprm structure * * The OS protects against an executable file, already open for write, * from being executed in deny_write_access() and an executable file, * already open for execute, from being modified in get_write_access(). * So we can be certain that what we verify and measure here is actually * what is being executed. * * Return 0 on success, an error code on failure. * (Based on the results of appraise_measurement().) */ int ima_bprm_check(struct linux_binprm *bprm) { int rc; rc = process_measurement(bprm->file, bprm->filename, MAY_EXEC, BPRM_CHECK); return 0; } /** * ima_path_check - based on policy, collect/store measurement. * @file: pointer to the file to be measured * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE * * Measure files based on the ima_must_measure() policy decision. * * Always return 0 and audit dentry_open failures. * (Return code will be based upon measurement appraisal.) 
*/ int ima_file_check(struct file *file, int mask) { int rc; ima_rdwr_violation_check(file); rc = process_measurement(file, file->f_dentry->d_name.name, mask & (MAY_READ | MAY_WRITE | MAY_EXEC), FILE_CHECK); return 0; } EXPORT_SYMBOL_GPL(ima_file_check); static int __init init_ima(void) { int error; error = ima_init(); ima_initialized = 1; return error; } static void __exit cleanup_ima(void) { ima_cleanup(); } late_initcall(init_ima); /* Start IMA after the TPM is available */ MODULE_DESCRIPTION("Integrity Measurement Architecture"); MODULE_LICENSE("GPL");
gpl-2.0
VanirAOSP/kernel_samsung_codinalte
drivers/input/misc/ad714x-i2c.c
2533
2863
/* * AD714X CapTouch Programmable Controller driver (I2C bus) * * Copyright 2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/input.h> /* BUS_I2C */ #include <linux/i2c.h> #include <linux/module.h> #include <linux/types.h> #include <linux/pm.h> #include "ad714x.h" #ifdef CONFIG_PM static int ad714x_i2c_suspend(struct device *dev) { return ad714x_disable(i2c_get_clientdata(to_i2c_client(dev))); } static int ad714x_i2c_resume(struct device *dev) { return ad714x_enable(i2c_get_clientdata(to_i2c_client(dev))); } #endif static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); static int ad714x_i2c_write(struct device *dev, unsigned short reg, unsigned short data) { struct i2c_client *client = to_i2c_client(dev); int ret = 0; u8 *_reg = (u8 *)&reg; u8 *_data = (u8 *)&data; u8 tx[4] = { _reg[1], _reg[0], _data[1], _data[0] }; ret = i2c_master_send(client, tx, 4); if (ret < 0) dev_err(&client->dev, "I2C write error\n"); return ret; } static int ad714x_i2c_read(struct device *dev, unsigned short reg, unsigned short *data) { struct i2c_client *client = to_i2c_client(dev); int ret = 0; u8 *_reg = (u8 *)&reg; u8 *_data = (u8 *)data; u8 tx[2] = { _reg[1], _reg[0] }; u8 rx[2]; ret = i2c_master_send(client, tx, 2); if (ret >= 0) ret = i2c_master_recv(client, rx, 2); if (unlikely(ret < 0)) { dev_err(&client->dev, "I2C read error\n"); } else { _data[0] = rx[1]; _data[1] = rx[0]; } return ret; } static int __devinit ad714x_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ad714x_chip *chip; chip = ad714x_probe(&client->dev, BUS_I2C, client->irq, ad714x_i2c_read, ad714x_i2c_write); if (IS_ERR(chip)) return PTR_ERR(chip); i2c_set_clientdata(client, chip); return 0; } static int __devexit ad714x_i2c_remove(struct i2c_client *client) { struct ad714x_chip *chip = i2c_get_clientdata(client); ad714x_remove(chip); return 0; } static const struct i2c_device_id ad714x_id[] = { { "ad7142_captouch", 0 }, { 
"ad7143_captouch", 0 }, { "ad7147_captouch", 0 }, { "ad7147a_captouch", 0 }, { "ad7148_captouch", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ad714x_id); static struct i2c_driver ad714x_i2c_driver = { .driver = { .name = "ad714x_captouch", .pm = &ad714x_i2c_pm, }, .probe = ad714x_i2c_probe, .remove = __devexit_p(ad714x_i2c_remove), .id_table = ad714x_id, }; static __init int ad714x_i2c_init(void) { return i2c_add_driver(&ad714x_i2c_driver); } module_init(ad714x_i2c_init); static __exit void ad714x_i2c_exit(void) { i2c_del_driver(&ad714x_i2c_driver); } module_exit(ad714x_i2c_exit); MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor I2C Bus Driver"); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_LICENSE("GPL");
gpl-2.0
aftian/linux-blankon
drivers/net/can/c_can/c_can_platform.c
2533
5289
/* * Platform CAN bus driver for Bosch C_CAN controller * * Copyright (C) 2010 ST Microelectronics * Bhupesh Sharma <bhupesh.sharma@st.com> * * Borrowed heavily from the C_CAN driver originally written by: * Copyright (C) 2007 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de> * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch> * * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B. * Bosch C_CAN user manual can be obtained from: * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/ * users_manual_c_can.pdf * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/version.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/can/dev.h> #include "c_can.h" /* * 16-bit c_can registers can be arranged differently in the memory * architecture of different implementations. For example: 16-bit * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. * Handle the same by providing a common read/write interface. 
*/ static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv, void *reg) { return readw(reg); } static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv, void *reg, u16 val) { writew(val, reg); } static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv, void *reg) { return readw(reg + (long)reg - (long)priv->regs); } static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv, void *reg, u16 val) { writew(val, reg + (long)reg - (long)priv->regs); } static int __devinit c_can_plat_probe(struct platform_device *pdev) { int ret; void __iomem *addr; struct net_device *dev; struct c_can_priv *priv; struct resource *mem; int irq; #ifdef CONFIG_HAVE_CLK struct clk *clk; /* get the appropriate clk */ clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "no clock defined\n"); ret = -ENODEV; goto exit; } #endif /* get the platform data */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!mem || irq <= 0) { ret = -ENODEV; goto exit_free_clk; } if (!request_mem_region(mem->start, resource_size(mem), KBUILD_MODNAME)) { dev_err(&pdev->dev, "resource unavailable\n"); ret = -ENODEV; goto exit_free_clk; } addr = ioremap(mem->start, resource_size(mem)); if (!addr) { dev_err(&pdev->dev, "failed to map can port\n"); ret = -ENOMEM; goto exit_release_mem; } /* allocate the c_can device */ dev = alloc_c_can_dev(); if (!dev) { ret = -ENOMEM; goto exit_iounmap; } priv = netdev_priv(dev); dev->irq = irq; priv->regs = addr; #ifdef CONFIG_HAVE_CLK priv->can.clock.freq = clk_get_rate(clk); priv->priv = clk; #endif switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_32BIT: priv->read_reg = c_can_plat_read_reg_aligned_to_32bit; priv->write_reg = c_can_plat_write_reg_aligned_to_32bit; break; case IORESOURCE_MEM_16BIT: default: priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; break; } 
platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); ret = register_c_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", KBUILD_MODNAME, ret); goto exit_free_device; } dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", KBUILD_MODNAME, priv->regs, dev->irq); return 0; exit_free_device: platform_set_drvdata(pdev, NULL); free_c_can_dev(dev); exit_iounmap: iounmap(addr); exit_release_mem: release_mem_region(mem->start, resource_size(mem)); exit_free_clk: #ifdef CONFIG_HAVE_CLK clk_put(clk); exit: #endif dev_err(&pdev->dev, "probe failed\n"); return ret; } static int __devexit c_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); struct resource *mem; unregister_c_can_dev(dev); platform_set_drvdata(pdev, NULL); free_c_can_dev(dev); iounmap(priv->regs); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); #ifdef CONFIG_HAVE_CLK clk_put(priv->priv); #endif return 0; } static struct platform_driver c_can_plat_driver = { .driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, }, .probe = c_can_plat_probe, .remove = __devexit_p(c_can_plat_remove), }; static int __init c_can_plat_init(void) { return platform_driver_register(&c_can_plat_driver); } module_init(c_can_plat_init); static void __exit c_can_plat_exit(void) { platform_driver_unregister(&c_can_plat_driver); } module_exit(c_can_plat_exit); MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
gpl-2.0
MinoochX/bproj-mxk
security/integrity/ima/ima_main.c
2533
5853
/* * Copyright (C) 2005,2006,2007,2008 IBM Corporation * * Authors: * Reiner Sailer <sailer@watson.ibm.com> * Serge Hallyn <serue@us.ibm.com> * Kylene Hall <kylene@us.ibm.com> * Mimi Zohar <zohar@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * File: ima_main.c * implements the IMA hooks: ima_bprm_check, ima_file_mmap, * and ima_file_check. */ #include <linux/module.h> #include <linux/file.h> #include <linux/binfmts.h> #include <linux/mount.h> #include <linux/mman.h> #include <linux/slab.h> #include "ima.h" int ima_initialized; char *ima_hash = "sha1"; static int __init hash_setup(char *str) { if (strncmp(str, "md5", 3) == 0) ima_hash = "md5"; return 1; } __setup("ima_hash=", hash_setup); /* * ima_rdwr_violation_check * * Only invalidate the PCR for measured files: * - Opening a file for write when already open for read, * results in a time of measure, time of use (ToMToU) error. * - Opening a file for read when already open for write, * could result in a file measurement error. 
* */ static void ima_rdwr_violation_check(struct file *file) { struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; fmode_t mode = file->f_mode; int rc; bool send_tomtou = false, send_writers = false; if (!S_ISREG(inode->i_mode) || !ima_initialized) return; mutex_lock(&inode->i_mutex); /* file metadata: permissions, xattr */ if (mode & FMODE_WRITE) { if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) send_tomtou = true; goto out; } rc = ima_must_measure(inode, MAY_READ, FILE_CHECK); if (rc < 0) goto out; if (atomic_read(&inode->i_writecount) > 0) send_writers = true; out: mutex_unlock(&inode->i_mutex); if (send_tomtou) ima_add_violation(inode, dentry->d_name.name, "invalid_pcr", "ToMToU"); if (send_writers) ima_add_violation(inode, dentry->d_name.name, "invalid_pcr", "open_writers"); } static void ima_check_last_writer(struct ima_iint_cache *iint, struct inode *inode, struct file *file) { mode_t mode = file->f_mode; mutex_lock(&iint->mutex); if (mode & FMODE_WRITE && atomic_read(&inode->i_writecount) == 1 && iint->version != inode->i_version) iint->flags &= ~IMA_MEASURED; mutex_unlock(&iint->mutex); } /** * ima_file_free - called on __fput() * @file: pointer to file structure being freed * * Flag files that changed, based on i_version */ void ima_file_free(struct file *file) { struct inode *inode = file->f_dentry->d_inode; struct ima_iint_cache *iint; if (!iint_initialized || !S_ISREG(inode->i_mode)) return; iint = ima_iint_find(inode); if (!iint) return; ima_check_last_writer(iint, inode, file); } static int process_measurement(struct file *file, const unsigned char *filename, int mask, int function) { struct inode *inode = file->f_dentry->d_inode; struct ima_iint_cache *iint; int rc = 0; if (!ima_initialized || !S_ISREG(inode->i_mode)) return 0; rc = ima_must_measure(inode, mask, function); if (rc != 0) return rc; retry: iint = ima_iint_find(inode); if (!iint) { rc = ima_inode_alloc(inode); if (!rc || rc == -EEXIST) goto retry; 
return rc; } mutex_lock(&iint->mutex); rc = iint->flags & IMA_MEASURED ? 1 : 0; if (rc != 0) goto out; rc = ima_collect_measurement(iint, file); if (!rc) ima_store_measurement(iint, file, filename); out: mutex_unlock(&iint->mutex); return rc; } /** * ima_file_mmap - based on policy, collect/store measurement. * @file: pointer to the file to be measured (May be NULL) * @prot: contains the protection that will be applied by the kernel. * * Measure files being mmapped executable based on the ima_must_measure() * policy decision. * * Return 0 on success, an error code on failure. * (Based on the results of appraise_measurement().) */ int ima_file_mmap(struct file *file, unsigned long prot) { int rc; if (!file) return 0; if (prot & PROT_EXEC) rc = process_measurement(file, file->f_dentry->d_name.name, MAY_EXEC, FILE_MMAP); return 0; } /** * ima_bprm_check - based on policy, collect/store measurement. * @bprm: contains the linux_binprm structure * * The OS protects against an executable file, already open for write, * from being executed in deny_write_access() and an executable file, * already open for execute, from being modified in get_write_access(). * So we can be certain that what we verify and measure here is actually * what is being executed. * * Return 0 on success, an error code on failure. * (Based on the results of appraise_measurement().) */ int ima_bprm_check(struct linux_binprm *bprm) { int rc; rc = process_measurement(bprm->file, bprm->filename, MAY_EXEC, BPRM_CHECK); return 0; } /** * ima_path_check - based on policy, collect/store measurement. * @file: pointer to the file to be measured * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE * * Measure files based on the ima_must_measure() policy decision. * * Always return 0 and audit dentry_open failures. * (Return code will be based upon measurement appraisal.) 
*/ int ima_file_check(struct file *file, int mask) { int rc; ima_rdwr_violation_check(file); rc = process_measurement(file, file->f_dentry->d_name.name, mask & (MAY_READ | MAY_WRITE | MAY_EXEC), FILE_CHECK); return 0; } EXPORT_SYMBOL_GPL(ima_file_check); static int __init init_ima(void) { int error; error = ima_init(); ima_initialized = 1; return error; } static void __exit cleanup_ima(void) { ima_cleanup(); } late_initcall(init_ima); /* Start IMA after the TPM is available */ MODULE_DESCRIPTION("Integrity Measurement Architecture"); MODULE_LICENSE("GPL");
gpl-2.0
mastero9017/Blu_Spark
drivers/gpu/drm/nouveau/nouveau_i2c.c
4837
9764
/* * Copyright 2009 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include <linux/module.h> #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_i2c.h" #include "nouveau_hw.h" static void i2c_drive_scl(void *data, int state) { struct nouveau_i2c_chan *port = data; if (port->type == 0) { u8 val = NVReadVgaCrtc(port->dev, 0, port->drive); if (state) val |= 0x20; else val &= 0xdf; NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01); } else if (port->type == 4) { nv_mask(port->dev, port->drive, 0x2f, state ? 
0x21 : 0x01); } else if (port->type == 5) { if (state) port->state |= 0x01; else port->state &= 0xfe; nv_wr32(port->dev, port->drive, 4 | port->state); } } static void i2c_drive_sda(void *data, int state) { struct nouveau_i2c_chan *port = data; if (port->type == 0) { u8 val = NVReadVgaCrtc(port->dev, 0, port->drive); if (state) val |= 0x10; else val &= 0xef; NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01); } else if (port->type == 4) { nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01); } else if (port->type == 5) { if (state) port->state |= 0x02; else port->state &= 0xfd; nv_wr32(port->dev, port->drive, 4 | port->state); } } static int i2c_sense_scl(void *data) { struct nouveau_i2c_chan *port = data; struct drm_nouveau_private *dev_priv = port->dev->dev_private; if (port->type == 0) { return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04); } else if (port->type == 4) { return !!(nv_rd32(port->dev, port->sense) & 0x00040000); } else if (port->type == 5) { if (dev_priv->card_type < NV_D0) return !!(nv_rd32(port->dev, port->sense) & 0x01); else return !!(nv_rd32(port->dev, port->sense) & 0x10); } return 0; } static int i2c_sense_sda(void *data) { struct nouveau_i2c_chan *port = data; struct drm_nouveau_private *dev_priv = port->dev->dev_private; if (port->type == 0) { return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08); } else if (port->type == 4) { return !!(nv_rd32(port->dev, port->sense) & 0x00080000); } else if (port->type == 5) { if (dev_priv->card_type < NV_D0) return !!(nv_rd32(port->dev, port->sense) & 0x02); else return !!(nv_rd32(port->dev, port->sense) & 0x20); } return 0; } static const uint32_t nv50_i2c_port[] = { 0x00e138, 0x00e150, 0x00e168, 0x00e180, 0x00e254, 0x00e274, 0x00e764, 0x00e780, 0x00e79c, 0x00e7b8 }; static u8 * i2c_table(struct drm_device *dev, u8 *version) { u8 *dcb = dcb_table(dev), *i2c = NULL; if (dcb) { if (dcb[0] >= 0x15) i2c = ROMPTR(dev, dcb[2]); if (dcb[0] >= 0x30) i2c = ROMPTR(dev, dcb[4]); } /* early 
revisions had no version number, use dcb version */ if (i2c) { *version = dcb[0]; if (*version >= 0x30) *version = i2c[0]; } return i2c; } int nouveau_i2c_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvbios *bios = &dev_priv->vbios; struct nouveau_i2c_chan *port; u8 version = 0x00, entries, recordlen; u8 *i2c, *entry, legacy[2][4] = {}; int ret, i; INIT_LIST_HEAD(&dev_priv->i2c_ports); i2c = i2c_table(dev, &version); if (!i2c) { u8 *bmp = &bios->data[bios->offset]; if (bios->type != NVBIOS_BMP) return -ENODEV; legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX; legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX; legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX; legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX; /* BMP (from v4.0) has i2c info in the structure, it's in a * fixed location on earlier VBIOS */ if (bmp[5] < 4) i2c = &bios->data[0x48]; else i2c = &bmp[0x36]; if (i2c[4]) legacy[0][0] = i2c[4]; if (i2c[5]) legacy[0][1] = i2c[5]; if (i2c[6]) legacy[1][0] = i2c[6]; if (i2c[7]) legacy[1][1] = i2c[7]; } if (version >= 0x30) { entry = i2c[1] + i2c; entries = i2c[2]; recordlen = i2c[3]; } else if (version) { entry = i2c; entries = 16; recordlen = 4; } else { entry = legacy[0]; entries = 2; recordlen = 4; } for (i = 0; i < entries; i++, entry += recordlen) { port = kzalloc(sizeof(*port), GFP_KERNEL); if (port == NULL) { nouveau_i2c_fini(dev); return -ENOMEM; } port->type = entry[3]; if (version < 0x30) { port->type &= 0x07; if (port->type == 0x07) port->type = 0xff; } if (port->type == 0xff) { kfree(port); continue; } switch (port->type) { case 0: /* NV04:NV50 */ port->drive = entry[0]; port->sense = entry[1]; break; case 4: /* NV4E */ port->drive = 0x600800 + entry[1]; port->sense = port->drive; break; case 5: /* NV50- */ port->drive = entry[0] & 0x0f; if (dev_priv->card_type < NV_D0) { if (port->drive >= ARRAY_SIZE(nv50_i2c_port)) break; port->drive = nv50_i2c_port[port->drive]; port->sense = port->drive; } else { port->drive = 0x00d014 + 
(port->drive * 0x20); port->sense = port->drive; } break; case 6: /* NV50- DP AUX */ port->drive = entry[0]; port->sense = port->drive; port->adapter.algo = &nouveau_dp_i2c_algo; break; default: break; } if (!port->adapter.algo && !port->drive) { NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n", i, port->type, port->drive, port->sense); kfree(port); continue; } snprintf(port->adapter.name, sizeof(port->adapter.name), "nouveau-%s-%d", pci_name(dev->pdev), i); port->adapter.owner = THIS_MODULE; port->adapter.dev.parent = &dev->pdev->dev; port->dev = dev; port->index = i; port->dcb = ROM32(entry[0]); i2c_set_adapdata(&port->adapter, i2c); if (port->adapter.algo != &nouveau_dp_i2c_algo) { port->adapter.algo_data = &port->bit; port->bit.udelay = 10; port->bit.timeout = usecs_to_jiffies(2200); port->bit.data = port; port->bit.setsda = i2c_drive_sda; port->bit.setscl = i2c_drive_scl; port->bit.getsda = i2c_sense_sda; port->bit.getscl = i2c_sense_scl; i2c_drive_scl(port, 0); i2c_drive_sda(port, 1); i2c_drive_scl(port, 1); ret = i2c_bit_add_bus(&port->adapter); } else { port->adapter.algo = &nouveau_dp_i2c_algo; ret = i2c_add_adapter(&port->adapter); } if (ret) { NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret); kfree(port); continue; } list_add_tail(&port->head, &dev_priv->i2c_ports); } return 0; } void nouveau_i2c_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_i2c_chan *port, *tmp; list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) { i2c_del_adapter(&port->adapter); kfree(port); } } struct nouveau_i2c_chan * nouveau_i2c_find(struct drm_device *dev, u8 index) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_i2c_chan *port; if (index == NV_I2C_DEFAULT(0) || index == NV_I2C_DEFAULT(1)) { u8 version, *i2c = i2c_table(dev, &version); if (i2c && version >= 0x30) { if (index == NV_I2C_DEFAULT(0)) index = (i2c[4] & 0x0f); else index = (i2c[4] & 0xf0) >> 4; } else { index = 2; } 
} list_for_each_entry(port, &dev_priv->i2c_ports, head) { if (port->index == index) break; } if (&port->head == &dev_priv->i2c_ports) return NULL; if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) { u32 reg = 0x00e500, val; if (port->type == 6) { reg += port->drive * 0x50; val = 0x2002; } else { reg += ((port->dcb & 0x1e00) >> 9) * 0x50; val = 0xe001; } /* nfi, but neither auxch or i2c work if it's 1 */ nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000); /* nfi, but switches auxch vs normal i2c */ nv_mask(dev, reg + 0x00, 0x0000f003, val); } return port; } bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr) { uint8_t buf[] = { 0 }; struct i2c_msg msgs[] = { { .addr = addr, .flags = 0, .len = 1, .buf = buf, }, { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = buf, } }; return i2c_transfer(&i2c->adapter, msgs, 2) == 2; } int nouveau_i2c_identify(struct drm_device *dev, const char *what, struct i2c_board_info *info, bool (*match)(struct nouveau_i2c_chan *, struct i2c_board_info *), int index) { struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index); int i; if (!i2c) { NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index); return -ENODEV; } NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index); for (i = 0; info[i].addr; i++) { if (nouveau_probe_i2c_addr(i2c, info[i].addr) && (!match || match(i2c, &info[i]))) { NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); return i; } } NV_DEBUG(dev, "No devices found.\n"); return -ENODEV; }
gpl-2.0
mpokwsths/hammerhead_kernel
arch/powerpc/kernel/io-workarounds.c
4837
4251
/* * Support PCI IO workaround * * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org> * IBM, Corp. * (C) Copyright 2007-2008 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/sched.h> /* for init_mm */ #include <asm/io.h> #include <asm/machdep.h> #include <asm/pgtable.h> #include <asm/ppc-pci.h> #include <asm/io-workarounds.h> #define IOWA_MAX_BUS 8 static struct iowa_bus iowa_busses[IOWA_MAX_BUS]; static unsigned int iowa_bus_count; static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr) { int i, j; struct resource *res; unsigned long vstart, vend; for (i = 0; i < iowa_bus_count; i++) { struct iowa_bus *bus = &iowa_busses[i]; struct pci_controller *phb = bus->phb; if (vaddr) { vstart = (unsigned long)phb->io_base_virt; vend = vstart + phb->pci_io_size - 1; if ((vaddr >= vstart) && (vaddr <= vend)) return bus; } if (paddr) for (j = 0; j < 3; j++) { res = &phb->mem_resources[j]; if (paddr >= res->start && paddr <= res->end) return bus; } } return NULL; } struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) { struct iowa_bus *bus; int token; token = PCI_GET_ADDR_TOKEN(addr); if (token && token <= iowa_bus_count) bus = &iowa_busses[token - 1]; else { unsigned long vaddr, paddr; pte_t *ptep; vaddr = (unsigned long)PCI_FIX_ADDR(addr); if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) return NULL; ptep = find_linux_pte(init_mm.pgd, vaddr); if (ptep == NULL) paddr = 0; else paddr = pte_pfn(*ptep) << PAGE_SHIFT; bus = iowa_pci_find(vaddr, paddr); if (bus == NULL) return NULL; } return bus; } struct iowa_bus *iowa_pio_find_bus(unsigned long port) { unsigned long vaddr = (unsigned long)pci_io_base + port; return iowa_pci_find(vaddr, 0); } #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ static ret iowa_##name at \ 
{ \ struct iowa_bus *bus; \ bus = iowa_##space##_find_bus(aa); \ if (bus && bus->ops && bus->ops->name) \ return bus->ops->name al; \ return __do_##name al; \ } #define DEF_PCI_AC_NORET(name, at, al, space, aa) \ static void iowa_##name at \ { \ struct iowa_bus *bus; \ bus = iowa_##space##_find_bus(aa); \ if (bus && bus->ops && bus->ops->name) { \ bus->ops->name al; \ return; \ } \ __do_##name al; \ } #include <asm/io-defs.h> #undef DEF_PCI_AC_RET #undef DEF_PCI_AC_NORET static const struct ppc_pci_io __devinitconst iowa_pci_io = { #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name, #define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name, #include <asm/io-defs.h> #undef DEF_PCI_AC_RET #undef DEF_PCI_AC_NORET }; static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, unsigned long flags, void *caller) { struct iowa_bus *bus; void __iomem *res = __ioremap_caller(addr, size, flags, caller); int busno; bus = iowa_pci_find(0, (unsigned long)addr); if (bus != NULL) { busno = bus - iowa_busses; PCI_SET_ADDR_TOKEN(res, busno + 1); } return res; } /* Enable IO workaround */ static void __devinit io_workaround_init(void) { static int io_workaround_inited; if (io_workaround_inited) return; ppc_pci_io = iowa_pci_io; ppc_md.ioremap = iowa_ioremap; io_workaround_inited = 1; } /* Register new bus to support workaround */ void __devinit iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops, int (*initfunc)(struct iowa_bus *, void *), void *data) { struct iowa_bus *bus; struct device_node *np = phb->dn; io_workaround_init(); if (iowa_bus_count >= IOWA_MAX_BUS) { pr_err("IOWA:Too many pci bridges, " "workarounds disabled for %s\n", np->full_name); return; } bus = &iowa_busses[iowa_bus_count]; bus->phb = phb; bus->ops = ops; bus->private = data; if (initfunc) if ((*initfunc)(bus, data)) return; iowa_bus_count++; pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name); }
gpl-2.0
JackpotClavin/3.4-Kernel-Tuna
drivers/leds/leds-atmel-pwm.c
4837
3664
#include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/io.h> #include <linux/atmel_pwm.h> #include <linux/slab.h> #include <linux/module.h> struct pwmled { struct led_classdev cdev; struct pwm_channel pwmc; struct gpio_led *desc; u32 mult; u8 active_low; }; /* * For simplicity, we use "brightness" as if it were a linear function * of PWM duty cycle. However, a logarithmic function of duty cycle is * probably a better match for perceived brightness: two is half as bright * as four, four is half as bright as eight, etc */ static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b) { struct pwmled *led; /* update the duty cycle for the *next* period */ led = container_of(cdev, struct pwmled, cdev); pwm_channel_writel(&led->pwmc, PWM_CUPD, led->mult * (unsigned) b); } /* * NOTE: we reuse the platform_data structure of GPIO leds, * but repurpose its "gpio" number as a PWM channel number. */ static int __devinit pwmled_probe(struct platform_device *pdev) { const struct gpio_led_platform_data *pdata; struct pwmled *leds; int i; int status; pdata = pdev->dev.platform_data; if (!pdata || pdata->num_leds < 1) return -ENODEV; leds = kcalloc(pdata->num_leds, sizeof(*leds), GFP_KERNEL); if (!leds) return -ENOMEM; for (i = 0; i < pdata->num_leds; i++) { struct pwmled *led = leds + i; const struct gpio_led *dat = pdata->leds + i; u32 tmp; led->cdev.name = dat->name; led->cdev.brightness = LED_OFF; led->cdev.brightness_set = pwmled_brightness; led->cdev.default_trigger = dat->default_trigger; led->active_low = dat->active_low; status = pwm_channel_alloc(dat->gpio, &led->pwmc); if (status < 0) goto err; /* * Prescale clock by 2^x, so PWM counts in low MHz. * Start each cycle with the LED active, so increasing * the duty cycle gives us more time on (== brighter). 
*/ tmp = 5; if (!led->active_low) tmp |= PWM_CPR_CPOL; pwm_channel_writel(&led->pwmc, PWM_CMR, tmp); /* * Pick a period so PWM cycles at 100+ Hz; and a multiplier * for scaling duty cycle: brightness * mult. */ tmp = (led->pwmc.mck / (1 << 5)) / 100; tmp /= 255; led->mult = tmp; pwm_channel_writel(&led->pwmc, PWM_CDTY, led->cdev.brightness * 255); pwm_channel_writel(&led->pwmc, PWM_CPRD, LED_FULL * tmp); pwm_channel_enable(&led->pwmc); /* Hand it over to the LED framework */ status = led_classdev_register(&pdev->dev, &led->cdev); if (status < 0) { pwm_channel_free(&led->pwmc); goto err; } } platform_set_drvdata(pdev, leds); return 0; err: if (i > 0) { for (i = i - 1; i >= 0; i--) { led_classdev_unregister(&leds[i].cdev); pwm_channel_free(&leds[i].pwmc); } } kfree(leds); return status; } static int __exit pwmled_remove(struct platform_device *pdev) { const struct gpio_led_platform_data *pdata; struct pwmled *leds; unsigned i; pdata = pdev->dev.platform_data; leds = platform_get_drvdata(pdev); for (i = 0; i < pdata->num_leds; i++) { struct pwmled *led = leds + i; led_classdev_unregister(&led->cdev); pwm_channel_free(&led->pwmc); } kfree(leds); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver pwmled_driver = { .driver = { .name = "leds-atmel-pwm", .owner = THIS_MODULE, }, /* REVISIT add suspend() and resume() methods */ .probe = pwmled_probe, .remove = __exit_p(pwmled_remove), }; module_platform_driver(pwmled_driver); MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:leds-atmel-pwm");
gpl-2.0
omerjerk/CodyKernel-hammerhead
arch/x86/platform/olpc/olpc-xo1-sci.c
5349
14729
/* * Support for OLPC XO-1 System Control Interrupts (SCI) * * Copyright (C) 2010 One Laptop per Child * Copyright (C) 2006 Red Hat, Inc. * Copyright (C) 2006 Advanced Micro Devices, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/cs5535.h> #include <linux/device.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/mfd/core.h> #include <linux/power_supply.h> #include <linux/suspend.h> #include <linux/workqueue.h> #include <asm/io.h> #include <asm/msr.h> #include <asm/olpc.h> #define DRV_NAME "olpc-xo1-sci" #define PFX DRV_NAME ": " static unsigned long acpi_base; static struct input_dev *power_button_idev; static struct input_dev *ebook_switch_idev; static struct input_dev *lid_switch_idev; static int sci_irq; static bool lid_open; static bool lid_inverted; static int lid_wake_mode; enum lid_wake_modes { LID_WAKE_ALWAYS, LID_WAKE_OPEN, LID_WAKE_CLOSE, }; static const char * const lid_wake_mode_names[] = { [LID_WAKE_ALWAYS] = "always", [LID_WAKE_OPEN] = "open", [LID_WAKE_CLOSE] = "close", }; static void battery_status_changed(void) { struct power_supply *psy = power_supply_get_by_name("olpc-battery"); if (psy) { power_supply_changed(psy); put_device(psy->dev); } } static void ac_status_changed(void) { struct power_supply *psy = power_supply_get_by_name("olpc-ac"); if (psy) { power_supply_changed(psy); put_device(psy->dev); } } /* Report current ebook switch state through input layer */ static void send_ebook_state(void) { unsigned char state; if (olpc_ec_cmd(EC_READ_EB_MODE, NULL, 0, &state, 1)) { pr_err(PFX "failed to get ebook state\n"); return; } input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state); input_sync(ebook_switch_idev); } 
static void flip_lid_inverter(void) { /* gpio is high; invert so we'll get l->h event interrupt */ if (lid_inverted) cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT); else cs5535_gpio_set(OLPC_GPIO_LID, GPIO_INPUT_INVERT); lid_inverted = !lid_inverted; } static void detect_lid_state(void) { /* * the edge detector hookup on the gpio inputs on the geode is * odd, to say the least. See http://dev.laptop.org/ticket/5703 * for details, but in a nutshell: we don't use the edge * detectors. instead, we make use of an anomoly: with the both * edge detectors turned off, we still get an edge event on a * positive edge transition. to take advantage of this, we use the * front-end inverter to ensure that that's the edge we're always * going to see next. */ int state; state = cs5535_gpio_isset(OLPC_GPIO_LID, GPIO_READ_BACK); lid_open = !state ^ !lid_inverted; /* x ^^ y */ if (!state) return; flip_lid_inverter(); } /* Report current lid switch state through input layer */ static void send_lid_state(void) { input_report_switch(lid_switch_idev, SW_LID, !lid_open); input_sync(lid_switch_idev); } static ssize_t lid_wake_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *mode = lid_wake_mode_names[lid_wake_mode]; return sprintf(buf, "%s\n", mode); } static ssize_t lid_wake_mode_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int i; for (i = 0; i < ARRAY_SIZE(lid_wake_mode_names); i++) { const char *mode = lid_wake_mode_names[i]; if (strlen(mode) != count || strncasecmp(mode, buf, count)) continue; lid_wake_mode = i; return count; } return -EINVAL; } static DEVICE_ATTR(lid_wake_mode, S_IWUSR | S_IRUGO, lid_wake_mode_show, lid_wake_mode_set); /* * Process all items in the EC's SCI queue. * * This is handled in a workqueue because olpc_ec_cmd can be slow (and * can even timeout). * * If propagate_events is false, the queue is drained without events being * generated for the interrupts. 
*/ static void process_sci_queue(bool propagate_events) { int r; u16 data; do { r = olpc_ec_sci_query(&data); if (r || !data) break; pr_debug(PFX "SCI 0x%x received\n", data); switch (data) { case EC_SCI_SRC_BATERR: case EC_SCI_SRC_BATSOC: case EC_SCI_SRC_BATTERY: case EC_SCI_SRC_BATCRIT: battery_status_changed(); break; case EC_SCI_SRC_ACPWR: ac_status_changed(); break; } if (data == EC_SCI_SRC_EBOOK && propagate_events) send_ebook_state(); } while (data); if (r) pr_err(PFX "Failed to clear SCI queue"); } static void process_sci_queue_work(struct work_struct *work) { process_sci_queue(true); } static DECLARE_WORK(sci_work, process_sci_queue_work); static irqreturn_t xo1_sci_intr(int irq, void *dev_id) { struct platform_device *pdev = dev_id; u32 sts; u32 gpe; sts = inl(acpi_base + CS5536_PM1_STS); outl(sts | 0xffff, acpi_base + CS5536_PM1_STS); gpe = inl(acpi_base + CS5536_PM_GPE0_STS); outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS); dev_dbg(&pdev->dev, "sts %x gpe %x\n", sts, gpe); if (sts & CS5536_PWRBTN_FLAG && !(sts & CS5536_WAK_FLAG)) { input_report_key(power_button_idev, KEY_POWER, 1); input_sync(power_button_idev); input_report_key(power_button_idev, KEY_POWER, 0); input_sync(power_button_idev); } if (gpe & CS5536_GPIOM7_PME_FLAG) { /* EC GPIO */ cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS); schedule_work(&sci_work); } cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS); cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS); detect_lid_state(); send_lid_state(); return IRQ_HANDLED; } static int xo1_sci_suspend(struct platform_device *pdev, pm_message_t state) { if (device_may_wakeup(&power_button_idev->dev)) olpc_xo1_pm_wakeup_set(CS5536_PM_PWRBTN); else olpc_xo1_pm_wakeup_clear(CS5536_PM_PWRBTN); if (device_may_wakeup(&ebook_switch_idev->dev)) olpc_ec_wakeup_set(EC_SCI_SRC_EBOOK); else olpc_ec_wakeup_clear(EC_SCI_SRC_EBOOK); if (!device_may_wakeup(&lid_switch_idev->dev)) { cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE); } else if 
((lid_open && lid_wake_mode == LID_WAKE_OPEN) || (!lid_open && lid_wake_mode == LID_WAKE_CLOSE)) { flip_lid_inverter(); /* we may have just caused an event */ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS); cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS); cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE); } return 0; } static int xo1_sci_resume(struct platform_device *pdev) { /* * We don't know what may have happened while we were asleep. * Reestablish our lid setup so we're sure to catch all transitions. */ detect_lid_state(); send_lid_state(); cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE); /* Enable all EC events */ olpc_ec_mask_write(EC_SCI_SRC_ALL); /* Power/battery status might have changed too */ battery_status_changed(); ac_status_changed(); return 0; } static int __devinit setup_sci_interrupt(struct platform_device *pdev) { u32 lo, hi; u32 sts; int r; rdmsr(0x51400020, lo, hi); sci_irq = (lo >> 20) & 15; if (sci_irq) { dev_info(&pdev->dev, "SCI is mapped to IRQ %d\n", sci_irq); } else { /* Zero means masked */ dev_info(&pdev->dev, "SCI unmapped. 
Mapping to IRQ 3\n"); sci_irq = 3; lo |= 0x00300000; wrmsrl(0x51400020, lo); } /* Select level triggered in PIC */ if (sci_irq < 8) { lo = inb(CS5536_PIC_INT_SEL1); lo |= 1 << sci_irq; outb(lo, CS5536_PIC_INT_SEL1); } else { lo = inb(CS5536_PIC_INT_SEL2); lo |= 1 << (sci_irq - 8); outb(lo, CS5536_PIC_INT_SEL2); } /* Enable SCI from power button, and clear pending interrupts */ sts = inl(acpi_base + CS5536_PM1_STS); outl((CS5536_PM_PWRBTN << 16) | 0xffff, acpi_base + CS5536_PM1_STS); r = request_irq(sci_irq, xo1_sci_intr, 0, DRV_NAME, pdev); if (r) dev_err(&pdev->dev, "can't request interrupt\n"); return r; } static int __devinit setup_ec_sci(void) { int r; r = gpio_request(OLPC_GPIO_ECSCI, "OLPC-ECSCI"); if (r) return r; gpio_direction_input(OLPC_GPIO_ECSCI); /* Clear pending EC SCI events */ cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS); cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_POSITIVE_EDGE_STS); /* * Enable EC SCI events, and map them to both a PME and the SCI * interrupt. * * Ordinarily, in addition to functioning as GPIOs, Geode GPIOs can * be mapped to regular interrupts *or* Geode-specific Power * Management Events (PMEs) - events that bring the system out of * suspend. In this case, we want both of those things - the system * wakeup, *and* the ability to get an interrupt when an event occurs. * * To achieve this, we map the GPIO to a PME, and then we use one * of the many generic knobs on the CS5535 PIC to additionally map the * PME to the regular SCI interrupt line. 
*/ cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_EVENTS_ENABLE); /* Set the SCI to cause a PME event on group 7 */ cs5535_gpio_setup_event(OLPC_GPIO_ECSCI, 7, 1); /* And have group 7 also fire the SCI interrupt */ cs5535_pic_unreqz_select_high(7, sci_irq); return 0; } static void free_ec_sci(void) { gpio_free(OLPC_GPIO_ECSCI); } static int __devinit setup_lid_events(void) { int r; r = gpio_request(OLPC_GPIO_LID, "OLPC-LID"); if (r) return r; gpio_direction_input(OLPC_GPIO_LID); cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT); lid_inverted = 0; /* Clear edge detection and event enable for now */ cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE); cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_EN); cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_EN); cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS); cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS); /* Set the LID to cause an PME event on group 6 */ cs5535_gpio_setup_event(OLPC_GPIO_LID, 6, 1); /* Set PME group 6 to fire the SCI interrupt */ cs5535_gpio_set_irq(6, sci_irq); /* Enable the event */ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE); return 0; } static void free_lid_events(void) { gpio_free(OLPC_GPIO_LID); } static int __devinit setup_power_button(struct platform_device *pdev) { int r; power_button_idev = input_allocate_device(); if (!power_button_idev) return -ENOMEM; power_button_idev->name = "Power Button"; power_button_idev->phys = DRV_NAME "/input0"; set_bit(EV_KEY, power_button_idev->evbit); set_bit(KEY_POWER, power_button_idev->keybit); power_button_idev->dev.parent = &pdev->dev; device_init_wakeup(&power_button_idev->dev, 1); r = input_register_device(power_button_idev); if (r) { dev_err(&pdev->dev, "failed to register power button: %d\n", r); input_free_device(power_button_idev); } return r; } static void free_power_button(void) { input_unregister_device(power_button_idev); input_free_device(power_button_idev); } static int __devinit setup_ebook_switch(struct platform_device 
*pdev) { int r; ebook_switch_idev = input_allocate_device(); if (!ebook_switch_idev) return -ENOMEM; ebook_switch_idev->name = "EBook Switch"; ebook_switch_idev->phys = DRV_NAME "/input1"; set_bit(EV_SW, ebook_switch_idev->evbit); set_bit(SW_TABLET_MODE, ebook_switch_idev->swbit); ebook_switch_idev->dev.parent = &pdev->dev; device_set_wakeup_capable(&ebook_switch_idev->dev, true); r = input_register_device(ebook_switch_idev); if (r) { dev_err(&pdev->dev, "failed to register ebook switch: %d\n", r); input_free_device(ebook_switch_idev); } return r; } static void free_ebook_switch(void) { input_unregister_device(ebook_switch_idev); input_free_device(ebook_switch_idev); } static int __devinit setup_lid_switch(struct platform_device *pdev) { int r; lid_switch_idev = input_allocate_device(); if (!lid_switch_idev) return -ENOMEM; lid_switch_idev->name = "Lid Switch"; lid_switch_idev->phys = DRV_NAME "/input2"; set_bit(EV_SW, lid_switch_idev->evbit); set_bit(SW_LID, lid_switch_idev->swbit); lid_switch_idev->dev.parent = &pdev->dev; device_set_wakeup_capable(&lid_switch_idev->dev, true); r = input_register_device(lid_switch_idev); if (r) { dev_err(&pdev->dev, "failed to register lid switch: %d\n", r); goto err_register; } r = device_create_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode); if (r) { dev_err(&pdev->dev, "failed to create wake mode attr: %d\n", r); goto err_create_attr; } return 0; err_create_attr: input_unregister_device(lid_switch_idev); err_register: input_free_device(lid_switch_idev); return r; } static void free_lid_switch(void) { device_remove_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode); input_unregister_device(lid_switch_idev); input_free_device(lid_switch_idev); } static int __devinit xo1_sci_probe(struct platform_device *pdev) { struct resource *res; int r; /* don't run on non-XOs */ if (!machine_is_olpc()) return -ENODEV; r = mfd_cell_enable(pdev); if (r) return r; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) { 
dev_err(&pdev->dev, "can't fetch device resource info\n"); return -EIO; } acpi_base = res->start; r = setup_power_button(pdev); if (r) return r; r = setup_ebook_switch(pdev); if (r) goto err_ebook; r = setup_lid_switch(pdev); if (r) goto err_lid; r = setup_lid_events(); if (r) goto err_lidevt; r = setup_ec_sci(); if (r) goto err_ecsci; /* Enable PME generation for EC-generated events */ outl(CS5536_GPIOM6_PME_EN | CS5536_GPIOM7_PME_EN, acpi_base + CS5536_PM_GPE0_EN); /* Clear pending events */ outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS); process_sci_queue(false); /* Initial sync */ send_ebook_state(); detect_lid_state(); send_lid_state(); r = setup_sci_interrupt(pdev); if (r) goto err_sci; /* Enable all EC events */ olpc_ec_mask_write(EC_SCI_SRC_ALL); return r; err_sci: free_ec_sci(); err_ecsci: free_lid_events(); err_lidevt: free_lid_switch(); err_lid: free_ebook_switch(); err_ebook: free_power_button(); return r; } static int __devexit xo1_sci_remove(struct platform_device *pdev) { mfd_cell_disable(pdev); free_irq(sci_irq, pdev); cancel_work_sync(&sci_work); free_ec_sci(); free_lid_events(); free_lid_switch(); free_ebook_switch(); free_power_button(); acpi_base = 0; return 0; } static struct platform_driver xo1_sci_driver = { .driver = { .name = "olpc-xo1-sci-acpi", }, .probe = xo1_sci_probe, .remove = __devexit_p(xo1_sci_remove), .suspend = xo1_sci_suspend, .resume = xo1_sci_resume, }; static int __init xo1_sci_init(void) { return platform_driver_register(&xo1_sci_driver); } arch_initcall(xo1_sci_init);
gpl-2.0
TJAndHisStudents/sting-linux
net/core/gen_estimator.c
6885
8636
/* * net/sched/gen_estimator.c Simple rate estimator. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Changes: * Jamal Hadi Salim - moved it to net/core and reshulfed * names to make it usable in general net subsystem. */ #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/rbtree.h> #include <linux/slab.h> #include <net/sock.h> #include <net/gen_stats.h> /* This code is NOT intended to be used for statistics collection, its purpose is to provide a base for statistical multiplexing for controlled load service. If you need only statistics, run a user level daemon which periodically reads byte counters. Unfortunately, rate estimation is not a very easy task. F.e. I did not find a simple way to estimate the current peak rate and even failed to formulate the problem 8)8) So I preferred not to built an estimator into the scheduler, but run this task separately. Ideally, it should be kernel thread(s), but for now it runs from timers, which puts apparent top bounds on the number of rated flows, has minimal overhead on small, but is enough to handle controlled load service, sets of aggregates. We measure rate over A=(1<<interval) seconds and evaluate EWMA: avrate = avrate*(1-W) + rate*W where W is chosen as negative power of 2: W = 2^(-ewma_log) The resulting time constant is: T = A/(-ln(1-W)) NOTES. 
* avbps is scaled by 2^5, avpps is scaled by 2^10. * both values are reported as 32 bit unsigned values. bps can overflow for fast links : max speed being 34360Mbit/sec * Minimal interval is HZ/4=250msec (it is the greatest common divisor for HZ=100 and HZ=1024 8)), maximal interval is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals are too expensive, longer ones can be implemented at user level painlessly. */ #define EST_MAX_INTERVAL 5 struct gen_estimator { struct list_head list; struct gnet_stats_basic_packed *bstats; struct gnet_stats_rate_est *rate_est; spinlock_t *stats_lock; int ewma_log; u64 last_bytes; u64 avbps; u32 last_packets; u32 avpps; struct rcu_head e_rcu; struct rb_node node; }; struct gen_estimator_head { struct timer_list timer; struct list_head list; }; static struct gen_estimator_head elist[EST_MAX_INTERVAL+1]; /* Protects against NULL dereference */ static DEFINE_RWLOCK(est_lock); /* Protects against soft lockup during large deletion */ static struct rb_root est_root = RB_ROOT; static DEFINE_SPINLOCK(est_tree_lock); static void est_timer(unsigned long arg) { int idx = (int)arg; struct gen_estimator *e; rcu_read_lock(); list_for_each_entry_rcu(e, &elist[idx].list, list) { u64 nbytes; u64 brate; u32 npackets; u32 rate; spin_lock(e->stats_lock); read_lock(&est_lock); if (e->bstats == NULL) goto skip; nbytes = e->bstats->bytes; npackets = e->bstats->packets; brate = (nbytes - e->last_bytes)<<(7 - idx); e->last_bytes = nbytes; e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); e->rate_est->bps = (e->avbps+0xF)>>5; rate = (npackets - e->last_packets)<<(12 - idx); e->last_packets = npackets; e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); e->rate_est->pps = (e->avpps+0x1FF)>>10; skip: read_unlock(&est_lock); spin_unlock(e->stats_lock); } if (!list_empty(&elist[idx].list)) mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); rcu_read_unlock(); } static void gen_add_node(struct gen_estimator *est) { struct rb_node 
**p = &est_root.rb_node, *parent = NULL; while (*p) { struct gen_estimator *e; parent = *p; e = rb_entry(parent, struct gen_estimator, node); if (est->bstats > e->bstats) p = &parent->rb_right; else p = &parent->rb_left; } rb_link_node(&est->node, parent, p); rb_insert_color(&est->node, &est_root); } static struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats, const struct gnet_stats_rate_est *rate_est) { struct rb_node *p = est_root.rb_node; while (p) { struct gen_estimator *e; e = rb_entry(p, struct gen_estimator, node); if (bstats > e->bstats) p = p->rb_right; else if (bstats < e->bstats || rate_est != e->rate_est) p = p->rb_left; else return e; } return NULL; } /** * gen_new_estimator - create a new rate estimator * @bstats: basic statistics * @rate_est: rate estimator statistics * @stats_lock: statistics lock * @opt: rate estimator configuration TLV * * Creates a new rate estimator with &bstats as source and &rate_est * as destination. A new timer with the interval specified in the * configuration TLV is created. Upon each interval, the latest statistics * will be read from &bstats and the estimated rate will be stored in * &rate_est with the statistics lock grabed during this period. * * Returns 0 on success or a negative error code. 
* */ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct nlattr *opt) { struct gen_estimator *est; struct gnet_estimator *parm = nla_data(opt); int idx; if (nla_len(opt) < sizeof(*parm)) return -EINVAL; if (parm->interval < -2 || parm->interval > 3) return -EINVAL; est = kzalloc(sizeof(*est), GFP_KERNEL); if (est == NULL) return -ENOBUFS; idx = parm->interval + 2; est->bstats = bstats; est->rate_est = rate_est; est->stats_lock = stats_lock; est->ewma_log = parm->ewma_log; est->last_bytes = bstats->bytes; est->avbps = rate_est->bps<<5; est->last_packets = bstats->packets; est->avpps = rate_est->pps<<10; spin_lock_bh(&est_tree_lock); if (!elist[idx].timer.function) { INIT_LIST_HEAD(&elist[idx].list); setup_timer(&elist[idx].timer, est_timer, idx); } if (list_empty(&elist[idx].list)) mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); list_add_rcu(&est->list, &elist[idx].list); gen_add_node(est); spin_unlock_bh(&est_tree_lock); return 0; } EXPORT_SYMBOL(gen_new_estimator); /** * gen_kill_estimator - remove a rate estimator * @bstats: basic statistics * @rate_est: rate estimator statistics * * Removes the rate estimator specified by &bstats and &rate_est. 
*
 * Note : Caller should respect an RCU grace period before freeing stats_lock
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		/* mark the entry dead under est_lock so a concurrent
		 * est_timer() run sees bstats == NULL and skips it */
		write_lock(&est_lock);
		e->bstats = NULL;
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		/* defer the free for an RCU grace period: est_timer()
		 * walks the list under rcu_read_lock() */
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_rate_est *rate_est,
			  spinlock_t *stats_lock, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est *rate_est)
{
	bool res;

	ASSERT_RTNL();

	spin_lock_bh(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock_bh(&est_tree_lock);

	return res;
}
EXPORT_SYMBOL(gen_estimator_active);
gpl-2.0
Fusion-Devices/kernel_cyanogen_msm8916
net/core/gen_estimator.c
6885
8636
/* * net/sched/gen_estimator.c Simple rate estimator. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Changes: * Jamal Hadi Salim - moved it to net/core and reshulfed * names to make it usable in general net subsystem. */ #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/rbtree.h> #include <linux/slab.h> #include <net/sock.h> #include <net/gen_stats.h> /* This code is NOT intended to be used for statistics collection, its purpose is to provide a base for statistical multiplexing for controlled load service. If you need only statistics, run a user level daemon which periodically reads byte counters. Unfortunately, rate estimation is not a very easy task. F.e. I did not find a simple way to estimate the current peak rate and even failed to formulate the problem 8)8) So I preferred not to built an estimator into the scheduler, but run this task separately. Ideally, it should be kernel thread(s), but for now it runs from timers, which puts apparent top bounds on the number of rated flows, has minimal overhead on small, but is enough to handle controlled load service, sets of aggregates. We measure rate over A=(1<<interval) seconds and evaluate EWMA: avrate = avrate*(1-W) + rate*W where W is chosen as negative power of 2: W = 2^(-ewma_log) The resulting time constant is: T = A/(-ln(1-W)) NOTES. 
* avbps is scaled by 2^5, avpps is scaled by 2^10. * both values are reported as 32 bit unsigned values. bps can overflow for fast links : max speed being 34360Mbit/sec * Minimal interval is HZ/4=250msec (it is the greatest common divisor for HZ=100 and HZ=1024 8)), maximal interval is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals are too expensive, longer ones can be implemented at user level painlessly. */ #define EST_MAX_INTERVAL 5 struct gen_estimator { struct list_head list; struct gnet_stats_basic_packed *bstats; struct gnet_stats_rate_est *rate_est; spinlock_t *stats_lock; int ewma_log; u64 last_bytes; u64 avbps; u32 last_packets; u32 avpps; struct rcu_head e_rcu; struct rb_node node; }; struct gen_estimator_head { struct timer_list timer; struct list_head list; }; static struct gen_estimator_head elist[EST_MAX_INTERVAL+1]; /* Protects against NULL dereference */ static DEFINE_RWLOCK(est_lock); /* Protects against soft lockup during large deletion */ static struct rb_root est_root = RB_ROOT; static DEFINE_SPINLOCK(est_tree_lock); static void est_timer(unsigned long arg) { int idx = (int)arg; struct gen_estimator *e; rcu_read_lock(); list_for_each_entry_rcu(e, &elist[idx].list, list) { u64 nbytes; u64 brate; u32 npackets; u32 rate; spin_lock(e->stats_lock); read_lock(&est_lock); if (e->bstats == NULL) goto skip; nbytes = e->bstats->bytes; npackets = e->bstats->packets; brate = (nbytes - e->last_bytes)<<(7 - idx); e->last_bytes = nbytes; e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); e->rate_est->bps = (e->avbps+0xF)>>5; rate = (npackets - e->last_packets)<<(12 - idx); e->last_packets = npackets; e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); e->rate_est->pps = (e->avpps+0x1FF)>>10; skip: read_unlock(&est_lock); spin_unlock(e->stats_lock); } if (!list_empty(&elist[idx].list)) mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); rcu_read_unlock(); } static void gen_add_node(struct gen_estimator *est) { struct rb_node 
**p = &est_root.rb_node, *parent = NULL; while (*p) { struct gen_estimator *e; parent = *p; e = rb_entry(parent, struct gen_estimator, node); if (est->bstats > e->bstats) p = &parent->rb_right; else p = &parent->rb_left; } rb_link_node(&est->node, parent, p); rb_insert_color(&est->node, &est_root); } static struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats, const struct gnet_stats_rate_est *rate_est) { struct rb_node *p = est_root.rb_node; while (p) { struct gen_estimator *e; e = rb_entry(p, struct gen_estimator, node); if (bstats > e->bstats) p = p->rb_right; else if (bstats < e->bstats || rate_est != e->rate_est) p = p->rb_left; else return e; } return NULL; } /** * gen_new_estimator - create a new rate estimator * @bstats: basic statistics * @rate_est: rate estimator statistics * @stats_lock: statistics lock * @opt: rate estimator configuration TLV * * Creates a new rate estimator with &bstats as source and &rate_est * as destination. A new timer with the interval specified in the * configuration TLV is created. Upon each interval, the latest statistics * will be read from &bstats and the estimated rate will be stored in * &rate_est with the statistics lock grabed during this period. * * Returns 0 on success or a negative error code. 
* */ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct nlattr *opt) { struct gen_estimator *est; struct gnet_estimator *parm = nla_data(opt); int idx; if (nla_len(opt) < sizeof(*parm)) return -EINVAL; if (parm->interval < -2 || parm->interval > 3) return -EINVAL; est = kzalloc(sizeof(*est), GFP_KERNEL); if (est == NULL) return -ENOBUFS; idx = parm->interval + 2; est->bstats = bstats; est->rate_est = rate_est; est->stats_lock = stats_lock; est->ewma_log = parm->ewma_log; est->last_bytes = bstats->bytes; est->avbps = rate_est->bps<<5; est->last_packets = bstats->packets; est->avpps = rate_est->pps<<10; spin_lock_bh(&est_tree_lock); if (!elist[idx].timer.function) { INIT_LIST_HEAD(&elist[idx].list); setup_timer(&elist[idx].timer, est_timer, idx); } if (list_empty(&elist[idx].list)) mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); list_add_rcu(&est->list, &elist[idx].list); gen_add_node(est); spin_unlock_bh(&est_tree_lock); return 0; } EXPORT_SYMBOL(gen_new_estimator); /** * gen_kill_estimator - remove a rate estimator * @bstats: basic statistics * @rate_est: rate estimator statistics * * Removes the rate estimator specified by &bstats and &rate_est. 
*
 * Note : Caller should respect an RCU grace period before freeing stats_lock
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		/* mark the entry dead under est_lock so a concurrent
		 * est_timer() run sees bstats == NULL and skips it */
		write_lock(&est_lock);
		e->bstats = NULL;
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		/* defer the free for an RCU grace period: est_timer()
		 * walks the list under rcu_read_lock() */
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_rate_est *rate_est,
			  spinlock_t *stats_lock, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est *rate_est)
{
	bool res;

	ASSERT_RTNL();

	spin_lock_bh(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock_bh(&est_tree_lock);

	return res;
}
EXPORT_SYMBOL(gen_estimator_active);
gpl-2.0
GuneetAtwal/kernel_h1s
net/wireless/ethtool.c
8421
2109
#include <linux/utsname.h> #include <net/cfg80211.h> #include "core.h" #include "ethtool.h" static void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct wireless_dev *wdev = dev->ieee80211_ptr; strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name, sizeof(info->driver)); strlcpy(info->version, init_utsname()->release, sizeof(info->version)); if (wdev->wiphy->fw_version[0]) strncpy(info->fw_version, wdev->wiphy->fw_version, sizeof(info->fw_version)); else strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)), sizeof(info->bus_info)); } static int cfg80211_get_regs_len(struct net_device *dev) { /* For now, return 0... */ return 0; } static void cfg80211_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *data) { struct wireless_dev *wdev = dev->ieee80211_ptr; regs->version = wdev->wiphy->hw_version; regs->len = 0; } static void cfg80211_get_ringparam(struct net_device *dev, struct ethtool_ringparam *rp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); memset(rp, 0, sizeof(*rp)); if (rdev->ops->get_ringparam) rdev->ops->get_ringparam(wdev->wiphy, &rp->tx_pending, &rp->tx_max_pending, &rp->rx_pending, &rp->rx_max_pending); } static int cfg80211_set_ringparam(struct net_device *dev, struct ethtool_ringparam *rp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0) return -EINVAL; if (rdev->ops->set_ringparam) return rdev->ops->set_ringparam(wdev->wiphy, rp->tx_pending, rp->rx_pending); return -ENOTSUPP; } const struct ethtool_ops cfg80211_ethtool_ops = { .get_drvinfo = cfg80211_get_drvinfo, .get_regs_len = cfg80211_get_regs_len, .get_regs = cfg80211_get_regs, .get_link = ethtool_op_get_link, .get_ringparam = cfg80211_get_ringparam, .set_ringparam = 
cfg80211_set_ringparam, };
gpl-2.0
brymaster5000/m7-GPE-L
tools/perf/arch/sh/util/dwarf-regs.c
8677
1317
/*
 * Mapping of DWARF debug register numbers into register names.
 *
 * Copyright (C) 2010 Matt Fleming <matt@console-pimps.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

/* <stddef.h> provides NULL; the glibc-internal <libio.h> was only used for
 * that and no longer exists in modern glibc */
#include <stddef.h>
#include <dwarf-regs.h>

/*
 * Generic dwarf analysis helpers
 */

/* Number of entries in sh_regs_table; valid indices are 0..SH_MAX_REGS-1 */
#define SH_MAX_REGS 18
const char *sh_regs_table[SH_MAX_REGS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
	"pc", "pr",
};

/*
 * Return architecture dependent register string (for kprobe-tracer),
 * or NULL when the DWARF register number is out of range.
 */
const char *get_arch_regstr(unsigned int n)
{
	/* must be a strict compare: n == SH_MAX_REGS would read one past
	 * the end of the table */
	return (n < SH_MAX_REGS) ? sh_regs_table[n] : NULL;
}
gpl-2.0
clumsy1991/M8_GPE_Kernel
arch/mn10300/unit-asb2303/unit-init.c
12261
1693
/* ASB2303 initialisation * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/device.h> #include <asm/io.h> #include <asm/setup.h> #include <asm/processor.h> #include <asm/irq.h> #include <asm/intctl-regs.h> /* * initialise some of the unit hardware before gdbstub is set up */ asmlinkage void __init unit_init(void) { /* set up the external interrupts */ SET_XIRQ_TRIGGER(0, XIRQ_TRIGGER_HILEVEL); SET_XIRQ_TRIGGER(2, XIRQ_TRIGGER_LOWLEVEL); SET_XIRQ_TRIGGER(3, XIRQ_TRIGGER_HILEVEL); SET_XIRQ_TRIGGER(4, XIRQ_TRIGGER_LOWLEVEL); SET_XIRQ_TRIGGER(5, XIRQ_TRIGGER_LOWLEVEL); #ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL)); #endif #ifdef CONFIG_ETHERNET_IRQ_LEVEL set_intr_level(XIRQ3, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL)); #endif } /* * initialise the rest of the unit hardware after gdbstub is ready */ void __init unit_setup(void) { } /* * initialise the external interrupts used by a unit of this type */ void __init unit_init_IRQ(void) { unsigned int extnum; for (extnum = 0; extnum < NR_XIRQS; extnum++) { switch (GET_XIRQ_TRIGGER(extnum)) { case XIRQ_TRIGGER_HILEVEL: case XIRQ_TRIGGER_LOWLEVEL: mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum)); break; default: break; } } }
gpl-2.0
alianmohammad/gem5-linux-kernel
arch/mn10300/unit-asb2303/unit-init.c
12261
1693
/* ASB2303 initialisation * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/device.h> #include <asm/io.h> #include <asm/setup.h> #include <asm/processor.h> #include <asm/irq.h> #include <asm/intctl-regs.h> /* * initialise some of the unit hardware before gdbstub is set up */ asmlinkage void __init unit_init(void) { /* set up the external interrupts */ SET_XIRQ_TRIGGER(0, XIRQ_TRIGGER_HILEVEL); SET_XIRQ_TRIGGER(2, XIRQ_TRIGGER_LOWLEVEL); SET_XIRQ_TRIGGER(3, XIRQ_TRIGGER_HILEVEL); SET_XIRQ_TRIGGER(4, XIRQ_TRIGGER_LOWLEVEL); SET_XIRQ_TRIGGER(5, XIRQ_TRIGGER_LOWLEVEL); #ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL)); #endif #ifdef CONFIG_ETHERNET_IRQ_LEVEL set_intr_level(XIRQ3, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL)); #endif } /* * initialise the rest of the unit hardware after gdbstub is ready */ void __init unit_setup(void) { } /* * initialise the external interrupts used by a unit of this type */ void __init unit_init_IRQ(void) { unsigned int extnum; for (extnum = 0; extnum < NR_XIRQS; extnum++) { switch (GET_XIRQ_TRIGGER(extnum)) { case XIRQ_TRIGGER_HILEVEL: case XIRQ_TRIGGER_LOWLEVEL: mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum)); break; default: break; } } }
gpl-2.0
Hashcode/android_kernel_samsung-jf-common
drivers/media/isdbtmm/tuner_drv.c
230
56835
/*
 *
 * drivers/media/isdbtmm/tuner_drv.c
 *
 * MM Tuner Driver
 *
 * Copyright (C) (2013, Samsung Electronics)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/******************************************************************************
 * include
 ******************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
#include <linux/mutex.h>
#endif

#include "tuner_drv.h"
#include "tuner.h"

#if defined(CONFIG_TMM_ANT_DET)
#include "../../../arch/arm/mach-msm/board-8064.h"
#include <mach/jf_dcm-gpio.h>
/* for delay(sleep) */
#include <linux/delay.h>
/* for mutex */
#include <linux/mutex.h>
/*using copy to user */
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <asm/mach/irq.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/wakelock.h>
#include <linux/input.h>
#endif

#if defined(CONFIG_TMM_CHG_CTRL)
#include <mach/isdbtmm_pdata.h>
#endif

/******************************************************************************
 * data
 ******************************************************************************/
/* Mmap Address */
void *mem_p;

/* poll control */
wait_queue_head_t g_tuner_poll_wait_queue;	/* poll queue */
spinlock_t        g_tuner_lock;			/* spin lock */
unsigned long     g_tuner_wakeup_flag;		/* poll wait flag */

/* interrupt factor (accumulated from I2C reads in tuner_kernel_thread) */
unsigned char     g_tuner_intcnd_f;		/* INTCNDD_F register */
unsigned char     g_tuner_intcnd_s;		/* INTCNDD_S register */
unsigned char     g_tuner_intst_f;
unsigned char     g_tuner_intst_s;

/* kernel_thread */
struct task_struct *g_tuner_kthread_id;		/* identifier */
u32                 g_tuner_kthread_flag;	/* flag */
wait_queue_head_t   g_tuner_kthread_wait_queue;	/* wait queue */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
/* mutex */
struct mutex g_tuner_mutex;			/* exclusive control */
#endif

#if defined(CONFIG_TMM_ANT_DET)
#define DPRINTK printk
static struct wake_lock tdmb_ant_wlock;
#endif

#if defined(CONFIG_TMM_CHG_CTRL)
/* charging-control work items, polled on a fixed period */
static struct wake_lock tmm_chg_ctrl_wlock;
static struct work_struct tmm_chg_ctrl_work;
static struct delayed_work tmm_chg_ctrl_polling_work;
static struct isdbtmm_platform_data tmm_chg_ctrl_func;
static struct workqueue_struct *tmm_chg_wqueue;
#define TMM_CHG_CTRL_POLLING_TIME 20	/* unit: second */
#define TMM_CHG_CTRL_START_DELAY  3	/* unit: second */
#endif

/******************************************************************************
 * function
 ******************************************************************************/
/* character-device entry points (wired into TunerFileOperations below) */
static ssize_t tuner_module_entry_read( struct file* FIle,
					char* Buffer,
					size_t Count,
					loff_t* OffsetPosition );

static ssize_t tuner_module_entry_write( struct file* FIle,
					 const char* Buffer,
					 size_t Count,
					 loff_t* OffsetPosition );

static unsigned int tuner_module_entry_poll( struct file *file,
					     struct poll_table_struct *poll_tbl );

/* ioctl prototype differs across kernel versions (BKL ioctl vs unlocked) */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
static int tuner_module_entry_ioctl( struct inode* Inode,
				     struct file* FIle,
				     unsigned int uCommand,
				     unsigned long uArgument );
#else  /* LINUX_VERSION_CODE */
static long tuner_module_entry_ioctl( struct file *file,
				      unsigned int uCommand,
				      unsigned long uArgument );
#endif /* LINUX_VERSION_CODE */

static int tuner_module_entry_open( struct inode* Inode,
				    struct file* FIle );

static int tuner_module_entry_close( struct inode* Inode,
				     struct file* FIle );

static int tuner_probe( struct platform_device *pdev );

static int tuner_remove( struct platform_device *pdev );

/* Add start 20121219 No.1 */
/* one I2C client per tuner slave address (main1/main2/sub) */
static int tmm_i2c_main1_probe(struct i2c_client *client,\
	const struct i2c_device_id *devid);
static int tmm_i2c_main2_probe(struct i2c_client *client,\
	const struct i2c_device_id *devid);
static int tmm_i2c_sub_probe(struct i2c_client *client,\
	const struct i2c_device_id *devid);
static int tmm_i2c_main1_remove(struct i2c_client *client);
static int tmm_i2c_main2_remove(struct i2c_client *client);
static int tmm_i2c_sub_remove(struct i2c_client *client);
/* Add end 20121219 No.1 */

/* entry point */
static struct file_operations TunerFileOperations = {
	.owner   = THIS_MODULE,
	.read    = tuner_module_entry_read,
	.write   = tuner_module_entry_write,
	.poll    = tuner_module_entry_poll,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
	.ioctl          = tuner_module_entry_ioctl,
#else  /* LINUX_VERSION_CODE */
	.unlocked_ioctl = tuner_module_entry_ioctl,
#endif /* LINUX_VERSION_CODE */
	.open    = tuner_module_entry_open,
	.release = tuner_module_entry_close
};

static struct platform_driver mmtuner_driver = {
	.probe  = tuner_probe,
	.remove = tuner_remove,
	.driver = {
		.name  = "tmmi2c",
		.owner = THIS_MODULE,
	}
};

/* Add start 20121219 No.1 */
/* clients saved by the i2c probe callbacks for later transfers */
struct i2c_client *i2c_main1_client;
struct i2c_client *i2c_main2_client;
struct i2c_client *i2c_sub_client;

static const struct i2c_device_id smtej114_i2c_main1_id[] = {
	{"smtej113_main1", 0},
	{}
};
static const struct i2c_device_id smtej114_i2c_main2_id[] = {
	{"smtej113_main2", 0},
	{}
};
static const struct i2c_device_id smtej114_i2c_sub_id[] = {
	{"smtej113_sub", 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, smtej114_i2c_main1_id);
MODULE_DEVICE_TABLE(i2c, smtej114_i2c_main2_id); MODULE_DEVICE_TABLE(i2c, smtej114_i2c_sub_id); static struct i2c_driver smtej113_i2c_main1_driver = { .probe = tmm_i2c_main1_probe, .remove = tmm_i2c_main1_remove, .driver = { .name = "smtej113_main1", .owner = THIS_MODULE, }, .id_table = smtej114_i2c_main1_id, }; static struct i2c_driver smtej113_i2c_main2_driver = { .probe = tmm_i2c_main2_probe, .remove = tmm_i2c_main2_remove, .driver = { .name = "smtej113_main2", .owner = THIS_MODULE, }, .id_table = smtej114_i2c_main2_id, }; static struct i2c_driver smtej113_i2c_sub_driver = { .probe = tmm_i2c_sub_probe, .remove = tmm_i2c_sub_remove, .driver = { .name = "smtej113_sub", .owner = THIS_MODULE, }, .id_table = smtej114_i2c_sub_id, }; /* Add start 20121219 No.1 */ static struct platform_device *mmtuner_device; static struct class *device_class; /* Add Start 20121218 No_1 */ static unsigned long open_cnt = 0; /* OPEN counter */ static unsigned long moni_cnt = 0; /* Monitor counter */ /* Add End 20121218 No_1 */ #ifndef TUNER_CONFIG_IRQ_PC_LINUX irqreturn_t tuner_interrupt( int irq, void *dev_id ); #else /* TUNER_CONFIG_IRQ_PC_LINUX */ int tuner_interrupt( void ); #endif /* TUNER_CONFIG_IRQ_PC_LINUX */ /****************************************************************************** * code area ******************************************************************************/ #if defined(CONFIG_TMM_ANT_DET) enum { TDMB_ANT_OPEN = 0, TDMB_ANT_CLOSE, TDMB_ANT_UNKNOWN, }; enum { TDMB_ANT_DET_LOW = 0, TDMB_ANT_DET_HIGH, }; static struct input_dev *tdmb_ant_input; static int tdmb_check_ant; static int ant_prev_status; #define TDMB_ANT_WAIT_INIT_TIME 500000 /* us */ #define TDMB_ANT_CHECK_DURATION 50000 /* us */ #define TDMB_ANT_CHECK_COUNT 10 #define TDMB_ANT_WLOCK_TIMEOUT \ ((TDMB_ANT_CHECK_DURATION * TDMB_ANT_CHECK_COUNT * 2) / 500000) static int tdmb_ant_det_check_value(void) { int loop = 0, cur_val = 0; int ret = TDMB_ANT_UNKNOWN; tdmb_check_ant = 1; DPRINTK("%s 
ant_prev_status(%d)\n", __func__, ant_prev_status); usleep_range(TDMB_ANT_WAIT_INIT_TIME, TDMB_ANT_WAIT_INIT_TIME); /* wait initial noise */ for (loop = 0; loop < TDMB_ANT_CHECK_COUNT; loop++) { usleep_range(TDMB_ANT_CHECK_DURATION, TDMB_ANT_CHECK_DURATION); cur_val = gpio_get_value_cansleep(GPIO_TMM_ANT_DET); if (ant_prev_status == cur_val) break; } if (loop == TDMB_ANT_CHECK_COUNT) { if (ant_prev_status == TDMB_ANT_DET_LOW && cur_val == TDMB_ANT_DET_HIGH) { ret = TDMB_ANT_OPEN; } else if (ant_prev_status == TDMB_ANT_DET_HIGH && cur_val == TDMB_ANT_DET_LOW) { ret = TDMB_ANT_CLOSE; } ant_prev_status = cur_val; } tdmb_check_ant = 0; DPRINTK("%s cnt(%d) cur(%d) prev(%d)\n", __func__, loop, cur_val, ant_prev_status); return ret; } static int tdmb_ant_det_ignore_irq(void) { DPRINTK("chk_ant=%d\n", tdmb_check_ant); return tdmb_check_ant; } static void tdmb_ant_det_work_func(struct work_struct *work) { if (!tdmb_ant_input) { DPRINTK("%s: input device is not registered\n", __func__); return; } switch (tdmb_ant_det_check_value()) { case TDMB_ANT_OPEN: input_report_key(tdmb_ant_input, KEY_DMB_ANT_DET_UP, 1); input_report_key(tdmb_ant_input, KEY_DMB_ANT_DET_UP, 0); input_sync(tdmb_ant_input); DPRINTK("%s : TDMB_ANT_OPEN\n", __func__); break; case TDMB_ANT_CLOSE: input_report_key(tdmb_ant_input, KEY_DMB_ANT_DET_DOWN, 1); input_report_key(tdmb_ant_input, KEY_DMB_ANT_DET_DOWN, 0); input_sync(tdmb_ant_input); DPRINTK("%s : TDMB_ANT_CLOSE\n", __func__); break; case TDMB_ANT_UNKNOWN: DPRINTK("%s : TDMB_ANT_UNKNOWN\n", __func__); break; default: break; } } static struct workqueue_struct *tdmb_ant_det_wq; static DECLARE_WORK(tdmb_ant_det_work, tdmb_ant_det_work_func); static bool tdmb_ant_det_reg_input(struct platform_device *pdev) { struct input_dev *input; int err; DPRINTK("%s\n", __func__); input = input_allocate_device(); if (!input) { DPRINTK("Can't allocate input device\n"); err = -ENOMEM; } set_bit(EV_KEY, input->evbit); set_bit(KEY_DMB_ANT_DET_UP & KEY_MAX, input->keybit); 
set_bit(KEY_DMB_ANT_DET_DOWN & KEY_MAX, input->keybit); input->name = "sec_dmb_key"; input->phys = "sec_dmb_key/input0"; input->dev.parent = &pdev->dev; err = input_register_device(input); if (err) { DPRINTK("Can't register dmb_ant_det key: %d\n", err); goto free_input_dev; } tdmb_ant_input = input; ant_prev_status = gpio_get_value_cansleep(GPIO_TMM_ANT_DET); return true; free_input_dev: input_free_device(input); return false; } static void tdmb_ant_det_unreg_input(void) { DPRINTK("%s\n", __func__); if (tdmb_ant_input) { input_unregister_device(tdmb_ant_input); tdmb_ant_input = NULL; } } static bool tdmb_ant_det_create_wq(void) { DPRINTK("%s\n", __func__); tdmb_ant_det_wq = create_singlethread_workqueue("tdmb_ant_det_wq"); if (tdmb_ant_det_wq) return true; else return false; } static bool tdmb_ant_det_destroy_wq(void) { DPRINTK("%s\n", __func__); if (tdmb_ant_det_wq) { flush_workqueue(tdmb_ant_det_wq); destroy_workqueue(tdmb_ant_det_wq); tdmb_ant_det_wq = NULL; } return true; } static irqreturn_t tdmb_ant_det_irq_handler(int irq, void *dev_id) { int ret = 0; if (tdmb_ant_det_ignore_irq()) return IRQ_HANDLED; wake_lock_timeout(&tdmb_ant_wlock, TDMB_ANT_WLOCK_TIMEOUT * HZ); if (tdmb_ant_det_wq) { ret = queue_work(tdmb_ant_det_wq, &tdmb_ant_det_work); if (ret == 0) DPRINTK("%s queue_work fail\n", __func__); } return IRQ_HANDLED; } static void ant_det_pm_gpio_config(void) { struct pm_gpio tmm_det_int_cfg = { .direction = PM_GPIO_DIR_IN, .pull = PM_GPIO_PULL_NO, .vin_sel = 2, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 0, }; pm8xxx_gpio_config(GPIO_TMM_ANT_DET, &tmm_det_int_cfg); DPRINTK("%s\n", __func__); return; } static bool tdmb_ant_det_irq_set(bool set) { bool ret = true; int irq_ret; unsigned int gpio_irq_det = 0; DPRINTK("%s\n", __func__); if (set) { gpio_irq_det = gpio_to_irq(GPIO_TMM_ANT_DET); irq_set_irq_type(gpio_irq_det , IRQ_TYPE_EDGE_BOTH); irq_ret = request_irq(gpio_irq_det , tdmb_ant_det_irq_handler , IRQF_DISABLED , "tdmb_ant_det" , NULL); if 
(irq_ret < 0) { DPRINTK("%s %d\r\n", __func__, irq_ret); ret = false; } enable_irq_wake(gpio_irq_det); } else { disable_irq_wake(gpio_irq_det); free_irq(gpio_irq_det, NULL); } return ret; } #endif #if defined(CONFIG_TMM_CHG_CTRL) void tmm_chg_ctrl_start(void) { schedule_delayed_work(&tmm_chg_ctrl_polling_work, TMM_CHG_CTRL_START_DELAY * HZ); tmm_chg_log(KERN_ALERT"%s: schedule_delayed_work(tmm_chg_ctrl_polling_work, TMM_CHG_CTRL_START_DELAY(%d) * HZ(0x%x))!\n", __func__, TMM_CHG_CTRL_START_DELAY, HZ); return; } void tmm_chg_ctrl_stop(void) { cancel_work_sync(&tmm_chg_ctrl_work); tmm_chg_log(KERN_ALERT"%s: cancel_work_sync(&tmm_chg_ctrl_work)!\n", __func__); cancel_delayed_work(&tmm_chg_ctrl_polling_work); tmm_chg_log(KERN_ALERT"%s: cancel_delayed_work(tmm_chg_ctrl_polling_work)!\n", __func__); /* recover input charging current */ tmm_chg_ctrl_func.tmm_recover_chg_curr(); tmm_chg_log(KERN_ALERT"%s: tmm_recover_chg_curr()!\n", __func__); return; } static void tmm_chg_ctrl_work_func(struct work_struct *work) { /* reduce the input charging current */ tmm_chg_ctrl_func.tmm_reduce_chg_curr(); tmm_chg_log(KERN_ALERT"%s: tmm_reduce_chg_curr()!\n", __func__); schedule_delayed_work(&tmm_chg_ctrl_polling_work, TMM_CHG_CTRL_POLLING_TIME * HZ); tmm_chg_log(KERN_ALERT"%s: schedule_delayed_work(tmm_chg_ctrl_polling_work, TMM_CHG_CTRL_POLLING_TIME(%d) * HZ(0x%x))!\n", __func__, TMM_CHG_CTRL_POLLING_TIME, HZ); wake_unlock(&tmm_chg_ctrl_wlock); return; } static void tmm_chg_ctrl_polling_work_func(struct work_struct *work) { wake_lock(&tmm_chg_ctrl_wlock); if(tmm_chg_wqueue) queue_work(tmm_chg_wqueue, &tmm_chg_ctrl_work); else wake_unlock(&tmm_chg_ctrl_wlock); tmm_chg_log(KERN_ALERT"%s: queue_work(tmm_chg_ctrl_work, tmm_chg_ctrl_work_func)!\n", __func__); return; } #endif /****************************************************************************** * function: tuner_probe * brief : probe control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : 0 normal exit * : 
-1 error exit * input : pdev * output : none ******************************************************************************/ static int tuner_probe(struct platform_device *pdev) { int ret; INFO_PRINT("mmtuner_probe: Called."); /* tuner register */ if (register_chrdev(TUNER_CONFIG_DRV_MAJOR, TUNER_CONFIG_DRIVER_NAME, &TunerFileOperations)) { ERROR_PRINT("mmtuner_probe: register_chrdev()\ Failed Major:%d.", TUNER_CONFIG_DRV_MAJOR); return -1; } /* initialization of external variable */ init_waitqueue_head( &g_tuner_poll_wait_queue ); spin_lock_init( &g_tuner_lock ); g_tuner_wakeup_flag = TUNER_OFF; g_tuner_intcnd_f = 0x00; g_tuner_intcnd_s = 0x00; open_cnt = 0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) mutex_init(&g_tuner_mutex); #endif /* Add Start 20121219 No_1 */ ret = i2c_add_driver(&smtej113_i2c_main1_driver); if (ret < 0) { ERROR_PRINT("main1: i2c driver init failed"); goto err_main1; } ret = i2c_add_driver(&smtej113_i2c_main2_driver); if (ret < 0) { ERROR_PRINT("main2: i2c driver init failed"); goto err_main2; } ret = i2c_add_driver(&smtej113_i2c_sub_driver); if (ret < 0) { ERROR_PRINT("sub: disc i2c driver init failed"); goto err_sub; } INFO_PRINT("tuner_probe: END."); #if defined(CONFIG_TMM_ANT_DET) wake_lock_init(&tdmb_ant_wlock, WAKE_LOCK_SUSPEND, "tdmb_ant_wlock"); ant_det_pm_gpio_config(); if (!tdmb_ant_det_reg_input(pdev)) goto err_reg_input; if (!tdmb_ant_det_create_wq()) goto free_reg_input; if (!tdmb_ant_det_irq_set(true)) goto free_ant_det_wq; #endif #if defined(CONFIG_TMM_CHG_CTRL) { struct isdbtmm_platform_data *p = pdev->dev.platform_data; memcpy(&tmm_chg_ctrl_func, p, sizeof(struct isdbtmm_platform_data)); wake_lock_init(&tmm_chg_ctrl_wlock, WAKE_LOCK_SUSPEND, "tmm_chg_ctrl_wlock"); INIT_WORK(&tmm_chg_ctrl_work, tmm_chg_ctrl_work_func); INIT_DELAYED_WORK_DEFERRABLE(&tmm_chg_ctrl_polling_work, tmm_chg_ctrl_polling_work_func); tmm_chg_wqueue = create_singlethread_workqueue("tmm_chg_wqueue"); if(tmm_chg_wqueue == NULL) goto err_reg_input; } 
#endif /* NORMAL END */ return 0; err_sub: i2c_del_driver(&smtej113_i2c_main2_driver); err_main2: i2c_del_driver(&smtej113_i2c_main1_driver); err_main1: unregister_chrdev(TUNER_CONFIG_DRV_MAJOR, TUNER_CONFIG_DRIVER_NAME); #if defined(CONFIG_TMM_ANT_DET) free_ant_det_wq: tdmb_ant_det_destroy_wq(); free_reg_input: tdmb_ant_det_unreg_input(); err_reg_input: ret = -EFAULT; #endif /* ERROR END */ return ret; /* Add End 20121219 No_1 */ } /****************************************************************************** * function: tuner_remove * brief : remove control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : pdev * output : none ******************************************************************************/ static int tuner_remove(struct platform_device *pdev) { INFO_PRINT("tuner_remove: Called."); TRACE(); /* release of the interrupt */ tuner_drv_release_interrupt(); /* tuner unregister */ unregister_chrdev(TUNER_CONFIG_DRV_MAJOR, TUNER_CONFIG_DRIVER_NAME); /* Add Start 20121219 No_1 */ i2c_del_driver(&smtej113_i2c_main1_driver); i2c_del_driver(&smtej113_i2c_main2_driver); i2c_del_driver(&smtej113_i2c_sub_driver); /* Add End 20121219 No_1 */ INFO_PRINT("tuner_remove: END."); #if defined(CONFIG_TMM_ANT_DET) tdmb_ant_det_unreg_input(); tdmb_ant_det_destroy_wq(); tdmb_ant_det_irq_set(false); wake_lock_destroy(&tdmb_ant_wlock); #endif #if defined(CONFIG_TMM_CHG_CTRL) wake_lock_destroy(&tmm_chg_ctrl_wlock); if(tmm_chg_wqueue) { flush_workqueue(tmm_chg_wqueue); destroy_workqueue(tmm_chg_wqueue); tmm_chg_wqueue = NULL; } #endif return 0; } /****************************************************************************** * function: tuner_kernel_thread * brief : kernel_thread of mmtuner driver * date : 2011.09.16 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : none * output : none ******************************************************************************/ int 
tuner_kernel_thread( void * arg ) { int ret = 0; unsigned long flags; unsigned long ktread_flg; mm_segment_t oldfs; struct sched_param param; struct i2c_adapter *adap; struct i2c_msg msgs[4]; unsigned char buff[3]; unsigned char bufs[3]; INFO_PRINT("tuner_kernel_thread: START."); /* initialization of internal variables */ ret = 0; flags = 0; ktread_flg = 0; adap = 0; param.sched_priority = TUNER_CONFIG_KTH_PRI; daemonize( "tuner_kthread" ); oldfs = get_fs(); set_fs( KERNEL_DS ); ret = sched_setscheduler( g_tuner_kthread_id, SCHED_FIFO, &param ); set_fs( oldfs ); buff[0] = (unsigned char)TUNER_DRV_ADR_INTCND_F; bufs[0] = (unsigned char)TUNER_DRV_ADR_INTCND_S; while(1) { DEBUG_PRINT("tuner_kernel_thread waiting... "); wait_event_interruptible( g_tuner_kthread_wait_queue, g_tuner_kthread_flag ); spin_lock_irqsave( &g_tuner_lock, flags ); ktread_flg = g_tuner_kthread_flag; g_tuner_kthread_flag &= ~TUNER_KTH_IRQHANDLER; spin_unlock_irqrestore( &g_tuner_lock, flags); memset( msgs, 0x00, sizeof(struct i2c_msg) * 4 ); /* get interrupt factor */ if ( ( ktread_flg & TUNER_KTH_IRQHANDLER ) == TUNER_KTH_IRQHANDLER ) { DEBUG_PRINT("tuner_kernel_thread IRQHANDLER start "); buff[1] = buff[2] = 0; bufs[1] = bufs[2] = 0; /* read data */ /* INTCND_F */ msgs[0].addr = TUNER_SLAVE_ADR_M1; msgs[0].flags = 0; /* write */ msgs[0].len = 1; msgs[0].buf = &buff[0]; msgs[1].addr = TUNER_SLAVE_ADR_M1; msgs[1].flags = I2C_M_RD; msgs[1].len = 2; msgs[1].buf = buff+1; msgs[2].addr = TUNER_SLAVE_ADR_M2; msgs[2].flags = 0; /* write */ msgs[2].len = 1; msgs[2].buf = &bufs[0]; msgs[3].addr = TUNER_SLAVE_ADR_M2; msgs[3].flags = I2C_M_RD; msgs[3].len = 2; msgs[3].buf = bufs+1; ret = i2c_transfer_wrap(adap, msgs, 4); if (ret < 0) { TRACE(); break; } DEBUG_PRINT( "read slv:0x%02x adr:0x%02x len:%-4d 0x%02x ... 0x%02x ", msgs[0].addr, *(msgs[0].buf), msgs[1].len, msgs[1].buf[0], msgs[1].buf[1]); DEBUG_PRINT( "read slv:0x%02x adr:0x%02x len:%-4d 0x%02x ... 
0x%02x ", msgs[2].addr, *(msgs[2].buf), msgs[3].len, msgs[3].buf[0], msgs[3].buf[1]); g_tuner_intcnd_f |= buff[1]; g_tuner_intst_f |= buff[2]; g_tuner_intcnd_s |= bufs[1]; g_tuner_intst_s |= bufs[2]; DEBUG_PRINT( "// IRQ factor update: INTCND_F:0x%02x INTST_F:0x%02x" ,g_tuner_intcnd_f, g_tuner_intst_f ); DEBUG_PRINT( "// IRQ factor update: INTCND_S:0x%02x INTST_S:0x%02x" ,g_tuner_intcnd_s, g_tuner_intst_s ); memset( msgs, 0x00, sizeof(struct i2c_msg) * 4 ); msgs[0].addr = TUNER_SLAVE_ADR_M1; msgs[0].flags = 0; /* write */ msgs[0].len = 2; msgs[0].buf = buff; msgs[1].addr = TUNER_SLAVE_ADR_M2; msgs[1].flags = 0; /* write */ msgs[1].len = 2; msgs[1].buf = bufs; ret = i2c_transfer_wrap(adap, msgs, 2); if (ret < 0) { TRACE(); break; } /* poll wait release */ g_tuner_wakeup_flag = TUNER_ON; wake_up( &g_tuner_poll_wait_queue ); DEBUG_PRINT("tuner_interrupt end "); #ifdef TUNER_CONFIG_IRQ_LEVELTRIGGER tuner_drv_enable_interrupt(); #endif /* TUNER_CONFIG_IRQ_LEVELTRIGGER */ } /* thread stop set */ if ( ( ktread_flg & TUNER_KTH_END ) == TUNER_KTH_END ) { DEBUG_PRINT("tuner_kernel_thread KTH_END start "); spin_lock_irqsave( &g_tuner_lock, flags ); g_tuner_kthread_flag &= ~TUNER_KTH_END; spin_unlock_irqrestore( &g_tuner_lock, flags ); break; } } INFO_PRINT("tuner_kernel_thread: END. 
"); return 0; } /****************************************************************************** * function: tuner_drv_start * brief : initialization control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : none * output : none ******************************************************************************/ /* Modify Start 20121218 No_3 */ #if 1 int tuner_drv_start(void) #else static int __init tuner_drv_start(void) #endif /* Modify End 20121218 No_3 */ { int ret =0; struct device *dev = NULL; INFO_PRINT("mmtuner_tuner_drv_start: Called"); /* driver register */ ret = platform_driver_register(&mmtuner_driver); if( ret != 0 ) { ERROR_PRINT("init_module: Error:\ failed in platform_driver_register."); return ret; } /* device alloc */ mmtuner_device = platform_device_alloc(TUNER_CONFIG_DRIVER_NAME, -1); if (!mmtuner_device) { ERROR_PRINT("init_module: Error: failed in platform_device_alloc."); platform_driver_unregister(&mmtuner_driver); return -ENOMEM; } /* device add */ ret = platform_device_add(mmtuner_device); if ( ret ) { ERROR_PRINT("init_module: Error: failed in platform_device_add."); platform_device_put(mmtuner_device); platform_driver_unregister(&mmtuner_driver); return ret; } device_class = class_create(THIS_MODULE, TUNER_CONFIG_DRIVER_NAME); if (IS_ERR(device_class)) { ERROR_PRINT("init_module: Error: failed in class_create."); platform_device_put(mmtuner_device); platform_driver_unregister(&mmtuner_driver); return PTR_ERR(device_class); } dev = device_create ( device_class, NULL, MKDEV(TUNER_CONFIG_DRV_MAJOR, TUNER_CONFIG_DRV_MINOR), NULL, TUNER_CONFIG_DRIVER_NAME); if(IS_ERR(dev)) { ERROR_PRINT("init_module: Error: failed in device_create."); platform_device_put(mmtuner_device); platform_driver_unregister(&mmtuner_driver); return PTR_ERR(dev); } /* thread creat */ g_tuner_kthread_flag = TUNER_KTH_NONE; init_waitqueue_head( &g_tuner_kthread_wait_queue ); g_tuner_kthread_id = kthread_create( 
tuner_kernel_thread, NULL, "tuner_kthread" ); if( IS_ERR( g_tuner_kthread_id ) ) { g_tuner_kthread_id = NULL; platform_device_put(mmtuner_device); platform_driver_unregister(&mmtuner_driver); return -EIO; } wake_up_process( g_tuner_kthread_id ); INFO_PRINT("mmtuner_tuner_drv_start: END"); return 0; } /****************************************************************************** * function: tuner_drv_end * brief : exit control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : none * input : none * output : none ******************************************************************************/ /* Modify Start 20121218 No_3 */ #if 1 void tuner_drv_end(void) #else static void __exit tuner_drv_end(void) #endif /* Modify End 20121218 No_3 */ { INFO_PRINT("mmtuner_tuner_drv_end: Called"); /* thread stop flag */ g_tuner_kthread_flag |= TUNER_KTH_END; if( waitqueue_active( &g_tuner_kthread_wait_queue )) { wake_up( &g_tuner_kthread_wait_queue ); } /* thread stop */ if( g_tuner_kthread_id ) { kthread_stop( g_tuner_kthread_id ); } /* device destroy */ device_destroy(device_class, MKDEV(TUNER_CONFIG_DRV_MAJOR, TUNER_CONFIG_DRV_MINOR)); /* class destroy */ class_destroy(device_class); /* device unregister */ platform_device_unregister(mmtuner_device); /* driver unregister */ platform_driver_unregister(&mmtuner_driver); INFO_PRINT("mmtuner_tuner_drv_end: END"); } /****************************************************************************** * function: tuner_module_entry_open * brief : open control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * output : none ******************************************************************************/ static int tuner_module_entry_open(struct inode* Inode, struct file* FIle) { INFO_PRINT("tuner_module_entry_open: Called"); #ifdef TUNER_CONFIG_DRV_MULTI /* allow multiple open */ open_cnt++; #else /* TUNER_CONFIG_DRV_MULTI */ /* not allow multiple 
open */ /* already open */ if( open_cnt > 0 ) { INFO_PRINT("tuner_module_entry_open: open error"); return -1; } /* first open */ else { INFO_PRINT("tuner_module_entry_open: open_cnt = 1"); open_cnt++; } #endif /* TUNER_CONFIG_DRV_MULTI */ INFO_PRINT("tuner_module_entry_open: end << open cnt = %ld >>", open_cnt ); return 0; } /****************************************************************************** * function: tuner_module_entry_close * brief : close control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * output : none ******************************************************************************/ static int tuner_module_entry_close(struct inode* Inode, struct file* FIle) { struct devone_data *dev; INFO_PRINT("tuner_module_entry_close: Called"); if( open_cnt <= 0 ) { INFO_PRINT("tuner_module_entry_close: close error"); open_cnt = 0; return -1; } else { open_cnt--; } /* close all open */ if( open_cnt == 0 ) { /* interrupt release */ tuner_drv_release_interrupt(); if( FIle == NULL ) { return -1; } dev = FIle->private_data; if( dev ) { kfree( dev ); } } return 0; } /****************************************************************************** * function: tuner_module_entry_read * brief : read control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : FIle * : Buffer * : Count * : OffsetPosition * output : none ******************************************************************************/ static ssize_t tuner_module_entry_read(struct file * FIle, char * Buffer, size_t Count, loff_t * OffsetPosition) { return 0; } /****************************************************************************** * function: tuner_module_entry_write * brief : write control of a driver * date : 2011.10.31 * author : K.Okawa(KXD14) * * return : 0 normal exit * : -1 error exit * input : FIle * : Buffer 
[slave][reg.addr][data-0][data-1]...[data-(n-1)] * : Count (>=3) n+2 * : OffsetPosition * output : none ******************************************************************************/ static ssize_t tuner_module_entry_write(struct file* FIle, const char* Buffer, size_t Count, loff_t* OffsetPosition) { int ret; unsigned long copy_ret; unsigned char *buf; /* pointer to data area */ struct i2c_adapter *adap; struct i2c_msg msgs[1]; adap = 0; /* argument check */ if (Count < 3) { TRACE(); return -EINVAL; } /* memory allocation for data area */ buf = (unsigned char *)vmalloc(Count); if (buf == NULL) { return -EINVAL; } copy_ret = copy_from_user(buf, Buffer, Count); if (copy_ret != 0) { vfree(buf); return -EINVAL; } /* construct i2c message */ memset(msgs, 0x00, sizeof(struct i2c_msg) * 1); msgs[0].addr = buf[0]; msgs[0].flags = 0; /* write */ msgs[0].len = Count - 1; msgs[0].buf = buf + 1; ret = i2c_transfer_wrap(adap, msgs, 1); if (ret < 0) { TRACE(); vfree(buf); return -EINVAL; } DEBUG_PRINT( "write slv:0x%02x adr:0x%02x len:%-4d 0x%02x ... 
0x%02x ", buf[0], buf[1], Count-2, buf[2], buf[Count-1]); vfree(buf); return ret; } /* Add Start 20121218 No_1 */ /****************************************************************************** * function: SMT113J_IOCTL_GET_OPEN_COUNT * brief : ioctl control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * : uCommand * : uArgument * output : none ******************************************************************************/ static int SMT113J_IOCTL_GET_OPEN_COUNT ( struct file* FIle, unsigned int cmd, unsigned long arg ) { TUNER_STS_DATA *arg_data; int ret = 0; unsigned long temp_open = 0, temp_moni = 0; DEBUG_PRINT ("SMT113J_IOCTL_GET_OPEN_COUNT << Start >> : open = %ld", ( open_cnt - moni_cnt )); /* Parameter check */ arg_data = (TUNER_STS_DATA*)arg; if ( NULL == arg_data ) { ERROR_PRINT ("Parameter Error : arg = NULL"); return ( -1 ); } /* state check */ if ( open_cnt < moni_cnt ) { ERROR_PRINT ("-> Status Error : open = %ld, moni = %ld", open_cnt, moni_cnt ); return ( -1 ); } temp_open = (open_cnt - moni_cnt); temp_moni = moni_cnt; /* Copy to User Area */ ret = put_user ( temp_open, (unsigned long __user *)&(arg_data->open_cnt) ); if ( 0 != ret ) { ERROR_PRINT ("-> put_user(arg_data->open_cnt) Error : ret = %d", ret ); return ( -1 ); } /* Copy to User Area */ ret = put_user ( moni_cnt, (unsigned long __user *)&(arg_data->moni_cnt) ); if ( 0 != ret ) { ERROR_PRINT ("-> put_user(arg_data->moni_cnt) Error : ret = %d", ret ); return ( -1 ); } DEBUG_PRINT ("SMT113J_IOCTL_GET_OPEN_COUNT << End >>"); DEBUG_PRINT ("-> Open Count Result : %ld", open_cnt ); DEBUG_PRINT ("-> Monitor Count Result : %ld", moni_cnt ); return ( 0 ); } /****************************************************************************** * function: SMT113J_IOCTL_SET_MONITOR_MODE * brief : ioctl control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * 
: FIle * : uCommand * : uArgument * output : none ******************************************************************************/ static int SMT113J_IOCTL_SET_MONITOR_MODE ( struct file* FIle, unsigned int cmd, unsigned long arg ) { int ret = 0; DEBUG_PRINT ("SMT113J_IOCTL_SET_MONITOR_MODE << Start >> "); if ( 1 == arg ) { /* Monitor Mode Start */ moni_cnt++; } else { /* Monitor Mode Stop */ moni_cnt--; if ( 0 > moni_cnt ) { DEBUG_PRINT (" -> under counter = %ld => 0", moni_cnt ); moni_cnt = 0; } } DEBUG_PRINT ("SMT113J_IOCTL_SET_MONITOR_MODE << End >> : moni_cnt = %ld", moni_cnt ); return ( ret ); } /* Add End 20121218 No_1 */ /* Add Start 20121219 No_1 */ /****************************************************************************** * function: tmm_i2c_main1_probe * brief : probe control of a driver * date : 2012.12.19 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * : uCommand * : uArgument * output : none ******************************************************************************/ static int tmm_i2c_main1_probe(struct i2c_client *client, const struct i2c_device_id *devid) { INFO_PRINT("tmm_i2c_main1_probe start"); i2c_main1_client = client; if (!i2c_main1_client) { ERROR_PRINT("tmm_i2c_main1_probe: Error: fail client"); return -EINVAL; } INFO_PRINT("tmm_i2c_main1_probe end"); return 0; } /****************************************************************************** * function: tmm_i2c_main2_probe * brief : probe control of a driver * date : 2012.12.19 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * : uCommand * : uArgument * output : none ******************************************************************************/ static int tmm_i2c_main2_probe(struct i2c_client *client, const struct i2c_device_id *devid) { INFO_PRINT("tmm_i2c_main2_probe start"); i2c_main2_client = client; if (!i2c_main2_client) { ERROR_PRINT("tmm_i2c_main2_probe: Error: fail client"); 
return -EINVAL; } INFO_PRINT("tmm_i2c_main2_probe end"); return 0; } /****************************************************************************** * function: tmm_i2c_sub_probe * brief : probe control of a driver * date : 2012.12.19 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * : uCommand * : uArgument * output : none ******************************************************************************/ static int tmm_i2c_sub_probe(struct i2c_client *client, const struct i2c_device_id *devid) { INFO_PRINT("tmm_i2c_sub_probe start"); i2c_sub_client = client; if (!i2c_sub_client) { ERROR_PRINT("tmm_i2c_sub_probe: Error: fail client"); return -EINVAL; } INFO_PRINT("tmm_i2c_sub_probe end"); return 0; } /****************************************************************************** * function: tmm_i2c_main1_remove * brief : remove control of a driver * date : 2012.12.19 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * : uCommand * : uArgument * output : none ******************************************************************************/ static int tmm_i2c_main1_remove(struct i2c_client *client) { INFO_PRINT("tmm_i2c_main1_remove start"); /* no operation */ INFO_PRINT("tmm_i2c_main1_remove end"); return 0; } /****************************************************************************** * function: tmm_i2c_main2_remove * brief : remove control of a driver * date : 2012.12.19 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * : uCommand * : uArgument * output : none ******************************************************************************/ static int tmm_i2c_main2_remove(struct i2c_client *client) { INFO_PRINT("tmm_i2c_main2_remove start"); /* no operation */ INFO_PRINT("tmm_i2c_main2_remove end"); return 0; } /****************************************************************************** * function: tmm_i2c_sub_remove * brief : 
remove control of a driver * date : 2012.12.19 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * : uCommand * : uArgument * output : none ******************************************************************************/ static int tmm_i2c_sub_remove(struct i2c_client *client) { INFO_PRINT("tmm_i2c_sub_remove start"); /* no operation */ INFO_PRINT("tmm_i2c_sub_remove end"); return 0; } /* Add End 20121219 No_1 */ /****************************************************************************** * function: tuner_module_entry_ioctl * brief : ioctl control of a driver * date : 2011.08.02 * author : K.Kitamura(*) * * return : 0 normal exit * : -1 error exit * input : Inode * : FIle * : uCommand * : uArgument * output : none ******************************************************************************/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) static int tuner_module_entry_ioctl(struct inode* Inode, struct file* FIle, unsigned int uCommand, unsigned long uArgument) #else /* LINUX_VERSION_CODE */ static long tuner_module_entry_ioctl(struct file *file, unsigned int uCommand, unsigned long uArgument) #endif /* LINUX_VERSION_CODE */ { int ret; TUNER_DATA_RW data; unsigned long copy_ret; int param; TUNER_DATA_RW event_status[ TUNER_EVENT_REGNUM ]; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* get lock */ mutex_lock(&g_tuner_mutex); #endif /* argument check */ if( uArgument == 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } switch( uCommand ) { /* read data */ case TUNER_IOCTL_VALGET: copy_ret = copy_from_user( &data, &( *(TUNER_DATA_RW *)uArgument ), sizeof( TUNER_DATA_RW )); if( copy_ret != 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } ret = tuner_drv_hw_access( uCommand, &data, 1 ); if( ret == 0 ) { copy_ret = copy_to_user( &( 
*(TUNER_DATA_RW *)uArgument ), &data, sizeof( TUNER_DATA_RW )); if( copy_ret != 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } } break; /* write data */ case TUNER_IOCTL_VALSET: copy_ret = copy_from_user( &data, &( *(TUNER_DATA_RW *)uArgument ), sizeof( TUNER_DATA_RW )); if( copy_ret != 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } ret = tuner_drv_hw_access( uCommand, &data, 1 ); break; case TUNER_IOCTL_VALGET_EVENT: /* INTCND_F */ copy_ret = copy_to_user( &( *( unsigned char *)uArgument ), &g_tuner_intcnd_f, sizeof( unsigned char )); if (copy_ret != 0) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } /* INTCND_S */ copy_ret = copy_to_user( &( *( unsigned char *)( uArgument + 1 )), &g_tuner_intcnd_s, sizeof( unsigned char )); if (copy_ret != 0) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } /* INTST_F */ copy_ret = copy_to_user( &( *( unsigned char *)(uArgument + 2)), &g_tuner_intst_f, sizeof( unsigned char )); if (copy_ret != 0) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } /* INTCND_F */ copy_ret = copy_to_user( &( *( unsigned char *)(uArgument + 3)), &g_tuner_intst_s, sizeof( unsigned char )); if (copy_ret != 0) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } DEBUG_PRINT( "// IRQ factor send: INTCND_F:0x%02x INTST_F:0x%02x" ,g_tuner_intcnd_f, g_tuner_intst_f ); DEBUG_PRINT( "// IRQ factor send: INTCND_S:0x%02x INTST_S:0x%02x" ,g_tuner_intcnd_s, g_tuner_intst_s ); /* initialization */ g_tuner_intcnd_f = 0x00; g_tuner_intcnd_s = 0x00; 
g_tuner_intst_f = 0x00; g_tuner_intst_s = 0x00; ret = copy_ret; break; /* event set */ case TUNER_IOCTL_VALSET_EVENT: DEBUG_PRINT("*** VALSET_EVENT ***"); copy_ret = copy_from_user( &data, &( *(TUNER_DATA_RW *)uArgument ), sizeof( TUNER_DATA_RW )); if( copy_ret != 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } /* when 1st time of event setting, be enable interrupt */ /* slave address */ event_status[0].slave_adr = TUNER_SLAVE_ADR_M1; /* reg. address */ event_status[0].adr = REG_INTDEF1_F; /* start bit position */ event_status[0].sbit = SIG_ENS_INTDEF1_F; /* end bit position */ event_status[0].ebit = SIG_ENE_INTDEF1_F; /* clear for read */ event_status[0].param = 0x00; /* enable bit mask */ event_status[0].enabit = SIG_ENA_INTDEF1_F; event_status[1].slave_adr = TUNER_SLAVE_ADR_M1; event_status[1].adr = REG_INTDEF2_F; event_status[1].sbit = SIG_ENS_INTDEF2_F; event_status[1].ebit = SIG_ENE_INTDEF2_F; event_status[1].param = 0x00; event_status[1].enabit = SIG_ENA_INTDEF2_F; event_status[2].slave_adr = TUNER_SLAVE_ADR_M2; event_status[2].adr = REG_INTDEF1_S; event_status[2].sbit = SIG_ENS_INTDEF1_S; event_status[2].ebit = SIG_ENE_INTDEF1_S; event_status[2].param = 0x00; event_status[2].enabit = SIG_ENA_INTDEF1_S; event_status[3].slave_adr = TUNER_SLAVE_ADR_M2; event_status[3].adr = REG_INTDEF2_S; event_status[3].sbit = SIG_ENS_INTDEF2_S; event_status[3].ebit = SIG_ENE_INTDEF2_S; event_status[3].param = 0x00; event_status[3].enabit = SIG_ENA_INTDEF2_S; ret = tuner_drv_hw_access( TUNER_IOCTL_VALGET, event_status, TUNER_EVENT_REGNUM ); if( ret != 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } if (((event_status[0].param & event_status[0].enabit) == 0x00) && ((event_status[1].param & event_status[1].enabit) == 0x00) && ((event_status[2].param & event_status[2].enabit) == 0x00) && 
((event_status[3].param & event_status[3].enabit) == 0x00)) { DEBUG_PRINT("*** REQUEST IRQ ***"); ret = tuner_drv_set_interrupt(); if( ret != 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } } ret = tuner_drv_hw_access( TUNER_IOCTL_VALSET, &data, 1 ); break; /* event release */ case TUNER_IOCTL_VALREL_EVENT: copy_ret = copy_from_user( &data, &( *(TUNER_DATA_RW *)uArgument ), sizeof( TUNER_DATA_RW )); if( copy_ret != 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } ret = tuner_drv_hw_access( TUNER_IOCTL_VALSET, &data, 1 ); if( ret != 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } /* slave address */ event_status[0].slave_adr = TUNER_SLAVE_ADR_M1; /* reg. address */ event_status[0].adr = REG_INTDEF1_F; /* start bit position */ event_status[0].sbit = SIG_ENS_INTDEF1_F; /* end bit position */ event_status[0].ebit = SIG_ENE_INTDEF1_F; /* clear for read */ event_status[0].param = 0x00; /* enable bit mask */ event_status[0].enabit = SIG_ENA_INTDEF1_F; event_status[1].slave_adr = TUNER_SLAVE_ADR_M1; event_status[1].adr = REG_INTDEF2_F; event_status[1].sbit = SIG_ENS_INTDEF2_F; event_status[1].ebit = SIG_ENE_INTDEF2_F; event_status[1].param = 0x00; event_status[1].enabit = SIG_ENA_INTDEF2_F; event_status[2].slave_adr = TUNER_SLAVE_ADR_M2; event_status[2].adr = REG_INTDEF1_S; event_status[2].sbit = SIG_ENS_INTDEF1_S; event_status[2].ebit = SIG_ENE_INTDEF1_S; event_status[2].param = 0x00; event_status[2].enabit = SIG_ENA_INTDEF1_S; event_status[3].slave_adr = TUNER_SLAVE_ADR_M2; event_status[3].adr = REG_INTDEF2_S; event_status[3].sbit = SIG_ENS_INTDEF2_S; event_status[3].ebit = SIG_ENE_INTDEF2_S; event_status[3].param = 0x00; event_status[3].enabit = SIG_ENA_INTDEF2_S; ret = tuner_drv_hw_access( 
TUNER_IOCTL_VALGET, event_status, TUNER_EVENT_REGNUM ); if (((event_status[0].param & event_status[0].enabit) == 0x00) && ((event_status[1].param & event_status[1].enabit) == 0x00) && ((event_status[2].param & event_status[2].enabit) == 0x00) && ((event_status[3].param & event_status[3].enabit) == 0x00)) { DEBUG_PRINT("*** release IRQ REQUEST ***"); tuner_drv_release_interrupt(); } break; case TUNER_IOCTL_VALSET_POWER: copy_ret = copy_from_user( &param, &( *( int * )uArgument ), sizeof( int )); if( copy_ret != 0 ) { TRACE(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return -EINVAL; } ret = tuner_drv_ctl_power( param ); break; /* Add Start 20121218 No_1 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) case TUNER_IOCTL_VALGET_OPENCNT: ret = SMT113J_IOCTL_GET_OPEN_COUNT ( FIle->private_data, uCommand, uArgument ); break; case TUNER_IOCTL_VALSET_MONICNT: ret = SMT113J_IOCTL_SET_MONITOR_MODE ( FIle->private_data, uCommand, uArgument ); break; #else /* LINUX_VERSION_CODE */ case TUNER_IOCTL_VALGET_OPENCNT: ret = SMT113J_IOCTL_GET_OPEN_COUNT ( file->private_data, uCommand, uArgument ); break; case TUNER_IOCTL_VALSET_MONICNT: ret = SMT113J_IOCTL_SET_MONITOR_MODE ( file->private_data, uCommand, uArgument ); break; #endif /* LINUX_VERSION_CODE */ /* Add End 20121218 No_1 */ default: TRACE(); ret = -EINVAL; break; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) /* lock release */ mutex_unlock(&g_tuner_mutex); #endif return ret; } /****************************************************************************** * function: tuner_module_entry_poll * brief : poll control of a driver * date : 2011.08.23 * author : M.Takahashi(*) * * return : 0 normal exit * : -1 error exit * input : file * : poll_tbl * output : none ******************************************************************************/ static unsigned int tuner_module_entry_poll( struct file *file, struct poll_table_struct *poll_tbl ) { unsigned long 
tuner_flags; unsigned int tuner_mask; /* Initialization */ tuner_mask = 0; /* poll wait */ poll_wait( file, &g_tuner_poll_wait_queue, poll_tbl ); /* interrupt disable */ spin_lock_irqsave( &g_tuner_lock, tuner_flags ); if( g_tuner_wakeup_flag == TUNER_ON ) { tuner_mask = ( POLLIN | POLLRDNORM ); } g_tuner_wakeup_flag = TUNER_OFF; /* interrupt enable */ spin_unlock_irqrestore( &g_tuner_lock, tuner_flags ); return tuner_mask; } /****************************************************************************** * function: tuner_interrupt * brief : interrpu control of a driver * date : 2011.08.23 * author : M.Takahashi(*) * * return : 0 normal exit * : -1 error exit * input : irq * : dev_id * output : none ******************************************************************************/ #ifndef TUNER_CONFIG_IRQ_PC_LINUX irqreturn_t tuner_interrupt( int irq, void *dev_id ) #else /* TUNER_CONFIG_IRQ_PC_LINUX */ int tuner_interrupt( void ) #endif /* TUNER_CONFIG_IRQ_PC_LINUX */ { DEBUG_PRINT("tuner_interrupt start "); g_tuner_kthread_flag |= TUNER_KTH_IRQHANDLER; if( waitqueue_active( &g_tuner_kthread_wait_queue )) { #ifdef TUNER_CONFIG_IRQ_LEVELTRIGGER tuner_drv_disable_interrupt(); #endif /* TUNER_CONFIG_IRQ_LEVELTRIGGER */ wake_up( &g_tuner_kthread_wait_queue ); } else { DEBUG_PRINT("tuner_interrupt waitqueue_active err!!! "); } DEBUG_PRINT("tuner_interrupt end "); return IRQ_HANDLED; } MODULE_AUTHOR("Samsung"); MODULE_DESCRIPTION("MM Tuner Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
ZUK2/android_kernel_zuk_z2_plus
drivers/platform/x86/intel_scu_ipc.c
486
18543
/* * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism * * (C) Copyright 2008-2010 Intel Corporation * Author: Sreedhara DS (sreedhara.ds@intel.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * * SCU running in ARC processor communicates with other entity running in IA * core through IPC mechanism which in turn messaging between IA core ad SCU. * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with * IPC-1 Driver provides an API for power control unit registers (e.g. MSIC) * along with other APIs. */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/device.h> #include <linux/pm.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/sfi.h> #include <linux/module.h> #include <asm/intel-mid.h> #include <asm/intel_scu_ipc.h> /* IPC defines the following message types */ #define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */ #define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */ #define IPCMSG_FW_UPDATE 0xFE /* Firmware update */ #define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */ #define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */ /* Command id associated with message IPCMSG_PCNTRL */ #define IPC_CMD_PCNTRL_W 0 /* Register write */ #define IPC_CMD_PCNTRL_R 1 /* Register read */ #define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */ /* * IPC register summary * * IPC register blocks are memory mapped at fixed address of 0xFF11C000 * To read or write information to the SCU, driver writes to IPC-1 memory * mapped registers (base address 0xFF11C000). The following is the IPC * mechanism * * 1. 
IA core cDMI interface claims this transaction and converts it to a * Transaction Layer Packet (TLP) message which is sent across the cDMI. * * 2. South Complex cDMI block receives this message and writes it to * the IPC-1 register block, causing an interrupt to the SCU * * 3. SCU firmware decodes this interrupt and IPC message and the appropriate * message handler is called within firmware. */ #define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */ #define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */ #define IPC_IOC 0x100 /* IPC command register IOC bit */ #define PCI_DEVICE_ID_LINCROFT 0x082a #define PCI_DEVICE_ID_PENWELL 0x080e #define PCI_DEVICE_ID_CLOVERVIEW 0x08ea #define PCI_DEVICE_ID_TANGIER 0x11a0 /* intel scu ipc driver data*/ struct intel_scu_ipc_pdata_t { u32 ipc_base; u32 i2c_base; u32 ipc_len; u32 i2c_len; u8 irq_mode; }; static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { .ipc_base = 0xff11c000, .i2c_base = 0xff12b000, .ipc_len = 0x100, .i2c_len = 0x10, .irq_mode = 0, }; /* Penwell and Cloverview */ static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { .ipc_base = 0xff11c000, .i2c_base = 0xff12b000, .ipc_len = 0x100, .i2c_len = 0x10, .irq_mode = 1, }; static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { .ipc_base = 0xff009000, .i2c_base = 0xff00d000, .ipc_len = 0x100, .i2c_len = 0x10, .irq_mode = 0, }; static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id); static void ipc_remove(struct pci_dev *pdev); struct intel_scu_ipc_dev { struct pci_dev *pdev; void __iomem *ipc_base; void __iomem *i2c_base; struct completion cmd_complete; u8 irq_mode; }; static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ static int platform; /* Platform type */ /* * IPC Read Buffer (Read Only): * 16 byte buffer for receiving data from SCU, if IPC command * processing results in response data */ #define IPC_READ_BUFFER 0x90 #define IPC_I2C_CNTRL_ADDR 0 #define I2C_DATA_ADDR 0x04 static 
DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ /* * Command Register (Write Only): * A write to this register results in an interrupt to the SCU core processor * Format: * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)| */ static inline void ipc_command(u32 cmd) /* Send ipc command */ { if (ipcdev.irq_mode) { reinit_completion(&ipcdev.cmd_complete); writel(cmd | IPC_IOC, ipcdev.ipc_base); } writel(cmd, ipcdev.ipc_base); } /* * IPC Write Buffer (Write Only): * 16-byte buffer for sending data associated with IPC command to * SCU. Size of the data is specified in the IPC_COMMAND_REG register */ static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */ { writel(data, ipcdev.ipc_base + 0x80 + offset); } /* * Status Register (Read Only): * Driver will read this register to get the ready/busy status of the IPC * block and error status of the IPC command that was just processed by SCU * Format: * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)| */ static inline u8 ipc_read_status(void) { return __raw_readl(ipcdev.ipc_base + 0x04); } static inline u8 ipc_data_readb(u32 offset) /* Read ipc byte data */ { return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset); } static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */ { return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); } static inline int busy_loop(void) /* Wait till scu status is busy */ { u32 status = 0; u32 loop_count = 0; status = ipc_read_status(); while (status & 1) { udelay(1); /* scu processing time is in few u secods */ status = ipc_read_status(); loop_count++; /* break if scu doesn't reset busy bit after huge retry */ if (loop_count > 100000) { dev_err(&ipcdev.pdev->dev, "IPC timed out"); return -ETIMEDOUT; } } if ((status >> 1) & 1) return -EIO; return 0; } /* Wait till ipc ioc interrupt is received or timeout in 3 HZ */ static inline int ipc_wait_for_interrupt(void) { int status; if 
(!wait_for_completion_timeout(&ipcdev.cmd_complete, 3 * HZ)) { struct device *dev = &ipcdev.pdev->dev; dev_err(dev, "IPC timed out\n"); return -ETIMEDOUT; } status = ipc_read_status(); if ((status >> 1) & 1) return -EIO; return 0; } int intel_scu_ipc_check_status(void) { return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop(); } /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) { int nc; u32 offset = 0; int err; u8 cbuf[IPC_WWBUF_SIZE] = { }; u32 *wbuf = (u32 *)&cbuf; mutex_lock(&ipclock); memset(cbuf, 0, sizeof(cbuf)); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } for (nc = 0; nc < count; nc++, offset += 2) { cbuf[offset] = addr[nc]; cbuf[offset + 1] = addr[nc] >> 8; } if (id == IPC_CMD_PCNTRL_R) { for (nc = 0, offset = 0; nc < count; nc++, offset += 4) ipc_data_writel(wbuf[nc], offset); ipc_command((count*2) << 16 | id << 12 | 0 << 8 | op); } else if (id == IPC_CMD_PCNTRL_W) { for (nc = 0; nc < count; nc++, offset += 1) cbuf[offset] = data[nc]; for (nc = 0, offset = 0; nc < count; nc++, offset += 4) ipc_data_writel(wbuf[nc], offset); ipc_command((count*3) << 16 | id << 12 | 0 << 8 | op); } else if (id == IPC_CMD_PCNTRL_M) { cbuf[offset] = data[0]; cbuf[offset + 1] = data[1]; ipc_data_writel(wbuf[0], 0); /* Write wbuff */ ipc_command(4 << 16 | id << 12 | 0 << 8 | op); } err = intel_scu_ipc_check_status(); if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */ /* Workaround: values are read as 0 without memcpy_fromio */ memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16); for (nc = 0; nc < count; nc++) data[nc] = ipc_data_readb(nc); } mutex_unlock(&ipclock); return err; } /** * intel_scu_ipc_ioread8 - read a word via the SCU * @addr: register on SCU * @data: return pointer for read byte * * Read a single register. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. 
*/ int intel_scu_ipc_ioread8(u16 addr, u8 *data) { return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); } EXPORT_SYMBOL(intel_scu_ipc_ioread8); /** * intel_scu_ipc_ioread16 - read a word via the SCU * @addr: register on SCU * @data: return pointer for read word * * Read a register pair. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. */ int intel_scu_ipc_ioread16(u16 addr, u16 *data) { u16 x[2] = {addr, addr + 1 }; return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); } EXPORT_SYMBOL(intel_scu_ipc_ioread16); /** * intel_scu_ipc_ioread32 - read a dword via the SCU * @addr: register on SCU * @data: return pointer for read dword * * Read four registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. */ int intel_scu_ipc_ioread32(u16 addr, u32 *data) { u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); } EXPORT_SYMBOL(intel_scu_ipc_ioread32); /** * intel_scu_ipc_iowrite8 - write a byte via the SCU * @addr: register on SCU * @data: byte to write * * Write a single register. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. */ int intel_scu_ipc_iowrite8(u16 addr, u8 data) { return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); } EXPORT_SYMBOL(intel_scu_ipc_iowrite8); /** * intel_scu_ipc_iowrite16 - write a word via the SCU * @addr: register on SCU * @data: word to write * * Write two registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. 
*/ int intel_scu_ipc_iowrite16(u16 addr, u16 data) { u16 x[2] = {addr, addr + 1 }; return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); } EXPORT_SYMBOL(intel_scu_ipc_iowrite16); /** * intel_scu_ipc_iowrite32 - write a dword via the SCU * @addr: register on SCU * @data: dword to write * * Write four registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. */ int intel_scu_ipc_iowrite32(u16 addr, u32 data) { u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); } EXPORT_SYMBOL(intel_scu_ipc_iowrite32); /** * intel_scu_ipc_readvv - read a set of registers * @addr: register list * @data: bytes to return * @len: length of array * * Read registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * The largest array length permitted by the hardware is 5 items. * * This function may sleep. */ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len) { return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); } EXPORT_SYMBOL(intel_scu_ipc_readv); /** * intel_scu_ipc_writev - write a set of registers * @addr: register list * @data: bytes to write * @len: length of array * * Write registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * The largest array length permitted by the hardware is 5 items. * * This function may sleep. * */ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len) { return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); } EXPORT_SYMBOL(intel_scu_ipc_writev); /** * intel_scu_ipc_update_register - r/m/w a register * @addr: register address * @bits: bits to update * @mask: mask of bits to update * * Read-modify-write power control unit register. 
The first data argument * must be register value and second is mask value * mask is a bitmap that indicates which bits to update. * 0 = masked. Don't modify this bit, 1 = modify this bit. * returns 0 on success or an error code. * * This function may sleep. Locking between SCU accesses is handled * for the caller. */ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) { u8 data[2] = { bits, mask }; return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M); } EXPORT_SYMBOL(intel_scu_ipc_update_register); /** * intel_scu_ipc_simple_command - send a simple command * @cmd: command * @sub: sub type * * Issue a simple command to the SCU. Do not use this interface if * you must then access data as any data values may be overwritten * by another SCU access by the time this function returns. * * This function may sleep. Locking for SCU accesses is handled for * the caller. */ int intel_scu_ipc_simple_command(int cmd, int sub) { int err; mutex_lock(&ipclock); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } ipc_command(sub << 12 | cmd); err = intel_scu_ipc_check_status(); mutex_unlock(&ipclock); return err; } EXPORT_SYMBOL(intel_scu_ipc_simple_command); /** * intel_scu_ipc_command - command with data * @cmd: command * @sub: sub type * @in: input data * @inlen: input length in dwords * @out: output data * @outlein: output length in dwords * * Issue a command to the SCU which involves data transfers. 
Do the * data copies under the lock but leave it for the caller to interpret */ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, u32 *out, int outlen) { int i, err; mutex_lock(&ipclock); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } for (i = 0; i < inlen; i++) ipc_data_writel(*in++, 4 * i); ipc_command((inlen << 16) | (sub << 12) | cmd); err = intel_scu_ipc_check_status(); if (!err) { for (i = 0; i < outlen; i++) *out++ = ipc_data_readl(4 * i); } mutex_unlock(&ipclock); return err; } EXPORT_SYMBOL(intel_scu_ipc_command); /*I2C commands */ #define IPC_I2C_WRITE 1 /* I2C Write command */ #define IPC_I2C_READ 2 /* I2C Read command */ /** * intel_scu_ipc_i2c_cntrl - I2C read/write operations * @addr: I2C address + command bits * @data: data to read/write * * Perform an an I2C read/write operation via the SCU. All locking is * handled for the caller. This function may sleep. * * Returns an error code or 0 on success. * * This has to be in the IPC driver for the locking. */ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data) { u32 cmd = 0; mutex_lock(&ipclock); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } cmd = (addr >> 24) & 0xFF; if (cmd == IPC_I2C_READ) { writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR); /* Write not getting updated without delay */ mdelay(1); *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR); } else if (cmd == IPC_I2C_WRITE) { writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR); mdelay(1); writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR); } else { dev_err(&ipcdev.pdev->dev, "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd); mutex_unlock(&ipclock); return -EIO; } mutex_unlock(&ipclock); return 0; } EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); /* * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1 * When ioc bit is set to 1, caller api must wait for interrupt handler called * which in turn unlocks the caller api. 
Currently this is not used * * This is edge triggered so we need take no action to clear anything */ static irqreturn_t ioc(int irq, void *dev_id) { if (ipcdev.irq_mode) complete(&ipcdev.cmd_complete); return IRQ_HANDLED; } /** * ipc_probe - probe an Intel SCU IPC * @dev: the PCI device matching * @id: entry in the match table * * Enable and install an intel SCU IPC. This appears in the PCI space * but uses some hard coded addresses as well. */ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err; struct intel_scu_ipc_pdata_t *pdata; resource_size_t pci_resource; if (ipcdev.pdev) /* We support only one SCU */ return -EBUSY; pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data; ipcdev.pdev = pci_dev_get(dev); ipcdev.irq_mode = pdata->irq_mode; err = pci_enable_device(dev); if (err) return err; err = pci_request_regions(dev, "intel_scu_ipc"); if (err) return err; pci_resource = pci_resource_start(dev, 0); if (!pci_resource) return -ENOMEM; init_completion(&ipcdev.cmd_complete); if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev)) return -EBUSY; ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len); if (!ipcdev.ipc_base) return -ENOMEM; ipcdev.i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len); if (!ipcdev.i2c_base) { iounmap(ipcdev.ipc_base); return -ENOMEM; } intel_scu_devices_create(); return 0; } /** * ipc_remove - remove a bound IPC device * @pdev: PCI device * * In practice the SCU is not removable but this function is also * called for each device on a module unload or cleanup which is the * path that will get used. 
 *
 * Free up the mappings and release the PCI resources
 */
static void ipc_remove(struct pci_dev *pdev)
{
	/* Tear down in (roughly) reverse order of ipc_probe(). */
	free_irq(pdev->irq, &ipcdev);
	pci_release_regions(pdev);
	pci_dev_put(ipcdev.pdev);
	iounmap(ipcdev.ipc_base);
	iounmap(ipcdev.i2c_base);
	ipcdev.pdev = NULL;	/* allow a subsequent probe to bind again */
	intel_scu_devices_destroy();
}

/* Supported SCU variants, each paired with its platform data. */
static const struct pci_device_id pci_ids[] = {
	{
		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT),
		(kernel_ulong_t)&intel_scu_ipc_lincroft_pdata,
	}, {
		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL),
		(kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
	}, {
		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CLOVERVIEW),
		(kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
	}, {
		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER),
		(kernel_ulong_t)&intel_scu_ipc_tangier_pdata,
	}, {
		0,
	}
};
MODULE_DEVICE_TABLE(pci, pci_ids);

static struct pci_driver ipc_driver = {
	.name = "intel_scu_ipc",
	.id_table = pci_ids,
	.probe = ipc_probe,
	.remove = ipc_remove,
};

/* Register only on a recognized Intel MID platform. */
static int __init intel_scu_ipc_init(void)
{
	platform = intel_mid_identify_cpu();
	if (platform == 0)
		return -ENODEV;
	return pci_register_driver(&ipc_driver);
}

static void __exit intel_scu_ipc_exit(void)
{
	pci_unregister_driver(&ipc_driver);
}

MODULE_AUTHOR("Sreedhara DS <sreedhara.ds@intel.com>");
MODULE_DESCRIPTION("Intel SCU IPC driver");
MODULE_LICENSE("GPL");
module_init(intel_scu_ipc_init);
module_exit(intel_scu_ipc_exit);
gpl-2.0
lukier/linux-hi3518
sound/firewire/bebob/bebob_maudio.c
486
21556
/*
 * bebob_maudio.c - a part of driver for BeBoB based devices
 *
 * Copyright (c) 2013-2014 Takashi Sakamoto
 *
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include "./bebob.h"
#include <sound/control.h>

/*
 * Just after powering on, Firewire 410/Audiophile/1814 and ProjectMix I/O
 * wait for a firmware blob to be downloaded. To enable these devices, drivers
 * should upload the firmware blob and, when the upload completes, send a
 * command to initialize the configuration to factory settings. These devices
 * then generate a bus reset and are recognized as new devices with the
 * firmware.
 *
 * But with firmware version 5058 or later, the firmware is stored in flash
 * memory in the device and drivers can tell the bootloader to load the
 * firmware by sending a cue. This cue must be sent one time.
 *
 * For streaming, both output and input streams are needed for Firewire 410
 * and Ozonic. A single stream is OK for the other devices even if the clock
 * source is not SYT-Match (I note no devices use SYT-Match).
 *
 * Without streaming, the devices except for Firewire Audiophile can mix any
 * input and output. For this reason, Audiophile cannot be used as a
 * standalone mixer.
 *
 * Firewire 1814 and ProjectMix I/O use special firmware. It will be frozen
 * when receiving any commands which the firmware can't understand. These
 * devices utilize a completely different control system: it is some
 * write-transaction directly into a certain address. All of the addresses for
 * mixer functionality are between 0xffc700700000 and 0xffc70070009c.
*/ /* Offset from information register */ #define INFO_OFFSET_SW_DATE 0x20 /* Bootloader Protocol Version 1 */ #define MAUDIO_BOOTLOADER_CUE1 0x00000001 /* * Initializing configuration to factory settings (= 0x1101), (swapped in line), * Command code is zero (= 0x00), * the number of operands is zero (= 0x00)(at least significant byte) */ #define MAUDIO_BOOTLOADER_CUE2 0x01110000 /* padding */ #define MAUDIO_BOOTLOADER_CUE3 0x00000000 #define MAUDIO_SPECIFIC_ADDRESS 0xffc700000000ULL #define METER_OFFSET 0x00600000 /* some device has sync info after metering data */ #define METER_SIZE_SPECIAL 84 /* with sync info */ #define METER_SIZE_FW410 76 /* with sync info */ #define METER_SIZE_AUDIOPHILE 60 /* with sync info */ #define METER_SIZE_SOLO 52 /* with sync info */ #define METER_SIZE_OZONIC 48 #define METER_SIZE_NRV10 80 /* labels for metering */ #define ANA_IN "Analog In" #define ANA_OUT "Analog Out" #define DIG_IN "Digital In" #define SPDIF_IN "S/PDIF In" #define ADAT_IN "ADAT In" #define DIG_OUT "Digital Out" #define SPDIF_OUT "S/PDIF Out" #define ADAT_OUT "ADAT Out" #define STRM_IN "Stream In" #define AUX_OUT "Aux Out" #define HP_OUT "HP Out" /* for NRV */ #define UNKNOWN_METER "Unknown" struct special_params { bool is1814; unsigned int clk_src; unsigned int dig_in_fmt; unsigned int dig_out_fmt; unsigned int clk_lock; struct snd_ctl_elem_id *ctl_id_sync; }; /* * For some M-Audio devices, this module just send cue to load firmware. After * loading, the device generates bus reset and newly detected. * * If we make any transactions to load firmware, the operation may failed. 
*/ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) { struct fw_device *device = fw_parent_device(unit); int err, rcode; u64 date; __le32 cues[3] = { cpu_to_le32(MAUDIO_BOOTLOADER_CUE1), cpu_to_le32(MAUDIO_BOOTLOADER_CUE2), cpu_to_le32(MAUDIO_BOOTLOADER_CUE3) }; /* check date of software used to build */ err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE, &date, sizeof(u64)); if (err < 0) goto end; /* * firmware version 5058 or later has date later than "20070401", but * 'date' is not null-terminated. */ if (date < 0x3230303730343031LL) { dev_err(&unit->device, "Use firmware version 5058 or later\n"); err = -ENOSYS; goto end; } rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST, device->node_id, device->generation, device->max_speed, BEBOB_ADDR_REG_REQ, cues, sizeof(cues)); if (rcode != RCODE_COMPLETE) { dev_err(&unit->device, "Failed to send a cue to load firmware\n"); err = -EIO; } end: return err; } static inline int get_meter(struct snd_bebob *bebob, void *buf, unsigned int size) { return snd_fw_transaction(bebob->unit, TCODE_READ_BLOCK_REQUEST, MAUDIO_SPECIFIC_ADDRESS + METER_OFFSET, buf, size, 0); } static int check_clk_sync(struct snd_bebob *bebob, unsigned int size, bool *sync) { int err; u8 *buf; buf = kmalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; err = get_meter(bebob, buf, size); if (err < 0) goto end; /* if synced, this value is the same as SFC of FDF in CIP header */ *sync = (buf[size - 2] != 0xff); end: kfree(buf); return err; } /* * dig_fmt: 0x00:S/PDIF, 0x01:ADAT * clk_lock: 0x00:unlock, 0x01:lock */ static int avc_maudio_set_special_clk(struct snd_bebob *bebob, unsigned int clk_src, unsigned int dig_in_fmt, unsigned int dig_out_fmt, unsigned int clk_lock) { struct special_params *params = bebob->maudio_special_quirk; int err; u8 *buf; if (amdtp_stream_running(&bebob->rx_stream) || amdtp_stream_running(&bebob->tx_stream)) return -EBUSY; buf = kmalloc(12, GFP_KERNEL); if (buf == NULL) return -ENOMEM; buf[0] = 
0x00; /* CONTROL */ buf[1] = 0xff; /* UNIT */ buf[2] = 0x00; /* vendor dependent */ buf[3] = 0x04; /* company ID high */ buf[4] = 0x00; /* company ID middle */ buf[5] = 0x04; /* company ID low */ buf[6] = 0xff & clk_src; /* clock source */ buf[7] = 0xff & dig_in_fmt; /* input digital format */ buf[8] = 0xff & dig_out_fmt; /* output digital format */ buf[9] = 0xff & clk_lock; /* lock these settings */ buf[10] = 0x00; /* padding */ buf[11] = 0x00; /* padding */ err = fcp_avc_transaction(bebob->unit, buf, 12, buf, 12, BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9)); if ((err > 0) && (err < 10)) err = -EIO; else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ err = -ENOSYS; else if (buf[0] == 0x0a) /* REJECTED */ err = -EINVAL; if (err < 0) goto end; params->clk_src = buf[6]; params->dig_in_fmt = buf[7]; params->dig_out_fmt = buf[8]; params->clk_lock = buf[9]; if (params->ctl_id_sync) snd_ctl_notify(bebob->card, SNDRV_CTL_EVENT_MASK_VALUE, params->ctl_id_sync); err = 0; end: kfree(buf); return err; } static void special_stream_formation_set(struct snd_bebob *bebob) { static const unsigned int ch_table[2][2][3] = { /* AMDTP_OUT_STREAM */ { { 6, 6, 4 }, /* SPDIF */ { 12, 8, 4 } }, /* ADAT */ /* AMDTP_IN_STREAM */ { { 10, 10, 2 }, /* SPDIF */ { 16, 12, 2 } } /* ADAT */ }; struct special_params *params = bebob->maudio_special_quirk; unsigned int i, max; max = SND_BEBOB_STRM_FMT_ENTRIES - 1; if (!params->is1814) max -= 2; for (i = 0; i < max; i++) { bebob->tx_stream_formations[i + 1].pcm = ch_table[AMDTP_IN_STREAM][params->dig_in_fmt][i / 2]; bebob->tx_stream_formations[i + 1].midi = 1; bebob->rx_stream_formations[i + 1].pcm = ch_table[AMDTP_OUT_STREAM][params->dig_out_fmt][i / 2]; bebob->rx_stream_formations[i + 1].midi = 1; } } static int add_special_controls(struct snd_bebob *bebob); int snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814) { struct special_params *params; int err; params = kzalloc(sizeof(struct 
special_params), GFP_KERNEL); if (params == NULL) return -ENOMEM; mutex_lock(&bebob->mutex); bebob->maudio_special_quirk = (void *)params; params->is1814 = is1814; /* initialize these parameters because driver is not allowed to ask */ bebob->rx_stream.context = ERR_PTR(-1); bebob->tx_stream.context = ERR_PTR(-1); err = avc_maudio_set_special_clk(bebob, 0x03, 0x00, 0x00, 0x00); if (err < 0) { dev_err(&bebob->unit->device, "fail to initialize clock params: %d\n", err); goto end; } err = add_special_controls(bebob); if (err < 0) goto end; special_stream_formation_set(bebob); if (params->is1814) { bebob->midi_input_ports = 1; bebob->midi_output_ports = 1; } else { bebob->midi_input_ports = 2; bebob->midi_output_ports = 2; } end: if (err < 0) { kfree(params); bebob->maudio_special_quirk = NULL; } mutex_unlock(&bebob->mutex); return err; } /* Input plug shows actual rate. Output plug is needless for this purpose. */ static int special_get_rate(struct snd_bebob *bebob, unsigned int *rate) { int err, trials; trials = 0; do { err = avc_general_get_sig_fmt(bebob->unit, rate, AVC_GENERAL_PLUG_DIR_IN, 0); } while (err == -EAGAIN && ++trials < 3); return err; } static int special_set_rate(struct snd_bebob *bebob, unsigned int rate) { struct special_params *params = bebob->maudio_special_quirk; int err; err = avc_general_set_sig_fmt(bebob->unit, rate, AVC_GENERAL_PLUG_DIR_OUT, 0); if (err < 0) goto end; /* * Just after changing sampling rate for output, a followed command * for input is easy to fail. This is a workaround fot this issue. 
*/ msleep(100); err = avc_general_set_sig_fmt(bebob->unit, rate, AVC_GENERAL_PLUG_DIR_IN, 0); if (err < 0) goto end; if (params->ctl_id_sync) snd_ctl_notify(bebob->card, SNDRV_CTL_EVENT_MASK_VALUE, params->ctl_id_sync); end: return err; } /* Clock source control for special firmware */ static enum snd_bebob_clock_type special_clk_types[] = { SND_BEBOB_CLOCK_TYPE_INTERNAL, /* With digital mute */ SND_BEBOB_CLOCK_TYPE_EXTERNAL, /* SPDIF/ADAT */ SND_BEBOB_CLOCK_TYPE_EXTERNAL, /* Word Clock */ SND_BEBOB_CLOCK_TYPE_INTERNAL, }; static int special_clk_get(struct snd_bebob *bebob, unsigned int *id) { struct special_params *params = bebob->maudio_special_quirk; *id = params->clk_src; return 0; } static int special_clk_ctl_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *einf) { static const char *const special_clk_labels[] = { "Internal with Digital Mute", "Digital", "Word Clock", "Internal" }; return snd_ctl_enum_info(einf, 1, ARRAY_SIZE(special_clk_types), special_clk_labels); } static int special_clk_ctl_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uval) { struct snd_bebob *bebob = snd_kcontrol_chip(kctl); struct special_params *params = bebob->maudio_special_quirk; uval->value.enumerated.item[0] = params->clk_src; return 0; } static int special_clk_ctl_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uval) { struct snd_bebob *bebob = snd_kcontrol_chip(kctl); struct special_params *params = bebob->maudio_special_quirk; int err, id; id = uval->value.enumerated.item[0]; if (id >= ARRAY_SIZE(special_clk_types)) return -EINVAL; mutex_lock(&bebob->mutex); err = avc_maudio_set_special_clk(bebob, id, params->dig_in_fmt, params->dig_out_fmt, params->clk_lock); mutex_unlock(&bebob->mutex); if (err >= 0) err = 1; return err; } static struct snd_kcontrol_new special_clk_ctl = { .name = "Clock Source", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = special_clk_ctl_info, .get = special_clk_ctl_get, .put = 
special_clk_ctl_put }; /* Clock synchronization control for special firmware */ static int special_sync_ctl_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *einf) { einf->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; einf->count = 1; einf->value.integer.min = 0; einf->value.integer.max = 1; return 0; } static int special_sync_ctl_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uval) { struct snd_bebob *bebob = snd_kcontrol_chip(kctl); int err; bool synced = 0; err = check_clk_sync(bebob, METER_SIZE_SPECIAL, &synced); if (err >= 0) uval->value.integer.value[0] = synced; return 0; } static struct snd_kcontrol_new special_sync_ctl = { .name = "Sync Status", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = special_sync_ctl_info, .get = special_sync_ctl_get, }; /* Digital input interface control for special firmware */ static const char *const special_dig_in_iface_labels[] = { "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical" }; static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *einf) { return snd_ctl_enum_info(einf, 1, ARRAY_SIZE(special_dig_in_iface_labels), special_dig_in_iface_labels); } static int special_dig_in_iface_ctl_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uval) { struct snd_bebob *bebob = snd_kcontrol_chip(kctl); struct special_params *params = bebob->maudio_special_quirk; unsigned int dig_in_iface; int err, val; mutex_lock(&bebob->mutex); err = avc_audio_get_selector(bebob->unit, 0x00, 0x04, &dig_in_iface); if (err < 0) { dev_err(&bebob->unit->device, "fail to get digital input interface: %d\n", err); goto end; } /* encoded id for user value */ val = (params->dig_in_fmt << 1) | (dig_in_iface & 0x01); /* for ADAT Optical */ if (val > 2) val = 2; uval->value.enumerated.item[0] = val; end: mutex_unlock(&bebob->mutex); return err; } static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uval) { struct snd_bebob *bebob = 
snd_kcontrol_chip(kctl); struct special_params *params = bebob->maudio_special_quirk; unsigned int id, dig_in_fmt, dig_in_iface; int err; id = uval->value.enumerated.item[0]; if (id >= ARRAY_SIZE(special_dig_in_iface_labels)) return -EINVAL; /* decode user value */ dig_in_fmt = (id >> 1) & 0x01; dig_in_iface = id & 0x01; mutex_lock(&bebob->mutex); err = avc_maudio_set_special_clk(bebob, params->clk_src, dig_in_fmt, params->dig_out_fmt, params->clk_lock); if (err < 0) goto end; /* For ADAT, optical interface is only available. */ if (params->dig_in_fmt > 0) { err = 1; goto end; } /* For S/PDIF, optical/coaxial interfaces are selectable. */ err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface); if (err < 0) dev_err(&bebob->unit->device, "fail to set digital input interface: %d\n", err); err = 1; end: special_stream_formation_set(bebob); mutex_unlock(&bebob->mutex); return err; } static struct snd_kcontrol_new special_dig_in_iface_ctl = { .name = "Digital Input Interface", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = special_dig_in_iface_ctl_info, .get = special_dig_in_iface_ctl_get, .put = special_dig_in_iface_ctl_set }; /* Digital output interface control for special firmware */ static const char *const special_dig_out_iface_labels[] = { "S/PDIF Optical and Coaxial", "ADAT Optical" }; static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *einf) { return snd_ctl_enum_info(einf, 1, ARRAY_SIZE(special_dig_out_iface_labels), special_dig_out_iface_labels); } static int special_dig_out_iface_ctl_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uval) { struct snd_bebob *bebob = snd_kcontrol_chip(kctl); struct special_params *params = bebob->maudio_special_quirk; mutex_lock(&bebob->mutex); uval->value.enumerated.item[0] = params->dig_out_fmt; mutex_unlock(&bebob->mutex); return 0; } static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl, struct 
snd_ctl_elem_value *uval) { struct snd_bebob *bebob = snd_kcontrol_chip(kctl); struct special_params *params = bebob->maudio_special_quirk; unsigned int id; int err; id = uval->value.enumerated.item[0]; if (id >= ARRAY_SIZE(special_dig_out_iface_labels)) return -EINVAL; mutex_lock(&bebob->mutex); err = avc_maudio_set_special_clk(bebob, params->clk_src, params->dig_in_fmt, id, params->clk_lock); if (err >= 0) { special_stream_formation_set(bebob); err = 1; } mutex_unlock(&bebob->mutex); return err; } static struct snd_kcontrol_new special_dig_out_iface_ctl = { .name = "Digital Output Interface", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = special_dig_out_iface_ctl_info, .get = special_dig_out_iface_ctl_get, .put = special_dig_out_iface_ctl_set }; static int add_special_controls(struct snd_bebob *bebob) { struct snd_kcontrol *kctl; struct special_params *params = bebob->maudio_special_quirk; int err; kctl = snd_ctl_new1(&special_clk_ctl, bebob); err = snd_ctl_add(bebob->card, kctl); if (err < 0) goto end; kctl = snd_ctl_new1(&special_sync_ctl, bebob); err = snd_ctl_add(bebob->card, kctl); if (err < 0) goto end; params->ctl_id_sync = &kctl->id; kctl = snd_ctl_new1(&special_dig_in_iface_ctl, bebob); err = snd_ctl_add(bebob->card, kctl); if (err < 0) goto end; kctl = snd_ctl_new1(&special_dig_out_iface_ctl, bebob); err = snd_ctl_add(bebob->card, kctl); end: return err; } /* Hardware metering for special firmware */ static const char *const special_meter_labels[] = { ANA_IN, ANA_IN, ANA_IN, ANA_IN, SPDIF_IN, ADAT_IN, ADAT_IN, ADAT_IN, ADAT_IN, ANA_OUT, ANA_OUT, SPDIF_OUT, ADAT_OUT, ADAT_OUT, ADAT_OUT, ADAT_OUT, HP_OUT, HP_OUT, AUX_OUT }; static int special_meter_get(struct snd_bebob *bebob, u32 *target, unsigned int size) { __be16 *buf; unsigned int i, c, channels; int err; channels = ARRAY_SIZE(special_meter_labels) * 2; if (size < channels * sizeof(u32)) return -EINVAL; /* omit last 4 bytes because it's clock info. 
*/ buf = kmalloc(METER_SIZE_SPECIAL - 4, GFP_KERNEL); if (buf == NULL) return -ENOMEM; err = get_meter(bebob, (void *)buf, METER_SIZE_SPECIAL - 4); if (err < 0) goto end; /* Its format is u16 and some channels are unknown. */ i = 0; for (c = 2; c < channels + 2; c++) target[i++] = be16_to_cpu(buf[c]) << 16; end: kfree(buf); return err; } /* last 4 bytes are omitted because it's clock info. */ static const char *const fw410_meter_labels[] = { ANA_IN, DIG_IN, ANA_OUT, ANA_OUT, ANA_OUT, ANA_OUT, DIG_OUT, HP_OUT }; static const char *const audiophile_meter_labels[] = { ANA_IN, DIG_IN, ANA_OUT, ANA_OUT, DIG_OUT, HP_OUT, AUX_OUT, }; static const char *const solo_meter_labels[] = { ANA_IN, DIG_IN, STRM_IN, STRM_IN, ANA_OUT, DIG_OUT }; /* no clock info */ static const char *const ozonic_meter_labels[] = { ANA_IN, ANA_IN, STRM_IN, STRM_IN, ANA_OUT, ANA_OUT }; /* TODO: need testers. these positions are based on authour's assumption */ static const char *const nrv10_meter_labels[] = { ANA_IN, ANA_IN, ANA_IN, ANA_IN, DIG_IN, ANA_OUT, ANA_OUT, ANA_OUT, ANA_OUT, DIG_IN }; static int normal_meter_get(struct snd_bebob *bebob, u32 *buf, unsigned int size) { const struct snd_bebob_meter_spec *spec = bebob->spec->meter; unsigned int c, channels; int err; channels = spec->num * 2; if (size < channels * sizeof(u32)) return -EINVAL; err = get_meter(bebob, (void *)buf, size); if (err < 0) goto end; for (c = 0; c < channels; c++) be32_to_cpus(&buf[c]); /* swap stream channels because inverted */ if (spec->labels == solo_meter_labels) { swap(buf[4], buf[6]); swap(buf[5], buf[7]); } end: return err; } /* for special customized devices */ static const struct snd_bebob_rate_spec special_rate_spec = { .get = &special_get_rate, .set = &special_set_rate, }; static const struct snd_bebob_clock_spec special_clk_spec = { .num = ARRAY_SIZE(special_clk_types), .types = special_clk_types, .get = &special_clk_get, }; static const struct snd_bebob_meter_spec special_meter_spec = { .num = 
ARRAY_SIZE(special_meter_labels), .labels = special_meter_labels, .get = &special_meter_get }; const struct snd_bebob_spec maudio_special_spec = { .clock = &special_clk_spec, .rate = &special_rate_spec, .meter = &special_meter_spec }; /* Firewire 410 specification */ static const struct snd_bebob_rate_spec usual_rate_spec = { .get = &snd_bebob_stream_get_rate, .set = &snd_bebob_stream_set_rate, }; static const struct snd_bebob_meter_spec fw410_meter_spec = { .num = ARRAY_SIZE(fw410_meter_labels), .labels = fw410_meter_labels, .get = &normal_meter_get }; const struct snd_bebob_spec maudio_fw410_spec = { .clock = NULL, .rate = &usual_rate_spec, .meter = &fw410_meter_spec }; /* Firewire Audiophile specification */ static const struct snd_bebob_meter_spec audiophile_meter_spec = { .num = ARRAY_SIZE(audiophile_meter_labels), .labels = audiophile_meter_labels, .get = &normal_meter_get }; const struct snd_bebob_spec maudio_audiophile_spec = { .clock = NULL, .rate = &usual_rate_spec, .meter = &audiophile_meter_spec }; /* Firewire Solo specification */ static const struct snd_bebob_meter_spec solo_meter_spec = { .num = ARRAY_SIZE(solo_meter_labels), .labels = solo_meter_labels, .get = &normal_meter_get }; const struct snd_bebob_spec maudio_solo_spec = { .clock = NULL, .rate = &usual_rate_spec, .meter = &solo_meter_spec }; /* Ozonic specification */ static const struct snd_bebob_meter_spec ozonic_meter_spec = { .num = ARRAY_SIZE(ozonic_meter_labels), .labels = ozonic_meter_labels, .get = &normal_meter_get }; const struct snd_bebob_spec maudio_ozonic_spec = { .clock = NULL, .rate = &usual_rate_spec, .meter = &ozonic_meter_spec }; /* NRV10 specification */ static const struct snd_bebob_meter_spec nrv10_meter_spec = { .num = ARRAY_SIZE(nrv10_meter_labels), .labels = nrv10_meter_labels, .get = &normal_meter_get }; const struct snd_bebob_spec maudio_nrv10_spec = { .clock = NULL, .rate = &usual_rate_spec, .meter = &nrv10_meter_spec };
gpl-2.0
sleshepic/epic_touch_kernel
drivers/infiniband/hw/nes/nes_cm.c
742
103557
/* * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #define TCPOPT_TIMESTAMP 8 #include <asm/atomic.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/init.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/notifier.h> #include <linux/net.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/time.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/random.h> #include <linux/list.h> #include <linux/threads.h> #include <linux/highmem.h> #include <linux/slab.h> #include <net/arp.h> #include <net/neighbour.h> #include <net/route.h> #include <net/ip_fib.h> #include <net/tcp.h> #include "nes.h" u32 cm_packets_sent; u32 cm_packets_bounced; u32 cm_packets_dropped; u32 cm_packets_retrans; u32 cm_packets_created; u32 cm_packets_received; atomic_t cm_listens_created; atomic_t cm_listens_destroyed; u32 cm_backlog_drops; atomic_t cm_loopbacks; atomic_t cm_nodes_created; atomic_t cm_nodes_destroyed; atomic_t cm_accel_dropped_pkts; atomic_t cm_resets_recvd; static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *); static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *); static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *); static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *, struct nes_vnic *, u16, void *, struct nes_cm_info *); static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *); static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *); static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *); static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *); static int mini_cm_dealloc_core(struct nes_cm_core *); static int mini_cm_get(struct nes_cm_core *); static int mini_cm_set(struct nes_cm_core *, u32, u32); static void form_cm_frame(struct sk_buff *, struct nes_cm_node 
*, void *, u32, void *, u32, u8); static int add_ref_cm_node(struct nes_cm_node *); static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); static int nes_cm_disconn_true(struct nes_qp *); static int nes_cm_post_event(struct nes_cm_event *event); static int nes_disconnect(struct nes_qp *nesqp, int abrupt); static void nes_disconnect_worker(struct work_struct *work); static int send_mpa_request(struct nes_cm_node *, struct sk_buff *); static int send_mpa_reject(struct nes_cm_node *); static int send_syn(struct nes_cm_node *, u32, struct sk_buff *); static int send_reset(struct nes_cm_node *, struct sk_buff *); static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb); static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb); static void process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *); static void active_open_err(struct nes_cm_node *, struct sk_buff *, int); static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int); static void cleanup_retrans_entry(struct nes_cm_node *); static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *); static void free_retrans_entry(struct nes_cm_node *cm_node); static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, struct sk_buff *skb, int optionsize, int passive); /* CM event handler functions */ static void cm_event_connected(struct nes_cm_event *); static void cm_event_connect_error(struct nes_cm_event *); static void cm_event_reset(struct nes_cm_event *); static void cm_event_mpa_req(struct nes_cm_event *); static void cm_event_mpa_reject(struct nes_cm_event *); static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node); static void print_core(struct nes_cm_core *core); /* External CM API Interface */ /* instance of function pointers for client API */ /* set address of this instance to cm_core->cm_ops at cm_core alloc */ static struct nes_cm_ops nes_cm_api = { mini_cm_accelerated, mini_cm_listen, 
mini_cm_del_listen, mini_cm_connect, mini_cm_close, mini_cm_accept, mini_cm_reject, mini_cm_recv_pkt, mini_cm_dealloc_core, mini_cm_get, mini_cm_set }; static struct nes_cm_core *g_cm_core; atomic_t cm_connects; atomic_t cm_accepts; atomic_t cm_disconnects; atomic_t cm_closes; atomic_t cm_connecteds; atomic_t cm_connect_reqs; atomic_t cm_rejects; /** * create_event */ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node, enum nes_cm_event_type type) { struct nes_cm_event *event; if (!cm_node->cm_id) return NULL; /* allocate an empty event */ event = kzalloc(sizeof(*event), GFP_ATOMIC); if (!event) return NULL; event->type = type; event->cm_node = cm_node; event->cm_info.rem_addr = cm_node->rem_addr; event->cm_info.loc_addr = cm_node->loc_addr; event->cm_info.rem_port = cm_node->rem_port; event->cm_info.loc_port = cm_node->loc_port; event->cm_info.cm_id = cm_node->cm_id; nes_debug(NES_DBG_CM, "cm_node=%p Created event=%p, type=%u, " "dst_addr=%08x[%x], src_addr=%08x[%x]\n", cm_node, event, type, event->cm_info.loc_addr, event->cm_info.loc_port, event->cm_info.rem_addr, event->cm_info.rem_port); nes_cm_post_event(event); return event; } /** * send_mpa_request */ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) { if (!skb) { nes_debug(NES_DBG_CM, "skb set to NULL\n"); return -1; } /* send an MPA Request frame */ form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, cm_node->mpa_frame_size, SET_ACK); return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); } static int send_mpa_reject(struct nes_cm_node *cm_node) { struct sk_buff *skb = NULL; skb = dev_alloc_skb(MAX_CM_BUFFER); if (!skb) { nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); return -ENOMEM; } /* send an MPA reject frame */ form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, cm_node->mpa_frame_size, SET_ACK | SET_FIN); cm_node->state = NES_CM_STATE_FIN_WAIT1; return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); } /** * 
recv_mpa - process a received TCP pkt, we are expecting an
 * IETF MPA frame
 */
static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
		     u32 len)
{
	struct ietf_mpa_frame *mpa_frame;

	/* default to ACCEPT; only a set REJECT flag downgrades it below */
	*type = NES_MPA_REQUEST_ACCEPT;

	/* assume req frame is in tcp data payload */
	if (len < sizeof(struct ietf_mpa_frame)) {
		nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
		return -EINVAL;
	}

	mpa_frame = (struct ietf_mpa_frame *)buffer;
	cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
	/* make sure mpa private data len is less than 512 bytes */
	if (cm_node->mpa_frame_size > IETF_MAX_PRIV_DATA_LEN) {
		nes_debug(NES_DBG_CM, "The received Length of Private"
			  " Data field exceeds 512 octets\n");
		return -EINVAL;
	}
	/*
	 * make sure MPA receiver interoperate with the
	 * received MPA version and MPA key information
	 *
	 */
	if (mpa_frame->rev != mpa_version) {
		nes_debug(NES_DBG_CM, "The received mpa version"
			  " can not be interoperated\n");
		return -EINVAL;
	}
	/*
	 * The expected key depends on which side we are: only the active
	 * side (state MPAREQ_SENT) should see a reply key; everyone else
	 * must present the request key.
	 */
	if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
			nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
			return -EINVAL;
		}
	} else {
		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
			nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
			return -EINVAL;
		}
	}

	/* the frame must be exactly header + advertised private data */
	if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
		nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
			  " complete (%x + %x != %x)\n",
			  cm_node->mpa_frame_size,
			  (u32)sizeof(struct ietf_mpa_frame), len);
		return -EINVAL;
	}
	/* make sure it does not exceed the max size */
	if (len > MAX_CM_BUFFER) {
		nes_debug(NES_DBG_CM, "The received ietf buffer was too large"
			  " (%x + %x != %x)\n",
			  cm_node->mpa_frame_size,
			  (u32)sizeof(struct ietf_mpa_frame), len);
		return -EINVAL;
	}

	/* copy entire MPA frame to our cm_node's frame */
	memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame),
	       cm_node->mpa_frame_size);

	if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
		*type = NES_MPA_REQUEST_REJECT;
	return 0;
}


/**
 * form_cm_frame - get a free packet and build an empty (Ethernet + IP +
 * TCP) frame using the cm_node's addressing/sequence info.  The caller
 * supplies optional TCP options and payload data; `flags` selects which
 * of SYN/ACK/FIN/RST to set.  Local sequence state in cm_node is
 * advanced as a side effect (SYN/FIN consume one sequence number, data
 * consumes `datasize`).
 */
static void form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
			  void *options, u32 optionsize, void *data,
			  u32 datasize, u8 flags)
{
	struct tcphdr *tcph;
	struct iphdr *iph;
	struct ethhdr *ethh;
	u8 *buf;
	u16 packetsize = sizeof(*iph);

	packetsize += sizeof(*tcph);
	packetsize += optionsize + datasize;

	/* caller-provided skb may be recycled; clear the header area */
	memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph));

	skb->len = 0;

	buf = skb_put(skb, packetsize + ETH_HLEN);

	ethh = (struct ethhdr *)buf;
	buf += ETH_HLEN;

	iph = (struct iphdr *)buf;
	buf += sizeof(*iph);
	tcph = (struct tcphdr *)buf;
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, ETH_HLEN);
	skb_set_transport_header(skb, ETH_HLEN + sizeof(*iph));
	buf += sizeof(*tcph);		/* buf now points at the options area */

	skb->ip_summed = CHECKSUM_PARTIAL;	/* hardware fills in checksums */
	skb->protocol = htons(0x800);
	skb->data_len = 0;
	skb->mac_len = ETH_HLEN;

	memcpy(ethh->h_dest, cm_node->rem_mac, ETH_ALEN);
	memcpy(ethh->h_source, cm_node->loc_mac, ETH_ALEN);
	ethh->h_proto = htons(0x0800);

	iph->version = IPVERSION;
	iph->ihl = 5;		/* 5 * 4Byte words, IP headr len */
	iph->tos = 0;
	iph->tot_len = htons(packetsize);
	iph->id = htons(++cm_node->tcp_cntxt.loc_id);

	iph->frag_off = htons(0x4000);	/* DF bit set */

	iph->ttl = 0x40;
	iph->protocol = 0x06;	/* IPPROTO_TCP */

	iph->saddr = htonl(cm_node->loc_addr);
	iph->daddr = htonl(cm_node->rem_addr);

	tcph->source = htons(cm_node->loc_port);
	tcph->dest = htons(cm_node->rem_port);
	tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);

	if (flags & SET_ACK) {
		cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
		tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
		tcph->ack = 1;
	} else
		tcph->ack_seq = 0;

	if (flags & SET_SYN) {
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->syn = 1;
	} else
		cm_node->tcp_cntxt.loc_seq_num += datasize;	/* data (no syn) */

	if (flags & SET_FIN) {
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->fin = 1;
	}

	if (flags & SET_RST)
		tcph->rst = 1;

	/* header length in 32-bit words, options rounded up */
	tcph->doff = (u16)((sizeof(*tcph) + optionsize + 3) >> 2);
	tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
	tcph->urg_ptr = 0;
	if (optionsize)
		memcpy(buf, options, optionsize);
	buf += optionsize;
	if (datasize)
		memcpy(buf, data, datasize);

	skb_shinfo(skb)->nr_frags = 0;
	cm_packets_created++;
}


/**
 * print_core - dump a cm core's counters to the debug log
 */
static void print_core(struct nes_cm_core *core)
{
	nes_debug(NES_DBG_CM, "---------------------------------------------\n");
	nes_debug(NES_DBG_CM, "CM Core -- (core = %p )\n", core);
	if (!core)
		return;
	nes_debug(NES_DBG_CM, "---------------------------------------------\n");

	nes_debug(NES_DBG_CM, "State : %u \n", core->state);

	nes_debug(NES_DBG_CM, "Listen Nodes : %u \n",
		  atomic_read(&core->listen_node_cnt));
	nes_debug(NES_DBG_CM, "Active Nodes : %u \n",
		  atomic_read(&core->node_cnt));

	nes_debug(NES_DBG_CM, "core : %p \n", core);

	nes_debug(NES_DBG_CM, "-------------- end core ---------------\n");
}


/**
 * schedule_nes_timer
 * note - cm_node needs to be protected before calling this.
Encase in:
 *	rem_ref_cm_node(cm_core, cm_node); add_ref_cm_node(cm_node);
 *
 * Queue either a retransmit entry (NES_TIMER_TYPE_SEND) or a close
 * entry (NES_TIMER_TYPE_CLOSE) for @cm_node and arm the core TCP timer
 * if it is not already pending.  For SEND entries the skb is also
 * transmitted immediately; on xmit failure the entry is rescheduled
 * for the next timer tick instead of failing the call.
 */
int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
		       enum nes_timer_type type, int send_retrans,
		       int close_when_complete)
{
	unsigned long flags;
	struct nes_cm_core *cm_core = cm_node->cm_core;
	struct nes_timer_entry *new_send;
	int ret = 0;
	u32 was_timer_set;

	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
	if (!new_send)
		return -ENOMEM;

	/* new_send->timetosend = currenttime */
	new_send->retrycount = NES_DEFAULT_RETRYS;
	new_send->retranscount = NES_DEFAULT_RETRANS;
	new_send->skb = skb;
	new_send->timetosend = jiffies;
	new_send->type = type;
	new_send->netdev = cm_node->netdev;
	new_send->send_retrans = send_retrans;
	new_send->close_when_complete = close_when_complete;

	if (type == NES_TIMER_TYPE_CLOSE) {
		new_send->timetosend += (HZ / 10);
		/* only one pending close entry is allowed per node */
		if (cm_node->recv_entry) {
			kfree(new_send);
			WARN_ON(1);
			return -EINVAL;
		}
		cm_node->recv_entry = new_send;
	}

	if (type == NES_TIMER_TYPE_SEND) {
		new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
		/* extra skb reference: the timer path may retransmit it */
		atomic_inc(&new_send->skb->users);
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		cm_node->send_entry = new_send;
		add_ref_cm_node(cm_node);
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;

		ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
		if (ret != NETDEV_TX_OK) {
			nes_debug(NES_DBG_CM, "Error sending packet %p "
				  "(jiffies = %lu)\n", new_send, jiffies);
			/* send failed: let the timer tick retry immediately */
			new_send->timetosend = jiffies;
			ret = NETDEV_TX_OK;
		} else {
			cm_packets_sent++;
			if (!send_retrans) {
				cleanup_retrans_entry(cm_node);
				if (close_when_complete)
					rem_ref_cm_node(cm_core, cm_node);
				return ret;
			}
		}
	}

	was_timer_set = timer_pending(&cm_core->tcp_timer);
	if (!was_timer_set) {
		cm_core->tcp_timer.expires = new_send->timetosend;
		add_timer(&cm_core->tcp_timer);
	}

	return ret;
}


/**
 * nes_retrans_expired - retransmit budget for @cm_node is exhausted;
 * tear the connection down according to its current TCP-like state.
 */
static void nes_retrans_expired(struct nes_cm_node *cm_node)
{
	struct iw_cm_id *cm_id = cm_node->cm_id;
	enum nes_cm_node_state state = cm_node->state;
	cm_node->state = NES_CM_STATE_CLOSED;

	switch (state) {
	case NES_CM_STATE_SYN_RCVD:
	case NES_CM_STATE_CLOSING:
		rem_ref_cm_node(cm_node->cm_core, cm_node);
		break;
	case NES_CM_STATE_LAST_ACK:
	case NES_CM_STATE_FIN_WAIT1:
		if (cm_node->cm_id)
			cm_id->rem_ref(cm_id);
		send_reset(cm_node, NULL);
		break;
	default:
		/* active-side states: reset the peer and raise ABORTED */
		add_ref_cm_node(cm_node);
		send_reset(cm_node, NULL);
		create_event(cm_node, NES_CM_EVENT_ABORTED);
	}
}


/**
 * handle_recv_entry - complete a pending NES_TIMER_TYPE_CLOSE entry.
 * NOTE(review): for CLOSE entries recv_entry->skb actually carries a
 * struct nes_qp pointer, not an skb (see the cast below) — it appears
 * the field is overloaded; confirm against where CLOSE entries are
 * queued.  @rem_node non-zero means drop the node ref for the
 * TIME_WAIT case when no QP is attached.
 */
static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
{
	struct nes_timer_entry *recv_entry = cm_node->recv_entry;
	struct iw_cm_id *cm_id = cm_node->cm_id;
	struct nes_qp *nesqp;
	unsigned long qplockflags;

	if (!recv_entry)
		return;
	nesqp = (struct nes_qp *)recv_entry->skb;
	if (nesqp) {
		spin_lock_irqsave(&nesqp->lock, qplockflags);
		if (nesqp->cm_id) {
			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
				  "refcount = %d: HIT A "
				  "NES_TIMER_TYPE_CLOSE with something "
				  "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
				  atomic_read(&nesqp->refcount));
			nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
			nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
			nesqp->ibqp_state = IB_QPS_ERR;
			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
			nes_cm_disconn(nesqp);
		} else {
			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
				  "refcount = %d: HIT A "
				  "NES_TIMER_TYPE_CLOSE with nothing "
				  "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
				  atomic_read(&nesqp->refcount));
		}
	} else if (rem_node) {
		/* TIME_WAIT state */
		rem_ref_cm_node(cm_node->cm_core, cm_node);
	}
	if (cm_node->cm_id)
		cm_id->rem_ref(cm_id);
	kfree(recv_entry);
	cm_node->recv_entry = NULL;
}


/**
 * nes_cm_timer_tick - core timer callback.  Walks all connected nodes
 * that have pending close or retransmit entries, fires expired close
 * entries, retransmits expired send entries (with exponential backoff),
 * and re-arms the timer for the earliest future deadline.
 */
static void nes_cm_timer_tick(unsigned long pass)
{
	unsigned long flags;
	unsigned long nexttimeout = jiffies + NES_LONG_TIME;
	struct nes_cm_node *cm_node;
	struct nes_timer_entry *send_entry, *recv_entry;
	struct list_head *list_core_temp;
	struct list_head *list_node;
	struct nes_cm_core *cm_core = g_cm_core;
	u32 settimer = 0;
	unsigned long timetosend;
	int ret = NETDEV_TX_OK;

	struct list_head timer_list;
	INIT_LIST_HEAD(&timer_list);

	/* snapshot (with refs) the nodes that have work, under ht_lock */
	spin_lock_irqsave(&cm_core->ht_lock, flags);

	list_for_each_safe(list_node, list_core_temp,
			   &cm_core->connected_nodes) {
		cm_node = container_of(list_node, struct nes_cm_node, list);
		if ((cm_node->recv_entry) || (cm_node->send_entry)) {
			add_ref_cm_node(cm_node);
			list_add(&cm_node->timer_entry, &timer_list);
		}
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	list_for_each_safe(list_node, list_core_temp, &timer_list) {
		cm_node = container_of(list_node, struct nes_cm_node,
				       timer_entry);
		recv_entry = cm_node->recv_entry;

		if (recv_entry) {
			if (time_after(recv_entry->timetosend, jiffies)) {
				if (nexttimeout > recv_entry->timetosend ||
				    !settimer) {
					nexttimeout = recv_entry->timetosend;
					settimer = 1;
				}
			} else
				handle_recv_entry(cm_node, 1);
		}

		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		/* do {} while (0) gives the retransmit logic a single
		 * break-out point while the retrans lock is held */
		do {
			send_entry = cm_node->send_entry;
			if (!send_entry)
				break;
			if (time_after(send_entry->timetosend, jiffies)) {
				if (cm_node->state != NES_CM_STATE_TSA) {
					if ((nexttimeout >
					     send_entry->timetosend) ||
					    !settimer) {
						nexttimeout =
							send_entry->timetosend;
						settimer = 1;
					}
				} else {
					free_retrans_entry(cm_node);
				}
				break;
			}

			if ((cm_node->state == NES_CM_STATE_TSA) ||
			    (cm_node->state == NES_CM_STATE_CLOSED)) {
				free_retrans_entry(cm_node);
				break;
			}

			if (!send_entry->retranscount ||
			    !send_entry->retrycount) {
				cm_packets_dropped++;
				free_retrans_entry(cm_node);

				/* drop the lock: nes_retrans_expired may
				 * send packets / raise events */
				spin_unlock_irqrestore(
					&cm_node->retrans_list_lock, flags);
				nes_retrans_expired(cm_node);
				cm_node->state = NES_CM_STATE_CLOSED;
				spin_lock_irqsave(&cm_node->retrans_list_lock,
						  flags);
				break;
			}
			/* hold an extra skb ref across the retransmit */
			atomic_inc(&send_entry->skb->users);
			cm_packets_retrans++;
			nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
				  "for node %p, jiffies = %lu, time to send = "
				  "%lu, retranscount = %u, send_entry->seq_num = "
				  "0x%08X, cm_node->tcp_cntxt.rem_ack_num = "
				  "0x%08X\n", send_entry, cm_node, jiffies,
				  send_entry->timetosend,
				  send_entry->retranscount,
				  send_entry->seq_num,
				  cm_node->tcp_cntxt.rem_ack_num);
			/* xmit without the retrans lock held */
			spin_unlock_irqrestore(&cm_node->retrans_list_lock,
					       flags);
			ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev);
			spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
			if (ret != NETDEV_TX_OK) {
				nes_debug(NES_DBG_CM, "rexmit failed for "
					  "node=%p\n", cm_node);
				cm_packets_bounced++;
				send_entry->retrycount--;
				/* retry soon rather than after full backoff */
				nexttimeout = jiffies + NES_SHORT_TIME;
				settimer = 1;
				break;
			} else {
				cm_packets_sent++;
			}
			nes_debug(NES_DBG_CM, "Packet Sent: retrans count = "
				  "%u, retry count = %u.\n",
				  send_entry->retranscount,
				  send_entry->retrycount);
			if (send_entry->send_retrans) {
				send_entry->retranscount--;
				/* exponential backoff, capped at NES_MAX_TIMEOUT */
				timetosend = (NES_RETRY_TIMEOUT <<
					      (NES_DEFAULT_RETRANS - send_entry->retranscount));

				send_entry->timetosend = jiffies +
							 min(timetosend, NES_MAX_TIMEOUT);
				if (nexttimeout > send_entry->timetosend ||
				    !settimer) {
					nexttimeout = send_entry->timetosend;
					settimer = 1;
				}
			} else {
				int close_when_complete;
				close_when_complete =
					send_entry->close_when_complete;
				nes_debug(NES_DBG_CM, "cm_node=%p state=%d\n",
					  cm_node, cm_node->state);
				free_retrans_entry(cm_node);
				if (close_when_complete)
					rem_ref_cm_node(cm_node->cm_core,
							cm_node);
			}
		} while (0);

		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		/* drop the ref taken when the node was snapshotted above */
		rem_ref_cm_node(cm_node->cm_core, cm_node);
	}

	if (settimer) {
		if (!timer_pending(&cm_core->tcp_timer)) {
			cm_core->tcp_timer.expires = nexttimeout;
			add_timer(&cm_core->tcp_timer);
		}
	}
}


/**
 * send_syn - build and transmit a SYN (or SYN+ACK when @sendack) with
 * MSS, window-scale and optional write0 TCP options; the frame is
 * queued for retransmission via schedule_nes_timer().
 */
static int send_syn(struct nes_cm_node *cm_node, u32 sendack,
		    struct sk_buff *skb)
{
	int ret;
	int flags = SET_SYN;
	char optionsbuffer[sizeof(struct option_mss) +
			   sizeof(struct option_windowscale) +
			   sizeof(struct option_base) + TCP_OPTIONS_PADDING];

	int optionssize = 0;
	/* Sending MSS option */
	union all_known_options *options;

	if (!cm_node)
		return -EINVAL;

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_mss.optionnum = OPTION_NUMBER_MSS;
	options->as_mss.length = sizeof(struct option_mss);
	options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
	optionssize += sizeof(struct option_mss);

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
	options->as_windowscale.length = sizeof(struct option_windowscale);
	options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
	optionssize += sizeof(struct option_windowscale);

	if (sendack && !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)) {
		options = (union all_known_options *)&optionsbuffer[optionssize];
		options->as_base.optionnum = OPTION_NUMBER_WRITE0;
		options->as_base.length = sizeof(struct option_base);
		optionssize += sizeof(struct option_base);
		/* we need the size to be a multiple of 4 */
		options = (union all_known_options *)&optionsbuffer[optionssize];
		options->as_end = 1;
		optionssize += 1;
		options = (union all_known_options *)&optionsbuffer[optionssize];
		options->as_end = 1;
		optionssize += 1;
	}

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_end = OPTION_NUMBER_END;
	optionssize += 1;

	if (!skb)
		skb = dev_alloc_skb(MAX_CM_BUFFER);
	if (!skb) {
		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
		return -1;
	}

	if (sendack)
		flags |= SET_ACK;

	form_cm_frame(skb, cm_node, optionsbuffer, optionssize, NULL, 0, flags);
	/* send_retrans=1: keep retransmitting until acked */
	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);

	return ret;
}


/**
 * send_reset - transmit a RST+ACK; close_when_complete=1 drops the
 * node reference once the frame has been sent.
 */
static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
	int ret;
	int flags = SET_RST | SET_ACK;

	if (!skb)
		skb = dev_alloc_skb(MAX_CM_BUFFER);
	if (!skb) {
		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
		return -ENOMEM;
	}

	form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1);

	return ret;
}


/**
 * send_ack - transmit a bare ACK (no retransmit, no close).
 */
static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
	int ret;

	if (!skb)
		skb = dev_alloc_skb(MAX_CM_BUFFER);

	if (!skb) {
		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
		return -1;
	}

	form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK);
	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 0);

	return ret;
}


/**
 * send_fin - transmit FIN+ACK, retransmitting until acked.
 */
static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
	int ret;

	/* if we didn't get a frame get one */
	if (!skb)
		skb = dev_alloc_skb(MAX_CM_BUFFER);

	if (!skb) {
		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
		return -1;
	}

	form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK | SET_FIN);
	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);

	return ret;
}


/**
 * find_node - find a cm node that matches the reference cm node.
 * Returns the node with an extra reference taken, or NULL.
 */
static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
				     u16 rem_port, nes_addr_t rem_addr,
				     u16 loc_port, nes_addr_t loc_addr)
{
	unsigned long flags;
	struct list_head *hte;
	struct nes_cm_node *cm_node;

	/* get a handle on the hte */
	hte = &cm_core->connected_nodes;

	/* walk list and find cm_node associated with this session ID */
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_for_each_entry(cm_node, hte, list) {
		/* compare quad, return node handle if a match */
		nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
			  cm_node->loc_addr, cm_node->loc_port,
			  loc_addr, loc_port,
			  cm_node->rem_addr, cm_node->rem_port,
			  rem_addr, rem_port);
		if ((cm_node->loc_addr == loc_addr) &&
		    (cm_node->loc_port == loc_port) &&
		    (cm_node->rem_addr == rem_addr) &&
		    (cm_node->rem_port == rem_port)) {
			add_ref_cm_node(cm_node);
			spin_unlock_irqrestore(&cm_core->ht_lock, flags);
			return cm_node;
		}
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	/* no owner node */
	return NULL;
}


/**
 * find_listener - find a cm node listening on this addr-port pair.
 * A listener bound to 0.0.0.0 matches any destination address.
 * Returns the listener with its ref_count bumped, or NULL.
 */
static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
					     nes_addr_t dst_addr, u16 dst_port,
					     enum nes_cm_listener_state listener_state)
{
	unsigned long flags;
	struct nes_cm_listener *listen_node;

	/* walk list and find cm_node associated with this session ID */
	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
		/* compare node pair, return node handle if a match */
		if (((listen_node->loc_addr == dst_addr) ||
		     listen_node->loc_addr == 0x00000000) &&
		    (listen_node->loc_port == dst_port) &&
		    (listener_state & listen_node->listener_state)) {
			atomic_inc(&listen_node->ref_count);
			spin_unlock_irqrestore(&cm_core->listen_list_lock,
					       flags);
			return listen_node;
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

	/* no listener */
	return NULL;
}


/**
 * add_hte_node - add a cm node to the hash table
 */
static int add_hte_node(struct nes_cm_core *cm_core,
			struct nes_cm_node *cm_node)
{
	unsigned long flags;
	struct list_head *hte;

	if (!cm_node || !cm_core)
		return -EINVAL;

	nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n",
		  cm_node);

	spin_lock_irqsave(&cm_core->ht_lock, flags);

	/* get a handle on the hash table element (list head for this slot) */
	hte = &cm_core->connected_nodes;
	list_add_tail(&cm_node->list, hte);
	atomic_inc(&cm_core->ht_node_cnt);

	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	return 0;
}


/**
 * mini_cm_dec_refcnt_listen - drop one reference on @listener; on the
 * last reference the listener is unlinked and freed.  When
 * @free_hanging_nodes is set, non-accelerated child nodes of this
 * listener are first reset/torn down.
 */
static int
mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
			  struct nes_cm_listener *listener,
			  int free_hanging_nodes)
{
	int ret = -EINVAL;
	int err = 0;
	unsigned long flags;
	struct list_head *list_pos = NULL;
	struct list_head *list_temp = NULL;
	struct nes_cm_node *cm_node = NULL;
	struct list_head reset_list;

	nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, "
		  "refcnt=%d\n", listener, free_hanging_nodes,
		  atomic_read(&listener->ref_count));
	/* free non-accelerated child nodes for this listener */
	INIT_LIST_HEAD(&reset_list);
	if (free_hanging_nodes) {
		/* collect matching children under ht_lock, process after */
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		list_for_each_safe(list_pos, list_temp,
				   &g_cm_core->connected_nodes) {
			cm_node = container_of(list_pos, struct nes_cm_node,
					       list);
			if ((cm_node->listener == listener) &&
			    (!cm_node->accelerated)) {
				add_ref_cm_node(cm_node);
				list_add(&cm_node->reset_entry, &reset_list);
			}
		}
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	}

	list_for_each_safe(list_pos, list_temp, &reset_list) {
		cm_node = container_of(list_pos, struct nes_cm_node,
				       reset_entry);
		{
			struct nes_cm_node *loopback = cm_node->loopbackpartner;
			enum nes_cm_node_state old_state;
			if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) {
				/* already closing: just drop our extra ref */
				rem_ref_cm_node(cm_node->cm_core, cm_node);
			} else {
				if (!loopback) {
					cleanup_retrans_entry(cm_node);
					err = send_reset(cm_node, NULL);
					if (err) {
						cm_node->state =
							NES_CM_STATE_CLOSED;
						WARN_ON(1);
					} else {
						old_state = cm_node->state;
						cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
						if (old_state != NES_CM_STATE_MPAREQ_RCVD)
							rem_ref_cm_node(
								cm_node->cm_core,
								cm_node);
					}
				} else {
					/* loopback peer: deliver a connect
					 * error to the partner directly */
					struct nes_cm_event event;

					event.cm_node = loopback;
					event.cm_info.rem_addr =
						loopback->rem_addr;
					event.cm_info.loc_addr =
						loopback->loc_addr;
					event.cm_info.rem_port =
						loopback->rem_port;
					event.cm_info.loc_port =
						loopback->loc_port;
					event.cm_info.cm_id = loopback->cm_id;
					add_ref_cm_node(loopback);
					loopback->state = NES_CM_STATE_CLOSED;
					cm_event_connect_error(&event);
					cm_node->state =
						NES_CM_STATE_LISTENER_DESTROYED;
					rem_ref_cm_node(cm_node->cm_core,
							cm_node);
				}
			}
		}
	}

	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	if (!atomic_dec_return(&listener->ref_count)) {
		list_del(&listener->list);

		/* decrement our listen node count */
		atomic_dec(&cm_core->listen_node_cnt);

		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

		if (listener->nesvnic) {
			nes_manage_apbvt(listener->nesvnic, listener->loc_port,
					 PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn),
					 NES_MANAGE_APBVT_DEL);
		}

		nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);

		kfree(listener);
		listener = NULL;
		ret = 0;
		atomic_inc(&cm_listens_destroyed);
	} else {
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	}
	if (listener) {
		if (atomic_read(&listener->pend_accepts_cnt) > 0)
			nes_debug(NES_DBG_CM, "destroying listener (%p)"
				  " with non-zero pending accepts=%u\n",
				  listener,
				  atomic_read(&listener->pend_accepts_cnt));
	}

	return ret;
}


/**
 * mini_cm_del_listen - stop listening: demote the listener to passive
 * state and drop the final "active" reference (tearing down any
 * hanging child nodes).
 */
static int mini_cm_del_listen(struct nes_cm_core *cm_core,
			      struct nes_cm_listener *listener)
{
	listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE;
	listener->cm_id = NULL;	/* going to be destroyed pretty soon */
	return mini_cm_dec_refcnt_listen(cm_core, listener, 1);
}


/**
 * mini_cm_accelerated - mark @cm_node as offloaded to hardware,
 * release its pending-accept slot, and make sure the core timer runs
 * soon to clean up any remaining timer entries.
 */
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
				     struct nes_cm_node *cm_node)
{
	u32 was_timer_set;

	cm_node->accelerated = 1;

	if (cm_node->accept_pend) {
		BUG_ON(!cm_node->listener);
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
		cm_node->accept_pend = 0;
		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
	}

	was_timer_set = timer_pending(&cm_core->tcp_timer);
	if (!was_timer_set) {
		cm_core->tcp_timer.expires = jiffies + NES_SHORT_TIME;
		add_timer(&cm_core->tcp_timer);
	}

	return 0;
}


/**
 * nes_addr_resolve_neigh - resolve the next-hop MAC for @dst_ip and
 * keep the driver's private ARP table in sync with the kernel
 * neighbour table.  Returns the (possibly refreshed) ARP table index,
 * or the passed-in @arpindex when resolution is not yet complete.
 */
static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip,
				  int arpindex)
{
	struct rtable *rt;
	struct neighbour *neigh;
	int rc = arpindex;
	struct net_device *netdev;
	struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;

	rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
	if (IS_ERR(rt)) {
		printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
		       __func__, dst_ip);
		return rc;
	}

	/* bonding: the neighbour lives on the master device */
	if (netif_is_bond_slave(nesvnic->netdev))
		netdev = nesvnic->netdev->master;
	else
		netdev = nesvnic->netdev;

	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID) {
			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
				  " is %pM, Gateway is 0x%08X \n", dst_ip,
				  neigh->ha, ntohl(rt->rt_gateway));

			if (arpindex >= 0) {
				if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
					    neigh->ha, ETH_ALEN)) {
					/* Mac address same as in nes_arp_table */
					neigh_release(neigh);
					ip_rt_put(rt);
					return rc;
				}

				/* stale entry: drop it before re-adding */
				nes_manage_arp_cache(nesvnic->netdev,
						     nesadapter->arp_table[arpindex].mac_addr,
						     dst_ip, NES_ARP_DELETE);
			}

			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
					     dst_ip, NES_ARP_ADD);
			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
					   NES_ARP_RESOLVE);
		}
		neigh_release(neigh);
	}

	/* no valid neighbour yet: kick off ARP resolution */
	if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
		neigh_event_send(rt->dst.neighbour, NULL);
	ip_rt_put(rt);
	return rc;
}


/**
 * make_cm_node - create a new instance of a cm node.  Initializes the
 * TCP context (random-ish ISS from the clock, default window/MSS),
 * resolves the remote MAC via the ARP table, and links the node into
 * the connected-nodes hash table.  Returns NULL on allocation or ARP
 * failure.
 */
static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
					struct nes_vnic *nesvnic,
					struct nes_cm_info *cm_info,
					struct nes_cm_listener *listener)
{
	struct nes_cm_node *cm_node;
	struct timespec ts;
	int oldarpindex = 0;
	int arpindex = 0;
	struct nes_device *nesdev;
	struct nes_adapter *nesadapter;

	/* create an hte and cm_node for this instance */
	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
	if (!cm_node)
		return NULL;

	/* set our node specific transport info */
	cm_node->loc_addr = cm_info->loc_addr;
	cm_node->rem_addr = cm_info->rem_addr;
	cm_node->loc_port = cm_info->loc_port;
	cm_node->rem_port = cm_info->rem_port;
	cm_node->send_write0 = send_first;
	nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n",
		  &cm_node->loc_addr, cm_node->loc_port,
		  &cm_node->rem_addr, cm_node->rem_port);
	cm_node->listener = listener;
	cm_node->netdev = nesvnic->netdev;
	cm_node->cm_id = cm_info->cm_id;
	memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN);

	nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", cm_node->listener,
		  cm_node->cm_id);

	spin_lock_init(&cm_node->retrans_list_lock);

	cm_node->loopbackpartner = NULL;
	atomic_set(&cm_node->ref_count, 1);
	/* associate our parent CM core */
	cm_node->cm_core = cm_core;
	cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID;
	cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
	cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >>
				     NES_CM_DEFAULT_RCV_WND_SCALE;
	/* initial send sequence derived from the clock nanoseconds */
	ts = current_kernel_time();
	cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
	cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) -
				 sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN;
	cm_node->tcp_cntxt.rcv_nxt = 0;
	/* get a unique session ID , add thread_id to an upcounter to handle race */
	atomic_inc(&cm_core->node_cnt);
	cm_node->conn_type = cm_info->conn_type;
	cm_node->apbvt_set = 0;
	cm_node->accept_pend = 0;

	cm_node->nesvnic = nesvnic;
	/* get some device handles, for arp lookup */
	nesdev = nesvnic->nesdev;
	nesadapter = nesdev->nesadapter;

	cm_node->loopbackpartner = NULL;

	/* get the mac addr for the remote node */
	if (ipv4_is_loopback(htonl(cm_node->rem_addr)))
		arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr),
					 NULL, NES_ARP_RESOLVE);
	else {
		oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr,
					    NULL, NES_ARP_RESOLVE);
		arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr,
						  oldarpindex);
	}
	if (arpindex < 0) {
		kfree(cm_node);
		return NULL;
	}

	/* copy the mac addr to node context */
	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr,
	       ETH_ALEN);
	nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %pM\n",
		  cm_node->rem_mac);

	add_hte_node(cm_core, cm_node);
	atomic_inc(&cm_nodes_created);

	return cm_node;
}


/**
 * add_ref_cm_node - take an additional reference on a cm node
 */
static int
add_ref_cm_node(struct nes_cm_node *cm_node)
{
	atomic_inc(&cm_node->ref_count);
	return 0;
}


/**
 * rem_ref_cm_node - drop a reference on a cm node; the last reference
 * unlinks it from the hash table, releases listener/APBVT/QP
 * resources and frees the node.
 */
static int rem_ref_cm_node(struct nes_cm_core *cm_core,
			   struct nes_cm_node *cm_node)
{
	unsigned long flags;
	struct nes_qp *nesqp;

	if (!cm_node)
		return -EINVAL;

	/* ht_lock covers both the refcount drop and the list removal */
	spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
	if (atomic_dec_return(&cm_node->ref_count)) {
		spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
		return 0;
	}
	list_del(&cm_node->list);
	atomic_dec(&cm_core->ht_node_cnt);
	spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);

	/* if the node is destroyed before connection was accelerated */
	if (!cm_node->accelerated && cm_node->accept_pend) {
		BUG_ON(!cm_node->listener);
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
	}
	WARN_ON(cm_node->send_entry);
	if (cm_node->recv_entry)
		handle_recv_entry(cm_node, 0);
	if (cm_node->listener) {
		mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
	} else {
		if (cm_node->apbvt_set && cm_node->nesvnic) {
			nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
					 PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
					 NES_MANAGE_APBVT_DEL);
		}
	}

	atomic_dec(&cm_core->node_cnt);
	atomic_inc(&cm_nodes_destroyed);
	nesqp = cm_node->nesqp;
	if (nesqp) {
		nesqp->cm_node = NULL;
		nes_rem_ref(&nesqp->ibqp);
		cm_node->nesqp = NULL;
	}

	kfree(cm_node);
	return 0;
}


/**
 * process_options - walk the received TCP option bytes and fold the
 * recognized ones (MSS, window scale, write0) into the node's TCP
 * context.  Returns non-zero on a malformed MSS option.  A SYN with
 * no MSS option falls back to NES_CM_DEFAULT_MSS.
 */
static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
			   u32 optionsize, u32 syn_packet)
{
	u32 tmp;
	u32 offset = 0;
	union all_known_options *all_options;
	char got_mss_option = 0;

	while (offset < optionsize) {
		all_options = (union all_known_options *)(optionsloc + offset);
		switch (all_options->as_base.optionnum) {
		case OPTION_NUMBER_END:
			offset = optionsize;
			break;
		case OPTION_NUMBER_NONE:
			offset += 1;	/* NOP option is a single byte */
			continue;
		case OPTION_NUMBER_MSS:
			nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d "
				  "Size: %d\n", __func__,
				  all_options->as_mss.length, offset,
				  optionsize);
			got_mss_option = 1;
			if (all_options->as_mss.length != 4) {
				return 1;
			} else {
				tmp = ntohs(all_options->as_mss.mss);
				/* never raise the MSS above our own */
				if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
					cm_node->tcp_cntxt.mss = tmp;
			}
			break;
		case OPTION_NUMBER_WINDOW_SCALE:
			cm_node->tcp_cntxt.snd_wscale =
				all_options->as_windowscale.shiftcount;
			break;
		case OPTION_NUMBER_WRITE0:
			cm_node->send_write0 = 1;
			break;
		default:
			nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
				  all_options->as_base.optionnum);
			break;
		}
		offset += all_options->as_base.length;
	}
	if ((!got_mss_option) && (syn_packet))
		cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
	return 0;
}


/* count and free a packet we are not going to process further */
static void drop_packet(struct sk_buff *skb)
{
	atomic_inc(&cm_accel_dropped_pkts);
	dev_kfree_skb_any(skb);
}


/**
 * handle_fin_pkt - advance the node's TCP-like state machine on
 * receipt of a FIN (rcv_nxt is bumped to account for the FIN's
 * sequence number).
 */
static void handle_fin_pkt(struct nes_cm_node *cm_node)
{
	nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. "
		  "refcnt=%d\n", cm_node, cm_node->state,
		  atomic_read(&cm_node->ref_count));
	switch (cm_node->state) {
	case NES_CM_STATE_SYN_RCVD:
	case NES_CM_STATE_SYN_SENT:
	case NES_CM_STATE_ESTABLISHED:
	case NES_CM_STATE_MPAREJ_RCVD:
		cm_node->tcp_cntxt.rcv_nxt++;
		cleanup_retrans_entry(cm_node);
		cm_node->state = NES_CM_STATE_LAST_ACK;
		send_fin(cm_node, NULL);
		break;
	case NES_CM_STATE_MPAREQ_SENT:
		create_event(cm_node, NES_CM_EVENT_ABORTED);
		cm_node->tcp_cntxt.rcv_nxt++;
		cleanup_retrans_entry(cm_node);
		cm_node->state = NES_CM_STATE_CLOSED;
		add_ref_cm_node(cm_node);
		send_reset(cm_node, NULL);
		break;
	case NES_CM_STATE_FIN_WAIT1:
		cm_node->tcp_cntxt.rcv_nxt++;
		cleanup_retrans_entry(cm_node);
		cm_node->state = NES_CM_STATE_CLOSING;
		send_ack(cm_node, NULL);
		/* Wait for ACK as this is simultaneous close..
		 * After we receive ACK, do not send anything..
		 * Just rm the node.. Done..
		 */
		break;
	case NES_CM_STATE_FIN_WAIT2:
		cm_node->tcp_cntxt.rcv_nxt++;
		cleanup_retrans_entry(cm_node);
		cm_node->state = NES_CM_STATE_TIME_WAIT;
		send_ack(cm_node, NULL);
		schedule_nes_timer(cm_node, NULL, NES_TIMER_TYPE_CLOSE, 1, 0);
		break;
	case NES_CM_STATE_TIME_WAIT:
		cm_node->tcp_cntxt.rcv_nxt++;
		cleanup_retrans_entry(cm_node);
		cm_node->state = NES_CM_STATE_CLOSED;
		rem_ref_cm_node(cm_node->cm_core, cm_node);
		break;
	case NES_CM_STATE_TSA:
	default:
		nes_debug(NES_DBG_CM, "Error Rcvd FIN for node-%p state = %d\n",
			  cm_node, cm_node->state);
		break;
	}
}


/**
 * handle_rst_pkt - process a received RST according to the node's
 * state: abort active opens, fail passive opens, and tear down
 * half-closed connections.
 */
static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
			   struct tcphdr *tcph)
{
	int reset = 0;	/* whether to send reset in case of err.. */
	atomic_inc(&cm_resets_recvd);
	nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
		  " refcnt=%d\n", cm_node, cm_node->state,
		  atomic_read(&cm_node->ref_count));
	cleanup_retrans_entry(cm_node);
	switch (cm_node->state) {
	case NES_CM_STATE_SYN_SENT:
	case NES_CM_STATE_MPAREQ_SENT:
		nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
			  "listener=%p state=%d\n", __func__, __LINE__,
			  cm_node, cm_node->listener, cm_node->state);
		active_open_err(cm_node, skb, reset);
		break;
	case NES_CM_STATE_MPAREQ_RCVD:
		atomic_inc(&cm_node->passive_state);
		dev_kfree_skb_any(skb);
		break;
	case NES_CM_STATE_ESTABLISHED:
	case NES_CM_STATE_SYN_RCVD:
	case NES_CM_STATE_LISTENING:
		nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__,
			  __LINE__);
		passive_open_err(cm_node, skb, reset);
		break;
	case NES_CM_STATE_TSA:
		active_open_err(cm_node, skb, reset);
		break;
	case NES_CM_STATE_CLOSED:
		drop_packet(skb);
		break;
	case NES_CM_STATE_FIN_WAIT2:
	case NES_CM_STATE_FIN_WAIT1:
	case NES_CM_STATE_LAST_ACK:
		cm_node->cm_id->rem_ref(cm_node->cm_id);
		/* fall through */
	case NES_CM_STATE_TIME_WAIT:
		cm_node->state = NES_CM_STATE_CLOSED;
		rem_ref_cm_node(cm_node->cm_core, cm_node);
		drop_packet(skb);
		break;
	default:
		drop_packet(skb);
		break;
	}
}


/**
 * handle_rcv_mpa - parse an in-band MPA frame from the TCP payload
 * and raise the matching CM event (MPA_REQ / MPA_REJECT / CONNECTED).
 */
static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
	int
ret = 0;
	int datasize = skb->len;
	u8 *dataloc = skb->data;

	enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN;
	u32 res_type;

	ret = parse_mpa(cm_node, dataloc, &res_type, datasize);
	if (ret) {
		nes_debug(NES_DBG_CM, "didn't like MPA Request\n");
		if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) {
			nes_debug(NES_DBG_CM, "%s[%u] create abort for "
				  "cm_node=%p listener=%p state=%d\n", __func__,
				  __LINE__, cm_node, cm_node->listener,
				  cm_node->state);
			active_open_err(cm_node, skb, 1);
		} else {
			passive_open_err(cm_node, skb, 1);
		}
		return;
	}

	switch (cm_node->state) {
	case NES_CM_STATE_ESTABLISHED:
		if (res_type == NES_MPA_REQUEST_REJECT) {
			/* BIG problem as we are receiving the MPA.. So should
			 * not be REJECT.. This is Passive Open.. We can
			 * only receive it Reject for Active Open... */
			WARN_ON(1);
		}
		cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
		type = NES_CM_EVENT_MPA_REQ;
		atomic_set(&cm_node->passive_state,
			   NES_PASSIVE_STATE_INDICATED);
		break;
	case NES_CM_STATE_MPAREQ_SENT:
		cleanup_retrans_entry(cm_node);
		if (res_type == NES_MPA_REQUEST_REJECT) {
			type = NES_CM_EVENT_MPA_REJECT;
			cm_node->state = NES_CM_STATE_MPAREJ_RCVD;
		} else {
			type = NES_CM_EVENT_CONNECTED;
			cm_node->state = NES_CM_STATE_TSA;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
	dev_kfree_skb_any(skb);
	create_event(cm_node, type);
}


/* report a malformed/unexpected packet as an open error or drop it */
static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
	switch (cm_node->state) {
	case NES_CM_STATE_SYN_SENT:
	case NES_CM_STATE_MPAREQ_SENT:
		nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
			  "listener=%p state=%d\n", __func__, __LINE__,
			  cm_node, cm_node->listener, cm_node->state);
		active_open_err(cm_node, skb, 1);
		break;
	case NES_CM_STATE_ESTABLISHED:
	case NES_CM_STATE_SYN_RCVD:
		passive_open_err(cm_node, skb, 1);
		break;
	case NES_CM_STATE_TSA:
	default:
		drop_packet(skb);
	}
}


/* validate the ack number of a received SYN+ACK against our ISS */
static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph,
		     struct sk_buff *skb)
{
	int err;

	err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num)) ? 0 : 1;
	if (err)
		active_open_err(cm_node, skb, 1);

	return err;
}


/*
 * validate sequence/ack numbers of a received segment: the ack must
 * match our local sequence number and the sequence must fall inside
 * the receive window
 */
static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph,
		     struct sk_buff *skb)
{
	int err = 0;
	u32 seq;
	u32 ack_seq;
	u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
	u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
	u32 rcv_wnd;

	seq = ntohl(tcph->seq);
	ack_seq = ntohl(tcph->ack_seq);
	rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
	if (ack_seq != loc_seq_num)
		err = 1;
	else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
		err = 1;
	if (err) {
		nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
			  "listener=%p state=%d\n", __func__, __LINE__,
			  cm_node, cm_node->listener, cm_node->state);
		indicate_pkt_err(cm_node, skb);
		nes_debug(NES_DBG_CM, "seq ERROR cm_node =%p seq=0x%08X "
			  "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq,
			  rcv_nxt, rcv_wnd);
	}
	return err;
}


/*
 * handle_syn_pkt() is for Passive node. The syn packet is received when a node
 * is created with a listener or it may comein as rexmitted packet which in
 * that case will be just dropped.
 */
static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
			   struct tcphdr *tcph)
{
	int ret;
	u32 inc_sequence;
	int optionsize;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	skb_trim(skb, 0);
	inc_sequence = ntohl(tcph->seq);

	switch (cm_node->state) {
	case NES_CM_STATE_SYN_SENT:
	case NES_CM_STATE_MPAREQ_SENT:
		/* Rcvd syn on active open connection*/
		active_open_err(cm_node, skb, 1);
		break;
	case NES_CM_STATE_LISTENING:
		/* Passive OPEN */
		if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
		    cm_node->listener->backlog) {
			nes_debug(NES_DBG_CM, "drop syn due to backlog "
				  "pressure \n");
			cm_backlog_drops++;
			passive_open_err(cm_node, skb, 0);
			break;
		}
		ret = handle_tcp_options(cm_node, tcph, skb, optionsize,
					 1);
		if (ret) {
			passive_open_err(cm_node, skb, 0);
			/* drop pkt */
			break;
		}
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		BUG_ON(cm_node->send_entry);
		cm_node->accept_pend = 1;
		atomic_inc(&cm_node->listener->pend_accepts_cnt);

		cm_node->state = NES_CM_STATE_SYN_RCVD;
		send_syn(cm_node, 1, skb);
		break;
	case NES_CM_STATE_CLOSED:
		cleanup_retrans_entry(cm_node);
		add_ref_cm_node(cm_node);
		send_reset(cm_node, skb);
		break;
	case NES_CM_STATE_TSA:
	case NES_CM_STATE_ESTABLISHED:
	case NES_CM_STATE_FIN_WAIT1:
	case NES_CM_STATE_FIN_WAIT2:
	case NES_CM_STATE_MPAREQ_RCVD:
	case NES_CM_STATE_LAST_ACK:
	case NES_CM_STATE_CLOSING:
	case NES_CM_STATE_UNKNOWN:
	default:
		drop_packet(skb);
		break;
	}
}


/**
 * handle_synack_pkt - active-side handling of a received SYN+ACK:
 * validate it, apply its TCP options, then send the MPA request.
 * Unexpected states answer with a RST or drop the packet.
 */
static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
			      struct tcphdr *tcph)
{
	int ret;
	u32 inc_sequence;
	int optionsize;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	skb_trim(skb, 0);
	inc_sequence = ntohl(tcph->seq);
	switch (cm_node->state) {
	case NES_CM_STATE_SYN_SENT:
		cleanup_retrans_entry(cm_node);
		/* active open */
		if (check_syn(cm_node, tcph, skb))
			return;
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		/* setup options */
		ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 0);
		if (ret) {
			nes_debug(NES_DBG_CM, "cm_node=%p tcp_options failed\n",
				  cm_node);
			break;
		}
		cleanup_retrans_entry(cm_node);
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		send_mpa_request(cm_node, skb);
		cm_node->state = NES_CM_STATE_MPAREQ_SENT;
		break;
	case NES_CM_STATE_MPAREQ_RCVD:
		/* passive open, so should not be here */
		passive_open_err(cm_node, skb, 1);
		break;
	case NES_CM_STATE_LISTENING:
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		cleanup_retrans_entry(cm_node);
		cm_node->state = NES_CM_STATE_CLOSED;
		send_reset(cm_node, skb);
		break;
	case NES_CM_STATE_CLOSED:
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		cleanup_retrans_entry(cm_node);
		add_ref_cm_node(cm_node);
		send_reset(cm_node, skb);
		break;
	case NES_CM_STATE_ESTABLISHED:
	case NES_CM_STATE_FIN_WAIT1:
	case NES_CM_STATE_FIN_WAIT2:
	case NES_CM_STATE_LAST_ACK:
	case NES_CM_STATE_TSA:
	case NES_CM_STATE_CLOSING:
	case NES_CM_STATE_UNKNOWN:
	case NES_CM_STATE_MPAREQ_SENT:
	default:
		drop_packet(skb);
		break;
	}
}


/**
 * handle_ack_pkt - process a received ACK (possibly carrying MPA
 * payload).  Sequence numbers are validated first; payload, if any,
 * is handed to handle_rcv_mpa().
 */
static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
			  struct tcphdr *tcph)
{
	int datasize = 0;
	u32 inc_sequence;
	int ret = 0;
	int optionsize;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);

	if (check_seq(cm_node, tcph, skb))
		return -EINVAL;

	skb_pull(skb, tcph->doff << 2);	/* leave only the payload in the skb */
	inc_sequence = ntohl(tcph->seq);
	datasize = skb->len;
	switch (cm_node->state) {
	case NES_CM_STATE_SYN_RCVD:
		/* Passive OPEN */
		cleanup_retrans_entry(cm_node);
		ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 1);
		if (ret)
			break;
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		cm_node->state = NES_CM_STATE_ESTABLISHED;
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			handle_rcv_mpa(cm_node, skb);
		} else /* rcvd ACK only */
			dev_kfree_skb_any(skb);
		break;
	case NES_CM_STATE_ESTABLISHED:
		/* Passive OPEN */
		cleanup_retrans_entry(cm_node);
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			handle_rcv_mpa(cm_node, skb);
		} else
			drop_packet(skb);
		break;
	case NES_CM_STATE_MPAREQ_SENT:
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			handle_rcv_mpa(cm_node, skb);
		} else /* Could be just an ack pkt.. */
			dev_kfree_skb_any(skb);
		break;
	case NES_CM_STATE_LISTENING:
		cleanup_retrans_entry(cm_node);
		cm_node->state = NES_CM_STATE_CLOSED;
		send_reset(cm_node, skb);
		break;
	case NES_CM_STATE_CLOSED:
		cleanup_retrans_entry(cm_node);
		add_ref_cm_node(cm_node);
		send_reset(cm_node, skb);
		break;
	case NES_CM_STATE_LAST_ACK:
	case NES_CM_STATE_CLOSING:
		cleanup_retrans_entry(cm_node);
		cm_node->state = NES_CM_STATE_CLOSED;
		cm_node->cm_id->rem_ref(cm_node->cm_id);
		rem_ref_cm_node(cm_node->cm_core, cm_node);
		drop_packet(skb);
		break;
	case NES_CM_STATE_FIN_WAIT1:
		cleanup_retrans_entry(cm_node);
		drop_packet(skb);
		cm_node->state = NES_CM_STATE_FIN_WAIT2;
		break;
	case NES_CM_STATE_SYN_SENT:
	case NES_CM_STATE_FIN_WAIT2:
	case NES_CM_STATE_TSA:
	case NES_CM_STATE_MPAREQ_RCVD:
	case NES_CM_STATE_UNKNOWN:
	default:
		cleanup_retrans_entry(cm_node);
		drop_packet(skb);
		break;
	}
	return ret;
}


/**
 * handle_tcp_options - apply the TCP options of a received segment to
 * the node's TCP context and track the peer's (scaled) send window.
 * Returns 1 (after raising an open error) on malformed options.
 */
static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
			      struct sk_buff *skb, int optionsize, int passive)
{
	u8 *optionsloc = (u8 *)&tcph[1];

	if (optionsize) {
		if (process_options(cm_node, optionsloc, optionsize,
				    (u32)tcph->syn)) {
			nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n",
				  __func__, cm_node);
			if (passive)
				passive_open_err(cm_node, skb, 1);
			else
				active_open_err(cm_node, skb, 1);
			return 1;
		}
	}

	cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
				     cm_node->tcp_cntxt.snd_wscale;

	if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
		cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;

	return 0;
}


/*
 * active_open_err() will send reset() if flag set..
 * It will also send ABORT event.
*/
static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
			    int reset)
{
	cleanup_retrans_entry(cm_node);
	if (reset) {
		nes_debug(NES_DBG_CM, "ERROR active err called for cm_node=%p, "
			  "state=%d\n", cm_node, cm_node->state);
		/* send_reset consumes a node reference — take one first */
		add_ref_cm_node(cm_node);
		send_reset(cm_node, skb);
	} else
		dev_kfree_skb_any(skb);

	cm_node->state = NES_CM_STATE_CLOSED;
	create_event(cm_node, NES_CM_EVENT_ABORTED);
}

/*
 * passive_open_err() will either do a reset() or will free up the skb and
 * remove the cm_node.
 */
static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
			     int reset)
{
	cleanup_retrans_entry(cm_node);
	cm_node->state = NES_CM_STATE_CLOSED;
	if (reset) {
		nes_debug(NES_DBG_CM, "passive_open_err sending RST for "
			  "cm_node=%p state =%d\n", cm_node, cm_node->state);
		send_reset(cm_node, skb);
	} else {
		/* no reset requested: free the packet and drop our node ref */
		dev_kfree_skb_any(skb);
		rem_ref_cm_node(cm_node->cm_core, cm_node);
	}
}

/*
 * free_retrans_entry() routines assumes that the retrans_list_lock has
 * been acquired before calling.
 */
static void free_retrans_entry(struct nes_cm_node *cm_node)
{
	struct nes_timer_entry *send_entry;

	send_entry = cm_node->send_entry;
	if (send_entry) {
		/* clear first so the retransmit timer cannot reuse it */
		cm_node->send_entry = NULL;
		dev_kfree_skb_any(send_entry->skb);
		kfree(send_entry);
		/* entry held a node reference; release it */
		rem_ref_cm_node(cm_node->cm_core, cm_node);
	}
}

/*
 * cleanup_retrans_entry - locked wrapper around free_retrans_entry();
 * takes retrans_list_lock with IRQs disabled.
 */
static void cleanup_retrans_entry(struct nes_cm_node *cm_node)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
	free_retrans_entry(cm_node);
	spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}

/**
 * process_packet
 * Returns skb if to be freed, else it will return NULL if already used..
*/
static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
			   struct nes_cm_core *cm_core)
{
	enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN;
	struct tcphdr *tcph = tcp_hdr(skb);
	u32 fin_set = 0;
	int ret = 0;

	/* strip the IP header; handlers see the TCP header onward */
	skb_pull(skb, ip_hdr(skb)->ihl << 2);

	nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d "
		  "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn,
		  tcph->ack, tcph->rst, tcph->fin);

	/* classify by TCP flags; RST wins, then SYN/SYN-ACK, then ACK */
	if (tcph->rst)
		pkt_type = NES_PKT_TYPE_RST;
	else if (tcph->syn) {
		pkt_type = NES_PKT_TYPE_SYN;
		if (tcph->ack)
			pkt_type = NES_PKT_TYPE_SYNACK;
	} else if (tcph->ack)
		pkt_type = NES_PKT_TYPE_ACK;
	if (tcph->fin)
		fin_set = 1;

	switch (pkt_type) {
	case NES_PKT_TYPE_SYN:
		handle_syn_pkt(cm_node, skb, tcph);
		break;
	case NES_PKT_TYPE_SYNACK:
		handle_synack_pkt(cm_node, skb, tcph);
		break;
	case NES_PKT_TYPE_ACK:
		ret = handle_ack_pkt(cm_node, skb, tcph);
		/* piggy-backed FIN is handled only if the ACK was accepted */
		if (fin_set && !ret)
			handle_fin_pkt(cm_node);
		break;
	case NES_PKT_TYPE_RST:
		handle_rst_pkt(cm_node, skb, tcph);
		break;
	default:
		if ((fin_set) && (!check_seq(cm_node, tcph, skb)))
			handle_fin_pkt(cm_node);
		drop_packet(skb);
		break;
	}
}

/**
 * mini_cm_listen - create a listen node with params
 */
static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
					      struct nes_vnic *nesvnic,
					      struct nes_cm_info *cm_info)
{
	struct nes_cm_listener *listener;
	unsigned long flags;

	nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
		  cm_info->loc_addr, cm_info->loc_port);

	/* cannot have multiple matching listeners */
	listener = find_listener(cm_core, htonl(cm_info->loc_addr),
				 htons(cm_info->loc_port),
				 NES_CM_LISTENER_EITHER_STATE);
	if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
		/* find automatically incs ref count ??? */
		atomic_dec(&listener->ref_count);
		nes_debug(NES_DBG_CM, "Not creating listener since it already exists\n");
		return NULL;
	}

	if (!listener) {
		/* create a CM listen node (1/2 node to compare incoming traffic to) */
		listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
		if (!listener) {
			nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n");
			return NULL;
		}

		listener->loc_addr = htonl(cm_info->loc_addr);
		listener->loc_port = htons(cm_info->loc_port);
		listener->reused_node = 0;

		atomic_set(&listener->ref_count, 1);
	}
	/* pasive case */
	/* find already inc'ed the ref count */
	else {
		listener->reused_node = 1;
	}

	listener->cm_id = cm_info->cm_id;
	atomic_set(&listener->pend_accepts_cnt, 0);
	listener->cm_core = cm_core;
	listener->nesvnic = nesvnic;
	atomic_inc(&cm_core->node_cnt);

	listener->conn_type = cm_info->conn_type;
	listener->backlog = cm_info->backlog;
	listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;

	/* only a freshly allocated node is linked onto the core's list */
	if (!listener->reused_node) {
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_add(&listener->list, &cm_core->listen_list.list);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
		atomic_inc(&cm_core->listen_node_cnt);
	}

	nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x,"
		  " listener = %p, backlog = %d, cm_id = %p.\n",
		  cm_info->loc_addr, cm_info->loc_port,
		  listener, listener->backlog, listener->cm_id);

	return listener;
}

/**
 * mini_cm_connect - make a connection node with params
 */
static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
					   struct nes_vnic *nesvnic, u16 private_data_len,
					   void *private_data, struct nes_cm_info *cm_info)
{
	int ret = 0;
	struct nes_cm_node *cm_node;
	struct nes_cm_listener *loopbackremotelistener;
	struct nes_cm_node *loopbackremotenode;
	struct nes_cm_info loopback_cm_info;
	u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) + private_data_len;
	struct ietf_mpa_frame *mpa_frame = NULL;

	/* create a CM connection node */
	cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL);
	if (!cm_node)
		return NULL;

	/* stage the MPA request frame inside the node */
	mpa_frame = &cm_node->mpa_frame;
	memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
	mpa_frame->flags = IETF_MPA_FLAGS_CRC;
	mpa_frame->rev = IETF_MPA_VERSION;
	mpa_frame->priv_data_len = htons(private_data_len);

	/* set our node side to client (active) side */
	cm_node->tcp_cntxt.client = 1;
	cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;

	if (cm_info->loc_addr == cm_info->rem_addr) {
		/*
		 * Loopback: find the local listener and wire the two nodes
		 * together directly; no packets hit the wire.
		 * NOTE(review): ntohl(nesvnic->local_ipaddr) here vs the
		 * htonl(cm_info->loc_addr) used in mini_cm_listen — verify
		 * both resolve to the same byte order for find_listener().
		 */
		loopbackremotelistener = find_listener(cm_core,
						       ntohl(nesvnic->local_ipaddr), cm_node->rem_port,
						       NES_CM_LISTENER_ACTIVE_STATE);
		if (loopbackremotelistener == NULL) {
			create_event(cm_node, NES_CM_EVENT_ABORTED);
		} else {
			loopback_cm_info = *cm_info;
			loopback_cm_info.loc_port = cm_info->rem_port;
			loopback_cm_info.rem_port = cm_info->loc_port;
			loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
			loopbackremotenode = make_cm_node(cm_core, nesvnic,
							  &loopback_cm_info, loopbackremotelistener);
			if (!loopbackremotenode) {
				rem_ref_cm_node(cm_node->cm_core, cm_node);
				return NULL;
			}
			atomic_inc(&cm_loopbacks);
			loopbackremotenode->loopbackpartner = cm_node;
			loopbackremotenode->tcp_cntxt.rcv_wscale =
				NES_CM_DEFAULT_RCV_WND_SCALE;
			cm_node->loopbackpartner = loopbackremotenode;
			memcpy(loopbackremotenode->mpa_frame_buf, private_data,
			       private_data_len);
			loopbackremotenode->mpa_frame_size = private_data_len;

			/* we are done handling this state. */
			/* set node to a TSA state */
			cm_node->state = NES_CM_STATE_TSA;
			/* cross-wire the two TCP contexts as if a handshake ran */
			cm_node->tcp_cntxt.rcv_nxt =
				loopbackremotenode->tcp_cntxt.loc_seq_num;
			loopbackremotenode->tcp_cntxt.rcv_nxt =
				cm_node->tcp_cntxt.loc_seq_num;
			cm_node->tcp_cntxt.max_snd_wnd =
				loopbackremotenode->tcp_cntxt.rcv_wnd;
			loopbackremotenode->tcp_cntxt.max_snd_wnd =
				cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wnd =
				loopbackremotenode->tcp_cntxt.rcv_wnd;
			loopbackremotenode->tcp_cntxt.snd_wnd =
				cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wscale =
				loopbackremotenode->tcp_cntxt.rcv_wscale;
			loopbackremotenode->tcp_cntxt.snd_wscale =
				cm_node->tcp_cntxt.rcv_wscale;
			loopbackremotenode->state = NES_CM_STATE_MPAREQ_RCVD;
			create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
		}
		return cm_node;
	}

	/* set our node side to client (active) side */
	cm_node->tcp_cntxt.client = 1;
	/* init our MPA frame ptr */
	memcpy(mpa_frame->priv_data, private_data, private_data_len);

	cm_node->mpa_frame_size = mpa_frame_size;

	/* send a syn and goto syn sent state */
	cm_node->state = NES_CM_STATE_SYN_SENT;
	ret = send_syn(cm_node, 0, NULL);

	if (ret) {
		/* error in sending the syn free up the cm_node struct */
		nes_debug(NES_DBG_CM, "Api - connect() FAILED: dest "
			  "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n",
			  cm_node->rem_addr, cm_node->rem_port, cm_node,
			  cm_node->cm_id);
		rem_ref_cm_node(cm_node->cm_core, cm_node);
		cm_node = NULL;
	}

	if (cm_node)
		nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X,"
			  "port=0x%04x, cm_node=%p, cm_id = %p.\n",
			  cm_node->rem_addr, cm_node->rem_port, cm_node,
			  cm_node->cm_id);

	return cm_node;
}

/**
 * mini_cm_accept - accept a connection
 * This function is never called
 */
static int mini_cm_accept(struct nes_cm_core *cm_core,
			  struct ietf_mpa_frame *mpa_frame,
			  struct nes_cm_node *cm_node)
{
	return 0;
}

/**
 * mini_cm_reject - reject and teardown a connection
 */
static int mini_cm_reject(struct nes_cm_core *cm_core,
			  struct ietf_mpa_frame *mpa_frame,
			  struct nes_cm_node *cm_node)
{
	int ret = 0;
	int err = 0;
	int passive_state;
	struct nes_cm_event event;
	struct iw_cm_id *cm_id = cm_node->cm_id;
	struct nes_cm_node *loopback = cm_node->loopbackpartner;

	nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n",
		  __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state);

	/* reject only makes sense on the passive (server) side */
	if (cm_node->tcp_cntxt.client)
		return ret;
	cleanup_retrans_entry(cm_node);

	if (!loopback) {
		passive_state = atomic_add_return(1, &cm_node->passive_state);
		if (passive_state == NES_SEND_RESET_EVENT) {
			/* a reset is already pending; just close out the node */
			cm_node->state = NES_CM_STATE_CLOSED;
			rem_ref_cm_node(cm_core, cm_node);
		} else {
			if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
				rem_ref_cm_node(cm_core, cm_node);
			} else {
				ret = send_mpa_reject(cm_node);
				if (ret) {
					/* couldn't send MPA reject — fall back to RST */
					cm_node->state = NES_CM_STATE_CLOSED;
					err = send_reset(cm_node, NULL);
					if (err)
						WARN_ON(1);
				} else
					cm_id->add_ref(cm_id);
			}
		}
	} else {
		/* loopback: deliver the reject event to the partner node directly */
		cm_node->cm_id = NULL;
		if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
			rem_ref_cm_node(cm_core, cm_node);
			rem_ref_cm_node(cm_core, loopback);
		} else {
			event.cm_node = loopback;
			event.cm_info.rem_addr = loopback->rem_addr;
			event.cm_info.loc_addr = loopback->loc_addr;
			event.cm_info.rem_port = loopback->rem_port;
			event.cm_info.loc_port = loopback->loc_port;
			event.cm_info.cm_id = loopback->cm_id;
			cm_event_mpa_reject(&event);
			rem_ref_cm_node(cm_core, cm_node);
			loopback->state = NES_CM_STATE_CLOSING;

			cm_id = loopback->cm_id;
			rem_ref_cm_node(cm_core, loopback);
			cm_id->rem_ref(cm_id);
		}
	}

	return ret;
}

/**
 * mini_cm_close - tear down a cm_node according to its current state
 * (reset, FIN, or plain dereference). Returns 0/-1 or the result of
 * rem_ref_cm_node() for already-terminal states.
 */
static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
{
	int ret = 0;

	if (!cm_core || !cm_node)
		return -EINVAL;

	switch (cm_node->state) {
	case NES_CM_STATE_SYN_RCVD:
	case NES_CM_STATE_SYN_SENT:
	case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
	case NES_CM_STATE_ESTABLISHED:
	case NES_CM_STATE_ACCEPTING:
	case NES_CM_STATE_MPAREQ_SENT:
	case NES_CM_STATE_MPAREQ_RCVD:
		/* connection is mid-setup: abort it with a reset */
		cleanup_retrans_entry(cm_node);
		send_reset(cm_node, NULL);
		break;
	case NES_CM_STATE_CLOSE_WAIT:
		/* orderly close: answer the peer's FIN with our own */
		cm_node->state = NES_CM_STATE_LAST_ACK;
		send_fin(cm_node, NULL);
		break;
	case NES_CM_STATE_FIN_WAIT1:
	case NES_CM_STATE_FIN_WAIT2:
	case NES_CM_STATE_LAST_ACK:
	case NES_CM_STATE_TIME_WAIT:
	case NES_CM_STATE_CLOSING:
		/* close already in progress */
		ret = -1;
		break;
	case NES_CM_STATE_LISTENING:
		cleanup_retrans_entry(cm_node);
		send_reset(cm_node, NULL);
		break;
	case NES_CM_STATE_MPAREJ_RCVD:
	case NES_CM_STATE_UNKNOWN:
	case NES_CM_STATE_INITED:
	case NES_CM_STATE_CLOSED:
	case NES_CM_STATE_LISTENER_DESTROYED:
		ret = rem_ref_cm_node(cm_core, cm_node);
		break;
	case NES_CM_STATE_TSA:
		if (cm_node->send_entry)
			printk(KERN_ERR "ERROR Close got called from STATE_TSA "
			       "send_entry=%p\n", cm_node->send_entry);
		ret = rem_ref_cm_node(cm_core, cm_node);
		break;
	}
	return ret;
}

/**
 * recv_pkt - recv an ETHERNET packet, and process it through CM
 * node state machine
 */
static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
			    struct nes_vnic *nesvnic, struct sk_buff *skb)
{
	struct nes_cm_node *cm_node = NULL;
	struct nes_cm_listener *listener = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct nes_cm_info nfo;
	int skb_handled = 1;
	__be32 tmp_daddr, tmp_saddr;

	if (!skb)
		return 0;
	/* too short to hold IP + TCP headers — not for us */
	if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) {
		return 0;
	}

	iph = (struct iphdr *)skb->data;
	tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr));

	/* host-order 4-tuple used for node/listener lookup */
	nfo.loc_addr = ntohl(iph->daddr);
	nfo.loc_port = ntohs(tcph->dest);
	nfo.rem_addr = ntohl(iph->saddr);
	nfo.rem_port = ntohs(tcph->source);

	tmp_daddr = cpu_to_be32(iph->daddr);
	tmp_saddr = cpu_to_be32(iph->saddr);

	nes_debug(NES_DBG_CM, "Received packet: dest=%pI4:0x%04X src=%pI4:0x%04X\n",
		  &tmp_daddr, tcph->dest, &tmp_saddr, tcph->source);

	do {
		cm_node = find_node(cm_core,
				    nfo.rem_port, nfo.rem_addr,
				    nfo.loc_port, nfo.loc_addr);

		if (!cm_node) {
			/* Only type of packet accepted are for */
			/* the PASSIVE open (syn only) */
			if ((!tcph->syn) || (tcph->ack)) {
				skb_handled = 0;
				break;
			}
			listener = find_listener(cm_core, nfo.loc_addr,
						 nfo.loc_port,
						 NES_CM_LISTENER_ACTIVE_STATE);
			if (!listener) {
				nfo.cm_id = NULL;
				nfo.conn_type =
					0;
				nes_debug(NES_DBG_CM, "Unable to find listener for the pkt\n");
				skb_handled = 0;
				break;
			}
			nfo.cm_id = listener->cm_id;
			nfo.conn_type = listener->conn_type;
			cm_node = make_cm_node(cm_core, nesvnic, &nfo,
					       listener);
			if (!cm_node) {
				nes_debug(NES_DBG_CM, "Unable to allocate "
					  "node\n");
				cm_packets_dropped++;
				/* undo the reference find_listener took */
				atomic_dec(&listener->ref_count);
				dev_kfree_skb_any(skb);
				break;
			}
			if (!tcph->rst && !tcph->fin) {
				cm_node->state = NES_CM_STATE_LISTENING;
			} else {
				/* RST/FIN on a brand-new node — discard it */
				cm_packets_dropped++;
				rem_ref_cm_node(cm_core, cm_node);
				dev_kfree_skb_any(skb);
				break;
			}
			add_ref_cm_node(cm_node);
		} else if (cm_node->state == NES_CM_STATE_TSA) {
			/* connection already accelerated in HW; SW CM drops it */
			rem_ref_cm_node(cm_core, cm_node);
			atomic_inc(&cm_accel_dropped_pkts);
			dev_kfree_skb_any(skb);
			break;
		}
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(*tcph));
		skb->len = ntohs(iph->tot_len);
		process_packet(cm_node, skb, cm_core);
		rem_ref_cm_node(cm_core, cm_node);
	} while (0);
	return skb_handled;
}

/**
 * nes_cm_alloc_core - allocate a top level instance of a cm core
 */
static struct nes_cm_core *nes_cm_alloc_core(void)
{
	struct nes_cm_core *cm_core;

	/* setup the CM core */
	/* alloc top level core control structure */
	cm_core = kzalloc(sizeof(*cm_core), GFP_KERNEL);
	if (!cm_core)
		return NULL;

	INIT_LIST_HEAD(&cm_core->connected_nodes);
	init_timer(&cm_core->tcp_timer);
	cm_core->tcp_timer.function = nes_cm_timer_tick;

	cm_core->mtu = NES_CM_DEFAULT_MTU;
	cm_core->state = NES_CM_STATE_INITED;
	cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;

	atomic_set(&cm_core->events_posted, 0);

	cm_core->api = &nes_cm_api;

	spin_lock_init(&cm_core->ht_lock);
	spin_lock_init(&cm_core->listen_list_lock);

	INIT_LIST_HEAD(&cm_core->listen_list.list);

	nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core);

	nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
	cm_core->event_wq = create_singlethread_workqueue("nesewq");
	cm_core->post_event = nes_cm_post_event;
	nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
	/* NOTE(review): workqueue creation results are not checked here —
	 * confirm callers tolerate a NULL event_wq/disconn_wq */
	cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");

	print_core(cm_core);
	return cm_core;
}

/**
 * mini_cm_dealloc_core - deallocate a top level instance of a cm core
 */
static int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
{
	nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);

	if (!cm_core)
		return -EINVAL;

	barrier();

	if (timer_pending(&cm_core->tcp_timer)) {
		del_timer(&cm_core->tcp_timer);
	}

	destroy_workqueue(cm_core->event_wq);
	destroy_workqueue(cm_core->disconn_wq);
	nes_debug(NES_DBG_CM, "\n");
	kfree(cm_core);

	return 0;
}

/**
 * mini_cm_get - return the cm core's current state
 */
static int mini_cm_get(struct nes_cm_core *cm_core)
{
	return cm_core->state;
}

/**
 * mini_cm_set - set a cm core tunable (packet size / free-packet queue size)
 */
static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
{
	int ret = 0;

	switch (type) {
	case NES_CM_SET_PKT_SIZE:
		cm_core->mtu = value;
		break;
	case NES_CM_SET_FREE_PKT_Q_SIZE:
		cm_core->free_tx_pkt_max = value;
		break;
	default:
		/* unknown set option */
		ret = -EINVAL;
	}

	return ret;
}

/**
 * nes_cm_init_tsa_conn setup HW; MPA frames must be
 * successfully exchanged when this is called
 */
static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_node)
{
	int ret = 0;

	if (!nesqp)
		return -EINVAL;

	nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 |
						  NES_QPCONTEXT_MISC_NO_NAGLE |
						  NES_QPCONTEXT_MISC_DO_NOT_FRAG |
						  NES_QPCONTEXT_MISC_DROS);

	if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale)
		nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE);

	/* fixed TTL of 64 */
	nesqp->nesqp_context->misc2 |= cpu_to_le32(64 << NES_QPCONTEXT_MISC2_TTL_SHIFT);

	nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16);

	nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32(
		(u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT);

	nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
		(cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) &
		NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK);

	nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
		(cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) &
		NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK);

	nesqp->nesqp_context->keepalive = cpu_to_le32(0x80);
	nesqp->nesqp_context->ts_recent = 0;
	nesqp->nesqp_context->ts_age = 0;
	/* seed the HW TCP context from the software cm_node's state */
	nesqp->nesqp_context->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
	nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
	nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
						    cm_node->tcp_cntxt.rcv_wscale);
	nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	nesqp->nesqp_context->srtt = 0;
	nesqp->nesqp_context->rttvar = cpu_to_le32(0x6);
	nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000);
	/* initial congestion window: 2 * MSS */
	nesqp->nesqp_context->cwnd = cpu_to_le32(2*cm_node->tcp_cntxt.mss);
	nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
	nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);

	nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X,"
		  " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n",
		  nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
		  le32_to_cpu(nesqp->nesqp_context->snd_nxt),
		  cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale),
		  le32_to_cpu(nesqp->nesqp_context->rcv_wnd),
		  le32_to_cpu(nesqp->nesqp_context->misc));
	nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n",
		  le32_to_cpu(nesqp->nesqp_context->snd_wnd));
	nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n",
		  le32_to_cpu(nesqp->nesqp_context->cwnd));
	nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n",
		  le32_to_cpu(nesqp->nesqp_context->max_snd_wnd));

	nes_debug(NES_DBG_CM, "Change cm_node state to TSA\n");
	cm_node->state = NES_CM_STATE_TSA;

	return ret;
}

/**
 * nes_cm_disconn
 */
int
nes_cm_disconn(struct nes_qp *nesqp)
{
	struct disconn_work *work;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM; /* Timer will clean up */

	/* hold the QP until the worker has run */
	nes_add_ref(&nesqp->ibqp);
	work->nesqp = nesqp;
	INIT_WORK(&work->work, nes_disconnect_worker);
	queue_work(g_cm_core->disconn_wq, &work->work);
	return 0;
}

/**
 * nes_disconnect_worker - workqueue callback; performs the actual
 * disconnect for the QP queued by nes_cm_disconn() and drops its ref.
 */
static void nes_disconnect_worker(struct work_struct *work)
{
	struct disconn_work *dwork = container_of(work, struct disconn_work, work);
	struct nes_qp *nesqp = dwork->nesqp;

	kfree(dwork);
	nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
		  nesqp->last_aeq, nesqp->hwqp.qp_id);
	nes_cm_disconn_true(nesqp);
	nes_rem_ref(&nesqp->ibqp);
}

/**
 * nes_cm_disconn_true - decide, from the QP's HW TCP state and last AE,
 * whether to flush WQEs and whether to raise DISCONNECT and/or CLOSE
 * events to the iWARP CM.
 */
static int nes_cm_disconn_true(struct nes_qp *nesqp)
{
	unsigned long flags;
	int ret = 0;
	struct iw_cm_id *cm_id;
	struct iw_cm_event cm_event;
	struct nes_vnic *nesvnic;
	u16 last_ae;
	u8 original_hw_tcp_state;
	u8 original_ibqp_state;
	int disconn_status = 0;
	int issue_disconn = 0;
	int issue_close = 0;
	int issue_flush = 0;
	u32 flush_q = NES_CQP_FLUSH_RQ;
	struct ib_event ibevent;

	if (!nesqp) {
		nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
		return -1;
	}

	spin_lock_irqsave(&nesqp->lock, flags);
	cm_id = nesqp->cm_id;
	/* make sure we havent already closed this connection */
	if (!cm_id) {
		nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n",
			  nesqp->hwqp.qp_id);
		spin_unlock_irqrestore(&nesqp->lock, flags);
		return -1;
	}

	nesvnic = to_nesvnic(nesqp->ibqp.device);
	nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id);

	/* snapshot state under the lock; decisions are made on the snapshot */
	original_hw_tcp_state = nesqp->hw_tcp_state;
	original_ibqp_state = nesqp->ibqp_state;
	last_ae = nesqp->last_aeq;

	if (nesqp->term_flags) {
		issue_disconn = 1;
		issue_close = 1;
		nesqp->cm_id = NULL;
		if (nesqp->flush_issued == 0) {
			nesqp->flush_issued = 1;
			issue_flush = 1;
		}
	} else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
		   ((original_ibqp_state == IB_QPS_RTS) &&
		    (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
		issue_disconn = 1;
		if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
			disconn_status = -ECONNRESET;
	}

	if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
	     (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
	     (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
	     (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
		issue_close = 1;
		nesqp->cm_id = NULL;
		if (nesqp->flush_issued == 0) {
			nesqp->flush_issued = 1;
			issue_flush = 1;
		}
	}

	spin_unlock_irqrestore(&nesqp->lock, flags);

	if ((issue_flush) && (nesqp->destroyed == 0)) {
		/* Flush the queue(s) */
		if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
			flush_q |= NES_CQP_FLUSH_SQ;
		flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);

		if (nesqp->term_flags) {
			/* report the terminate as an IB async event */
			ibevent.device = nesqp->ibqp.device;
			ibevent.event = nesqp->terminate_eventtype;
			ibevent.element.qp = &nesqp->ibqp;
			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
		}
	}

	if ((cm_id) && (cm_id->event_handler)) {
		if (issue_disconn) {
			atomic_inc(&cm_disconnects);
			cm_event.event = IW_CM_EVENT_DISCONNECT;
			cm_event.status = disconn_status;
			cm_event.local_addr = cm_id->local_addr;
			cm_event.remote_addr = cm_id->remote_addr;
			cm_event.private_data = NULL;
			cm_event.private_data_len = 0;

			nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event"
				  " for QP%u, SQ Head = %u, SQ Tail = %u. "
				  "cm_id = %p, refcount = %u.\n",
				  nesqp->hwqp.qp_id, nesqp->hwqp.sq_head,
				  nesqp->hwqp.sq_tail, cm_id,
				  atomic_read(&nesqp->refcount));

			ret = cm_id->event_handler(cm_id, &cm_event);
			if (ret)
				nes_debug(NES_DBG_CM, "OFA CM event_handler "
					  "returned, ret=%d\n", ret);
		}

		if (issue_close) {
			atomic_inc(&cm_closes);
			nes_disconnect(nesqp, 1);

			cm_id->provider_data = nesqp;
			/* Send up the close complete event */
			cm_event.event = IW_CM_EVENT_CLOSE;
			cm_event.status = 0;
			cm_event.provider_data = cm_id->provider_data;
			cm_event.local_addr = cm_id->local_addr;
			cm_event.remote_addr = cm_id->remote_addr;
			cm_event.private_data = NULL;
			cm_event.private_data_len = 0;

			ret = cm_id->event_handler(cm_id, &cm_event);
			if (ret) {
				nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
			}

			cm_id->rem_ref(cm_id);
		}
	}

	return 0;
}

/**
 * nes_disconnect - free the LSMM buffer of a passive connection and close
 * the CM node, if any.
 * NOTE(review): the 'abrupt' parameter is never read in this body —
 * confirm whether callers rely on it elsewhere.
 */
static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
{
	int ret = 0;
	struct nes_vnic *nesvnic;
	struct nes_device *nesdev;
	struct nes_ib_device *nesibdev;

	nesvnic = to_nesvnic(nesqp->ibqp.device);
	if (!nesvnic)
		return -EINVAL;

	nesdev = nesvnic->nesdev;
	nesibdev = nesvnic->nesibdev;

	nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
		  netdev_refcnt_read(nesvnic->netdev));

	if (nesqp->active_conn) {
		/* indicate this connection is NOT active */
		nesqp->active_conn = 0;
	} else {
		/* Need to free the Last Streaming Mode Message */
		if (nesqp->ietf_frame) {
			if (nesqp->lsmm_mr)
				nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
			pci_free_consistent(nesdev->pcidev,
					    nesqp->private_data_len + sizeof(struct ietf_mpa_frame),
					    nesqp->ietf_frame, nesqp->ietf_frame_pbase);
		}
	}

	/* close the CM node down if it is still active */
	if (nesqp->cm_node) {
		nes_debug(NES_DBG_CM, "Call close API\n");

		g_cm_core->api->close(g_cm_core, nesqp->cm_node);
	}

	return ret;
}

/**
 * nes_accept
 */
int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	u64 u64temp;
	struct ib_qp *ibqp;
	struct nes_qp *nesqp;
	struct nes_vnic *nesvnic;
	struct nes_device *nesdev;
	struct
	       nes_cm_node *cm_node;
	struct nes_adapter *adapter;
	struct ib_qp_attr attr;
	struct iw_cm_event cm_event;
	struct nes_hw_qp_wqe *wqe;
	struct nes_v4_quad nes_quad;
	u32 crc_value;
	int ret;
	int passive_state;
	struct nes_ib_device *nesibdev;
	struct ib_mr *ibmr = NULL;
	struct ib_phys_buf ibphysbuf;
	struct nes_pd *nespd;
	u64 tagged_offset;

	ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
	if (!ibqp)
		return -EINVAL;

	/* get all our handles */
	nesqp = to_nesqp(ibqp);
	nesvnic = to_nesvnic(nesqp->ibqp.device);
	nesdev = nesvnic->nesdev;
	adapter = nesdev->nesadapter;

	cm_node = (struct nes_cm_node *)cm_id->provider_data;
	nes_debug(NES_DBG_CM, "nes_accept: cm_node= %p nesvnic=%p, netdev=%p,"
		  "%s\n", cm_node, nesvnic, nesvnic->netdev,
		  nesvnic->netdev->name);

	if (NES_CM_STATE_LISTENER_DESTROYED == cm_node->state) {
		/* listener went away while the accept was pending */
		if (cm_node->loopbackpartner)
			rem_ref_cm_node(cm_node->cm_core, cm_node->loopbackpartner);
		rem_ref_cm_node(cm_node->cm_core, cm_node);
		return -EINVAL;
	}

	passive_state = atomic_add_return(1, &cm_node->passive_state);
	if (passive_state == NES_SEND_RESET_EVENT) {
		/* peer already reset the connection */
		rem_ref_cm_node(cm_node->cm_core, cm_node);
		return -ECONNRESET;
	}

	/* associate the node with the QP */
	nesqp->cm_node = (void *)cm_node;
	cm_node->nesqp = nesqp;

	nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
		  nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
	atomic_inc(&cm_accepts);

	nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
		  netdev_refcnt_read(nesvnic->netdev));

	/* allocate the ietf frame and space for private data */
	nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
						 sizeof(struct ietf_mpa_frame) + conn_param->private_data_len,
						 &nesqp->ietf_frame_pbase);

	if (!nesqp->ietf_frame) {
		nes_debug(NES_DBG_CM, "Unable to allocate memory for private "
			  "data\n");
		return -ENOMEM;
	}

	/* setup the MPA frame */
	nesqp->private_data_len = conn_param->private_data_len;
	memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);

	memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
	       conn_param->private_data_len);

	nesqp->ietf_frame->priv_data_len =
		cpu_to_be16(conn_param->private_data_len);
	nesqp->ietf_frame->rev = mpa_version;
	nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;

	/* setup our first outgoing iWarp send WQE (the IETF frame response) */
	wqe = &nesqp->hwqp.sq_vbase[0];

	if (cm_id->remote_addr.sin_addr.s_addr !=
	    cm_id->local_addr.sin_addr.s_addr) {
		/* non-loopback: register the MPA reply buffer and stage the
		 * LSMM (last streaming-mode message) on the send queue */
		u64temp = (unsigned long)nesqp;
		nesibdev = nesvnic->nesibdev;
		nespd = nesqp->nespd;
		ibphysbuf.addr = nesqp->ietf_frame_pbase;
		ibphysbuf.size = conn_param->private_data_len +
				 sizeof(struct ietf_mpa_frame);
		tagged_offset = (u64)(unsigned long)nesqp->ietf_frame;
		ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd,
						   &ibphysbuf, 1,
						   IB_ACCESS_LOCAL_WRITE,
						   &tagged_offset);
		if (!ibmr) {
			nes_debug(NES_DBG_CM, "Unable to register memory region"
				  "for lSMM for cm_node = %p \n",
				  cm_node);
			pci_free_consistent(nesdev->pcidev,
					    nesqp->private_data_len + sizeof(struct ietf_mpa_frame),
					    nesqp->ietf_frame, nesqp->ietf_frame_pbase);
			return -ENOMEM;
		}

		ibmr->pd = &nespd->ibpd;
		ibmr->device = nespd->ibpd.device;
		nesqp->lsmm_mr = ibmr;

		u64temp |= NES_SW_CONTEXT_ALIGN >> 1;
		set_wqe_64bit_value(wqe->wqe_words,
				    NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
				    u64temp);
		wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
			cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING |
				    NES_IWARP_SQ_WQE_WRPDU);
		wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
			cpu_to_le32(conn_param->private_data_len +
				    sizeof(struct ietf_mpa_frame));
		set_wqe_64bit_value(wqe->wqe_words,
				    NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
				    (u64)(unsigned long)nesqp->ietf_frame);
		wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
			cpu_to_le32(conn_param->private_data_len +
				    sizeof(struct ietf_mpa_frame));
		wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
		if (nesqp->sq_kmapped) {
			nesqp->sq_kmapped = 0;
			kunmap(nesqp->page);
		}

		nesqp->nesqp_context->ird_ord_sizes |=
			cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
				    NES_QPCONTEXT_ORDIRD_WRPDU);
	} else {
		nesqp->nesqp_context->ird_ord_sizes |=
			cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU);
	}
	nesqp->skip_lsmm = 1;

	/* Cache the cm_id in the qp */
	nesqp->cm_id = cm_id;
	cm_node->cm_id = cm_id;

	/* nesqp->cm_node = (void *)cm_id->provider_data; */
	cm_id->provider_data = nesqp;
	nesqp->active_conn = 0;

	if (cm_node->state == NES_CM_STATE_TSA)
		nes_debug(NES_DBG_CM, "Already state = TSA for cm_node=%p\n",
			  cm_node);

	nes_cm_init_tsa_conn(nesqp, cm_node);

	nesqp->nesqp_context->tcpPorts[0] =
		cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
	nesqp->nesqp_context->tcpPorts[1] =
		cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));

	/* loopback connections use the vnic's own address as the peer */
	if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
		nesqp->nesqp_context->ip0 =
			cpu_to_le32(ntohl(nesvnic->local_ipaddr));
	else
		nesqp->nesqp_context->ip0 =
			cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));

	nesqp->nesqp_context->misc2 |= cpu_to_le32(
		(u32)PCI_FUNC(nesdev->pcidev->devfn) <<
		NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);

	nesqp->nesqp_context->arp_index_vlan |=
		cpu_to_le32(nes_arp_table(nesdev,
					  le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
					  NES_ARP_RESOLVE) << 16);

	nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
		jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));

	nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);

	nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
		((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
	nesqp->nesqp_context->ird_ord_sizes |=
		cpu_to_le32((u32)conn_param->ord);

	memset(&nes_quad, 0, sizeof(nes_quad));
	nes_quad.DstIpAdrIndex =
		cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
	if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
		nes_quad.SrcIpadr = nesvnic->local_ipaddr;
	else
		nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
	nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
	nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;

	/* Produce hash key */
	crc_value = get_crc_value(&nes_quad);
	nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
	nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
		  nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);

	nesqp->hte_index &= adapter->hte_index_mask;
	nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);

	/* hand the connection off to HW acceleration */
	cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);

	nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = "
		  "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + "
		  "private data length=%zu.\n", nesqp->hwqp.qp_id,
		  ntohl(cm_id->remote_addr.sin_addr.s_addr),
		  ntohs(cm_id->remote_addr.sin_port),
		  ntohl(cm_id->local_addr.sin_addr.s_addr),
		  ntohs(cm_id->local_addr.sin_port),
		  le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
		  le32_to_cpu(nesqp->nesqp_context->snd_nxt),
		  conn_param->private_data_len +
		  sizeof(struct ietf_mpa_frame));

	/* notify OF layer that accept event was successful */
	cm_id->add_ref(cm_id);
	nes_add_ref(&nesqp->ibqp);

	cm_event.event = IW_CM_EVENT_ESTABLISHED;
	cm_event.status = 0;
	cm_event.provider_data = (void *)nesqp;
	cm_event.local_addr = cm_id->local_addr;
	cm_event.remote_addr = cm_id->remote_addr;

	cm_event.private_data = NULL;
	cm_event.private_data_len = 0;
	ret = cm_id->event_handler(cm_id, &cm_event);
	attr.qp_state = IB_QPS_RTS;
	nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
	if (cm_node->loopbackpartner) {
		cm_node->loopbackpartner->mpa_frame_size =
			nesqp->private_data_len;
		/* copy entire MPA frame to our cm_node's frame */
		memcpy(cm_node->loopbackpartner->mpa_frame_buf,
		       nesqp->ietf_frame->priv_data, nesqp->private_data_len);
		create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED);
	}
	if (ret)
		printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
		       "ret=%d\n", __func__, __LINE__, ret);

	return 0;
}

/**
 * nes_reject - build the MPA reject frame (with caller's private data) and
 * hand it to the core's reject op; loopback partners get the data copied
 * directly.
 */
int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct nes_cm_node *cm_node;
	struct nes_cm_node *loopback;
	struct nes_cm_core *cm_core;

	atomic_inc(&cm_rejects);
	cm_node = (struct nes_cm_node *)cm_id->provider_data;
	loopback = cm_node->loopbackpartner;
	cm_core = cm_node->cm_core;
	cm_node->cm_id = cm_id;
	cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len;

	if (cm_node->mpa_frame_size > MAX_CM_BUFFER)
		return -EINVAL;

	memcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
	if (loopback) {
		memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
		loopback->mpa_frame.priv_data_len = pdata_len;
		loopback->mpa_frame_size = sizeof(struct ietf_mpa_frame) +
					   pdata_len;
	} else {
		memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
		cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
	}

	cm_node->mpa_frame.rev = mpa_version;
	cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT;

	return cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
}

/**
 * nes_connect
 * setup and launch cm connect node
 */
int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct ib_qp *ibqp;
	struct nes_qp *nesqp;
	struct nes_vnic *nesvnic;
	struct nes_device *nesdev;
	struct nes_cm_node *cm_node;
	struct nes_cm_info cm_info;
	int apbvt_set = 0;

	ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
	if (!ibqp)
		return -EINVAL;
	nesqp = to_nesqp(ibqp);
	if (!nesqp)
		return -EINVAL;
	nesvnic = to_nesvnic(nesqp->ibqp.device);
	if (!nesvnic)
		return -EINVAL;
	nesdev = nesvnic->nesdev;
	if (!nesdev)
		return -EINVAL;

	if (!(cm_id->local_addr.sin_port) || !(cm_id->remote_addr.sin_port))
		return -EINVAL;

	nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
		  "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
		  ntohl(nesvnic->local_ipaddr),
		  ntohl(cm_id->remote_addr.sin_addr.s_addr),
		  ntohs(cm_id->remote_addr.sin_port),
		  ntohl(cm_id->local_addr.sin_addr.s_addr),
		  ntohs(cm_id->local_addr.sin_port));

	atomic_inc(&cm_connects);
	nesqp->active_conn = 1;

	/* cache the cm_id in the qp */
	nesqp->cm_id = cm_id;

	cm_id->provider_data = nesqp;

	nesqp->private_data_len = conn_param->private_data_len;
	nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);

	nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
	nes_debug(NES_DBG_CM,
"mpa private data len =%u\n", conn_param->private_data_len); if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr) { nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); apbvt_set = 1; } /* set up the connection params for the node */ cm_info.loc_addr = htonl(cm_id->local_addr.sin_addr.s_addr); cm_info.loc_port = htons(cm_id->local_addr.sin_port); cm_info.rem_addr = htonl(cm_id->remote_addr.sin_addr.s_addr); cm_info.rem_port = htons(cm_id->remote_addr.sin_port); cm_info.cm_id = cm_id; cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; cm_id->add_ref(cm_id); /* create a connect CM node connection */ cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, conn_param->private_data_len, (void *)conn_param->private_data, &cm_info); if (!cm_node) { if (apbvt_set) nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); cm_id->rem_ref(cm_id); return -ENOMEM; } cm_node->apbvt_set = apbvt_set; nesqp->cm_node = cm_node; cm_node->nesqp = nesqp; nes_add_ref(&nesqp->ibqp); return 0; } /** * nes_create_listen */ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) { struct nes_vnic *nesvnic; struct nes_cm_listener *cm_node; struct nes_cm_info cm_info; int err; nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n", cm_id, ntohs(cm_id->local_addr.sin_port)); nesvnic = to_nesvnic(cm_id->device); if (!nesvnic) return -EINVAL; nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n", nesvnic, nesvnic->netdev, nesvnic->netdev->name); nes_debug(NES_DBG_CM, "nesvnic->local_ipaddr=0x%08x, sin_addr.s_addr=0x%08x\n", nesvnic->local_ipaddr, cm_id->local_addr.sin_addr.s_addr); /* setup listen params in our api call struct */ cm_info.loc_addr = nesvnic->local_ipaddr; cm_info.loc_port = cm_id->local_addr.sin_port; cm_info.backlog = backlog; cm_info.cm_id = cm_id; cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; cm_node = g_cm_core->api->listen(g_cm_core, 
nesvnic, &cm_info); if (!cm_node) { printk(KERN_ERR "%s[%u] Error returned from listen API call\n", __func__, __LINE__); return -ENOMEM; } cm_id->provider_data = cm_node; if (!cm_node->reused_node) { err = nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), PCI_FUNC(nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); if (err) { printk(KERN_ERR "nes_manage_apbvt call returned %d.\n", err); g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); return err; } atomic_inc(&cm_listens_created); } cm_id->add_ref(cm_id); cm_id->provider_data = (void *)cm_node; return 0; } /** * nes_destroy_listen */ int nes_destroy_listen(struct iw_cm_id *cm_id) { if (cm_id->provider_data) g_cm_core->api->stop_listener(g_cm_core, cm_id->provider_data); else nes_debug(NES_DBG_CM, "cm_id->provider_data was NULL\n"); cm_id->rem_ref(cm_id); return 0; } /** * nes_cm_recv */ int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice) { int rc = 0; cm_packets_received++; if ((g_cm_core) && (g_cm_core->api)) { rc = g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); } else { nes_debug(NES_DBG_CM, "Unable to process packet for CM," " cm is not setup properly.\n"); } return rc; } /** * nes_cm_start * Start and init a cm core module */ int nes_cm_start(void) { nes_debug(NES_DBG_CM, "\n"); /* create the primary CM core, pass this handle to subsequent core inits */ g_cm_core = nes_cm_alloc_core(); if (g_cm_core) { return 0; } else { return -ENOMEM; } } /** * nes_cm_stop * stop and dealloc all cm core instances */ int nes_cm_stop(void) { g_cm_core->api->destroy_cm_core(g_cm_core); return 0; } /** * cm_event_connected * handle a connected event, setup QPs and HW */ static void cm_event_connected(struct nes_cm_event *event) { u64 u64temp; struct nes_qp *nesqp; struct nes_vnic *nesvnic; struct nes_device *nesdev; struct nes_cm_node *cm_node; struct nes_adapter *nesadapter; struct ib_qp_attr attr; struct iw_cm_id *cm_id; struct iw_cm_event cm_event; struct nes_hw_qp_wqe 
*wqe; struct nes_v4_quad nes_quad; u32 crc_value; int ret; /* get all our handles */ cm_node = event->cm_node; cm_id = cm_node->cm_id; nes_debug(NES_DBG_CM, "cm_event_connected - %p - cm_id = %p\n", cm_node, cm_id); nesqp = (struct nes_qp *)cm_id->provider_data; nesvnic = to_nesvnic(nesqp->ibqp.device); nesdev = nesvnic->nesdev; nesadapter = nesdev->nesadapter; if (nesqp->destroyed) { return; } atomic_inc(&cm_connecteds); nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" " local port 0x%04X. jiffies = %lu.\n", nesqp->hwqp.qp_id, ntohl(cm_id->remote_addr.sin_addr.s_addr), ntohs(cm_id->remote_addr.sin_port), ntohs(cm_id->local_addr.sin_port), jiffies); nes_cm_init_tsa_conn(nesqp, cm_node); /* set the QP tsa context */ nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port)); nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port)); if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr)) nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(nesvnic->local_ipaddr)); else nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr)); nesqp->nesqp_context->misc2 |= cpu_to_le32( (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT); nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32( nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), NULL, NES_ARP_RESOLVE) << 16); nesqp->nesqp_context->ts_val_delta = cpu_to_le32( jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW)); nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id); nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT); /* Adjust tail for not having a LSMM */ nesqp->hwqp.sq_tail = 1; #if defined(NES_SEND_FIRST_WRITE) if (cm_node->send_write0) { nes_debug(NES_DBG_CM, "Sending first write.\n"); wqe = &nesqp->hwqp.sq_vbase[0]; u64temp = (unsigned long)nesqp; u64temp |= NES_SW_CONTEXT_ALIGN>>1; set_wqe_64bit_value(wqe->wqe_words, 
NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp); wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(NES_IWARP_SQ_OP_RDMAW); wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0; wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0; wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0; wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; if (nesqp->sq_kmapped) { nesqp->sq_kmapped = 0; kunmap(nesqp->page); } /* use the reserved spot on the WQ for the extra first WQE */ nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM)); nesqp->skip_lsmm = 1; nesqp->hwqp.sq_tail = 0; nes_write32(nesdev->regs + NES_WQE_ALLOC, (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id); } #endif memset(&nes_quad, 0, sizeof(nes_quad)); nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr)) nes_quad.SrcIpadr = nesvnic->local_ipaddr; else nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr; nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port; nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; /* Produce hash key */ crc_value = get_crc_value(&nes_quad); nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); nesqp->hte_index &= nesadapter->hte_index_mask; nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); nesqp->ietf_frame = &cm_node->mpa_frame; nesqp->private_data_len = (u8) cm_node->mpa_frame_size; cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); /* notify OF layer we successfully created the requested connection */ cm_event.event = IW_CM_EVENT_CONNECT_REPLY; cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr.sin_family = AF_INET; cm_event.local_addr.sin_port = 
cm_id->local_addr.sin_port; cm_event.remote_addr = cm_id->remote_addr; cm_event.private_data = (void *)event->cm_node->mpa_frame_buf; cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size; cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr; ret = cm_id->event_handler(cm_id, &cm_event); nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); if (ret) printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " "ret=%d\n", __func__, __LINE__, ret); attr.qp_state = IB_QPS_RTS; nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = " "%lu\n", nesqp->hwqp.qp_id, jiffies); return; } /** * cm_event_connect_error */ static void cm_event_connect_error(struct nes_cm_event *event) { struct nes_qp *nesqp; struct iw_cm_id *cm_id; struct iw_cm_event cm_event; /* struct nes_cm_info cm_info; */ int ret; if (!event->cm_node) return; cm_id = event->cm_node->cm_id; if (!cm_id) { return; } nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id); nesqp = cm_id->provider_data; if (!nesqp) { return; } /* notify OF layer about this connection error event */ /* cm_id->rem_ref(cm_id); */ nesqp->cm_id = NULL; cm_id->provider_data = NULL; cm_event.event = IW_CM_EVENT_CONNECT_REPLY; cm_event.status = -ECONNRESET; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; cm_event.private_data = NULL; cm_event.private_data_len = 0; nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, " "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr, cm_event.remote_addr.sin_addr.s_addr); ret = cm_id->event_handler(cm_id, &cm_event); nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); if (ret) printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " "ret=%d\n", __func__, __LINE__, ret); cm_id->rem_ref(cm_id); rem_ref_cm_node(event->cm_node->cm_core, event->cm_node); return; } /** * 
cm_event_reset */ static void cm_event_reset(struct nes_cm_event *event) { struct nes_qp *nesqp; struct iw_cm_id *cm_id; struct iw_cm_event cm_event; /* struct nes_cm_info cm_info; */ int ret; if (!event->cm_node) return; if (!event->cm_node->cm_id) return; cm_id = event->cm_node->cm_id; nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id); nesqp = cm_id->provider_data; if (!nesqp) return; nesqp->cm_id = NULL; /* cm_id->provider_data = NULL; */ cm_event.event = IW_CM_EVENT_DISCONNECT; cm_event.status = -ECONNRESET; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; cm_event.private_data = NULL; cm_event.private_data_len = 0; cm_id->add_ref(cm_id); ret = cm_id->event_handler(cm_id, &cm_event); atomic_inc(&cm_closes); cm_event.event = IW_CM_EVENT_CLOSE; cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; cm_event.private_data = NULL; cm_event.private_data_len = 0; nes_debug(NES_DBG_CM, "NODE %p Generating CLOSE\n", event->cm_node); ret = cm_id->event_handler(cm_id, &cm_event); nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); /* notify OF layer about this connection error event */ cm_id->rem_ref(cm_id); return; } /** * cm_event_mpa_req */ static void cm_event_mpa_req(struct nes_cm_event *event) { struct iw_cm_id *cm_id; struct iw_cm_event cm_event; int ret; struct nes_cm_node *cm_node; cm_node = event->cm_node; if (!cm_node) return; cm_id = cm_node->cm_id; atomic_inc(&cm_connect_reqs); nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", cm_node, cm_id, jiffies); cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; cm_event.status = 0; cm_event.provider_data = (void *)cm_node; cm_event.local_addr.sin_family = AF_INET; cm_event.local_addr.sin_port = htons(event->cm_info.loc_port); cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr); 
cm_event.remote_addr.sin_family = AF_INET; cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); cm_event.private_data = cm_node->mpa_frame_buf; cm_event.private_data_len = (u8) cm_node->mpa_frame_size; ret = cm_id->event_handler(cm_id, &cm_event); if (ret) printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", __func__, __LINE__, ret); return; } static void cm_event_mpa_reject(struct nes_cm_event *event) { struct iw_cm_id *cm_id; struct iw_cm_event cm_event; struct nes_cm_node *cm_node; int ret; cm_node = event->cm_node; if (!cm_node) return; cm_id = cm_node->cm_id; atomic_inc(&cm_connect_reqs); nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", cm_node, cm_id, jiffies); cm_event.event = IW_CM_EVENT_CONNECT_REPLY; cm_event.status = -ECONNREFUSED; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr.sin_family = AF_INET; cm_event.local_addr.sin_port = htons(event->cm_info.loc_port); cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr); cm_event.remote_addr.sin_family = AF_INET; cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); cm_event.private_data = cm_node->mpa_frame_buf; cm_event.private_data_len = (u8) cm_node->mpa_frame_size; nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, " "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr, cm_event.remote_addr.sin_addr.s_addr); ret = cm_id->event_handler(cm_id, &cm_event); if (ret) printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", __func__, __LINE__, ret); return; } static void nes_cm_event_handler(struct work_struct *); /** * nes_cm_post_event * post an event to the cm event handler */ static int nes_cm_post_event(struct nes_cm_event *event) { atomic_inc(&event->cm_node->cm_core->events_posted); add_ref_cm_node(event->cm_node); 
event->cm_info.cm_id->add_ref(event->cm_info.cm_id); INIT_WORK(&event->event_work, nes_cm_event_handler); nes_debug(NES_DBG_CM, "cm_node=%p queue_work, event=%p\n", event->cm_node, event); queue_work(event->cm_node->cm_core->event_wq, &event->event_work); nes_debug(NES_DBG_CM, "Exit\n"); return 0; } /** * nes_cm_event_handler * worker function to handle cm events * will free instance of nes_cm_event */ static void nes_cm_event_handler(struct work_struct *work) { struct nes_cm_event *event = container_of(work, struct nes_cm_event, event_work); struct nes_cm_core *cm_core; if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) return; cm_core = event->cm_node->cm_core; nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n", event, event->type, atomic_read(&cm_core->events_posted)); switch (event->type) { case NES_CM_EVENT_MPA_REQ: cm_event_mpa_req(event); nes_debug(NES_DBG_CM, "cm_node=%p CM Event: MPA REQUEST\n", event->cm_node); break; case NES_CM_EVENT_RESET: nes_debug(NES_DBG_CM, "cm_node = %p CM Event: RESET\n", event->cm_node); cm_event_reset(event); break; case NES_CM_EVENT_CONNECTED: if ((!event->cm_node->cm_id) || (event->cm_node->state != NES_CM_STATE_TSA)) break; cm_event_connected(event); nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n"); break; case NES_CM_EVENT_MPA_REJECT: if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) break; cm_event_mpa_reject(event); nes_debug(NES_DBG_CM, "CM Event: REJECT\n"); break; case NES_CM_EVENT_ABORTED: if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) break; cm_event_connect_error(event); nes_debug(NES_DBG_CM, "CM Event: ABORTED\n"); break; case NES_CM_EVENT_DROPPED_PKT: nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n"); break; default: nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n"); break; } atomic_dec(&cm_core->events_posted); event->cm_info.cm_id->rem_ref(event->cm_info.cm_id); rem_ref_cm_node(cm_core, event->cm_node); kfree(event); 
return; }
gpl-2.0
2fast4u88/Htc-Design-FastKernel
fs/nilfs2/file.c
742
4220
/* * file.c - NILFS regular file handling primitives including fsync(). * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Amagai Yoshiji <amagai@osrg.net>, * Ryusuke Konishi <ryusuke@osrg.net> */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/writeback.h> #include "nilfs.h" #include "segment.h" int nilfs_sync_file(struct file *file, int datasync) { /* * Called from fsync() system call * This is the only entry point that can catch write and synch * timing for both data blocks and intermediate blocks. * * This function should be implemented when the writeback function * will be implemented. 
*/ struct inode *inode = file->f_mapping->host; int err; if (!nilfs_inode_dirty(inode)) return 0; if (datasync) err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0, LLONG_MAX); else err = nilfs_construct_segment(inode->i_sb); return err; } static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct inode *inode = vma->vm_file->f_dentry->d_inode; struct nilfs_transaction_info ti; int ret; if (unlikely(nilfs_near_disk_full(NILFS_SB(inode->i_sb)->s_nilfs))) return VM_FAULT_SIGBUS; /* -ENOSPC */ lock_page(page); if (page->mapping != inode->i_mapping || page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) { unlock_page(page); return VM_FAULT_NOPAGE; /* make the VM retry the fault */ } /* * check to see if the page is mapped already (no holes) */ if (PageMappedToDisk(page)) { unlock_page(page); goto mapped; } if (page_has_buffers(page)) { struct buffer_head *bh, *head; int fully_mapped = 1; bh = head = page_buffers(page); do { if (!buffer_mapped(bh)) { fully_mapped = 0; break; } } while (bh = bh->b_this_page, bh != head); if (fully_mapped) { SetPageMappedToDisk(page); unlock_page(page); goto mapped; } } unlock_page(page); /* * fill hole blocks */ ret = nilfs_transaction_begin(inode->i_sb, &ti, 1); /* never returns -ENOMEM, but may return -ENOSPC */ if (unlikely(ret)) return VM_FAULT_SIGBUS; ret = block_page_mkwrite(vma, vmf, nilfs_get_block); if (unlikely(ret)) { nilfs_transaction_abort(inode->i_sb); return ret; } nilfs_transaction_commit(inode->i_sb); mapped: SetPageChecked(page); wait_on_page_writeback(page); return 0; } static const struct vm_operations_struct nilfs_file_vm_ops = { .fault = filemap_fault, .page_mkwrite = nilfs_page_mkwrite, }; static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); vma->vm_ops = &nilfs_file_vm_ops; vma->vm_flags |= VM_CAN_NONLINEAR; return 0; } /* * We have mostly NULL's here: the current defaults are ok for * the 
nilfs filesystem. */ const struct file_operations nilfs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .unlocked_ioctl = nilfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = nilfs_ioctl, #endif /* CONFIG_COMPAT */ .mmap = nilfs_file_mmap, .open = generic_file_open, /* .release = nilfs_release_file, */ .fsync = nilfs_sync_file, .splice_read = generic_file_splice_read, }; const struct inode_operations nilfs_file_inode_operations = { .truncate = nilfs_truncate, .setattr = nilfs_setattr, .permission = nilfs_permission, }; /* end of file */
gpl-2.0
knightkill3r/FalcoKernel
arch/arm/mach-imx/mach-mx1ads.c
2278
3365
/* * arch/arm/mach-imx/mach-mx1ads.c * * Initially based on: * linux-2.6.7-imx/arch/arm/mach-imx/scb9328.c * Copyright (c) 2004 Sascha Hauer <sascha@saschahauer.de> * * 2004 (c) MontaVista Software, Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/i2c.h> #include <linux/i2c/pcf857x.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <mach/common.h> #include <mach/hardware.h> #include <mach/iomux-mx1.h> #include <mach/irqs.h> #include "devices-imx1.h" static const int mx1ads_pins[] __initconst = { /* UART1 */ PC9_PF_UART1_CTS, PC10_PF_UART1_RTS, PC11_PF_UART1_TXD, PC12_PF_UART1_RXD, /* UART2 */ PB28_PF_UART2_CTS, PB29_PF_UART2_RTS, PB30_PF_UART2_TXD, PB31_PF_UART2_RXD, /* I2C */ PA15_PF_I2C_SDA, PA16_PF_I2C_SCL, /* SPI */ PC13_PF_SPI1_SPI_RDY, PC14_PF_SPI1_SCLK, PC15_PF_SPI1_SS, PC16_PF_SPI1_MISO, PC17_PF_SPI1_MOSI, }; /* * UARTs platform data */ static const struct imxuart_platform_data uart0_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; static const struct imxuart_platform_data uart1_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; /* * Physmap flash */ static struct physmap_flash_data mx1ads_flash_data = { .width = 4, /* bankwidth in bytes */ }; static struct resource flash_resource = { .start = MX1_CS0_PHYS, .end = MX1_CS0_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device flash_device = { .name = "physmap-flash", .id = 0, .resource = &flash_resource, .num_resources = 1, }; /* * I2C */ static struct pcf857x_platform_data pcf857x_data[] = { { .gpio_base = 4 * 32, }, { .gpio_base = 4 * 32 + 16, } }; static const struct imxi2c_platform_data mx1ads_i2c_data __initconst = { .bitrate = 100000, }; static struct 
i2c_board_info mx1ads_i2c_devices[] = { { I2C_BOARD_INFO("pcf8575", 0x22), .platform_data = &pcf857x_data[0], }, { I2C_BOARD_INFO("pcf8575", 0x24), .platform_data = &pcf857x_data[1], }, }; /* * Board init */ static void __init mx1ads_init(void) { mxc_gpio_setup_multiple_pins(mx1ads_pins, ARRAY_SIZE(mx1ads_pins), "mx1ads"); /* UART */ imx1_add_imx_uart0(&uart0_pdata); imx1_add_imx_uart1(&uart1_pdata); /* Physmap flash */ mxc_register_device(&flash_device, &mx1ads_flash_data); /* I2C */ i2c_register_board_info(0, mx1ads_i2c_devices, ARRAY_SIZE(mx1ads_i2c_devices)); imx1_add_imx_i2c(&mx1ads_i2c_data); } static void __init mx1ads_timer_init(void) { mx1_clocks_init(32000); } struct sys_timer mx1ads_timer = { .init = mx1ads_timer_init, }; MACHINE_START(MX1ADS, "Freescale MX1ADS") /* Maintainer: Sascha Hauer, Pengutronix */ .boot_params = MX1_PHYS_OFFSET + 0x100, .map_io = mx1_map_io, .init_early = imx1_init_early, .init_irq = mx1_init_irq, .timer = &mx1ads_timer, .init_machine = mx1ads_init, MACHINE_END MACHINE_START(MXLADS, "Freescale MXLADS") .boot_params = MX1_PHYS_OFFSET + 0x100, .map_io = mx1_map_io, .init_early = imx1_init_early, .init_irq = mx1_init_irq, .timer = &mx1ads_timer, .init_machine = mx1ads_init, MACHINE_END
gpl-2.0
evnit/android_kernel_samsung_msm8660-common
drivers/pcmcia/pxa2xx_palmld.c
2534
3185
/* * linux/drivers/pcmcia/pxa2xx_palmld.c * * Driver for Palm LifeDrive PCMCIA * * Copyright (C) 2006 Alex Osborne <ato@meshy.org> * Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <mach/palmld.h> #include "soc_common.h" static struct gpio palmld_pcmcia_gpios[] = { { GPIO_NR_PALMLD_PCMCIA_POWER, GPIOF_INIT_LOW, "PCMCIA Power" }, { GPIO_NR_PALMLD_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" }, { GPIO_NR_PALMLD_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" }, }; static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { int ret; ret = gpio_request_array(palmld_pcmcia_gpios, ARRAY_SIZE(palmld_pcmcia_gpios)); skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY); return ret; } static void palmld_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) { gpio_free_array(palmld_pcmcia_gpios, ARRAY_SIZE(palmld_pcmcia_gpios)); } static void palmld_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { state->detect = 1; /* always inserted */ state->ready = !!gpio_get_value(GPIO_NR_PALMLD_PCMCIA_READY); state->bvd1 = 1; state->bvd2 = 1; state->wrprot = 0; state->vs_3v = 1; state->vs_Xv = 0; } static int palmld_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { gpio_set_value(GPIO_NR_PALMLD_PCMCIA_POWER, 1); gpio_set_value(GPIO_NR_PALMLD_PCMCIA_RESET, !!(state->flags & SS_RESET)); return 0; } static void palmld_pcmcia_socket_init(struct soc_pcmcia_socket *skt) { } static void palmld_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) { } static struct pcmcia_low_level palmld_pcmcia_ops = { .owner = THIS_MODULE, .first = 1, .nr = 1, .hw_init = palmld_pcmcia_hw_init, .hw_shutdown = 
palmld_pcmcia_hw_shutdown, .socket_state = palmld_pcmcia_socket_state, .configure_socket = palmld_pcmcia_configure_socket, .socket_init = palmld_pcmcia_socket_init, .socket_suspend = palmld_pcmcia_socket_suspend, }; static struct platform_device *palmld_pcmcia_device; static int __init palmld_pcmcia_init(void) { int ret; if (!machine_is_palmld()) return -ENODEV; palmld_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!palmld_pcmcia_device) return -ENOMEM; ret = platform_device_add_data(palmld_pcmcia_device, &palmld_pcmcia_ops, sizeof(palmld_pcmcia_ops)); if (!ret) ret = platform_device_add(palmld_pcmcia_device); if (ret) platform_device_put(palmld_pcmcia_device); return ret; } static void __exit palmld_pcmcia_exit(void) { platform_device_unregister(palmld_pcmcia_device); } module_init(palmld_pcmcia_init); module_exit(palmld_pcmcia_exit); MODULE_AUTHOR("Alex Osborne <ato@meshy.org>," " Marek Vasut <marek.vasut@gmail.com>"); MODULE_DESCRIPTION("PCMCIA support for Palm LifeDrive"); MODULE_ALIAS("platform:pxa2xx-pcmcia"); MODULE_LICENSE("GPL");
gpl-2.0
Motorhead1991/android_kernel_samsung_exynos7420
drivers/media/usb/tlg2300/pd-alsa.c
2790
7997
#include <linux/kernel.h> #include <linux/usb.h> #include <linux/init.h> #include <linux/sound.h> #include <linux/spinlock.h> #include <linux/soundcard.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/gfp.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/initval.h> #include <sound/control.h> #include <media/v4l2-common.h> #include "pd-common.h" #include "vendorcmds.h" static void complete_handler_audio(struct urb *urb); #define AUDIO_EP (0x83) #define AUDIO_BUF_SIZE (512) #define PERIOD_SIZE (1024 * 8) #define PERIOD_MIN (4) #define PERIOD_MAX PERIOD_MIN static struct snd_pcm_hardware snd_pd_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = PERIOD_SIZE * PERIOD_MIN, .period_bytes_min = PERIOD_SIZE, .period_bytes_max = PERIOD_SIZE, .periods_min = PERIOD_MIN, .periods_max = PERIOD_MAX, /* .buffer_bytes_max = 62720 * 8, .period_bytes_min = 64, .period_bytes_max = 12544, .periods_min = 2, .periods_max = 98 */ }; static int snd_pd_capture_open(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); struct poseidon_audio *pa = &p->audio; struct snd_pcm_runtime *runtime = substream->runtime; if (!p) return -ENODEV; pa->users++; pa->card_close = 0; pa->capture_pcm_substream = substream; runtime->private_data = p; runtime->hw = snd_pd_hw_capture; snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); usb_autopm_get_interface(p->interface); kref_get(&p->kref); return 0; } static int snd_pd_pcm_close(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); struct poseidon_audio *pa = &p->audio; pa->users--; pa->card_close = 1; 
usb_autopm_put_interface(p->interface); kref_put(&p->kref, poseidon_delete); return 0; } static int snd_pd_hw_capture_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_pcm_runtime *runtime = substream->runtime; unsigned int size; size = params_buffer_bytes(hw_params); if (runtime->dma_area) { if (runtime->dma_bytes > size) return 0; vfree(runtime->dma_area); } runtime->dma_area = vmalloc(size); if (!runtime->dma_area) return -ENOMEM; else runtime->dma_bytes = size; return 0; } static int audio_buf_free(struct poseidon *p) { struct poseidon_audio *pa = &p->audio; int i; for (i = 0; i < AUDIO_BUFS; i++) if (pa->urb_array[i]) usb_kill_urb(pa->urb_array[i]); free_all_urb_generic(pa->urb_array, AUDIO_BUFS); logpm(); return 0; } static int snd_pd_hw_capture_free(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); logpm(); audio_buf_free(p); return 0; } static int snd_pd_prepare(struct snd_pcm_substream *substream) { return 0; } #define AUDIO_TRAILER_SIZE (16) static inline void handle_audio_data(struct urb *urb, int *period_elapsed) { struct poseidon_audio *pa = urb->context; struct snd_pcm_runtime *runtime = pa->capture_pcm_substream->runtime; int stride = runtime->frame_bits >> 3; int len = urb->actual_length / stride; unsigned char *cp = urb->transfer_buffer; unsigned int oldptr = pa->rcv_position; if (urb->actual_length == AUDIO_BUF_SIZE - 4) len -= (AUDIO_TRAILER_SIZE / stride); /* do the copy */ if (oldptr + len >= runtime->buffer_size) { unsigned int cnt = runtime->buffer_size - oldptr; memcpy(runtime->dma_area + oldptr * stride, cp, cnt * stride); memcpy(runtime->dma_area, (cp + cnt * stride), (len * stride - cnt * stride)); } else memcpy(runtime->dma_area + oldptr * stride, cp, len * stride); /* update the statas */ snd_pcm_stream_lock(pa->capture_pcm_substream); pa->rcv_position += len; if (pa->rcv_position >= runtime->buffer_size) pa->rcv_position -= runtime->buffer_size; 
pa->copied_position += (len); if (pa->copied_position >= runtime->period_size) { pa->copied_position -= runtime->period_size; *period_elapsed = 1; } snd_pcm_stream_unlock(pa->capture_pcm_substream); } static void complete_handler_audio(struct urb *urb) { struct poseidon_audio *pa = urb->context; struct snd_pcm_substream *substream = pa->capture_pcm_substream; int period_elapsed = 0; int ret; if (1 == pa->card_close || pa->capture_stream != STREAM_ON) return; if (urb->status != 0) { /*if (urb->status == -ESHUTDOWN)*/ return; } if (substream) { if (urb->actual_length) { handle_audio_data(urb, &period_elapsed); if (period_elapsed) snd_pcm_period_elapsed(substream); } } ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) log("audio urb failed (errcod = %i)", ret); return; } static int fire_audio_urb(struct poseidon *p) { int i, ret = 0; struct poseidon_audio *pa = &p->audio; alloc_bulk_urbs_generic(pa->urb_array, AUDIO_BUFS, p->udev, AUDIO_EP, AUDIO_BUF_SIZE, GFP_ATOMIC, complete_handler_audio, pa); for (i = 0; i < AUDIO_BUFS; i++) { ret = usb_submit_urb(pa->urb_array[i], GFP_KERNEL); if (ret) log("urb err : %d", ret); } log(); return ret; } static int snd_pd_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct poseidon *p = snd_pcm_substream_chip(substream); struct poseidon_audio *pa = &p->audio; if (debug_mode) log("cmd %d, audio stat : %d\n", cmd, pa->capture_stream); switch (cmd) { case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_START: if (pa->capture_stream == STREAM_ON) return 0; pa->rcv_position = pa->copied_position = 0; pa->capture_stream = STREAM_ON; if (in_hibernation(p)) return 0; fire_audio_urb(p); return 0; case SNDRV_PCM_TRIGGER_SUSPEND: pa->capture_stream = STREAM_SUSPEND; return 0; case SNDRV_PCM_TRIGGER_STOP: pa->capture_stream = STREAM_OFF; return 0; default: return -EINVAL; } } static snd_pcm_uframes_t snd_pd_capture_pointer(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); struct 
poseidon_audio *pa = &p->audio; return pa->rcv_position; } static struct page *snd_pcm_pd_get_page(struct snd_pcm_substream *subs, unsigned long offset) { void *pageptr = subs->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } static struct snd_pcm_ops pcm_capture_ops = { .open = snd_pd_capture_open, .close = snd_pd_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_pd_hw_capture_params, .hw_free = snd_pd_hw_capture_free, .prepare = snd_pd_prepare, .trigger = snd_pd_capture_trigger, .pointer = snd_pd_capture_pointer, .page = snd_pcm_pd_get_page, }; #ifdef CONFIG_PM int pm_alsa_suspend(struct poseidon *p) { logpm(p); audio_buf_free(p); return 0; } int pm_alsa_resume(struct poseidon *p) { logpm(p); fire_audio_urb(p); return 0; } #endif int poseidon_audio_init(struct poseidon *p) { struct poseidon_audio *pa = &p->audio; struct snd_card *card; struct snd_pcm *pcm; int ret; ret = snd_card_create(-1, "Telegent", THIS_MODULE, 0, &card); if (ret != 0) return ret; ret = snd_pcm_new(card, "poseidon audio", 0, 0, 1, &pcm); if (ret < 0) { snd_card_free(card); return ret; } snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_capture_ops); pcm->info_flags = 0; pcm->private_data = p; strcpy(pcm->name, "poseidon audio capture"); strcpy(card->driver, "ALSA driver"); strcpy(card->shortname, "poseidon Audio"); strcpy(card->longname, "poseidon ALSA Audio"); if (snd_card_register(card)) { snd_card_free(card); return -ENOMEM; } pa->card = card; return 0; } int poseidon_audio_free(struct poseidon *p) { struct poseidon_audio *pa = &p->audio; if (pa->card) snd_card_free(pa->card); return 0; }
gpl-2.0
chen2011521/xt560_kernel
drivers/usb/otg/ulpi.c
3046
6271
/* * Generic ULPI USB transceiver support * * Copyright (C) 2009 Daniel Mack <daniel@caiaq.de> * * Based on sources from * * Sascha Hauer <s.hauer@pengutronix.de> * Freescale Semiconductors * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> struct ulpi_info { unsigned int id; char *name; }; #define ULPI_ID(vendor, product) (((vendor) << 16) | (product)) #define ULPI_INFO(_id, _name) \ { \ .id = (_id), \ .name = (_name), \ } /* ULPI hardcoded IDs, used for probing */ static struct ulpi_info ulpi_ids[] = { ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"), ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"), }; static int ulpi_set_otg_flags(struct otg_transceiver *otg) { unsigned int flags = ULPI_OTG_CTRL_DP_PULLDOWN | ULPI_OTG_CTRL_DM_PULLDOWN; if (otg->flags & ULPI_OTG_ID_PULLUP) flags |= ULPI_OTG_CTRL_ID_PULLUP; /* * ULPI Specification rev.1.1 default * for Dp/DmPulldown is enabled. 
*/ if (otg->flags & ULPI_OTG_DP_PULLDOWN_DIS) flags &= ~ULPI_OTG_CTRL_DP_PULLDOWN; if (otg->flags & ULPI_OTG_DM_PULLDOWN_DIS) flags &= ~ULPI_OTG_CTRL_DM_PULLDOWN; if (otg->flags & ULPI_OTG_EXTVBUSIND) flags |= ULPI_OTG_CTRL_EXTVBUSIND; return otg_io_write(otg, flags, ULPI_OTG_CTRL); } static int ulpi_set_fc_flags(struct otg_transceiver *otg) { unsigned int flags = 0; /* * ULPI Specification rev.1.1 default * for XcvrSelect is Full Speed. */ if (otg->flags & ULPI_FC_HS) flags |= ULPI_FUNC_CTRL_HIGH_SPEED; else if (otg->flags & ULPI_FC_LS) flags |= ULPI_FUNC_CTRL_LOW_SPEED; else if (otg->flags & ULPI_FC_FS4LS) flags |= ULPI_FUNC_CTRL_FS4LS; else flags |= ULPI_FUNC_CTRL_FULL_SPEED; if (otg->flags & ULPI_FC_TERMSEL) flags |= ULPI_FUNC_CTRL_TERMSELECT; /* * ULPI Specification rev.1.1 default * for OpMode is Normal Operation. */ if (otg->flags & ULPI_FC_OP_NODRV) flags |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING; else if (otg->flags & ULPI_FC_OP_DIS_NRZI) flags |= ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI; else if (otg->flags & ULPI_FC_OP_NSYNC_NEOP) flags |= ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP; else flags |= ULPI_FUNC_CTRL_OPMODE_NORMAL; /* * ULPI Specification rev.1.1 default * for SuspendM is Powered. 
*/ flags |= ULPI_FUNC_CTRL_SUSPENDM; return otg_io_write(otg, flags, ULPI_FUNC_CTRL); } static int ulpi_set_ic_flags(struct otg_transceiver *otg) { unsigned int flags = 0; if (otg->flags & ULPI_IC_AUTORESUME) flags |= ULPI_IFC_CTRL_AUTORESUME; if (otg->flags & ULPI_IC_EXTVBUS_INDINV) flags |= ULPI_IFC_CTRL_EXTERNAL_VBUS; if (otg->flags & ULPI_IC_IND_PASSTHRU) flags |= ULPI_IFC_CTRL_PASSTHRU; if (otg->flags & ULPI_IC_PROTECT_DIS) flags |= ULPI_IFC_CTRL_PROTECT_IFC_DISABLE; return otg_io_write(otg, flags, ULPI_IFC_CTRL); } static int ulpi_set_flags(struct otg_transceiver *otg) { int ret; ret = ulpi_set_otg_flags(otg); if (ret) return ret; ret = ulpi_set_ic_flags(otg); if (ret) return ret; return ulpi_set_fc_flags(otg); } static int ulpi_check_integrity(struct otg_transceiver *otg) { int ret, i; unsigned int val = 0x55; for (i = 0; i < 2; i++) { ret = otg_io_write(otg, val, ULPI_SCRATCH); if (ret < 0) return ret; ret = otg_io_read(otg, ULPI_SCRATCH); if (ret < 0) return ret; if (ret != val) { pr_err("ULPI integrity check: failed!"); return -ENODEV; } val = val << 1; } pr_info("ULPI integrity check: passed.\n"); return 0; } static int ulpi_init(struct otg_transceiver *otg) { int i, vid, pid, ret; u32 ulpi_id = 0; for (i = 0; i < 4; i++) { ret = otg_io_read(otg, ULPI_PRODUCT_ID_HIGH - i); if (ret < 0) return ret; ulpi_id = (ulpi_id << 8) | ret; } vid = ulpi_id & 0xffff; pid = ulpi_id >> 16; pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid); for (i = 0; i < ARRAY_SIZE(ulpi_ids); i++) { if (ulpi_ids[i].id == ULPI_ID(vid, pid)) { pr_info("Found %s ULPI transceiver.\n", ulpi_ids[i].name); break; } } ret = ulpi_check_integrity(otg); if (ret) return ret; return ulpi_set_flags(otg); } static int ulpi_set_host(struct otg_transceiver *otg, struct usb_bus *host) { unsigned int flags = otg_io_read(otg, ULPI_IFC_CTRL); if (!host) { otg->host = NULL; return 0; } otg->host = host; flags &= ~(ULPI_IFC_CTRL_6_PIN_SERIAL_MODE | ULPI_IFC_CTRL_3_PIN_SERIAL_MODE | 
ULPI_IFC_CTRL_CARKITMODE); if (otg->flags & ULPI_IC_6PIN_SERIAL) flags |= ULPI_IFC_CTRL_6_PIN_SERIAL_MODE; else if (otg->flags & ULPI_IC_3PIN_SERIAL) flags |= ULPI_IFC_CTRL_3_PIN_SERIAL_MODE; else if (otg->flags & ULPI_IC_CARKIT) flags |= ULPI_IFC_CTRL_CARKITMODE; return otg_io_write(otg, flags, ULPI_IFC_CTRL); } static int ulpi_set_vbus(struct otg_transceiver *otg, bool on) { unsigned int flags = otg_io_read(otg, ULPI_OTG_CTRL); flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT); if (on) { if (otg->flags & ULPI_OTG_DRVVBUS) flags |= ULPI_OTG_CTRL_DRVVBUS; if (otg->flags & ULPI_OTG_DRVVBUS_EXT) flags |= ULPI_OTG_CTRL_DRVVBUS_EXT; } return otg_io_write(otg, flags, ULPI_OTG_CTRL); } struct otg_transceiver * otg_ulpi_create(struct otg_io_access_ops *ops, unsigned int flags) { struct otg_transceiver *otg; otg = kzalloc(sizeof(*otg), GFP_KERNEL); if (!otg) return NULL; otg->label = "ULPI"; otg->flags = flags; otg->io_ops = ops; otg->init = ulpi_init; otg->set_host = ulpi_set_host; otg->set_vbus = ulpi_set_vbus; return otg; } EXPORT_SYMBOL_GPL(otg_ulpi_create);
gpl-2.0
FennyFatal/SGS4-M919-FennyKernel
drivers/gpu/drm/nouveau/nv50_evo.c
5350
10937
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_dma.h" #include "nouveau_ramht.h" #include "nv50_display.h" static void nv50_evo_channel_del(struct nouveau_channel **pevo) { struct nouveau_channel *evo = *pevo; if (!evo) return; *pevo = NULL; nouveau_ramht_ref(NULL, &evo->ramht, evo); nouveau_gpuobj_channel_takedown(evo); nouveau_bo_unmap(evo->pushbuf_bo); nouveau_bo_ref(NULL, &evo->pushbuf_bo); if (evo->user) iounmap(evo->user); kfree(evo); } void nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size) { struct drm_nouveau_private *dev_priv = obj->dev->dev_private; u32 flags5; if (dev_priv->chipset < 0xc0) { /* not supported on 0x50, specified in format mthd */ if (dev_priv->chipset == 0x50) memtype = 0; flags5 = 0x00010000; } else { if (memtype & 0x80000000) flags5 = 0x00000000; /* large pages */ else flags5 = 0x00020000; } nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM, NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0); nv_wo32(obj, 0x14, flags5); dev_priv->engine.instmem.flush(obj->dev); } int nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype, u64 base, u64 size, struct nouveau_gpuobj **pobj) { struct nv50_display *disp = nv50_display(evo->dev); struct nouveau_gpuobj *obj = NULL; int ret; ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj); if (ret) return ret; obj->engine = NVOBJ_ENGINE_DISPLAY; nv50_evo_dmaobj_init(obj, memtype, base, size); ret = nouveau_ramht_insert(evo, handle, obj); if (ret) goto out; if (pobj) nouveau_gpuobj_ref(obj, pobj); out: nouveau_gpuobj_ref(NULL, &obj); return ret; } static int nv50_evo_channel_new(struct drm_device *dev, int chid, struct nouveau_channel **pevo) { struct nv50_display *disp = nv50_display(dev); struct nouveau_channel *evo; int ret; evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); if (!evo) return -ENOMEM; *pevo = evo; evo->id = chid; evo->dev = dev; evo->user_get = 4; evo->user_put = 0; ret 
= nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, &evo->pushbuf_bo); if (ret == 0) ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); if (ret) { NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); nv50_evo_channel_del(pevo); return ret; } ret = nouveau_bo_map(evo->pushbuf_bo); if (ret) { NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); nv50_evo_channel_del(pevo); return ret; } evo->user = ioremap(pci_resource_start(dev->pdev, 0) + NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); if (!evo->user) { NV_ERROR(dev, "Error mapping EVO control regs.\n"); nv50_evo_channel_del(pevo); return -ENOMEM; } /* bind primary evo channel's ramht to the channel */ if (disp->master && evo != disp->master) nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL); return 0; } static int nv50_evo_channel_init(struct nouveau_channel *evo) { struct drm_device *dev = evo->dev; int id = evo->id, ret, i; u64 pushbuf = evo->pushbuf_bo->bo.offset; u32 tmp; tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); if ((tmp & 0x009f0000) == 0x00020000) nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); if ((tmp & 0x003f0000) == 0x00030000) nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); /* initialise fifo */ nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | NV50_PDISPLAY_EVO_DMA_CB_VALID); nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id); nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000); nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id, nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); return -EBUSY; } /* enable error reporting on the channel 
*/ nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); evo->dma.max = (4096/4) - 2; evo->dma.max &= ~7; evo->dma.put = 0; evo->dma.cur = evo->dma.put; evo->dma.free = evo->dma.max - evo->dma.cur; ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); if (ret) return ret; for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) OUT_RING(evo, 0); return 0; } static void nv50_evo_channel_fini(struct nouveau_channel *evo) { struct drm_device *dev = evo->dev; int id = evo->id; nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000); nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id)); nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id, nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); } } void nv50_evo_destroy(struct drm_device *dev) { struct nv50_display *disp = nv50_display(dev); int i; for (i = 0; i < 2; i++) { if (disp->crtc[i].sem.bo) { nouveau_bo_unmap(disp->crtc[i].sem.bo); nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo); } nv50_evo_channel_del(&disp->crtc[i].sync); } nouveau_gpuobj_ref(NULL, &disp->ntfy); nv50_evo_channel_del(&disp->master); } int nv50_evo_create(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_display *disp = nv50_display(dev); struct nouveau_gpuobj *ramht = NULL; struct nouveau_channel *evo; int ret, i, j; /* create primary evo channel, the one we use for modesetting * purporses */ ret = nv50_evo_channel_new(dev, 0, &disp->master); if (ret) return ret; evo = disp->master; /* setup object management on it, any other evo channel will * use this also as there's no per-channel support on the * hardware */ ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); if (ret) { NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); goto err; } ret = drm_mm_init(&evo->ramin_heap, 0, 32768); if (ret) { 
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); goto err; } ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); if (ret) { NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); goto err; } ret = nouveau_ramht_new(dev, ramht, &evo->ramht); nouveau_gpuobj_ref(NULL, &ramht); if (ret) goto err; /* not sure exactly what this is.. * * the first dword of the structure is used by nvidia to wait on * full completion of an EVO "update" command. * * method 0x8c on the master evo channel will fill a lot more of * this structure with some undefined info */ ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0, NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy); if (ret) goto err; ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000, disp->ntfy->vinst, disp->ntfy->size, NULL); if (ret) goto err; /* create some default objects for the scanout memtypes we support */ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000, 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000, 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 | (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00), 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 | (dev_priv->chipset < 0xc0 ? 
0x7000 : 0xfe00), 0, dev_priv->vram_size, NULL); if (ret) goto err; /* create "display sync" channels and other structures we need * to implement page flipping */ for (i = 0; i < 2; i++) { struct nv50_display_crtc *dispc = &disp->crtc[i]; u64 offset; ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync); if (ret) goto err; ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 0, 0x0000, &dispc->sem.bo); if (!ret) { ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); if (!ret) ret = nouveau_bo_map(dispc->sem.bo); if (ret) nouveau_bo_ref(NULL, &dispc->sem.bo); offset = dispc->sem.bo->bo.offset; } if (ret) goto err; ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000, offset, 4096, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000, 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 | (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00), 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 | (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00), 0, dev_priv->vram_size, NULL); if (ret) goto err; for (j = 0; j < 4096; j += 4) nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000); dispc->sem.offset = 0; } return 0; err: nv50_evo_destroy(dev); return ret; } int nv50_evo_init(struct drm_device *dev) { struct nv50_display *disp = nv50_display(dev); int ret, i; ret = nv50_evo_channel_init(disp->master); if (ret) return ret; for (i = 0; i < 2; i++) { ret = nv50_evo_channel_init(disp->crtc[i].sync); if (ret) return ret; } return 0; } void nv50_evo_fini(struct drm_device *dev) { struct nv50_display *disp = nv50_display(dev); int i; for (i = 0; i < 2; i++) { if (disp->crtc[i].sync) nv50_evo_channel_fini(disp->crtc[i].sync); } if (disp->master) nv50_evo_channel_fini(disp->master); }
gpl-2.0
crpalmer/dna-kernel
arch/arm/mach-gemini/time.c
12006
2470
/* * Copyright (C) 2001-2006 Storlink, Corp. * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/global_reg.h> #include <asm/mach/time.h> /* * Register definitions for the timers */ #define TIMER_COUNT(BASE_ADDR) (BASE_ADDR + 0x00) #define TIMER_LOAD(BASE_ADDR) (BASE_ADDR + 0x04) #define TIMER_MATCH1(BASE_ADDR) (BASE_ADDR + 0x08) #define TIMER_MATCH2(BASE_ADDR) (BASE_ADDR + 0x0C) #define TIMER_CR(BASE_ADDR) (BASE_ADDR + 0x30) #define TIMER_1_CR_ENABLE (1 << 0) #define TIMER_1_CR_CLOCK (1 << 1) #define TIMER_1_CR_INT (1 << 2) #define TIMER_2_CR_ENABLE (1 << 3) #define TIMER_2_CR_CLOCK (1 << 4) #define TIMER_2_CR_INT (1 << 5) #define TIMER_3_CR_ENABLE (1 << 6) #define TIMER_3_CR_CLOCK (1 << 7) #define TIMER_3_CR_INT (1 << 8) /* * IRQ handler for the timer */ static irqreturn_t gemini_timer_interrupt(int irq, void *dev_id) { timer_tick(); return IRQ_HANDLED; } static struct irqaction gemini_timer_irq = { .name = "Gemini Timer Tick", .flags = IRQF_DISABLED | IRQF_TIMER, .handler = gemini_timer_interrupt, }; /* * Set up timer interrupt, and return the current time in seconds. 
*/ void __init gemini_timer_init(void) { unsigned int tick_rate, reg_v; reg_v = __raw_readl(IO_ADDRESS(GEMINI_GLOBAL_BASE + GLOBAL_STATUS)); tick_rate = REG_TO_AHB_SPEED(reg_v) * 1000000; printk(KERN_INFO "Bus: %dMHz", tick_rate / 1000000); tick_rate /= 6; /* APB bus run AHB*(1/6) */ switch(reg_v & CPU_AHB_RATIO_MASK) { case CPU_AHB_1_1: printk(KERN_CONT "(1/1)\n"); break; case CPU_AHB_3_2: printk(KERN_CONT "(3/2)\n"); break; case CPU_AHB_24_13: printk(KERN_CONT "(24/13)\n"); break; case CPU_AHB_2_1: printk(KERN_CONT "(2/1)\n"); break; } /* * Make irqs happen for the system timer */ setup_irq(IRQ_TIMER2, &gemini_timer_irq); /* Start the timer */ __raw_writel(tick_rate / HZ, TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER2_BASE))); __raw_writel(tick_rate / HZ, TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER2_BASE))); __raw_writel(TIMER_2_CR_ENABLE | TIMER_2_CR_INT, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE))); }
gpl-2.0
fduchene/mptcp-rpi
sound/pci/echoaudio/mona_dsp.c
12518
11013
/**************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ static int write_control_reg(struct echoaudio *chip, u32 value, char force); static int set_input_clock(struct echoaudio *chip, u16 clock); static int set_professional_spdif(struct echoaudio *chip, char prof); static int set_digital_mode(struct echoaudio *chip, u8 mode); static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic); static int check_asic_status(struct echoaudio *chip); static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id) { int err; DE_INIT(("init_hw() - Mona\n")); if (snd_BUG_ON((subdevice_id & 0xfff0) != MONA)) return -ENODEV; if ((err = init_dsp_comm_page(chip))) { DE_INIT(("init_hw - could not initialize DSP comm page\n")); return err; } chip->device_id = device_id; chip->subdevice_id = subdevice_id; chip->bad_board = TRUE; chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF | 
ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT; chip->digital_modes = ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_RCA | ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL | ECHOCAPS_HAS_DIGITAL_MODE_ADAT; /* Mona comes in both '301 and '361 flavors */ if (chip->device_id == DEVICE_ID_56361) chip->dsp_code_to_load = FW_MONA_361_DSP; else chip->dsp_code_to_load = FW_MONA_301_DSP; if ((err = load_firmware(chip)) < 0) return err; chip->bad_board = FALSE; DE_INIT(("init_hw done\n")); return err; } static int set_mixer_defaults(struct echoaudio *chip) { chip->digital_mode = DIGITAL_MODE_SPDIF_RCA; chip->professional_spdif = FALSE; chip->digital_in_automute = TRUE; return init_line_levels(chip); } static u32 detect_input_clocks(const struct echoaudio *chip) { u32 clocks_from_dsp, clock_bits; /* Map the DSP clock detect bits to the generic driver clock detect bits */ clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks); clock_bits = ECHO_CLOCK_BIT_INTERNAL; if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF) clock_bits |= ECHO_CLOCK_BIT_SPDIF; if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_ADAT) clock_bits |= ECHO_CLOCK_BIT_ADAT; if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD) clock_bits |= ECHO_CLOCK_BIT_WORD; return clock_bits; } /* Mona has an ASIC on the PCI card and another ASIC in the external box; both need to be loaded. 
*/ static int load_asic(struct echoaudio *chip) { u32 control_reg; int err; short asic; if (chip->asic_loaded) return 0; mdelay(10); if (chip->device_id == DEVICE_ID_56361) asic = FW_MONA_361_1_ASIC48; else asic = FW_MONA_301_1_ASIC48; err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC, asic); if (err < 0) return err; chip->asic_code = asic; mdelay(10); /* Do the external one */ err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_EXTERNAL_ASIC, FW_MONA_2_ASIC); if (err < 0) return err; mdelay(10); err = check_asic_status(chip); /* Set up the control register if the load succeeded - 48 kHz, internal clock, S/PDIF RCA mode */ if (!err) { control_reg = GML_CONVERTER_ENABLE | GML_48KHZ; err = write_control_reg(chip, control_reg, TRUE); } return err; } /* Depending on what digital mode you want, Mona needs different ASICs loaded. This function checks the ASIC needed for the new mode and sees if it matches the one already loaded. */ static int switch_asic(struct echoaudio *chip, char double_speed) { int err; short asic; /* Check the clock detect bits to see if this is a single-speed clock or a double-speed clock; load a new ASIC if necessary. */ if (chip->device_id == DEVICE_ID_56361) { if (double_speed) asic = FW_MONA_361_1_ASIC96; else asic = FW_MONA_361_1_ASIC48; } else { if (double_speed) asic = FW_MONA_301_1_ASIC96; else asic = FW_MONA_301_1_ASIC48; } if (asic != chip->asic_code) { /* Load the desired ASIC */ err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC, asic); if (err < 0) return err; chip->asic_code = asic; } return 0; } static int set_sample_rate(struct echoaudio *chip, u32 rate) { u32 control_reg, clock; short asic; char force_write; /* Only set the clock for internal mode. 
*/ if (chip->input_clock != ECHO_CLOCK_INTERNAL) { DE_ACT(("set_sample_rate: Cannot set sample rate - " "clock not set to CLK_CLOCKININTERNAL\n")); /* Save the rate anyhow */ chip->comm_page->sample_rate = cpu_to_le32(rate); chip->sample_rate = rate; return 0; } /* Now, check to see if the required ASIC is loaded */ if (rate >= 88200) { if (chip->digital_mode == DIGITAL_MODE_ADAT) return -EINVAL; if (chip->device_id == DEVICE_ID_56361) asic = FW_MONA_361_1_ASIC96; else asic = FW_MONA_301_1_ASIC96; } else { if (chip->device_id == DEVICE_ID_56361) asic = FW_MONA_361_1_ASIC48; else asic = FW_MONA_301_1_ASIC48; } force_write = 0; if (asic != chip->asic_code) { int err; /* Load the desired ASIC (load_asic_generic() can sleep) */ spin_unlock_irq(&chip->lock); err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC, asic); spin_lock_irq(&chip->lock); if (err < 0) return err; chip->asic_code = asic; force_write = 1; } /* Compute the new control register value */ clock = 0; control_reg = le32_to_cpu(chip->comm_page->control_register); control_reg &= GML_CLOCK_CLEAR_MASK; control_reg &= GML_SPDIF_RATE_CLEAR_MASK; switch (rate) { case 96000: clock = GML_96KHZ; break; case 88200: clock = GML_88KHZ; break; case 48000: clock = GML_48KHZ | GML_SPDIF_SAMPLE_RATE1; break; case 44100: clock = GML_44KHZ; /* Professional mode */ if (control_reg & GML_SPDIF_PRO_MODE) clock |= GML_SPDIF_SAMPLE_RATE0; break; case 32000: clock = GML_32KHZ | GML_SPDIF_SAMPLE_RATE0 | GML_SPDIF_SAMPLE_RATE1; break; case 22050: clock = GML_22KHZ; break; case 16000: clock = GML_16KHZ; break; case 11025: clock = GML_11KHZ; break; case 8000: clock = GML_8KHZ; break; default: DE_ACT(("set_sample_rate: %d invalid!\n", rate)); return -EINVAL; } control_reg |= clock; chip->comm_page->sample_rate = cpu_to_le32(rate); /* ignored by the DSP */ chip->sample_rate = rate; DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock)); return write_control_reg(chip, control_reg, force_write); } static int 
set_input_clock(struct echoaudio *chip, u16 clock) { u32 control_reg, clocks_from_dsp; int err; DE_ACT(("set_input_clock:\n")); /* Prevent two simultaneous calls to switch_asic() */ if (atomic_read(&chip->opencount)) return -EAGAIN; /* Mask off the clock select bits */ control_reg = le32_to_cpu(chip->comm_page->control_register) & GML_CLOCK_CLEAR_MASK; clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks); switch (clock) { case ECHO_CLOCK_INTERNAL: DE_ACT(("Set Mona clock to INTERNAL\n")); chip->input_clock = ECHO_CLOCK_INTERNAL; return set_sample_rate(chip, chip->sample_rate); case ECHO_CLOCK_SPDIF: if (chip->digital_mode == DIGITAL_MODE_ADAT) return -EAGAIN; spin_unlock_irq(&chip->lock); err = switch_asic(chip, clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF96); spin_lock_irq(&chip->lock); if (err < 0) return err; DE_ACT(("Set Mona clock to SPDIF\n")); control_reg |= GML_SPDIF_CLOCK; if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF96) control_reg |= GML_DOUBLE_SPEED_MODE; else control_reg &= ~GML_DOUBLE_SPEED_MODE; break; case ECHO_CLOCK_WORD: DE_ACT(("Set Mona clock to WORD\n")); spin_unlock_irq(&chip->lock); err = switch_asic(chip, clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD96); spin_lock_irq(&chip->lock); if (err < 0) return err; control_reg |= GML_WORD_CLOCK; if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD96) control_reg |= GML_DOUBLE_SPEED_MODE; else control_reg &= ~GML_DOUBLE_SPEED_MODE; break; case ECHO_CLOCK_ADAT: DE_ACT(("Set Mona clock to ADAT\n")); if (chip->digital_mode != DIGITAL_MODE_ADAT) return -EAGAIN; control_reg |= GML_ADAT_CLOCK; control_reg &= ~GML_DOUBLE_SPEED_MODE; break; default: DE_ACT(("Input clock 0x%x not supported for Mona\n", clock)); return -EINVAL; } chip->input_clock = clock; return write_control_reg(chip, control_reg, TRUE); } static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode) { u32 control_reg; int err, incompatible_clock; /* Set clock to "internal" if it's not compatible with the new mode */ 
incompatible_clock = FALSE; switch (mode) { case DIGITAL_MODE_SPDIF_OPTICAL: case DIGITAL_MODE_SPDIF_RCA: if (chip->input_clock == ECHO_CLOCK_ADAT) incompatible_clock = TRUE; break; case DIGITAL_MODE_ADAT: if (chip->input_clock == ECHO_CLOCK_SPDIF) incompatible_clock = TRUE; break; default: DE_ACT(("Digital mode not supported: %d\n", mode)); return -EINVAL; } spin_lock_irq(&chip->lock); if (incompatible_clock) { /* Switch to 48KHz, internal */ chip->sample_rate = 48000; set_input_clock(chip, ECHO_CLOCK_INTERNAL); } /* Clear the current digital mode */ control_reg = le32_to_cpu(chip->comm_page->control_register); control_reg &= GML_DIGITAL_MODE_CLEAR_MASK; /* Tweak the control reg */ switch (mode) { case DIGITAL_MODE_SPDIF_OPTICAL: control_reg |= GML_SPDIF_OPTICAL_MODE; break; case DIGITAL_MODE_SPDIF_RCA: /* GML_SPDIF_OPTICAL_MODE bit cleared */ break; case DIGITAL_MODE_ADAT: /* If the current ASIC is the 96KHz ASIC, switch the ASIC and set to 48 KHz */ if (chip->asic_code == FW_MONA_361_1_ASIC96 || chip->asic_code == FW_MONA_301_1_ASIC96) { set_sample_rate(chip, 48000); } control_reg |= GML_ADAT_MODE; control_reg &= ~GML_DOUBLE_SPEED_MODE; break; } err = write_control_reg(chip, control_reg, FALSE); spin_unlock_irq(&chip->lock); if (err < 0) return err; chip->digital_mode = mode; DE_ACT(("set_digital_mode to %d\n", mode)); return incompatible_clock; }
gpl-2.0
ajayramaswamy/linux-imx-gk802
arch/powerpc/boot/virtex.c
14054
2919
/*
 * The platform specific code for virtex devices since a boot loader is not
 * always used.
 *
 * (C) Copyright 2008 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "io.h"
#include "stdio.h"

/* 16550-compatible UART register offsets and bit definitions (the
 * register stride is scaled by the device tree's "reg-shift"). */
#define UART_DLL		0	/* Out: Divisor Latch Low */
#define UART_DLM		1	/* Out: Divisor Latch High */
#define UART_FCR		2	/* Out: FIFO Control Register */
#define UART_FCR_CLEAR_RCVR	0x02	/* Clear the RCVR FIFO */
#define UART_FCR_CLEAR_XMIT	0x04	/* Clear the XMIT FIFO */
#define UART_LCR		3	/* Out: Line Control Register */
#define UART_MCR		4	/* Out: Modem Control Register */
#define UART_MCR_RTS		0x02	/* RTS complement */
#define UART_MCR_DTR		0x01	/* DTR complement */
#define UART_LCR_DLAB		0x80	/* Divisor latch access bit */
#define UART_LCR_WLEN8		0x03	/* Wordlength: 8 bits */

/* Fully initialize an ns16550 console UART from its device tree node:
 * program the baud divisor, set 8N1 framing, raise RTS/DTR and clear
 * both FIFOs.  Returns 0 on success, -1 if the node lacks a usable
 * "reg" or "clock-frequency" property. */
static int virtex_ns16550_console_init(void *devp)
{
	unsigned char *reg_base;
	u32 reg_shift, reg_offset, clk, spd;
	u16 divisor;
	int n;

	if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
		return -1;

	/* Optional properties default sensibly when absent */
	n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
	if (n == sizeof(reg_offset))
		reg_base += reg_offset;

	n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
	if (n != sizeof(reg_shift))
		reg_shift = 0;

	n = getprop(devp, "current-speed", (void *)&spd, sizeof(spd));
	if (n != sizeof(spd))
		spd = 9600;

	/* should there be a default clock rate?*/
	n = getprop(devp, "clock-frequency", (void *)&clk, sizeof(clk));
	if (n != sizeof(clk))
		return -1;

	/* Standard 16550 formula: divisor = clock / (16 * baud) */
	divisor = clk / (16 * spd);

	/* Access baud rate */
	out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_DLAB);

	/* Baud rate based on input clock */
	out_8(reg_base + (UART_DLL << reg_shift), divisor & 0xFF);
	out_8(reg_base + (UART_DLM << reg_shift), divisor >> 8);

	/* 8 data, 1 stop, no parity */
	out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_WLEN8);

	/* RTS/DTR */
	out_8(reg_base + (UART_MCR << reg_shift), UART_MCR_RTS | UART_MCR_DTR);

	/* Clear transmitter and receiver */
	out_8(reg_base + (UART_FCR << reg_shift),
	      UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	return 0;
}

/* For virtex, the kernel may be loaded without using a bootloader and if so
   some UARTs need more setup than is provided in the normal console init.
   Looks up /chosen's linux,stdout-path and, when it names an ns16550
   serial device, runs the full console init above.  Always returns 0
   unless the device tree lookup itself fails. */
int platform_specific_init(void)
{
	void *devp;
	char devtype[MAX_PROP_LEN];
	char path[MAX_PATH_LEN];

	devp = finddevice("/chosen");
	if (devp == NULL)
		return -1;

	if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) {
		devp = finddevice(path);
		if (devp == NULL)
			return -1;

		if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0)
				&& !strcmp(devtype, "serial")
				&& (dt_is_compatible(devp, "ns16550")))
			virtex_ns16550_console_init(devp);
	}
	return 0;
}
gpl-2.0
lbule/android_kernel_htc_m9pw
drivers/parisc/lasi.c
14566
6308
/*
 * LASI Device Driver
 *
 * (c) Copyright 1999 Red Hat Software
 * Portions (c) Copyright 1999 The Puffin Group Inc.
 * Portions (c) Copyright 1999 Hewlett-Packard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * by Alan Cox <alan@redhat.com> and
 * Alex deVries <alex@onefishtwo.ca>
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/led.h>

#include "gsc.h"

#define LASI_VER	0xC008	/* LASI Version */

#define LASI_IO_CONF	0x7FFFE	/* LASI primary configuration register */
#define LASI_IO_CONF2	0x7FFFF	/* LASI secondary configuration register */

/* Map a child device (identified by its sversion) to its fixed LASI
 * interrupt line and record the assignment.  Unknown devices are left
 * without an irq. */
static void lasi_choose_irq(struct parisc_device *dev, void *ctrl)
{
	int irq;

	switch (dev->id.sversion) {
		case 0x74:	irq =  7; break; /* Centronics */
		case 0x7B:	irq = 13; break; /* Audio */
		case 0x81:	irq = 14; break; /* Lasi itself */
		case 0x82:	irq =  9; break; /* SCSI */
		case 0x83:	irq = 20; break; /* Floppy */
		case 0x84:	irq = 26; break; /* PS/2 Keyboard */
		case 0x87:	irq = 18; break; /* ISDN */
		case 0x8A:	irq =  8; break; /* LAN */
		case 0x8C:	irq =  5; break; /* RS232 */
		case 0x8D:	irq = (dev->hw_path == 13) ? 16 : 17; break;
						 /* Telephone */
		default: 	return;		 /* unknown */
	}

	gsc_asic_assign_irq(ctrl, irq, &dev->irq);
}

/* Quiesce LASI interrupt delivery (mask everything, drain pending
 * interrupts) and hard-reset selected onboard devices. */
static void __init
lasi_init_irq(struct gsc_asic *this_lasi)
{
	unsigned long lasi_base = this_lasi->hpa;

	/* Stop LASI barking for a bit */
	gsc_writel(0x00000000, lasi_base+OFFSET_IMR);

	/* clear pending interrupts */
	gsc_readl(lasi_base+OFFSET_IRR);

	/* We're not really convinced we want to reset the onboard
         * devices. Firmware does it for us...
	 */

	/* Resets */
	/* gsc_writel(0xFFFFFFFF, lasi_base+0x2000);*/	/* Parallel */
	if (pdc_add_valid(lasi_base+0x4004) == PDC_OK)
		gsc_writel(0xFFFFFFFF, lasi_base+0x4004);	/* Audio */
	/* gsc_writel(0xFFFFFFFF, lasi_base+0x5000);*/	/* Serial */
	/* gsc_writel(0xFFFFFFFF, lasi_base+0x6000);*/	/* SCSI */
	gsc_writel(0xFFFFFFFF, lasi_base+0x7000);	/* LAN */
	gsc_writel(0xFFFFFFFF, lasi_base+0x8000);	/* Keyboard */
	gsc_writel(0xFFFFFFFF, lasi_base+0xA000);	/* FDC */

	/* Ok we hit it on the head with a hammer, our Dog is now
	** comatose and muzzled.  Devices will now unmask LASI
	** interrupts as they are registered as irq's in the LASI range.
	*/
	/* XXX: I thought it was `awks that got `it on the `ead with an
	 * `ammer.  -- willy
	 */
}


/*
   ** lasi_led_init()
   **
   ** lasi_led_init() initializes the LED controller on the LASI.
   **
   ** Since Mirage and Electra machines use a different LED
   ** address register, we need to check for these machines
   ** explicitly.
 */

#ifndef CONFIG_CHASSIS_LCD_LED

#define lasi_led_init(x)	/* nothing */

#else

static void __init lasi_led_init(unsigned long lasi_hpa)
{
	unsigned long datareg;

	switch (CPU_HVERSION) {
	/* Gecko machines have only one single LED, which can be permanently
	   turned on by writing a zero into the power control register. */
	case 0x600:		/* Gecko (712/60) */
	case 0x601:		/* Gecko (712/80) */
	case 0x602:		/* Gecko (712/100) */
	case 0x603:		/* Anole 64 (743/64) */
	case 0x604:		/* Anole 100 (743/100) */
	case 0x605:		/* Gecko (712/120) */
		datareg = lasi_hpa + 0x0000C000;
		gsc_writeb(0, datareg);
		return; /* no need to register the LED interrupt-function */

	/* Mirage and Electra machines need special offsets */
	case 0x60A:		/* Mirage Jr (715/64) */
	case 0x60B:		/* Mirage 100 */
	case 0x60C:		/* Mirage 100+ */
	case 0x60D:		/* Electra 100 */
	case 0x60E:		/* Electra 120 */
		datareg = lasi_hpa - 0x00020000;
		break;

	default:
		datareg = lasi_hpa + 0x0000C000;
		break;
	}

	register_led_driver(DISPLAY_MODEL_LASI, LED_CMD_REG_NONE, datareg);
}
#endif

/*
 * lasi_power_off
 *
 * Function for lasi to turn off the power.  This is accomplished by setting a
 * 1 to PWR_ON_L in the Power Control Register
 *
 */

/* HPA of the LASI that controls power, recorded at probe time for use
 * by the power-off hook below. */
static unsigned long lasi_power_off_hpa __read_mostly;

static void lasi_power_off(void)
{
	unsigned long datareg;

	/* calculate addr of the Power Control Register */
	datareg = lasi_power_off_hpa + 0x0000C000;

	/* Power down the machine */
	gsc_writel(0x02, datareg);
}

/* Probe one LASI ASIC: read its version, init LEDs and IRQ routing,
 * allocate and wire up its GSC interrupt, register child devices, and
 * install the chassis power-off hook.  Returns 0 on success or a
 * negative error code (allocated resources are freed on failure). */
static int __init lasi_init_chip(struct parisc_device *dev)
{
	extern void (*chassis_power_off)(void);
	struct gsc_asic *lasi;
	struct gsc_irq gsc_irq;
	int ret;

	lasi = kzalloc(sizeof(*lasi), GFP_KERNEL);
	if (!lasi)
		return -ENOMEM;

	lasi->name = "Lasi";
	lasi->hpa = dev->hpa.start;

	/* Check the 4-bit (yes, only 4) version register */
	lasi->version = gsc_readl(lasi->hpa + LASI_VER) & 0xf;
	printk(KERN_INFO "%s version %d at 0x%lx found.\n",
		lasi->name, lasi->version, lasi->hpa);

	/* initialize the chassis LEDs really early */
	lasi_led_init(lasi->hpa);

	/* Stop LASI barking for a bit */
	lasi_init_irq(lasi);

	/* the IRQ lasi should use */
	dev->irq = gsc_alloc_irq(&gsc_irq);
	if (dev->irq < 0) {
		printk(KERN_ERR "%s(): cannot get GSC irq\n",
				__func__);
		kfree(lasi);
		return -EBUSY;
	}

	lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;

	ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
	if (ret < 0) {
		kfree(lasi);
		return ret;
	}

	/* enable IRQ's for devices below LASI */
	gsc_writel(lasi->eim, lasi->hpa + OFFSET_IAR);

	/* Done init'ing, register this driver */
	ret = gsc_common_setup(dev, lasi);
	if (ret) {
		kfree(lasi);
		return ret;
	}

	gsc_fixup_irqs(dev, lasi, lasi_choose_irq);

	/* initialize the power off function */
	/* FIXME: Record the LASI HPA for the power off function. This should
	 * ensure that only the first LASI (the one controlling the power off)
	 * should set the HPA here */
	lasi_power_off_hpa = lasi->hpa;
	chassis_power_off = lasi_power_off;

	return ret;
}

/* Match any bus adapter with sversion 0x81 (LASI itself) */
static struct parisc_device_id lasi_tbl[] = {
	{ HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00081 },
	{ 0, }
};

struct parisc_driver lasi_driver = {
	.name =		"lasi",
	.id_table =	lasi_tbl,
	.probe =	lasi_init_chip,
};
gpl-2.0
espenfjo/android_kernel_samsung_n8000
arch/arm/mach-exynos/pmu-exynos4.c
231
16603
/* linux/arch/arm/mach-exynos/pmu-exynos4.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * EXYNOS4210 - CPU PMU(Power Management Unit) support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/io.h> #include <linux/kernel.h> #include <mach/regs-clock.h> #include <mach/pmu.h> #include <mach/regs-pmu.h> #include <plat/cpu.h> static struct exynos4_pmu_conf *exynos4_pmu_config; static unsigned int entry_cnt; static struct exynos4_pmu_conf exynos4210_pmu_config[] = { /* { .reg = address, .val = { AFTR, LPA, SLEEP } */ { S5P_ARM_CORE0_SYS, { 0, 0, 2 } }, { S5P_DIS_IRQ_ARM_CORE0_LOCAL_SYS, { 0, 0, 0 } }, { S5P_DIS_IRQ_ARM_CORE0_CENTRAL_SYS, { 0, 0, 0 } }, { S5P_ARM_CORE1_SYS, { 0, 0, 2 } }, { S5P_DIS_IRQ_ARM_CORE1_LOCAL_SYS, { 0, 0, 0 } }, { S5P_DIS_IRQ_ARM_CORE1_CENTRAL_SYS, { 0, 0, 0 } }, { S5P_ARM_COMMON_SYS, { 0, 0, 2 } }, { S5P_ARM_L2_0_SYS, { 2, 2, 3 } }, { S5P_ARM_L2_1_SYS, { 2, 2, 3 } }, { S5P_CMU_ACLKSTOP_SYS, { 1, 0, 0 } }, { S5P_CMU_SCLKSTOP_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_SYS, { 1, 1, 0 } }, { S5P_APLL_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_MPLL_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_VPLL_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_EPLL_SYSCLK_SYS, { 1, 1, 0 } }, { S5P_CMU_CLKSTOP_GPS_ALIVE_SYS, { 1, 1, 0 } }, { S5P_CMU_RESET_GPSALIVE_SYS, { 1, 1, 0 } }, { S5P_CMU_CLKSTOP_CAM_SYS, { 1, 1, 0 } }, { S5P_CMU_CLKSTOP_TV_SYS, { 1, 1, 0 } }, { S5P_CMU_CLKSTOP_MFC_SYS, { 1, 1, 0 } }, { S5P_CMU_CLKSTOP_G3D_SYS, { 1, 1, 0 } }, { S5P_CMU_CLKSTOP_LCD0_SYS, { 1, 1, 0 } }, { S5P_CMU_CLKSTOP_LCD1_SYS, { 1, 1, 0 } }, { S5P_CMU_CLKSTOP_MAUDIO_SYS, { 1, 1, 0 } }, { S5P_CMU_CLKSTOP_GPS_SYS, { 1, 1, 0 } }, { S5P_CMU_RESET_CAM_SYS, { 1, 1, 0 } }, { S5P_CMU_RESET_TV_SYS, { 1, 1, 0 } }, { S5P_CMU_RESET_MFC_SYS, { 1, 1, 0 } }, { S5P_CMU_RESET_G3D_SYS, { 1, 1, 0 } }, { S5P_CMU_RESET_LCD0_SYS, { 1, 1, 0 } }, { 
S5P_CMU_RESET_LCD1_SYS, { 1, 1, 0 } }, { S5P_CMU_RESET_MAUDIO_SYS, { 1, 1, 0 } }, { S5P_CMU_RESET_GPS_SYS, { 1, 1, 0 } }, { S5P_TOP_BUS_SYS, { 3, 0, 0 } }, { S5P_TOP_RETENTION_SYS, { 1, 0, 1 } }, { S5P_TOP_PWR_SYS, { 3, 0, 3 } }, { S5P_LOGIC_RESET_SYS, { 1, 1, 0 } }, { S5P_ONENAND_MEM_SYS, { 3, 0, 0 } }, { S5P_MODIMIF_MEM_SYS, { 3, 0, 0 } }, { S5P_G2D_ACP_MEM_SYS, { 3, 0, 0 } }, { S5P_USBOTG_MEM_SYS, { 3, 0, 0 } }, { S5P_SDMMC_MEM_SYS, { 3, 0, 0 } }, { S5P_CSSYS_MEM_SYS, { 3, 0, 0 } }, { S5P_SECSS_MEM_SYS, { 3, 0, 0 } }, { S5P_PCIE_MEM_SYS, { 3, 0, 0 } }, { S5P_SATA_MEM_SYS, { 3, 0, 0 } }, { S5P_PAD_RETENTION_DRAM_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_MAUDIO_SYS, { 1, 1, 0 } }, { S5P_PAD_RETENTION_GPIO_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_UART_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_MMCA_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_MMCB_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_EBIA_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_EBIB_SYS, { 1, 0, 0 } }, { S5P_PAD_ISOLATION_SYS, { 1, 0, 0 } }, { S5P_PAD_ALV_SEL_SYS, { 1, 0, 0 } }, { S5P_XXTI_SYS, { 1, 1, 0 } }, { S5P_EXT_REGULATOR_SYS, { 1, 1, 0 } }, { S5P_GPIO_MODE_SYS, { 1, 0, 0 } }, { S5P_GPIO_MODE_MAUDIO_SYS, { 1, 1, 0 } }, { S5P_CAM_SYS, { 7, 0, 0 } }, { S5P_TV_SYS, { 7, 0, 0 } }, { S5P_MFC_SYS, { 7, 0, 0 } }, { S5P_G3D_SYS, { 7, 0, 0 } }, { S5P_LCD0_SYS, { 7, 0, 0 } }, { S5P_LCD1_SYS, { 7, 0, 0 } }, { S5P_MAUDIO_SYS, { 7, 7, 0 } }, { S5P_GPS_SYS, { 7, 0, 0 } }, { S5P_GPS_ALIVE_SYS, { 7, 0, 0 } }, { S5P_XUSBXTI_SYS, { 1, 1, 0 } }, }; static struct exynos4_pmu_conf exynos4212_pmu_config[] = { { S5P_ARM_CORE0_SYS, { 0, 0, 2 } }, { S5P_DIS_IRQ_ARM_CORE0_LOCAL_SYS, { 0, 0, 0 } }, { S5P_DIS_IRQ_ARM_CORE0_CENTRAL_SYS, { 0, 0, 0 } }, { S5P_ARM_CORE1_SYS, { 0, 0, 2 } }, { S5P_DIS_IRQ_ARM_CORE1_LOCAL_SYS, { 0, 0, 0 } }, { S5P_DIS_IRQ_ARM_CORE1_CENTRAL_SYS, { 0, 0, 0 } }, { S5P_ISP_ARM_SYS, { 1, 0, 0 } }, { S5P_DIS_IRQ_ISP_ARM_LOCAL_SYS, { 0, 0, 0 } }, { S5P_DIS_IRQ_ISP_ARM_CENTRAL_SYS, { 1, 0, 0 } }, { S5P_ARM_COMMON_SYS, { 0, 
0, 2 } }, { S5P_ARM_L2_0_SYS, { 0, 0, 3 } }, { S5P_ARM_L2_0_OPTION, { 0x10, 0x10, 0 } }, { S5P_ARM_L2_1_SYS, { 0, 0, 3 } }, { S5P_ARM_L2_1_OPTION, { 0x10, 0x10, 0 } }, { S5P_CMU_ACLKSTOP_SYS, { 1, 0, 0 } }, { S5P_CMU_SCLKSTOP_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_SYS, { 1, 1, 0 } }, { S5P_DRAM_FREQ_DOWN_SYS, { 1, 1, 1 } }, { S5P_DDRPHY_DLLOFF_SYS, { 1, 1, 1 } }, { S5P_LPDDR_PHY_DLL_LOCK_SYS, { 1, 1, 1 } }, { S5P_CMU_ACLKSTOP_COREBLK_SYS, { 1, 0, 0 } }, { S5P_CMU_SCLKSTOP_COREBLK_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_COREBLK_SYS, { 1, 1, 0 } }, { S5P_APLL_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_MPLL_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_VPLL_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_EPLL_SYSCLK_SYS, { 1, 1, 0 } }, { S5P_MPLLUSER_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_GPS_ALIVE_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_GPSALIVE_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_CAM_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_TV_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_MFC_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_G3D_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_LCD0_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_ISP_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_MAUDIO_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_GPS_SYS, { 0, 0, 0 } }, { S5P_CMU_RESET_CAM_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_TV_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_MFC_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_G3D_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_LCD0_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_ISP_SYS, { 0, 0, 0 } }, { S5P_CMU_RESET_MAUDIO_SYS, { 1, 1, 0 } }, { S5P_CMU_RESET_GPS_SYS, { 1, 0, 0 } }, { S5P_TOP_BUS_SYS, { 3, 0, 0 } }, { S5P_TOP_RETENTION_SYS, { 1, 0, 1 } }, { S5P_TOP_PWR_SYS, { 3, 0, 3 } }, { S5P_TOP_BUS_COREBLK_SYS, { 3, 0, 0 } }, { S5P_TOP_RETENTION_COREBLK_SYS, { 1, 0, 1 } }, { S5P_TOP_PWR_COREBLK_SYS, { 3, 0, 3 } }, { S5P_LOGIC_RESET_SYS, { 1, 1, 0 } }, { S5P_OSCCLK_GATE_SYS, { 1, 0, 1 } }, { S5P_LOGIC_RESET_COREBLK_SYS, { 1, 1, 0 } }, { S5P_OSCCLK_GATE_COREBLK_SYS, { 1, 0, 1 } }, { S5P_ONENAND_MEM_SYS, { 3, 0, 0 } }, { S5P_ONENAND_MEM_OPTION, { 0x10, 0x10, 0 } }, { 
S5P_HSI_MEM_SYS, { 3, 0, 0 } }, { S5P_HSI_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_G2D_ACP_MEM_SYS, { 3, 0, 0 } }, { S5P_G2D_ACP_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_USBOTG_MEM_SYS, { 3, 0, 0 } }, { S5P_USBOTG_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_SDMMC_MEM_SYS, { 3, 0, 0 } }, { S5P_SDMMC_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_CSSYS_MEM_SYS, { 3, 0, 0 } }, { S5P_CSSYS_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_SECSS_MEM_SYS, { 3, 0, 0 } }, { S5P_SECSS_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_ROTATOR_MEM_SYS, { 3, 0, 0 } }, { S5P_ROTATOR_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_PAD_RETENTION_DRAM_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_MAUDIO_SYS, { 1, 1, 0 } }, { S5P_PAD_RETENTION_GPIO_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_UART_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_MMCA_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_MMCB_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_EBIA_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_EBIB_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_GPIO_COREBLK_SYS, { 1, 0, 0 } }, { S5P_PAD_ISOLATION_SYS, { 1, 0, 0 } }, { S5P_PAD_ISOLATION_COREBLK_SYS, { 1, 0, 0 } }, { S5P_PAD_ALV_SEL_SYS, { 1, 0, 0 } }, { S5P_XXTI_SYS, { 1, 1, 0 } }, { S5P_EXT_REGULATOR_SYS, { 1, 1, 0 } }, { S5P_GPIO_MODE_SYS, { 1, 0, 0 } }, { S5P_GPIO_MODE_COREBLK_SYS, { 1, 0, 0 } }, { S5P_GPIO_MODE_MAUDIO_SYS, { 1, 1, 0 } }, { S5P_TOP_ASB_RESET_SYS, { 1, 1, 1 } }, { S5P_TOP_ASB_ISOLATION_SYS, { 1, 0, 1 } }, { S5P_CAM_SYS, { 7, 0, 0 } }, { S5P_TV_SYS, { 7, 0, 0 } }, { S5P_MFC_SYS, { 7, 0, 0 } }, { S5P_G3D_SYS, { 7, 0, 0 } }, { S5P_LCD0_SYS, { 7, 0, 0 } }, { S5P_ISP_SYS, { 7, 0, 0 } }, { S5P_MAUDIO_SYS, { 7, 7, 0 } }, { S5P_GPS_SYS, { 7, 0, 0 } }, { S5P_GPS_ALIVE_SYS, { 7, 0, 0 } }, { S5P_CMU_SYSCLK_ISP_SYS, { 0, 0, 0 } }, { S5P_CMU_SYSCLK_GPS_SYS, { 1, 0, 0 } }, { S5P_XUSBXTI_SYS, { 1, 1, 0 } }, }; static struct exynos4_pmu_conf exynos4412_pmu_config[] = { { S5P_ARM_CORE0_SYS, { 0, 0, 2 } }, { S5P_ARM_CORE1_SYS, { 0, 0, 2 } }, { S5P_ARM_CORE2_SYS, { 0, 0, 2 } }, { S5P_ARM_CORE3_SYS, { 0, 0, 2 } }, { 
S5P_ISP_ARM_SYS, { 1, 0, 0 } }, { S5P_DIS_IRQ_ISP_ARM_LOCAL_SYS, { 0, 0, 0 } }, { S5P_DIS_IRQ_ISP_ARM_CENTRAL_SYS, { 1, 0, 0 } }, { S5P_ARM_COMMON_SYS, { 0, 0, 2 } }, { S5P_ARM_L2_0_SYS, { 0, 0, 3 } }, { S5P_ARM_L2_0_OPTION, { 0x10, 0x10, 0 } }, { S5P_ARM_L2_1_SYS, { 0, 0, 3 } }, { S5P_ARM_L2_1_OPTION, { 0x10, 0x10, 0 } }, { S5P_CMU_ACLKSTOP_SYS, { 1, 0, 0 } }, { S5P_CMU_SCLKSTOP_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_SYS, { 1, 1, 0 } }, { S5P_CMU_ACLKSTOP_COREBLK_SYS, { 1, 0, 0 } }, { S5P_CMU_SCLKSTOP_COREBLK_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_COREBLK_SYS, { 1, 1, 0 } }, { S5P_APLL_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_MPLL_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_VPLL_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_EPLL_SYSCLK_SYS, { 1, 1, 0 } }, { S5P_MPLLUSER_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_GPS_ALIVE_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_GPSALIVE_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_CAM_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_TV_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_MFC_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_G3D_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_LCD0_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_ISP_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_MAUDIO_SYS, { 1, 0, 0 } }, { S5P_CMU_CLKSTOP_GPS_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_CAM_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_TV_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_MFC_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_G3D_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_LCD0_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_ISP_SYS, { 0, 0, 0 } }, { S5P_CMU_RESET_MAUDIO_SYS, { 1, 0, 0 } }, { S5P_CMU_RESET_GPS_SYS, { 1, 0, 0 } }, { S5P_TOP_BUS_SYS, { 3, 0, 0 } }, { S5P_TOP_RETENTION_SYS, { 1, 0, 1 } }, { S5P_TOP_PWR_SYS, { 3, 0, 3 } }, { S5P_TOP_BUS_COREBLK_SYS, { 3, 0, 0 } }, { S5P_TOP_RETENTION_COREBLK_SYS, { 1, 0, 1 } }, { S5P_TOP_PWR_COREBLK_SYS, { 3, 0, 3 } }, { S5P_LOGIC_RESET_SYS, { 1, 1, 0 } }, { S5P_OSCCLK_GATE_SYS, { 1, 0, 1 } }, { S5P_LOGIC_RESET_COREBLK_SYS, { 1, 1, 0 } }, { S5P_OSCCLK_GATE_COREBLK_SYS, { 1, 0, 1 } }, { S5P_HSI_MEM_SYS, { 3, 0, 0 } }, { S5P_HSI_MEM_OPTION, { 0x10, 
0x10, 0 } }, { S5P_G2D_ACP_MEM_SYS, { 3, 0, 0 } }, { S5P_G2D_ACP_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_USBOTG_MEM_SYS, { 3, 0, 0 } }, { S5P_USBOTG_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_SDMMC_MEM_SYS, { 3, 0, 0 } }, { S5P_SDMMC_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_CSSYS_MEM_SYS, { 3, 0, 0 } }, { S5P_CSSYS_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_SECSS_MEM_SYS, { 3, 0, 0 } }, { S5P_SECSS_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_ROTATOR_MEM_SYS, { 3, 0, 0 } }, { S5P_ROTATOR_MEM_OPTION, { 0x10, 0x10, 0 } }, { S5P_PAD_RETENTION_DRAM_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_MAUDIO_SYS, { 1, 1, 0 } }, { S5P_PAD_RETENTION_GPIO_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_UART_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_MMCA_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_MMCB_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_EBIA_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_EBIB_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_GPIO_COREBLK_SYS, { 1, 0, 0 } }, { S5P_PAD_ISOLATION_SYS, { 1, 0, 0 } }, { S5P_PAD_ISOLATION_COREBLK_SYS, { 1, 0, 0 } }, { S5P_PAD_ALV_SEL_SYS, { 1, 0, 0 } }, { S5P_XXTI_SYS, { 1, 1, 0 } }, { S5P_EXT_REGULATOR_SYS, { 1, 1, 0 } }, { S5P_GPIO_MODE_SYS, { 1, 0, 0 } }, { S5P_GPIO_MODE_COREBLK_SYS, { 1, 0, 0 } }, { S5P_GPIO_MODE_MAUDIO_SYS, { 1, 1, 0 } }, { S5P_TOP_ASB_RESET_SYS, { 1, 1, 1 } }, { S5P_TOP_ASB_ISOLATION_SYS, { 1, 0, 1 } }, { S5P_CAM_SYS, { 7, 0, 0 } }, { S5P_TV_SYS, { 7, 0, 0 } }, { S5P_MFC_SYS, { 7, 0, 0 } }, { S5P_G3D_SYS, { 7, 0, 0 } }, { S5P_LCD0_SYS, { 7, 0, 0 } }, { S5P_ISP_SYS, { 7, 0, 0 } }, { S5P_MAUDIO_SYS, { 7, 7, 0 } }, { S5P_GPS_SYS, { 7, 0, 0 } }, { S5P_GPS_ALIVE_SYS, { 7, 0, 0 } }, { S5P_CMU_SYSCLK_ISP_SYS, { 1, 0, 0 } }, { S5P_CMU_SYSCLK_GPS_SYS, { 1, 0, 0 } }, { S5P_XUSBXTI_SYS, { 1, 1, 0 } }, }; static struct exynos4_pmu_conf exynos4x12_c2c_pmu_conf[] = { { S5P_CMU_RESET_COREBLK_SYS, { 1, 1, 1 } }, { S5P_MPLLUSER_SYSCLK_SYS, { 1, 0, 0 } }, { S5P_TOP_RETENTION_COREBLK_SYS, { 1, 0, 0 } }, { S5P_TOP_PWR_COREBLK_SYS, { 3, 0, 0 } }, { S5P_LOGIC_RESET_COREBLK_SYS, { 1, 
1, 1 } }, { S5P_OSCCLK_GATE_COREBLK_SYS, { 1, 0, 0 } }, { S5P_PAD_RETENTION_GPIO_COREBLK_SYS, { 1, 1, 1 } }, { S5P_TOP_ASB_RESET_SYS, { 1, 1, 0 } }, { S5P_TOP_ASB_ISOLATION_SYS, { 1, 0, 0 } }, }; static struct exynos4_pmu_conf exynos4212_c2c_pmu_conf[] = { { S5P_LPDDR_PHY_DLL_LOCK_SYS, { 1, 0, 0 } }, }; static struct exynos4_c2c_pmu_conf exynos4_config_for_c2c[] = { /* Register Address Value */ { S5P_TOP_BUS_COREBLK_SYS, 0x0}, { S5P_TOP_PWR_COREBLK_SYS, 0x0}, { S5P_MPLL_SYSCLK_SYS, 0x0}, #ifdef CONFIG_MACH_SMDK4212 { S5P_XUSBXTI_SYS, 0x0}, #endif }; void exynos4_pmu_xclkout_set(unsigned int enable, enum xclkout_select source) { unsigned int tmp; if (enable) { tmp = __raw_readl(S5P_PMU_DEBUG); /* CLKOUT enable */ tmp &= ~(0xF << S5P_PMU_CLKOUT_SEL_SHIFT | S5P_CLKOUT_DISABLE); tmp |= (source << S5P_PMU_CLKOUT_SEL_SHIFT); __raw_writel(tmp, S5P_PMU_DEBUG); } else { tmp = __raw_readl(S5P_PMU_DEBUG); tmp |= S5P_CLKOUT_DISABLE; /* CLKOUT disable */ __raw_writel(tmp, S5P_PMU_DEBUG); } printk(KERN_DEBUG "pmu_debug: 0x%08x\n", __raw_readl(S5P_PMU_DEBUG)); } EXPORT_SYMBOL_GPL(exynos4_pmu_xclkout_set); void exynos4_sys_powerdown_xusbxti_control(unsigned int enable) { unsigned int count = entry_cnt; if (enable) exynos4_pmu_config[count - 1].val[SYS_SLEEP] = 0x1; else exynos4_pmu_config[count - 1].val[SYS_SLEEP] = 0x0; printk(KERN_DEBUG "xusbxti_control: %ld\n", exynos4_pmu_config[count - 1].val[SYS_SLEEP]); } EXPORT_SYMBOL_GPL(exynos4_sys_powerdown_xusbxti_control); void exynos4_sys_powerdown_conf(enum sys_powerdown mode) { unsigned int count = entry_cnt; unsigned int tmp; for (; count > 0; count--) __raw_writel(exynos4_pmu_config[count - 1].val[mode], exynos4_pmu_config[count - 1].reg); if ((!soc_is_exynos4210()) && (exynos4_is_c2c_use())) { for (count = 0 ; count < ARRAY_SIZE(exynos4x12_c2c_pmu_conf) ; count++) __raw_writel(exynos4x12_c2c_pmu_conf[count].val[mode], exynos4x12_c2c_pmu_conf[count].reg); if (soc_is_exynos4212()) __raw_writel(exynos4212_c2c_pmu_conf[0].val[mode], 
exynos4212_c2c_pmu_conf[0].reg); for (count = 0 ; count < ARRAY_SIZE(exynos4_config_for_c2c) ; count++) { tmp = __raw_readl(exynos4_config_for_c2c[count].reg); tmp |= exynos4_config_for_c2c[count].val; __raw_writel(tmp, exynos4_config_for_c2c[count].reg); } } } void exynos4_c2c_request_pwr_mode(enum c2c_pwr_mode mode) { exynos4_config_for_c2c[0].val = 0x3; switch (mode) { /* If C2C mode is MAXIMAL LATENCY */ case MAX_LATENCY: exynos4_config_for_c2c[1].val = 0x0; if (soc_is_exynos4412() && (samsung_rev() < EXYNOS4412_REV_1_0)) exynos4_config_for_c2c[2].val = 0x1; else exynos4_config_for_c2c[2].val = 0x0; #ifdef CONFIG_MACH_SMDK4212 exynos4_config_for_c2c[3].val = 0x0; #endif break; /* If C2C mode is Minimal or Short LATENCY */ default: exynos4_config_for_c2c[1].val = 0x3; exynos4_config_for_c2c[2].val = 0x1; #ifdef CONFIG_MACH_SMDK4212 exynos4_config_for_c2c[3].val = 0x1; #endif break; } } static int __init exynos4_pmu_init(void) { unsigned int i; if(!soc_is_exynos4210()) exynos4_reset_assert_ctrl(1); if (soc_is_exynos4210()) { exynos4_pmu_config = exynos4210_pmu_config; entry_cnt = ARRAY_SIZE(exynos4210_pmu_config); printk(KERN_INFO "%s: PMU supports 4210(%d)\n", __func__, entry_cnt); } else if (soc_is_exynos4212()) { exynos4_pmu_config = exynos4212_pmu_config; entry_cnt = ARRAY_SIZE(exynos4212_pmu_config); printk(KERN_INFO "%s: PMU supports 4212(%d)\n", __func__, entry_cnt); } else if (soc_is_exynos4412()) { exynos4_pmu_config = exynos4412_pmu_config; entry_cnt = ARRAY_SIZE(exynos4412_pmu_config); printk(KERN_INFO "%s: PMU supports 4412(%d)\n", __func__, entry_cnt); } else { printk(KERN_INFO "%s: PMU not supported\n", __func__); } return 0; } arch_initcall(exynos4_pmu_init);
gpl-2.0
tinyclub/linux-loongson-community
drivers/crypto/qat/qat_common/adf_transport.c
231
17448
/* This file is provided under a dual BSD/GPLv2 license. When using or redistributing this file, you may do so under either license. GPL LICENSE SUMMARY Copyright(c) 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Contact Information: qat-linux@intel.com BSD LICENSE Copyright(c) 2014 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/delay.h> #include "adf_accel_devices.h" #include "adf_transport_internal.h" #include "adf_transport_access_macros.h" #include "adf_cfg.h" #include "adf_common_drv.h" static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) { uint32_t div = data >> shift; uint32_t mult = div << shift; return data - mult; } static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size) { if (((size - 1) & addr) != 0) return -EFAULT; return 0; } static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) { int i = ADF_MIN_RING_SIZE; for (; i <= ADF_MAX_RING_SIZE; i++) if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) return i; return ADF_DEFAULT_RING_SIZE; } static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) { spin_lock(&bank->lock); if (bank->ring_mask & (1 << ring)) { spin_unlock(&bank->lock); return -EFAULT; } bank->ring_mask |= (1 << ring); spin_unlock(&bank->lock); return 0; } static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) { spin_lock(&bank->lock); bank->ring_mask &= ~(1 << ring); spin_unlock(&bank->lock); } static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) { spin_lock_bh(&bank->lock); bank->irq_mask |= (1 << ring); spin_unlock_bh(&bank->lock); WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number, bank->irq_coalesc_timer); } static void 
adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) { spin_lock_bh(&bank->lock); bank->irq_mask &= ~(1 << ring); spin_unlock_bh(&bank->lock); WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); } int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) { if (atomic_add_return(1, ring->inflights) > ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { atomic_dec(ring->inflights); return -EAGAIN; } spin_lock_bh(&ring->lock); memcpy(ring->base_addr + ring->tail, msg, ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); ring->tail = adf_modulo(ring->tail + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), ADF_RING_SIZE_MODULO(ring->ring_size)); WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring->tail); spin_unlock_bh(&ring->lock); return 0; } static int adf_handle_response(struct adf_etr_ring_data *ring) { uint32_t msg_counter = 0; uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head); while (*msg != ADF_RING_EMPTY_SIG) { ring->callback((uint32_t *)msg); *msg = ADF_RING_EMPTY_SIG; ring->head = adf_modulo(ring->head + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), ADF_RING_SIZE_MODULO(ring->ring_size)); msg_counter++; msg = (uint32_t *)(ring->base_addr + ring->head); } if (msg_counter > 0) { WRITE_CSR_RING_HEAD(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring->head); atomic_sub(msg_counter, ring->inflights); } return 0; } static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) { uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size); WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring_config); } static void adf_configure_rx_ring(struct adf_etr_ring_data *ring) { uint32_t ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size, ADF_RING_NEAR_WATERMARK_512, ADF_RING_NEAR_WATERMARK_0); WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring_config); } static int adf_init_ring(struct adf_etr_ring_data 
*ring) { struct adf_etr_bank_data *bank = ring->bank; struct adf_accel_dev *accel_dev = bank->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; uint64_t ring_base; uint32_t ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev), ring_size_bytes, &ring->dma_addr, GFP_KERNEL); if (!ring->base_addr) return -ENOMEM; memset(ring->base_addr, 0x7F, ring_size_bytes); /* The base_addr has to be aligned to the size of the buffer */ if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) { dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n"); dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, ring->base_addr, ring->dma_addr); return -EFAULT; } if (hw_data->tx_rings_mask & (1 << ring->ring_number)) adf_configure_tx_ring(ring); else adf_configure_rx_ring(ring); ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size); WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring_base); spin_lock_init(&ring->lock); return 0; } static void adf_cleanup_ring(struct adf_etr_ring_data *ring) { uint32_t ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); if (ring->base_addr) { memset(ring->base_addr, 0x7F, ring_size_bytes); dma_free_coherent(&GET_DEV(ring->bank->accel_dev), ring_size_bytes, ring->base_addr, ring->dma_addr); } } int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr) { struct adf_etr_data *transport_data = accel_dev->transport; struct adf_etr_bank_data *bank; struct adf_etr_ring_data *ring; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; uint32_t ring_num; int ret; if (bank_num >= GET_MAX_BANKS(accel_dev)) { 
dev_err(&GET_DEV(accel_dev), "Invalid bank number\n"); return -EFAULT; } if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { dev_err(&GET_DEV(accel_dev), "Invalid msg size\n"); return -EFAULT; } if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs), ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) { dev_err(&GET_DEV(accel_dev), "Invalid ring size for given msg size\n"); return -EFAULT; } if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) { dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n", section, ring_name); return -EFAULT; } if (kstrtouint(val, 10, &ring_num)) { dev_err(&GET_DEV(accel_dev), "Can't get ring number\n"); return -EFAULT; } bank = &transport_data->banks[bank_num]; if (adf_reserve_ring(bank, ring_num)) { dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n", ring_num, ring_name); return -EFAULT; } ring = &bank->rings[ring_num]; ring->ring_number = ring_num; ring->bank = bank; ring->callback = callback; ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size); ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); ring->head = 0; ring->tail = 0; atomic_set(ring->inflights, 0); ret = adf_init_ring(ring); if (ret) goto err; /* Enable HW arbitration for the given ring */ accel_dev->hw_device->hw_arb_ring_enable(ring); if (adf_ring_debugfs_add(ring, ring_name)) { dev_err(&GET_DEV(accel_dev), "Couldn't add ring debugfs entry\n"); ret = -EFAULT; goto err; } /* Enable interrupts if needed */ if (callback && (!poll_mode)) adf_enable_ring_irq(bank, ring->ring_number); *ring_ptr = ring; return 0; err: adf_cleanup_ring(ring); adf_unreserve_ring(bank, ring_num); accel_dev->hw_device->hw_arb_ring_disable(ring); return ret; } void adf_remove_ring(struct adf_etr_ring_data *ring) { struct adf_etr_bank_data *bank = ring->bank; struct adf_accel_dev *accel_dev = bank->accel_dev; /* Disable interrupts for the given ring */ adf_disable_ring_irq(bank, ring->ring_number); /* Clear PCI config space */ WRITE_CSR_RING_CONFIG(bank->csr_addr, 
bank->bank_number, ring->ring_number, 0); WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number, ring->ring_number, 0); adf_ring_debugfs_rm(ring); adf_unreserve_ring(bank, ring->ring_number); /* Disable HW arbitration for the given ring */ accel_dev->hw_device->hw_arb_ring_disable(ring); adf_cleanup_ring(ring); } static void adf_ring_response_handler(struct adf_etr_bank_data *bank) { uint32_t empty_rings, i; empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number); empty_rings = ~empty_rings & bank->irq_mask; for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) { if (empty_rings & (1 << i)) adf_handle_response(&bank->rings[i]); } } /** * adf_response_handler() - Bottom half handler response handler * @bank_addr: Address of a ring bank for with the BH was scheduled. * * Function is the bottom half handler for the response from acceleration * device. There is one handler for every ring bank. Function checks all * communication rings in the bank. * To be used by QAT device specific drivers. 
* * Return: void */ void adf_response_handler(unsigned long bank_addr) { struct adf_etr_bank_data *bank = (void *)bank_addr; /* Handle all the responses nad reenable IRQs */ adf_ring_response_handler(bank); WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, bank->irq_mask); } EXPORT_SYMBOL_GPL(adf_response_handler); static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, const char *section, const char *format, uint32_t key, uint32_t *value) { char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key); if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf)) return -EFAULT; if (kstrtouint(val_buf, 10, value)) return -EFAULT; return 0; } static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank, const char *section, uint32_t bank_num_in_accel) { if (adf_get_cfg_int(bank->accel_dev, section, ADF_ETRMGR_COALESCE_TIMER_FORMAT, bank_num_in_accel, &bank->irq_coalesc_timer)) bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME; if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer || ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer) bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME; } static int adf_init_bank(struct adf_accel_dev *accel_dev, struct adf_etr_bank_data *bank, uint32_t bank_num, void __iomem *csr_addr) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_etr_ring_data *ring; struct adf_etr_ring_data *tx_ring; uint32_t i, coalesc_enabled = 0; memset(bank, 0, sizeof(*bank)); bank->bank_number = bank_num; bank->csr_addr = csr_addr; bank->accel_dev = accel_dev; spin_lock_init(&bank->lock); /* Enable IRQ coalescing always. This will allow to use * the optimised flag and coalesc register. 
* If it is disabled in the config file just use min time value */ if ((adf_get_cfg_int(accel_dev, "Accelerator0", ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num, &coalesc_enabled) == 0) && coalesc_enabled) adf_get_coalesc_timer(bank, "Accelerator0", bank_num); else bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME; for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0); WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); ring = &bank->rings[i]; if (hw_data->tx_rings_mask & (1 << i)) { ring->inflights = kzalloc_node(sizeof(atomic_t), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!ring->inflights) goto err; } else { if (i < hw_data->tx_rx_gap) { dev_err(&GET_DEV(accel_dev), "Invalid tx rings mask config\n"); goto err; } tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; ring->inflights = tx_ring->inflights; } } if (adf_bank_debugfs_add(bank)) { dev_err(&GET_DEV(accel_dev), "Failed to add bank debugfs entry\n"); goto err; } WRITE_CSR_INT_SRCSEL(csr_addr, bank_num); return 0; err: for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { ring = &bank->rings[i]; if (hw_data->tx_rings_mask & (1 << i) && ring->inflights) kfree(ring->inflights); } return -ENOMEM; } /** * adf_init_etr_data() - Initialize transport rings for acceleration device * @accel_dev: Pointer to acceleration device. * * Function is the initializes the communications channels (rings) to the * acceleration device accel_dev. * To be used by QAT device specific drivers. * * Return: 0 on success, error code othewise. 
*/ int adf_init_etr_data(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data; struct adf_hw_device_data *hw_data = accel_dev->hw_device; void __iomem *csr_addr; uint32_t size; uint32_t num_banks = 0; int i, ret; etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!etr_data) return -ENOMEM; num_banks = GET_MAX_BANKS(accel_dev); size = num_banks * sizeof(struct adf_etr_bank_data); etr_data->banks = kzalloc_node(size, GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!etr_data->banks) { ret = -ENOMEM; goto err_bank; } accel_dev->transport = etr_data; i = hw_data->get_etr_bar_id(hw_data); csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr; /* accel_dev->debugfs_dir should always be non-NULL here */ etr_data->debug = debugfs_create_dir("transport", accel_dev->debugfs_dir); if (!etr_data->debug) { dev_err(&GET_DEV(accel_dev), "Unable to create transport debugfs entry\n"); ret = -ENOENT; goto err_bank_debug; } for (i = 0; i < num_banks; i++) { ret = adf_init_bank(accel_dev, &etr_data->banks[i], i, csr_addr); if (ret) goto err_bank_all; } return 0; err_bank_all: debugfs_remove(etr_data->debug); err_bank_debug: kfree(etr_data->banks); err_bank: kfree(etr_data); accel_dev->transport = NULL; return ret; } EXPORT_SYMBOL_GPL(adf_init_etr_data); static void cleanup_bank(struct adf_etr_bank_data *bank) { uint32_t i; for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { struct adf_accel_dev *accel_dev = bank->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_etr_ring_data *ring = &bank->rings[i]; if (bank->ring_mask & (1 << i)) adf_cleanup_ring(ring); if (hw_data->tx_rings_mask & (1 << i)) kfree(ring->inflights); } adf_bank_debugfs_rm(bank); memset(bank, 0, sizeof(*bank)); } static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data = accel_dev->transport; uint32_t i, num_banks = GET_MAX_BANKS(accel_dev); for (i = 0; i < num_banks; i++) 
cleanup_bank(&etr_data->banks[i]); } /** * adf_cleanup_etr_data() - Clear transport rings for acceleration device * @accel_dev: Pointer to acceleration device. * * Function is the clears the communications channels (rings) of the * acceleration device accel_dev. * To be used by QAT device specific drivers. * * Return: void */ void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data = accel_dev->transport; if (etr_data) { adf_cleanup_etr_handles(accel_dev); debugfs_remove(etr_data->debug); kfree(etr_data->banks); kfree(etr_data); accel_dev->transport = NULL; } } EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
gpl-2.0
jdlfg/lith-omap
net/9p/protocol.c
743
12054
/* * net/9p/protocol.c * * 9P Protocol Support Code * * Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com> * * Base on code from Anthony Liguori <aliguori@us.ibm.com> * Copyright (C) 2008 by IBM, Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/types.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "protocol.h" #ifndef MIN #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #endif #ifndef MAX #define MAX(a, b) (((a) > (b)) ? 
(a) : (b)) #endif #ifndef offset_of #define offset_of(type, memb) \ ((unsigned long)(&((type *)0)->memb)) #endif #ifndef container_of #define container_of(obj, type, memb) \ ((type *)(((char *)obj) - offset_of(type, memb))) #endif static int p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...); #ifdef CONFIG_NET_9P_DEBUG void p9pdu_dump(int way, struct p9_fcall *pdu) { int i, n; u8 *data = pdu->sdata; int datalen = pdu->size; char buf[255]; int buflen = 255; i = n = 0; if (datalen > (buflen-16)) datalen = buflen-16; while (i < datalen) { n += scnprintf(buf + n, buflen - n, "%02x ", data[i]); if (i%4 == 3) n += scnprintf(buf + n, buflen - n, " "); if (i%32 == 31) n += scnprintf(buf + n, buflen - n, "\n"); i++; } n += scnprintf(buf + n, buflen - n, "\n"); if (way) P9_DPRINTK(P9_DEBUG_PKT, "[[[(%d) %s\n", datalen, buf); else P9_DPRINTK(P9_DEBUG_PKT, "]]](%d) %s\n", datalen, buf); } #else void p9pdu_dump(int way, struct p9_fcall *pdu) { } #endif EXPORT_SYMBOL(p9pdu_dump); void p9stat_free(struct p9_wstat *stbuf) { kfree(stbuf->name); kfree(stbuf->uid); kfree(stbuf->gid); kfree(stbuf->muid); kfree(stbuf->extension); } EXPORT_SYMBOL(p9stat_free); static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size) { size_t len = MIN(pdu->size - pdu->offset, size); memcpy(data, &pdu->sdata[pdu->offset], len); pdu->offset += len; return size - len; } static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size) { size_t len = MIN(pdu->capacity - pdu->size, size); memcpy(&pdu->sdata[pdu->size], data, len); pdu->size += len; return size - len; } static size_t pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size) { size_t len = MIN(pdu->capacity - pdu->size, size); int err = copy_from_user(&pdu->sdata[pdu->size], udata, len); if (err) printk(KERN_WARNING "pdu_write_u returning: %d\n", err); pdu->size += len; return size - len; } /* b - int8_t w - int16_t d - int32_t q - int64_t s - string S - stat Q - qid D - data blob 
(int32_t size followed by void *, results are not freed) T - array of strings (int16_t count, followed by strings) R - array of qids (int16_t count, followed by qids) ? - if optional = 1, continue parsing */ static int p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) { const char *ptr; int errcode = 0; for (ptr = fmt; *ptr; ptr++) { switch (*ptr) { case 'b':{ int8_t *val = va_arg(ap, int8_t *); if (pdu_read(pdu, val, sizeof(*val))) { errcode = -EFAULT; break; } } break; case 'w':{ int16_t *val = va_arg(ap, int16_t *); __le16 le_val; if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } *val = le16_to_cpu(le_val); } break; case 'd':{ int32_t *val = va_arg(ap, int32_t *); __le32 le_val; if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } *val = le32_to_cpu(le_val); } break; case 'q':{ int64_t *val = va_arg(ap, int64_t *); __le64 le_val; if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } *val = le64_to_cpu(le_val); } break; case 's':{ char **sptr = va_arg(ap, char **); int16_t len; int size; errcode = p9pdu_readf(pdu, optional, "w", &len); if (errcode) break; size = MAX(len, 0); *sptr = kmalloc(size + 1, GFP_KERNEL); if (*sptr == NULL) { errcode = -EFAULT; break; } if (pdu_read(pdu, *sptr, size)) { errcode = -EFAULT; kfree(*sptr); *sptr = NULL; } else (*sptr)[size] = 0; } break; case 'Q':{ struct p9_qid *qid = va_arg(ap, struct p9_qid *); errcode = p9pdu_readf(pdu, optional, "bdq", &qid->type, &qid->version, &qid->path); } break; case 'S':{ struct p9_wstat *stbuf = va_arg(ap, struct p9_wstat *); memset(stbuf, 0, sizeof(struct p9_wstat)); stbuf->n_uid = stbuf->n_gid = stbuf->n_muid = -1; errcode = p9pdu_readf(pdu, optional, "wwdQdddqssss?sddd", &stbuf->size, &stbuf->type, &stbuf->dev, &stbuf->qid, &stbuf->mode, &stbuf->atime, &stbuf->mtime, &stbuf->length, &stbuf->name, &stbuf->uid, &stbuf->gid, &stbuf->muid, &stbuf->extension, &stbuf->n_uid, &stbuf->n_gid, &stbuf->n_muid); if 
(errcode) p9stat_free(stbuf); } break; case 'D':{ int32_t *count = va_arg(ap, int32_t *); void **data = va_arg(ap, void **); errcode = p9pdu_readf(pdu, optional, "d", count); if (!errcode) { *count = MIN(*count, pdu->size - pdu->offset); *data = &pdu->sdata[pdu->offset]; } } break; case 'T':{ int16_t *nwname = va_arg(ap, int16_t *); char ***wnames = va_arg(ap, char ***); errcode = p9pdu_readf(pdu, optional, "w", nwname); if (!errcode) { *wnames = kmalloc(sizeof(char *) * *nwname, GFP_KERNEL); if (!*wnames) errcode = -ENOMEM; } if (!errcode) { int i; for (i = 0; i < *nwname; i++) { errcode = p9pdu_readf(pdu, optional, "s", &(*wnames)[i]); if (errcode) break; } } if (errcode) { if (*wnames) { int i; for (i = 0; i < *nwname; i++) kfree((*wnames)[i]); } kfree(*wnames); *wnames = NULL; } } break; case 'R':{ int16_t *nwqid = va_arg(ap, int16_t *); struct p9_qid **wqids = va_arg(ap, struct p9_qid **); *wqids = NULL; errcode = p9pdu_readf(pdu, optional, "w", nwqid); if (!errcode) { *wqids = kmalloc(*nwqid * sizeof(struct p9_qid), GFP_KERNEL); if (*wqids == NULL) errcode = -ENOMEM; } if (!errcode) { int i; for (i = 0; i < *nwqid; i++) { errcode = p9pdu_readf(pdu, optional, "Q", &(*wqids)[i]); if (errcode) break; } } if (errcode) { kfree(*wqids); *wqids = NULL; } } break; case '?': if (!optional) return 0; break; default: BUG(); break; } if (errcode) break; } return errcode; } int p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) { const char *ptr; int errcode = 0; for (ptr = fmt; *ptr; ptr++) { switch (*ptr) { case 'b':{ int8_t val = va_arg(ap, int); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } break; case 'w':{ __le16 val = cpu_to_le16(va_arg(ap, int)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } break; case 'd':{ __le32 val = cpu_to_le32(va_arg(ap, int32_t)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } break; case 'q':{ __le64 val = cpu_to_le64(va_arg(ap, int64_t)); if (pdu_write(pdu, &val, 
sizeof(val))) errcode = -EFAULT; } break; case 's':{ const char *sptr = va_arg(ap, const char *); int16_t len = 0; if (sptr) len = MIN(strlen(sptr), USHORT_MAX); errcode = p9pdu_writef(pdu, optional, "w", len); if (!errcode && pdu_write(pdu, sptr, len)) errcode = -EFAULT; } break; case 'Q':{ const struct p9_qid *qid = va_arg(ap, const struct p9_qid *); errcode = p9pdu_writef(pdu, optional, "bdq", qid->type, qid->version, qid->path); } break; case 'S':{ const struct p9_wstat *stbuf = va_arg(ap, const struct p9_wstat *); errcode = p9pdu_writef(pdu, optional, "wwdQdddqssss?sddd", stbuf->size, stbuf->type, stbuf->dev, &stbuf->qid, stbuf->mode, stbuf->atime, stbuf->mtime, stbuf->length, stbuf->name, stbuf->uid, stbuf->gid, stbuf->muid, stbuf->extension, stbuf->n_uid, stbuf->n_gid, stbuf->n_muid); } break; case 'D':{ int32_t count = va_arg(ap, int32_t); const void *data = va_arg(ap, const void *); errcode = p9pdu_writef(pdu, optional, "d", count); if (!errcode && pdu_write(pdu, data, count)) errcode = -EFAULT; } break; case 'U':{ int32_t count = va_arg(ap, int32_t); const char __user *udata = va_arg(ap, const void __user *); errcode = p9pdu_writef(pdu, optional, "d", count); if (!errcode && pdu_write_u(pdu, udata, count)) errcode = -EFAULT; } break; case 'T':{ int16_t nwname = va_arg(ap, int); const char **wnames = va_arg(ap, const char **); errcode = p9pdu_writef(pdu, optional, "w", nwname); if (!errcode) { int i; for (i = 0; i < nwname; i++) { errcode = p9pdu_writef(pdu, optional, "s", wnames[i]); if (errcode) break; } } } break; case 'R':{ int16_t nwqid = va_arg(ap, int); struct p9_qid *wqids = va_arg(ap, struct p9_qid *); errcode = p9pdu_writef(pdu, optional, "w", nwqid); if (!errcode) { int i; for (i = 0; i < nwqid; i++) { errcode = p9pdu_writef(pdu, optional, "Q", &wqids[i]); if (errcode) break; } } } break; case '?': if (!optional) return 0; break; default: BUG(); break; } if (errcode) break; } return errcode; } int p9pdu_readf(struct p9_fcall *pdu, int optional, 
const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = p9pdu_vreadf(pdu, optional, fmt, ap); va_end(ap); return ret; } static int p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = p9pdu_vwritef(pdu, optional, fmt, ap); va_end(ap); return ret; } int p9stat_read(char *buf, int len, struct p9_wstat *st, int dotu) { struct p9_fcall fake_pdu; int ret; fake_pdu.size = len; fake_pdu.capacity = len; fake_pdu.sdata = buf; fake_pdu.offset = 0; ret = p9pdu_readf(&fake_pdu, dotu, "S", st); if (ret) { P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret); p9pdu_dump(1, &fake_pdu); } return ret; } EXPORT_SYMBOL(p9stat_read); int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type) { return p9pdu_writef(pdu, 0, "dbw", 0, type, tag); } int p9pdu_finalize(struct p9_fcall *pdu) { int size = pdu->size; int err; pdu->size = 0; err = p9pdu_writef(pdu, 0, "d", size); pdu->size = size; #ifdef CONFIG_NET_9P_DEBUG if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT) p9pdu_dump(0, pdu); #endif P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size, pdu->id, pdu->tag); return err; } void p9pdu_reset(struct p9_fcall *pdu) { pdu->offset = 0; pdu->size = 0; }
gpl-2.0
desaishivam26/android_kernel_motorola_msm8916
drivers/platform/msm/msm_bus/msm_bus_config.c
999
2352
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/radix-tree.h> #include <linux/clk.h> #include <linux/msm-bus-board.h> #include <linux/msm-bus.h> #include "msm_bus_core.h" static DEFINE_MUTEX(msm_bus_config_lock); /** * msm_bus_axi_porthalt() - Halt the given axi master port * @master_port: AXI Master port to be halted */ int msm_bus_axi_porthalt(int master_port) { int ret = 0; int priv_id; struct msm_bus_fabric_device *fabdev; priv_id = msm_bus_board_get_iid(master_port); MSM_BUS_DBG("master_port: %d iid: %d fabid%d\n", master_port, priv_id, GET_FABID(priv_id)); fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id)); if (IS_ERR_OR_NULL(fabdev)) { MSM_BUS_ERR("Fabric device not found for mport: %d\n", master_port); return -ENODEV; } mutex_lock(&msm_bus_config_lock); ret = fabdev->algo->port_halt(fabdev, priv_id); mutex_unlock(&msm_bus_config_lock); return ret; } EXPORT_SYMBOL(msm_bus_axi_porthalt); /** * msm_bus_axi_portunhalt() - Unhalt the given axi master port * @master_port: AXI Master port to be unhalted */ int msm_bus_axi_portunhalt(int master_port) { int ret = 0; int priv_id; struct msm_bus_fabric_device *fabdev; priv_id = msm_bus_board_get_iid(master_port); MSM_BUS_DBG("master_port: %d iid: %d fabid: %d\n", master_port, priv_id, GET_FABID(priv_id)); fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id)); if 
(IS_ERR_OR_NULL(fabdev)) { MSM_BUS_ERR("Fabric device not found for mport: %d\n", master_port); return -ENODEV; } mutex_lock(&msm_bus_config_lock); ret = fabdev->algo->port_unhalt(fabdev, priv_id); mutex_unlock(&msm_bus_config_lock); return ret; } EXPORT_SYMBOL(msm_bus_axi_portunhalt);
gpl-2.0
crimsonthunder/kernel_samsung_trlte
drivers/clocksource/tcb_clksrc.c
2279
9422
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>


/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a time solution:
 *
 *   - Two channels combine to create a free-running 32 bit counter
 *     with a base rate of 5+ MHz, packaged as a clocksource (with
 *     resolution better than 200 nsec).
 *   - Some chips support 32 bit counter. A single channel is used for
 *     this 32 bit free-running counter. the second channel is not used.
 *
 *   - The third channel may be used to provide a 16-bit clockevent
 *     source, used in either periodic or oneshot mode.  This runs
 *     at 32 KiHZ, and can handle delays of up to two seconds.
 *
 * A boot clocksource and clockevent source are also currently needed,
 * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
 * this code can be used when init_timers() is called, well before most
 * devices are set up.  (Some low end AT91 parts, which can run uClinux,
 * have only the timers in one TC block... they currently don't support
 * the tclib code, because of that initialization issue.)
 *
 * REVISIT behavior during system suspend states... we should disable
 * all clocks and save the power.  Easily done for clockevent devices,
 * but clocksources won't necessarily get the needed notifications.
 * For deeper system sleep states, this will be mandatory...
 */

/* Base of the TC block's memory-mapped registers; set in tcb_clksrc_init(). */
static void __iomem *tcaddr;

/*
 * Read the 32-bit time value formed by chaining two 16-bit channels:
 * channel 1 holds the high half, channel 0 the low half.  The two
 * halves cannot be read atomically, so re-read the high half until it
 * is stable around the low-half read; this guards against a carry from
 * channel 0 into channel 1 between the two reads.  Interrupts are
 * masked so the retry loop is bounded.
 */
static cycle_t tc_get_cycles(struct clocksource *cs)
{
	unsigned long	flags;
	u32		lower, upper;

	raw_local_irq_save(flags);
	do {
		upper = __raw_readl(tcaddr + ATMEL_TC_REG(1, CV));
		lower = __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
	} while (upper != __raw_readl(tcaddr + ATMEL_TC_REG(1, CV)));

	raw_local_irq_restore(flags);
	return (upper << 16) | lower;
}

/*
 * Single-read variant used when the chip's counter is natively 32 bits
 * wide (tcb_config->counter_width == 32); no chaining, no retry needed.
 */
static cycle_t tc_get_cycles32(struct clocksource *cs)
{
	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
}

static struct clocksource clksrc = {
	.name           = "tcb_clksrc",
	.rating         = 200,
	.read           = tc_get_cycles,	/* swapped for tc_get_cycles32 on 32-bit parts */
	.mask           = CLOCKSOURCE_MASK(32),
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

#ifdef CONFIG_GENERIC_CLOCKEVENTS

/* Clockevent wrapper: channel 2 of the TC block drives tick events. */
struct tc_clkevt_device {
	struct clock_event_device	clkevt;
	struct clk			*clk;
	void __iomem			*regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
	return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
 * because using one of the divided clocks would usually mean the
 * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
 *
 * A divided clock could be good for high resolution timers, since
 * 30.5 usec resolution can seem "low".
 */
static u32 timer_clock;

/*
 * Mode-change callback for the clockevent device.  First tears down
 * whatever mode was previously active (mask channel-2 irqs, disable its
 * clock), then programs channel 2 for the requested mode.
 */
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem		*regs = tcd->regs;

	if (tcd->clkevt.mode == CLOCK_EVT_MODE_PERIODIC
			|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
		/* mask all channel-2 interrupts and stop its clock */
		__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
		__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
		clk_disable(tcd->clk);
	}

	switch (m) {

	/* By not making the gentime core emulate periodic mode on top
	 * of oneshot, we get lower overhead and improved accuracy.
	 */
	case CLOCK_EVT_MODE_PERIODIC:
		clk_enable(tcd->clk);

		/* slow clock, count up to RC, then irq and restart */
		__raw_writel(timer_clock
				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
				regs + ATMEL_TC_REG(2, CMR));
		/* RC value for one tick at HZ, rounded to nearest */
		__raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

		/* Enable clock and interrupts on RC compare */
		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

		/* go go gadget! */
		__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
				regs + ATMEL_TC_REG(2, CCR));
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		clk_enable(tcd->clk);

		/* slow clock, count up to RC, then irq and stop */
		__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
				regs + ATMEL_TC_REG(2, CMR));
		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

		/* set_next_event() configures and starts the timer */
		break;

	default:
		break;
	}
}

/*
 * Oneshot programming: load the delay into RC, then enable the channel
 * clock and software-trigger it so counting starts from zero.
 */
static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
	__raw_writel(delta, tcaddr + ATMEL_TC_REG(2, RC));

	/* go go gadget! */
	__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
			tcaddr + ATMEL_TC_REG(2, CCR));
	return 0;
}

static struct tc_clkevt_device clkevt = {
	.clkevt	= {
		.name		= "tc_clkevt",
		.features	= CLOCK_EVT_FEAT_PERIODIC
					| CLOCK_EVT_FEAT_ONESHOT,
		/* Should be lower than at91rm9200's system timer */
		.rating		= 125,
		.set_next_event	= tc_next_event,
		.set_mode	= tc_mode,
	},
};

/*
 * Channel-2 interrupt handler: an RC-compare status bit means a tick
 * (or oneshot expiry) fired; forward it to the clockevent core.
 * Reading SR also acknowledges/clears the channel's interrupt flags.
 */
static irqreturn_t ch2_irq(int irq, void *handle)
{
	struct tc_clkevt_device	*dev = handle;
	unsigned int		sr;

	sr = __raw_readl(dev->regs + ATMEL_TC_REG(2, SR));
	if (sr & ATMEL_TC_CPCS) {
		dev->clkevt.event_handler(&dev->clkevt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static struct irqaction tc_irqaction = {
	.name		= "tc_clkevt",
	.flags		= IRQF_TIMER | IRQF_DISABLED,
	.handler	= ch2_irq,
};

/*
 * Register channel 2 as a clockevent device running from the 32 KiHz
 * slow clock (divisor index passed in by tcb_clksrc_init()).
 */
static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	struct clk *t2_clk = tc->clk[2];
	int irq = tc->irq[2];

	clkevt.regs = tc->regs;
	clkevt.clk = t2_clk;
	tc_irqaction.dev_id = &clkevt;

	timer_clock = clk32k_divisor_idx;

	clkevt.clkevt.cpumask = cpumask_of(0);

	/* 16-bit channel: max programmable delta is 0xffff */
	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);

	setup_irq(irq, &tc_irqaction);
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	/* NOTHING */
}

#endif

/*
 * Program channels 0 and 1 as a chained 32-bit free-running counter:
 * channel 0 counts the divided master clock and toggles TIOA0 every
 * 0x8000 counts (so TIOA0 runs at counter-rate/65536); channel 1 counts
 * TIOA0 edges via XC1 and thus holds the high 16 bits.
 */
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */
	__raw_writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP		/* free-run */
			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */
			tcaddr + ATMEL_TC_REG(0, CMR));
	__raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
	__raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* channel 1:  waveform mode, input TIOA0 */
	__raw_writel(ATMEL_TC_XC1			/* input: TIOA0 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,		/* free-run */
			tcaddr + ATMEL_TC_REG(1, CMR));
	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

	/* chain channel 0 to channel 1*/
	__raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
	/* then reset all the timers */
	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

/*
 * Program only channel 0 as a free-running counter — used on parts
 * whose TC channels are natively 32 bits wide, so no chaining is needed.
 */
static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0:  waveform mode, input mclk/8 */
	__raw_writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,		/* free-run */
			tcaddr + ATMEL_TC_REG(0, CMR));
	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* then reset all the timers */
	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

/*
 * Driver init: claim the configured TC block, pick the fastest master
 * clock divisor that still yields > 5 MHz (remembering the 32 KiHz
 * entry for the clockevent channel), program the counter channel(s),
 * and register the clocksource plus the channel-2 clockevent.
 */
static int __init tcb_clksrc_init(void)
{
	static char bootinfo[] __initdata
		= KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";

	struct platform_device *pdev;
	struct atmel_tc *tc;
	struct clk *t0_clk;
	u32 rate, divided_rate = 0;
	int best_divisor_idx = -1;
	int clk32k_divisor_idx = -1;
	int i;

	tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);
	if (!tc) {
		pr_debug("can't alloc TC for clocksource\n");
		return -ENODEV;
	}
	tcaddr = tc->regs;
	pdev = tc->pdev;

	t0_clk = tc->clk[0];
	clk_enable(t0_clk);

	/* How fast will we be counting?  Pick something over 5 MHz.  */
	rate = (u32) clk_get_rate(t0_clk);
	for (i = 0; i < 5; i++) {
		unsigned divisor = atmel_tc_divisors[i];
		unsigned tmp;

		/* remember 32 KiHz clock for later */
		if (!divisor) {
			clk32k_divisor_idx = i;
			continue;
		}

		tmp = rate / divisor;
		pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
		if (best_divisor_idx > 0) {
			/* already have a candidate; only switch to a
			 * slower clock if it still exceeds 5 MHz */
			if (tmp < 5 * 1000 * 1000)
				continue;
		}
		divided_rate = tmp;
		best_divisor_idx = i;
	}


	printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
			divided_rate / 1000000,
			((divided_rate + 500000) % 1000000) / 1000);

	if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
		/* use apropriate function to read 32 bit counter */
		clksrc.read = tc_get_cycles32;
		/* setup ony channel 0 */
		tcb_setup_single_chan(tc, best_divisor_idx);
	} else {
		/* tclib will give us three clocks no matter what the
		 * underlying platform supports.
		 */
		clk_enable(tc->clk[1]);
		/* setup both channel 0 & 1 */
		tcb_setup_dual_chan(tc, best_divisor_idx);
	}

	/* and away we go! */
	clocksource_register_hz(&clksrc, divided_rate);

	/* channel 2:  periodic and oneshot timer support */
	setup_clkevents(tc, clk32k_divisor_idx);

	return 0;
}
arch_initcall(tcb_clksrc_init);
gpl-2.0
ptmr3/GalaxyNote2_Kernel2
drivers/staging/tty/epca.c
2535
76751
/* Copyright (C) 1996 Digi International. For technical support please email digiLinux@dgii.com or call Digi tech support at (612) 912-3456 ** This driver is no longer supported by Digi ** Much of this design and code came from epca.c which was copyright (C) 1994, 1995 Troy De Jongh, and subsequently modified by David Nugent, Christoph Lameter, Mike McLagan. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ /* See README.epca for change history --DAT*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/serial.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/pci.h> #include "digiPCI.h" #include "digi1.h" #include "digiFep1.h" #include "epca.h" #include "epcaconfig.h" #define VERSION "1.3.0.1-LK2.6" /* This major needs to be submitted to Linux to join the majors list */ #define DIGIINFOMAJOR 35 /* For Digi specific ioctl */ #define MAXCARDS 7 #define epcaassert(x, msg) if (!(x)) epca_error(__LINE__, msg) #define PFX "epca: " static int nbdevs, num_cards, liloconfig; static int digi_poller_inhibited = 1 ; static int setup_error_code; static int invalid_lilo_config; /* * The ISA boards do window flipping into the same spaces so its only sane with * a single lock. It's still pretty efficient. This lock guards the hardware * and the tty_port lock guards the kernel side stuff like use counts. Take * this lock inside the port lock if you must take both. */ static DEFINE_SPINLOCK(epca_lock); /* MAXBOARDS is typically 12, but ISA and EISA cards are restricted to 7 below. */ static struct board_info boards[MAXBOARDS]; static struct tty_driver *pc_driver; static struct tty_driver *pc_info; /* ------------------ Begin Digi specific structures -------------------- */ /* * digi_channels represents an array of structures that keep track of each * channel of the Digi product. Information such as transmit and receive * pointers, termio data, and signal definitions (DTR, CTS, etc ...) are stored * here. This structure is NOT used to overlay the cards physical channel * structure. 
*/ static struct channel digi_channels[MAX_ALLOC]; /* * card_ptr is an array used to hold the address of the first channel structure * of each card. This array will hold the addresses of various channels located * in digi_channels. */ static struct channel *card_ptr[MAXCARDS]; static struct timer_list epca_timer; /* * Begin generic memory functions. These functions will be alias (point at) * more specific functions dependent on the board being configured. */ static void memwinon(struct board_info *b, unsigned int win); static void memwinoff(struct board_info *b, unsigned int win); static void globalwinon(struct channel *ch); static void rxwinon(struct channel *ch); static void txwinon(struct channel *ch); static void memoff(struct channel *ch); static void assertgwinon(struct channel *ch); static void assertmemoff(struct channel *ch); /* ---- Begin more 'specific' memory functions for cx_like products --- */ static void pcxem_memwinon(struct board_info *b, unsigned int win); static void pcxem_memwinoff(struct board_info *b, unsigned int win); static void pcxem_globalwinon(struct channel *ch); static void pcxem_rxwinon(struct channel *ch); static void pcxem_txwinon(struct channel *ch); static void pcxem_memoff(struct channel *ch); /* ------ Begin more 'specific' memory functions for the pcxe ------- */ static void pcxe_memwinon(struct board_info *b, unsigned int win); static void pcxe_memwinoff(struct board_info *b, unsigned int win); static void pcxe_globalwinon(struct channel *ch); static void pcxe_rxwinon(struct channel *ch); static void pcxe_txwinon(struct channel *ch); static void pcxe_memoff(struct channel *ch); /* ---- Begin more 'specific' memory functions for the pc64xe and pcxi ---- */ /* Note : pc64xe and pcxi share the same windowing routines */ static void pcxi_memwinon(struct board_info *b, unsigned int win); static void pcxi_memwinoff(struct board_info *b, unsigned int win); static void pcxi_globalwinon(struct channel *ch); static void 
pcxi_rxwinon(struct channel *ch); static void pcxi_txwinon(struct channel *ch); static void pcxi_memoff(struct channel *ch); /* - Begin 'specific' do nothing memory functions needed for some cards - */ static void dummy_memwinon(struct board_info *b, unsigned int win); static void dummy_memwinoff(struct board_info *b, unsigned int win); static void dummy_globalwinon(struct channel *ch); static void dummy_rxwinon(struct channel *ch); static void dummy_txwinon(struct channel *ch); static void dummy_memoff(struct channel *ch); static void dummy_assertgwinon(struct channel *ch); static void dummy_assertmemoff(struct channel *ch); static struct channel *verifyChannel(struct tty_struct *); static void pc_sched_event(struct channel *, int); static void epca_error(int, char *); static void pc_close(struct tty_struct *, struct file *); static void shutdown(struct channel *, struct tty_struct *tty); static void pc_hangup(struct tty_struct *); static int pc_write_room(struct tty_struct *); static int pc_chars_in_buffer(struct tty_struct *); static void pc_flush_buffer(struct tty_struct *); static void pc_flush_chars(struct tty_struct *); static int pc_open(struct tty_struct *, struct file *); static void post_fep_init(unsigned int crd); static void epcapoll(unsigned long); static void doevent(int); static void fepcmd(struct channel *, int, int, int, int, int); static unsigned termios2digi_h(struct channel *ch, unsigned); static unsigned termios2digi_i(struct channel *ch, unsigned); static unsigned termios2digi_c(struct channel *ch, unsigned); static void epcaparam(struct tty_struct *, struct channel *); static void receive_data(struct channel *, struct tty_struct *tty); static int pc_ioctl(struct tty_struct *, unsigned int, unsigned long); static int info_ioctl(struct tty_struct *, unsigned int, unsigned long); static void pc_set_termios(struct tty_struct *, struct ktermios *); static void do_softint(struct work_struct *work); static void pc_stop(struct tty_struct *); static 
void pc_start(struct tty_struct *); static void pc_throttle(struct tty_struct *tty); static void pc_unthrottle(struct tty_struct *tty); static int pc_send_break(struct tty_struct *tty, int msec); static void setup_empty_event(struct tty_struct *tty, struct channel *ch); static int pc_write(struct tty_struct *, const unsigned char *, int); static int pc_init(void); static int init_PCI(void); /* * Table of functions for each board to handle memory. Mantaining parallelism * is a *very* good idea here. The idea is for the runtime code to blindly call * these functions, not knowing/caring about the underlying hardware. This * stuff should contain no conditionals; if more functionality is needed a * different entry should be established. These calls are the interface calls * and are the only functions that should be accessed. Anyone caught making * direct calls deserves what they get. */ static void memwinon(struct board_info *b, unsigned int win) { b->memwinon(b, win); } static void memwinoff(struct board_info *b, unsigned int win) { b->memwinoff(b, win); } static void globalwinon(struct channel *ch) { ch->board->globalwinon(ch); } static void rxwinon(struct channel *ch) { ch->board->rxwinon(ch); } static void txwinon(struct channel *ch) { ch->board->txwinon(ch); } static void memoff(struct channel *ch) { ch->board->memoff(ch); } static void assertgwinon(struct channel *ch) { ch->board->assertgwinon(ch); } static void assertmemoff(struct channel *ch) { ch->board->assertmemoff(ch); } /* PCXEM windowing is the same as that used in the PCXR and CX series cards. 
*/ static void pcxem_memwinon(struct board_info *b, unsigned int win) { outb_p(FEPWIN | win, b->port + 1); } static void pcxem_memwinoff(struct board_info *b, unsigned int win) { outb_p(0, b->port + 1); } static void pcxem_globalwinon(struct channel *ch) { outb_p(FEPWIN, (int)ch->board->port + 1); } static void pcxem_rxwinon(struct channel *ch) { outb_p(ch->rxwin, (int)ch->board->port + 1); } static void pcxem_txwinon(struct channel *ch) { outb_p(ch->txwin, (int)ch->board->port + 1); } static void pcxem_memoff(struct channel *ch) { outb_p(0, (int)ch->board->port + 1); } /* ----------------- Begin pcxe memory window stuff ------------------ */ static void pcxe_memwinon(struct board_info *b, unsigned int win) { outb_p(FEPWIN | win, b->port + 1); } static void pcxe_memwinoff(struct board_info *b, unsigned int win) { outb_p(inb(b->port) & ~FEPMEM, b->port + 1); outb_p(0, b->port + 1); } static void pcxe_globalwinon(struct channel *ch) { outb_p(FEPWIN, (int)ch->board->port + 1); } static void pcxe_rxwinon(struct channel *ch) { outb_p(ch->rxwin, (int)ch->board->port + 1); } static void pcxe_txwinon(struct channel *ch) { outb_p(ch->txwin, (int)ch->board->port + 1); } static void pcxe_memoff(struct channel *ch) { outb_p(0, (int)ch->board->port); outb_p(0, (int)ch->board->port + 1); } /* ------------- Begin pc64xe and pcxi memory window stuff -------------- */ static void pcxi_memwinon(struct board_info *b, unsigned int win) { outb_p(inb(b->port) | FEPMEM, b->port); } static void pcxi_memwinoff(struct board_info *b, unsigned int win) { outb_p(inb(b->port) & ~FEPMEM, b->port); } static void pcxi_globalwinon(struct channel *ch) { outb_p(FEPMEM, ch->board->port); } static void pcxi_rxwinon(struct channel *ch) { outb_p(FEPMEM, ch->board->port); } static void pcxi_txwinon(struct channel *ch) { outb_p(FEPMEM, ch->board->port); } static void pcxi_memoff(struct channel *ch) { outb_p(0, ch->board->port); } static void pcxi_assertgwinon(struct channel *ch) { 
epcaassert(inb(ch->board->port) & FEPMEM, "Global memory off"); } static void pcxi_assertmemoff(struct channel *ch) { epcaassert(!(inb(ch->board->port) & FEPMEM), "Memory on"); } /* * Not all of the cards need specific memory windowing routines. Some cards * (Such as PCI) needs no windowing routines at all. We provide these do * nothing routines so that the same code base can be used. The driver will * ALWAYS call a windowing routine if it thinks it needs to; regardless of the * card. However, dependent on the card the routine may or may not do anything. */ static void dummy_memwinon(struct board_info *b, unsigned int win) { } static void dummy_memwinoff(struct board_info *b, unsigned int win) { } static void dummy_globalwinon(struct channel *ch) { } static void dummy_rxwinon(struct channel *ch) { } static void dummy_txwinon(struct channel *ch) { } static void dummy_memoff(struct channel *ch) { } static void dummy_assertgwinon(struct channel *ch) { } static void dummy_assertmemoff(struct channel *ch) { } static struct channel *verifyChannel(struct tty_struct *tty) { /* * This routine basically provides a sanity check. It insures that the * channel returned is within the proper range of addresses as well as * properly initialized. If some bogus info gets passed in * through tty->driver_data this should catch it. */ if (tty) { struct channel *ch = tty->driver_data; if (ch >= &digi_channels[0] && ch < &digi_channels[nbdevs]) { if (ch->magic == EPCA_MAGIC) return ch; } } return NULL; } static void pc_sched_event(struct channel *ch, int event) { /* * We call this to schedule interrupt processing on some event. The * kernel sees our request and calls the related routine in OUR driver. 
*/ ch->event |= 1 << event; schedule_work(&ch->tqueue); } static void epca_error(int line, char *msg) { printk(KERN_ERR "epca_error (Digi): line = %d %s\n", line, msg); } static void pc_close(struct tty_struct *tty, struct file *filp) { struct channel *ch; struct tty_port *port; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. */ ch = verifyChannel(tty); if (ch == NULL) return; port = &ch->port; if (tty_port_close_start(port, tty, filp) == 0) return; pc_flush_buffer(tty); shutdown(ch, tty); tty_port_close_end(port, tty); ch->event = 0; /* FIXME: review ch->event locking */ tty_port_tty_set(port, NULL); } static void shutdown(struct channel *ch, struct tty_struct *tty) { unsigned long flags; struct board_chan __iomem *bc; struct tty_port *port = &ch->port; if (!(port->flags & ASYNC_INITIALIZED)) return; spin_lock_irqsave(&epca_lock, flags); globalwinon(ch); bc = ch->brdchan; /* * In order for an event to be generated on the receipt of data the * idata flag must be set. Since we are shutting down, this is not * necessary clear this flag. */ if (bc) writeb(0, &bc->idata); /* If we're a modem control device and HUPCL is on, drop RTS & DTR. */ if (tty->termios->c_cflag & HUPCL) { ch->omodem &= ~(ch->m_rts | ch->m_dtr); fepcmd(ch, SETMODEM, 0, ch->m_dtr | ch->m_rts, 10, 1); } memoff(ch); /* * The channel has officially been closed. The next time it is opened it * will have to reinitialized. Set a flag to indicate this. */ /* Prevent future Digi programmed interrupts from coming active */ port->flags &= ~ASYNC_INITIALIZED; spin_unlock_irqrestore(&epca_lock, flags); } static void pc_hangup(struct tty_struct *tty) { struct channel *ch; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. 
*/ ch = verifyChannel(tty); if (ch != NULL) { pc_flush_buffer(tty); tty_ldisc_flush(tty); shutdown(ch, tty); ch->event = 0; /* FIXME: review locking of ch->event */ tty_port_hangup(&ch->port); } } static int pc_write(struct tty_struct *tty, const unsigned char *buf, int bytesAvailable) { unsigned int head, tail; int dataLen; int size; int amountCopied; struct channel *ch; unsigned long flags; int remain; struct board_chan __iomem *bc; /* * pc_write is primarily called directly by the kernel routine * tty_write (Though it can also be called by put_char) found in * tty_io.c. pc_write is passed a line discipline buffer where the data * to be written out is stored. The line discipline implementation * itself is done at the kernel level and is not brought into the * driver. */ /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. */ ch = verifyChannel(tty); if (ch == NULL) return 0; /* Make a pointer to the channel data structure found on the board. */ bc = ch->brdchan; size = ch->txbufsize; amountCopied = 0; spin_lock_irqsave(&epca_lock, flags); globalwinon(ch); head = readw(&bc->tin) & (size - 1); tail = readw(&bc->tout); if (tail != readw(&bc->tout)) tail = readw(&bc->tout); tail &= (size - 1); if (head >= tail) { /* head has not wrapped */ /* * remain (much like dataLen above) represents the total amount * of space available on the card for data. Here dataLen * represents the space existing between the head pointer and * the end of buffer. This is important because a memcpy cannot * be told to automatically wrap around when it hits the buffer * end. */ dataLen = size - head; remain = size - (head - tail) - 1; } else { /* head has wrapped around */ remain = tail - head - 1; dataLen = remain; } /* * Check the space on the card. If we have more data than space; reduce * the amount of data to fit the space. 
*/ bytesAvailable = min(remain, bytesAvailable); txwinon(ch); while (bytesAvailable > 0) { /* there is data to copy onto card */ /* * If head is not wrapped, the below will make sure the first * data copy fills to the end of card buffer. */ dataLen = min(bytesAvailable, dataLen); memcpy_toio(ch->txptr + head, buf, dataLen); buf += dataLen; head += dataLen; amountCopied += dataLen; bytesAvailable -= dataLen; if (head >= size) { head = 0; dataLen = tail; } } ch->statusflags |= TXBUSY; globalwinon(ch); writew(head, &bc->tin); if ((ch->statusflags & LOWWAIT) == 0) { ch->statusflags |= LOWWAIT; writeb(1, &bc->ilow); } memoff(ch); spin_unlock_irqrestore(&epca_lock, flags); return amountCopied; } static int pc_write_room(struct tty_struct *tty) { int remain = 0; struct channel *ch; unsigned long flags; unsigned int head, tail; struct board_chan __iomem *bc; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. */ ch = verifyChannel(tty); if (ch != NULL) { spin_lock_irqsave(&epca_lock, flags); globalwinon(ch); bc = ch->brdchan; head = readw(&bc->tin) & (ch->txbufsize - 1); tail = readw(&bc->tout); if (tail != readw(&bc->tout)) tail = readw(&bc->tout); /* Wrap tail if necessary */ tail &= (ch->txbufsize - 1); remain = tail - head - 1; if (remain < 0) remain += ch->txbufsize; if (remain && (ch->statusflags & LOWWAIT) == 0) { ch->statusflags |= LOWWAIT; writeb(1, &bc->ilow); } memoff(ch); spin_unlock_irqrestore(&epca_lock, flags); } /* Return how much room is left on card */ return remain; } static int pc_chars_in_buffer(struct tty_struct *tty) { int chars; unsigned int ctail, head, tail; int remain; unsigned long flags; struct channel *ch; struct board_chan __iomem *bc; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. 
*/ ch = verifyChannel(tty); if (ch == NULL) return 0; spin_lock_irqsave(&epca_lock, flags); globalwinon(ch); bc = ch->brdchan; tail = readw(&bc->tout); head = readw(&bc->tin); ctail = readw(&ch->mailbox->cout); if (tail == head && readw(&ch->mailbox->cin) == ctail && readb(&bc->tbusy) == 0) chars = 0; else { /* Begin if some space on the card has been used */ head = readw(&bc->tin) & (ch->txbufsize - 1); tail &= (ch->txbufsize - 1); /* * The logic here is basically opposite of the above * pc_write_room here we are finding the amount of bytes in the * buffer filled. Not the amount of bytes empty. */ remain = tail - head - 1; if (remain < 0) remain += ch->txbufsize; chars = (int)(ch->txbufsize - remain); /* * Make it possible to wakeup anything waiting for output in * tty_ioctl.c, etc. * * If not already set. Setup an event to indicate when the * transmit buffer empties. */ if (!(ch->statusflags & EMPTYWAIT)) setup_empty_event(tty, ch); } /* End if some space on the card has been used */ memoff(ch); spin_unlock_irqrestore(&epca_lock, flags); /* Return number of characters residing on card. */ return chars; } static void pc_flush_buffer(struct tty_struct *tty) { unsigned int tail; unsigned long flags; struct channel *ch; struct board_chan __iomem *bc; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. */ ch = verifyChannel(tty); if (ch == NULL) return; spin_lock_irqsave(&epca_lock, flags); globalwinon(ch); bc = ch->brdchan; tail = readw(&bc->tout); /* Have FEP move tout pointer; effectively flushing transmit buffer */ fepcmd(ch, STOUT, (unsigned) tail, 0, 0, 0); memoff(ch); spin_unlock_irqrestore(&epca_lock, flags); tty_wakeup(tty); } static void pc_flush_chars(struct tty_struct *tty) { struct channel *ch; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. 
*/ ch = verifyChannel(tty); if (ch != NULL) { unsigned long flags; spin_lock_irqsave(&epca_lock, flags); /* * If not already set and the transmitter is busy setup an * event to indicate when the transmit empties. */ if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT)) setup_empty_event(tty, ch); spin_unlock_irqrestore(&epca_lock, flags); } } static int epca_carrier_raised(struct tty_port *port) { struct channel *ch = container_of(port, struct channel, port); if (ch->imodem & ch->dcd) return 1; return 0; } static void epca_dtr_rts(struct tty_port *port, int onoff) { } static int pc_open(struct tty_struct *tty, struct file *filp) { struct channel *ch; struct tty_port *port; unsigned long flags; int line, retval, boardnum; struct board_chan __iomem *bc; unsigned int head; line = tty->index; if (line < 0 || line >= nbdevs) return -ENODEV; ch = &digi_channels[line]; port = &ch->port; boardnum = ch->boardnum; /* Check status of board configured in system. */ /* * I check to see if the epca_setup routine detected a user error. It * might be better to put this in pc_init, but for the moment it goes * here. 
*/ if (invalid_lilo_config) { if (setup_error_code & INVALID_BOARD_TYPE) printk(KERN_ERR "epca: pc_open: Invalid board type specified in kernel options.\n"); if (setup_error_code & INVALID_NUM_PORTS) printk(KERN_ERR "epca: pc_open: Invalid number of ports specified in kernel options.\n"); if (setup_error_code & INVALID_MEM_BASE) printk(KERN_ERR "epca: pc_open: Invalid board memory address specified in kernel options.\n"); if (setup_error_code & INVALID_PORT_BASE) printk(KERN_ERR "epca; pc_open: Invalid board port address specified in kernel options.\n"); if (setup_error_code & INVALID_BOARD_STATUS) printk(KERN_ERR "epca: pc_open: Invalid board status specified in kernel options.\n"); if (setup_error_code & INVALID_ALTPIN) printk(KERN_ERR "epca: pc_open: Invalid board altpin specified in kernel options;\n"); tty->driver_data = NULL; /* Mark this device as 'down' */ return -ENODEV; } if (boardnum >= num_cards || boards[boardnum].status == DISABLED) { tty->driver_data = NULL; /* Mark this device as 'down' */ return(-ENODEV); } bc = ch->brdchan; if (bc == NULL) { tty->driver_data = NULL; return -ENODEV; } spin_lock_irqsave(&port->lock, flags); /* * Every time a channel is opened, increment a counter. This is * necessary because we do not wish to flush and shutdown the channel * until the last app holding the channel open, closes it. */ port->count++; /* * Set a kernel structures pointer to our local channel structure. This * way we can get to it when passed only a tty struct. */ tty->driver_data = ch; port->tty = tty; /* * If this is the first time the channel has been opened, initialize * the tty->termios struct otherwise let pc_close handle it. */ spin_lock(&epca_lock); globalwinon(ch); ch->statusflags = 0; /* Save boards current modem status */ ch->imodem = readb(&bc->mstat); /* * Set receive head and tail ptrs to each other. This indicates no data * available to read. 
*/ head = readw(&bc->rin); writew(head, &bc->rout); /* Set the channels associated tty structure */ /* * The below routine generally sets up parity, baud, flow control * issues, etc.... It effect both control flags and input flags. */ epcaparam(tty, ch); memoff(ch); spin_unlock(&epca_lock); port->flags |= ASYNC_INITIALIZED; spin_unlock_irqrestore(&port->lock, flags); retval = tty_port_block_til_ready(port, tty, filp); if (retval) return retval; /* * Set this again in case a hangup set it to zero while this open() was * waiting for the line... */ spin_lock_irqsave(&port->lock, flags); port->tty = tty; spin_lock(&epca_lock); globalwinon(ch); /* Enable Digi Data events */ writeb(1, &bc->idata); memoff(ch); spin_unlock(&epca_lock); spin_unlock_irqrestore(&port->lock, flags); return 0; } static int __init epca_module_init(void) { return pc_init(); } module_init(epca_module_init); static struct pci_driver epca_driver; static void __exit epca_module_exit(void) { int count, crd; struct board_info *bd; struct channel *ch; del_timer_sync(&epca_timer); if (tty_unregister_driver(pc_driver) || tty_unregister_driver(pc_info)) { printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n"); return; } put_tty_driver(pc_driver); put_tty_driver(pc_info); for (crd = 0; crd < num_cards; crd++) { bd = &boards[crd]; if (!bd) { /* sanity check */ printk(KERN_ERR "<Error> - Digi : cleanup_module failed\n"); return; } ch = card_ptr[crd]; for (count = 0; count < bd->numports; count++, ch++) { struct tty_struct *tty = tty_port_tty_get(&ch->port); if (tty) { tty_hangup(tty); tty_kref_put(tty); } } } pci_unregister_driver(&epca_driver); } module_exit(epca_module_exit); static const struct tty_operations pc_ops = { .open = pc_open, .close = pc_close, .write = pc_write, .write_room = pc_write_room, .flush_buffer = pc_flush_buffer, .chars_in_buffer = pc_chars_in_buffer, .flush_chars = pc_flush_chars, .ioctl = pc_ioctl, .set_termios = pc_set_termios, .stop = pc_stop, .start = 
pc_start,
	.throttle = pc_throttle,
	.unthrottle = pc_unthrottle,
	.hangup = pc_hangup,
	.break_ctl = pc_send_break
};

static const struct tty_port_operations epca_port_ops = {
	.carrier_raised = epca_carrier_raised,
	.dtr_rts = epca_dtr_rts,
};

/* Open for the /dev/digi_ctl control device: nothing to set up here;
 * all of the work happens in info_ioctl(). */
static int info_open(struct tty_struct *tty, struct file *filp)
{
	return 0;
}

static const struct tty_operations info_ops = {
	.open = info_open,
	.ioctl = info_ioctl,
};

/*
 * Driver bring-up: allocate the two tty drivers (ports + control device),
 * merge the static/LILO board configuration with any PCI boards found,
 * configure and register both drivers, and start the event poll timer.
 * Returns 0 on success or a negative errno.
 */
static int __init pc_init(void)
{
	int crd;
	struct board_info *bd;
	unsigned char board_id = 0;
	int err = -ENOMEM;
	int pci_boards_found, pci_count;

	pci_count = 0;

	pc_driver = alloc_tty_driver(MAX_ALLOC);
	if (!pc_driver)
		goto out1;

	pc_info = alloc_tty_driver(MAX_ALLOC);
	if (!pc_info)
		goto out2;

	/*
	 * If epca_setup has not been ran by LILO set num_cards to defaults;
	 * copy board structure defined by digiConfig into drivers board
	 * structure. Note : If LILO has ran epca_setup then epca_setup will
	 * handle defining num_cards as well as copying the data into the board
	 * structure.
	 */
	if (!liloconfig) {
		/* driver has been configured via. epcaconfig */
		nbdevs = NBDEVS;
		num_cards = NUMCARDS;
		memcpy(&boards, &static_boards,
		       sizeof(struct board_info) * NUMCARDS);
	}

	/*
	 * Note : If lilo was used to configure the driver and the ignore
	 * epcaconfig option was chosen (digiepca=2) then nbdevs and num_cards
	 * will equal 0 at this point. This is okay; PCI cards will still be
	 * picked up if detected.
	 */

	/*
	 * Set up interrupt, we will worry about memory allocation in
	 * post_fep_init.
	 */
	printk(KERN_INFO "DIGI epca driver version %s loaded.\n", VERSION);

	/*
	 * NOTE : This code assumes that the number of ports found in the
	 * boards array is correct. This could be wrong if the card in question
	 * is PCI (And therefore has no ports entry in the boards structure.)
	 * The rest of the information will be valid for PCI because the
	 * beginning of pc_init scans for PCI and determines i/o and base
	 * memory addresses. I am not sure if it is possible to read the number
	 * of ports supported by the card prior to it being booted (Since that
	 * is the state it is in when pc_init is run). Because it is not
	 * possible to query the number of supported ports until after the card
	 * has booted; we are required to calculate the card_ptrs as the card
	 * is initialized (Inside post_fep_init). The negative thing about this
	 * approach is that digiDload's call to GET_INFO will have a bad port
	 * value. (Since this is called prior to post_fep_init.)
	 */
	pci_boards_found = 0;
	if (num_cards < MAXBOARDS)
		pci_boards_found += init_PCI();
	num_cards += pci_boards_found;

	/* Raw-mode serial driver for the actual ports (ttyD*). */
	pc_driver->owner = THIS_MODULE;
	pc_driver->name = "ttyD";
	pc_driver->major = DIGI_MAJOR;
	pc_driver->minor_start = 0;
	pc_driver->type = TTY_DRIVER_TYPE_SERIAL;
	pc_driver->subtype = SERIAL_TYPE_NORMAL;
	pc_driver->init_termios = tty_std_termios;
	pc_driver->init_termios.c_iflag = 0;
	pc_driver->init_termios.c_oflag = 0;
	pc_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
	pc_driver->init_termios.c_lflag = 0;
	pc_driver->init_termios.c_ispeed = 9600;
	pc_driver->init_termios.c_ospeed = 9600;
	pc_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_HARDWARE_BREAK;
	tty_set_operations(pc_driver, &pc_ops);

	/* Companion control/info device (digi_ctl). */
	pc_info->owner = THIS_MODULE;
	pc_info->name = "digi_ctl";
	pc_info->major = DIGIINFOMAJOR;
	pc_info->minor_start = 0;
	pc_info->type = TTY_DRIVER_TYPE_SERIAL;
	pc_info->subtype = SERIAL_TYPE_INFO;
	pc_info->init_termios = tty_std_termios;
	pc_info->init_termios.c_iflag = 0;
	pc_info->init_termios.c_oflag = 0;
	pc_info->init_termios.c_lflag = 0;
	pc_info->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
	pc_info->init_termios.c_ispeed = 9600;
	pc_info->init_termios.c_ospeed = 9600;
	pc_info->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(pc_info, &info_ops);

	for (crd = 0; crd < num_cards; crd++) {
		/*
		 * This is where the appropriate memory handlers for the
		 * hardware is set. Everything at runtime blindly jumps through
		 * these vectors.
*/
		/* defined in epcaconfig.h */
		bd = &boards[crd];
		/* Install the window-switching function pointers that match
		 * this board family; "dummy_*" entries are no-ops for boards
		 * that do not need that operation. */
		switch (bd->type) {
		case PCXEM:
		case EISAXEM:
			bd->memwinon = pcxem_memwinon;
			bd->memwinoff = pcxem_memwinoff;
			bd->globalwinon = pcxem_globalwinon;
			bd->txwinon = pcxem_txwinon;
			bd->rxwinon = pcxem_rxwinon;
			bd->memoff = pcxem_memoff;
			bd->assertgwinon = dummy_assertgwinon;
			bd->assertmemoff = dummy_assertmemoff;
			break;

		case PCIXEM:
		case PCIXRJ:
		case PCIXR:
			bd->memwinon = dummy_memwinon;
			bd->memwinoff = dummy_memwinoff;
			bd->globalwinon = dummy_globalwinon;
			bd->txwinon = dummy_txwinon;
			bd->rxwinon = dummy_rxwinon;
			bd->memoff = dummy_memoff;
			bd->assertgwinon = dummy_assertgwinon;
			bd->assertmemoff = dummy_assertmemoff;
			break;

		case PCXE:
		case PCXEVE:
			bd->memwinon = pcxe_memwinon;
			bd->memwinoff = pcxe_memwinoff;
			bd->globalwinon = pcxe_globalwinon;
			bd->txwinon = pcxe_txwinon;
			bd->rxwinon = pcxe_rxwinon;
			bd->memoff = pcxe_memoff;
			bd->assertgwinon = dummy_assertgwinon;
			bd->assertmemoff = dummy_assertmemoff;
			break;

		case PCXI:
		case PC64XE:
			bd->memwinon = pcxi_memwinon;
			bd->memwinoff = pcxi_memwinoff;
			bd->globalwinon = pcxi_globalwinon;
			bd->txwinon = pcxi_txwinon;
			bd->rxwinon = pcxi_rxwinon;
			bd->memoff = pcxi_memoff;
			bd->assertgwinon = pcxi_assertgwinon;
			bd->assertmemoff = pcxi_assertmemoff;
			break;

		default:
			break;
		}

		/*
		 * Some cards need a memory segment to be defined for use in
		 * transmit and receive windowing operations. These boards are
		 * listed in the below switch. In the case of the XI the amount
		 * of memory on the board is variable so the memory_seg is also
		 * variable. This code determines what they segment should be.
		 */
		switch (bd->type) {
		case PCXE:
		case PCXEVE:
		case PC64XE:
			bd->memory_seg = 0xf000;
			break;

		case PCXI:
			board_id = inb((int)bd->port);
			if ((board_id & 0x1) == 0x1) {
				/* it's an XI card */
				/* Is it a 64K board */
				if ((board_id & 0x30) == 0)
					bd->memory_seg = 0xf000;

				/* Is it a 128K board */
				if ((board_id & 0x30) == 0x10)
					bd->memory_seg = 0xe000;

				/* Is is a 256K board */
				if ((board_id & 0x30) == 0x20)
					bd->memory_seg = 0xc000;

				/* Is it a 512K board */
				if ((board_id & 0x30) == 0x30)
					bd->memory_seg = 0x8000;
			} else
				printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n", (int)bd->port);
			break;
		}
	}

	err = tty_register_driver(pc_driver);
	if (err) {
		printk(KERN_ERR "Couldn't register Digi PC/ driver");
		goto out3;
	}

	err = tty_register_driver(pc_info);
	if (err) {
		printk(KERN_ERR "Couldn't register Digi PC/ info ");
		goto out4;
	}

	/* Start up the poller to check for events on all enabled boards */
	init_timer(&epca_timer);
	epca_timer.function = epcapoll;
	mod_timer(&epca_timer, jiffies + HZ/25);
	return 0;

/* goto-based unwind: each label releases what was acquired before it. */
out4:
	tty_unregister_driver(pc_driver);
out3:
	put_tty_driver(pc_info);
out2:
	put_tty_driver(pc_driver);
out1:
	return err;
}

/*
 * Per-board second-stage initialisation, run from the DIGI_INIT ioctl after
 * userspace has downloaded and booted the on-card FEP firmware.  Discovers
 * the port count (PCI), maps board memory, and initialises every channel.
 */
static void post_fep_init(unsigned int crd)
{
	int i;
	void __iomem *memaddr;
	struct global_data __iomem *gd;
	struct board_info *bd;
	struct board_chan __iomem *bc;
	struct channel *ch;
	int shrinkmem = 0, lowwater;

	/*
	 * This call is made by the user via. the ioctl call DIGI_INIT. It is
	 * responsible for setting up all the card specific stuff.
	 */
	bd = &boards[crd];

	/*
	 * If this is a PCI board, get the port info. Remember PCI cards do not
	 * have entries into the epcaconfig.h file, so we can't get the number
	 * of ports from it. Unfortunetly, this means that anyone doing a
	 * DIGI_GETINFO before the board has booted will get an invalid number
	 * of ports returned (It should return 0). Calls to DIGI_GETINFO after
	 * DIGI_INIT has been called will return the proper values.
*/
	if (bd->type >= PCIXEM) { /* Begin get PCI number of ports */
		/*
		 * Below we use XEMPORTS as a memory offset regardless of which
		 * PCI card it is. This is because all of the supported PCI
		 * cards have the same memory offset for the channel data. This
		 * will have to be changed if we ever develop a PCI/XE card.
		 * NOTE : The FEP manual states that the port offset is 0xC22
		 * as opposed to 0xC02. This is only true for PC/XE, and PC/XI
		 * cards; not for the XEM, or CX series. On the PCI cards the
		 * number of ports is determined by reading a ID PROM located
		 * in the box attached to the card. The card can then determine
		 * the index the id to determine the number of ports available.
		 * (FYI - The id should be located at 0x1ac (And may use up to
		 * 4 bytes if the box in question is a XEM or CX)).
		 */
		/* PCI cards are already remapped at this point ISA are not */
		bd->numports = readw(bd->re_map_membase + XEMPORTS);
		epcaassert(bd->numports <= 64, "PCI returned a invalid number of ports");
		nbdevs += (bd->numports);
	} else {
		/* Fix up the mappings for ISA/EISA etc */
		/* FIXME: 64K - can we be smarter ? */
		bd->re_map_membase = ioremap_nocache(bd->membase, 0x10000);
	}

	/* card_ptr[crd] indexes the first driver channel of this card inside
	 * the flat digi_channels[] array; card 0 starts at element 0. */
	if (crd != 0)
		card_ptr[crd] = card_ptr[crd-1] + boards[crd-1].numports;
	else
		card_ptr[crd] = &digi_channels[crd]; /* <- For card 0 only */

	ch = card_ptr[crd];
	epcaassert(ch <= &digi_channels[nbdevs - 1], "ch out of range");

	memaddr = bd->re_map_membase;

	/*
	 * The below assignment will set bc to point at the BEGINNING of the
	 * cards channel structures. For 1 card there will be between 8 and 64
	 * of these structures.
	 */
	bc = memaddr + CHANSTRUCT;

	/*
	 * The below assignment will set gd to point at the BEGINNING of global
	 * memory address 0xc00. The first data in that global memory actually
	 * starts at address 0xc1a. The command in pointer begins at 0xd10.
	 */
	gd = memaddr + GLOBAL;

	/*
	 * XEPORTS (address 0xc22) points at the number of channels the card
	 * supports. (For 64XE, XI, XEM, and XR use 0xc02)
	 */
	if ((bd->type == PCXEVE || bd->type == PCXE) &&
	    (readw(memaddr + XEPORTS) < 3))
		shrinkmem = 1;
	if (bd->type < PCIXEM)
		if (!request_region((int)bd->port, 4, board_desc[bd->type]))
			return;
	memwinon(bd, 0);

	/*
	 * Remember ch is the main drivers channels structure, while bc is the
	 * cards channel structure.
	 */
	for (i = 0; i < bd->numports; i++, ch++, bc++) {
		unsigned long flags;
		u16 tseg, rseg;

		tty_port_init(&ch->port);
		ch->port.ops = &epca_port_ops;
		ch->brdchan = bc;
		ch->mailbox = gd;
		INIT_WORK(&ch->tqueue, do_softint);
		ch->board = &boards[crd];

		spin_lock_irqsave(&epca_lock, flags);
		switch (bd->type) {
		/*
		 * Since some of the boards use different bitmaps for
		 * their control signals we cannot hard code these
		 * values and retain portability. We virtualize this
		 * data here.
		 */
		case EISAXEM:
		case PCXEM:
		case PCIXEM:
		case PCIXRJ:
		case PCIXR:
			ch->m_rts = 0x02;
			ch->m_dcd = 0x80;
			ch->m_dsr = 0x20;
			ch->m_cts = 0x10;
			ch->m_ri = 0x40;
			ch->m_dtr = 0x01;
			break;

		case PCXE:
		case PCXEVE:
		case PCXI:
		case PC64XE:
			ch->m_rts = 0x02;
			ch->m_dcd = 0x08;
			ch->m_dsr = 0x10;
			ch->m_cts = 0x20;
			ch->m_ri = 0x40;
			ch->m_dtr = 0x80;
			break;
		}

		/* "altpin" swaps the DSR and DCD senses for cables wired
		 * that way. */
		if (boards[crd].altpin) {
			ch->dsr = ch->m_dcd;
			ch->dcd = ch->m_dsr;
			ch->digiext.digi_flags |= DIGI_ALTPIN;
		} else {
			ch->dcd = ch->m_dcd;
			ch->dsr = ch->m_dsr;
		}

		ch->boardnum = crd;
		ch->channelnum = i;
		ch->magic = EPCA_MAGIC;
		tty_port_tty_set(&ch->port, NULL);

		/* One-shot: small-memory XE/XEVE boards get reduced buffers. */
		if (shrinkmem) {
			fepcmd(ch, SETBUFFER, 32, 0, 0, 0);
			shrinkmem = 0;
		}

		tseg = readw(&bc->tseg);
		rseg = readw(&bc->rseg);

		/* Translate the FEP's segment values into mapped pointers and
		 * window selectors, per board family. */
		switch (bd->type) {
		case PCIXEM:
		case PCIXRJ:
		case PCIXR:
			/* Cover all the 2MEG cards */
			ch->txptr = memaddr + ((tseg << 4) & 0x1fffff);
			ch->rxptr = memaddr + ((rseg << 4) & 0x1fffff);
			ch->txwin = FEPWIN | (tseg >> 11);
			ch->rxwin = FEPWIN | (rseg >> 11);
			break;

		case PCXEM:
		case EISAXEM:
			/* Cover all the 32K windowed cards */
			/* Mask equal to window size - 1 */
			ch->txptr = memaddr + ((tseg << 4) & 0x7fff);
			ch->rxptr = memaddr + ((rseg << 4) & 0x7fff);
ch->txwin = FEPWIN | (tseg >> 11);
			ch->rxwin = FEPWIN | (rseg >> 11);
			break;

		case PCXEVE:
		case PCXE:
			ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) & 0x1fff);
			ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9);
			ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) & 0x1fff);
			ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >> 9);
			break;

		case PCXI:
		case PC64XE:
			ch->txptr = memaddr + ((tseg - bd->memory_seg) << 4);
			ch->rxptr = memaddr + ((rseg - bd->memory_seg) << 4);
			ch->txwin = ch->rxwin = 0;
			break;
		}

		ch->txbufhead = 0;
		ch->txbufsize = readw(&bc->tmax) + 1;

		ch->rxbufhead = 0;
		ch->rxbufsize = readw(&bc->rmax) + 1;

		lowwater = ch->txbufsize >= 2000 ? 1024 : (ch->txbufsize / 2);

		/* Set transmitter low water mark */
		fepcmd(ch, STXLWATER, lowwater, 0, 10, 0);

		/* Set receiver low water mark */
		fepcmd(ch, SRXLWATER, (ch->rxbufsize / 4), 0, 10, 0);

		/* Set receiver high water mark */
		fepcmd(ch, SRXHWATER, (3 * ch->rxbufsize / 4), 0, 10, 0);

		writew(100, &bc->edelay);
		writeb(1, &bc->idata);

		/* Cache the FEP's current flow-control characters. */
		ch->startc = readb(&bc->startc);
		ch->stopc = readb(&bc->stopc);
		ch->startca = readb(&bc->startca);
		ch->stopca = readb(&bc->stopca);

		/* Clear the shadow copies so the first epcaparam() call
		 * pushes everything down to the card. */
		ch->fepcflag = 0;
		ch->fepiflag = 0;
		ch->fepoflag = 0;
		ch->fepstartc = 0;
		ch->fepstopc = 0;
		ch->fepstartca = 0;
		ch->fepstopca = 0;

		ch->port.close_delay = 50;

		spin_unlock_irqrestore(&epca_lock, flags);
	}

	printk(KERN_INFO "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
	       VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase,
	       bd->numports);
	memwinoff(bd, 0);
}

/*
 * Timer-driven poll loop (every HZ/25): scans each enabled board's event
 * queue and dispatches to doevent() when the in/out pointers differ.
 */
static void epcapoll(unsigned long ignored)
{
	unsigned long flags;
	int crd;
	unsigned int head, tail;
	struct channel *ch;
	struct board_info *bd;

	/*
	 * This routine is called upon every timer interrupt. Even though the
	 * Digi series cards are capable of generating interrupts this method
	 * of non-looping polling is more efficient. This routine checks for
	 * card generated events (Such as receive data, are transmit buffer
	 * empty) and acts on those events.
	 */
	for (crd = 0; crd < num_cards; crd++) {
		bd = &boards[crd];
		ch = card_ptr[crd];

		if ((bd->status == DISABLED) || digi_poller_inhibited)
			continue;

		/*
		 * assertmemoff is not needed here; indeed it is an empty
		 * subroutine. It is being kept because future boards may need
		 * this as well as some legacy boards.
		 */
		spin_lock_irqsave(&epca_lock, flags);

		assertmemoff(ch);

		globalwinon(ch);

		/*
		 * In this case head and tail actually refer to the event queue
		 * not the transmit or receive queue.
		 */
		head = readw(&ch->mailbox->ein);
		tail = readw(&ch->mailbox->eout);

		/* If head isn't equal to tail we have an event */
		if (head != tail)
			doevent(crd);
		memoff(ch);

		spin_unlock_irqrestore(&epca_lock, flags);
	} /* End for each card */
	mod_timer(&epca_timer, jiffies + (HZ / 25));
}

/*
 * Drain one board's event queue: for each 4-byte event record, decode the
 * channel, event code, and modem status, and react (receive data, modem
 * change, break, tx-low / tx-empty wakeups).  Runs under epca_lock from
 * epcapoll().
 */
static void doevent(int crd)
{
	void __iomem *eventbuf;
	struct channel *ch, *chan0;
	/* NOTE(review): "static" here means one shared tty pointer across all
	 * invocations; presumably safe only because doevent is reached solely
	 * from the single poll timer — verify before any reentrancy change. */
	static struct tty_struct *tty;
	struct board_info *bd;
	struct board_chan __iomem *bc;
	unsigned int tail, head;
	int event, channel;
	int mstat, lstat;

	/*
	 * This subroutine is called by epcapoll when an event is detected
	 * in the event queue. This routine responds to those events.
	 */
	bd = &boards[crd];

	chan0 = card_ptr[crd];
	epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range");
	assertgwinon(chan0);
	while ((tail = readw(&chan0->mailbox->eout)) !=
	       (head = readw(&chan0->mailbox->ein))) {
		/* Begin while something in event queue */
		assertgwinon(chan0);
		eventbuf = bd->re_map_membase + tail + ISTART;
		/* Get the channel the event occurred on */
		channel = readb(eventbuf);
		/* Get the actual event code that occurred */
		event = readb(eventbuf + 1);
		/*
		 * The two assignments below get the current modem status
		 * (mstat) and the previous modem status (lstat). These are
		 * useful because an event could signal a change in modem
		 * signals itself.
*/
		mstat = readb(eventbuf + 2);
		lstat = readb(eventbuf + 3);

		ch = chan0 + channel;
		/* NOTE(review): "!ch" can never be true here — chan0 + channel
		 * is never NULL; only the range check does any work. */
		if ((unsigned)channel >= bd->numports || !ch) {
			if (channel >= bd->numports)
				ch = chan0;
			bc = ch->brdchan;
			goto next;
		}

		bc = ch->brdchan;
		if (bc == NULL)
			goto next;

		tty = tty_port_tty_get(&ch->port);
		if (event & DATA_IND) { /* Begin DATA_IND */
			receive_data(ch, tty);
			assertgwinon(ch);
		} /* End DATA_IND */
		/* else *//* Fix for DCD transition missed bug */
		if (event & MODEMCHG_IND) {
			/* A modem signal change has been indicated */
			ch->imodem = mstat;
			if (test_bit(ASYNCB_CHECK_CD, &ch->port.flags)) {
				/* We are now receiving dcd */
				if (mstat & ch->dcd)
					wake_up_interruptible(&ch->port.open_wait);
				else /* No dcd; hangup */
					pc_sched_event(ch, EPCA_EVENT_HANGUP);
			}
		}
		if (tty) {
			if (event & BREAK_IND) {
				/* A break has been indicated */
				tty_insert_flip_char(tty, 0, TTY_BREAK);
				tty_schedule_flip(tty);
			} else if (event & LOWTX_IND) {
				if (ch->statusflags & LOWWAIT) {
					ch->statusflags &= ~LOWWAIT;
					tty_wakeup(tty);
				}
			} else if (event & EMPTYTX_IND) {
				/* This event is generated by setup_empty_event */
				ch->statusflags &= ~TXBUSY;
				if (ch->statusflags & EMPTYWAIT) {
					ch->statusflags &= ~EMPTYWAIT;
					tty_wakeup(tty);
				}
			}
			tty_kref_put(tty);
		}
next:
		globalwinon(ch);
		BUG_ON(!bc);
		/* Re-arm data events, then consume this event record by
		 * advancing the queue's out pointer (wrapping in place). */
		writew(1, &bc->idata);
		writew((tail + 4) & (IMAX - ISTART - 4), &chan0->mailbox->eout);
		globalwinon(chan0);
	} /* End while something in event queue */
}

/*
 * Post one command to the FEP's command queue and busy-wait (bounded by
 * FEPTIMEOUT) until the FEP has drained to within "ncmds" outstanding
 * commands.  Caller must have the global window selected.
 */
static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
		   int byte2, int ncmds, int bytecmd)
{
	unchar __iomem *memaddr;
	unsigned int head, cmdTail, cmdStart, cmdMax;
	long count;
	int n;

	/* This is the routine in which commands may be passed to the card. */
	if (ch->board->status == DISABLED)
		return;
	assertgwinon(ch);
	/* Remember head (As well as max) is just an offset not a base addr */
	head = readw(&ch->mailbox->cin);
	/* cmdStart is a base address */
	cmdStart = readw(&ch->mailbox->cstart);
	/*
	 * We do the addition below because we do not want a max pointer
	 * relative to cmdStart. We want a max pointer that points at the
	 * physical end of the command queue.
	 */
	cmdMax = (cmdStart + 4 + readw(&ch->mailbox->cmax));
	memaddr = ch->board->re_map_membase;

	/* Sanity: head must lie inside the queue and be 4-byte aligned
	 * (note 03 is octal three — low two bits clear). */
	if (head >= (cmdMax - cmdStart) || (head & 03)) {
		printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n",
		       __LINE__, cmd, head);
		printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n",
		       __LINE__, cmdMax, cmdStart);
		return;
	}
	if (bytecmd) {
		writeb(cmd, memaddr + head + cmdStart + 0);
		writeb(ch->channelnum, memaddr + head + cmdStart + 1);
		/* Below word_or_byte is bits to set */
		writeb(word_or_byte, memaddr + head + cmdStart + 2);
		/* Below byte2 is bits to reset */
		writeb(byte2, memaddr + head + cmdStart + 3);
	} else {
		writeb(cmd, memaddr + head + cmdStart + 0);
		writeb(ch->channelnum, memaddr + head + cmdStart + 1);
		writeb(word_or_byte, memaddr + head + cmdStart + 2);
	}
	/* Publish the command by advancing the in pointer (with wrap). */
	head = (head + 4) & (cmdMax - cmdStart - 4);
	writew(head, &ch->mailbox->cin);
	count = FEPTIMEOUT;

	for (;;) {
		count--;
		if (count == 0) {
			printk(KERN_ERR "<Error> - Fep not responding in fepcmd()\n");
			return;
		}
		head = readw(&ch->mailbox->cin);
		cmdTail = readw(&ch->mailbox->cout);
		n = (head - cmdTail) & (cmdMax - cmdStart - 4);
		/*
		 * Basically this will break when the FEP acknowledges the
		 * command by incrementing cmdTail (Making it equal to head).
		 */
		if (n <= ncmds * (sizeof(short) * 4))
			break;
	}
}

/*
 * Digi products use fields in their channels structures that are very similar
 * to the c_cflag and c_iflag fields typically found in UNIX termios
 * structures. The below three routines allow mappings between these hardware
 * "flags" and their respective Linux flags.
*/
/* Map Linux hardware-flow-control cflag bits (CRTSCTS and the digi_flags
 * *PACE options) onto this channel's board-specific modem-signal bitmask.
 * Also mirrors the result back into digi_flags (RTSPACE/CTSPACE). */
static unsigned termios2digi_h(struct channel *ch, unsigned cflag)
{
	unsigned res = 0;

	if (cflag & CRTSCTS) {
		ch->digiext.digi_flags |= (RTSPACE | CTSPACE);
		res |= ((ch->m_cts) | (ch->m_rts));
	}

	if (ch->digiext.digi_flags & RTSPACE)
		res |= ch->m_rts;

	if (ch->digiext.digi_flags & DTRPACE)
		res |= ch->m_dtr;

	if (ch->digiext.digi_flags & CTSPACE)
		res |= ch->m_cts;

	if (ch->digiext.digi_flags & DSRPACE)
		res |= ch->dsr;

	if (ch->digiext.digi_flags & DCDPACE)
		res |= ch->dcd;

	if (res & (ch->m_rts))
		ch->digiext.digi_flags |= RTSPACE;

	if (res & (ch->m_cts))
		ch->digiext.digi_flags |= CTSPACE;

	return res;
}

/* Map Linux input-mode iflag bits to the FEP's iflag word; the supported
 * subset passes through unchanged, plus DIGI_AIXON adds IAIXON. */
static unsigned termios2digi_i(struct channel *ch, unsigned iflag)
{
	unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
				INPCK | ISTRIP | IXON | IXANY | IXOFF);

	if (ch->digiext.digi_flags & DIGI_AIXON)
		res |= IAIXON;
	return res;
}

/* Map Linux control-mode cflag bits (baud, size, parity, stop bits) to the
 * FEP's cflag word, handling the Digi fast-baud table and the
 * 115200/230400 remapping described below. */
static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
{
	unsigned res = 0;

	if (cflag & CBAUDEX) {
		ch->digiext.digi_flags |= DIGI_FAST;
		/*
		 * HUPCL bit is used by FEP to indicate fast baud table is to
		 * be used.
		 */
		res |= FEP_HUPCL;
	} else
		ch->digiext.digi_flags &= ~DIGI_FAST;
	/*
	 * CBAUD has bit position 0x1000 set these days to indicate Linux
	 * baud rate remap. Digi hardware can't handle the bit assignment.
	 * (We use a different bit assignment for high speed.). Clear this
	 * bit out.
	 */
	res |= cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
	/*
	 * This gets a little confusing. The Digi cards have their own
	 * representation of c_cflags controlling baud rate. For the most part
	 * this is identical to the Linux implementation. However; Digi
	 * supports one rate (76800) that Linux doesn't. This means that the
	 * c_cflag entry that would normally mean 76800 for Digi actually means
	 * 115200 under Linux. Without the below mapping, a stty 115200 would
	 * only drive the board at 76800. Since the rate 230400 is also found
	 * after 76800, the same problem afflicts us when we choose a rate of
	 * 230400. Without the below modificiation stty 230400 would actually
	 * give us 115200.
	 *
	 * There are two additional differences. The Linux value for CLOCAL
	 * (0x800; 0004000) has no meaning to the Digi hardware. Also in later
	 * releases of Linux; the CBAUD define has CBAUDEX (0x1000; 0010000)
	 * ored into it (CBAUD = 0x100f as opposed to 0xf). CBAUDEX should be
	 * checked for a screened out prior to termios2digi_c returning. Since
	 * CLOCAL isn't used by the board this can be ignored as long as the
	 * returned value is used only by Digi hardware.
	 */
	if (cflag & CBAUDEX) {
		/*
		 * The below code is trying to guarantee that only baud rates
		 * 115200 and 230400 are remapped. We use exclusive or because
		 * the various baud rates share common bit positions and
		 * therefore can't be tested for easily.
		 */
		if ((!((cflag & 0x7) ^ (B115200 & ~CBAUDEX))) ||
		    (!((cflag & 0x7) ^ (B230400 & ~CBAUDEX))))
			res += 1;
	}
	return res;
}

/* Caller must hold the locks */
/* Push the tty's current termios settings down to the FEP: control flags,
 * input flags, carrier handling, hardware flow control, DTR/RTS state, and
 * the (auxiliary) XON/XOFF characters — sending commands only for values
 * that differ from the cached fep* shadows. */
static void epcaparam(struct tty_struct *tty, struct channel *ch)
{
	unsigned int cmdHead;
	struct ktermios *ts;
	struct board_chan __iomem *bc;
	unsigned mval, hflow, cflag, iflag;

	bc = ch->brdchan;
	epcaassert(bc != NULL, "bc out of range");

	assertgwinon(ch);
	ts = tty->termios;
	if ((ts->c_cflag & CBAUD) == 0) { /* Begin CBAUD detected */
		cmdHead = readw(&bc->rin);
		writew(cmdHead, &bc->rout);
		cmdHead = readw(&bc->tin);
		/* Changing baud in mid-stream transmission can be wonderful */
		/*
		 * Flush current transmit buffer by setting cmdTail pointer
		 * (tout) to cmdHead pointer (tin). Hopefully the transmit
		 * buffer is empty.
		 */
		fepcmd(ch, STOUT, (unsigned) cmdHead, 0, 0, 0);
		mval = 0;
	} else { /* Begin CBAUD not detected */
		/*
		 * c_cflags have changed but that change had nothing to do with
		 * BAUD. Propagate the change to the card.
*/
		cflag = termios2digi_c(ch, ts->c_cflag);
		if (cflag != ch->fepcflag) {
			ch->fepcflag = cflag;
			/* Set baud rate, char size, stop bits, parity */
			fepcmd(ch, SETCTRLFLAGS, (unsigned) cflag, 0, 0, 0);
		}
		/*
		 * If the user has not forced CLOCAL and if the device is not a
		 * CALLOUT device (Which is always CLOCAL) we set flags such
		 * that the driver will wait on carrier detect.
		 */
		if (ts->c_cflag & CLOCAL)
			clear_bit(ASYNCB_CHECK_CD, &ch->port.flags);
		else
			set_bit(ASYNCB_CHECK_CD, &ch->port.flags);
		mval = ch->m_dtr | ch->m_rts;
	} /* End CBAUD not detected */
	iflag = termios2digi_i(ch, ts->c_iflag);
	/* Check input mode flags */
	if (iflag != ch->fepiflag) {
		ch->fepiflag = iflag;
		/*
		 * Command sets channels iflag structure on the board. Such
		 * things as input soft flow control, handling of parity
		 * errors, and break handling are all set here.
		 *
		 * break handling, parity handling, input stripping,
		 * flow control chars
		 */
		fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0);
	}
	/*
	 * Set the board mint value for this channel. This will cause hardware
	 * events to be generated each time the DCD signal (Described in mint)
	 * changes.
	 */
	writeb(ch->dcd, &bc->mint);
	/* NOTE(review): the outer condition is redundant — the inner
	 * DIGI_FORCEDCD test alone decides; CLOCAL has no effect here. */
	if ((ts->c_cflag & CLOCAL) || (ch->digiext.digi_flags & DIGI_FORCEDCD))
		if (ch->digiext.digi_flags & DIGI_FORCEDCD)
			writeb(0, &bc->mint);
	ch->imodem = readb(&bc->mstat);
	hflow = termios2digi_h(ch, ts->c_cflag);
	if (hflow != ch->hflow) {
		ch->hflow = hflow;
		/*
		 * Hard flow control has been selected but the board is not
		 * using it. Activate hard flow control now.
		 */
		fepcmd(ch, SETHFLOW, hflow, 0xff, 0, 1);
	}
	mval ^= ch->modemfake & (mval ^ ch->modem);
	if (ch->omodem ^ mval) {
		ch->omodem = mval;
		/*
		 * The below command sets the DTR and RTS mstat structure. If
		 * hard flow control is NOT active these changes will drive the
		 * output of the actual DTR and RTS lines. If hard flow control
		 * is active, the changes will be saved in the mstat structure
		 * and only asserted when hard flow control is turned off.
		 */
		/* First reset DTR & RTS; then set them */
		fepcmd(ch, SETMODEM, 0, ((ch->m_dtr)|(ch->m_rts)), 0, 1);
		fepcmd(ch, SETMODEM, mval, 0, 0, 1);
	}
	if (ch->startc != ch->fepstartc || ch->stopc != ch->fepstopc) {
		ch->fepstartc = ch->startc;
		ch->fepstopc = ch->stopc;
		/*
		 * The XON / XOFF characters have changed; propagate these
		 * changes to the card.
		 */
		fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
	}
	if (ch->startca != ch->fepstartca || ch->stopca != ch->fepstopca) {
		ch->fepstartca = ch->startca;
		ch->fepstopca = ch->stopca;
		/*
		 * Similar to the above, this time the auxilarly XON / XOFF
		 * characters have changed; propagate these changes to the card.
		 */
		fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
	}
}

/* Caller holds lock */
/* Copy received bytes out of the card's circular receive buffer into the
 * tty flip buffers, handling wrap-around and overrun indication. */
static void receive_data(struct channel *ch, struct tty_struct *tty)
{
	unchar *rptr;
	struct ktermios *ts = NULL;
	struct board_chan __iomem *bc;
	int dataToRead, wrapgap, bytesAvailable;
	unsigned int tail, head;
	unsigned int wrapmask;

	/*
	 * This routine is called by doint when a receive data event has taken
	 * place.
	 */
	globalwinon(ch);
	if (ch->statusflags & RXSTOPPED)
		return;
	if (tty)
		ts = tty->termios;
	bc = ch->brdchan;
	BUG_ON(!bc);
	/* rxbufsize is a power of two, so this mask implements the wrap. */
	wrapmask = ch->rxbufsize - 1;

	/*
	 * Get the head and tail pointers to the receiver queue. Wrap the head
	 * pointer if it has reached the end of the buffer.
	 */
	head = readw(&bc->rin);
	head &= wrapmask;
	tail = readw(&bc->rout) & wrapmask;

	bytesAvailable = (head - tail) & wrapmask;
	if (bytesAvailable == 0)
		return;

	/* If CREAD bit is off or device not open, set TX tail to head */
	if (!tty || !ts || !(ts->c_cflag & CREAD)) {
		writew(head, &bc->rout);
		return;
	}

	if (tty_buffer_request_room(tty, bytesAvailable + 1) == 0)
		return;

	if (readb(&bc->orun)) {
		writeb(0, &bc->orun);
		printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n", tty->name);
		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
	}
	rxwinon(ch);
	while (bytesAvailable > 0) {
		/* Begin while there is data on the card */
		wrapgap = (head >= tail) ?
head - tail : ch->rxbufsize - tail;
		/*
		 * Even if head has wrapped around only report the amount of
		 * data to be equal to the size - tail. Remember memcpy can't
		 * automatically wrap around the receive buffer.
		 */
		dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable;
		/* Make sure we don't overflow the buffer */
		dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead);
		if (dataToRead == 0)
			break;
		/*
		 * Move data read from our card into the line disciplines
		 * buffer for translation if necessary.
		 */
		memcpy_fromio(rptr, ch->rxptr + tail, dataToRead);
		tail = (tail + dataToRead) & wrapmask;
		bytesAvailable -= dataToRead;
	} /* End while there is data on the card */
	globalwinon(ch);
	writew(tail, &bc->rout);
	/* Must be called with global data */
	tty_schedule_flip(tty);
}

/*
 * ioctl handler for the /dev/digi_ctl control device: board info queries
 * (DIGI_GETINFO), poller inhibit control (DIGI_POLLER), and second-stage
 * board initialisation (DIGI_INIT).
 */
static int info_ioctl(struct tty_struct *tty,
		      unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DIGI_GETINFO:
	{
		struct digi_info di;
		int brd;

		if (get_user(brd, (unsigned int __user *)arg))
			return -EFAULT;
		if (brd < 0 || brd >= num_cards || num_cards == 0)
			return -ENODEV;

		memset(&di, 0, sizeof(di));

		di.board = brd;
		di.status = boards[brd].status;
		di.type = boards[brd].type;
		di.numports = boards[brd].numports;
		/* Legacy fixups - just move along nothing to see */
		di.port = (unsigned char *)boards[brd].port;
		di.membase = (unsigned char *)boards[brd].membase;

		if (copy_to_user((void __user *)arg, &di, sizeof(di)))
			return -EFAULT;
		break;
	}

	case DIGI_POLLER:
	{
		/*
		 * FIXME(review): ">>" binds tighter than "&", so this actually
		 * computes arg & (0xff000000 >> 16) == arg & 0xff00, almost
		 * certainly not the intended (arg & 0xff000000) >> 16.  Left
		 * as-is here (documentation pass); a fix must also consider
		 * the userspace ABI that has grown around this behavior.
		 */
		int brd = arg & 0xff000000 >> 16;
		unsigned char state = arg & 0xff;

		if (brd < 0 || brd >= num_cards) {
			printk(KERN_ERR "epca: DIGI POLLER : brd not valid!\n");
			return -ENODEV;
		}
		digi_poller_inhibited = state;
		break;
	}

	case DIGI_INIT:
	{
		/*
		 * This call is made by the apps to complete the
		 * initialization of the board(s). This routine is
		 * responsible for setting the card to its initial
		 * state and setting the drivers control fields to the
		 * sutianle settings for the card in question.
		 */
		int crd;
		for (crd = 0; crd < num_cards; crd++)
			post_fep_init(crd);
		break;
	}

	default:
		return -ENOTTY;
	}
	return 0;
}

/* TIOCMGET: read the FEP's modem status byte and translate the
 * board-specific signal bits into TIOCM_* flags. */
static int pc_tiocmget(struct tty_struct *tty)
{
	struct channel *ch = tty->driver_data;
	struct board_chan __iomem *bc;
	unsigned int mstat, mflag = 0;
	unsigned long flags;

	if (ch)
		bc = ch->brdchan;
	else
		return -EINVAL;

	spin_lock_irqsave(&epca_lock, flags);
	globalwinon(ch);
	mstat = readb(&bc->mstat);
	memoff(ch);
	spin_unlock_irqrestore(&epca_lock, flags);

	if (mstat & ch->m_dtr)
		mflag |= TIOCM_DTR;
	if (mstat & ch->m_rts)
		mflag |= TIOCM_RTS;
	if (mstat & ch->m_cts)
		mflag |= TIOCM_CTS;
	if (mstat & ch->dsr)
		mflag |= TIOCM_DSR;
	if (mstat & ch->m_ri)
		mflag |= TIOCM_RI;
	if (mstat & ch->dcd)
		mflag |= TIOCM_CD;
	return mflag;
}

/* TIOCMSET/BIS/BIC: record requested DTR/RTS changes in the modem/modemfake
 * shadow masks, then let epcaparam() push them to the card. */
static int pc_tiocmset(struct tty_struct *tty,
		       unsigned int set, unsigned int clear)
{
	struct channel *ch = tty->driver_data;
	unsigned long flags;

	if (!ch)
		return -EINVAL;

	spin_lock_irqsave(&epca_lock, flags);
	/*
	 * I think this modemfake stuff is broken. It doesn't correctly reflect
	 * the behaviour desired by the TIOCM* ioctls. Therefore this is
	 * probably broken.
	 */
	if (set & TIOCM_RTS) {
		ch->modemfake |= ch->m_rts;
		ch->modem |= ch->m_rts;
	}
	if (set & TIOCM_DTR) {
		ch->modemfake |= ch->m_dtr;
		ch->modem |= ch->m_dtr;
	}
	if (clear & TIOCM_RTS) {
		ch->modemfake |= ch->m_rts;
		ch->modem &= ~ch->m_rts;
	}
	if (clear & TIOCM_DTR) {
		ch->modemfake |= ch->m_dtr;
		ch->modem &= ~ch->m_dtr;
	}
	globalwinon(ch);
	/*
	 * The below routine generally sets up parity, baud, flow control
	 * issues, etc.... It effect both control flags and input flags.
*/
	epcaparam(tty, ch);
	memoff(ch);
	spin_unlock_irqrestore(&epca_lock, flags);
	return 0;
}

/*
 * Per-port ioctl handler: legacy modem-line ioctls (TIOCMODG/TIOCMODS,
 * TIOCSDTR/TIOCCDTR) plus the Digi-specific digi_t and flow-character
 * get/set operations.  Returns 0, a negative errno, or -ENOIOCTLCMD to let
 * the tty core handle anything unrecognised.
 */
static int pc_ioctl(struct tty_struct *tty,
		    unsigned int cmd, unsigned long arg)
{
	digiflow_t dflow;
	unsigned long flags;
	unsigned int mflag, mstat;
	unsigned char startc, stopc;
	struct board_chan __iomem *bc;
	struct channel *ch = tty->driver_data;
	void __user *argp = (void __user *)arg;

	if (ch)
		bc = ch->brdchan;
	else
		return -EINVAL;

	switch (cmd) {
	case TIOCMODG:
		mflag = pc_tiocmget(tty);
		if (put_user(mflag, (unsigned long __user *)argp))
			return -EFAULT;
		break;
	case TIOCMODS:
		if (get_user(mstat, (unsigned __user *)argp))
			return -EFAULT;
		return pc_tiocmset(tty, mstat, ~mstat);
	case TIOCSDTR:
		spin_lock_irqsave(&epca_lock, flags);
		ch->omodem |= ch->m_dtr;
		globalwinon(ch);
		fepcmd(ch, SETMODEM, ch->m_dtr, 0, 10, 1);
		memoff(ch);
		spin_unlock_irqrestore(&epca_lock, flags);
		break;
	case TIOCCDTR:
		spin_lock_irqsave(&epca_lock, flags);
		ch->omodem &= ~ch->m_dtr;
		globalwinon(ch);
		fepcmd(ch, SETMODEM, 0, ch->m_dtr, 10, 1);
		memoff(ch);
		spin_unlock_irqrestore(&epca_lock, flags);
		break;
	case DIGI_GETA:
		if (copy_to_user(argp, &ch->digiext, sizeof(digi_t)))
			return -EFAULT;
		break;
	case DIGI_SETAW:
	case DIGI_SETAF:
		/* SETAW waits for output to drain first; SETAF additionally
		 * flushes the line-discipline buffer. */
		if (cmd == DIGI_SETAW) {
			/* Setup an event to indicate when the transmit buffer empties */
			spin_lock_irqsave(&epca_lock, flags);
			setup_empty_event(tty, ch);
			spin_unlock_irqrestore(&epca_lock, flags);
			tty_wait_until_sent(tty, 0);
		} else {
			/* ldisc lock already held in ioctl */
			if (tty->ldisc->ops->flush_buffer)
				tty->ldisc->ops->flush_buffer(tty);
		}
		/* Fall Thru */
	case DIGI_SETA:
		if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
			return -EFAULT;

		if (ch->digiext.digi_flags & DIGI_ALTPIN) {
			ch->dcd = ch->m_dsr;
			ch->dsr = ch->m_dcd;
		} else {
			ch->dcd = ch->m_dcd;
			ch->dsr = ch->m_dsr;
		}

		spin_lock_irqsave(&epca_lock, flags);
		globalwinon(ch);

		/*
		 * The below routine generally sets up parity, baud, flow
		 * control issues, etc.... It effect both control flags and
		 * input flags.
		 */
		epcaparam(tty, ch);
		memoff(ch);
		spin_unlock_irqrestore(&epca_lock, flags);
		break;

	case DIGI_GETFLOW:
	case DIGI_GETAFLOW:
		spin_lock_irqsave(&epca_lock, flags);
		globalwinon(ch);
		if (cmd == DIGI_GETFLOW) {
			dflow.startc = readb(&bc->startc);
			dflow.stopc = readb(&bc->stopc);
		} else {
			dflow.startc = readb(&bc->startca);
			dflow.stopc = readb(&bc->stopca);
		}
		memoff(ch);
		spin_unlock_irqrestore(&epca_lock, flags);

		if (copy_to_user(argp, &dflow, sizeof(dflow)))
			return -EFAULT;
		break;

	case DIGI_SETAFLOW:
	case DIGI_SETFLOW:
		if (cmd == DIGI_SETFLOW) {
			startc = ch->startc;
			stopc = ch->stopc;
		} else {
			startc = ch->startca;
			stopc = ch->stopca;
		}

		if (copy_from_user(&dflow, argp, sizeof(dflow)))
			return -EFAULT;

		/* Only touch the card if the characters actually changed. */
		if (dflow.startc != startc || dflow.stopc != stopc) {
			/* Begin if setflow toggled */
			spin_lock_irqsave(&epca_lock, flags);
			globalwinon(ch);

			if (cmd == DIGI_SETFLOW) {
				ch->fepstartc = ch->startc = dflow.startc;
				ch->fepstopc = ch->stopc = dflow.stopc;
				fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
			} else {
				ch->fepstartca = ch->startca = dflow.startc;
				ch->fepstopca = ch->stopca = dflow.stopc;
				fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
			}

			if (ch->statusflags & TXSTOPPED)
				pc_start(tty);

			memoff(ch);
			spin_unlock_irqrestore(&epca_lock, flags);
		} /* End if setflow toggled */
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

/* set_termios: re-apply the new termios to the card, then handle CRTSCTS
 * being dropped and CLOCAL being raised. */
static void pc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct channel *ch;
	unsigned long flags;
	/*
	 * verifyChannel returns the channel from the tty struct if it is
	 * valid. This serves as a sanity check.
*/ ch = verifyChannel(tty); if (ch != NULL) { /* Begin if channel valid */ spin_lock_irqsave(&epca_lock, flags); globalwinon(ch); epcaparam(tty, ch); memoff(ch); spin_unlock_irqrestore(&epca_lock, flags); if ((old_termios->c_cflag & CRTSCTS) && ((tty->termios->c_cflag & CRTSCTS) == 0)) tty->hw_stopped = 0; if (!(old_termios->c_cflag & CLOCAL) && (tty->termios->c_cflag & CLOCAL)) wake_up_interruptible(&ch->port.open_wait); } /* End if channel valid */ } static void do_softint(struct work_struct *work) { struct channel *ch = container_of(work, struct channel, tqueue); /* Called in response to a modem change event */ if (ch && ch->magic == EPCA_MAGIC) { struct tty_struct *tty = tty_port_tty_get(&ch->port); if (tty && tty->driver_data) { if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { tty_hangup(tty); wake_up_interruptible(&ch->port.open_wait); clear_bit(ASYNCB_NORMAL_ACTIVE, &ch->port.flags); } } tty_kref_put(tty); } } /* * pc_stop and pc_start provide software flow control to the routine and the * pc_ioctl routine. */ static void pc_stop(struct tty_struct *tty) { struct channel *ch; unsigned long flags; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. */ ch = verifyChannel(tty); if (ch != NULL) { spin_lock_irqsave(&epca_lock, flags); if ((ch->statusflags & TXSTOPPED) == 0) { /* Begin if transmit stop requested */ globalwinon(ch); /* STOP transmitting now !! */ fepcmd(ch, PAUSETX, 0, 0, 0, 0); ch->statusflags |= TXSTOPPED; memoff(ch); } /* End if transmit stop requested */ spin_unlock_irqrestore(&epca_lock, flags); } } static void pc_start(struct tty_struct *tty) { struct channel *ch; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. 
*/ ch = verifyChannel(tty); if (ch != NULL) { unsigned long flags; spin_lock_irqsave(&epca_lock, flags); /* Just in case output was resumed because of a change in Digi-flow */ if (ch->statusflags & TXSTOPPED) { /* Begin transmit resume requested */ struct board_chan __iomem *bc; globalwinon(ch); bc = ch->brdchan; if (ch->statusflags & LOWWAIT) writeb(1, &bc->ilow); /* Okay, you can start transmitting again... */ fepcmd(ch, RESUMETX, 0, 0, 0, 0); ch->statusflags &= ~TXSTOPPED; memoff(ch); } /* End transmit resume requested */ spin_unlock_irqrestore(&epca_lock, flags); } } /* * The below routines pc_throttle and pc_unthrottle are used to slow (And * resume) the receipt of data into the kernels receive buffers. The exact * occurrence of this depends on the size of the kernels receive buffer and * what the 'watermarks' are set to for that buffer. See the n_ttys.c file for * more details. */ static void pc_throttle(struct tty_struct *tty) { struct channel *ch; unsigned long flags; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. */ ch = verifyChannel(tty); if (ch != NULL) { spin_lock_irqsave(&epca_lock, flags); if ((ch->statusflags & RXSTOPPED) == 0) { globalwinon(ch); fepcmd(ch, PAUSERX, 0, 0, 0, 0); ch->statusflags |= RXSTOPPED; memoff(ch); } spin_unlock_irqrestore(&epca_lock, flags); } } static void pc_unthrottle(struct tty_struct *tty) { struct channel *ch; unsigned long flags; /* * verifyChannel returns the channel from the tty struct if it is * valid. This serves as a sanity check. 
*/ ch = verifyChannel(tty); if (ch != NULL) { /* Just in case output was resumed because of a change in Digi-flow */ spin_lock_irqsave(&epca_lock, flags); if (ch->statusflags & RXSTOPPED) { globalwinon(ch); fepcmd(ch, RESUMERX, 0, 0, 0, 0); ch->statusflags &= ~RXSTOPPED; memoff(ch); } spin_unlock_irqrestore(&epca_lock, flags); } } static int pc_send_break(struct tty_struct *tty, int msec) { struct channel *ch = tty->driver_data; unsigned long flags; if (msec == -1) msec = 0xFFFF; else if (msec > 0xFFFE) msec = 0xFFFE; else if (msec < 1) msec = 1; spin_lock_irqsave(&epca_lock, flags); globalwinon(ch); /* * Maybe I should send an infinite break here, schedule() for msec * amount of time, and then stop the break. This way, the user can't * screw up the FEP by causing digi_send_break() to be called (i.e. via * an ioctl()) more than once in msec amount of time. * Try this for now... */ fepcmd(ch, SENDBREAK, msec, 0, 10, 0); memoff(ch); spin_unlock_irqrestore(&epca_lock, flags); return 0; } /* Caller MUST hold the lock */ static void setup_empty_event(struct tty_struct *tty, struct channel *ch) { struct board_chan __iomem *bc = ch->brdchan; globalwinon(ch); ch->statusflags |= EMPTYWAIT; /* * When set the iempty flag request a event to be generated when the * transmit buffer is empty (If there is no BREAK in progress). */ writeb(1, &bc->iempty); memoff(ch); } #ifndef MODULE static void __init epca_setup(char *str, int *ints) { struct board_info board; int index, loop, last; char *temp, *t2; unsigned len; /* * If this routine looks a little strange it is because it is only * called if a LILO append command is given to boot the kernel with * parameters. In this way, we can provide the user a method of * changing his board configuration without rebuilding the kernel. 
*/ if (!liloconfig) liloconfig = 1; memset(&board, 0, sizeof(board)); /* Assume the data is int first, later we can change it */ /* I think that array position 0 of ints holds the number of args */ for (last = 0, index = 1; index <= ints[0]; index++) switch (index) { /* Begin parse switch */ case 1: board.status = ints[index]; /* * We check for 2 (As opposed to 1; because 2 is a flag * instructing the driver to ignore epcaconfig.) For * this reason we check for 2. */ if (board.status == 2) { /* Begin ignore epcaconfig as well as lilo cmd line */ nbdevs = 0; num_cards = 0; return; } /* End ignore epcaconfig as well as lilo cmd line */ if (board.status > 2) { printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n", board.status); invalid_lilo_config = 1; setup_error_code |= INVALID_BOARD_STATUS; return; } last = index; break; case 2: board.type = ints[index]; if (board.type >= PCIXEM) { printk(KERN_ERR "epca_setup: Invalid board type 0x%x\n", board.type); invalid_lilo_config = 1; setup_error_code |= INVALID_BOARD_TYPE; return; } last = index; break; case 3: board.altpin = ints[index]; if (board.altpin > 1) { printk(KERN_ERR "epca_setup: Invalid board altpin 0x%x\n", board.altpin); invalid_lilo_config = 1; setup_error_code |= INVALID_ALTPIN; return; } last = index; break; case 4: board.numports = ints[index]; if (board.numports < 2 || board.numports > 256) { printk(KERN_ERR "epca_setup: Invalid board numports 0x%x\n", board.numports); invalid_lilo_config = 1; setup_error_code |= INVALID_NUM_PORTS; return; } nbdevs += board.numports; last = index; break; case 5: board.port = ints[index]; if (ints[index] <= 0) { printk(KERN_ERR "epca_setup: Invalid io port 0x%x\n", (unsigned int)board.port); invalid_lilo_config = 1; setup_error_code |= INVALID_PORT_BASE; return; } last = index; break; case 6: board.membase = ints[index]; if (ints[index] <= 0) { printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n", (unsigned int)board.membase); invalid_lilo_config = 1; 
setup_error_code |= INVALID_MEM_BASE; return; } last = index; break; default: printk(KERN_ERR "<Error> - epca_setup: Too many integer parms\n"); return; } /* End parse switch */ while (str && *str) { /* Begin while there is a string arg */ /* find the next comma or terminator */ temp = str; /* While string is not null, and a comma hasn't been found */ while (*temp && (*temp != ',')) temp++; if (!*temp) temp = NULL; else *temp++ = 0; /* Set index to the number of args + 1 */ index = last + 1; switch (index) { case 1: len = strlen(str); if (strncmp("Disable", str, len) == 0) board.status = 0; else if (strncmp("Enable", str, len) == 0) board.status = 1; else { printk(KERN_ERR "epca_setup: Invalid status %s\n", str); invalid_lilo_config = 1; setup_error_code |= INVALID_BOARD_STATUS; return; } last = index; break; case 2: for (loop = 0; loop < EPCA_NUM_TYPES; loop++) if (strcmp(board_desc[loop], str) == 0) break; /* * If the index incremented above refers to a * legitimate board type set it here. */ if (index < EPCA_NUM_TYPES) board.type = loop; else { printk(KERN_ERR "epca_setup: Invalid board type: %s\n", str); invalid_lilo_config = 1; setup_error_code |= INVALID_BOARD_TYPE; return; } last = index; break; case 3: len = strlen(str); if (strncmp("Disable", str, len) == 0) board.altpin = 0; else if (strncmp("Enable", str, len) == 0) board.altpin = 1; else { printk(KERN_ERR "epca_setup: Invalid altpin %s\n", str); invalid_lilo_config = 1; setup_error_code |= INVALID_ALTPIN; return; } last = index; break; case 4: t2 = str; while (isdigit(*t2)) t2++; if (*t2) { printk(KERN_ERR "epca_setup: Invalid port count %s\n", str); invalid_lilo_config = 1; setup_error_code |= INVALID_NUM_PORTS; return; } /* * There is not a man page for simple_strtoul but the * code can be found in vsprintf.c. The first argument * is the string to translate (To an unsigned long * obviously), the second argument can be the address * of any character variable or a NULL. 
If a variable * is given, the end pointer of the string will be * stored in that variable; if a NULL is given the end * pointer will not be returned. The last argument is * the base to use. If a 0 is indicated, the routine * will attempt to determine the proper base by looking * at the values prefix (A '0' for octal, a 'x' for * hex, etc ... If a value is given it will use that * value as the base. */ board.numports = simple_strtoul(str, NULL, 0); nbdevs += board.numports; last = index; break; case 5: t2 = str; while (isxdigit(*t2)) t2++; if (*t2) { printk(KERN_ERR "epca_setup: Invalid i/o address %s\n", str); invalid_lilo_config = 1; setup_error_code |= INVALID_PORT_BASE; return; } board.port = simple_strtoul(str, NULL, 16); last = index; break; case 6: t2 = str; while (isxdigit(*t2)) t2++; if (*t2) { printk(KERN_ERR "epca_setup: Invalid memory base %s\n", str); invalid_lilo_config = 1; setup_error_code |= INVALID_MEM_BASE; return; } board.membase = simple_strtoul(str, NULL, 16); last = index; break; default: printk(KERN_ERR "epca: Too many string parms\n"); return; } str = temp; } /* End while there is a string arg */ if (last < 6) { printk(KERN_ERR "epca: Insufficient parms specified\n"); return; } /* I should REALLY validate the stuff here */ /* Copies our local copy of board into boards */ memcpy((void *)&boards[num_cards], (void *)&board, sizeof(board)); /* Does this get called once per lilo arg are what ? 
*/ printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n", num_cards, board_desc[board.type], board.numports, (int)board.port, (unsigned int) board.membase); num_cards++; } static int __init epca_real_setup(char *str) { int ints[11]; epca_setup(get_options(str, 11, ints), ints); return 1; } __setup("digiepca", epca_real_setup); #endif enum epic_board_types { brd_xr = 0, brd_xem, brd_cx, brd_xrj, }; /* indexed directly by epic_board_types enum */ static struct { unsigned char board_type; unsigned bar_idx; /* PCI base address region */ } epca_info_tbl[] = { { PCIXR, 0, }, { PCIXEM, 0, }, { PCICX, 0, }, { PCIXRJ, 2, }, }; static int __devinit epca_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int board_num = -1; int board_idx, info_idx = ent->driver_data; unsigned long addr; if (pci_enable_device(pdev)) return -EIO; board_num++; board_idx = board_num + num_cards; if (board_idx >= MAXBOARDS) goto err_out; addr = pci_resource_start(pdev, epca_info_tbl[info_idx].bar_idx); if (!addr) { printk(KERN_ERR PFX "PCI region #%d not available (size 0)\n", epca_info_tbl[info_idx].bar_idx); goto err_out; } boards[board_idx].status = ENABLED; boards[board_idx].type = epca_info_tbl[info_idx].board_type; boards[board_idx].numports = 0x0; boards[board_idx].port = addr + PCI_IO_OFFSET; boards[board_idx].membase = addr; if (!request_mem_region(addr + PCI_IO_OFFSET, 0x200000, "epca")) { printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", 0x200000, addr + PCI_IO_OFFSET); goto err_out; } boards[board_idx].re_map_port = ioremap_nocache(addr + PCI_IO_OFFSET, 0x200000); if (!boards[board_idx].re_map_port) { printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n", 0x200000, addr + PCI_IO_OFFSET); goto err_out_free_pciio; } if (!request_mem_region(addr, 0x200000, "epca")) { printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", 0x200000, addr); goto err_out_free_iounmap; } boards[board_idx].re_map_membase = ioremap_nocache(addr, 0x200000); 
if (!boards[board_idx].re_map_membase) { printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n", 0x200000, addr + PCI_IO_OFFSET); goto err_out_free_memregion; } /* * I don't know what the below does, but the hardware guys say its * required on everything except PLX (In this case XRJ). */ if (info_idx != brd_xrj) { pci_write_config_byte(pdev, 0x40, 0); pci_write_config_byte(pdev, 0x46, 0); } return 0; err_out_free_memregion: release_mem_region(addr, 0x200000); err_out_free_iounmap: iounmap(boards[board_idx].re_map_port); err_out_free_pciio: release_mem_region(addr + PCI_IO_OFFSET, 0x200000); err_out: return -ENODEV; } static struct pci_device_id epca_pci_tbl[] = { { PCI_VENDOR_DIGI, PCI_DEVICE_XR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, brd_xr }, { PCI_VENDOR_DIGI, PCI_DEVICE_XEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, brd_xem }, { PCI_VENDOR_DIGI, PCI_DEVICE_CX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, brd_cx }, { PCI_VENDOR_DIGI, PCI_DEVICE_XRJ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, brd_xrj }, { 0, } }; MODULE_DEVICE_TABLE(pci, epca_pci_tbl); static int __init init_PCI(void) { memset(&epca_driver, 0, sizeof(epca_driver)); epca_driver.name = "epca"; epca_driver.id_table = epca_pci_tbl; epca_driver.probe = epca_init_one; return pci_register_driver(&epca_driver); } MODULE_LICENSE("GPL");
gpl-2.0
jiangyanfeng/android_kernel_huawei_G300
drivers/media/dvb/dvb-usb/technisat-usb2.c
2535
19872
/* * Linux driver for Technisat DVB-S/S2 USB 2.0 device * * Copyright (C) 2010 Patrick Boettcher, * Kernel Labs Inc. PO Box 745, St James, NY 11780 * * Development was sponsored by Technisat Digital UK Limited, whose * registered office is Witan Gate House 500 - 600 Witan Gate West, * Milton Keynes, MK9 1SH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * THIS PROGRAM IS PROVIDED "AS IS" AND BOTH THE COPYRIGHT HOLDER AND * TECHNISAT DIGITAL UK LTD DISCLAIM ALL WARRANTIES WITH REGARD TO * THIS PROGRAM INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY OR * FITNESS FOR A PARTICULAR PURPOSE. NEITHER THE COPYRIGHT HOLDER * NOR TECHNISAT DIGITAL UK LIMITED SHALL BE LIABLE FOR ANY SPECIAL, * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR * IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS PROGRAM. See the * GNU General Public License for more details. */ #define DVB_USB_LOG_PREFIX "technisat-usb2" #include "dvb-usb.h" #include "stv6110x.h" #include "stv090x.h" /* module parameters */ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (bit-mask: 1=info,2=eeprom,4=i2c,8=rc)." 
\ DVB_USB_DEBUG_STATUS); /* disables all LED control command and * also does not start the signal polling thread */ static int disable_led_control; module_param(disable_led_control, int, 0444); MODULE_PARM_DESC(disable_led_control, "disable LED control of the device " "(default: 0 - LED control is active)."); /* device private data */ struct technisat_usb2_state { struct dvb_usb_device *dev; struct delayed_work green_led_work; u8 power_state; u16 last_scan_code; }; /* debug print helpers */ #define deb_info(args...) dprintk(debug, 0x01, args) #define deb_eeprom(args...) dprintk(debug, 0x02, args) #define deb_i2c(args...) dprintk(debug, 0x04, args) #define deb_rc(args...) dprintk(debug, 0x08, args) /* vendor requests */ #define SET_IFCLK_TO_EXTERNAL_TSCLK_VENDOR_REQUEST 0xB3 #define SET_FRONT_END_RESET_VENDOR_REQUEST 0xB4 #define GET_VERSION_INFO_VENDOR_REQUEST 0xB5 #define SET_GREEN_LED_VENDOR_REQUEST 0xB6 #define SET_RED_LED_VENDOR_REQUEST 0xB7 #define GET_IR_DATA_VENDOR_REQUEST 0xB8 #define SET_LED_TIMER_DIVIDER_VENDOR_REQUEST 0xB9 #define SET_USB_REENUMERATION 0xBA /* i2c-access methods */ #define I2C_SPEED_100KHZ_BIT 0x40 #define I2C_STATUS_NAK 7 #define I2C_STATUS_OK 8 static int technisat_usb2_i2c_access(struct usb_device *udev, u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) { u8 b[64]; int ret, actual_length; deb_i2c("i2c-access: %02x, tx: ", device_addr); debug_dump(tx, txlen, deb_i2c); deb_i2c(" "); if (txlen > 62) { err("i2c TX buffer can't exceed 62 bytes (dev 0x%02x)", device_addr); txlen = 62; } if (rxlen > 62) { err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)", device_addr); txlen = 62; } b[0] = I2C_SPEED_100KHZ_BIT; b[1] = device_addr << 1; if (rx != NULL) { b[0] |= rxlen; b[1] |= 1; } memcpy(&b[2], tx, txlen); ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01), b, 2 + txlen, NULL, 1000); if (ret < 0) { err("i2c-error: out failed %02x = %d", device_addr, ret); return -ENODEV; } ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 0x01), b, 64, 
&actual_length, 1000); if (ret < 0) { err("i2c-error: in failed %02x = %d", device_addr, ret); return -ENODEV; } if (b[0] != I2C_STATUS_OK) { err("i2c-error: %02x = %d", device_addr, b[0]); /* handle tuner-i2c-nak */ if (!(b[0] == I2C_STATUS_NAK && device_addr == 0x60 /* && device_is_technisat_usb2 */)) return -ENODEV; } deb_i2c("status: %d, ", b[0]); if (rx != NULL) { memcpy(rx, &b[2], rxlen); deb_i2c("rx (%d): ", rxlen); debug_dump(rx, rxlen, deb_i2c); } deb_i2c("\n"); return 0; } static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { int ret = 0, i; struct dvb_usb_device *d = i2c_get_adapdata(adap); /* Ensure nobody else hits the i2c bus while we're sending our sequence of messages, (such as the remote control thread) */ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { if (i+1 < num && msg[i+1].flags & I2C_M_RD) { ret = technisat_usb2_i2c_access(d->udev, msg[i+1].addr, msg[i].buf, msg[i].len, msg[i+1].buf, msg[i+1].len); if (ret != 0) break; i++; } else { ret = technisat_usb2_i2c_access(d->udev, msg[i].addr, msg[i].buf, msg[i].len, NULL, 0); if (ret != 0) break; } } if (ret == 0) ret = i; mutex_unlock(&d->i2c_mutex); return ret; } static u32 technisat_usb2_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm technisat_usb2_i2c_algo = { .master_xfer = technisat_usb2_i2c_xfer, .functionality = technisat_usb2_i2c_func, }; #if 0 static void technisat_usb2_frontend_reset(struct usb_device *udev) { usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SET_FRONT_END_RESET_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 10, 0, NULL, 0, 500); } #endif /* LED control */ enum technisat_usb2_led_state { LED_OFF, LED_BLINK, LED_ON, LED_UNDEFINED }; static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum technisat_usb2_led_state state) { int ret; u8 led[8] = { red ? 
SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST, 0 }; if (disable_led_control && state != LED_OFF) return 0; switch (state) { case LED_ON: led[1] = 0x82; break; case LED_BLINK: led[1] = 0x82; if (red) { led[2] = 0x02; led[3] = 10; led[4] = 10; } else { led[2] = 0xff; led[3] = 50; led[4] = 50; } led[5] = 1; break; default: case LED_OFF: led[1] = 0x80; break; } if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, led, sizeof(led), 500); mutex_unlock(&d->i2c_mutex); return ret; } static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green) { int ret; u8 b = 0; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), SET_LED_TIMER_DIVIDER_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, (red << 8) | green, 0, &b, 1, 500); mutex_unlock(&d->i2c_mutex); return ret; } static void technisat_usb2_green_led_control(struct work_struct *work) { struct technisat_usb2_state *state = container_of(work, struct technisat_usb2_state, green_led_work.work); struct dvb_frontend *fe = state->dev->adapter[0].fe; if (state->power_state == 0) goto schedule; if (fe != NULL) { enum fe_status status; if (fe->ops.read_status(fe, &status) != 0) goto schedule; if (status & FE_HAS_LOCK) { u32 ber; if (fe->ops.read_ber(fe, &ber) != 0) goto schedule; if (ber > 1000) technisat_usb2_set_led(state->dev, 0, LED_BLINK); else technisat_usb2_set_led(state->dev, 0, LED_ON); } else technisat_usb2_set_led(state->dev, 0, LED_OFF); } schedule: schedule_delayed_work(&state->green_led_work, msecs_to_jiffies(500)); } /* method to find out whether the firmware has to be downloaded or not */ static int technisat_usb2_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) 
{ int ret; u8 version[3]; /* first select the interface */ if (usb_set_interface(udev, 0, 1) != 0) err("could not set alternate setting to 0"); else info("set alternate setting"); *cold = 0; /* by default do not download a firmware - just in case something is wrong */ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), GET_VERSION_INFO_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, sizeof(version), 500); if (ret < 0) *cold = 1; else { info("firmware version: %d.%d", version[1], version[2]); *cold = 0; } return 0; } /* power control */ static int technisat_usb2_power_ctrl(struct dvb_usb_device *d, int level) { struct technisat_usb2_state *state = d->priv; state->power_state = level; if (disable_led_control) return 0; /* green led is turned off in any case - will be turned on when tuning */ technisat_usb2_set_led(d, 0, LED_OFF); /* red led is turned on all the time */ technisat_usb2_set_led(d, 1, LED_ON); return 0; } /* mac address reading - from the eeprom */ #if 0 static void technisat_usb2_eeprom_dump(struct dvb_usb_device *d) { u8 reg; u8 b[16]; int i, j; /* full EEPROM dump */ for (j = 0; j < 256 * 4; j += 16) { reg = j; if (technisat_usb2_i2c_access(d->udev, 0x50 + j / 256, &reg, 1, b, 16) != 0) break; deb_eeprom("EEPROM: %01x%02x: ", j / 256, reg); for (i = 0; i < 16; i++) deb_eeprom("%02x ", b[i]); deb_eeprom("\n"); } } #endif static u8 technisat_usb2_calc_lrc(const u8 *b, u16 length) { u8 lrc = 0; while (--length) lrc ^= *b++; return lrc; } static int technisat_usb2_eeprom_lrc_read(struct dvb_usb_device *d, u16 offset, u8 *b, u16 length, u8 tries) { u8 bo = offset & 0xff; struct i2c_msg msg[] = { { .addr = 0x50 | ((offset >> 8) & 0x3), .buf = &bo, .len = 1 }, { .addr = 0x50 | ((offset >> 8) & 0x3), .flags = I2C_M_RD, .buf = b, .len = length } }; while (tries--) { int status; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) break; status = technisat_usb2_calc_lrc(b, length - 1) == b[length - 1]; if (status) return 0; } return -EREMOTEIO; } #define 
EEPROM_MAC_START 0x3f8 #define EEPROM_MAC_TOTAL 8 static int technisat_usb2_read_mac_address(struct dvb_usb_device *d, u8 mac[]) { u8 buf[EEPROM_MAC_TOTAL]; if (technisat_usb2_eeprom_lrc_read(d, EEPROM_MAC_START, buf, EEPROM_MAC_TOTAL, 4) != 0) return -ENODEV; memcpy(mac, buf, 6); return 0; } /* frontend attach */ static int technisat_usb2_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { int i; u8 gpio[3] = { 0 }; /* 0 = 2, 1 = 3, 2 = 4 */ gpio[2] = 1; /* high - voltage ? */ switch (voltage) { case SEC_VOLTAGE_13: gpio[0] = 1; break; case SEC_VOLTAGE_18: gpio[0] = 1; gpio[1] = 1; break; default: case SEC_VOLTAGE_OFF: break; } for (i = 0; i < 3; i++) if (stv090x_set_gpio(fe, i+2, 0, gpio[i], 0) != 0) return -EREMOTEIO; return 0; } static struct stv090x_config technisat_usb2_stv090x_config = { .device = STV0903, .demod_mode = STV090x_SINGLE, .clk_mode = STV090x_CLK_EXT, .xtal = 8000000, .address = 0x68, .ts1_mode = STV090x_TSMODE_DVBCI, .ts1_clk = 13400000, .ts1_tei = 1, .repeater_level = STV090x_RPTLEVEL_64, .tuner_bbgain = 6, }; static struct stv6110x_config technisat_usb2_stv6110x_config = { .addr = 0x60, .refclk = 16000000, .clk_div = 2, }; static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a) { struct usb_device *udev = a->dev->udev; int ret; a->fe = dvb_attach(stv090x_attach, &technisat_usb2_stv090x_config, &a->dev->i2c_adap, STV090x_DEMODULATOR_0); if (a->fe) { struct stv6110x_devctl *ctl; ctl = dvb_attach(stv6110x_attach, a->fe, &technisat_usb2_stv6110x_config, &a->dev->i2c_adap); if (ctl) { technisat_usb2_stv090x_config.tuner_init = ctl->tuner_init; technisat_usb2_stv090x_config.tuner_sleep = ctl->tuner_sleep; technisat_usb2_stv090x_config.tuner_set_mode = ctl->tuner_set_mode; technisat_usb2_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency; technisat_usb2_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency; technisat_usb2_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth; 
technisat_usb2_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth; technisat_usb2_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain; technisat_usb2_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain; technisat_usb2_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk; technisat_usb2_stv090x_config.tuner_get_status = ctl->tuner_get_status; /* call the init function once to initialize tuner's clock output divider and demod's master clock */ if (a->fe->ops.init) a->fe->ops.init(a->fe); if (mutex_lock_interruptible(&a->dev->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SET_IFCLK_TO_EXTERNAL_TSCLK_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, NULL, 0, 500); mutex_unlock(&a->dev->i2c_mutex); if (ret != 0) err("could not set IF_CLK to external"); a->fe->ops.set_voltage = technisat_usb2_set_voltage; /* if everything was successful assign a nice name to the frontend */ strlcpy(a->fe->ops.info.name, a->dev->desc->name, sizeof(a->fe->ops.info.name)); } else { dvb_frontend_detach(a->fe); a->fe = NULL; } } technisat_usb2_set_led_timer(a->dev, 1, 1); return a->fe == NULL ? 
-ENODEV : 0; } /* Remote control */ /* the device is giving providing raw IR-signals to the host mapping * it only to one remote control is just the default implementation */ #define NOMINAL_IR_BIT_TRANSITION_TIME_US 889 #define NOMINAL_IR_BIT_TIME_US (2 * NOMINAL_IR_BIT_TRANSITION_TIME_US) #define FIRMWARE_CLOCK_TICK 83333 #define FIRMWARE_CLOCK_DIVISOR 256 #define IR_PERCENT_TOLERANCE 15 #define NOMINAL_IR_BIT_TRANSITION_TICKS ((NOMINAL_IR_BIT_TRANSITION_TIME_US * 1000 * 1000) / FIRMWARE_CLOCK_TICK) #define NOMINAL_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICKS / FIRMWARE_CLOCK_DIVISOR) #define NOMINAL_IR_BIT_TIME_TICKS ((NOMINAL_IR_BIT_TIME_US * 1000 * 1000) / FIRMWARE_CLOCK_TICK) #define NOMINAL_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICKS / FIRMWARE_CLOCK_DIVISOR) #define MINIMUM_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICK_COUNT - ((NOMINAL_IR_BIT_TRANSITION_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) #define MAXIMUM_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICK_COUNT + ((NOMINAL_IR_BIT_TRANSITION_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) #define MINIMUM_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICK_COUNT - ((NOMINAL_IR_BIT_TIME_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) #define MAXIMUM_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICK_COUNT + ((NOMINAL_IR_BIT_TIME_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) static int technisat_usb2_get_ir(struct dvb_usb_device *d) { u8 buf[62], *b; int ret; struct ir_raw_event ev; buf[0] = GET_IR_DATA_VENDOR_REQUEST; buf[1] = 0x08; buf[2] = 0x8f; buf[3] = MINIMUM_IR_BIT_TRANSITION_TICK_COUNT; buf[4] = MAXIMUM_IR_BIT_TIME_TICK_COUNT; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), GET_IR_DATA_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, buf, 5, 500); if (ret < 0) goto unlock; buf[1] = 0; buf[2] = 0; ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), GET_IR_DATA_VENDOR_REQUEST, 
USB_TYPE_VENDOR | USB_DIR_IN, 0x8080, 0, buf, sizeof(buf), 500); unlock: mutex_unlock(&d->i2c_mutex); if (ret < 0) return ret; if (ret == 1) return 0; /* no key pressed */ /* decoding */ b = buf+1; #if 0 deb_rc("RC: %d ", ret); debug_dump(b, ret, deb_rc); #endif ev.pulse = 0; while (1) { ev.pulse = !ev.pulse; ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000; ir_raw_event_store(d->rc_dev, &ev); b++; if (*b == 0xff) { ev.pulse = 0; ev.duration = 888888*2; ir_raw_event_store(d->rc_dev, &ev); break; } } ir_raw_event_handle(d->rc_dev); return 1; } static int technisat_usb2_rc_query(struct dvb_usb_device *d) { int ret = technisat_usb2_get_ir(d); if (ret < 0) return ret; if (ret == 0) return 0; if (!disable_led_control) technisat_usb2_set_led(d, 1, LED_BLINK); return 0; } /* DVB-USB and USB stuff follows */ static struct usb_device_id technisat_usb2_id_table[] = { { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_DVB_S2) }, { 0 } /* Terminating entry */ }; /* device description */ static struct dvb_usb_device_properties technisat_usb2_devices = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .identify_state = technisat_usb2_identify_state, .firmware = "dvb-usb-SkyStar_USB_HD_FW_v17_63.HEX.fw", .size_of_priv = sizeof(struct technisat_usb2_state), .i2c_algo = &technisat_usb2_i2c_algo, .power_ctrl = technisat_usb2_power_ctrl, .read_mac_address = technisat_usb2_read_mac_address, .num_adapters = 1, .adapter = { { .frontend_attach = technisat_usb2_frontend_attach, .stream = { .type = USB_ISOC, .count = 8, .endpoint = 0x2, .u = { .isoc = { .framesperurb = 32, .framesize = 2048, .interval = 3, } } }, .size_of_priv = 0, }, }, .num_device_descs = 1, .devices = { { "Technisat SkyStar USB HD (DVB-S/S2)", { &technisat_usb2_id_table[0], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_TECHNISAT_USB2, .module_name = "technisat-usb2", .rc_query = technisat_usb2_rc_query, .allowed_protos = RC_TYPE_ALL, .driver_type = 
RC_DRIVER_IR_RAW, } }; static int technisat_usb2_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dvb_usb_device *dev; if (dvb_usb_device_init(intf, &technisat_usb2_devices, THIS_MODULE, &dev, adapter_nr) != 0) return -ENODEV; if (dev) { struct technisat_usb2_state *state = dev->priv; state->dev = dev; if (!disable_led_control) { INIT_DELAYED_WORK(&state->green_led_work, technisat_usb2_green_led_control); schedule_delayed_work(&state->green_led_work, msecs_to_jiffies(500)); } } return 0; } static void technisat_usb2_disconnect(struct usb_interface *intf) { struct dvb_usb_device *dev = usb_get_intfdata(intf); /* work and stuff was only created when the device is is hot-state */ if (dev != NULL) { struct technisat_usb2_state *state = dev->priv; if (state != NULL) { cancel_delayed_work_sync(&state->green_led_work); flush_scheduled_work(); } } dvb_usb_device_exit(intf); } static struct usb_driver technisat_usb2_driver = { .name = "dvb_usb_technisat_usb2", .probe = technisat_usb2_probe, .disconnect = technisat_usb2_disconnect, .id_table = technisat_usb2_id_table, }; /* module stuff */ static int __init technisat_usb2_module_init(void) { int result = usb_register(&technisat_usb2_driver); if (result) { err("usb_register failed. Code %d", result); return result; } return 0; } static void __exit technisat_usb2_module_exit(void) { usb_deregister(&technisat_usb2_driver); } module_init(technisat_usb2_module_init); module_exit(technisat_usb2_module_exit); MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>"); MODULE_DESCRIPTION("Driver for Technisat DVB-S/S2 USB 2.0 device"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHV-E110S
drivers/usb/host/whci/hcd.c
3047
8650
/* * Wireless Host Controller (WHC) driver. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/uwb/umc.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" /* * One time initialization. * * Nothing to do here. */ static int whc_reset(struct usb_hcd *usb_hcd) { return 0; } /* * Start the wireless host controller. * * Start device notification. * * Put hc into run state, set DNTS parameters. */ static int whc_start(struct usb_hcd *usb_hcd) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct whc *whc = wusbhc_to_whc(wusbhc); u8 bcid; int ret; mutex_lock(&wusbhc->mutex); le_writel(WUSBINTR_GEN_CMD_DONE | WUSBINTR_HOST_ERR | WUSBINTR_ASYNC_SCHED_SYNCED | WUSBINTR_DNTS_INT | WUSBINTR_ERR_INT | WUSBINTR_INT, whc->base + WUSBINTR); /* set cluster ID */ bcid = wusb_cluster_id_get(); ret = whc_set_cluster_id(whc, bcid); if (ret < 0) goto out; wusbhc->cluster_id = bcid; /* start HC */ whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN); usb_hcd->uses_new_polling = 1; set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags); usb_hcd->state = HC_STATE_RUNNING; out: mutex_unlock(&wusbhc->mutex); return ret; } /* * Stop the wireless host controller. * * Stop device notification. * * Wait for pending transfer to stop? Put hc into stop state? 
*/ static void whc_stop(struct usb_hcd *usb_hcd) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct whc *whc = wusbhc_to_whc(wusbhc); mutex_lock(&wusbhc->mutex); /* stop HC */ le_writel(0, whc->base + WUSBINTR); whc_write_wusbcmd(whc, WUSBCMD_RUN, 0); whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, WUSBSTS_HCHALTED, WUSBSTS_HCHALTED, 100, "HC to halt"); wusb_cluster_id_put(wusbhc->cluster_id); mutex_unlock(&wusbhc->mutex); } static int whc_get_frame_number(struct usb_hcd *usb_hcd) { /* Frame numbers are not applicable to WUSB. */ return -ENOSYS; } /* * Queue an URB to the ASL or PZL */ static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t mem_flags) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct whc *whc = wusbhc_to_whc(wusbhc); int ret; switch (usb_pipetype(urb->pipe)) { case PIPE_INTERRUPT: ret = pzl_urb_enqueue(whc, urb, mem_flags); break; case PIPE_ISOCHRONOUS: dev_err(&whc->umc->dev, "isochronous transfers unsupported\n"); ret = -ENOTSUPP; break; case PIPE_CONTROL: case PIPE_BULK: default: ret = asl_urb_enqueue(whc, urb, mem_flags); break; }; return ret; } /* * Remove a queued URB from the ASL or PZL. */ static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct whc *whc = wusbhc_to_whc(wusbhc); int ret; switch (usb_pipetype(urb->pipe)) { case PIPE_INTERRUPT: ret = pzl_urb_dequeue(whc, urb, status); break; case PIPE_ISOCHRONOUS: ret = -ENOTSUPP; break; case PIPE_CONTROL: case PIPE_BULK: default: ret = asl_urb_dequeue(whc, urb, status); break; }; return ret; } /* * Wait for all URBs to the endpoint to be completed, then delete the * qset. 
*/ static void whc_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct whc *whc = wusbhc_to_whc(wusbhc); struct whc_qset *qset; qset = ep->hcpriv; if (qset) { ep->hcpriv = NULL; if (usb_endpoint_xfer_bulk(&ep->desc) || usb_endpoint_xfer_control(&ep->desc)) asl_qset_delete(whc, qset); else pzl_qset_delete(whc, qset); } } static void whc_endpoint_reset(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct whc *whc = wusbhc_to_whc(wusbhc); struct whc_qset *qset; unsigned long flags; spin_lock_irqsave(&whc->lock, flags); qset = ep->hcpriv; if (qset) { qset->remove = 1; qset->reset = 1; if (usb_endpoint_xfer_bulk(&ep->desc) || usb_endpoint_xfer_control(&ep->desc)) queue_work(whc->workqueue, &whc->async_work); else queue_work(whc->workqueue, &whc->periodic_work); } spin_unlock_irqrestore(&whc->lock, flags); } static struct hc_driver whc_hc_driver = { .description = "whci-hcd", .product_desc = "Wireless host controller", .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd), .irq = whc_int_handler, .flags = HCD_USB2, .reset = whc_reset, .start = whc_start, .stop = whc_stop, .get_frame_number = whc_get_frame_number, .urb_enqueue = whc_urb_enqueue, .urb_dequeue = whc_urb_dequeue, .endpoint_disable = whc_endpoint_disable, .endpoint_reset = whc_endpoint_reset, .hub_status_data = wusbhc_rh_status_data, .hub_control = wusbhc_rh_control, .bus_suspend = wusbhc_rh_suspend, .bus_resume = wusbhc_rh_resume, .start_port_reset = wusbhc_rh_start_port_reset, }; static int whc_probe(struct umc_dev *umc) { int ret = -ENOMEM; struct usb_hcd *usb_hcd; struct wusbhc *wusbhc = NULL; struct whc *whc = NULL; struct device *dev = &umc->dev; usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci"); if (usb_hcd == NULL) { dev_err(dev, "unable to create hcd\n"); goto error; } usb_hcd->wireless = 1; usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary 
*/ wusbhc = usb_hcd_to_wusbhc(usb_hcd); whc = wusbhc_to_whc(wusbhc); whc->umc = umc; ret = whc_init(whc); if (ret) goto error; wusbhc->dev = dev; wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent); if (!wusbhc->uwb_rc) { ret = -ENODEV; dev_err(dev, "cannot get radio controller\n"); goto error; } if (whc->n_devices > USB_MAXCHILDREN) { dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n", whc->n_devices); wusbhc->ports_max = USB_MAXCHILDREN; } else wusbhc->ports_max = whc->n_devices; wusbhc->mmcies_max = whc->n_mmc_ies; wusbhc->start = whc_wusbhc_start; wusbhc->stop = whc_wusbhc_stop; wusbhc->mmcie_add = whc_mmcie_add; wusbhc->mmcie_rm = whc_mmcie_rm; wusbhc->dev_info_set = whc_dev_info_set; wusbhc->bwa_set = whc_bwa_set; wusbhc->set_num_dnts = whc_set_num_dnts; wusbhc->set_ptk = whc_set_ptk; wusbhc->set_gtk = whc_set_gtk; ret = wusbhc_create(wusbhc); if (ret) goto error_wusbhc_create; ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED); if (ret) { dev_err(dev, "cannot add HCD: %d\n", ret); goto error_usb_add_hcd; } ret = wusbhc_b_create(wusbhc); if (ret) { dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret); goto error_wusbhc_b_create; } whc_dbg_init(whc); return 0; error_wusbhc_b_create: usb_remove_hcd(usb_hcd); error_usb_add_hcd: wusbhc_destroy(wusbhc); error_wusbhc_create: uwb_rc_put(wusbhc->uwb_rc); error: whc_clean_up(whc); if (usb_hcd) usb_put_hcd(usb_hcd); return ret; } static void whc_remove(struct umc_dev *umc) { struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev); struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct whc *whc = wusbhc_to_whc(wusbhc); if (usb_hcd) { whc_dbg_clean_up(whc); wusbhc_b_destroy(wusbhc); usb_remove_hcd(usb_hcd); wusbhc_destroy(wusbhc); uwb_rc_put(wusbhc->uwb_rc); whc_clean_up(whc); usb_put_hcd(usb_hcd); } } static struct umc_driver whci_hc_driver = { .name = "whci-hcd", .cap_id = UMC_CAP_ID_WHCI_WUSB_HC, .probe = whc_probe, .remove = whc_remove, }; static int __init whci_hc_driver_init(void) { 
return umc_driver_register(&whci_hc_driver); } module_init(whci_hc_driver_init); static void __exit whci_hc_driver_exit(void) { umc_driver_unregister(&whci_hc_driver); } module_exit(whci_hc_driver_exit); /* PCI device ID's that we handle (so it gets loaded) */ static struct pci_device_id __used whci_hcd_id_table[] = { { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, { /* empty last entry */ } }; MODULE_DEVICE_TABLE(pci, whci_hcd_id_table); MODULE_DESCRIPTION("WHCI Wireless USB host controller driver"); MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); MODULE_LICENSE("GPL");
gpl-2.0
CyanHacker-Lollipop/kernel_htc_msm8974
drivers/input/touchscreen/tsc40.c
3559
3933
/* * TSC-40 serial touchscreen driver. It should be compatible with * TSC-10 and 25. * * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * License: GPLv2 as published by the FSF. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define PACKET_LENGTH 5 struct tsc_ser { struct input_dev *dev; struct serio *serio; u32 idx; unsigned char data[PACKET_LENGTH]; char phys[32]; }; static void tsc_process_data(struct tsc_ser *ptsc) { struct input_dev *dev = ptsc->dev; u8 *data = ptsc->data; u32 x; u32 y; x = ((data[1] & 0x03) << 8) | data[2]; y = ((data[3] & 0x03) << 8) | data[4]; input_report_abs(dev, ABS_X, x); input_report_abs(dev, ABS_Y, y); input_report_key(dev, BTN_TOUCH, 1); input_sync(dev); } static irqreturn_t tsc_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct tsc_ser *ptsc = serio_get_drvdata(serio); struct input_dev *dev = ptsc->dev; ptsc->data[ptsc->idx] = data; switch (ptsc->idx++) { case 0: if (unlikely((data & 0x3e) != 0x10)) { dev_dbg(&serio->dev, "unsynchronized packet start (0x%02x)\n", data); ptsc->idx = 0; } else if (!(data & 0x01)) { input_report_key(dev, BTN_TOUCH, 0); input_sync(dev); ptsc->idx = 0; } break; case 1: case 3: if (unlikely(data & 0xfc)) { dev_dbg(&serio->dev, "unsynchronized data 0x%02x at offset %d\n", data, ptsc->idx - 1); ptsc->idx = 0; } break; case 4: tsc_process_data(ptsc); ptsc->idx = 0; break; } return IRQ_HANDLED; } static int tsc_connect(struct serio *serio, struct serio_driver *drv) { struct tsc_ser *ptsc; struct input_dev *input_dev; int error; ptsc = kzalloc(sizeof(struct tsc_ser), GFP_KERNEL); input_dev = input_allocate_device(); if (!ptsc || !input_dev) { error = -ENOMEM; goto fail1; } ptsc->serio = serio; ptsc->dev = input_dev; snprintf(ptsc->phys, sizeof(ptsc->phys), "%s/input0", serio->phys); input_dev->name = "TSC-10/25/40 Serial TouchScreen"; input_dev->phys = ptsc->phys; 
input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_TSC40; input_dev->id.product = 40; input_dev->id.version = 0x0001; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0); input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0); input_set_abs_params(ptsc->dev, ABS_PRESSURE, 0, 0, 0, 0); serio_set_drvdata(serio, ptsc); error = serio_open(serio, drv); if (error) goto fail2; error = input_register_device(ptsc->dev); if (error) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(ptsc); return error; } static void tsc_disconnect(struct serio *serio) { struct tsc_ser *ptsc = serio_get_drvdata(serio); serio_close(serio); input_unregister_device(ptsc->dev); kfree(ptsc); serio_set_drvdata(serio, NULL); } static struct serio_device_id tsc_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_TSC40, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, tsc_serio_ids); #define DRIVER_DESC "TSC-10/25/40 serial touchscreen driver" static struct serio_driver tsc_drv = { .driver = { .name = "tsc40", }, .description = DRIVER_DESC, .id_table = tsc_serio_ids, .interrupt = tsc_interrupt, .connect = tsc_connect, .disconnect = tsc_disconnect, }; static int __init tsc_ser_init(void) { return serio_register_driver(&tsc_drv); } module_init(tsc_ser_init); static void __exit tsc_exit(void) { serio_unregister_driver(&tsc_drv); } module_exit(tsc_exit); MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL v2");
gpl-2.0
austinkelleher/linux
drivers/gpu/drm/gma500/power.c
4071
9111
/************************************************************************** * Copyright (c) 2009-2011, Intel Corporation. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Benjamin Defnet <benjamin.r.defnet@intel.com> * Rajesh Poornachandran <rajesh.poornachandran@intel.com> * Massively reworked * Alan Cox <alan@linux.intel.com> */ #include "power.h" #include "psb_drv.h" #include "psb_reg.h" #include "psb_intel_reg.h" #include <linux/mutex.h> #include <linux/pm_runtime.h> static struct mutex power_mutex; /* Serialize power ops */ static spinlock_t power_ctrl_lock; /* Serialize power claim */ /** * gma_power_init - initialise power manager * @dev: our device * * Set up for power management tracking of our hardware. 
*/ void gma_power_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; /* FIXME: Move APM/OSPM base into relevant device code */ dev_priv->apm_base = dev_priv->apm_reg & 0xffff; dev_priv->ospm_base &= 0xffff; dev_priv->display_power = true; /* We start active */ dev_priv->display_count = 0; /* Currently no users */ dev_priv->suspended = false; /* And not suspended */ spin_lock_init(&power_ctrl_lock); mutex_init(&power_mutex); if (dev_priv->ops->init_pm) dev_priv->ops->init_pm(dev); } /** * gma_power_uninit - end power manager * @dev: device to end for * * Undo the effects of gma_power_init */ void gma_power_uninit(struct drm_device *dev) { pm_runtime_disable(&dev->pdev->dev); pm_runtime_set_suspended(&dev->pdev->dev); } /** * gma_suspend_display - suspend the display logic * @dev: our DRM device * * Suspend the display logic of the graphics interface */ static void gma_suspend_display(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; if (dev_priv->suspended) return; dev_priv->ops->save_regs(dev); dev_priv->ops->power_down(dev); dev_priv->display_power = false; } /** * gma_resume_display - resume display side logic * * Resume the display hardware restoring state and enabling * as necessary. 
*/ static void gma_resume_display(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); struct drm_psb_private *dev_priv = dev->dev_private; /* turn on the display power island */ dev_priv->ops->power_up(dev); dev_priv->suspended = false; dev_priv->display_power = true; PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL); pci_write_config_word(pdev, PSB_GMCH_CTRL, dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED); psb_gtt_restore(dev); /* Rebuild our GTT mappings */ dev_priv->ops->restore_regs(dev); } /** * gma_suspend_pci - suspend PCI side * @pdev: PCI device * * Perform the suspend processing on our PCI device state */ static void gma_suspend_pci(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); struct drm_psb_private *dev_priv = dev->dev_private; int bsm, vbt; if (dev_priv->suspended) return; pci_save_state(pdev); pci_read_config_dword(pdev, 0x5C, &bsm); dev_priv->regs.saveBSM = bsm; pci_read_config_dword(pdev, 0xFC, &vbt); dev_priv->regs.saveVBT = vbt; pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr); pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); dev_priv->suspended = true; } /** * gma_resume_pci - resume helper * @dev: our PCI device * * Perform the resume processing on our PCI device state - rewrite * register state and re-enable the PCI device */ static bool gma_resume_pci(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); struct drm_psb_private *dev_priv = dev->dev_private; int ret; if (!dev_priv->suspended) return true; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM); pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT); /* restoring MSI address and data in PCIx space */ pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr); pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, 
dev_priv->msi_data); ret = pci_enable_device(pdev); if (ret != 0) dev_err(&pdev->dev, "pci_enable failed: %d\n", ret); else dev_priv->suspended = false; return !dev_priv->suspended; } /** * gma_power_suspend - bus callback for suspend * @pdev: our PCI device * @state: suspend type * * Called back by the PCI layer during a suspend of the system. We * perform the necessary shut down steps and save enough state that * we can undo this when resume is called. */ int gma_power_suspend(struct device *_dev) { struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev); struct drm_device *dev = pci_get_drvdata(pdev); struct drm_psb_private *dev_priv = dev->dev_private; mutex_lock(&power_mutex); if (!dev_priv->suspended) { if (dev_priv->display_count) { mutex_unlock(&power_mutex); dev_err(dev->dev, "GPU hardware busy, cannot suspend\n"); return -EBUSY; } psb_irq_uninstall(dev); gma_suspend_display(dev); gma_suspend_pci(pdev); } mutex_unlock(&power_mutex); return 0; } /** * gma_power_resume - resume power * @pdev: PCI device * * Resume the PCI side of the graphics and then the displays */ int gma_power_resume(struct device *_dev) { struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev); struct drm_device *dev = pci_get_drvdata(pdev); mutex_lock(&power_mutex); gma_resume_pci(pdev); gma_resume_display(pdev); psb_irq_preinstall(dev); psb_irq_postinstall(dev); mutex_unlock(&power_mutex); return 0; } /** * gma_power_is_on - returne true if power is on * @dev: our DRM device * * Returns true if the display island power is on at this moment */ bool gma_power_is_on(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; return dev_priv->display_power; } /** * gma_power_begin - begin requiring power * @dev: our DRM device * @force_on: true to force power on * * Begin an action that requires the display power island is enabled. * We refcount the islands. 
*/ bool gma_power_begin(struct drm_device *dev, bool force_on) { struct drm_psb_private *dev_priv = dev->dev_private; int ret; unsigned long flags; spin_lock_irqsave(&power_ctrl_lock, flags); /* Power already on ? */ if (dev_priv->display_power) { dev_priv->display_count++; pm_runtime_get(&dev->pdev->dev); spin_unlock_irqrestore(&power_ctrl_lock, flags); return true; } if (force_on == false) goto out_false; /* Ok power up needed */ ret = gma_resume_pci(dev->pdev); if (ret == 0) { psb_irq_preinstall(dev); psb_irq_postinstall(dev); pm_runtime_get(&dev->pdev->dev); dev_priv->display_count++; spin_unlock_irqrestore(&power_ctrl_lock, flags); return true; } out_false: spin_unlock_irqrestore(&power_ctrl_lock, flags); return false; } /** * gma_power_end - end use of power * @dev: Our DRM device * * Indicate that one of our gma_power_begin() requested periods when * the diplay island power is needed has completed. */ void gma_power_end(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&power_ctrl_lock, flags); dev_priv->display_count--; WARN_ON(dev_priv->display_count < 0); spin_unlock_irqrestore(&power_ctrl_lock, flags); pm_runtime_put(&dev->pdev->dev); } int psb_runtime_suspend(struct device *dev) { return gma_power_suspend(dev); } int psb_runtime_resume(struct device *dev) { return gma_power_resume(dev); } int psb_runtime_idle(struct device *dev) { struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev)); struct drm_psb_private *dev_priv = drmdev->dev_private; if (dev_priv->display_count) return 0; else return 1; } int gma_power_thaw(struct device *_dev) { return gma_power_resume(_dev); } int gma_power_freeze(struct device *_dev) { return gma_power_suspend(_dev); } int gma_power_restore(struct device *_dev) { return gma_power_resume(_dev); }
gpl-2.0
lijinc/linux-source-3.11.0
drivers/pnp/pnpbios/proc.c
4327
8377
/* * /proc/bus/pnp interface for Plug and Play devices * * Written by David Hinds, dahinds@users.sourceforge.net * Modified by Thomas Hood * * The .../devices and .../<node> and .../boot/<node> files are * utilized by the lspnp and setpnp utilities, supplied with the * pcmcia-cs package. * http://pcmcia-cs.sourceforge.net * * The .../escd file is utilized by the lsescd utility written by * Gunther Mayer. * * The .../legacy_device_resources file is not used yet. * * The other files are human-readable. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/pnp.h> #include <linux/seq_file.h> #include <linux/init.h> #include <asm/uaccess.h> #include "pnpbios.h" static struct proc_dir_entry *proc_pnp = NULL; static struct proc_dir_entry *proc_pnp_boot = NULL; static int pnpconfig_proc_show(struct seq_file *m, void *v) { struct pnp_isa_config_struc pnps; if (pnp_bios_isapnp_config(&pnps)) return -EIO; seq_printf(m, "structure_revision %d\n" "number_of_CSNs %d\n" "ISA_read_data_port 0x%x\n", pnps.revision, pnps.no_csns, pnps.isa_rd_data_port); return 0; } static int pnpconfig_proc_open(struct inode *inode, struct file *file) { return single_open(file, pnpconfig_proc_show, NULL); } static const struct file_operations pnpconfig_proc_fops = { .owner = THIS_MODULE, .open = pnpconfig_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int escd_info_proc_show(struct seq_file *m, void *v) { struct escd_info_struc escd; if (pnp_bios_escd_info(&escd)) return -EIO; seq_printf(m, "min_ESCD_write_size %d\n" "ESCD_size %d\n" "NVRAM_base 0x%x\n", escd.min_escd_write_size, escd.escd_size, escd.nv_storage_base); return 0; } static int escd_info_proc_open(struct inode *inode, struct file *file) { return single_open(file, escd_info_proc_show, NULL); } static const struct file_operations escd_info_proc_fops = { .owner = THIS_MODULE, .open = escd_info_proc_open, 
.read = seq_read, .llseek = seq_lseek, .release = single_release, }; #define MAX_SANE_ESCD_SIZE (32*1024) static int escd_proc_show(struct seq_file *m, void *v) { struct escd_info_struc escd; char *tmpbuf; int escd_size; if (pnp_bios_escd_info(&escd)) return -EIO; /* sanity check */ if (escd.escd_size > MAX_SANE_ESCD_SIZE) { printk(KERN_ERR "PnPBIOS: %s: ESCD size reported by BIOS escd_info call is too great\n", __func__); return -EFBIG; } tmpbuf = kzalloc(escd.escd_size, GFP_KERNEL); if (!tmpbuf) return -ENOMEM; if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) { kfree(tmpbuf); return -EIO; } escd_size = (unsigned char)(tmpbuf[0]) + (unsigned char)(tmpbuf[1]) * 256; /* sanity check */ if (escd_size > MAX_SANE_ESCD_SIZE) { printk(KERN_ERR "PnPBIOS: %s: ESCD size reported by" " BIOS read_escd call is too great\n", __func__); kfree(tmpbuf); return -EFBIG; } seq_write(m, tmpbuf, escd_size); kfree(tmpbuf); return 0; } static int escd_proc_open(struct inode *inode, struct file *file) { return single_open(file, escd_proc_show, NULL); } static const struct file_operations escd_proc_fops = { .owner = THIS_MODULE, .open = escd_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int pnp_legacyres_proc_show(struct seq_file *m, void *v) { void *buf; buf = kmalloc(65536, GFP_KERNEL); if (!buf) return -ENOMEM; if (pnp_bios_get_stat_res(buf)) { kfree(buf); return -EIO; } seq_write(m, buf, 65536); kfree(buf); return 0; } static int pnp_legacyres_proc_open(struct inode *inode, struct file *file) { return single_open(file, pnp_legacyres_proc_show, NULL); } static const struct file_operations pnp_legacyres_proc_fops = { .owner = THIS_MODULE, .open = pnp_legacyres_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int pnp_devices_proc_show(struct seq_file *m, void *v) { struct pnp_bios_node *node; u8 nodenum; node = kzalloc(node_info.max_node_size, GFP_KERNEL); if (!node) return -ENOMEM; for (nodenum = 0; 
nodenum < 0xff;) { u8 thisnodenum = nodenum; if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node)) break; seq_printf(m, "%02x\t%08x\t%3phC\t%04x\n", node->handle, node->eisa_id, node->type_code, node->flags); if (nodenum <= thisnodenum) { printk(KERN_ERR "%s Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", "PnPBIOS: proc_read_devices:", (unsigned int)nodenum, (unsigned int)thisnodenum); break; } } kfree(node); return 0; } static int pnp_devices_proc_open(struct inode *inode, struct file *file) { return single_open(file, pnp_devices_proc_show, NULL); } static const struct file_operations pnp_devices_proc_fops = { .owner = THIS_MODULE, .open = pnp_devices_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int pnpbios_proc_show(struct seq_file *m, void *v) { void *data = m->private; struct pnp_bios_node *node; int boot = (long)data >> 8; u8 nodenum = (long)data; int len; node = kzalloc(node_info.max_node_size, GFP_KERNEL); if (!node) return -ENOMEM; if (pnp_bios_get_dev_node(&nodenum, boot, node)) { kfree(node); return -EIO; } len = node->size - sizeof(struct pnp_bios_node); seq_write(m, node->data, len); kfree(node); return 0; } static int pnpbios_proc_open(struct inode *inode, struct file *file) { return single_open(file, pnpbios_proc_show, PDE_DATA(inode)); } static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { void *data = PDE_DATA(file_inode(file)); struct pnp_bios_node *node; int boot = (long)data >> 8; u8 nodenum = (long)data; int ret = count; node = kzalloc(node_info.max_node_size, GFP_KERNEL); if (!node) return -ENOMEM; if (pnp_bios_get_dev_node(&nodenum, boot, node)) { ret = -EIO; goto out; } if (count != node->size - sizeof(struct pnp_bios_node)) { ret = -EINVAL; goto out; } if (copy_from_user(node->data, buf, count)) { ret = -EFAULT; goto out; } if (pnp_bios_set_dev_node(node->handle, boot, node) != 0) { ret = -EINVAL; goto out; } ret = 
count; out: kfree(node); return ret; } static const struct file_operations pnpbios_proc_fops = { .owner = THIS_MODULE, .open = pnpbios_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = pnpbios_proc_write, }; int pnpbios_interface_attach_device(struct pnp_bios_node *node) { char name[3]; sprintf(name, "%02x", node->handle); if (!proc_pnp) return -EIO; if (!pnpbios_dont_use_current_config) { proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_fops, (void *)(long)(node->handle)); } if (!proc_pnp_boot) return -EIO; if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_fops, (void *)(long)(node->handle + 0x100))) return 0; return -EIO; } /* * When this is called, pnpbios functions are assumed to * work and the pnpbios_dont_use_current_config flag * should already have been set to the appropriate value */ int __init pnpbios_proc_init(void) { proc_pnp = proc_mkdir("bus/pnp", NULL); if (!proc_pnp) return -EIO; proc_pnp_boot = proc_mkdir("boot", proc_pnp); if (!proc_pnp_boot) return -EIO; proc_create("devices", 0, proc_pnp, &pnp_devices_proc_fops); proc_create("configuration_info", 0, proc_pnp, &pnpconfig_proc_fops); proc_create("escd_info", 0, proc_pnp, &escd_info_proc_fops); proc_create("escd", S_IRUSR, proc_pnp, &escd_proc_fops); proc_create("legacy_device_resources", 0, proc_pnp, &pnp_legacyres_proc_fops); return 0; } void __exit pnpbios_proc_exit(void) { int i; char name[3]; if (!proc_pnp) return; for (i = 0; i < 0xff; i++) { sprintf(name, "%02x", i); if (!pnpbios_dont_use_current_config) remove_proc_entry(name, proc_pnp); remove_proc_entry(name, proc_pnp_boot); } remove_proc_entry("legacy_device_resources", proc_pnp); remove_proc_entry("escd", proc_pnp); remove_proc_entry("escd_info", proc_pnp); remove_proc_entry("configuration_info", proc_pnp); remove_proc_entry("devices", proc_pnp); remove_proc_entry("boot", proc_pnp); remove_proc_entry("bus/pnp", NULL); }
gpl-2.0
isimobile/android_kernel_sony_fusion3
drivers/net/irda/stir4200.c
4839
26362
/***************************************************************************** * * Filename: stir4200.c * Version: 0.4 * Description: Irda SigmaTel USB Dongle * Status: Experimental * Author: Stephen Hemminger <shemminger@osdl.org> * * Based on earlier driver by Paul Stewart <stewart@parc.com> * * Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at> * Copyright (C) 2001, Dag Brattli <dag@brattli.net> * Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com> * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ /* * This dongle does no framing, and requires polling to receive the * data. The STIr4200 has bulk in and out endpoints just like * usr-irda devices, but the data it sends and receives is raw; like * irtty, it needs to call the wrap and unwrap functions to add and * remove SOF/BOF and escape characters to/from the frame. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/time.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> #include <net/irda/wrapper.h> #include <net/irda/crc.h> #include <asm/byteorder.h> #include <asm/unaligned.h> MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); MODULE_DESCRIPTION("IrDA-USB Dongle Driver for SigmaTel STIr4200"); MODULE_LICENSE("GPL"); static int qos_mtt_bits = 0x07; /* 1 ms or more */ module_param(qos_mtt_bits, int, 0); MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time"); static int rx_sensitivity = 1; /* FIR 0..4, SIR 0..6 */ module_param(rx_sensitivity, int, 0); MODULE_PARM_DESC(rx_sensitivity, "Set Receiver sensitivity (0-6, 0 is most sensitive)"); static int tx_power = 0; /* 0 = highest ... 
3 = lowest */ module_param(tx_power, int, 0); MODULE_PARM_DESC(tx_power, "Set Transmitter power (0-3, 0 is highest power)"); #define STIR_IRDA_HEADER 4 #define CTRL_TIMEOUT 100 /* milliseconds */ #define TRANSMIT_TIMEOUT 200 /* milliseconds */ #define STIR_FIFO_SIZE 4096 #define FIFO_REGS_SIZE 3 enum FirChars { FIR_CE = 0x7d, FIR_XBOF = 0x7f, FIR_EOF = 0x7e, }; enum StirRequests { REQ_WRITE_REG = 0x00, REQ_READ_REG = 0x01, REQ_READ_ROM = 0x02, REQ_WRITE_SINGLE = 0x03, }; /* Register offsets */ enum StirRegs { REG_RSVD=0, REG_MODE, REG_PDCLK, REG_CTRL1, REG_CTRL2, REG_FIFOCTL, REG_FIFOLSB, REG_FIFOMSB, REG_DPLL, REG_IRDIG, REG_TEST=15, }; enum StirModeMask { MODE_FIR = 0x80, MODE_SIR = 0x20, MODE_ASK = 0x10, MODE_FASTRX = 0x08, MODE_FFRSTEN = 0x04, MODE_NRESET = 0x02, MODE_2400 = 0x01, }; enum StirPdclkMask { PDCLK_4000000 = 0x02, PDCLK_115200 = 0x09, PDCLK_57600 = 0x13, PDCLK_38400 = 0x1D, PDCLK_19200 = 0x3B, PDCLK_9600 = 0x77, PDCLK_2400 = 0xDF, }; enum StirCtrl1Mask { CTRL1_SDMODE = 0x80, CTRL1_RXSLOW = 0x40, CTRL1_TXPWD = 0x10, CTRL1_RXPWD = 0x08, CTRL1_SRESET = 0x01, }; enum StirCtrl2Mask { CTRL2_SPWIDTH = 0x08, CTRL2_REVID = 0x03, }; enum StirFifoCtlMask { FIFOCTL_DIR = 0x10, FIFOCTL_CLR = 0x08, FIFOCTL_EMPTY = 0x04, }; enum StirDiagMask { IRDIG_RXHIGH = 0x80, IRDIG_RXLOW = 0x40, }; enum StirTestMask { TEST_PLLDOWN = 0x80, TEST_LOOPIR = 0x40, TEST_LOOPUSB = 0x20, TEST_TSTENA = 0x10, TEST_TSTOSC = 0x0F, }; struct stir_cb { struct usb_device *usbdev; /* init: probe_irda */ struct net_device *netdev; /* network layer */ struct irlap_cb *irlap; /* The link layer we are binded to */ struct qos_info qos; unsigned speed; /* Current speed */ struct task_struct *thread; /* transmit thread */ struct sk_buff *tx_pending; void *io_buf; /* transmit/receive buffer */ __u8 *fifo_status; iobuff_t rx_buff; /* receive unwrap state machine */ struct timeval rx_time; int receiving; struct urb *rx_urb; }; /* These are the currently known USB ids */ static struct usb_device_id 
dongles[] = {
    /* SigmaTel, Inc, STIr4200 IrDA/USB Bridge */
    { USB_DEVICE(0x066f, 0x4200) },
    { }
};

MODULE_DEVICE_TABLE(usb, dongles);

/*
 * Send control message to set dongle register.
 * Synchronous vendor control request: the value rides in wValue,
 * the register offset in wIndex, with no data stage.
 * Returns 0 on success or a negative errno from usb_control_msg().
 */
static int write_reg(struct stir_cb *stir, __u16 reg, __u8 value)
{
	struct usb_device *dev = stir->usbdev;

	pr_debug("%s: write reg %d = 0x%x\n",
		 stir->netdev->name, reg, value);
	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			       REQ_WRITE_SINGLE,
			       USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
			       value, reg, NULL, 0,
			       CTRL_TIMEOUT);
}

/*
 * Send control message to read multiple registers.
 * Reads 'count' consecutive registers starting at 'reg' into 'data'.
 * Returns the number of bytes actually transferred, or a negative errno,
 * so callers must compare against the requested count (see fifo_txwait).
 */
static inline int read_reg(struct stir_cb *stir, __u16 reg,
		    __u8 *data, __u16 count)
{
	struct usb_device *dev = stir->usbdev;

	return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			       REQ_READ_REG,
			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       0, reg, data, count,
			       CTRL_TIMEOUT);
}

/* True only for the single FIR rate (4 Mbps) this chip supports */
static inline int isfir(u32 speed)
{
	return speed == 4000000;
}

/*
 * Prepare a FIR IrDA frame for transmission to the USB dongle.  The
 * FIR transmit frame is documented in the datasheet.  It consists of
 * a two byte 0x55 0xAA sequence, two little-endian length bytes, a
 * sequence of exactly 16 XBOF bytes of 0x7F, two BOF bytes of 0x7E,
 * then the data escaped as follows:
 *
 *    0x7D -> 0x7D 0x5D
 *    0x7E -> 0x7D 0x5E
 *    0x7F -> 0x7D 0x5F
 *
 * Then, 4 bytes of little endian (stuffed) FCS follow, then two
 * trailing EOF bytes of 0x7E.
 */
/*
 * Byte-stuff one character: FIR_CE/FIR_EOF/FIR_XBOF are escaped as
 * 0x7D followed by (c ^ IRDA_TRANS); all other bytes pass through.
 * Returns the advanced output pointer.
 */
static inline __u8 *stuff_fir(__u8 *p, __u8 c)
{
	switch(c) {
	case 0x7d:
	case 0x7e:
	case 0x7f:
		*p++ = 0x7d;
		c ^= IRDA_TRANS;
		/* fall through */
	default:
		*p++ = c;
	}
	return p;
}

/*
 * Take raw data in skb and put it wrapped into buf.
 * Builds the full FIR frame described above (header, XBOFs, BOFs,
 * stuffed payload, stuffed little-endian FCS, EOFs) and returns the
 * total number of bytes written to buf, header included.
 */
static unsigned wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
{
	__u8 *ptr = buf;
	__u32 fcs = ~(crc32_le(~0, skb->data, skb->len));
	__u16 wraplen;
	int i;

	/* Header */
	buf[0] = 0x55;
	buf[1] = 0xAA;

	ptr = buf + STIR_IRDA_HEADER;
	/* 16 XBOF pad bytes, then two BOF delimiters */
	memset(ptr, 0x7f, 16);
	ptr += 16;

	/* BOF */
	*ptr++ = 0x7e;
	*ptr++ = 0x7e;

	/* Address / Control / Information */
	for (i = 0; i < skb->len; i++)
		ptr = stuff_fir(ptr, skb->data[i]);

	/* FCS, little-endian, each byte stuffed like payload */
	ptr = stuff_fir(ptr, fcs & 0xff);
	ptr = stuff_fir(ptr, (fcs >> 8) & 0xff);
	ptr = stuff_fir(ptr, (fcs >> 16) & 0xff);
	ptr = stuff_fir(ptr, (fcs >> 24) & 0xff);

	/* EOFs */
	*ptr++ = 0x7e;
	*ptr++ = 0x7e;

	/* Total length, minus the header */
	wraplen = (ptr - buf) - STIR_IRDA_HEADER;
	buf[2] = wraplen & 0xff;
	buf[3] = (wraplen >> 8) & 0xff;

	return wraplen + STIR_IRDA_HEADER;
}

/*
 * SIR transmit wrapper: let the generic IrDA async wrapper stuff the
 * payload, then prepend the same 0x55 0xAA + length header the chip
 * expects.  Returns total bytes written, header included.
 */
static unsigned wrap_sir_skb(struct sk_buff *skb, __u8 *buf)
{
	__u16 wraplen;

	wraplen = async_wrap_skb(skb, buf + STIR_IRDA_HEADER,
				 STIR_FIFO_SIZE - STIR_IRDA_HEADER);
	buf[0] = 0x55;
	buf[1] = 0xAA;
	buf[2] = wraplen & 0xff;
	buf[3] = (wraplen >> 8) & 0xff;

	return wraplen + STIR_IRDA_HEADER;
}

/*
 * Frame is fully formed in the rx_buff so check crc
 * and pass up to irlap
 * setup for next receive
 */
static void fir_eof(struct stir_cb *stir)
{
	iobuff_t *rx_buff = &stir->rx_buff;
	/* last 4 bytes of the unstuffed frame are the FCS */
	int len = rx_buff->len - 4;
	struct sk_buff *skb, *nskb;
	__u32 fcs;

	if (unlikely(len <= 0)) {
		pr_debug("%s: short frame len %d\n",
			 stir->netdev->name, len);

		++stir->netdev->stats.rx_errors;
		++stir->netdev->stats.rx_length_errors;
		return;
	}

	fcs = ~(crc32_le(~0, rx_buff->data, len));
	if (fcs != get_unaligned_le32(rx_buff->data + len)) {
		pr_debug("crc error calc 0x%x len %d\n", fcs, len);
		stir->netdev->stats.rx_errors++;
		stir->netdev->stats.rx_crc_errors++;
		return;
	}

	/* if frame is short then just copy it */
	if (len < IRDA_RX_COPY_THRESHOLD) {
		nskb = dev_alloc_skb(len + 1);
		if (unlikely(!nskb)) {
			++stir->netdev->stats.rx_dropped;
			return;
		}
		skb_reserve(nskb, 1);
		skb = nskb;
		skb_copy_to_linear_data(nskb, rx_buff->data, len);
	} else {
		/* large frame: hand the current skb upstream and install
		 * a freshly allocated one as the new receive buffer */
		nskb = dev_alloc_skb(rx_buff->truesize);
		if (unlikely(!nskb)) {
			++stir->netdev->stats.rx_dropped;
			return;
		}
		skb_reserve(nskb, 1);
		skb = rx_buff->skb;
		rx_buff->skb = nskb;
		rx_buff->head = nskb->data;
	}

	skb_put(skb, len);

	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	skb->dev = stir->netdev;

	netif_rx(skb);

	stir->netdev->stats.rx_packets++;
	stir->netdev->stats.rx_bytes += len;

	/* reset unwrap buffer for the next frame */
	rx_buff->data = rx_buff->head;
	rx_buff->len = 0;
}

/*
 * Unwrap FIR stuffed data and bump it to IrLAP.
 * State machine over rx_buff->state:
 *   OUTSIDE_FRAME -> BEGIN_FRAME on EOF/BOF, BEGIN_FRAME -> INSIDE_FRAME
 *   on first data byte, LINK_ESCAPE handles the 0x7D escape, and EOF
 *   inside a frame completes it via fir_eof().
 */
static void stir_fir_chars(struct stir_cb *stir,
			   const __u8 *bytes, int len)
{
	iobuff_t *rx_buff = &stir->rx_buff;
	int i;

	for (i = 0; i < len; i++) {
		__u8 byte = bytes[i];

		switch(rx_buff->state) {
		case OUTSIDE_FRAME:
			/* ignore garbage till start of frame */
			if (unlikely(byte != FIR_EOF))
				continue;
			/* Now receiving frame */
			rx_buff->state = BEGIN_FRAME;

			/* Time to initialize receive buffer */
			rx_buff->data = rx_buff->head;
			rx_buff->len = 0;
			continue;

		case LINK_ESCAPE:
			if (byte == FIR_EOF) {
				pr_debug("%s: got EOF after escape\n",
					 stir->netdev->name);
				goto frame_error;
			}
			rx_buff->state = INSIDE_FRAME;
			byte ^= IRDA_TRANS;
			break;

		case BEGIN_FRAME:
			/* ignore multiple BOF/EOF */
			if (byte == FIR_EOF)
				continue;
			rx_buff->state = INSIDE_FRAME;
			rx_buff->in_frame = TRUE;

			/* fall through */
		case INSIDE_FRAME:
			switch(byte) {
			case FIR_CE:
				rx_buff->state = LINK_ESCAPE;
				continue;
			case FIR_XBOF:
				/* 0x7f is not used in this framing */
				pr_debug("%s: got XBOF without escape\n",
					 stir->netdev->name);
				goto frame_error;
			case FIR_EOF:
				rx_buff->state = OUTSIDE_FRAME;
				rx_buff->in_frame = FALSE;
				fir_eof(stir);
				continue;
			}
			break;
		}

		/* add byte to rx buffer */
		if (unlikely(rx_buff->len >= rx_buff->truesize)) {
			pr_debug("%s: fir frame exceeds %d\n",
				 stir->netdev->name, rx_buff->truesize);
			++stir->netdev->stats.rx_over_errors;
			goto error_recovery;
		}

		rx_buff->data[rx_buff->len++] = byte;
		continue;

	frame_error:
		++stir->netdev->stats.rx_frame_errors;

	error_recovery:
		++stir->netdev->stats.rx_errors;
		rx_buff->state = OUTSIDE_FRAME;
		rx_buff->in_frame = FALSE;
	}
}

/* Unwrap SIR stuffed data and bump it up to IrLAP */
static void stir_sir_chars(struct stir_cb *stir,
			   const __u8 *bytes, int len)
{
	int i;

	for (i = 0; i < len; i++)
		async_unwrap_char(stir->netdev, &stir->netdev->stats,
				  &stir->rx_buff, bytes[i]);
}

/* Dispatch raw USB data to the FIR or SIR unwrapper by current speed */
static inline void unwrap_chars(struct stir_cb *stir,
				const __u8 *bytes, int length)
{
	if (isfir(stir->speed))
		stir_fir_chars(stir, bytes, length);
	else
		stir_sir_chars(stir, bytes, length);
}

/* Mode parameters for each speed */
static const struct {
	unsigned speed;		/* bits per second */
	__u8 pdclk;		/* REG_PDCLK divisor for that rate */
} stir_modes[] = {
	{ 2400,    PDCLK_2400 },
	{ 9600,    PDCLK_9600 },
	{ 19200,   PDCLK_19200 },
	{ 38400,   PDCLK_38400 },
	{ 57600,   PDCLK_57600 },
	{ 115200,  PDCLK_115200 },
	{ 4000000, PDCLK_4000000 },
};

/*
 * Setup chip for speed.
 *  Called at startup to initialize the chip
 *  and on speed changes.
* * Note: Write multiple registers doesn't appear to work */ static int change_speed(struct stir_cb *stir, unsigned speed) { int i, err; __u8 mode; for (i = 0; i < ARRAY_SIZE(stir_modes); ++i) { if (speed == stir_modes[i].speed) goto found; } dev_warn(&stir->netdev->dev, "invalid speed %d\n", speed); return -EINVAL; found: pr_debug("speed change from %d to %d\n", stir->speed, speed); /* Reset modulator */ err = write_reg(stir, REG_CTRL1, CTRL1_SRESET); if (err) goto out; /* Undocumented magic to tweak the DPLL */ err = write_reg(stir, REG_DPLL, 0x15); if (err) goto out; /* Set clock */ err = write_reg(stir, REG_PDCLK, stir_modes[i].pdclk); if (err) goto out; mode = MODE_NRESET | MODE_FASTRX; if (isfir(speed)) mode |= MODE_FIR | MODE_FFRSTEN; else mode |= MODE_SIR; if (speed == 2400) mode |= MODE_2400; err = write_reg(stir, REG_MODE, mode); if (err) goto out; /* This resets TEMIC style transceiver if any. */ err = write_reg(stir, REG_CTRL1, CTRL1_SDMODE | (tx_power & 3) << 1); if (err) goto out; err = write_reg(stir, REG_CTRL1, (tx_power & 3) << 1); if (err) goto out; /* Reset sensitivity */ err = write_reg(stir, REG_CTRL2, (rx_sensitivity & 7) << 5); out: stir->speed = speed; return err; } /* * Called from net/core when new frame is available. */ static netdev_tx_t stir_hard_xmit(struct sk_buff *skb, struct net_device *netdev) { struct stir_cb *stir = netdev_priv(netdev); netif_stop_queue(netdev); /* the IRDA wrapping routines don't deal with non linear skb */ SKB_LINEAR_ASSERT(skb); skb = xchg(&stir->tx_pending, skb); wake_up_process(stir->thread); /* this should never happen unless stop/wakeup problem */ if (unlikely(skb)) { WARN_ON(1); dev_kfree_skb(skb); } return NETDEV_TX_OK; } /* * Wait for the transmit FIFO to have space for next data * * If space < 0 then wait till FIFO completely drains. * FYI: can take up to 13 seconds at 2400baud. 
*/ static int fifo_txwait(struct stir_cb *stir, int space) { int err; unsigned long count, status; unsigned long prev_count = 0x1fff; /* Read FIFO status and count */ for (;; prev_count = count) { err = read_reg(stir, REG_FIFOCTL, stir->fifo_status, FIFO_REGS_SIZE); if (unlikely(err != FIFO_REGS_SIZE)) { dev_warn(&stir->netdev->dev, "FIFO register read error: %d\n", err); return err; } status = stir->fifo_status[0]; count = (unsigned)(stir->fifo_status[2] & 0x1f) << 8 | stir->fifo_status[1]; pr_debug("fifo status 0x%lx count %lu\n", status, count); /* is fifo receiving already, or empty */ if (!(status & FIFOCTL_DIR) || (status & FIFOCTL_EMPTY)) return 0; if (signal_pending(current)) return -EINTR; /* shutting down? */ if (!netif_running(stir->netdev) || !netif_device_present(stir->netdev)) return -ESHUTDOWN; /* only waiting for some space */ if (space >= 0 && STIR_FIFO_SIZE - 4 > space + count) return 0; /* queue confused */ if (prev_count < count) break; /* estimate transfer time for remaining chars */ msleep((count * 8000) / stir->speed); } err = write_reg(stir, REG_FIFOCTL, FIFOCTL_CLR); if (err) return err; err = write_reg(stir, REG_FIFOCTL, 0); if (err) return err; return 0; } /* Wait for turnaround delay before starting transmit. */ static void turnaround_delay(const struct stir_cb *stir, long us) { long ticks; struct timeval now; if (us <= 0) return; do_gettimeofday(&now); if (now.tv_sec - stir->rx_time.tv_sec > 0) us -= USEC_PER_SEC; us -= now.tv_usec - stir->rx_time.tv_usec; if (us < 10) return; ticks = us / (1000000 / HZ); if (ticks > 0) schedule_timeout_interruptible(1 + ticks); else udelay(us); } /* * Start receiver by submitting a request to the receive pipe. * If nothing is available it will return after rx_interval. 
 */
static int receive_start(struct stir_cb *stir)
{
	/* reset state */
	stir->receiving = 1;

	stir->rx_buff.in_frame = FALSE;
	stir->rx_buff.state = OUTSIDE_FRAME;

	stir->rx_urb->status = 0;
	return usb_submit_urb(stir->rx_urb, GFP_KERNEL);
}

/* Stop all pending receive Urb's */
static void receive_stop(struct stir_cb *stir)
{
	stir->receiving = 0;
	usb_kill_urb(stir->rx_urb);

	/* an aborted mid-frame receive is accounted as a collision */
	if (stir->rx_buff.in_frame)
		stir->netdev->stats.collisions++;
}

/*
 * Wrap data in socket buffer and send it.
 * Stops the receiver (with media turnaround delay) if needed, wraps
 * the skb per current mode, waits for FIFO space unless this is the
 * first frame after turnaround, then pushes the data out over the
 * bulk pipe synchronously.  Called only from the transmit thread.
 */
static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
{
	unsigned wraplen;
	int first_frame = 0;

	/* if receiving, need to turnaround */
	if (stir->receiving) {
		receive_stop(stir);
		turnaround_delay(stir, irda_get_mtt(skb));
		first_frame = 1;
	}

	if (isfir(stir->speed))
		wraplen = wrap_fir_skb(skb, stir->io_buf);
	else
		wraplen = wrap_sir_skb(skb, stir->io_buf);

	/* check for space available in fifo */
	if (!first_frame)
		fifo_txwait(stir, wraplen);

	stir->netdev->stats.tx_packets++;
	stir->netdev->stats.tx_bytes += skb->len;
	stir->netdev->trans_start = jiffies;
	pr_debug("send %d (%d)\n", skb->len, wraplen);

	if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
			 stir->io_buf, wraplen,
			 NULL, TRANSMIT_TIMEOUT))
		stir->netdev->stats.tx_errors++;
}

/*
 * Transmit state machine thread.
 * Loops until kthread_stop(): handles freezer suspend (powers the
 * transceiver down and reprograms speed on resume), sends any pending
 * skb (honouring queued speed changes), restarts the receiver when
 * idle, and sleeps when there is nothing to do.
 */
static int stir_transmit_thread(void *arg)
{
	struct stir_cb *stir = arg;
	struct net_device *dev = stir->netdev;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
#ifdef CONFIG_PM
		/* if suspending, then power off and wait */
		if (unlikely(freezing(current))) {
			if (stir->receiving)
				receive_stop(stir);
			else
				fifo_txwait(stir, -1);

			write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);

			try_to_freeze();

			/* reprogram hardware after resume */
			if (change_speed(stir, stir->speed))
				break;
		}
#endif

		/* if something to send? */
		skb = xchg(&stir->tx_pending, NULL);
		if (skb) {
			unsigned new_speed = irda_get_next_speed(skb);
			netif_wake_queue(dev);

			/* zero-length skb means "speed change only" */
			if (skb->len > 0)
				stir_send(stir, skb);
			dev_kfree_skb(skb);

			if ((new_speed != -1) &&
			    (stir->speed != new_speed)) {
				if (fifo_txwait(stir, -1) ||
				    change_speed(stir, new_speed))
					break;
			}
			continue;
		}

		/* nothing to send? start receiving */
		if (!stir->receiving &&
		    irda_device_txqueue_empty(dev)) {
			/* Wait otherwise chip gets confused. */
			if (fifo_txwait(stir, -1))
				break;

			if (unlikely(receive_start(stir))) {
				if (net_ratelimit())
					dev_info(&dev->dev,
						 "%s: receive usb submit failed\n",
						 stir->netdev->name);
				stir->receiving = 0;
				msleep(10);
				continue;
			}
		}

		/* sleep if nothing to send */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

	}
	return 0;
}

/*
 * USB bulk receive completion callback.
 * Wakes up every ms (usb round trip) with wrapped
 * data.  Runs in interrupt context: resubmits its own urb with
 * GFP_ATOMIC unless the device or receiver is shutting down.
 */
static void stir_rcv_irq(struct urb *urb)
{
	struct stir_cb *stir = urb->context;
	int err;

	/* in process of stopping, just drop data */
	if (!netif_running(stir->netdev))
		return;

	/* unlink, shutdown, unplug, other nasties */
	if (urb->status != 0)
		return;

	if (urb->actual_length > 0) {
		pr_debug("receive %d\n", urb->actual_length);
		unwrap_chars(stir, urb->transfer_buffer,
			     urb->actual_length);

		/* timestamp for turnaround_delay() */
		do_gettimeofday(&stir->rx_time);
	}

	/* kernel thread is stopping receiver don't resubmit */
	if (!stir->receiving)
		return;

	/* resubmit existing urb */
	err = usb_submit_urb(urb, GFP_ATOMIC);

	/* in case of error, the kernel thread will restart us */
	if (err) {
		dev_warn(&stir->netdev->dev,
			 "usb receive submit error: %d\n", err);
		stir->receiving = 0;
		wake_up_process(stir->thread);
	}
}

/*
 * Function stir_net_open (dev)
 *
 *    Network device is taken up.
 Usually this is done by "ifconfig irda0 up"
 */
static int stir_net_open(struct net_device *netdev)
{
	struct stir_cb *stir = netdev_priv(netdev);
	int err;
	char hwname[16];

	/* clear any stalled endpoints left over from a previous session */
	err = usb_clear_halt(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1));
	if (err)
		goto err_out1;
	err = usb_clear_halt(stir->usbdev, usb_rcvbulkpipe(stir->usbdev, 2));
	if (err)
		goto err_out1;

	/* always come up at 9600 baud; IrLAP negotiates upward later */
	err = change_speed(stir, 9600);
	if (err)
		goto err_out1;

	err = -ENOMEM;

	/* Initialize for SIR/FIR to copy data directly into skb.  */
	stir->receiving = 0;
	stir->rx_buff.truesize = IRDA_SKB_MAX_MTU;
	stir->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
	if (!stir->rx_buff.skb)
		goto err_out1;

	skb_reserve(stir->rx_buff.skb, 1);
	stir->rx_buff.head = stir->rx_buff.skb->data;
	do_gettimeofday(&stir->rx_time);

	stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!stir->rx_urb)
		goto err_out2;

	stir->io_buf = kmalloc(STIR_FIFO_SIZE, GFP_KERNEL);
	if (!stir->io_buf)
		goto err_out3;

	usb_fill_bulk_urb(stir->rx_urb, stir->usbdev,
			  usb_rcvbulkpipe(stir->usbdev, 2),
			  stir->io_buf, STIR_FIFO_SIZE,
			  stir_rcv_irq, stir);

	stir->fifo_status = kmalloc(FIFO_REGS_SIZE, GFP_KERNEL);
	if (!stir->fifo_status)
		goto err_out4;

	/*
	 * Now that everything should be initialized properly,
	 * Open new IrLAP layer instance to take care of us...
	 * Note : will send immediately a speed change...
	 */
	sprintf(hwname, "usb#%d", stir->usbdev->devnum);
	stir->irlap = irlap_open(netdev, &stir->qos, hwname);
	if (!stir->irlap) {
		err("stir4200: irlap_open failed");
		goto err_out5;
	}

	/** Start kernel thread for transmit.  */
	stir->thread = kthread_run(stir_transmit_thread, stir,
				   "%s", stir->netdev->name);
	if (IS_ERR(stir->thread)) {
		err = PTR_ERR(stir->thread);
		err("stir4200: unable to start kernel thread");
		goto err_out6;
	}

	netif_start_queue(netdev);

	return 0;

	/* unwind in reverse order of acquisition */
 err_out6:
	irlap_close(stir->irlap);
 err_out5:
	kfree(stir->fifo_status);
 err_out4:
	kfree(stir->io_buf);
 err_out3:
	usb_free_urb(stir->rx_urb);
 err_out2:
	kfree_skb(stir->rx_buff.skb);
 err_out1:
	return err;
}

/*
 * Function stir_net_close (stir)
 *
 *    Network device is taken down. Usually this is done by
 *    "ifconfig irda0 down"
 */
static int stir_net_close(struct net_device *netdev)
{
	struct stir_cb *stir = netdev_priv(netdev);

	/* Stop transmit processing */
	netif_stop_queue(netdev);

	/* Kill transmit thread */
	kthread_stop(stir->thread);
	kfree(stir->fifo_status);

	/* Mop up receive urb's */
	usb_kill_urb(stir->rx_urb);

	kfree(stir->io_buf);
	usb_free_urb(stir->rx_urb);
	kfree_skb(stir->rx_buff.skb);

	/* Stop and remove instance of IrLAP */
	if (stir->irlap)
		irlap_close(stir->irlap);

	stir->irlap = NULL;

	return 0;
}

/*
 * IOCTLs : Extra out-of-band network commands...
 */
static int stir_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct stir_cb *stir = netdev_priv(netdev);
	int ret = 0;

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* Check if the device is still there */
		if (netif_device_present(stir->netdev))
			ret = change_speed(stir, irq->ifr_baudrate);
		break;

	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* Check if the IrDA stack is still there */
		if (netif_running(stir->netdev))
			irda_device_set_media_busy(stir->netdev, TRUE);
		break;

	case SIOCGRECEIVING:
		/* Only approximately true */
		irq->ifr_receiving = stir->receiving;
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static const struct net_device_ops stir_netdev_ops = {
	.ndo_open       = stir_net_open,
	.ndo_stop       = stir_net_close,
	.ndo_start_xmit = stir_hard_xmit,
	.ndo_do_ioctl   = stir_net_ioctl,
};

/*
 * This routine is called by the USB subsystem for each new device
 * in the system. We need to check if the device is ours, and in
 * this case start handling it.
 * Note : it might be worth protecting this function by a global
 * spinlock... Or not, because maybe USB already deal with that...
 */
static int stir_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	struct usb_device *dev = interface_to_usbdev(intf);
	struct stir_cb *stir = NULL;
	struct net_device *net;
	int ret = -ENOMEM;

	/* Allocate network device container. */
	net = alloc_irdadev(sizeof(*stir));
	if(!net)
		goto err_out1;

	SET_NETDEV_DEV(net, &intf->dev);
	stir = netdev_priv(net);
	stir->netdev = net;
	stir->usbdev = dev;

	ret = usb_reset_configuration(dev);
	if (ret != 0) {
		err("stir4200: usb reset configuration failed");
		goto err_out2;
	}

	printk(KERN_INFO "SigmaTel STIr4200 IRDA/USB found at address %d, "
		"Vendor: %x, Product: %x\n",
	       dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
	       le16_to_cpu(dev->descriptor.idProduct));

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&stir->qos);

	/* That's the Rx capability. */
	stir->qos.baud_rate.bits       &= IR_2400 | IR_9600 | IR_19200 |
					 IR_38400 | IR_57600 | IR_115200 |
					 (IR_4000000 << 8);
	stir->qos.min_turn_time.bits   &= qos_mtt_bits;
	irda_qos_bits_to_value(&stir->qos);

	/* Override the network functions we need to use */
	net->netdev_ops = &stir_netdev_ops;

	ret = register_netdev(net);
	if (ret != 0)
		goto err_out2;

	dev_info(&intf->dev, "IrDA: Registered SigmaTel device %s\n",
		 net->name);

	usb_set_intfdata(intf, stir);

	return 0;

err_out2:
	free_netdev(net);
err_out1:
	return ret;
}

/*
 * The current device is removed, the USB layer tell us to shut it down...
 */
static void stir_disconnect(struct usb_interface *intf)
{
	struct stir_cb *stir = usb_get_intfdata(intf);

	if (!stir)
		return;

	unregister_netdev(stir->netdev);
	free_netdev(stir->netdev);

	usb_set_intfdata(intf, NULL);
}

#ifdef CONFIG_PM
/* USB suspend, so power off the transmitter/receiver */
static int stir_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct stir_cb *stir = usb_get_intfdata(intf);

	netif_device_detach(stir->netdev);
	return 0;
}

/* Coming out of suspend, so reset hardware */
static int stir_resume(struct usb_interface *intf)
{
	struct stir_cb *stir = usb_get_intfdata(intf);

	netif_device_attach(stir->netdev);

	/* receiver restarted when send thread wakes up */
	return 0;
}
#endif

/*
 * USB device callbacks
 */
static struct usb_driver irda_driver = {
	.name		= "stir4200",
	.probe		= stir_probe,
	.disconnect	= stir_disconnect,
	.id_table	= dongles,
#ifdef CONFIG_PM
	.suspend	= stir_suspend,
	.resume		= stir_resume,
#endif
};

module_usb_driver(irda_driver);
gpl-2.0
Split-Screen/android_kernel_motorola_msm8610
drivers/gpu/drm/radeon/radeon_sa.c
5351
5266
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "drm.h"
#include "radeon.h"

/*
 * Create the backing BO for a sub-allocation manager.
 * The BO is created in the CPU domain; 'domain' records where it
 * should later be pinned by radeon_sa_bo_manager_start().
 * Returns 0 or a negative errno.
 */
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 domain)
{
	int r;

	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	INIT_LIST_HEAD(&sa_manager->sa_bo);

	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

/*
 * Tear down a sub-allocation manager.  Any still-listed sub
 * allocations are unlinked (with a warning) before the backing BO is
 * released; the sa_bo structures themselves are owned by callers.
 */
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->sa_bo)) {
		dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
		list_del_init(&sa_bo->list);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

/*
 * Pin the backing BO into its target domain and map it for CPU
 * access, filling in gpu_addr and cpu_ptr.  Must be called before
 * any sub-allocation is used by the GPU.
 */
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

/*
 * Inverse of manager_start: unmap and unpin the backing BO (e.g. for
 * device suspend).  Sub-allocation bookkeeping is left intact.
 */
int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

/*
 * Principe is simple, we keep a list of sub allocation in offset
 * order (first entry has offset == 0, last entry has the highest
 * offset).
 *
 * When allocating new object we first check if there is room at
 * the end total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset+object_size >=
 * alloc_size, this object then become the sub object we return.
 *
 * Alignment can't be bigger than page size
 */
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo *sa_bo,
		     unsigned size, unsigned align)
{
	struct radeon_sa_bo *tmp;
	struct list_head *head;
	unsigned offset = 0, wasted = 0;

	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
	BUG_ON(size > sa_manager->size);

	/* no one ? */
	head = sa_manager->sa_bo.prev;
	if (list_empty(&sa_manager->sa_bo)) {
		goto out;
	}

	/* look for a hole big enough */
	offset = 0;
	list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
		/* room before this object ?
		 * NOTE(review): this hole check does not apply 'align' to
		 * 'offset' for the gap before the first object (offset is
		 * only aligned after each entry below) — confirm against
		 * later upstream revisions before relying on alignment of
		 * first-gap allocations. */
		if ((tmp->offset - offset) >= size) {
			head = tmp->list.prev;
			goto out;
		}
		offset = tmp->offset + tmp->size;
		/* round offset up to 'align' for the next candidate slot */
		wasted = offset % align;
		if (wasted) {
			wasted = align - wasted;
		}
		offset += wasted;
	}
	/* room at the end ? */
	head = sa_manager->sa_bo.prev;
	tmp = list_entry(head, struct radeon_sa_bo, list);
	offset = tmp->offset + tmp->size;
	wasted = offset % align;
	if (wasted) {
		wasted = align - wasted;
	}
	offset += wasted;
	if ((sa_manager->size - offset) < size) {
		/* failed to find somethings big enough */
		return -ENOMEM;
	}

out:
	/* link the new sub allocation into the offset-ordered list,
	 * immediately after 'head' */
	sa_bo->manager = sa_manager;
	sa_bo->offset = offset;
	sa_bo->size = size;
	list_add(&sa_bo->list, head);
	return 0;
}

/* Release a sub allocation: just unlink it; memory is reclaimed by
 * reuse of the hole, the struct itself belongs to the caller. */
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
{
	list_del_init(&sa_bo->list);
}
gpl-2.0
z8cpaul/lsikernel-3.14
arch/mn10300/mm/cache-smp-inv.c
12263
4428
/* Functions for global i/dcache invalidation when caching in SMP
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "cache-smp.h"

/*
 * Every function below follows the same pattern: take the global
 * cache lock (disabling local interference), invalidate the local
 * CPU's cache, broadcast the same operation to the other CPUs via
 * smp_cache_call(), then release the lock.
 */

/**
 * mn10300_icache_inv - Globally invalidate instruction cache
 *
 * Invalidate the instruction cache on all CPUs.
 */
void mn10300_icache_inv(void)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_icache_inv();
	smp_cache_call(SMP_ICACHE_INV, 0, 0);
	smp_unlock_cache(flags);
}

/**
 * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
 * @start: The address of the page of memory to be invalidated.
 *
 * Invalidate a range of addresses in the instruction cache on all CPUs
 * covering the page that includes the given address.
 */
void mn10300_icache_inv_page(unsigned long start)
{
	unsigned long flags;

	/* round down to the containing page boundary */
	start &= ~(PAGE_SIZE-1);

	flags = smp_lock_cache();
	mn10300_local_icache_inv_page(start);
	smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
	smp_unlock_cache(flags);
}

/**
 * mn10300_icache_inv_range - Globally invalidate range of instruction cache
 * @start: The start address of the region to be invalidated.
 * @end: The end address of the region to be invalidated.
 *
 * Invalidate a range of addresses in the instruction cache on all CPUs,
 * between start and end-1 inclusive.
 */
void mn10300_icache_inv_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_icache_inv_range(start, end);
	smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
	smp_unlock_cache(flags);
}

/**
 * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
 * @start: The start address of the region to be invalidated.
 * @size: The size of the region to be invalidated.
 *
 * Invalidate a range of addresses in the instruction cache on all CPUs,
 * between start and start+size-1 inclusive.
 */
void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_icache_inv_range2(start, size);
	/* the IPI protocol only has start/end, so convert size to end */
	smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
	smp_unlock_cache(flags);
}

/**
 * mn10300_dcache_inv - Globally invalidate data cache
 *
 * Invalidate the data cache on all CPUs.
 */
void mn10300_dcache_inv(void)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_inv();
	smp_cache_call(SMP_DCACHE_INV, 0, 0);
	smp_unlock_cache(flags);
}

/**
 * mn10300_dcache_inv_page - Globally invalidate a page of data cache
 * @start: The address of the page of memory to be invalidated.
 *
 * Invalidate a range of addresses in the data cache on all CPUs covering the
 * page that includes the given address.
 */
void mn10300_dcache_inv_page(unsigned long start)
{
	unsigned long flags;

	/* round down to the containing page boundary */
	start &= ~(PAGE_SIZE-1);

	flags = smp_lock_cache();
	mn10300_local_dcache_inv_page(start);
	smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
	smp_unlock_cache(flags);
}

/**
 * mn10300_dcache_inv_range - Globally invalidate range of data cache
 * @start: The start address of the region to be invalidated.
 * @end: The end address of the region to be invalidated.
 *
 * Invalidate a range of addresses in the data cache on all CPUs, between start
 * and end-1 inclusive.
 */
void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_inv_range(start, end);
	smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
	smp_unlock_cache(flags);
}

/**
 * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
 * @start: The start address of the region to be invalidated.
 * @size: The size of the region to be invalidated.
 *
 * Invalidate a range of addresses in the data cache on all CPUs, between start
 * and start+size-1 inclusive.
 */
void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_inv_range2(start, size);
	/* the IPI protocol only has start/end, so convert size to end */
	smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
	smp_unlock_cache(flags);
}
gpl-2.0
Red680812/android_44_KitKat_kernel_htc_dlxpul
fs/proc/proc_sysctl.c
232
34128
#include <linux/init.h> #include <linux/sysctl.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/security.h> #include <linux/sched.h> #include <linux/namei.h> #include <linux/mm.h> #include <linux/module.h> #include "internal.h" static const struct dentry_operations proc_sys_dentry_operations; static const struct file_operations proc_sys_file_operations; static const struct inode_operations proc_sys_inode_operations; static const struct file_operations proc_sys_dir_file_operations; static const struct inode_operations proc_sys_dir_operations; void proc_sys_poll_notify(struct ctl_table_poll *poll) { if (!poll) return; atomic_inc(&poll->event); wake_up_interruptible(&poll->wait); } static struct ctl_table root_table[] = { { .procname = "", .mode = S_IFDIR|S_IRUGO|S_IXUGO, }, { } }; static struct ctl_table_root sysctl_table_root = { .default_set.dir.header = { {{.count = 1, .nreg = 1, .ctl_table = root_table }}, .ctl_table_arg = root_table, .root = &sysctl_table_root, .set = &sysctl_table_root.default_set, }, }; static DEFINE_SPINLOCK(sysctl_lock); static void drop_sysctl_table(struct ctl_table_header *header); static int sysctl_follow_link(struct ctl_table_header **phead, struct ctl_table **pentry, struct nsproxy *namespaces); static int insert_links(struct ctl_table_header *head); static void put_links(struct ctl_table_header *header); static void sysctl_print_dir(struct ctl_dir *dir) { if (dir->header.parent) sysctl_print_dir(dir->header.parent); printk(KERN_CONT "%s/", dir->header.ctl_table[0].procname); } static int namecmp(const char *name1, int len1, const char *name2, int len2) { int minlen; int cmp; minlen = len1; if (minlen > len2) minlen = len2; cmp = memcmp(name1, name2, minlen); if (cmp == 0) cmp = len1 - len2; return cmp; } static struct ctl_table *find_entry(struct ctl_table_header **phead, struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; struct rb_node *node = 
dir->root.rb_node; while (node) { struct ctl_node *ctl_node; const char *procname; int cmp; ctl_node = rb_entry(node, struct ctl_node, node); head = ctl_node->header; entry = &head->ctl_table[ctl_node - head->node]; procname = entry->procname; cmp = namecmp(name, namelen, procname, strlen(procname)); if (cmp < 0) node = node->rb_left; else if (cmp > 0) node = node->rb_right; else { *phead = head; return entry; } } return NULL; } static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) { struct rb_node *node = &head->node[entry - head->ctl_table].node; struct rb_node **p = &head->parent->root.rb_node; struct rb_node *parent = NULL; const char *name = entry->procname; int namelen = strlen(name); while (*p) { struct ctl_table_header *parent_head; struct ctl_table *parent_entry; struct ctl_node *parent_node; const char *parent_name; int cmp; parent = *p; parent_node = rb_entry(parent, struct ctl_node, node); parent_head = parent_node->header; parent_entry = &parent_head->ctl_table[parent_node - parent_head->node]; parent_name = parent_entry->procname; cmp = namecmp(name, namelen, parent_name, strlen(parent_name)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { printk(KERN_ERR "sysctl duplicate entry: "); sysctl_print_dir(head->parent); printk(KERN_CONT "/%s\n", entry->procname); return -EEXIST; } } rb_link_node(node, parent, p); return 0; } static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) { struct rb_node *node = &head->node[entry - head->ctl_table].node; rb_erase(node, &head->parent->root); } static void init_header(struct ctl_table_header *head, struct ctl_table_root *root, struct ctl_table_set *set, struct ctl_node *node, struct ctl_table *table) { head->ctl_table = table; head->ctl_table_arg = table; head->used = 0; head->count = 1; head->nreg = 1; head->unregistering = NULL; head->root = root; head->set = set; head->parent = NULL; head->node = node; if (node) { struct ctl_table 
*entry; for (entry = table; entry->procname; entry++, node++) { rb_init_node(&node->node); node->header = head; } } } static void erase_header(struct ctl_table_header *head) { struct ctl_table *entry; for (entry = head->ctl_table; entry->procname; entry++) erase_entry(head, entry); } static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header) { struct ctl_table *entry; int err; dir->header.nreg++; header->parent = dir; err = insert_links(header); if (err) goto fail_links; for (entry = header->ctl_table; entry->procname; entry++) { err = insert_entry(header, entry); if (err) goto fail; } return 0; fail: erase_header(header); put_links(header); fail_links: header->parent = NULL; drop_sysctl_table(&dir->header); return err; } static int use_table(struct ctl_table_header *p) { if (unlikely(p->unregistering)) return 0; p->used++; return 1; } static void unuse_table(struct ctl_table_header *p) { if (!--p->used) if (unlikely(p->unregistering)) complete(p->unregistering); } static void start_unregistering(struct ctl_table_header *p) { if (unlikely(p->used)) { struct completion wait; init_completion(&wait); p->unregistering = &wait; spin_unlock(&sysctl_lock); wait_for_completion(&wait); spin_lock(&sysctl_lock); } else { p->unregistering = ERR_PTR(-EINVAL); } erase_header(p); } static void sysctl_head_get(struct ctl_table_header *head) { spin_lock(&sysctl_lock); head->count++; spin_unlock(&sysctl_lock); } void sysctl_head_put(struct ctl_table_header *head) { spin_lock(&sysctl_lock); if (!--head->count) kfree_rcu(head, rcu); spin_unlock(&sysctl_lock); } static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head) { if (!head) BUG(); spin_lock(&sysctl_lock); if (!use_table(head)) head = ERR_PTR(-ENOENT); spin_unlock(&sysctl_lock); return head; } static void sysctl_head_finish(struct ctl_table_header *head) { if (!head) return; spin_lock(&sysctl_lock); unuse_table(head); spin_unlock(&sysctl_lock); } static struct ctl_table_set * 
lookup_header_set(struct ctl_table_root *root, struct nsproxy *namespaces) { struct ctl_table_set *set = &root->default_set; if (root->lookup) set = root->lookup(root, namespaces); return set; } static struct ctl_table *lookup_entry(struct ctl_table_header **phead, struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; spin_lock(&sysctl_lock); entry = find_entry(&head, dir, name, namelen); if (entry && use_table(head)) *phead = head; else entry = NULL; spin_unlock(&sysctl_lock); return entry; } static struct ctl_node *first_usable_entry(struct rb_node *node) { struct ctl_node *ctl_node; for (;node; node = rb_next(node)) { ctl_node = rb_entry(node, struct ctl_node, node); if (use_table(ctl_node->header)) return ctl_node; } return NULL; } static void first_entry(struct ctl_dir *dir, struct ctl_table_header **phead, struct ctl_table **pentry) { struct ctl_table_header *head = NULL; struct ctl_table *entry = NULL; struct ctl_node *ctl_node; spin_lock(&sysctl_lock); ctl_node = first_usable_entry(rb_first(&dir->root)); spin_unlock(&sysctl_lock); if (ctl_node) { head = ctl_node->header; entry = &head->ctl_table[ctl_node - head->node]; } *phead = head; *pentry = entry; } static void next_entry(struct ctl_table_header **phead, struct ctl_table **pentry) { struct ctl_table_header *head = *phead; struct ctl_table *entry = *pentry; struct ctl_node *ctl_node = &head->node[entry - head->ctl_table]; spin_lock(&sysctl_lock); unuse_table(head); ctl_node = first_usable_entry(rb_next(&ctl_node->node)); spin_unlock(&sysctl_lock); head = NULL; if (ctl_node) { head = ctl_node->header; entry = &head->ctl_table[ctl_node - head->node]; } *phead = head; *pentry = entry; } void register_sysctl_root(struct ctl_table_root *root) { } static int test_perm(int mode, int op) { if (!current_euid()) mode >>= 6; else if (in_egroup_p(0)) mode >>= 3; if ((op & ~mode & (MAY_READ|MAY_WRITE|MAY_EXEC)) == 0) return 0; return -EACCES; } static int 
sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op) { int mode; if (root->permissions) mode = root->permissions(root, current->nsproxy, table); else mode = table->mode; return test_perm(mode, op); } static struct inode *proc_sys_make_inode(struct super_block *sb, struct ctl_table_header *head, struct ctl_table *table) { struct inode *inode; struct proc_inode *ei; inode = new_inode(sb); if (!inode) goto out; inode->i_ino = get_next_ino(); sysctl_head_get(head); ei = PROC_I(inode); ei->sysctl = head; ei->sysctl_entry = table; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_mode = table->mode; if (!S_ISDIR(table->mode)) { inode->i_mode |= S_IFREG; inode->i_op = &proc_sys_inode_operations; inode->i_fop = &proc_sys_file_operations; } else { inode->i_mode |= S_IFDIR; inode->i_op = &proc_sys_dir_operations; inode->i_fop = &proc_sys_dir_file_operations; } out: return inode; } static struct ctl_table_header *grab_header(struct inode *inode) { struct ctl_table_header *head = PROC_I(inode)->sysctl; if (!head) head = &sysctl_table_root.default_set.dir.header; return sysctl_head_grab(head); } static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct ctl_table_header *head = grab_header(dir); struct ctl_table_header *h = NULL; struct qstr *name = &dentry->d_name; struct ctl_table *p; struct inode *inode; struct dentry *err = ERR_PTR(-ENOENT); struct ctl_dir *ctl_dir; int ret; if (IS_ERR(head)) return ERR_CAST(head); ctl_dir = container_of(head, struct ctl_dir, header); p = lookup_entry(&h, ctl_dir, name->name, name->len); if (!p) goto out; if (S_ISLNK(p->mode)) { ret = sysctl_follow_link(&h, &p, current->nsproxy); err = ERR_PTR(ret); if (ret) goto out; } err = ERR_PTR(-ENOMEM); inode = proc_sys_make_inode(dir->i_sb, h ? 
h : head, p); if (h) sysctl_head_finish(h); if (!inode) goto out; err = NULL; d_set_d_op(dentry, &proc_sys_dentry_operations); d_add(dentry, inode); out: sysctl_head_finish(head); return err; } static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, size_t count, loff_t *ppos, int write) { struct inode *inode = filp->f_path.dentry->d_inode; struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; ssize_t error; size_t res; if (IS_ERR(head)) return PTR_ERR(head); error = -EPERM; if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ)) goto out; error = -EINVAL; if (!table->proc_handler) goto out; res = count; error = table->proc_handler(table, write, buf, &res, ppos); if (!error) error = res; out: sysctl_head_finish(head); return error; } static ssize_t proc_sys_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 0); } static ssize_t proc_sys_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 1); } static int proc_sys_open(struct inode *inode, struct file *filp) { struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; if (IS_ERR(head)) return PTR_ERR(head); if (table->poll) filp->private_data = proc_sys_poll_event(table->poll); sysctl_head_finish(head); return 0; } static unsigned int proc_sys_poll(struct file *filp, poll_table *wait) { struct inode *inode = filp->f_path.dentry->d_inode; struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; unsigned int ret = DEFAULT_POLLMASK; unsigned long event; if (IS_ERR(head)) return POLLERR | POLLHUP; if (!table->proc_handler) goto out; if (!table->poll) goto out; event = (unsigned long)filp->private_data; poll_wait(filp, &table->poll->wait, wait); if (event != 
atomic_read(&table->poll->event)) { filp->private_data = proc_sys_poll_event(table->poll); ret = POLLIN | POLLRDNORM | POLLERR | POLLPRI; } out: sysctl_head_finish(head); return ret; } static int proc_sys_fill_cache(struct file *filp, void *dirent, filldir_t filldir, struct ctl_table_header *head, struct ctl_table *table) { struct dentry *child, *dir = filp->f_path.dentry; struct inode *inode; struct qstr qname; ino_t ino = 0; unsigned type = DT_UNKNOWN; qname.name = table->procname; qname.len = strlen(table->procname); qname.hash = full_name_hash(qname.name, qname.len); child = d_lookup(dir, &qname); if (!child) { child = d_alloc(dir, &qname); if (child) { inode = proc_sys_make_inode(dir->d_sb, head, table); if (!inode) { dput(child); return -ENOMEM; } else { d_set_d_op(child, &proc_sys_dentry_operations); d_add(child, inode); } } else { return -ENOMEM; } } inode = child->d_inode; ino = inode->i_ino; type = inode->i_mode >> 12; dput(child); return !!filldir(dirent, qname.name, qname.len, filp->f_pos, ino, type); } static int proc_sys_link_fill_cache(struct file *filp, void *dirent, filldir_t filldir, struct ctl_table_header *head, struct ctl_table *table) { int err, ret = 0; head = sysctl_head_grab(head); if (S_ISLNK(table->mode)) { err = sysctl_follow_link(&head, &table, current->nsproxy); if (err) goto out; } ret = proc_sys_fill_cache(filp, dirent, filldir, head, table); out: sysctl_head_finish(head); return ret; } static int scan(struct ctl_table_header *head, ctl_table *table, unsigned long *pos, struct file *file, void *dirent, filldir_t filldir) { int res; if ((*pos)++ < file->f_pos) return 0; if (unlikely(S_ISLNK(table->mode))) res = proc_sys_link_fill_cache(file, dirent, filldir, head, table); else res = proc_sys_fill_cache(file, dirent, filldir, head, table); if (res == 0) file->f_pos = *pos; return res; } static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct dentry *dentry = filp->f_path.dentry; struct inode *inode = 
dentry->d_inode; struct ctl_table_header *head = grab_header(inode); struct ctl_table_header *h = NULL; struct ctl_table *entry; struct ctl_dir *ctl_dir; unsigned long pos; int ret = -EINVAL; if (IS_ERR(head)) return PTR_ERR(head); ctl_dir = container_of(head, struct ctl_dir, header); ret = 0; if (filp->f_pos == 0) { if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) goto out; filp->f_pos++; } if (filp->f_pos == 1) { if (filldir(dirent, "..", 2, filp->f_pos, parent_ino(dentry), DT_DIR) < 0) goto out; filp->f_pos++; } pos = 2; for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) { ret = scan(h, entry, &pos, filp, dirent, filldir); if (ret) { sysctl_head_finish(h); break; } } ret = 1; out: sysctl_head_finish(head); return ret; } static int proc_sys_permission(struct inode *inode, int mask) { struct ctl_table_header *head; struct ctl_table *table; int error; if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) return -EACCES; head = grab_header(inode); if (IS_ERR(head)) return PTR_ERR(head); table = PROC_I(inode)->sysctl_entry; if (!table) error = mask & MAY_WRITE ? 
-EACCES : 0; else error = sysctl_perm(head->root, table, mask & ~MAY_NOT_BLOCK); sysctl_head_finish(head); return error; } static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; int error; if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) return -EPERM; error = inode_change_ok(inode, attr); if (error) return error; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { error = vmtruncate(inode, attr->ia_size); if (error) return error; } setattr_copy(inode, attr); mark_inode_dirty(inode); return 0; } static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; if (IS_ERR(head)) return PTR_ERR(head); generic_fillattr(inode, stat); if (table) stat->mode = (stat->mode & S_IFMT) | table->mode; sysctl_head_finish(head); return 0; } static const struct file_operations proc_sys_file_operations = { .open = proc_sys_open, .poll = proc_sys_poll, .read = proc_sys_read, .write = proc_sys_write, .llseek = default_llseek, }; static const struct file_operations proc_sys_dir_file_operations = { .read = generic_read_dir, .readdir = proc_sys_readdir, .llseek = generic_file_llseek, }; static const struct inode_operations proc_sys_inode_operations = { .permission = proc_sys_permission, .setattr = proc_sys_setattr, .getattr = proc_sys_getattr, }; static const struct inode_operations proc_sys_dir_operations = { .lookup = proc_sys_lookup, .permission = proc_sys_permission, .setattr = proc_sys_setattr, .getattr = proc_sys_getattr, }; static int proc_sys_revalidate(struct dentry *dentry, struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) return -ECHILD; return !PROC_I(dentry->d_inode)->sysctl->unregistering; } static int proc_sys_delete(const struct dentry *dentry) { return !!PROC_I(dentry->d_inode)->sysctl->unregistering; } static 
int sysctl_is_seen(struct ctl_table_header *p) { struct ctl_table_set *set = p->set; int res; spin_lock(&sysctl_lock); if (p->unregistering) res = 0; else if (!set->is_seen) res = 1; else res = set->is_seen(set); spin_unlock(&sysctl_lock); return res; } static int proc_sys_compare(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { struct ctl_table_header *head; if (!inode) return 1; if (name->len != len) return 1; if (memcmp(name->name, str, len)) return 1; head = rcu_dereference(PROC_I(inode)->sysctl); return !head || !sysctl_is_seen(head); } static const struct dentry_operations proc_sys_dentry_operations = { .d_revalidate = proc_sys_revalidate, .d_delete = proc_sys_delete, .d_compare = proc_sys_compare, }; static struct ctl_dir *find_subdir(struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; entry = find_entry(&head, dir, name, namelen); if (!entry) return ERR_PTR(-ENOENT); if (!S_ISDIR(entry->mode)) return ERR_PTR(-ENOTDIR); return container_of(head, struct ctl_dir, header); } static struct ctl_dir *new_dir(struct ctl_table_set *set, const char *name, int namelen) { struct ctl_table *table; struct ctl_dir *new; struct ctl_node *node; char *new_name; new = kzalloc(sizeof(*new) + sizeof(struct ctl_node) + sizeof(struct ctl_table)*2 + namelen + 1, GFP_KERNEL); if (!new) return NULL; node = (struct ctl_node *)(new + 1); table = (struct ctl_table *)(node + 1); new_name = (char *)(table + 2); memcpy(new_name, name, namelen); new_name[namelen] = '\0'; table[0].procname = new_name; table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO; init_header(&new->header, set->dir.header.root, set, node, table); return new; } static struct ctl_dir *get_subdir(struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_set *set = dir->header.set; struct ctl_dir *subdir, *new = NULL; int err; 
spin_lock(&sysctl_lock); subdir = find_subdir(dir, name, namelen); if (!IS_ERR(subdir)) goto found; if (PTR_ERR(subdir) != -ENOENT) goto failed; spin_unlock(&sysctl_lock); new = new_dir(set, name, namelen); spin_lock(&sysctl_lock); subdir = ERR_PTR(-ENOMEM); if (!new) goto failed; subdir = find_subdir(dir, name, namelen); if (!IS_ERR(subdir)) goto found; if (PTR_ERR(subdir) != -ENOENT) goto failed; err = insert_header(dir, &new->header); subdir = ERR_PTR(err); if (err) goto failed; subdir = new; found: subdir->header.nreg++; failed: if (unlikely(IS_ERR(subdir))) { printk(KERN_ERR "sysctl could not get directory: "); sysctl_print_dir(dir); printk(KERN_CONT "/%*.*s %ld\n", namelen, namelen, name, PTR_ERR(subdir)); } drop_sysctl_table(&dir->header); if (new) drop_sysctl_table(&new->header); spin_unlock(&sysctl_lock); return subdir; } static struct ctl_dir *xlate_dir(struct ctl_table_set *set, struct ctl_dir *dir) { struct ctl_dir *parent; const char *procname; if (!dir->header.parent) return &set->dir; parent = xlate_dir(set, dir->header.parent); if (IS_ERR(parent)) return parent; procname = dir->header.ctl_table[0].procname; return find_subdir(parent, procname, strlen(procname)); } static int sysctl_follow_link(struct ctl_table_header **phead, struct ctl_table **pentry, struct nsproxy *namespaces) { struct ctl_table_header *head; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_table *entry; struct ctl_dir *dir; int ret; ret = 0; spin_lock(&sysctl_lock); root = (*pentry)->data; set = lookup_header_set(root, namespaces); dir = xlate_dir(set, (*phead)->parent); if (IS_ERR(dir)) ret = PTR_ERR(dir); else { const char *procname = (*pentry)->procname; head = NULL; entry = find_entry(&head, dir, procname, strlen(procname)); ret = -ENOENT; if (entry && use_table(head)) { unuse_table(*phead); *phead = head; *pentry = entry; ret = 0; } } spin_unlock(&sysctl_lock); return ret; } static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_ERR "sysctl table check failed: %s/%s %pV\n", path, table->procname, &vaf); va_end(args); return -EINVAL; } static int sysctl_check_table(const char *path, struct ctl_table *table) { int err = 0; for (; table->procname; table++) { if (table->child) err = sysctl_err(path, table, "Not a file"); if ((table->proc_handler == proc_dostring) || (table->proc_handler == proc_dointvec) || (table->proc_handler == proc_dointvec_minmax) || (table->proc_handler == proc_dointvec_jiffies) || (table->proc_handler == proc_dointvec_userhz_jiffies) || (table->proc_handler == proc_dointvec_ms_jiffies) || (table->proc_handler == proc_doulongvec_minmax) || (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { if (!table->data) err = sysctl_err(path, table, "No data"); if (!table->maxlen) err = sysctl_err(path, table, "No maxlen"); } if (!table->proc_handler) err = sysctl_err(path, table, "No proc_handler"); if ((table->mode & (S_IRUGO|S_IWUGO)) != table->mode) err = sysctl_err(path, table, "bogus .mode 0%o", table->mode); } return err; } static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table, struct ctl_table_root *link_root) { struct ctl_table *link_table, *entry, *link; struct ctl_table_header *links; struct ctl_node *node; char *link_name; int nr_entries, name_bytes; name_bytes = 0; nr_entries = 0; for (entry = table; entry->procname; entry++) { nr_entries++; name_bytes += strlen(entry->procname) + 1; } links = kzalloc(sizeof(struct ctl_table_header) + sizeof(struct ctl_node)*nr_entries + sizeof(struct ctl_table)*(nr_entries + 1) + name_bytes, GFP_KERNEL); if (!links) return NULL; node = (struct ctl_node *)(links + 1); link_table = (struct ctl_table *)(node + nr_entries); link_name = (char *)&link_table[nr_entries + 1]; for (link = link_table, entry = table; entry->procname; link++, entry++) { int len = strlen(entry->procname) + 1; 
memcpy(link_name, entry->procname, len); link->procname = link_name; link->mode = S_IFLNK|S_IRWXUGO; link->data = link_root; link_name += len; } init_header(links, dir->header.root, dir->header.set, node, link_table); links->nreg = nr_entries; return links; } static bool get_links(struct ctl_dir *dir, struct ctl_table *table, struct ctl_table_root *link_root) { struct ctl_table_header *head; struct ctl_table *entry, *link; for (entry = table; entry->procname; entry++) { const char *procname = entry->procname; link = find_entry(&head, dir, procname, strlen(procname)); if (!link) return false; if (S_ISDIR(link->mode) && S_ISDIR(entry->mode)) continue; if (S_ISLNK(link->mode) && (link->data == link_root)) continue; return false; } for (entry = table; entry->procname; entry++) { const char *procname = entry->procname; link = find_entry(&head, dir, procname, strlen(procname)); head->nreg++; } return true; } static int insert_links(struct ctl_table_header *head) { struct ctl_table_set *root_set = &sysctl_table_root.default_set; struct ctl_dir *core_parent = NULL; struct ctl_table_header *links; int err; if (head->set == root_set) return 0; core_parent = xlate_dir(root_set, head->parent); if (IS_ERR(core_parent)) return 0; if (get_links(core_parent, head->ctl_table, head->root)) return 0; core_parent->header.nreg++; spin_unlock(&sysctl_lock); links = new_links(core_parent, head->ctl_table, head->root); spin_lock(&sysctl_lock); err = -ENOMEM; if (!links) goto out; err = 0; if (get_links(core_parent, head->ctl_table, head->root)) { kfree(links); goto out; } err = insert_header(core_parent, links); if (err) kfree(links); out: drop_sysctl_table(&core_parent->header); return err; } struct ctl_table_header *__register_sysctl_table( struct ctl_table_set *set, const char *path, struct ctl_table *table) { struct ctl_table_root *root = set->dir.header.root; struct ctl_table_header *header; const char *name, *nextname; struct ctl_dir *dir; struct ctl_table *entry; struct ctl_node 
*node; int nr_entries = 0; for (entry = table; entry->procname; entry++) nr_entries++; header = kzalloc(sizeof(struct ctl_table_header) + sizeof(struct ctl_node)*nr_entries, GFP_KERNEL); if (!header) return NULL; node = (struct ctl_node *)(header + 1); init_header(header, root, set, node, table); if (sysctl_check_table(path, table)) goto fail; spin_lock(&sysctl_lock); dir = &set->dir; dir->header.nreg++; spin_unlock(&sysctl_lock); for (name = path; name; name = nextname) { int namelen; nextname = strchr(name, '/'); if (nextname) { namelen = nextname - name; nextname++; } else { namelen = strlen(name); } if (namelen == 0) continue; dir = get_subdir(dir, name, namelen); if (IS_ERR(dir)) goto fail; } spin_lock(&sysctl_lock); if (insert_header(dir, header)) goto fail_put_dir_locked; drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); return header; fail_put_dir_locked: drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); fail: kfree(header); dump_stack(); return NULL; } struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table) { return __register_sysctl_table(&sysctl_table_root.default_set, path, table); } EXPORT_SYMBOL(register_sysctl); static char *append_path(const char *path, char *pos, const char *name) { int namelen; namelen = strlen(name); if (((pos - path) + namelen + 2) >= PATH_MAX) return NULL; memcpy(pos, name, namelen); pos[namelen] = '/'; pos[namelen + 1] = '\0'; pos += namelen + 1; return pos; } static int count_subheaders(struct ctl_table *table) { int has_files = 0; int nr_subheaders = 0; struct ctl_table *entry; if (!table || !table->procname) return 1; for (entry = table; entry->procname; entry++) { if (entry->child) nr_subheaders += count_subheaders(entry->child); else has_files = 1; } return nr_subheaders + has_files; } static int register_leaf_sysctl_tables(const char *path, char *pos, struct ctl_table_header ***subheader, struct ctl_table_set *set, struct ctl_table *table) { struct ctl_table *ctl_table_arg 
= NULL; struct ctl_table *entry, *files; int nr_files = 0; int nr_dirs = 0; int err = -ENOMEM; for (entry = table; entry->procname; entry++) { if (entry->child) nr_dirs++; else nr_files++; } files = table; if (nr_dirs && nr_files) { struct ctl_table *new; files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1), GFP_KERNEL); if (!files) goto out; ctl_table_arg = files; for (new = files, entry = table; entry->procname; entry++) { if (entry->child) continue; *new = *entry; new++; } } if (nr_files || !nr_dirs) { struct ctl_table_header *header; header = __register_sysctl_table(set, path, files); if (!header) { kfree(ctl_table_arg); goto out; } header->ctl_table_arg = ctl_table_arg; **subheader = header; (*subheader)++; } for (entry = table; entry->procname; entry++) { char *child_pos; if (!entry->child) continue; err = -ENAMETOOLONG; child_pos = append_path(path, pos, entry->procname); if (!child_pos) goto out; err = register_leaf_sysctl_tables(path, child_pos, subheader, set, entry->child); pos[0] = '\0'; if (err) goto out; } err = 0; out: return err; } struct ctl_table_header *__register_sysctl_paths( struct ctl_table_set *set, const struct ctl_path *path, struct ctl_table *table) { struct ctl_table *ctl_table_arg = table; int nr_subheaders = count_subheaders(table); struct ctl_table_header *header = NULL, **subheaders, **subheader; const struct ctl_path *component; char *new_path, *pos; pos = new_path = kmalloc(PATH_MAX, GFP_KERNEL); if (!new_path) return NULL; pos[0] = '\0'; for (component = path; component->procname; component++) { pos = append_path(new_path, pos, component->procname); if (!pos) goto out; } while (table->procname && table->child && !table[1].procname) { pos = append_path(new_path, pos, table->procname); if (!pos) goto out; table = table->child; } if (nr_subheaders == 1) { header = __register_sysctl_table(set, new_path, table); if (header) header->ctl_table_arg = ctl_table_arg; } else { header = kzalloc(sizeof(*header) + 
sizeof(*subheaders)*nr_subheaders, GFP_KERNEL); if (!header) goto out; subheaders = (struct ctl_table_header **) (header + 1); subheader = subheaders; header->ctl_table_arg = ctl_table_arg; if (register_leaf_sysctl_tables(new_path, pos, &subheader, set, table)) goto err_register_leaves; } out: kfree(new_path); return header; err_register_leaves: while (subheader > subheaders) { struct ctl_table_header *subh = *(--subheader); struct ctl_table *table = subh->ctl_table_arg; unregister_sysctl_table(subh); kfree(table); } kfree(header); header = NULL; goto out; } struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, struct ctl_table *table) { return __register_sysctl_paths(&sysctl_table_root.default_set, path, table); } EXPORT_SYMBOL(register_sysctl_paths); struct ctl_table_header *register_sysctl_table(struct ctl_table *table) { static const struct ctl_path null_path[] = { {} }; return register_sysctl_paths(null_path, table); } EXPORT_SYMBOL(register_sysctl_table); static void put_links(struct ctl_table_header *header) { struct ctl_table_set *root_set = &sysctl_table_root.default_set; struct ctl_table_root *root = header->root; struct ctl_dir *parent = header->parent; struct ctl_dir *core_parent; struct ctl_table *entry; if (header->set == root_set) return; core_parent = xlate_dir(root_set, parent); if (IS_ERR(core_parent)) return; for (entry = header->ctl_table; entry->procname; entry++) { struct ctl_table_header *link_head; struct ctl_table *link; const char *name = entry->procname; link = find_entry(&link_head, core_parent, name, strlen(name)); if (link && ((S_ISDIR(link->mode) && S_ISDIR(entry->mode)) || (S_ISLNK(link->mode) && (link->data == root)))) { drop_sysctl_table(link_head); } else { printk(KERN_ERR "sysctl link missing during unregister: "); sysctl_print_dir(parent); printk(KERN_CONT "/%s\n", name); } } } static void drop_sysctl_table(struct ctl_table_header *header) { struct ctl_dir *parent = header->parent; if (--header->nreg) 
return; put_links(header); start_unregistering(header); if (!--header->count) kfree_rcu(header, rcu); if (parent) drop_sysctl_table(&parent->header); } void unregister_sysctl_table(struct ctl_table_header * header) { int nr_subheaders; might_sleep(); if (header == NULL) return; nr_subheaders = count_subheaders(header->ctl_table_arg); if (unlikely(nr_subheaders > 1)) { struct ctl_table_header **subheaders; int i; subheaders = (struct ctl_table_header **)(header + 1); for (i = nr_subheaders -1; i >= 0; i--) { struct ctl_table_header *subh = subheaders[i]; struct ctl_table *table = subh->ctl_table_arg; unregister_sysctl_table(subh); kfree(table); } kfree(header); return; } spin_lock(&sysctl_lock); drop_sysctl_table(header); spin_unlock(&sysctl_lock); } EXPORT_SYMBOL(unregister_sysctl_table); void setup_sysctl_set(struct ctl_table_set *set, struct ctl_table_root *root, int (*is_seen)(struct ctl_table_set *)) { memset(set, 0, sizeof(*set)); set->is_seen = is_seen; init_header(&set->dir.header, root, set, NULL, root_table); } void retire_sysctl_set(struct ctl_table_set *set) { WARN_ON(!RB_EMPTY_ROOT(&set->dir.root)); } int __init proc_sys_init(void) { struct proc_dir_entry *proc_sys_root; proc_sys_root = proc_mkdir("sys", NULL); proc_sys_root->proc_iops = &proc_sys_dir_operations; proc_sys_root->proc_fops = &proc_sys_dir_file_operations; proc_sys_root->nlink = 0; return sysctl_init(); }
gpl-2.0
ali1234/u-boot-n900
board/esd/ar405/fpgadata.c
232
225470
0x00,0x09,0x0f,0xf0,0x0f,0xf0,0x0f,0xf0,0x0f,0xf0,0x00,0x00,0x01,0x61,0x00,0x0d, 0x70,0x70,0x63,0x5f,0x61,0x72,0x30,0x31,0x2e,0x6e,0x63,0x64,0x00,0x62,0x00,0x0b, 0x73,0x34,0x30,0x78,0x6c,0x70,0x71,0x32,0x34,0x30,0x00,0x63,0x00,0x0b,0x32,0x30, 0x30,0x31,0x2f,0x30,0x32,0x2f,0x31,0x34,0x00,0x64,0x00,0x09,0x31,0x35,0x3a,0x34, 0x30,0x3a,0x30,0x34,0x00,0x65,0xe2,0x01,0x00,0x00,0xab,0x8e,0xff,0x30,0xe5,0xe5, 0xe8,0xe5,0x03,0xe8,0x04,0x01,0x02,0x11,0x09,0x09,0x01,0x07,0x02,0x04,0x04,0x06, 0x09,0x07,0x04,0x04,0x04,0x04,0x03,0x07,0x02,0x04,0x09,0x04,0x04,0x0b,0x09,0x09, 0x03,0x07,0x02,0x09,0x04,0x04,0x04,0x04,0x0e,0x04,0x04,0x09,0x03,0x07,0x02,0x04, 0x03,0x03,0x03,0x07,0xe5,0x01,0x0d,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03, 0x19,0x03,0x02,0x02,0x03,0x02,0x08,0x09,0x07,0x13,0x03,0x11,0x02,0x06,0x03,0x05, 0x03,0x05,0x11,0x1d,0x1f,0x13,0x10,0x01,0x01,0xe3,0x4c,0xe5,0x01,0x0a,0x04,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x09,0x04,0x04,0x02,0x06,0x01,0xe5,0x05, 0x01,0x02,0x04,0x04,0x04,0x09,0x09,0x09,0x09,0x04,0x06,0x01,0x07,0x09,0x04,0x04, 0x09,0x06,0x02,0x09,0x09,0x04,0x01,0xe7,0x03,0x04,0x07,0xe6,0x08,0x09,0x09,0x0e, 0x01,0xe6,0x13,0x09,0x09,0x09,0x03,0x05,0x03,0x05,0x09,0x09,0x09,0x09,0xe5,0x07, 0x03,0x05,0xe5,0x07,0xe5,0x01,0x05,0x0b,0xe5,0x07,0xe5,0x07,0x09,0x03,0x05,0xe6, 0xe5,0x04,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0xe6,0x04,0x03,0x05,0xe5,0xe6,0x04, 0xe5,0x07,0x03,0x05,0xe5,0x07,0x0b,0x01,0x0e,0x05,0x03,0x05,0x03,0x05,0x1d,0x03, 0x05,0x03,0x44,0xe5,0xe5,0x2e,0x1c,0x01,0x13,0x32,0x01,0xe3,0x4d,0xe5,0x0f,0x09, 0x09,0x27,0x09,0x42,0x04,0x4a,0x44,0x01,0x01,0x01,0x12,0x09,0x09,0x01,0x06,0x01, 0x12,0x09,0x0a,0x09,0x11,0x21,0x13,0x11,0x15,0x1d,0x1b,0x0c,0x0d,0x03,0x03,0x01, 0x31,0x31,0x47,0x81,0xe5,0xe5,0xe5,0x77,0x3e,0xe5,0x19,0xe5,0x1b,0x3b,0xe5,0xe5, 0x5a,0x3d,0x1f,0x74,0xe6,0xe5,0x28,0x5b,0x09,0xe5,0x0a,0x08,0xe5,0x08,0x1d,0x11, 0xe6,0x37,0x12,0x01,0x01,0x09,0x06,0x09,0x09,0x09,0xe5,0x07,0x09,0x09,0x09,0x09, 
0x09,0x09,0x09,0x01,0x03,0x03,0x03,0x05,0x05,0x05,0x09,0x09,0x09,0x09,0x09,0xe5, 0x07,0x09,0x09,0x09,0x05,0x03,0x09,0x09,0x09,0x10,0xe5,0x0f,0x09,0x09,0x04,0x04, 0x09,0x09,0x05,0x03,0x05,0x03,0x01,0x07,0x09,0x09,0x09,0x09,0x07,0x01,0x07,0xe5, 0x01,0x07,0x01,0x06,0x02,0x09,0x05,0x03,0x06,0x02,0x09,0x05,0x01,0x01,0x01,0x07, 0x05,0x03,0x09,0x09,0x09,0x04,0x04,0x0e,0x01,0xe5,0x02,0x09,0xe5,0x07,0xe5,0x01, 0x05,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x03,0x03,0xe5,0x01, 0x01,0x03,0xe5,0x01,0x05,0xe5,0x07,0xe5,0xe5,0x05,0x09,0x09,0x01,0x02,0x06,0x04, 0x04,0x09,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0x09,0xe5,0x07,0x09,0xe5,0x02,0x04,0xe5, 0x01,0x05,0x09,0x09,0xe5,0x07,0xe5,0x04,0x0b,0x01,0x10,0x09,0x09,0x09,0x09,0x05, 0x03,0x09,0x09,0x09,0x09,0x09,0x09,0x06,0x02,0x06,0x02,0x06,0xe6,0x01,0x06,0x02, 0x06,0x02,0x09,0x09,0x03,0x02,0x02,0x09,0x06,0x02,0x09,0x09,0x06,0x02,0x06,0x02, 0x09,0x09,0x0d,0x02,0xe5,0x02,0x01,0xe5,0x05,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x03,0x03,0xe5,0x03,0x03,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe5,0x02,0x04,0xe5,0x03,0x03,0x05,0x03,0x0b,0x04,0xe5,0x02,0x06,0x02,0x09,0xe5, 0x07,0xe5,0x01,0x05,0x05,0x03,0xe5,0x03,0x03,0x05,0x03,0xe5,0x07,0xe5,0x07,0x09, 0x09,0xe5,0x07,0xe5,0x07,0x02,0x03,0xe5,0xe6,0x08,0x05,0x09,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x0b,0x09,0x09,0x09,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0x09,0x09,0x09,0x0f,0x01,0xe5,0x05,0x06,0x09,0x07,0xe6,0x08,0x09, 0x09,0x09,0x09,0x09,0x09,0x09,0x05,0x03,0x0a,0x09,0x08,0x01,0x03,0x06,0x08,0x09, 0x09,0x0a,0x08,0x0a,0x07,0xe5,0x08,0x0a,0x09,0x08,0x09,0x11,0xe5,0xe5,0x01,0x0a, 0x02,0x04,0x01,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x01,0x04,0x02,0x03,0x02,0x02, 0x06,0x02,0x05,0xe5,0x01,0x05,0xe5,0x01,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02, 0x08,0x02,0x05,0xe5,0x01,0x06,0x02,0x05,0xe5,0x01,0x04,0x01,0x02,0x06,0x02,0x06, 0x02,0x06,0x02,0x01,0x04,0x02,0x06,0x02,0x06,0x02,0x05,0xe5,0x01,0x06,0x02,0x06, 
0x02,0x08,0x04,0xe5,0xe6,0x02,0x11,0x04,0xe6,0x24,0x09,0xe5,0x2f,0x06,0xe5,0xe5, 0x05,0x0e,0x06,0xe5,0x10,0x17,0x09,0x06,0x02,0x45,0x03,0x02,0xe5,0x15,0x0f,0x13, 0x27,0x1e,0x05,0x02,0x01,0x01,0x05,0x08,0x3c,0x06,0x01,0x45,0x04,0xe8,0x11,0x01, 0xe5,0x07,0x15,0x13,0x23,0x22,0xe5,0x04,0x01,0x06,0x2e,0x13,0x1d,0x2d,0xe5,0xe6, 0x18,0x02,0x0c,0x17,0x09,0x31,0x0d,0xe5,0x0c,0x31,0xe7,0x58,0x04,0xe5,0xe6,0x0f, 0x09,0x09,0x09,0x09,0x09,0x09,0x01,0x02,0xe5,0x02,0x09,0x09,0x05,0x03,0x05,0x03, 0xe5,0x07,0x09,0x0b,0x09,0x09,0x09,0x01,0x03,0x03,0x07,0x01,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0x09,0x0f,0xe6,0x13,0x1d,0x03,0x01,0x1a,0x13,0xe5,0xe5,0x0f,0x01, 0xe5,0x07,0xe5,0x05,0x01,0xe5,0x06,0xe6,0xe5,0x05,0x02,0x06,0x01,0xe5,0x09,0x12, 0x09,0x10,0x0a,0x01,0x08,0xe5,0x0f,0x01,0xe5,0x17,0xe5,0xe5,0x50,0x01,0x07,0x09, 0x01,0x14,0x05,0x01,0x08,0x0a,0x0a,0x02,0x09,0x0d,0x0b,0x14,0x4e,0xe5,0x01,0x3c, 0x0b,0x12,0x34,0x0b,0x06,0x01,0x0e,0x18,0x04,0x04,0x04,0x0f,0x27,0x19,0x02,0xe5, 0x05,0x67,0x09,0x3d,0x33,0x07,0x1d,0x17,0x05,0xe5,0xe6,0x05,0x15,0x5b,0x35,0x81, 0xe6,0x34,0x01,0x07,0x1d,0xe6,0x06,0x01,0x0b,0x05,0x01,0x0b,0x0b,0x2d,0x01,0x07, 0x0b,0x05,0x03,0x11,0xe6,0x1a,0x01,0x03,0x17,0xe5,0x1a,0x1c,0x09,0x24,0x01,0x11, 0x01,0x0e,0x07,0x15,0x1b,0x09,0x03,0x21,0x1c,0x01,0x18,0x01,0x01,0x35,0x20,0x01, 0x04,0x01,0x13,0x0d,0x0b,0x12,0x10,0x02,0x0a,0x03,0x11,0x01,0x09,0x02,0x03,0x07, 0x06,0x2a,0x0c,0x02,0x1a,0x0a,0x08,0x13,0x09,0x10,0xe5,0xe5,0x0b,0x03,0xe5,0x0e, 0x07,0x1e,0x03,0x01,0x12,0x0f,0x02,0x01,0x02,0x17,0x02,0x01,0x02,0x11,0x0d,0x13, 0xe5,0x01,0x27,0xe5,0x33,0x07,0x0d,0x3b,0x02,0x0b,0x09,0x0e,0x03,0x1c,0x35,0x01, 0xe6,0x64,0x09,0x2a,0x03,0x06,0x01,0x03,0x07,0x02,0x2b,0x05,0x20,0xe6,0x24,0xe5, 0xe5,0x06,0x43,0x0e,0x37,0x01,0x28,0x23,0x27,0x0c,0x15,0xe5,0xe6,0x68,0x0c,0x03, 0x02,0x13,0x05,0x02,0x14,0x01,0x0b,0x03,0x07,0x08,0x06,0x16,0x06,0x36,0xe5,0xe6, 0x60,0x07,0x13,0x02,0x10,0x0d,0x11,0x02,0x20,0x10,0x01,0x0c,0x01,0x02,0x34,0xe7, 
0x09,0x06,0x58,0x10,0x01,0x11,0x01,0x1f,0x04,0x08,0x17,0x18,0x04,0x2c,0x0b,0x03, 0xe6,0x6e,0x37,0x0f,0x02,0x03,0xe5,0xe5,0x04,0xe5,0x06,0x01,0xe5,0x03,0xe5,0xe6, 0x05,0x08,0xe6,0x03,0x05,0xe5,0x07,0xe5,0x05,0x1e,0xe7,0xe6,0x01,0x01,0x6c,0x13, 0xe5,0x21,0xe5,0x0d,0x04,0x04,0x04,0x01,0x01,0x07,0xe5,0xe5,0x04,0xe5,0x0f,0x05, 0xe6,0x03,0xe5,0x06,0xe5,0x21,0x01,0x06,0x04,0x6c,0x10,0xe5,0x16,0x06,0x01,0xe5, 0xe5,0x0b,0x04,0x06,0xe5,0x10,0xe5,0x05,0xe5,0x07,0x0c,0xe5,0x18,0x1b,0x05,0x01, 0xe5,0x01,0x57,0x16,0x10,0x13,0xe5,0x04,0x07,0x01,0x04,0xe5,0xe5,0x08,0x06,0x12, 0xe5,0x1c,0x38,0x03,0xe5,0xe5,0x5a,0x27,0x03,0x22,0x06,0xe5,0x04,0xe5,0x06,0x01, 0x0e,0x07,0x02,0x02,0x05,0x0c,0x01,0x01,0x02,0xe5,0x01,0x05,0x09,0x1f,0xe5,0xe6, 0x01,0x5e,0x13,0x0f,0x03,0x04,0x10,0x01,0x02,0x04,0x08,0x02,0x01,0xe6,0x02,0x05, 0x03,0x0f,0x02,0x02,0x02,0x14,0x01,0x02,0x01,0x01,0x2c,0x02,0x01,0x01,0xe5,0x5b, 0x01,0x2d,0x0d,0x09,0x01,0x03,0xe5,0x03,0x04,0x01,0xe5,0x05,0x01,0x11,0x02,0x0e, 0x0b,0x01,0x02,0x0b,0x09,0xe5,0xe5,0xe5,0x19,0xe5,0x02,0x5d,0x01,0x3a,0x02,0x07, 0x01,0x01,0xe5,0xe5,0x01,0x02,0x06,0x17,0xe8,0xe5,0x01,0x0a,0xe5,0x0d,0x0e,0x09, 0x1f,0xe5,0xe6,0x6c,0xe5,0x25,0x12,0x04,0x04,0x02,0x09,0x10,0x09,0x09,0x1d,0x09, 0x21,0xe8,0x63,0x09,0x09,0x09,0x09,0x0b,0x0f,0xe5,0x0d,0x07,0xe6,0x06,0x05,0xe5, 0x0b,0xe5,0x03,0xe5,0x15,0x05,0xe5,0x07,0xe5,0x1f,0x02,0xe6,0x0d,0x09,0x09,0x09, 0x09,0x09,0x09,0x09,0x04,0x01,0x02,0x03,0x02,0x02,0x03,0x05,0x03,0x05,0x03,0x05, 0x03,0x02,0x02,0x06,0x01,0x02,0x02,0x06,0xe5,0x04,0x02,0xe5,0x07,0x06,0x02,0x02, 0xe5,0x04,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x07,0x09,0x03,0x05,0xe6,0x06,0xe6,0x06, 0x09,0x06,0x06,0x02,0x01,0x02,0x59,0x08,0x0a,0x09,0x10,0x02,0x06,0x17,0x13,0x11, 0x02,0x1d,0x30,0xe5,0x03,0x05,0xe5,0x58,0x13,0x0a,0x13,0x0d,0x0e,0x27,0x03,0x0f, 0x0d,0x2f,0x0a,0x01,0x01,0x5f,0x07,0x01,0x04,0x02,0x1c,0x0c,0x0c,0xe6,0x06,0x06, 0x01,0x09,0x0a,0xe6,0x06,0x09,0xe6,0x0a,0x04,0x0a,0xe6,0x06,0xe6,0x1e,0xe8,0x5b, 
0xe5,0x01,0x06,0x01,0xe5,0x07,0xe5,0x08,0x09,0x09,0x03,0x06,0xe5,0x09,0x0f,0x01, 0xe5,0x02,0x02,0x02,0x0a,0x13,0x0f,0x01,0x01,0x09,0x09,0x19,0xe5,0x02,0x01,0xe8, 0x66,0x12,0x1f,0xe5,0x0c,0x01,0x05,0x06,0x2b,0x03,0x0e,0x03,0x09,0x07,0x16,0x0a, 0xe5,0xe5,0x14,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5, 0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x03, 0x05,0xe5,0x05,0xe7,0x01,0x04,0xe6,0x07,0xe5,0x07,0xe5,0x02,0x02,0xe7,0x07,0xe5, 0x05,0xe7,0x07,0xe5,0x01,0x05,0xe5,0x02,0x02,0xe7,0x05,0xe7,0x07,0xe5,0x07,0xe6, 0x03,0x01,0xe8,0x5e,0x07,0x01,0x07,0x02,0x10,0x01,0x16,0x09,0x09,0xe5,0x04,0x01, 0xe5,0x07,0x0a,0x08,0x0a,0x09,0xe5,0x06,0x0a,0x09,0x1b,0x08,0xe5,0x5c,0x09,0x01, 0x09,0x11,0x01,0x1c,0x03,0x01,0xe5,0x01,0x07,0x01,0xe5,0xe5,0x06,0x0e,0xe5,0x01, 0xe5,0xe5,0x0b,0xe5,0x01,0x0a,0x01,0x02,0x09,0xe5,0x01,0x03,0x01,0xe5,0x01,0x1c, 0xe7,0x0b,0x5a,0x01,0x0a,0x06,0x13,0x0a,0x03,0x0d,0xe5,0x02,0x0a,0x13,0x03,0xe5, 0x0d,0x03,0xe5,0x0c,0x0a,0x03,0xe5,0x07,0xe5,0x1b,0xe6,0xe5,0x66,0x01,0x0a,0x29, 0x03,0x01,0x06,0x01,0xe6,0x01,0x03,0x01,0x03,0x09,0x0c,0x01,0xe5,0x01,0x0d,0x01, 0xe5,0x02,0x08,0x0d,0x01,0xe5,0x05,0x01,0xe5,0x1b,0xe5,0xe5,0x73,0x1e,0x1e,0x06, 0xe5,0xe5,0x05,0x01,0xe5,0x11,0xe5,0x27,0x09,0x23,0xe5,0x10,0x01,0x07,0x01,0x07, 0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01, 0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x09,0x01,0xe5,0x05, 0x01,0xe6,0x04,0x01,0x03,0x03,0x01,0xe5,0xe5,0xe5,0xe6,0xe5,0x06,0xe6,0x06,0x01, 0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x07,0x01,0x07,0x01, 0x0a,0xe5,0x01,0x20,0x50,0x01,0xe5,0x04,0x1d,0x01,0x02,0xe5,0xe5,0x06,0xe6,0x01, 0x01,0x0b,0x0c,0x02,0x04,0x0e,0x03,0xe6,0x06,0x0f,0x03,0xe7,0x07,0x20,0xe6,0xe6, 0x08,0x33,0x26,0x05,0x06,0x02,0x17,0x0f,0x05,0x02,0x0f,0x0a,0x0c,0x07,0x05,0xe5, 0xe5,0xe5,0xe6,0x0d,0x07,0x04,0x2c,0x02,0xe7,0x6b,0x04,0x20,0x06,0xe5,0x07,0x15, 
0x11,0x1a,0x02,0x01,0x13,0x0d,0x1e,0x02,0xe6,0x02,0xe5,0x5d,0xe5,0x01,0xe6,0x03, 0x05,0x08,0x07,0x01,0x09,0x09,0x02,0x09,0x03,0x04,0x02,0x0b,0x11,0x01,0x02,0x01, 0x53,0xe5,0x01,0xe6,0x66,0x02,0x10,0x08,0x01,0x03,0x03,0x08,0xe6,0x01,0x11,0x05, 0xe5,0x12,0x0c,0x02,0x05,0x4d,0xe5,0x01,0xe5,0x11,0x4c,0x12,0x02,0x09,0x15,0x02, 0x16,0xe5,0x08,0x08,0xe5,0x11,0xe5,0x12,0x08,0xe5,0x01,0x07,0x0b,0x09,0x0a,0x0a, 0xe9,0x42,0x11,0x24,0x08,0x07,0x07,0x09,0x09,0x0b,0x04,0xe5,0x02,0x02,0x01,0x02, 0xe5,0x09,0x01,0x02,0x01,0xe5,0x01,0xe5,0x09,0x01,0xe5,0x07,0x01,0x0e,0x27,0xe8, 0x04,0x22,0x4b,0x01,0x06,0x1b,0xe5,0x1a,0x04,0x01,0x07,0x16,0x0c,0x01,0x18,0x15, 0x17,0x05,0x16,0x40,0xe5,0xe6,0x2b,0xe7,0x05,0xe5,0x15,0x09,0x0c,0x04,0xe5,0xe5, 0x06,0xe6,0x05,0x01,0x09,0x05,0x17,0x1a,0x09,0x02,0x05,0x59,0x29,0x06,0x08,0x05, 0x11,0x08,0x03,0x1b,0x09,0x07,0x0f,0x0d,0x23,0xe6,0xe5,0x50,0x0c,0xe5,0x19,0x36, 0x04,0x02,0x01,0x04,0x02,0x01,0x02,0xe6,0x04,0x10,0x01,0xe5,0x0f,0x01,0x18,0x09, 0x13,0x01,0x01,0x5f,0x04,0x04,0x06,0x13,0x02,0x0c,0x0b,0x09,0x06,0x01,0x02,0x0b, 0x02,0x0a,0x01,0x03,0x02,0x09,0x06,0x0c,0x01,0x07,0x2c,0xe8,0x1b,0x43,0x08,0x1d, 0x0c,0x05,0x14,0xe5,0x0b,0x06,0x13,0x0a,0x04,0x0e,0x06,0x02,0x06,0x06,0x1e,0xe5, 0x01,0xe6,0x71,0xe6,0x09,0xe5,0x05,0xe5,0xe5,0xe5,0x03,0x02,0xe5,0x05,0x03,0x01, 0x04,0x09,0xe5,0x06,0xe5,0x01,0xe5,0x03,0xe6,0x07,0x13,0xe5,0x14,0xe5,0x04,0x09, 0x1e,0x02,0x02,0x02,0x01,0x5d,0x12,0xe5,0x09,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x03,0xe5,0x04,0x01,0x04,0x02,0x01,0x04,0x05,0x02,0x03,0xe5,0x0e,0x05,0x05,0xe5, 0x1c,0x01,0x04,0x09,0x1b,0x01,0x03,0xe5,0xe5,0x03,0x5f,0x01,0xe5,0x03,0x01,0x01, 0x07,0x01,0xe5,0xe5,0x10,0x01,0x0b,0x18,0x01,0x07,0xe5,0xe5,0x02,0xe5,0x0a,0x01, 0x0e,0x01,0xe5,0x03,0x04,0xe5,0x02,0x31,0x04,0xe6,0xe5,0x01,0x6b,0x0c,0x01,0x0e, 0xe5,0xe5,0xe5,0x08,0x19,0xe5,0x01,0x06,0x13,0x1d,0xe5,0x2f,0x06,0x05,0xe5,0x6e, 0x01,0x18,0x09,0x08,0x06,0x05,0x04,0x01,0x02,0x07,0x04,0x04,0x03,0x01,0x05,0x02, 
0x04,0x0b,0x07,0x09,0x0b,0x09,0x16,0x07,0xe9,0x01,0x68,0x04,0xe5,0x02,0x09,0x13, 0x07,0x03,0x01,0x01,0xe5,0x09,0x01,0x01,0x08,0x03,0x01,0x03,0x05,0x0a,0x02,0x03, 0x01,0x0a,0x02,0x09,0x05,0x2c,0x02,0xe6,0xe6,0x65,0x07,0x12,0x08,0xe5,0x01,0x01, 0x02,0x08,0x0c,0xe6,0x01,0x01,0x01,0x07,0x02,0x04,0x01,0x05,0x08,0x02,0x15,0x07, 0x01,0x05,0x05,0x02,0x01,0x04,0x02,0x01,0x1a,0x01,0x02,0x81,0xe6,0x04,0xe5,0xe5, 0xe6,0xe5,0xe6,0x05,0x06,0x06,0xe5,0xe5,0xe6,0xe5,0x01,0x04,0x02,0x06,0x02,0x0a, 0xe5,0xe8,0x01,0x0a,0xe5,0x03,0x02,0x06,0x02,0x0b,0xe6,0x06,0xe6,0x1d,0x02,0x6d, 0x1a,0x02,0xe5,0x04,0x0b,0x02,0xe6,0x03,0x09,0x02,0xe5,0x04,0x02,0x02,0x03,0x09, 0x09,0x09,0x02,0xe5,0x04,0x02,0x02,0x03,0x02,0x02,0x03,0x09,0x1b,0x04,0x01,0x02, 0x60,0x0f,0x17,0xe5,0x07,0xe5,0x0a,0x08,0xe5,0x07,0xe5,0x15,0xe5,0x03,0xe5,0x0b, 0x05,0xe5,0x0b,0x0f,0xe5,0x01,0x05,0xe5,0x20,0x02,0xe5,0x0d,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0x04,0x01,0x02,0xe5,0x02,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x02, 0x02,0x03,0x02,0xe5,0x07,0xe5,0x04,0x04,0x01,0x07,0xe5,0x04,0x02,0xe6,0x01,0x01, 0x02,0xe5,0xe5,0x05,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x02,0xe6,0x01,0xe5,0x02,0x01, 0x02,0xe5,0xe6,0x04,0xe5,0x04,0x02,0xe6,0xe5,0x04,0xe6,0x06,0x09,0x0d,0xe5,0x02, 0x02,0x59,0x09,0x09,0x09,0x08,0xe5,0x11,0x15,0xe5,0x08,0x09,0x12,0xe5,0x12,0x12, 0xe5,0x2b,0x02,0xe6,0xe5,0x58,0x0b,0x09,0x07,0x0b,0x05,0x09,0x15,0x03,0x07,0x0b, 0x0f,0x03,0x0f,0x01,0x15,0x31,0xe7,0x59,0x02,0x01,0x01,0x05,0x13,0x09,0x01,0xe7, 0x06,0xe6,0x04,0xe5,0xe7,0x07,0xe6,0x03,0x01,0xe7,0x06,0x09,0x03,0x05,0xe6,0x02, 0x03,0x05,0xe5,0xe8,0x06,0x09,0x06,0x01,0xe7,0x06,0xe6,0x20,0x01,0x5d,0x01,0xe5, 0x07,0xe5,0x09,0x04,0x02,0xe5,0x04,0xe5,0xe7,0x01,0x06,0x07,0x01,0xe6,0x08,0x04, 0xe5,0xe7,0x10,0x0b,0x0f,0x01,0xe6,0x10,0x07,0x01,0xe6,0x08,0x20,0x01,0xe7,0x5b, 0x01,0xe5,0x05,0x06,0x0c,0x06,0x02,0x01,0x01,0x08,0x05,0x02,0x01,0x03,0x0a,0x02, 0x01,0x04,0x02,0x01,0x07,0x0c,0x02,0x0d,0x01,0xe5,0x02,0x04,0x11,0x01,0x03,0xe5, 
0x04,0x21,0xe6,0xe5,0x08,0x0b,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe6,0xe5, 0x02,0xe7,0x05,0xe7,0xe6,0xe6,0x02,0xe6,0x02,0x02,0xe8,0xe6,0x02,0xe6,0x01,0x05, 0xe5,0x01,0x05,0xe5,0x05,0xe7,0x07,0xe6,0xe5,0x02,0xe7,0x01,0x05,0xe5,0x01,0x05, 0xe6,0xe6,0x01,0xe7,0x05,0xe7,0x07,0xe5,0x07,0xe5,0x06,0xe5,0x01,0x5f,0xe5,0x05, 0x0b,0x0a,0xe5,0x04,0x01,0x13,0x01,0x04,0x05,0xe5,0x04,0x01,0xe5,0x08,0x13,0xe5, 0x04,0x01,0xe5,0x05,0x01,0xe5,0x08,0x06,0x09,0x01,0xe6,0x07,0xe5,0x23,0x5d,0x01, 0xe5,0x0f,0x13,0x01,0x03,0x02,0x06,0x02,0x01,0xe5,0xe5,0x01,0x03,0x06,0x02,0xe5, 0xe6,0x04,0x0c,0x0d,0x02,0x02,0xe5,0x05,0x01,0x03,0x02,0x03,0x08,0x07,0x01,0x03, 0x02,0x06,0x02,0x14,0xe5,0x03,0xe6,0x01,0x5f,0x07,0x0b,0x13,0x04,0xe5,0x07,0xe5, 0x04,0x0e,0xe5,0x02,0x04,0x04,0xe5,0x16,0xe5,0x02,0x09,0xe5,0x02,0xe5,0x02,0x13, 0x04,0xe5,0x07,0xe5,0x14,0x07,0xe5,0xe5,0x5c,0x01,0xe5,0x0f,0x04,0x07,0x06,0x01, 0x02,0xe5,0xe6,0x04,0xe5,0xe6,0x01,0x01,0x06,0x05,0xe5,0xe7,0xe5,0x01,0x03,0x03, 0x15,0xe5,0xe6,0x02,0x06,0x01,0x02,0xe5,0xe7,0x01,0x10,0x01,0x02,0xe5,0xe6,0x04, 0xe5,0xe6,0x14,0x03,0x05,0x11,0x55,0x01,0xe6,0x04,0x03,0xe5,0x08,0x19,0x1e,0xe5, 0xe5,0x13,0x05,0x17,0xe5,0x03,0x03,0x36,0x01,0x11,0x01,0x07,0x01,0x07,0x01,0x07, 0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x04,0x02,0x01,0x04,0x02, 0x01,0xe5,0x04,0xe5,0xe6,0x05,0x01,0x01,0x05,0x01,0x01,0x06,0xe5,0xe5,0x01,0x04, 0xe8,0x04,0x01,0x04,0x02,0x01,0x02,0x04,0x01,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x03, 0xe6,0xe5,0x04,0x01,0x01,0x05,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0xe5,0x05,0x01, 0x07,0x01,0x0b,0x01,0xe5,0x6d,0x04,0x0b,0x07,0xe5,0x07,0xe5,0x0e,0x04,0xe6,0x06, 0xe5,0x03,0x03,0xe5,0x11,0xe6,0x05,0x0a,0xe5,0x03,0x05,0x04,0x01,0x03,0x06,0xe5, 0x01,0x05,0xe6,0x20,0xe8,0x4f,0x0a,0x04,0x09,0x02,0x13,0x04,0x06,0x02,0xe5,0x13, 0xe5,0x07,0x07,0x02,0x0b,0x06,0x03,0x0e,0x07,0x13,0x02,0x03,0x05,0xe5,0x04,0x1a, 
0xe5,0xe5,0xe5,0x47,0x1a,0x06,0x07,0x12,0x09,0x14,0xe5,0x08,0x04,0x06,0x11,0x04, 0x0f,0x1a,0x01,0x09,0x0f,0x14,0x02,0xe5,0x1c,0x33,0x0b,0x09,0x0b,0x07,0x03,0x09, 0x03,0x09,0x03,0x01,0x0e,0x01,0x02,0x07,0x01,0x04,0x09,0x01,0x0a,0x01,0x09,0xe5, 0x05,0x01,0x07,0x01,0x01,0x01,0x05,0x05,0x26,0x02,0xe6,0x5d,0x01,0x08,0x06,0x0b, 0x10,0x09,0xe6,0x01,0x12,0x08,0x02,0x04,0x13,0x04,0x06,0x09,0x01,0x05,0x04,0x06, 0x05,0x2a,0xe7,0x02,0x0d,0x50,0x0e,0xe6,0x07,0x0c,0x06,0x03,0xe5,0x01,0x01,0x01, 0x03,0xe5,0x0a,0x02,0x03,0x0f,0x03,0x09,0x03,0xe6,0xe5,0xe5,0x13,0x02,0xe6,0x03, 0x09,0x0d,0xe5,0x1b,0xe5,0x01,0x09,0x01,0x2a,0x02,0x0e,0x0c,0x06,0xe5,0x04,0x09, 0x09,0xe5,0x10,0xe6,0x07,0xe7,0x07,0x09,0xe5,0x07,0x09,0xe5,0xe5,0x01,0xe5,0x01, 0x02,0x06,0x02,0x09,0x0b,0x03,0xe5,0x01,0x1a,0x1e,0x29,0x38,0x06,0x01,0xe5,0x02, 0x02,0x01,0x07,0x02,0x10,0x06,0xe5,0x02,0x07,0x08,0x02,0x09,0x06,0x02,0x01,0x07, 0x06,0x02,0x02,0x01,0x18,0x0b,0xe5,0x0f,0x04,0x13,0x03,0xe5,0x7a,0x1c,0x05,0xe5, 0xe5,0xe5,0x07,0x04,0x19,0x1b,0x09,0x0d,0x02,0x2a,0xe5,0x01,0x7c,0x0d,0x0a,0x03, 0x57,0x08,0x07,0x0c,0x1f,0x01,0xe5,0x32,0x24,0x0e,0x01,0xe5,0x05,0x01,0x07,0x01, 0x16,0x06,0x01,0x11,0x01,0x02,0x18,0x01,0x0f,0x0a,0x09,0x02,0x1d,0x18,0xe6,0xe5, 0x72,0x03,0x05,0x10,0xe5,0xe5,0x03,0x07,0x08,0x02,0x24,0x09,0xe5,0x16,0x03,0xe5, 0x21,0x12,0xe5,0xe7,0x61,0x0f,0x02,0x04,0x02,0x0b,0x09,0x10,0x01,0x02,0x25,0x09, 0x0b,0x11,0x03,0x07,0x02,0x0c,0x1c,0x01,0x01,0x6a,0x04,0x1d,0x0a,0xe5,0x01,0x02, 0xe5,0x01,0x02,0xe5,0x04,0x09,0x09,0x02,0xe5,0x04,0x02,0xe5,0x04,0x09,0x02,0x09, 0x09,0x0a,0x02,0xe5,0x04,0x1e,0xe6,0xe5,0xe5,0x01,0x01,0x6c,0xe5,0x2d,0xe5,0x1c, 0x01,0x0e,0x0f,0x21,0x02,0xe5,0x04,0x1b,0x01,0x03,0x01,0xe5,0x03,0x5f,0xe5,0x0a, 0x01,0x04,0x01,0xe5,0x0f,0x07,0x01,0x09,0x01,0x06,0xe5,0xe5,0x06,0x01,0x07,0x01, 0x06,0x02,0x07,0x01,0x09,0x01,0x48,0x06,0x01,0x64,0x09,0x02,0x01,0x04,0x02,0xe5, 0x0e,0x01,0xe5,0x05,0x01,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 
0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x19,0xe5,0x1b,0xe5,0x18,0x03,0x01, 0xe6,0x04,0x05,0xe5,0x05,0x01,0x4e,0x09,0x01,0x01,0x02,0xe5,0xe7,0x01,0x06,0x01, 0x07,0x01,0x06,0x02,0x01,0x06,0xe5,0xe5,0x05,0xe5,0xe5,0x06,0x01,0x07,0x01,0x07, 0x01,0x07,0x01,0x03,0x02,0xe5,0x14,0xe5,0x04,0x05,0x09,0x07,0x03,0x01,0x13,0xe6, 0x08,0x01,0xe5,0x01,0x04,0x01,0x54,0x05,0x01,0x02,0x03,0xe5,0x08,0x01,0x01,0x07, 0x01,0x01,0x01,0xe5,0x03,0x01,0x01,0x01,0xe5,0x01,0x01,0x01,0x01,0xe5,0x01,0x01, 0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0xe5,0x15,0x03, 0xe6,0x13,0x02,0x05,0x0e,0x03,0x01,0xe5,0x0c,0x5b,0x07,0x01,0x05,0x04,0x06,0x01, 0x09,0x0b,0x04,0x01,0x02,0x09,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x04,0xe5, 0x07,0x01,0x07,0x02,0x01,0x0b,0x01,0xe5,0x05,0x01,0xe5,0x03,0x05,0x11,0x01,0xe5, 0xe5,0x0e,0x59,0x07,0x01,0x0a,0x06,0x01,0x07,0x01,0x09,0x01,0x04,0x01,0xe5,0xe5, 0x06,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x03,0xe6,0x06,0xe5,0x08,0xe5, 0xe5,0x01,0x03,0x07,0x01,0x01,0x05,0x01,0x01,0xe5,0xe6,0x16,0x02,0xe6,0x07,0x02, 0x06,0xe5,0x57,0x02,0x06,0x06,0x10,0x0b,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03, 0x05,0x03,0x05,0x03,0x05,0x09,0x0b,0x04,0x02,0x01,0xe5,0x02,0x09,0x09,0x02,0xe5, 0x0c,0x05,0xe5,0xe6,0x09,0xe5,0x04,0x02,0x65,0x04,0x21,0xe5,0x07,0xe5,0x07,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x03,0x01,0x01,0xe5,0x08,0x09,0x05,0xe5, 0x07,0xe5,0x23,0xe5,0x0d,0x03,0x05,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x02, 0x06,0x09,0x09,0x04,0x01,0xe5,0xe5,0x08,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x06, 0xe5,0xe5,0x01,0x04,0x01,0x06,0xe5,0xe5,0x01,0x06,0x09,0x05,0x03,0xe5,0x03,0x03, 0xe6,0x06,0xe6,0x03,0x02,0xe5,0xe5,0x05,0x0d,0xe5,0xe7,0x01,0x63,0x09,0x13,0x0a, 0x0b,0x08,0xe5,0x08,0x08,0xe5,0x06,0xe6,0x07,0xe5,0x06,0xe6,0x1b,0x1c,0x19,0x03, 0xe5,0xe5,0x64,0x09,0x13,0x1a,0x04,0x04,0x09,0x04,0x04,0x04,0x04,0x03,0x05,0x04, 0x04,0x16,0x3e,0x01,0x0a,0x59,0x02,0x06,0x0e,0xe5,0x07,0x12,0x02,0x09,0x09,0x04, 
0x01,0x02,0x09,0x09,0x09,0x07,0x01,0x07,0x01,0x0a,0x03,0x05,0xe6,0x06,0xe6,0x03, 0x02,0x15,0x02,0xe7,0x10,0x01,0x53,0x03,0x05,0x01,0x01,0x07,0xe6,0xe5,0x02,0x03, 0x02,0x01,0x02,0xe5,0x07,0x01,0xe5,0x05,0x01,0xe5,0x02,0x02,0x01,0xe5,0x05,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x04,0xe5,0xe6,0x02,0x01,0xe5,0xe7,0xe5,0x02, 0x01,0x01,0x05,0x0d,0x09,0x05,0x1e,0x0d,0x01,0x57,0x01,0x09,0x03,0xe5,0x03,0x09, 0x23,0x14,0x13,0x0b,0x21,0x0c,0x1e,0x01,0x01,0x04,0x08,0x01,0x05,0xe5,0x07,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe6, 0x06,0xe5,0x01,0x05,0xe6,0x06,0xe5,0x01,0x05,0xe5,0xe5,0x01,0x04,0xe6,0x01,0x03, 0x01,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x01,0x03,0xe7,0x01,0x05, 0xe5,0x05,0x01,0xe5,0x07,0xe6,0x06,0xe5,0x05,0xe7,0x02,0x02,0xe7,0x07,0xe5,0x07, 0xe5,0x0a,0x67,0x01,0x27,0xe5,0x14,0x09,0x09,0x09,0x13,0xe5,0x04,0x01,0x11,0x0c, 0xe5,0x07,0xe5,0x22,0xe5,0x66,0x27,0x01,0x0b,0x06,0x02,0x03,0x02,0x02,0x06,0x02, 0x09,0x09,0x06,0x02,0x07,0x01,0x06,0x05,0x04,0x0f,0x02,0x06,0x02,0xe5,0x12,0xe5, 0x03,0xe5,0x01,0xe5,0x0c,0x01,0x01,0x55,0x01,0x01,0x09,0x09,0x09,0x07,0xe5,0x06, 0x02,0x04,0x04,0xe5,0x03,0x03,0xe5,0x07,0xe5,0x07,0xe5,0x07,0x03,0xe6,0x02,0x0b, 0x01,0x01,0x19,0x02,0xe5,0x04,0x02,0xe5,0x14,0x06,0xe5,0xe6,0x0c,0x01,0x57,0x27, 0x01,0xe5,0x07,0x01,0x06,0xe5,0xe6,0x01,0xe8,0xe6,0x02,0x01,0xe5,0xe6,0x05,0x01, 0xe5,0x05,0x01,0x03,0xe8,0xe6,0x0c,0xe6,0x19,0x01,0xe5,0x05,0x01,0xe6,0x13,0x03, 0x01,0xe5,0x01,0x61,0x08,0x06,0x09,0x02,0x0a,0x05,0x02,0x08,0x02,0x06,0x02,0x09, 0x06,0xe5,0xe6,0x07,0xe5,0x01,0x02,0xe5,0xe6,0x07,0xe5,0x01,0x02,0xe5,0xe5,0x0f, 0x02,0x1a,0x1b,0xe5,0xe5,0x10,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07, 0x01,0x07,0x01,0xe5,0x05,0x01,0x07,0x01,0xe6,0x04,0x01,0x03,0x03,0x01,0x03,0x03, 0x01,0x07,0x01,0x01,0x05,0x01,0xe5,0xe5,0x05,0x01,0x01,0x05,0x01,0x07,0x01,0x03, 0x02,0xe5,0xe5,0x05,0xe5,0xe5,0x02,0x03,0x01,0x02,0x04,0x01,0x07,0x01,0x07,0x01, 
0x07,0x01,0xe5,0x05,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x0b,0xe5,0xe5,0x01,0x0e, 0x48,0x0c,0x13,0x02,0x02,0x02,0x04,0x05,0xe7,0x01,0x04,0xe6,0x03,0x03,0xe7,0x01, 0xe5,0x01,0x02,0xe5,0x02,0x03,0x07,0x02,0x01,0x06,0xe5,0x01,0x03,0x07,0x02,0x0c, 0x05,0xe5,0x02,0x06,0xe5,0x01,0xe5,0xe5,0x01,0xe6,0x01,0x03,0x05,0x15,0x01,0xe5, 0x3c,0x12,0x28,0xe5,0x02,0x0e,0x06,0x01,0x09,0x03,0x05,0x03,0x06,0x0b,0xe5,0x02, 0x09,0x0e,0x06,0x1d,0x14,0x12,0x02,0xe6,0x29,0x56,0x06,0x06,0x01,0x06,0x0e,0x01, 0x01,0x02,0x03,0x01,0x03,0x01,0x01,0x10,0x01,0x02,0x07,0x13,0x10,0x01,0x07,0x01, 0x24,0xe7,0x31,0x25,0x0f,0x07,0xe5,0x09,0x09,0x07,0xe5,0x09,0x01,0x03,0x01,0x01, 0x01,0x07,0x01,0xe5,0x02,0x02,0x04,0x01,0xe5,0xe5,0x03,0x06,0x07,0x13,0xe6,0x08, 0x1b,0x01,0x19,0x01,0xe5,0x44,0x21,0x06,0x06,0x03,0x01,0x12,0x06,0x03,0x04,0x08, 0x09,0x09,0x09,0x05,0x09,0x03,0x01,0x15,0x01,0x22,0x1b,0xe5,0x07,0x04,0xe5,0x02, 0x43,0x01,0x1b,0x0b,0x03,0xe6,0x02,0x04,0xe5,0x01,0x04,0xe8,0x02,0x03,0x05,0x03, 0x04,0x06,0x03,0x05,0x03,0x03,0x0b,0xe5,0x0b,0x03,0x09,0xe6,0x02,0x13,0x03,0x0f, 0x0e,0x0d,0x01,0x0f,0x10,0x2c,0x01,0x14,0x09,0x02,0x03,0x01,0x2e,0x02,0xe8,0x04, 0xe6,0x09,0x03,0x02,0xe6,0x19,0xe5,0xe5,0x37,0xe6,0x09,0x42,0x14,0x1d,0x12,0xe5, 0x03,0x0f,0x0a,0x09,0xe5,0x07,0x13,0x01,0x01,0x13,0x04,0x0a,0x08,0x0c,0x13,0x03, 0x53,0xe5,0x03,0x0d,0xe5,0x07,0xe5,0x03,0x1f,0x01,0x09,0xe7,0x04,0x02,0xe5,0x18, 0x02,0xe5,0x06,0x0a,0xe5,0x0d,0x01,0x0b,0xe5,0x07,0xe5,0x03,0x06,0x17,0x01,0xe6, 0x54,0x13,0x09,0x06,0x11,0x09,0x02,0x05,0x08,0xe5,0x07,0xe5,0x03,0x12,0x04,0x13, 0xe5,0x16,0x04,0x09,0x22,0x02,0x0f,0x44,0x02,0x07,0x08,0x04,0x04,0xe5,0x02,0x01, 0x01,0x01,0x09,0x09,0x08,0x03,0x07,0x09,0x0a,0x09,0x08,0x04,0x02,0x02,0x08,0x07, 0x03,0x08,0x08,0x01,0x07,0x03,0x20,0x01,0xe5,0x0e,0x3e,0x1a,0xe5,0x07,0x09,0x07, 0xe6,0x08,0x03,0x16,0x0e,0x09,0x04,0x09,0x02,0xe6,0x03,0x18,0x0e,0x09,0x0b,0x10, 0x01,0x02,0x0d,0xe5,0x3e,0x0e,0x0b,0x0a,0xe5,0x06,0x01,0x0f,0x01,0x08,0x1e,0xe5, 
0x06,0xe6,0x10,0x1a,0x03,0xe5,0x38,0xe8,0x0a,0x3e,0xe5,0x39,0xe5,0x17,0xe5,0xe5, 0x02,0x02,0xe5,0xe5,0x02,0xe5,0x14,0xe5,0x04,0x13,0x08,0xe5,0x01,0xe5,0x03,0xe5, 0x01,0xe5,0x04,0x02,0xe5,0x04,0x0a,0x13,0xe7,0xe6,0x01,0x01,0x08,0xe6,0x35,0x01, 0x15,0x03,0x22,0x03,0x1b,0x02,0x01,0x04,0x02,0x1d,0x0d,0x01,0x03,0x0a,0x12,0x09, 0x06,0xe5,0x13,0x01,0x03,0x01,0xe5,0x03,0x05,0xe5,0xe5,0xe5,0x3c,0x11,0x22,0xe5, 0x20,0x09,0x06,0x01,0x06,0xe5,0x12,0xe5,0x1b,0x01,0x02,0x01,0x16,0x01,0xe5,0x14, 0x04,0x03,0x02,0x0a,0x01,0x72,0xe5,0x10,0xe5,0x0d,0x09,0x06,0xe5,0x1a,0xe5,0x1f, 0x1a,0x04,0x15,0x02,0xe6,0xe5,0x0e,0x04,0x01,0x7a,0x02,0xe5,0x01,0x07,0x04,0x04, 0x04,0x04,0x09,0x09,0x09,0x09,0x09,0x11,0x02,0x02,0x05,0x09,0x02,0x03,0x02,0xe5, 0x01,0x12,0x02,0x02,0x0a,0x03,0x02,0x01,0x4a,0x04,0x25,0x01,0xe5,0x03,0x0b,0x04, 0x03,0x05,0x03,0x0a,0x09,0x13,0x1b,0x05,0x14,0x04,0x12,0x05,0xe6,0x0b,0x01,0x55, 0x2b,0x09,0x01,0x07,0x09,0x03,0x05,0x01,0x07,0x01,0x11,0x01,0x1b,0x07,0x05,0x09, 0x05,0x19,0xe5,0x02,0x0d,0x01,0x7f,0x0a,0xe5,0xe5,0x06,0x03,0xe5,0x03,0x04,0x04, 0x01,0x07,0x01,0x0b,0xe5,0x03,0x01,0x0b,0xe7,0x0b,0x02,0x0b,0xe6,0x06,0x06,0x18, 0xe8,0x12,0xe5,0x4d,0x02,0x10,0x1d,0xe5,0x09,0xe5,0x04,0x09,0x0c,0xe5,0x0e,0x13, 0x13,0x02,0x06,0x09,0x25,0x14,0x4f,0xe5,0x07,0x0b,0x07,0xe5,0x04,0x0c,0xe5,0x09, 0xe5,0x03,0xe5,0x07,0xe5,0x1b,0xe5,0x11,0xe5,0x17,0x03,0xe5,0x07,0xe5,0x15,0x09, 0xe5,0xe7,0x0d,0x02,0xe5,0x04,0x09,0x09,0x09,0x09,0x09,0x09,0x04,0x01,0x02,0x09, 0x04,0xe6,0x01,0x06,0x02,0x09,0x09,0x04,0x01,0x01,0x02,0x09,0xe5,0x07,0xe7,0x05, 0x04,0x01,0x02,0x09,0xe5,0x02,0x01,0x02,0x09,0xe5,0x07,0x09,0xe5,0x07,0xe6,0x06, 0xe6,0x03,0x02,0x04,0x01,0x02,0x03,0x09,0x01,0xe7,0x01,0x59,0x09,0x09,0x06,0x02, 0x1d,0x29,0x31,0x1c,0xe5,0x08,0x0e,0x03,0xe5,0xe5,0x5a,0x09,0x08,0x09,0x1e,0x02, 0x0e,0x17,0x0f,0x13,0x0b,0x1f,0x09,0x15,0xe5,0xe5,0x6e,0x01,0x10,0xe5,0xe5,0x08, 0x0b,0x0a,0x01,0x01,0x05,0xe6,0xe5,0x03,0x04,0x04,0x0a,0x01,0x01,0x04,0x0a,0xe6, 
0x0d,0x02,0x03,0x05,0xe6,0x06,0xe6,0x03,0x09,0x0e,0x03,0x01,0x0a,0x06,0x4f,0x05, 0x02,0x06,0x01,0xe5,0x04,0x02,0xe5,0x02,0x02,0x01,0xe6,0xe5,0x04,0x01,0x07,0x03, 0x09,0x09,0x07,0x01,0x09,0x02,0x06,0x09,0x09,0x11,0x0b,0x09,0x07,0x01,0x05,0x01, 0x01,0x0d,0xe7,0x0e,0x53,0x0a,0x02,0x01,0x11,0x01,0x04,0x09,0x03,0x2a,0x13,0x0b, 0x01,0x0d,0x1a,0x02,0x0b,0x03,0x0b,0xe7,0x0e,0x05,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe5, 0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x08,0xe6,0x05,0xe7,0x02,0x03,0xe6,0x07,0xe5, 0x07,0xe5,0x05,0xe7,0x07,0xe5,0x05,0xe7,0x07,0xe5,0x07,0xe5,0x02,0x02,0xe7,0x02, 0x02,0xe7,0x01,0x05,0xe5,0x07,0xe5,0x09,0xe5,0x55,0x13,0x08,0xe5,0x11,0x09,0x0b, 0x14,0x08,0x09,0x13,0x28,0xe5,0x07,0x06,0x09,0x01,0x0e,0xe6,0xe5,0x11,0x43,0x06, 0x03,0x08,0x06,0x01,0xe5,0x11,0x02,0x06,0x01,0x07,0x03,0x04,0x05,0xe5,0x01,0x05, 0x03,0x02,0x01,0x09,0x0a,0xe5,0x01,0x04,0x0a,0xe5,0x01,0x0c,0x0c,0xe5,0x01,0x05, 0xe5,0x01,0x02,0x01,0x05,0x01,0x01,0x0f,0xe5,0x0e,0x61,0x01,0x13,0x12,0x07,0x09, 0xe5,0x03,0x03,0x0e,0x0e,0xe5,0x02,0x0e,0xe5,0x1b,0xe5,0x03,0x03,0xe5,0xe5,0x19, 0x03,0x0f,0x46,0x13,0x06,0x01,0xe5,0x11,0x1a,0x01,0x06,0x01,0xe5,0x08,0x1a,0x01, 0xe5,0x0f,0x01,0xe5,0x19,0x01,0xe5,0x05,0x01,0xe5,0x01,0x1a,0x01,0x12,0x45,0x04, 0x0d,0xe5,0x07,0x0f,0x02,0xe5,0x1d,0x06,0x04,0x12,0x2d,0x02,0x0a,0x26,0x03,0xe5, 0x01,0x01,0x0f,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01, 0x02,0x04,0x01,0x04,0x01,0xe5,0xe5,0xe5,0x04,0x01,0x07,0x01,0x07,0x01,0x07,0x01, 0x02,0x04,0x01,0x09,0x01,0x06,0xe5,0xe6,0x05,0x01,0x07,0x01,0x07,0x01,0x02,0x03, 0xe5,0xe5,0x05,0xe5,0xe5,0x06,0x01,0x07,0x01,0x07,0x01,0x06,0xe7,0x06,0x01,0x07, 0x01,0x07,0x01,0x08,0x02,0xe7,0x0a,0x05,0x45,0x09,0x04,0x09,0x14,0xe6,0x11,0x05, 0x04,0x01,0xe6,0x05,0xe6,0x15,0x05,0xe5,0x18,0x02,0xe5,0x04,0xe6,0x0e,0x04,0xe5, 0x02,0x07,0x08,0x0e,0xe7,0x03,0x06,0x30,0x1e,0x06,0x07,0x09,0x0e,0x12,0x0d,0x08, 
0xe5,0x05,0x11,0x05,0xe5,0x1c,0x0d,0x0e,0xe5,0x05,0x08,0x11,0x01,0x02,0x5c,0x09, 0x07,0x07,0x07,0x07,0x13,0x06,0x05,0xe5,0xe5,0x05,0x02,0x15,0x07,0x09,0x01,0x0c, 0x44,0x01,0x10,0x5f,0xe8,0x05,0x02,0x08,0xe5,0x01,0x06,0x19,0x13,0x0b,0x07,0x1a, 0x07,0x25,0xe6,0x0d,0x03,0x08,0x02,0x4b,0x0b,0x0d,0x0b,0x07,0x04,0x0e,0x05,0x53, 0x01,0x28,0x11,0xe6,0x18,0x21,0x18,0xe5,0x16,0x06,0x01,0x14,0x09,0x03,0x03,0x03, 0x06,0x0b,0x06,0x03,0x01,0x03,0x03,0x06,0x09,0x0c,0x07,0x15,0x07,0x21,0xe5,0xe5, 0x10,0x05,0xe5,0x34,0x04,0x04,0xe5,0x02,0x01,0x07,0x01,0xe5,0x05,0x01,0x0a,0x09, 0x09,0x08,0xe5,0xe5,0x09,0x0f,0xe5,0x06,0xe6,0x1e,0x1d,0x03,0x07,0x04,0x1e,0x01, 0x08,0x0c,0x3b,0x04,0x0b,0xe5,0x07,0x06,0x02,0x07,0x18,0x03,0x0b,0x0c,0x04,0x02, 0x04,0x10,0x09,0x0b,0x23,0x04,0x06,0x01,0x14,0xe5,0x0d,0x04,0x40,0x04,0x03,0xe5, 0x03,0x13,0x0e,0x01,0xe5,0x0e,0xe6,0x13,0x0f,0x05,0x06,0xe5,0x11,0xe5,0x18,0x02, 0xe5,0x07,0xe5,0x0a,0x17,0xe8,0x0f,0x03,0x41,0x08,0x27,0x11,0x01,0x0e,0x06,0x05, 0x15,0x01,0x02,0x03,0x0a,0x01,0x15,0x05,0x01,0x07,0x01,0x06,0x1c,0xe7,0x04,0x04, 0x02,0x01,0x45,0x04,0x03,0xe5,0x06,0x01,0x09,0x07,0x01,0x1c,0xe5,0xe5,0x02,0x09, 0x0d,0x01,0xe5,0x1b,0x01,0x08,0x31,0x17,0x01,0xe7,0x04,0x08,0x09,0x3a,0x0a,0x01, 0x11,0x09,0x1f,0x14,0xe5,0x06,0x04,0x02,0x01,0x13,0x39,0x01,0x17,0x04,0x05,0xe5, 0x02,0x03,0x09,0x44,0x01,0x0b,0x05,0x01,0x09,0x0b,0x10,0x02,0x17,0x05,0x0d,0x0c, 0x03,0x0a,0x27,0x09,0x16,0x03,0xe6,0x55,0x04,0x02,0xe5,0x0e,0x16,0xe5,0x13,0xe5, 0x01,0xe5,0x0a,0x03,0x16,0xe5,0x04,0x0c,0xe5,0x04,0x05,0x10,0xe5,0x04,0x02,0xe5, 0x04,0xe5,0x08,0x13,0xe7,0xe6,0x01,0x01,0x0d,0x40,0xe5,0x07,0x02,0x10,0x17,0x14, 0x0d,0x01,0xe5,0x01,0x1d,0x13,0x06,0x16,0x09,0x1c,0x01,0x04,0x01,0x04,0x0b,0x3f, 0xe5,0x0a,0x13,0x3d,0x3b,0x3d,0x04,0xe5,0x01,0x02,0x0b,0x02,0x01,0xe5,0x3a,0x13, 0x31,0x18,0x06,0x08,0xe5,0x29,0x24,0xe5,0x18,0x02,0xe5,0x01,0x05,0x0a,0x0b,0x34, 0x03,0xe5,0xe6,0x01,0xe5,0x05,0xe7,0x08,0x01,0x04,0x01,0x1c,0x03,0x02,0x03,0x01, 
0x05,0x04,0x04,0x02,0x06,0x13,0x13,0x04,0x18,0x09,0x0a,0x02,0x11,0xe5,0xe6,0x01, 0x02,0x16,0x39,0x06,0x03,0x01,0x02,0x02,0x01,0x01,0x02,0x2a,0x02,0x02,0x02,0x0a, 0x03,0x0a,0x2c,0x03,0x23,0x08,0x0e,0x03,0x01,0xe6,0x03,0x0a,0x06,0x01,0x3e,0x18, 0x04,0x23,0x02,0x0e,0x01,0xe5,0x02,0xe5,0x03,0x01,0x07,0x01,0x0c,0x18,0x17,0x09, 0x0d,0x11,0x01,0x01,0xe5,0x0f,0x06,0x01,0x3c,0xe5,0x42,0x02,0x0a,0xe5,0xe5,0xe6, 0x03,0x04,0x01,0x07,0x01,0x0b,0xe5,0x11,0xe5,0x03,0x10,0x01,0x05,0x09,0x21,0xe6, 0x04,0x0a,0x02,0x02,0x3f,0x02,0x02,0x41,0x02,0x06,0x09,0x1d,0x13,0x1d,0x09,0x0c, 0xe5,0x12,0xe5,0x01,0x10,0x0d,0x37,0x03,0x09,0x09,0x06,0x24,0x09,0x03,0xe5,0x07, 0xe5,0x12,0x02,0x05,0xe5,0x11,0xe5,0x1b,0xe5,0x07,0xe5,0x08,0x16,0xe9,0xe5,0x06, 0x01,0x02,0xe5,0xe5,0x05,0x03,0x05,0x09,0x09,0x09,0x09,0x09,0xe5,0x03,0xe5,0x01, 0x05,0x03,0x02,0x02,0x03,0x09,0x09,0x09,0x06,0x04,0xe5,0x07,0xe5,0x07,0xe6,0x01, 0x01,0x02,0x09,0x05,0x03,0xe5,0x07,0x09,0xe5,0x07,0x09,0x09,0xe6,0x06,0xe6,0x01, 0x01,0x02,0x09,0x0d,0x01,0xe5,0xe5,0x01,0x09,0x09,0x42,0x02,0x09,0x2e,0x02,0x3d, 0x3b,0x18,0x02,0x01,0x01,0x09,0x09,0x47,0x08,0x31,0x12,0x27,0x01,0x11,0x29,0x1e, 0xe8,0x0c,0x01,0xe6,0x04,0x01,0x3b,0xe6,0x0b,0x02,0x01,0x04,0x0e,0x1c,0xe5,0x01, 0x09,0x01,0x07,0xe6,0x05,0x09,0x0a,0x01,0x01,0x04,0x0a,0x01,0x1b,0xe6,0x06,0xe6, 0x03,0x1b,0xe7,0x0c,0x01,0xe5,0x07,0x01,0x39,0x01,0x04,0x03,0x06,0x01,0x01,0x02, 0x06,0x01,0x05,0xe5,0x1b,0x0d,0x09,0x09,0x07,0x01,0x01,0x07,0x05,0x01,0xe5,0x09, 0x0c,0x10,0x09,0x05,0x01,0x01,0x01,0x0d,0xe5,0x01,0xe5,0x02,0x01,0xe5,0x0b,0x02, 0x0c,0x14,0x23,0x0c,0xe5,0x01,0x01,0x09,0x06,0x02,0x04,0x18,0x06,0x13,0x04,0x1b, 0x01,0x04,0x27,0x0e,0x19,0x01,0xe5,0x0c,0x01,0x05,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x01,0x05, 0xe5,0x01,0x05,0xe5,0x07,0xe5,0x07,0xe5,0xe5,0x05,0xe7,0x05,0xe7,0x02,0x03,0xe6, 0x07,0xe5,0x07,0xe5,0x05,0xe8,0xe5,0x04,0xe5,0x05,0xe7,0x07,0xe5,0x07,0xe5,0x02, 
0x02,0xe7,0x02,0x02,0xe7,0x06,0xe6,0x07,0xe7,0x02,0x02,0x02,0x0d,0x01,0x28,0x2e, 0x01,0x07,0x0b,0x0a,0x09,0x1f,0x08,0x09,0x11,0x01,0xe5,0x1c,0xe5,0x07,0x09,0x06, 0x08,0x0f,0x03,0x01,0x0d,0x01,0x07,0x03,0x3c,0x0e,0x01,0x02,0x04,0x04,0x06,0x0a, 0x12,0x07,0xe5,0x03,0x01,0x02,0x04,0x04,0x05,0x06,0x02,0x08,0x01,0x02,0xe5,0xe5, 0x0a,0x01,0x02,0x18,0x01,0x02,0x04,0x01,0x02,0xe5,0xe5,0x01,0x01,0x0c,0xe5,0x03, 0xe5,0x01,0xe6,0x0b,0x01,0x28,0x26,0x09,0x09,0x09,0x14,0x07,0x06,0xe5,0x08,0xe5, 0x03,0x03,0x04,0x18,0xe5,0xe5,0xe6,0x0c,0xe5,0x17,0x03,0xe5,0x03,0x03,0xe5,0x02, 0x11,0x08,0xe6,0x03,0x08,0x01,0x28,0x2e,0x01,0x09,0x09,0x0a,0x09,0x0f,0x01,0x06, 0x01,0xe5,0x08,0x1a,0x01,0xe6,0xe6,0x0b,0x01,0xe5,0x0c,0x0c,0x01,0xe5,0x05,0x01, 0xe5,0x06,0x0d,0x03,0x01,0x02,0xe6,0x51,0x01,0xe5,0x07,0xe5,0x0f,0x02,0x14,0x03, 0x06,0x06,0x01,0xe6,0x0e,0x3b,0xe5,0xe5,0x25,0xe5,0x17,0x02,0x03,0xe5,0x0b,0x01, 0x07,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x04,0x02, 0x01,0x07,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01, 0x06,0x02,0xe6,0x06,0x01,0x03,0x03,0x01,0x07,0x01,0x07,0xe6,0xe5,0x04,0x01,0x02, 0x04,0x01,0x01,0x05,0x01,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x07,0x01,0x07,0x01, 0x07,0x01,0x03,0x06,0xe6,0xe5,0x03,0x0a,0xe5,0x01,0x07,0x1b,0x33,0x05,0xe5,0x01, 0x04,0x0c,0xe6,0x03,0x02,0xe5,0x06,0x0c,0xe5,0x07,0xe6,0x03,0x09,0x04,0x03,0x03, 0xe5,0x07,0xe5,0x07,0xe5,0x06,0x14,0xe5,0x07,0xe5,0x1c,0x07,0xe5,0x0b,0x08,0x4e, 0x12,0x0d,0xe6,0x01,0x03,0x07,0xe5,0x0a,0x01,0x07,0xe5,0x05,0x15,0x01,0xe5,0x07, 0xe5,0xe6,0x02,0xe5,0x05,0x0a,0x0a,0xe5,0x07,0xe5,0x05,0x0b,0x10,0xe5,0xe7,0x01, 0x02,0x09,0x26,0x23,0x01,0x34,0x15,0x02,0x06,0x22,0x0e,0x1d,0x05,0x03,0x23,0xe5, 0xe5,0x07,0x06,0x09,0x01,0x37,0x09,0x01,0x01,0x04,0xe6,0x07,0xe6,0x28,0x0d,0x6c, 0x07,0x0f,0xe6,0x08,0x01,0x01,0x48,0x02,0x03,0x02,0x11,0x02,0x23,0xe5,0x10,0x1e, 0x3c,0x26,0xe7,0x16,0xe6,0x16,0x13,0x12,0x06,0xe5,0x06,0xe5,0x13,0x01,0x0a,0x06, 
0x01,0x04,0x01,0x0f,0x04,0xe5,0x01,0x0a,0x05,0x0f,0x05,0x01,0x1a,0x16,0x20,0xe6, 0x0f,0x06,0x01,0x0a,0x1d,0x0e,0x15,0x01,0x02,0x0d,0x1b,0x03,0x03,0x09,0x1a,0x01, 0xe5,0x54,0x11,0x01,0x01,0x01,0x08,0x07,0x4d,0xe5,0xe5,0x09,0x09,0x07,0x18,0x27, 0x04,0x0d,0x5d,0x05,0x0e,0x1d,0x3e,0xe5,0x08,0x05,0x02,0xe6,0x05,0xe7,0x05,0xe5, 0x01,0x02,0x04,0xe5,0x01,0x06,0x09,0x07,0x01,0xe5,0x07,0x01,0x07,0xe5,0x06,0x01, 0x33,0x19,0xe6,0x01,0x0e,0x46,0x18,0xe5,0x08,0x0b,0x08,0x07,0x02,0x08,0x09,0x09, 0x0b,0x11,0x01,0x0a,0x49,0xe5,0x01,0x03,0x15,0x22,0x22,0xe5,0x03,0xe5,0x01,0x09, 0x18,0x0d,0x06,0x06,0x02,0x05,0x04,0x0d,0x05,0x0b,0x0d,0x2d,0x1d,0xe7,0x01,0x15, 0x01,0x43,0x09,0x09,0x2c,0x0d,0x02,0xe6,0x12,0x04,0x60,0x01,0xe7,0x16,0xe5,0x1f, 0x0b,0x1a,0x06,0xe5,0x0a,0x1e,0x06,0x14,0xe5,0x1f,0x01,0x14,0x0b,0x09,0x04,0x04, 0x29,0xe6,0x48,0xe7,0x0a,0xe5,0x02,0x13,0x09,0x01,0x01,0xe5,0x05,0x02,0x01,0x02, 0x01,0x01,0xe5,0x07,0x01,0xe5,0x05,0x12,0xe5,0x07,0xe5,0x01,0xe5,0x04,0xe5,0x07, 0x20,0x01,0x04,0x09,0x1e,0xea,0x01,0x01,0x41,0x01,0x11,0xe5,0x0a,0xe5,0x14,0x02, 0xe5,0x04,0x02,0xe5,0x04,0x03,0x01,0x05,0x02,0xe5,0x04,0x13,0x09,0x03,0x01,0x04, 0x02,0x01,0x03,0x20,0xe5,0x05,0x02,0x01,0x04,0x1b,0x01,0x04,0x01,0x04,0x0f,0xe5, 0x36,0x0f,0x01,0x09,0x42,0x01,0x02,0x07,0x01,0x6d,0x05,0xe7,0x01,0x11,0x45,0x02, 0x01,0x07,0x43,0xe5,0x01,0x06,0x02,0x01,0x6f,0x02,0x01,0x01,0x15,0x08,0x01,0x3a, 0x03,0x02,0x01,0x01,0x09,0x0f,0x09,0x09,0x0b,0x09,0x07,0x03,0xe6,0x09,0x01,0x0c, 0x09,0x09,0x1d,0x09,0x1f,0xe5,0x01,0x02,0x18,0x01,0x02,0x3c,0x01,0x02,0xe5,0x01, 0x04,0x01,0x01,0xe5,0x3d,0x02,0x02,0x02,0x04,0x05,0x01,0x01,0x60,0x02,0x02,0x02, 0xe6,0x17,0x43,0x01,0x20,0x01,0x08,0xe5,0x11,0xe5,0x09,0xe5,0x0b,0x01,0x07,0x01, 0x22,0x1a,0x02,0x06,0x02,0x01,0x1a,0xe5,0xe7,0x16,0x45,0x01,0x1f,0xe8,0x04,0xe5, 0xe5,0xe5,0x03,0xe5,0x04,0x05,0x01,0x01,0x04,0xe5,0xe5,0xe6,0x01,0x06,0x01,0x07, 0x01,0x0b,0xe5,0x07,0xe7,0x05,0xe5,0xe6,0x19,0x01,0xe6,0x04,0xe6,0x1d,0x01,0xe5, 
0x04,0x71,0x06,0x09,0x09,0x0b,0x09,0x09,0x1d,0x09,0x09,0x04,0x18,0x09,0x21,0xe8, 0x1d,0x41,0x19,0x03,0xe5,0x07,0xe5,0x07,0xe5,0x09,0xe5,0x07,0xe5,0x0b,0x19,0xe5, 0x07,0xe5,0x07,0xe5,0x1b,0xe5,0x07,0xe5,0x23,0xe5,0x0d,0x09,0x05,0x03,0x09,0x09, 0x09,0x09,0x09,0x04,0x01,0x02,0xe5,0x07,0x02,0x01,0x01,0x02,0x09,0xe5,0x07,0xe5, 0x07,0xe5,0x06,0x02,0xe6,0x06,0xe5,0x07,0xe5,0x03,0xe5,0x01,0x02,0x06,0x09,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0x09,0x09,0xe6,0x06,0xe6,0x06,0x09,0x0d,0xe5,0xe5,0xe5, 0x01,0x13,0x45,0x13,0x3d,0x06,0x02,0x72,0x02,0x02,0xe5,0x14,0x43,0x14,0x10,0x09, 0x09,0x06,0x0e,0x03,0x08,0x1a,0x09,0x09,0x48,0xe6,0xe5,0x13,0x02,0x01,0x43,0x01, 0x01,0x0c,0x02,0x01,0x0a,0x01,0x01,0x05,0xe6,0x06,0x01,0x09,0xe6,0x06,0xe6,0x06, 0x08,0x14,0xe6,0x03,0x01,0xe7,0x06,0xe6,0x1a,0xe6,0x06,0xe6,0x14,0x09,0xe8,0x18, 0x01,0x41,0x02,0xe5,0x03,0x03,0x06,0x01,0xe5,0x09,0x09,0x09,0x0b,0x09,0x0f,0x02, 0x14,0x05,0x01,0xe6,0x08,0x1d,0x09,0x19,0xe5,0x02,0x02,0x02,0xe5,0x5b,0x01,0x11, 0x01,0x15,0x0b,0x04,0x04,0x11,0x09,0x03,0x09,0x06,0x03,0x02,0x01,0x01,0x08,0x4b, 0x01,0x15,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe6,0x01,0x04,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x05,0xe7,0x05,0xe7,0x08, 0xe6,0x02,0x02,0xe7,0x01,0x05,0xe6,0x06,0xe5,0x07,0xe5,0x05,0xe8,0xe5,0x03,0xe6, 0x05,0xe7,0x07,0xe5,0x07,0xe5,0x02,0x02,0xe7,0x02,0x02,0xe7,0x07,0xe5,0x07,0xe6, 0x03,0x01,0x02,0xe5,0x16,0x01,0x3c,0x06,0x01,0x09,0x07,0x01,0xe5,0x08,0xe5,0x27, 0xe5,0x07,0x08,0x24,0x03,0x1d,0xe5,0x07,0xe5,0x19,0x05,0xe8,0x18,0x02,0x40,0x01, 0x02,0x06,0x07,0x02,0x0b,0x09,0xe5,0x01,0x05,0xe5,0x01,0x07,0x01,0x01,0x05,0xe5, 0x01,0x03,0x05,0xe5,0x17,0xe5,0x01,0x05,0x09,0xe5,0x01,0x19,0xe5,0x01,0x05,0xe5, 0x01,0x14,0xe5,0x03,0xe5,0x01,0xe5,0x55,0x06,0x01,0x09,0x07,0x01,0x18,0xe5,0x07, 0xe5,0x01,0x11,0xe5,0x02,0x09,0x18,0xe5,0x07,0x05,0x03,0xe5,0x1b,0xe5,0x07,0xe5, 0x14,0x06,0x03,0x56,0x06,0x01,0xe5,0x07,0x07,0x01,0xe5,0x15,0x01,0xe5,0x05,0x01, 
0xe5,0x08,0x01,0x06,0x01,0xe6,0x01,0x03,0x01,0xe5,0x18,0x01,0xe5,0x0f,0x01,0xe5, 0x19,0x01,0xe5,0x05,0x01,0xe5,0x18,0x01,0xe5,0xe7,0x19,0x44,0x48,0xe5,0x03,0x03, 0x0c,0x14,0x3b,0x16,0x08,0xe5,0x0b,0x04,0xe6,0x06,0x01,0x07,0x01,0x07,0x01,0x01, 0x05,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x05, 0x01,0x02,0x04,0x01,0x07,0x01,0xe5,0x05,0x01,0xe5,0x04,0xe5,0xe6,0x07,0x01,0x07, 0x01,0x03,0x03,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x07, 0x01,0x01,0xe5,0x03,0x01,0x07,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x07,0x01,0x02, 0x04,0x01,0x0a,0xe5,0xe6,0x54,0xe5,0x05,0x18,0x07,0xe5,0x10,0xe5,0x04,0x01,0x18, 0x17,0x03,0xe5,0x11,0xe5,0x03,0x17,0xe5,0x07,0xe5,0x21,0x02,0xe5,0x1d,0x35,0x0b, 0x13,0xe5,0x02,0xe5,0x06,0x09,0x01,0x0e,0x02,0x02,0x27,0xe5,0x07,0x04,0x04,0x01, 0x10,0x0a,0xe5,0xe5,0x05,0xe5,0x07,0x05,0x0d,0x01,0x04,0x01,0xe5,0xe5,0x52,0x0f, 0x1c,0x10,0xe5,0x16,0x24,0x12,0x10,0x17,0x25,0xe5,0x2e,0x13,0x0b,0x0d,0x01,0x09, 0x07,0xe6,0x1e,0x14,0x03,0x04,0x01,0x23,0xe6,0x09,0x19,0x33,0x01,0x09,0x53,0x01, 0x05,0x09,0x05,0x21,0xe5,0x06,0x58,0x33,0xe7,0x08,0x08,0x28,0x0f,0xe5,0xe5,0x0d, 0x01,0x13,0x09,0x0a,0x01,0x06,0xe5,0x05,0xe5,0x02,0x0c,0x03,0x16,0x10,0x05,0x09, 0x0b,0x09,0x07,0x2a,0x01,0x0d,0x0f,0x2b,0x13,0x13,0x09,0x13,0x2c,0xe5,0x11,0x09, 0x02,0x02,0x03,0x15,0x2e,0x04,0x03,0x03,0x0b,0x02,0x83,0x02,0x14,0x10,0x12,0x03, 0x3b,0x1b,0x01,0x01,0x01,0x08,0x29,0x09,0xe5,0x07,0x01,0x07,0x09,0x0a,0xe6,0x10, 0xe5,0x12,0x08,0xe5,0x02,0x01,0x03,0x28,0x01,0x0f,0x02,0x1c,0xe6,0x2c,0xe7,0x27, 0x16,0x0c,0x1d,0x10,0x01,0x14,0x01,0xe5,0x02,0x1e,0x04,0x26,0x1b,0x2f,0xe6,0x0e, 0x1e,0x1c,0x13,0x13,0x09,0x05,0x0d,0x0c,0xe5,0xe5,0x08,0x09,0x13,0x02,0x03,0x3b, 0x22,0xe5,0xe6,0x0d,0x01,0x39,0x13,0xe5,0x03,0x08,0x04,0x09,0x13,0x10,0x2d,0x5c, 0x02,0xe6,0x0c,0xe5,0x22,0x19,0x03,0x06,0x08,0xe5,0x11,0x09,0x13,0xe5,0x01,0x08, 0x09,0x33,0x07,0x02,0x1a,0x2b,0x02,0xe6,0x07,0xe5,0x03,0xe6,0x17,0x02,0xe5,0x12, 
0xe5,0x08,0x01,0xe5,0x04,0x02,0x01,0x04,0x05,0x03,0x02,0x01,0x02,0x01,0xe5,0x07, 0x09,0xe5,0x0a,0x01,0x02,0x03,0x01,0xe5,0x05,0x02,0xe5,0x04,0x16,0x01,0x04,0x07, 0x0b,0x16,0x01,0x04,0x09,0x1e,0xe5,0x01,0xe6,0x01,0x01,0x08,0xe6,0x1a,0x03,0x0c, 0xe5,0x10,0xe5,0x07,0xe5,0x05,0x05,0x02,0x03,0x01,0x04,0x08,0x03,0xe5,0x0b,0x05, 0xe5,0x02,0x04,0xe6,0x05,0x05,0xe5,0x01,0x16,0x01,0x04,0x13,0x15,0xe5,0x05,0x02, 0xe5,0x04,0x1b,0x01,0x02,0x01,0xe6,0x03,0x05,0x01,0xe5,0xe5,0xe5,0x18,0x13,0x01, 0x01,0x02,0xe5,0x02,0x02,0x08,0x02,0xe5,0x02,0x02,0x04,0x04,0x01,0xe5,0x02,0x04, 0x01,0x02,0x02,0x08,0x07,0x09,0x03,0x11,0x77,0x05,0x01,0xe5,0x01,0x0a,0x01,0x1b, 0x10,0x02,0x01,0x04,0x04,0x0e,0x04,0x07,0x06,0x04,0x04,0x04,0x0d,0xe5,0x0d,0x13, 0x7b,0x02,0xe5,0xe6,0x0c,0x01,0x04,0x01,0x14,0x11,0x06,0x02,0x03,0x02,0x01,0x01, 0x05,0x02,0x03,0x02,0x01,0x01,0xe5,0x03,0x02,0x03,0x02,0x02,0x03,0x02,0x01,0x01, 0x05,0x09,0xe5,0x01,0x02,0x04,0x09,0x06,0x02,0x01,0x01,0x19,0x13,0x1d,0x09,0x19, 0x06,0xe7,0x01,0x0a,0x01,0x01,0x02,0x01,0x14,0x03,0x13,0x09,0x01,0x02,0x0e,0x01, 0x02,0xe5,0x02,0x09,0x01,0x02,0x04,0x04,0x0e,0x02,0x01,0x02,0x03,0x0f,0x03,0x01, 0x02,0x72,0x04,0x01,0xe5,0x0b,0x01,0x1b,0x13,0x01,0x01,0x07,0x01,0x02,0x01,0x04, 0x02,0x04,0x01,0x02,0x01,0xe5,0x02,0x02,0x04,0x01,0x02,0x01,0x02,0x01,0x02,0xe6, 0x06,0x04,0x04,0x01,0x02,0x04,0x01,0x01,0x06,0xe5,0x03,0x01,0x33,0xe5,0x19,0x09, 0x02,0x01,0x1a,0xe5,0xe7,0xe6,0x09,0x01,0x1b,0x13,0x01,0x02,0x09,0x01,0x07,0x02, 0x06,0x01,0x02,0x04,0xe7,0x05,0xe6,0x06,0xe6,0x05,0xe5,0xe6,0x02,0x01,0xe5,0xe5, 0xe5,0xe6,0x02,0xe5,0xe5,0x05,0xe5,0xe5,0xe6,0x03,0x01,0x01,0x18,0xe5,0x11,0xe5, 0xe5,0x1a,0x09,0xe6,0x1f,0xe5,0x12,0xe5,0x2c,0x09,0x02,0xe6,0x03,0x09,0x04,0x04, 0x09,0x02,0xe5,0x04,0x09,0x04,0x04,0x02,0xe5,0x06,0x04,0x04,0x09,0x1d,0x13,0x04, 0x18,0x09,0x25,0x14,0x2d,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07, 0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x01,0xe5,0x05,0xe5,0x07,0xe5,0x07,0xe5, 
0x1b,0xe5,0x11,0xe5,0x1b,0xe5,0x07,0xe5,0x1f,0x01,0xe7,0x0d,0x03,0xe5,0xe5,0x01, 0x06,0x02,0x09,0x02,0x06,0x09,0xe6,0x01,0x01,0x02,0xe6,0x06,0xe6,0x01,0x01,0x02, 0xe6,0xe5,0x04,0xe6,0x01,0x01,0x02,0xe7,0xe5,0xe5,0x01,0xe6,0xe5,0x04,0xe5,0x04, 0x02,0xe5,0x04,0x01,0x02,0xe5,0x07,0xe5,0x07,0xe6,0x06,0x09,0x09,0xe5,0x07,0x09, 0xe5,0x07,0x09,0x09,0xe6,0x06,0xe6,0x06,0x09,0x0d,0x03,0xe5,0x01,0x13,0x08,0xe5, 0x26,0x13,0x13,0x1c,0xe5,0x07,0xe5,0x91,0x05,0xe5,0x13,0x08,0x29,0x11,0x13,0x1b, 0x03,0x05,0x03,0x02,0x04,0x09,0x27,0x13,0x49,0x01,0xe5,0x16,0x08,0x02,0x1e,0xe6, 0xe5,0x04,0xe6,0x03,0x01,0xe5,0xe5,0xe5,0x04,0xe6,0xe5,0x04,0x01,0x01,0x05,0xe6, 0x06,0xe6,0x06,0xe6,0x06,0xe6,0xe5,0x05,0xe7,0xe5,0x04,0xe6,0xe5,0x04,0xe6,0x14, 0x05,0xe6,0x10,0xe6,0x14,0x05,0xe6,0xe5,0x04,0xe6,0x1e,0xe6,0xe5,0x01,0x0e,0x07, 0x01,0x05,0x02,0x1e,0x08,0xe5,0x04,0x01,0xe6,0x07,0xe5,0x08,0x08,0xe5,0x07,0xe5, 0x08,0x08,0xe5,0x06,0x02,0xe5,0x08,0x09,0x1d,0x13,0x1d,0x09,0x19,0xe5,0x02,0x01, 0x03,0xe5,0x0d,0x07,0x05,0x05,0x1e,0x05,0x03,0x06,0x01,0x03,0x02,0x06,0x09,0x07, 0x02,0x06,0x01,0x07,0x02,0x05,0x02,0x02,0x01,0x02,0x01,0x30,0x13,0x02,0x1d,0x28, 0x01,0xe6,0x0e,0x05,0xe5,0x01,0x05,0xe6,0x06,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5, 0x07,0xe6,0xe5,0x04,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5, 0x07,0xe5,0x01,0x06,0xe6,0x02,0x02,0xe7,0x02,0x03,0xe6,0x07,0xe5,0x07,0xe5,0x05, 0xe7,0x07,0xe5,0x05,0xe7,0x07,0xe5,0x07,0xe5,0x02,0x02,0xe7,0x02,0x02,0xe7,0x07, 0xe5,0x07,0xe6,0x03,0x01,0xe6,0xe5,0x08,0x19,0x1e,0x06,0x01,0xe5,0x08,0xe5,0x04, 0x01,0xe6,0x07,0xe5,0x04,0x01,0x07,0x01,0xe5,0x08,0x08,0xe5,0x09,0xe5,0x04,0x03, 0xe5,0x03,0x03,0xe5,0x03,0x1d,0x0d,0xe5,0x03,0x17,0x05,0x03,0xe5,0x03,0x15,0x05, 0x01,0xe6,0x03,0x0d,0x09,0x04,0x22,0x04,0x04,0x09,0x04,0x04,0x09,0x04,0x04,0x04, 0x04,0x09,0x04,0x01,0x02,0x06,0x04,0x03,0x05,0xe5,0xe6,0x04,0x03,0x19,0xe5,0xe6, 0x0e,0xe5,0xe6,0x16,0x01,0xe5,0xe6,0x04,0xe5,0xe6,0x13,0xe5,0x03,0xe6,0xe7,0x0d, 
0x13,0x1e,0x08,0xe5,0x11,0x13,0x09,0xe5,0x08,0x09,0x07,0x02,0xe5,0x02,0x09,0xe5, 0x07,0x1d,0xe5,0x11,0xe5,0x1b,0xe5,0x07,0xe5,0x14,0x06,0xe6,0xe5,0x08,0x05,0x11, 0x6d,0x0b,0x06,0x08,0x27,0x13,0x1d,0x09,0x1c,0x01,0x03,0x03,0x09,0x02,0x1a,0x1e, 0x01,0x0e,0x02,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x11,0x01,0x1a,0x05,0xe5,0x2f, 0x0c,0x10,0x09,0x1a,0x06,0xe7,0x01,0x02,0x05,0x02,0x02,0x01,0x02,0x04,0x01,0x01, 0x05,0x01,0x04,0x02,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0xe6,0x06,0x01, 0x07,0xe6,0x06,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0xe6,0x08,0x01, 0x07,0x01,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x02, 0x04,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x0c,0x01,0x03, 0x09,0x1f,0xe5,0x11,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5, 0x03,0x03,0xe5,0x01,0x05,0xe6,0x07,0x07,0x02,0x09,0xe5,0x02,0x04,0xe5,0x1b,0xe5, 0x11,0xe5,0x03,0x0a,0x0c,0xe5,0x07,0xe5,0x20,0xe5,0x01,0xe5,0x29,0x06,0x0c,0x06, 0x03,0x08,0x03,0x02,0x02,0x01,0x07,0x01,0x07,0x01,0x04,0x03,0x07,0x01,0xe5,0x02, 0x02,0x01,0x02,0x02,0x03,0x03,0x08,0x02,0x02,0x03,0x1d,0x10,0x02,0xe5,0x1b,0x09, 0x10,0x10,0x04,0x01,0xe5,0x01,0x27,0x13,0x09,0x09,0x1f,0x04,0x01,0x07,0x11,0x01, 0x03,0x07,0xe5,0x01,0x06,0x0f,0x0d,0x13,0x1d,0x09,0x1e,0x05,0xe5,0x01,0x17,0xe6, 0x2e,0xe6,0x06,0xe7,0x05,0xe6,0x10,0xe6,0x06,0xe6,0x10,0xe6,0x1f,0x17,0x08,0x0a, 0x50,0x01,0xe5,0x17,0x16,0x15,0x03,0x05,0x0d,0x05,0x08,0xe5,0x02,0x09,0x07,0x0b, 0x04,0x04,0x05,0x32,0xe5,0x59,0x01,0x01,0x0b,0x06,0x17,0x10,0x0e,0xe5,0x06,0x0a, 0xe5,0x12,0x03,0x0e,0xe5,0x02,0x0d,0x04,0x01,0x0a,0x08,0x11,0x12,0xe5,0xe5,0x1f, 0x07,0x09,0x1f,0xe7,0x2f,0x0e,0x04,0x04,0x01,0x0a,0x03,0x02,0x01,0x04,0x01,0x0a, 0x02,0x09,0x06,0x0d,0x01,0x09,0x03,0x1a,0x18,0x57,0xe7,0x05,0x01,0x20,0x15,0x12, 0x09,0x28,0x13,0x05,0x09,0x34,0xe5,0x46,0x06,0x01,0x01,0x17,0xe5,0x2e,0x07,0x0c, 0xe6,0x02,0x04,0x09,0x26,0x08,0x0d,0x08,0xe6,0x06,0xe5,0x07,0xe5,0x12,0x1c,0xe6, 
0x29,0x02,0x01,0xe5,0x18,0x46,0x08,0x0a,0xe5,0x08,0x0e,0x0a,0x17,0x05,0x03,0x0a, 0x04,0x04,0x03,0x0f,0x1b,0x2e,0x01,0xe5,0x17,0x29,0x13,0x0d,0x04,0x04,0x04,0x07, 0x01,0x07,0x01,0x07,0x01,0x08,0x06,0x09,0x10,0x07,0x09,0xe6,0x07,0x03,0x09,0x0f, 0x36,0xe9,0x66,0xe5,0x01,0x06,0x09,0x09,0x0c,0x0b,0x09,0x07,0x17,0x02,0x01,0x04, 0x03,0xe5,0xe5,0x19,0xe5,0x34,0x01,0x01,0xe5,0xe5,0x02,0x03,0x07,0x31,0xe5,0x0c, 0x04,0x0c,0x01,0x01,0x07,0x03,0x01,0x03,0x01,0x07,0x01,0x0d,0x03,0x01,0x07,0x09, 0x1f,0xe5,0x07,0x02,0x06,0x04,0x02,0x01,0x0b,0x1b,0x09,0x09,0x08,0xe9,0x25,0xe5, 0x03,0xe5,0x01,0x01,0x03,0xe5,0x04,0x01,0xe6,0x04,0x06,0xe5,0xe5,0xe7,0x06,0x04, 0xe5,0xe5,0xe5,0xe8,0x05,0xe5,0x05,0x11,0x01,0x0c,0xe5,0xe6,0x05,0x02,0xe5,0x04, 0x01,0xe5,0x05,0x13,0x05,0x01,0x0b,0x16,0x01,0x04,0x09,0x02,0xe5,0x19,0xe6,0x01, 0xe5,0x01,0x01,0x2c,0xe5,0x08,0x01,0xe5,0x08,0x06,0x01,0xe6,0x01,0x0c,0xe5,0xe5, 0x01,0x03,0x01,0xe5,0x01,0x06,0xe5,0x11,0x0f,0x02,0x01,0x04,0x02,0xe5,0x04,0x15, 0xe5,0x0b,0xe6,0x02,0xe5,0x1b,0xe5,0x05,0x02,0x01,0x04,0x02,0x01,0x16,0x01,0x02, 0x01,0xe6,0x03,0x2a,0x09,0x05,0x03,0x05,0x09,0x01,0x03,0xe5,0x01,0x01,0x07,0x13, 0x01,0x07,0x01,0x04,0xe5,0xe5,0xe5,0x02,0xe5,0x03,0x01,0x1f,0x13,0x02,0xe5,0xe5, 0xe5,0x0a,0x43,0x04,0xe5,0x01,0x02,0x3c,0x09,0x09,0x09,0x01,0x07,0x05,0xe5,0x01, 0x06,0xe5,0xe5,0xe5,0x06,0x01,0x04,0x02,0x01,0x04,0xe5,0x02,0x01,0x36,0x02,0x01, 0x51,0x02,0x02,0xe5,0x3e,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x04,0x09,0xe5, 0x01,0x02,0x03,0x02,0xe5,0xe5,0x04,0x01,0x1a,0x09,0x09,0x13,0x02,0x01,0x01,0x03, 0x02,0x05,0x1d,0x09,0x18,0x06,0xe8,0x01,0x02,0x39,0x01,0x01,0x05,0x01,0x01,0x05, 0x03,0x02,0x01,0xe5,0xe5,0xe5,0x04,0x03,0x09,0x05,0x03,0x02,0x01,0xe5,0xe5,0xe5, 0x04,0x03,0x0b,0x37,0x03,0x02,0x01,0x4a,0x03,0x01,0xe6,0x2e,0x0a,0xe5,0x01,0x01, 0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x09,0x09,0x01,0x07,0x01,0x07,0x01,0x09,0x01, 0x0d,0xe5,0x05,0x0a,0x18,0x01,0x29,0x09,0x02,0x01,0x11,0x08,0x02,0x01,0x3c,0x01, 
0xe5,0xe5,0x06,0x01,0x07,0x09,0x01,0x07,0x03,0xe5,0xe5,0xe6,0x02,0xe5,0x03,0x01, 0x07,0x01,0x07,0x01,0x08,0xe5,0xe5,0x0a,0xe5,0xe5,0xe5,0x04,0x08,0xe5,0x11,0xe5, 0x03,0x01,0x0b,0xe5,0x1c,0x09,0xe6,0x12,0xe6,0x06,0xe5,0xe6,0x69,0x09,0x33,0x09, 0x09,0x13,0x13,0x1d,0x09,0x22,0x01,0xe5,0x01,0x67,0xe5,0x07,0xe5,0x01,0x2f,0xe5, 0x07,0xe5,0x07,0xe5,0x11,0xe5,0x11,0xe5,0x1b,0xe5,0x07,0xe5,0x1f,0xe5,0x01,0xe5, 0x0d,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0xe5,0x04,0x02,0xe5,0x03,0x03, 0x04,0x01,0x02,0x09,0x08,0x02,0x02,0x06,0xe5,0x07,0xe6,0x06,0xe5,0x07,0x09,0xe5, 0x07,0x09,0xe5,0x07,0x09,0x09,0xe6,0x06,0xe6,0x06,0x09,0x0d,0xe5,0xe7,0x01,0x3b, 0x09,0x09,0x09,0x12,0xe5,0x12,0x1f,0x86,0x02,0xe5,0x01,0x3d,0x09,0x08,0x0a,0x0f, 0x03,0x05,0x0d,0x16,0x06,0x07,0x13,0x13,0x13,0x48,0xe5,0xe6,0x07,0x36,0x01,0x07, 0x01,0x07,0x01,0x07,0x01,0x09,0xe7,0x05,0xe5,0xe5,0x03,0x01,0x09,0x07,0x01,0x09, 0x01,0x04,0x04,0xe7,0x06,0xe6,0x06,0xe6,0x0a,0x04,0xe5,0xe5,0x03,0x01,0x04,0x05, 0xe6,0x0f,0x04,0x05,0xe6,0xe5,0x04,0xe6,0x1f,0xe7,0x3e,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x01,0xe5,0x04,0x02,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x01,0xe5,0x07,0x01,0xe5,0x05,0x02,0xe5,0x08,0x09,0x11,0xe6,0x04, 0x02,0x0a,0x11,0xe5,0x09,0x09,0x19,0xe5,0x02,0x01,0xe6,0xe6,0x3d,0x01,0x07,0x01, 0x07,0x01,0x04,0x02,0x01,0x09,0x04,0x0c,0x01,0x09,0x07,0x01,0x08,0xe5,0xe5,0x08, 0x14,0x12,0x07,0x01,0x0a,0x12,0x22,0x13,0x01,0x01,0xe5,0x13,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x05,0xe8,0xe5,0x02,0xe8,0xe5,0x02,0xe8,0xe5,0x02,0xe8,0xe5,0x02, 0xe7,0x01,0xe5,0x01,0xe8,0x04,0x02,0xe5,0xe5,0x02,0xe5,0xe6,0xe5,0x02,0xe5,0xe6, 0xe5,0x02,0xe7,0x01,0x01,0x04,0xe7,0x01,0x02,0xe7,0x02,0x03,0xe6,0x05,0x01,0xe5, 0x07,0xe5,0x01,0x03,0xe8,0x06,0xe5,0x05,0xe7,0x07,0xe5,0x01,0x05,0xe5,0x02,0x02, 0xe7,0x02,0x02,0xe7,0x05,0x01,0xe5,0x06,0xe7,0xe5,0x01,0x03,0x01,0x1a,0x1d,0xe5, 0x06,0x03,0xe5,0x03,0x03,0xe5,0x03,0x03,0xe5,0x03,0xe5,0x01,0xe5,0x03,0xe6,0x01, 
0x0e,0x03,0xe5,0x03,0x03,0xe5,0x03,0x03,0xe5,0x0c,0x02,0xe6,0x03,0x03,0x05,0x03, 0x09,0x08,0xe5,0x04,0x02,0x0a,0x05,0x0c,0x0a,0x05,0x03,0xe5,0x03,0x0d,0xe5,0x03, 0x01,0x08,0xe5,0x19,0x21,0x01,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0x02, 0x02,0x03,0x05,0x03,0x02,0x06,0xe5,0xe5,0x05,0x02,0x06,0x0b,0xe5,0x03,0xe5,0xe6, 0x02,0x01,0x03,0x05,0xe5,0xe5,0x03,0x08,0xe5,0xe6,0xe6,0x0e,0xe5,0xe6,0x0b,0x0a, 0x01,0xe5,0xe6,0x04,0xe5,0xe6,0x07,0x0b,0xe5,0x03,0x01,0xe7,0x3a,0x01,0x03,0x05, 0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x03,0x01,0x07,0x01,0xe5,0x01,0x05,0xe5,0x01, 0x05,0xe5,0x01,0x05,0x02,0x07,0x04,0x04,0xe5,0x07,0x05,0x12,0x04,0xe5,0x02,0x0a, 0x03,0xe5,0x0c,0x0e,0xe5,0x07,0xe5,0x06,0x0a,0x02,0x07,0x02,0x1a,0x1d,0x03,0xe7, 0x05,0xe7,0x04,0x01,0xe6,0x01,0x02,0x01,0xe6,0x01,0x03,0xe6,0x01,0x03,0x01,0xe5, 0x05,0xe5,0x03,0x03,0x03,0x01,0x03,0x05,0x03,0x01,0xe5,0x08,0x02,0x05,0x13,0x06, 0x08,0x03,0x13,0x0f,0x0d,0x09,0x06,0x02,0x06,0x03,0x04,0x02,0x01,0x01,0xe7,0x3d, 0x02,0x10,0x02,0x10,0x09,0x02,0x0e,0x0b,0x02,0x10,0x19,0x19,0x02,0x1e,0x13,0x1a, 0x05,0x02,0xe5,0x10,0x01,0x07,0x01,0x07,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0x01, 0x02,0x02,0x01,0x01,0x05,0x01,0x04,0x02,0x01,0x01,0x05,0x01,0x07,0x01,0x04,0x02, 0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x04,0x02,0x01,0x09,0x01,0x07,0x01,0x07,0x01, 0x07,0x01,0x07,0x01,0x07,0x01,0x04,0x02,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01, 0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x02,0x04,0x01,0x07,0x01,0x05,0xe5,0x02,0xe6, 0xe5,0x3e,0x07,0x0b,0x02,0x0a,0x03,0x02,0x04,0x03,0x02,0x05,0x14,0x08,0x01,0x03, 0x08,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x0d,0x03,0xe5,0x04,0xe5,0x0a,0xe5,0x03,0x17, 0xe5,0x07,0xe5,0x22,0xe7,0x0a,0x13,0x08,0x14,0x06,0x09,0x03,0x04,0xe5,0x0b,0x09, 0xe5,0x07,0x11,0x09,0x05,0x02,0x07,0x01,0x02,0x04,0x08,0xe5,0x0e,0x02,0x06,0x0c, 0x11,0x0c,0x09,0x1a,0x06,0x01,0xe5,0xe5,0x42,0x13,0x09,0x0a,0x26,0xe5,0x04,0x05, 0x04,0x09,0x11,0x0c,0x03,0x0f,0x1c,0x09,0x11,0x0d,0xe5,0x02,0xe8,0x24,0x1b,0x07, 
0x02,0x06,0x01,0x07,0x01,0x0a,0x0a,0x05,0x01,0x0a,0x06,0x01,0xe5,0x02,0x06,0x06, 0x27,0x07,0x01,0xe5,0xe6,0x02,0xe5,0x03,0x01,0x02,0x44,0xe6,0xe5,0x3a,0xe5,0x01, 0x0b,0x0a,0x03,0x05,0x03,0xe5,0x10,0x01,0x04,0xe5,0x11,0x04,0x01,0x01,0x0d,0x0b, 0x23,0x21,0x36,0x13,0x09,0x23,0x0d,0x03,0x01,0x05,0x01,0x01,0x02,0x06,0x05,0x08, 0x02,0x01,0x0d,0xe5,0x01,0x01,0x07,0x04,0x09,0x01,0x1c,0x03,0x0b,0x01,0x01,0x01, 0x04,0xe5,0x36,0x06,0x0c,0x02,0x02,0x0d,0x15,0x13,0xe5,0x02,0x13,0x04,0x04,0x09, 0x02,0xe5,0x0d,0xe5,0x11,0xe5,0x09,0x23,0x01,0x08,0x0d,0x09,0x04,0x4b,0x03,0x21, 0x3f,0x05,0x2d,0x3b,0x0b,0x38,0x16,0xe5,0x05,0x38,0xe5,0x11,0xe5,0x07,0xe6,0x02, 0x03,0xe6,0x02,0x07,0x02,0x01,0x01,0x01,0x06,0xe6,0x03,0x03,0x08,0x16,0x07,0x14, 0x01,0x01,0x05,0x01,0x01,0x03,0x02,0x16,0x19,0xe6,0x18,0x01,0xe5,0x3e,0xe5,0x11, 0xe5,0x0a,0x09,0x13,0x05,0x03,0x09,0x06,0x08,0x0e,0x32,0x21,0x0e,0x14,0x05,0xe6, 0x01,0xe5,0x10,0x12,0x09,0x09,0x04,0x04,0x0b,0x02,0x04,0x04,0x04,0x01,0x02,0x13, 0x10,0x02,0x0f,0x09,0xe5,0x04,0x03,0x0a,0x0f,0x07,0xe5,0x09,0xe5,0x07,0x09,0x16, 0x12,0x0b,0x01,0xe6,0x01,0x3d,0x13,0xe5,0x03,0x03,0x09,0x0c,0x06,0x06,0x0c,0x02, 0x01,0x09,0x09,0x07,0x26,0x01,0xe5,0x32,0x1c,0xe7,0xe5,0x1a,0x0c,0x19,0x05,0x0d, 0x03,0x05,0x09,0xe5,0x0b,0x05,0x13,0x01,0x0f,0x0e,0x13,0x12,0x0d,0x2e,0x0f,0x06, 0xe6,0xe7,0x17,0xe6,0xe5,0xe5,0x02,0xe5,0x0b,0xe5,0x05,0x01,0xe5,0x04,0x09,0xe5, 0xe6,0x04,0xe5,0xe6,0x04,0x08,0xe5,0x01,0xe5,0xe5,0x02,0x09,0xe5,0x03,0x03,0xe5, 0x07,0xe5,0xe5,0xe5,0x04,0xe5,0xe6,0x05,0x09,0x01,0xe5,0x04,0xe5,0x01,0x01,0xe5, 0x01,0xe5,0x08,0xe5,0x03,0x03,0x09,0xe5,0x10,0xe5,0x01,0x01,0x04,0x02,0xe5,0x04, 0x1e,0xe5,0xe6,0xe5,0x01,0x01,0x01,0x0d,0xe5,0x06,0xe5,0x1c,0xe5,0x01,0x01,0x03, 0xe5,0x07,0xe5,0x01,0x01,0x06,0x03,0x09,0xe5,0xe5,0xe6,0x02,0x05,0x02,0x06,0x03, 0x05,0x02,0x03,0xe5,0x09,0x01,0x04,0x09,0x01,0xe5,0x08,0x01,0xe5,0x0c,0x05,0xe5, 0x0b,0x0c,0x01,0x06,0xe5,0x01,0xe5,0x01,0x09,0x1b,0x01,0x02,0x02,0xe5,0x03,0x01, 
0x14,0x02,0x01,0xe5,0xe5,0xe5,0x1a,0x0b,0x07,0x04,0xe5,0x06,0xe5,0x0b,0x13,0x19, 0x1d,0x05,0x03,0xe5,0xe5,0xe6,0xe5,0x03,0x01,0xe5,0xe5,0xe5,0x1c,0x07,0x04,0x01, 0xe5,0xe5,0x1c,0x06,0xe6,0x01,0x1a,0xe5,0x01,0x01,0x17,0xe5,0x11,0xe5,0x07,0xe5, 0x01,0x05,0xe5,0x0b,0x06,0xe5,0xe5,0x08,0x05,0xe5,0x01,0x33,0x06,0x02,0x01,0x07, 0x01,0x25,0x09,0x01,0x1f,0x03,0xe7,0x39,0x09,0xe5,0x01,0x05,0x09,0xe5,0x01,0x05, 0x01,0x01,0x07,0x01,0xe5,0x03,0x09,0xe5,0x01,0xe5,0x03,0x09,0xe5,0x01,0x11,0x09, 0x0e,0x07,0x06,0x04,0x01,0x02,0x01,0x01,0xe5,0xe5,0x01,0x1a,0x02,0x09,0x22,0xe5, 0x01,0x02,0x1f,0x1d,0x02,0x01,0x0e,0x09,0x04,0x04,0x02,0x01,0xe5,0x02,0x09,0x05, 0x03,0x09,0x2f,0x03,0x09,0x05,0x01,0x01,0x01,0x02,0xe5,0xe5,0xe5,0x18,0x03,0x04, 0x04,0x1d,0x04,0x01,0xe5,0x1f,0x01,0x0e,0x0e,0x0e,0x04,0x07,0x04,0x02,0x0b,0x06, 0x02,0x06,0x01,0xe5,0x05,0x02,0x10,0x01,0x01,0x05,0xe6,0x05,0x0f,0x09,0x01,0x04, 0x01,0xe5,0xe5,0x06,0x01,0x03,0xe5,0x17,0x01,0xe5,0xe8,0xe5,0xe5,0x01,0x01,0x1a, 0x01,0x02,0x21,0x01,0x0e,0xe5,0x04,0xe7,0x03,0x01,0xe5,0x07,0xe5,0xe7,0x01,0x01, 0xe5,0x03,0x03,0xe5,0x05,0x07,0x03,0xe5,0x01,0x01,0x03,0xe5,0xe5,0xe6,0x02,0xe5, 0xe8,0x02,0xe5,0xe5,0xe5,0x08,0x06,0xe5,0xe5,0xe5,0x04,0x0e,0x09,0x01,0x01,0xe5, 0xe5,0xe6,0xe5,0xe6,0x03,0x01,0x01,0xe5,0xe5,0x15,0x04,0xe6,0x01,0x01,0x02,0xe6, 0x1f,0xe5,0x30,0x01,0xe5,0x02,0x09,0x09,0x09,0x09,0x02,0x02,0x0d,0x09,0x09,0x09, 0x10,0x04,0x09,0x02,0xe5,0x18,0x09,0x09,0x04,0x18,0x09,0x21,0x03,0x02,0x35,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x0b,0xe5,0x03,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0x01,0x11,0xe5,0x07,0xe5,0x1b,0xe5,0x07,0xe5,0x07,0xe5,0x1b,0xe5, 0x07,0xe5,0x1f,0xe6,0xe6,0x0d,0x09,0x09,0x09,0x09,0xe5,0x04,0x02,0xe5,0x07,0xe5, 0x04,0x02,0xe5,0x03,0xe5,0x01,0xe5,0x02,0x01,0x02,0x09,0xe5,0x04,0x02,0xe5,0xe5, 0xe5,0xe5,0x01,0xe5,0xe5,0x02,0x02,0xe5,0x03,0x05,0x06,0x02,0xe5,0x07,0xe6,0x06, 0x09,0x09,0xe5,0xe5,0x05,0xe5,0x07,0xe5,0xe5,0x05,0x09,0x09,0xe7,0x05,0xe7,0x05, 
0x09,0x0d,0xe5,0xe7,0x01,0x31,0x08,0xe5,0x11,0xe5,0x07,0xe5,0x08,0x12,0xe5,0x08, 0x08,0xe5,0x11,0x02,0x31,0x54,0x02,0xe6,0xe5,0x32,0x05,0x03,0x05,0x09,0x03,0x05, 0x03,0x05,0x03,0x0f,0x03,0x05,0x01,0x07,0x03,0x05,0x0d,0x07,0x27,0x03,0x05,0x09, 0x48,0xe5,0xe6,0xe5,0x28,0x01,0x09,0xe5,0xe5,0x06,0x01,0x04,0x01,0xe7,0x06,0x01, 0x03,0x02,0xe7,0x03,0x01,0x0a,0xe6,0x02,0x02,0xe7,0x05,0xe7,0x02,0x02,0xe7,0xe5, 0x02,0x07,0x04,0xe7,0xe5,0x01,0x01,0xe7,0x1a,0xe6,0xe5,0x03,0xe7,0xe5,0xe6,0xe8, 0x1a,0xe6,0x06,0xe6,0x1e,0xe5,0x01,0x2b,0x01,0xe5,0x05,0x02,0xe5,0x07,0xe5,0x04, 0x01,0xe6,0x03,0x03,0xe5,0x04,0x01,0xe6,0x04,0x01,0xe5,0x09,0x05,0x01,0xe6,0x03, 0xe5,0x01,0xe5,0x04,0x01,0xe6,0x10,0x02,0xe5,0x04,0x01,0xe6,0x0b,0x09,0x06,0x07, 0xe6,0x04,0x01,0xe6,0x1c,0x09,0x19,0xe5,0x02,0x02,0xe7,0x27,0x0e,0x04,0x01,0x06, 0x03,0x01,0x01,0x02,0x09,0x0c,0x01,0x0b,0x01,0xe5,0x05,0x07,0xe5,0x02,0x06,0x13, 0x0b,0x0c,0x08,0x06,0x08,0xe5,0x05,0x01,0xe5,0x20,0x09,0x1e,0x01,0xe5,0x14,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0x01, 0x04,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0x06,0xe6,0x06,0xe6,0x06,0xe5,0xe5,0x06,0xe7, 0x01,0x02,0xe7,0x06,0xe6,0x07,0xe5,0x07,0xe5,0x05,0xe7,0x01,0x05,0xe6,0xe5,0x02, 0xe7,0x07,0xe5,0x06,0xe6,0x02,0x02,0xe7,0x02,0x02,0xe7,0x07,0xe5,0x07,0xe7,0x02, 0x04,0xe5,0x2a,0x01,0xe6,0x06,0x09,0x0a,0x08,0x0a,0x06,0x01,0xe5,0x12,0x08,0xe6, 0x07,0x0b,0xe5,0x06,0xe6,0x04,0x01,0xe5,0x08,0x09,0x09,0xe5,0x06,0xe5,0x08,0xe5, 0x17,0x03,0xe5,0x07,0xe5,0x07,0x17,0x01,0x01,0x2b,0x01,0x07,0x09,0x01,0x11,0x01, 0x0a,0x06,0x01,0xe5,0x19,0x14,0x09,0xe5,0x04,0x02,0xe5,0xe6,0x04,0x1a,0x02,0xe5, 0x0e,0x02,0x1a,0x02,0x06,0x02,0x03,0x10,0xe5,0x03,0xe5,0x01,0xe5,0x36,0x14,0x1a, 0x01,0x14,0x08,0x0a,0x0f,0x04,0x04,0xe5,0x07,0x05,0x09,0x0d,0xe5,0x02,0xe5,0x0c, 0xe5,0x11,0x09,0xe5,0x07,0xe5,0x14,0x08,0x01,0x2e,0x06,0x04,0x04,0x04,0x09,0x04, 0x04,0x09,0x04,0x02,0x0b,0x09,0x04,0x04,0x09,0x09,0x03,0x01,0xe5,0x03,0xe5,0xe6, 
0x04,0x03,0x03,0x09,0x0b,0xe5,0xe7,0x03,0x09,0xe5,0xe6,0x10,0x07,0xe5,0xe6,0x04, 0xe5,0xe6,0x02,0x11,0x03,0x03,0x01,0x1b,0x09,0x17,0x01,0xe5,0xe5,0x0f,0x02,0x06, 0x02,0x06,0x02,0x06,0x01,0xe5,0xe5,0x03,0x02,0x06,0x01,0xe5,0x05,0x02,0x01,0x05, 0x28,0xe5,0xe5,0x0f,0x02,0x17,0xe5,0x08,0x30,0x03,0x01,0xe5,0x01,0x0b,0x01,0x07, 0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x01,0x02,0x02,0x01,0x01,0x05,0x01,0x01,0x05, 0x01,0x01,0x02,0x01,0xe5,0xe5,0xe5,0xe5,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02, 0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x09,0x01,0xe6,0x04,0x01,0xe5, 0x05,0x01,0xe6,0x04,0x01,0x03,0x03,0x01,0x07,0x01,0x01,0x02,0x02,0x01,0x01,0x05, 0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x0a,0x01, 0x01,0x0b,0x23,0x07,0x09,0xe5,0x07,0xe5,0x03,0x01,0xe7,0x05,0x01,0x07,0x01,0x01, 0x01,0x05,0x07,0x01,0xe5,0x07,0x07,0x01,0xe5,0x13,0xe5,0x07,0xe5,0x08,0xe5,0x01, 0x04,0xe5,0x07,0xe5,0x05,0x01,0x09,0xe5,0x1b,0xe5,0x07,0xe5,0x02,0x1d,0xe5,0xe7, 0x09,0x1e,0x04,0x01,0x01,0x03,0x01,0x01,0x02,0x01,0xe5,0x02,0x02,0x01,0x04,0x02, 0x01,0x07,0x02,0x06,0x04,0x01,0x09,0x02,0x03,0x02,0x02,0x04,0x01,0x02,0x02,0xe5, 0x01,0xe5,0x09,0x0b,0x07,0x03,0x06,0x02,0x01,0x04,0x03,0x0a,0x01,0x0c,0x1b,0x01, 0x15,0x10,0x03,0xe5,0x14,0x0a,0x09,0x06,0x02,0x04,0x11,0x08,0x10,0x02,0x02,0x01, 0x05,0x07,0x01,0x09,0x07,0x11,0x03,0x01,0x07,0x01,0x14,0x03,0x03,0x12,0x01,0x1b, 0x01,0x07,0x01,0x20,0x01,0x01,0xe6,0x0f,0x04,0x04,0x14,0x08,0x08,0x07,0x01,0x09, 0x0e,0xe5,0xe5,0xe5,0x0f,0x07,0x14,0x26,0xe5,0x05,0x10,0xe5,0xe5,0x05,0x02,0x09, 0x3f,0xe5,0xe6,0x30,0x07,0x06,0x06,0x01,0x01,0x04,0xe5,0x06,0x0f,0x23,0x07,0x02, 0xe6,0x19,0x0a,0x22,0x4a,0x03,0x18,0xe5,0x08,0x13,0x03,0x05,0x03,0x05,0x07,0x01, 0x09,0x07,0x01,0xe5,0x03,0x03,0x05,0x03,0x02,0x06,0x09,0x06,0xe5,0x01,0xe5,0x02, 0x05,0xe5,0xe5,0x04,0xe5,0x07,0xe6,0xe5,0x05,0x02,0x06,0xe5,0xe5,0x01,0x0d,0x03, 0x12,0x37,0x0b,0x05,0xe5,0x08,0x04,0xe5,0x03,0x0e,0x01,0x03,0x03,0x09,0x01,0x01, 
0x05,0x03,0x01,0x03,0x01,0x07,0x09,0x09,0x09,0x09,0x08,0x01,0x0a,0x08,0x01,0x03, 0x07,0x05,0xe5,0x08,0x10,0x01,0xe7,0x0a,0x11,0x13,0x0f,0x07,0xe6,0xe5,0x02,0x10, 0xe5,0x0a,0x06,0x30,0x3c,0x02,0x01,0x18,0x09,0x06,0x0b,0x3e,0x1d,0x01,0x01,0x13, 0x22,0x07,0xe5,0x09,0x02,0xe5,0x03,0x0a,0x08,0x13,0xe5,0x11,0xe5,0x07,0xe5,0x09, 0xe5,0x07,0xe5,0x07,0xe5,0x03,0x0b,0xe9,0x04,0xe6,0x07,0xe6,0x06,0xe5,0x1b,0xe5, 0x0f,0x08,0x02,0x06,0x02,0xe5,0x02,0x18,0x19,0x08,0x0a,0x08,0x0a,0x06,0x01,0x11, 0x01,0xe5,0x06,0x0a,0xe5,0x07,0x0b,0xe5,0x01,0x05,0x01,0x01,0x05,0x01,0x14,0x06, 0x02,0x07,0x0a,0x1b,0x01,0x1f,0x03,0x01,0xe5,0x20,0x01,0x01,0xe5,0x0f,0x03,0x05, 0x09,0x04,0x04,0x06,0x02,0x03,0x05,0x05,0x03,0x03,0x05,0x09,0x06,0x02,0x0b,0x03, 0x05,0x03,0x05,0x07,0x01,0x07,0x01,0x01,0x07,0x09,0x03,0xe5,0x07,0x04,0x0e,0x34, 0xe7,0x17,0x09,0x10,0x01,0xe6,0x06,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x01,0xe5,0x03,0xe5,0x07,0xe5,0x09,0x09,0xe5,0x08, 0xe5,0x07,0x06,0x02,0x01,0xe5,0x05,0x09,0x28,0x2a,0xe5,0x01,0xe5,0x18,0x09,0x0c, 0x05,0x09,0x09,0x02,0x06,0x09,0x05,0x03,0x02,0x06,0x03,0x05,0x09,0x02,0x06,0x07, 0x01,0x0b,0x08,0xe5,0x08,0x0a,0x07,0xe5,0x07,0x01,0x0e,0x30,0x14,0xe6,0xe7,0x0a, 0x09,0x09,0x0c,0xe5,0x01,0x01,0xe5,0x02,0x02,0x01,0xe5,0x02,0x02,0xe5,0xe5,0x02, 0x02,0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x02,0x05,0x03,0x02,0xe5,0xe5,0x05,0xe5,0xe5, 0x02,0x02,0xe5,0xe5,0x05,0xe5,0xe5,0x02,0x01,0xe5,0x03,0x03,0x02,0xe5,0xe5,0x02, 0x05,0x03,0x02,0xe5,0xe5,0x02,0xe5,0xe5,0xe6,0x02,0xe5,0x07,0x0b,0xe5,0x05,0x19, 0x0c,0xe5,0x1d,0xea,0x01,0x01,0x08,0x04,0x04,0xe6,0x06,0xe6,0x0a,0x02,0xe5,0x02, 0xe5,0x02,0xe5,0x02,0xe5,0x01,0xe5,0x03,0xe5,0xe5,0xe5,0xe5,0x01,0xe5,0x07,0xe5, 0x02,0x01,0x02,0xe5,0x07,0x02,0x06,0xe5,0x07,0x09,0xe5,0xe5,0x07,0x09,0xe5,0x07, 0xe5,0x02,0xe5,0x02,0xe5,0x02,0x01,0x02,0x04,0xe5,0x01,0xe5,0x0d,0x01,0x1e,0x0c, 0x1c,0x01,0x02,0xe8,0x03,0x08,0x06,0xe5,0xe5,0xe5,0x03,0x01,0x02,0x04,0xe5,0xe5, 
0xe5,0x03,0x04,0x04,0x04,0x02,0x01,0x04,0x04,0x04,0x02,0x01,0x04,0x02,0x01,0x04, 0x04,0x04,0x04,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x04,0x06,0x01,0xe5,0xe5,0x03, 0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x01,0x04,0x01,0x02,0x02,0x03,0xe5,0x11,0x19, 0x29,0x06,0xe6,0x01,0x0a,0x06,0x02,0x01,0x07,0x06,0xe5,0xe5,0xe5,0x03,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x02,0x06,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x30,0x2b,0x04, 0xe6,0x0c,0x07,0x01,0x01,0x02,0x04,0x01,0x03,0x02,0x0c,0x02,0x01,0x01,0x02,0x02, 0xe5,0x01,0x02,0x02,0x03,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x01, 0x01,0x02,0x02,0x06,0x02,0x06,0x02,0x03,0x02,0x02,0xe5,0x01,0x04,0x02,0x06,0x02, 0x06,0x02,0x06,0x02,0x06,0x02,0x04,0x04,0x0e,0x01,0x02,0x18,0x30,0x01,0x02,0x0a, 0x03,0x05,0x01,0x02,0x04,0x01,0x01,0x02,0x01,0x04,0x05,0x03,0x02,0x01,0xe5,0x02, 0x05,0x03,0x01,0x01,0xe6,0x02,0x04,0xe5,0x02,0x01,0x02,0xe5,0x02,0x01,0x02,0xe5, 0x02,0x05,0x03,0x01,0x02,0xe5,0x02,0x01,0x01,0xe6,0x02,0x07,0x03,0x01,0x02,0xe5, 0x02,0x05,0x03,0x02,0x01,0xe5,0x02,0x01,0x02,0xe5,0x02,0x05,0x03,0x0f,0x01,0x01, 0x19,0x03,0x27,0x04,0x01,0xe5,0x03,0xe5,0x05,0x0b,0x04,0x02,0x01,0x07,0x01,0x07, 0x03,0x05,0x03,0x05,0x09,0x07,0x01,0x03,0x05,0x03,0x05,0x03,0x02,0x02,0x09,0x06, 0x02,0x0b,0x06,0x02,0x09,0x04,0x01,0x02,0x09,0x06,0x04,0x01,0x0f,0x01,0x02,0x01, 0x16,0x2d,0x02,0xe6,0x01,0x03,0x06,0x0b,0x04,0xe5,0xe5,0xe5,0x06,0x01,0x09,0x02, 0x06,0x02,0x06,0x01,0xe5,0x05,0x01,0xe5,0x05,0x02,0xe6,0x03,0x02,0x06,0x02,0x01, 0xe5,0x02,0x01,0xe5,0x05,0x01,0xe5,0xe6,0x02,0x01,0xe5,0x07,0x01,0xe5,0xe7,0x01, 0x01,0xe5,0x05,0x01,0xe5,0xe7,0x01,0x01,0xe5,0x05,0x01,0xe5,0x01,0x03,0x02,0x0e, 0x01,0x01,0xe5,0xe6,0x14,0x2b,0x04,0x01,0x01,0x1d,0x15,0x02,0x02,0x02,0xe5,0x01, 0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02, 
0x01,0x01,0x02,0x02,0x03,0x02,0x02,0x01,0x01,0x02,0x02,0x05,0x02,0x02,0x03,0x02, 0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x06,0x02,0x10,0x04,0x42,0x05, 0x01,0x1c,0x19,0xe5,0x01,0x05,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5, 0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6, 0x06,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04, 0xe5,0xe6,0x0e,0xe5,0x47,0xe5,0x01,0xe5,0x0d,0x04,0x01,0x02,0x05,0x03,0x02,0x06, 0x04,0x01,0xe5,0xe7,0xe5,0x02,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01, 0xe6,0x04,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x07,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05, 0x01,0xe6,0x06,0x09,0xe5,0x07,0x09,0x09,0x09,0x09,0x09,0x0d,0x02,0x01,0x02,0x13, 0x1d,0xe5,0x08,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x0b,0x09,0x09,0x09,0x09, 0x09,0x5d,0x02,0x02,0xe5,0x12,0x1d,0x03,0x09,0x02,0x06,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x06, 0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02, 0x14,0x4a,0xe6,0x16,0x01,0x07,0x15,0xe5,0xe5,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03, 0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02, 0x01,0x04,0x02,0xe6,0x05,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0x01,0x04,0x02,0xe6, 0x03,0x02,0x01,0x01,0x01,0xe5,0x01,0xe6,0x02,0x02,0x0a,0xe6,0x44,0x01,0x01,0xe6, 0x16,0x02,0xe5,0x04,0x03,0x0f,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02, 0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0xe5,0x06,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x03, 0xe5,0x01,0xe5,0x04,0x02,0xe5,0x06,0xe5,0x02,0x06,0x48,0xe9,0x15,0x01,0x07,0x0f, 0x0f,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x0b,0x01,0x07,0x04,0x01,0x02,0x09, 0x09,0x09,0xe5,0x07,0x0b,0x02,0x45,0x01,0xe6,0x08,0x0b,0xe6,0x05,0xe7,0x06,0xe5, 
0x07,0xe5,0x05,0xe7,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0xe7, 0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0xe7, 0x01,0x03,0xe7,0x01,0xe5,0x01,0xe7,0x03,0x04,0xe6,0x01,0xe5,0x01,0xe7,0x01,0x03, 0xe7,0x01,0xe5,0x03,0xe5,0x01,0x05,0xe5,0x01,0x03,0x01,0xe5,0x01,0x05,0xe5,0x05, 0xe7,0x07,0xe5,0x07,0xe5,0x05,0xe7,0x05,0xe7,0x07,0xe5,0x07,0xe5,0x07,0x02,0x1a, 0xe5,0x03,0x03,0x10,0x01,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0x09,0xe5,0x09,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe6, 0x06,0xe5,0x07,0xe5,0x08,0x03,0x05,0x16,0x29,0x01,0xe5,0x1d,0x16,0x01,0x03,0x02, 0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02, 0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x01,0x02,0x06,0x02,0x03, 0x02,0x02,0x05,0xe5,0x01,0x09,0x09,0x06,0x02,0xe5,0x03,0x04,0x05,0x03,0x19,0x09, 0x1d,0x01,0x17,0x01,0x0a,0x10,0x06,0xe5,0xe5,0x01,0x03,0xe5,0xe5,0x01,0x03,0xe5, 0xe5,0x01,0x03,0xe5,0xe5,0x05,0xe5,0xe5,0x01,0x03,0xe5,0xe5,0x01,0x03,0xe5,0xe5, 0x01,0x03,0xe5,0xe5,0x05,0xe5,0xe5,0x01,0x03,0xe5,0x02,0x02,0x03,0x02,0x02,0x03, 0xe5,0xe5,0x04,0xe5,0x01,0x02,0x06,0x09,0x02,0x02,0x01,0x03,0x0d,0x01,0x1c,0xe5, 0x06,0xe6,0x1b,0x01,0x01,0x17,0x02,0x01,0x01,0x05,0x01,0x10,0x02,0xe5,0xe7,0x03, 0xe5,0xe7,0x03,0xe5,0xe7,0x03,0xe5,0xe7,0x03,0xe5,0xe7,0x03,0xe5,0xe7,0x03,0xe5, 0xe7,0x03,0xe5,0xe7,0x03,0xe5,0xe7,0x03,0xe5,0xe6,0x01,0x04,0x03,0xe5,0x03,0xe5, 0xe7,0x03,0xe6,0xe6,0x03,0x04,0x04,0x04,0x04,0xe8,0x01,0xe5,0x01,0x08,0xe5,0xe6, 0x02,0x17,0xe6,0x05,0x01,0xe5,0x1c,0x01,0x0d,0x09,0xe5,0xe5,0x0a,0x1c,0x0a,0x08, 0x27,0x1a,0x04,0x2d,0x04,0x08,0x03,0x1f,0xe5,0xe5,0xe5,0x07,0x1c,0x06,0xe5,0xe5, 0x07,0x03,0x02,0x01,0x03,0x03,0x01,0xe5,0x05,0x01,0x07,0x01,0x07,0x01,0x01,0x05, 0x01,0x07,0x01,0x07,0xe6,0x06,0x01,0x01,0x04,0xe5,0xe5,0x06,0x01,0x01,0x05,0xe6, 0x01,0x04,0x01,0x07,0x01,0x09,0xe6,0x01,0x04,0x01,0x07,0x01,0x07,0x01,0x07,0x01, 
0x01,0x02,0x02,0x01,0x01,0x05,0x01,0xe5,0x05,0x01,0x01,0x05,0x01,0x02,0x04,0x01, 0x01,0x05,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x08,0x03,0xe6,0x22,0x01, 0x02,0x04,0xe5,0xe5,0x01,0xe5,0x01,0xe5,0x07,0xe5,0x01,0x05,0x09,0xe5,0x0b,0xe5, 0x03,0x03,0x02,0x02,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x09,0xe5,0x07, 0xe5,0x01,0x02,0x03,0x05,0x02,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x02,0x2b,0x2f,0x01, 0xe5,0x0a,0x08,0xe5,0x03,0x0e,0xe5,0xe5,0x07,0xe5,0x01,0xe5,0x02,0x03,0x01,0x05, 0x01,0x02,0xe5,0xe5,0x02,0x04,0x09,0x04,0x06,0xe5,0x03,0x02,0x03,0x06,0xe5,0x03, 0x03,0x02,0x04,0x02,0x03,0x06,0xe5,0xe5,0x04,0x02,0x07,0xe5,0x02,0x02,0x02,0x03, 0x03,0x0f,0x08,0xe5,0x01,0x06,0x01,0x07,0x0c,0x07,0x15,0x06,0x04,0x02,0x1f,0x0c, 0xe5,0x02,0x0c,0x03,0x0a,0x04,0x02,0x0f,0x03,0x10,0x13,0x05,0x05,0x02,0x11,0x03, 0x07,0x03,0xe5,0x0a,0xe5,0x5d,0xe5,0xe5,0x16,0x09,0x01,0x03,0x01,0x0b,0x01,0x05, 0x01,0x01,0x07,0x01,0x16,0x04,0x01,0x07,0x01,0x07,0x01,0x04,0x02,0x01,0x07,0x01, 0x07,0x09,0x01,0x01,0x09,0x07,0x02,0x03,0x02,0x01,0xe5,0x01,0xe5,0xe6,0xe5,0x5d, 0xe5,0x01,0x02,0x11,0x05,0x08,0x13,0x07,0x01,0x05,0x03,0x05,0xe5,0x05,0x02,0x0d, 0xe5,0xe5,0x02,0x01,0x05,0x01,0x01,0x09,0x05,0x01,0x01,0x06,0xe6,0x09,0x01,0x07, 0x0c,0x07,0x01,0x09,0x0e,0x51,0xe5,0x0f,0x01,0x03,0x02,0x02,0x04,0x01,0x11,0x05, 0x0d,0xe6,0x02,0x04,0xe5,0x03,0x08,0xe5,0x01,0x01,0x05,0x06,0x05,0xe5,0x02,0x03, 0x03,0x01,0x04,0xe5,0x02,0x03,0x02,0xe5,0x0c,0x05,0x05,0x07,0x06,0x06,0x01,0x02, 0x0a,0x05,0x03,0x09,0x08,0x06,0x09,0x14,0x0d,0xe8,0x16,0xe5,0x05,0x06,0x14,0x03, 0x04,0x04,0x09,0x01,0x02,0x04,0x01,0x07,0xe5,0x05,0x01,0x01,0x07,0x01,0x07,0x01, 0x07,0x08,0xe5,0xe5,0xe5,0x01,0x04,0x09,0xe5,0x0a,0x0c,0x02,0x01,0x0b,0x07,0x01, 0x14,0x06,0x09,0x25,0xe6,0x09,0xe5,0x0c,0x0a,0x03,0x13,0x04,0x02,0x06,0x02,0x08, 0x02,0x0e,0x02,0x0d,0x16,0x01,0xe5,0xe5,0x01,0x0b,0x07,0xe5,0x01,0x02,0x08,0x27, 0x49,0xe5,0xe5,0xe5,0x12,0x06,0x0e,0xe6,0x03,0x01,0x1c,0x0f,0x04,0x01,0x0e,0x11, 
0xe6,0xe5,0x0a,0x15,0xe6,0x0a,0x04,0x07,0x0b,0xe6,0x02,0x03,0x08,0xe8,0x05,0x06, 0x02,0x12,0x20,0x2e,0x0a,0x02,0x09,0xe5,0x12,0xe5,0x0b,0x04,0x06,0x02,0x09,0x09, 0xe5,0x0a,0x08,0x01,0x07,0x05,0x18,0x0b,0x01,0x13,0x44,0x01,0x01,0x13,0x01,0x0a, 0x04,0x04,0x05,0x14,0x01,0xe5,0x05,0x01,0x02,0x06,0x0a,0x08,0xe5,0x05,0x01,0x06, 0x02,0xe5,0x05,0x01,0xe5,0x07,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0x1a,0x02,0x05, 0x03,0x09,0x05,0x03,0x03,0x01,0x03,0x09,0x05,0xe5,0x07,0x1f,0x03,0xe5,0xe7,0x0e, 0x2f,0x0d,0x02,0x0c,0x0c,0x06,0x0e,0x04,0x0d,0x08,0x1d,0x01,0xe5,0x05,0x01,0x05, 0xe5,0x01,0x0f,0xe5,0x08,0x12,0xe5,0x06,0x01,0x20,0xe6,0x02,0x1c,0x1f,0x04,0x1b, 0x01,0x11,0x01,0x02,0x0e,0x01,0x12,0x09,0x05,0x03,0x1f,0x0c,0x06,0x0a,0x12,0x08, 0x08,0x1c,0xe5,0xe7,0x2b,0xe5,0x06,0xe5,0x09,0xe5,0x07,0x09,0x09,0x09,0x09,0xe5, 0x07,0x09,0x09,0xe5,0x04,0x06,0xe5,0x05,0x02,0xe5,0x04,0x05,0x02,0xe5,0x07,0xe5, 0xe6,0x05,0x08,0xe5,0x28,0xe5,0x05,0x1e,0xe7,0xe6,0x01,0x01,0x12,0xe5,0x11,0xe5, 0x01,0x0a,0x04,0xe5,0x01,0x09,0x09,0x05,0xe5,0x01,0x08,0x06,0xe5,0x01,0x09,0x05, 0xe5,0x01,0x09,0x0d,0xe5,0x01,0xe5,0x0a,0x03,0x01,0xe5,0x0c,0x01,0xe5,0x01,0xe5, 0x0a,0x2a,0xe5,0x21,0x01,0x02,0x01,0xe6,0x03,0x05,0xe5,0xe5,0xe5,0x03,0x01,0xe5, 0xe5,0x12,0x02,0xe6,0x01,0x01,0x09,0x04,0xe5,0x02,0x04,0x01,0xe5,0x04,0xe5,0x03, 0x13,0x04,0x01,0x02,0x09,0x04,0x01,0xe5,0x04,0xe5,0x05,0x04,0xe5,0x04,0x0f,0x03, 0x0c,0xe5,0x02,0x01,0x0f,0x01,0x1b,0x01,0x07,0x01,0x01,0x1b,0x08,0x02,0x06,0xe5, 0x01,0x01,0x04,0x02,0x01,0x11,0x06,0xe5,0xe5,0xe5,0x06,0x06,0xe5,0xe5,0x08,0x06, 0xe5,0xe5,0x0f,0xe5,0xe5,0x08,0x09,0x09,0x06,0xe5,0x01,0xe5,0x05,0xe5,0x14,0x10, 0x02,0x13,0x01,0x18,0x02,0x01,0x07,0x01,0x1f,0x02,0xe8,0x14,0x08,0x01,0x02,0x09, 0x11,0x04,0xe5,0x01,0xe5,0x08,0x01,0x02,0x09,0xe5,0x01,0xe5,0xe5,0x01,0x09,0x01, 0x01,0xe5,0xe5,0x01,0x01,0x01,0xe5,0x04,0x02,0xe5,0xe5,0x01,0x06,0x04,0x06,0x02, 0xe5,0x01,0xe5,0xe5,0x01,0x04,0x11,0x07,0x02,0x0c,0x06,0x12,0x03,0x02,0x03,0x02, 
0x02,0x03,0x1d,0xe6,0x01,0x0e,0x0c,0x01,0x02,0x07,0x03,0x04,0x04,0x05,0x04,0x03, 0x02,0x06,0xe5,0xe5,0x01,0x09,0x03,0xe5,0xe5,0xe5,0x09,0x04,0x01,0x02,0x01,0x01, 0xe5,0x07,0xe5,0xe5,0x01,0x08,0x02,0x01,0x01,0xe5,0x03,0x03,0xe5,0xe5,0xe5,0x04, 0x03,0x18,0x0a,0x07,0xe5,0x13,0x04,0x02,0x01,0x04,0x02,0x19,0x02,0x01,0xe7,0x0b, 0x01,0x13,0x09,0x05,0x01,0x01,0x04,0x02,0x07,0x01,0x01,0x05,0x01,0x04,0x04,0x01, 0x05,0x01,0x01,0x04,0x02,0x01,0x09,0x07,0x07,0x01,0x0b,0x01,0x07,0x01,0x02,0x02, 0x01,0x01,0x01,0xe5,0x03,0x04,0x0e,0x01,0x05,0x0d,0x05,0x17,0x09,0x1b,0xe5,0x03, 0x04,0x01,0xe5,0x09,0x01,0x13,0x07,0x09,0x01,0x05,0x01,0x04,0x04,0x01,0x02,0x04, 0x04,0x04,0x01,0x02,0x04,0x01,0x01,0xe8,0xe5,0xe5,0x01,0x04,0x04,0x04,0x04,0x04, 0x04,0x06,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x13,0x01,0x02,0x0e,0x04, 0x18,0x04,0x04,0x04,0x18,0x08,0xe6,0x3a,0xe5,0x11,0x02,0x17,0x20,0xe5,0x07,0x02, 0x08,0x02,0x06,0x02,0x03,0x2a,0x02,0x2e,0x02,0x1b,0x03,0x1b,0x02,0x06,0x16,0x09, 0xe5,0x04,0x02,0xe5,0x17,0xe5,0x0b,0x10,0x18,0x09,0x05,0xe5,0x1c,0x13,0x2a,0x1d, 0xe5,0x01,0xe5,0x0d,0x06,0x02,0x03,0x05,0x09,0x09,0x03,0x05,0x09,0x09,0x04,0x01, 0x02,0x06,0x02,0xe5,0x07,0x03,0x05,0x09,0x09,0x0b,0x03,0x02,0x02,0x03,0x05,0xe6, 0x06,0x02,0x06,0x06,0x02,0x09,0x04,0x01,0x02,0x09,0x09,0x04,0x01,0x02,0x09,0x03, 0x05,0x09,0x0d,0xe7,0xe5,0x01,0x12,0xe5,0x30,0x09,0x09,0x06,0x02,0x13,0x13,0x12, 0x02,0x26,0xe5,0x12,0x1d,0x09,0x22,0x06,0x15,0x30,0x08,0x0b,0x08,0x06,0x0b,0x13, 0x15,0x27,0x15,0x1b,0x09,0x2c,0x01,0x14,0xe5,0x05,0x16,0x01,0x07,0x01,0x02,0x01, 0x04,0x02,0x06,0x02,0x01,0x04,0x02,0x01,0x01,0x02,0xe7,0xe5,0x01,0x01,0x02,0x01, 0x04,0x02,0x01,0x02,0x01,0x02,0x01,0x04,0x02,0x05,0xe5,0xe5,0x01,0x01,0x04,0x02, 0x07,0x01,0x04,0x01,0x07,0x01,0x07,0x01,0x02,0x04,0x01,0x04,0x04,0x02,0x06,0x0e, 0x04,0x02,0x01,0x04,0x02,0x1f,0xe6,0xe5,0x16,0x02,0xe5,0xe5,0x06,0x01,0xe5,0x0b, 0x01,0xe5,0x05,0x01,0x01,0x05,0x03,0x01,0xe5,0x01,0x01,0x01,0x02,0x02,0x01,0x01, 
0x05,0x02,0xe5,0x04,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x01,0x03,0x01,0x01, 0x02,0x04,0x01,0x01,0x05,0x03,0x09,0x04,0xe5,0xe6,0x05,0x01,0xe5,0x02,0x02,0x03, 0x01,0x03,0x01,0xe5,0x05,0x01,0x01,0x01,0x05,0xe5,0x0f,0x01,0x01,0x05,0x03,0x19, 0xe5,0x02,0x02,0x02,0x14,0x02,0x01,0x04,0x1c,0xe5,0x03,0x04,0x04,0x03,0x05,0x09, 0x03,0x05,0x01,0x02,0x04,0x04,0x04,0x04,0x04,0x03,0xe5,0x0f,0x03,0xe5,0x03,0x04, 0x0c,0x01,0x06,0xe5,0xe5,0x06,0x01,0x03,0x02,0x0c,0x04,0x04,0x13,0x03,0x05,0x22, 0xe6,0xe5,0x14,0xe6,0x06,0xe5,0x07,0xe5,0x06,0xe6,0x05,0xe7,0x01,0x03,0xe7,0x01, 0x03,0xe8,0xe5,0x02,0xe8,0xe5,0x02,0xe8,0x04,0xe7,0x01,0x03,0xe8,0xe5,0x02,0xe7, 0x01,0x03,0xe7,0x01,0x03,0xe7,0xe5,0x01,0x04,0xe6,0x01,0x03,0xe7,0x06,0xe7,0xe5, 0x04,0xe6,0xe5,0x04,0xe6,0x04,0xe7,0x07,0xe6,0xe5,0x02,0xe7,0x01,0x05,0xe5,0x07, 0xe6,0xe5,0x02,0xe7,0x01,0x03,0xe7,0x07,0xe5,0x07,0xe7,0x02,0x01,0x01,0xe6,0x16, 0x01,0x07,0x01,0x0a,0x05,0xe5,0xe6,0x02,0x01,0xe5,0x07,0x02,0x06,0x09,0x09,0x02, 0xe5,0x01,0xe5,0xe6,0x07,0x09,0xe5,0x07,0x02,0x06,0x0b,0x02,0x06,0x07,0x01,0x03, 0x12,0x06,0xe5,0xe5,0x0f,0x02,0xe5,0x18,0x02,0x06,0x1b,0x01,0x01,0x17,0x01,0x07, 0x01,0x0a,0x06,0x01,0x03,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe5,0x01,0x04,0xe7, 0x04,0x01,0xe6,0x06,0xe6,0x04,0x01,0xe5,0x01,0x04,0xe7,0x04,0x01,0xe6,0xe5,0x03, 0xe6,0x08,0x01,0xe5,0x05,0xe6,0x08,0x1a,0x01,0xe6,0xe5,0x06,0x05,0x01,0xe5,0x01, 0xe5,0x0f,0x05,0x01,0xe6,0x04,0x01,0xe5,0x13,0xe5,0x03,0x01,0xe5,0xe5,0x16,0x01, 0x01,0x05,0x01,0x01,0x0c,0x05,0x04,0x05,0x02,0xe5,0x02,0x01,0x02,0xe5,0x04,0x01, 0x01,0x05,0x02,0xe5,0x02,0xe6,0x02,0x05,0x02,0xe5,0x04,0x01,0x01,0x05,0x02,0xe5, 0x04,0x01,0x01,0x02,0x02,0x01,0x07,0x01,0x01,0x01,0x05,0x08,0x12,0x01,0x02,0xe5, 0x0e,0x02,0xe5,0x02,0x15,0x02,0xe5,0x02,0x01,0x02,0xe5,0x14,0x06,0xe6,0xe5,0x16, 0x01,0x07,0x01,0x0a,0x03,0x05,0x02,0xe5,0x07,0x05,0x03,0x05,0x04,0x02,0x05,0x03, 0x05,0x09,0x05,0x04,0x08,0x05,0x04,0x06,0x04,0x04,0x04,0x0f,0x16,0x0d,0x05,0x05, 
0xe5,0x0f,0x05,0x05,0x03,0x18,0x03,0x01,0x03,0x02,0x0e,0x0f,0xe5,0xe5,0x09,0xe5, 0x10,0x04,0x0b,0xe5,0xe7,0x2b,0xe5,0xe5,0x05,0x02,0x01,0x22,0x03,0xe5,0x3c,0x33, 0x02,0xe5,0xe5,0x0e,0x01,0x07,0x01,0x03,0x03,0x01,0x02,0x04,0x01,0x02,0x04,0x01, 0x01,0x05,0x01,0xe6,0x04,0x01,0x03,0x03,0x01,0x07,0x01,0x07,0x01,0x01,0x05,0x01, 0x06,0xe5,0xe5,0xe5,0x04,0x01,0x01,0x02,0x02,0x01,0xe6,0x06,0x01,0x07,0x01,0x07, 0x01,0x07,0x01,0x06,0xe5,0xe5,0x06,0x01,0x07,0xe6,0x05,0xe5,0xe5,0xe5,0x04,0x01, 0x07,0x01,0x06,0xe5,0xe5,0x06,0x01,0x07,0x01,0x07,0x01,0x0d,0xe5,0x28,0x03,0xe5, 0x07,0xe5,0x02,0x02,0x01,0xe5,0xe5,0x05,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x0b,0x05, 0xe5,0x01,0x05,0xe5,0xe5,0x04,0xe6,0x05,0x01,0xe5,0x06,0x05,0x03,0x0c,0x09,0x01, 0x04,0x19,0x21,0x2f,0x01,0xe7,0x0a,0x08,0x05,0x10,0x09,0x07,0x10,0x02,0x0f,0x16, 0x11,0x1c,0x02,0xe6,0x02,0xe5,0x01,0xe5,0x11,0x09,0x0a,0x08,0x02,0x06,0xe5,0x08, 0x0b,0x07,0x0b,0x10,0xe5,0x02,0x24,0x07,0x03,0x05,0x08,0x01,0x07,0x01,0x07,0x01, 0x07,0x01,0x0b,0x05,0x01,0x06,0xe5,0xe5,0xe5,0x04,0x01,0x03,0x03,0x01,0x0e,0x14, 0x04,0x01,0x03,0x28,0xe5,0x3c,0x03,0x01,0x02,0xe5,0x07,0x07,0x06,0xe6,0x02,0x03, 0xe6,0x09,0xe5,0x03,0x0a,0x04,0x0d,0xe7,0x01,0x01,0x02,0xe5,0x11,0x01,0x02,0x04, 0xe7,0x05,0x01,0x13,0x01,0x03,0x17,0x06,0x02,0xe5,0x07,0x13,0x01,0x4e,0x13,0x04, 0x09,0x04,0x1a,0x02,0x0c,0xe5,0x0d,0x03,0x09,0x01,0x01,0x04,0x02,0x02,0x05,0x02, 0x04,0x0b,0x01,0x02,0x1c,0x0e,0x04,0x0e,0x02,0x01,0x4a,0x03,0x09,0x05,0x0d,0x19, 0x03,0xe5,0x0c,0xe6,0x01,0x04,0xe5,0x02,0x09,0x0a,0x12,0xe5,0x06,0x01,0x0c,0x07, 0x09,0x0a,0x06,0x07,0x03,0x05,0x03,0x05,0x02,0x02,0x07,0x09,0x19,0x24,0x01,0x01, 0x0b,0xe5,0xe5,0x14,0x02,0x06,0x01,0x02,0x02,0xe5,0x09,0x04,0x09,0x09,0x02,0x06, 0x01,0x02,0x04,0x02,0x06,0x01,0x07,0x02,0x04,0x01,0x01,0x02,0xe5,0x04,0x02,0x06, 0x01,0xe5,0x01,0x03,0x04,0x04,0x01,0x07,0xe5,0x18,0x02,0x1b,0x0c,0x25,0x01,0x06, 0x06,0x10,0x14,0x05,0x0e,0xe5,0x02,0xe5,0x03,0x0b,0x0c,0x01,0x04,0x01,0x0a,0x06, 
0x01,0x0c,0x02,0x03,0x01,0xe5,0x0f,0x02,0x08,0x07,0x0a,0x09,0xe5,0x06,0x04,0x12, 0x34,0xe6,0x09,0x1d,0x01,0x07,0x01,0x09,0xe7,0x0f,0xe6,0x04,0x03,0x08,0x0a,0x03, 0x04,0x0a,0x04,0x03,0xe6,0x06,0xe6,0x08,0x04,0x05,0x0d,0xe5,0x01,0x09,0xe7,0x06, 0xe6,0x06,0x0d,0x40,0x01,0x3c,0x04,0x0d,0x03,0x02,0x04,0xe5,0xe5,0x03,0xe5,0xe6, 0x0a,0x02,0x04,0x0a,0x0b,0x06,0x01,0x07,0x01,0x06,0x01,0x01,0x05,0xe5,0x11,0x05, 0x09,0x05,0xe5,0x0c,0x34,0x05,0x03,0xe5,0x01,0x0c,0xe5,0xe5,0x13,0x02,0x05,0x05, 0xe5,0x01,0x06,0x07,0x04,0x06,0x02,0x03,0x0f,0x04,0x0e,0x01,0x04,0x07,0x04,0x04, 0x10,0x13,0x07,0x01,0x03,0x08,0xe6,0x07,0x01,0x4e,0x02,0xe6,0x0d,0x01,0x12,0x08, 0x04,0x04,0x01,0x0e,0x01,0x02,0x06,0x0a,0x01,0x07,0x05,0x03,0x01,0x07,0x09,0x09, 0x0b,0x01,0xe5,0x05,0x09,0x09,0x01,0x07,0x16,0x06,0xe5,0xe5,0x1a,0x21,0x08,0x03, 0xe5,0xe5,0x03,0x07,0x1f,0x09,0x23,0x02,0x07,0x01,0x09,0x07,0x01,0x09,0x02,0xe5, 0x02,0x01,0x0b,0x07,0x01,0x09,0x07,0x01,0x09,0x0b,0x11,0x04,0x49,0xe6,0x0a,0x16, 0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x01,0xe5,0x03,0xe5,0x04,0x09,0x03,0x01,0xe5, 0x01,0x03,0xe5,0x03,0x03,0x05,0x09,0x03,0x05,0x03,0x05,0x03,0x02,0x01,0x02,0x03, 0x02,0xe5,0xe5,0x02,0x05,0x03,0x05,0x06,0x01,0xe5,0x02,0x02,0x01,0xe5,0x02,0x01, 0xe5,0x01,0x03,0x09,0x05,0x16,0xe5,0x27,0xe6,0x01,0xe5,0x01,0x01,0x08,0xe6,0x14, 0x05,0x03,0x05,0xe6,0xe5,0x01,0x01,0x06,0xe5,0x03,0x04,0x01,0x02,0x03,0xe5,0x03, 0xe5,0x01,0xe5,0x03,0xe5,0x07,0x09,0xe5,0x07,0xe5,0x02,0x01,0x02,0x0b,0x04,0x01, 0x02,0xe5,0x07,0xe5,0x01,0xe5,0x03,0x04,0x01,0x02,0x04,0x01,0x02,0x03,0xe5,0x03, 0x08,0xe6,0x07,0x02,0x12,0x25,0x01,0x02,0x02,0xe5,0x03,0x08,0x01,0x02,0x18,0x09, 0x01,0x0b,0x02,0x04,0x04,0x04,0x01,0xe5,0xe5,0x03,0x04,0x04,0x04,0x04,0x04,0x04, 0x04,0x04,0x02,0x01,0x04,0x01,0xe5,0xe5,0x05,0x01,0x02,0x04,0x02,0x01,0x04,0x02, 0x01,0x04,0x04,0x04,0x01,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0x01, 0x04,0x04,0x3d,0x04,0x03,0x02,0x06,0xe5,0x03,0x1b,0x09,0x01,0x0e,0x01,0xe5,0x05, 
0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0x02,0x06,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x0a,0x3d,0x04,0xe6, 0x0f,0x1a,0x09,0x01,0x04,0x01,0x0c,0x02,0x03,0x02,0x02,0x06,0x02,0xe5,0x01,0x02, 0x02,0x01,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0x06,0x02,0x01,0x01,0x02,0x02,0x01, 0x01,0x04,0x02,0xe5,0x01,0x02,0x02,0x06,0x02,0x06,0x02,0x01,0x01,0x02,0x02,0x06, 0x02,0x03,0x02,0x02,0x04,0x04,0x04,0x1d,0x01,0x22,0xe5,0x01,0x02,0x0e,0x01,0x02, 0x14,0x03,0x05,0x01,0x01,0x04,0x0a,0x03,0x02,0xe7,0x02,0x05,0x03,0x04,0xe5,0x02, 0x05,0x03,0x05,0x03,0x01,0x02,0xe5,0x02,0x04,0xe5,0x02,0x07,0x03,0x04,0xe5,0x02, 0x02,0x01,0xe5,0x02,0x01,0x02,0xe5,0x02,0x04,0xe5,0x02,0x04,0xe5,0x02,0x01,0x01, 0xe6,0x02,0x01,0x02,0xe5,0x02,0x07,0x01,0x01,0x02,0x14,0x01,0x01,0x1d,0x03,0xe8, 0x29,0x09,0x01,0x04,0xe6,0x04,0x04,0x03,0x05,0x09,0x07,0x01,0x04,0x04,0x09,0x03, 0xe7,0x01,0x05,0xe6,0xe5,0x03,0x06,0x03,0x03,0x01,0x04,0x04,0x03,0x02,0x02,0x03, 0x02,0xe7,0x03,0x01,0xe5,0xe5,0x03,0x04,0x06,0x04,0x01,0xe5,0xe5,0x01,0x1d,0x01, 0x21,0x02,0x01,0x10,0x1a,0x05,0xe5,0x01,0x06,0x01,0x0c,0x02,0x01,0x01,0x02,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0xe5,0x03,0x02,0x06,0x01,0xe5, 0xe5,0x03,0x01,0xe5,0x04,0x02,0x02,0x06,0x01,0xe5,0x05,0x02,0x01,0xe6,0x01,0x02, 0x01,0x01,0x02,0x01,0xe8,0x02,0x01,0xe5,0x05,0x01,0xe5,0xe6,0x02,0x02,0x02,0x03, 0x1b,0x01,0x1f,0x05,0xe5,0x0f,0x02,0x02,0x1a,0xe5,0x15,0x02,0x02,0xe5,0x01,0x02, 0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x01,0x01,0x02,0x02,0x03,0x02, 0x02,0x01,0x01,0x02,0x02,0x05,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02, 0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x01,0x04,0x02,0x44,0x02,0xe7, 0x0f,0x21,0xe5,0x17,0xe5,0x01,0xe5,0x03,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6, 0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x06,0xe5,0xe6,0x04, 
0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5, 0xe6,0x04,0xe5,0xe6,0x08,0x3c,0xe8,0x08,0x01,0x02,0xe5,0x07,0x09,0x04,0x01,0x02, 0x04,0x01,0x02,0x09,0x07,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x06,0xe5,0xe7,0x04, 0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01, 0xe5,0x05,0x01,0xe6,0x06,0x03,0x05,0x09,0x09,0x09,0x09,0x0d,0xe6,0xe6,0x01,0x09, 0x1d,0x1e,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x0b,0x09,0x09,0x09,0x09,0x09,0x09, 0x09,0x49,0x02,0x01,0xe6,0x0a,0x1d,0x1f,0x09,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02, 0xe5,0x04,0x02,0xe5,0x04,0x02,0x06,0x02,0xe5,0x04,0x02,0xe5,0x05,0xe5,0x01,0x06, 0x02,0xe5,0x04,0x02,0x06,0x02,0x06,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04, 0x02,0x4a,0xe7,0x0e,0xe5,0x19,0x01,0x11,0x01,0x09,0xe7,0x03,0x02,0x01,0x04,0x02, 0xe6,0x03,0x02,0x01,0x04,0x02,0x01,0x04,0x02,0xe6,0x03,0x02,0x01,0x04,0x02,0x01, 0x06,0x02,0x01,0x03,0xe5,0x01,0x01,0x04,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0x01, 0x04,0x02,0x01,0x04,0x02,0xe6,0x03,0x02,0x01,0x4b,0x11,0x19,0x01,0xe5,0x08,0x06, 0x01,0xe5,0x05,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x03,0xe5,0x01,0xe5,0x03, 0xe5,0x01,0xe5,0x03,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x03,0x02,0x02, 0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5, 0x04,0x02,0xe5,0x01,0x02,0x02,0xe5,0x07,0x35,0x02,0xe5,0x02,0x03,0x01,0xe5,0x26, 0x02,0x01,0x11,0x01,0x04,0x0e,0x04,0x01,0x02,0x09,0x09,0x04,0x04,0x01,0x04,0x02, 0x04,0x04,0x08,0x02,0x04,0x04,0x09,0x01,0x07,0x04,0x01,0x02,0x04,0x04,0x09,0xe5, 0x07,0x07,0x01,0x17,0x28,0xe5,0x01,0x15,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5,0x01, 0x03,0xe8,0xe5,0x02,0xe7,0x02,0x02,0x01,0xe5,0x01,0x05,0xe5,0x01,0xe5,0x02,0xe6, 0x01,0x03,0xe7,0x01,0x05,0xe5,0x01,0x03,0x01,0xe5,0x01,0x05,0xe5,0x01,0x03,0x01, 0xe5,0x03,0x04,0xe6,0x01,0x04,0xe6,0x01,0x04,0xe6,0x01,0xe5,0x03,0xe5,0x01,0x05, 
0xe5,0x01,0x03,0x01,0xe5,0x01,0x05,0xe5,0x01,0x03,0x01,0xe6,0xe5,0x04,0xe5,0x07, 0xe5,0x06,0xe6,0x05,0x01,0xe5,0x07,0xe5,0x07,0xe6,0x03,0x03,0x01,0x09,0x10,0x10, 0x01,0xe6,0x07,0xe5,0x07,0x06,0x01,0xe5,0x07,0xe5,0x07,0xe5,0x07,0x09,0xe5,0x07, 0xe5,0x07,0xe6,0x06,0xe5,0x09,0xe5,0x07,0x09,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5, 0x07,0xe6,0x06,0xe6,0x28,0x18,0x07,0x01,0x12,0x18,0x01,0x0e,0x01,0x03,0x02,0x02, 0xe5,0xe5,0x01,0x03,0x02,0xe5,0xe5,0x02,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0xe5, 0xe5,0x02,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0xe5,0xe5,0x02,0x04,0xe5,0xe5,0x02, 0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x06,0x02,0x06,0xe5,0xe5,0x02,0x02,0x02, 0x06,0x02,0x03,0x1d,0x09,0x14,0xe5,0x03,0x01,0x02,0xe5,0x18,0x10,0x01,0x0d,0x01, 0x08,0xe5,0xe5,0x01,0x02,0x01,0x01,0x07,0x01,0x02,0x03,0xe5,0xe5,0x04,0xe6,0xe5, 0x06,0x01,0x02,0x02,0x01,0x01,0x07,0x01,0x05,0x01,0x02,0xe5,0x05,0x02,0x06,0xe5, 0xe5,0x01,0x03,0x02,0x02,0x06,0x09,0x05,0x01,0x01,0x09,0x05,0x01,0x01,0x01,0x18, 0xe5,0x08,0x15,0x09,0xe5,0x02,0x16,0x10,0x01,0xe5,0x08,0x02,0xe7,0x02,0x02,0xe7, 0x01,0x03,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04, 0xe7,0x01,0x06,0x01,0xe5,0x04,0xe8,0x07,0xe5,0x08,0x09,0x05,0xe8,0x08,0x05,0xe8, 0xe5,0x16,0xe7,0x06,0xe6,0x14,0x03,0x01,0x01,0xe6,0x0f,0x1a,0xe5,0xe5,0x05,0xe5, 0xe5,0xe5,0x07,0x09,0x09,0x31,0x09,0x1f,0x2d,0xe5,0xe5,0xe5,0x1b,0xe5,0x2c,0x0b, 0x05,0x01,0x02,0x04,0x01,0x07,0x01,0x03,0x03,0x01,0x02,0x01,0x02,0x01,0xe5,0x05, 0xe7,0x05,0x01,0x01,0x05,0x01,0x01,0x05,0xe7,0x05,0x01,0x02,0x04,0x01,0xe6,0x04, 0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x02,0x06,0x01,0x07,0xe8,0x04,0x01,0x02,0x04, 0x01,0x01,0x05,0x01,0xe5,0x05,0x01,0x01,0x05,0x01,0x07,0x01,0xe6,0x04,0x01,0xe6, 0x04,0x01,0xe6,0x04,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x02,0x09,0x01,0x02,0x08, 0x01,0x01,0xe5,0x01,0x05,0xe5,0x07,0xe5,0xe5,0x05,0xe5,0xe5,0xe5,0x03,0xe5,0x0a, 0x06,0xe5,0x02,0x04,0xe5,0xe5,0x05,0xe5,0xe5,0x02,0x02,0xe5,0x01,0xe5,0x03,0xe5, 
0xe5,0x05,0xe5,0x01,0xe6,0x02,0xe5,0x01,0xe5,0x03,0xe5,0xe5,0x04,0x02,0xe5,0x07, 0x09,0xe6,0xe6,0x03,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x01,0x0c,0x01,0x01,0x16,0x05, 0x2c,0x03,0xe5,0x0a,0x01,0x06,0x01,0x07,0x01,0x08,0xe5,0x01,0xe5,0x02,0x01,0x06, 0x06,0x06,0x04,0x02,0xe5,0x07,0xe5,0x08,0x04,0xe5,0xe5,0xe5,0x08,0x05,0x03,0x0a, 0xe5,0x08,0x05,0x08,0x03,0xe5,0x04,0x04,0x03,0xe5,0x03,0x01,0x07,0xe6,0x04,0x06, 0x04,0x01,0x05,0x06,0x01,0x07,0x0c,0x09,0x1a,0xe6,0x01,0x05,0x08,0x0a,0x09,0x08, 0x04,0x05,0x02,0x03,0x09,0x02,0x08,0x03,0x03,0x01,0x03,0x06,0x08,0x03,0x03,0x02, 0x03,0x04,0x0d,0x01,0x01,0x04,0x02,0x03,0x02,0x09,0x02,0x05,0x0a,0x18,0x05,0x05, 0x15,0x23,0x09,0x03,0x0f,0x01,0x19,0x01,0x09,0x07,0xe7,0x11,0xe5,0x02,0x02,0x02, 0x02,0x01,0x01,0x04,0x02,0x03,0x13,0x07,0x01,0xe5,0x07,0x02,0xe5,0x01,0xe5,0xe5, 0xe5,0x01,0x01,0xe5,0x03,0x04,0x01,0x03,0x05,0x01,0x07,0x01,0x09,0xe5,0x12,0x05, 0x03,0x09,0x0c,0x1c,0x01,0x02,0xe5,0xe5,0x0a,0x0f,0x0d,0x01,0x01,0x04,0x01,0x06, 0x09,0x09,0x09,0x03,0x06,0x02,0x06,0x08,0x13,0xe5,0x01,0x07,0xe5,0x01,0x07,0x02, 0x02,0x02,0x06,0x06,0x09,0x08,0x55,0x0b,0xe5,0x0e,0x03,0x13,0x05,0x03,0x0d,0xe6, 0x02,0x0a,0x02,0xe6,0xe5,0xe5,0x04,0x0d,0x03,0x0a,0xe5,0x02,0x03,0x01,0x02,0x04, 0xe6,0x02,0x03,0x03,0x01,0x07,0x0e,0x08,0x03,0xe6,0x06,0x05,0x03,0x05,0x06,0x01, 0x0a,0x13,0x08,0x1f,0xe6,0xe5,0x07,0x04,0xe5,0x03,0x08,0x0e,0xe5,0x0a,0x06,0x01, 0xe5,0x05,0x04,0x04,0x09,0x02,0x09,0x0a,0xe5,0x06,0x05,0xe6,0xe5,0x11,0xe5,0x02, 0x04,0x04,0x04,0x13,0x01,0x01,0x05,0x01,0x0a,0x03,0x02,0x01,0x0a,0x0b,0x02,0x01, 0x09,0x01,0x0c,0x15,0x03,0x03,0x02,0x08,0x0e,0x07,0x02,0x03,0x06,0x04,0x02,0x0b, 0xe5,0x08,0x05,0x03,0x02,0x0d,0x01,0x05,0x09,0x01,0x01,0x02,0x0e,0x02,0x03,0x01, 0xe5,0x03,0x02,0x01,0x01,0x04,0x02,0x08,0xe5,0x11,0x16,0x18,0x0c,0x15,0x06,0x03, 0x01,0xe5,0x01,0x0e,0x05,0xe6,0x06,0xe6,0x06,0xe5,0x05,0x02,0x02,0x0b,0x01,0x04, 0x02,0x06,0x04,0x07,0x01,0xe5,0xe5,0x0f,0x07,0x02,0x08,0xe5,0x03,0x05,0x09,0x0f, 
0x01,0x01,0xe8,0x02,0xe5,0x07,0x07,0x02,0x01,0x05,0x01,0x01,0x0a,0x03,0xe6,0x01, 0xe7,0x05,0x09,0x0a,0xe6,0x10,0x07,0xe7,0x01,0x01,0x14,0x09,0x07,0x01,0x08,0x16, 0x03,0x05,0x10,0x03,0x0a,0x13,0x0d,0x13,0x0d,0x01,0x1d,0x09,0x03,0x01,0x0b,0x1e, 0x17,0x01,0xe6,0x09,0x01,0x04,0x0b,0x09,0x06,0x01,0xe5,0x09,0x01,0xe5,0xe6,0x01, 0x08,0xe6,0x17,0x04,0x05,0x05,0x03,0x02,0xe5,0x01,0x01,0x0c,0x01,0x01,0x01,0x0c, 0xe5,0x01,0xe5,0x07,0x0f,0x04,0x04,0x0d,0x02,0x02,0xe5,0x01,0x09,0x09,0x05,0x09, 0xe5,0x14,0x02,0x0b,0xe6,0x18,0x20,0x05,0x14,0x12,0x05,0xe6,0xe6,0x01,0x03,0x01, 0x02,0x06,0x02,0xe5,0x08,0x04,0x05,0xe5,0x01,0x02,0x07,0x01,0x0a,0x12,0x02,0x07, 0x03,0x04,0x1c,0x0a,0x20,0xe5,0x01,0xe6,0x12,0x2d,0x28,0xe5,0x17,0x02,0x01,0x15, 0x05,0x01,0x1b,0x09,0x01,0x11,0x01,0x25,0x01,0x24,0x01,0xe6,0x06,0xe5,0x01,0x01, 0xe7,0x06,0xe5,0x07,0xe6,0x06,0x06,0x01,0x01,0xe5,0x07,0x07,0x01,0xe5,0xe5,0xe5, 0x03,0x07,0x01,0xe5,0x06,0xe5,0x04,0x01,0x0b,0x09,0x02,0x01,0x02,0x01,0xe5,0xe5, 0xe5,0x03,0xe7,0x07,0x09,0x08,0xe5,0x07,0xe5,0x08,0x08,0xe5,0x01,0xe5,0xe5,0x02, 0x12,0xe6,0x07,0x09,0xe5,0x1c,0x01,0xe6,0xe5,0x01,0x01,0x04,0xe5,0x03,0x04,0xe5, 0x0e,0x0f,0x03,0x09,0x05,0x03,0x02,0xe5,0x0e,0x08,0x01,0x0e,0xe5,0x01,0x05,0xe5, 0x01,0x02,0x01,0xe6,0x04,0xe5,0x06,0x09,0x05,0xe5,0x01,0x13,0xe5,0x07,0x09,0x05, 0x16,0x0a,0x09,0x1b,0x01,0x03,0x02,0x04,0x08,0x01,0x01,0x09,0x14,0x04,0x06,0x01, 0xe5,0x12,0x03,0x01,0xe5,0x03,0x03,0xe5,0x01,0x01,0x07,0x01,0x04,0x01,0x02,0x09, 0x04,0x01,0x02,0x01,0x0c,0x04,0x0b,0x03,0xe5,0x01,0x01,0x01,0x0f,0x01,0x11,0x03, 0x19,0x01,0x07,0x01,0x1d,0x04,0x01,0x01,0x02,0x07,0x02,0x01,0x25,0x06,0xe5,0xe5, 0x08,0x09,0x06,0xe5,0xe5,0x08,0x01,0x04,0xe5,0xe5,0x08,0x05,0xe5,0x01,0x09,0x15, 0x09,0x05,0xe5,0x01,0x01,0x11,0x01,0x11,0x1d,0x01,0x07,0x01,0x18,0xe5,0x04,0x04, 0xe6,0x0a,0x01,0x01,0x03,0x02,0x14,0x01,0x07,0x0b,0x02,0x04,0x05,0x02,0xe5,0xe5, 0x06,0x04,0xe5,0x01,0x0a,0x04,0xe5,0x01,0x06,0x02,0x07,0x01,0x06,0x02,0x02,0x01, 
0x07,0x01,0x02,0x04,0x04,0xe5,0x01,0x0a,0x01,0x07,0x15,0x06,0x16,0x02,0x03,0x02, 0x02,0x03,0x1a,0x04,0x02,0x0a,0x01,0x01,0x02,0x01,0x14,0x01,0x01,0x05,0x03,0x07, 0x02,0x04,0x01,0x06,0xe5,0xe5,0x08,0x02,0x01,0x01,0x04,0x0a,0x01,0x01,0x09,0x07, 0x01,0x09,0x02,0x01,0x01,0x05,0x01,0x02,0x04,0x04,0x03,0x04,0x05,0x01,0x01,0x05, 0x13,0x07,0xe5,0x13,0x04,0x02,0x01,0x04,0x02,0x19,0x02,0x01,0xe7,0x0b,0x01,0x0c, 0x0e,0x01,0x04,0x02,0x09,0x01,0x07,0x07,0x01,0x09,0x01,0x07,0x01,0x02,0x01,0x02, 0x01,0x02,0x01,0x01,0xe5,0xe5,0x04,0x01,0x01,0x07,0x01,0x05,0x02,0xe5,0xe5,0x06, 0x04,0x04,0x07,0x01,0x01,0x07,0x01,0x04,0x02,0x15,0x02,0x02,0x17,0x09,0x21,0xe5, 0xe5,0xe5,0x01,0x0a,0x01,0x1b,0x01,0x04,0xe5,0xe5,0x03,0x04,0x01,0x02,0x04,0x01, 0x02,0x04,0x04,0x04,0x01,0x02,0x04,0x01,0x03,0xe6,0xe5,0x03,0x01,0x02,0x01,0x02, 0x04,0x01,0x02,0x04,0x01,0x02,0x06,0x01,0x07,0x04,0x04,0x04,0x04,0x01,0x07,0x01, 0x04,0xe7,0x03,0x0a,0xe5,0x01,0x04,0x18,0x04,0x04,0x04,0x1c,0x03,0xe5,0xe5,0xe5, 0x10,0x1f,0x07,0x13,0xe5,0x07,0x02,0x10,0x0b,0x11,0x02,0x06,0x0b,0x02,0x10,0x31, 0xe5,0x42,0xe5,0xe5,0x01,0x16,0x22,0x04,0x0e,0x02,0xe5,0x04,0x02,0x09,0x09,0x01, 0x07,0x06,0x09,0x02,0x06,0x02,0x01,0x01,0x07,0x09,0x0b,0x18,0x13,0x02,0x1d,0x27, 0x03,0xe5,0x08,0x01,0x02,0x02,0x06,0x09,0x09,0x04,0x01,0x02,0x02,0x01,0x01,0x02, 0x09,0x04,0x01,0x02,0x03,0x05,0x03,0x02,0x02,0x03,0x05,0x03,0xe5,0xe5,0x01,0x09, 0x04,0xe6,0x01,0x03,0x07,0x03,0x02,0x02,0x03,0xe5,0xe5,0x01,0x09,0x09,0x06,0x02, 0x09,0x04,0x01,0x02,0x03,0x05,0x09,0x04,0x01,0x02,0x05,0x03,0x09,0x09,0x0d,0xe6, 0x01,0x02,0x09,0x27,0x13,0x1c,0x14,0x13,0x12,0x02,0x09,0x1c,0xe5,0x12,0x1d,0x09, 0x1d,0x04,0x02,0x03,0x0b,0x25,0x07,0x0d,0x2f,0x13,0x15,0x0b,0x1b,0x15,0x1d,0x07, 0x1f,0x08,0xe8,0x0e,0x04,0x09,0x09,0x02,0x01,0x09,0x02,0x06,0x02,0x06,0x02,0x06, 0x02,0x04,0x01,0x02,0x04,0x01,0x09,0x02,0x01,0x04,0x02,0x04,0x01,0x02,0x04,0x01, 0x02,0x01,0x03,0xe5,0xe5,0x06,0x01,0x02,0x06,0x02,0x06,0x09,0x04,0x02,0x01,0x02, 
0x01,0x09,0x04,0x02,0x15,0x02,0x01,0x02,0x04,0x01,0x02,0x20,0x02,0x0c,0xe5,0x01, 0x1a,0x01,0xe5,0x02,0x02,0x03,0x05,0x03,0x05,0x03,0x01,0x03,0x01,0x01,0x01,0x03, 0x01,0x01,0x05,0x01,0xe5,0x07,0x01,0x02,0x02,0x01,0x01,0x01,0x03,0x01,0x01,0x01, 0xe5,0x01,0x01,0x01,0x01,0xe5,0x03,0x01,0xe5,0x05,0x03,0x02,0x02,0x03,0x07,0xe5, 0x07,0xe5,0x05,0x03,0x01,0x0d,0x01,0x01,0x01,0x19,0x01,0x02,0x02,0x03,0x20,0x01, 0x01,0xe5,0x0d,0x1b,0x01,0x09,0x09,0x07,0x01,0x13,0x07,0x09,0x01,0x09,0x09,0x09, 0x08,0x0c,0x09,0x03,0xe5,0xe5,0x01,0x06,0x02,0x03,0xe5,0x01,0x01,0x0d,0xe5,0x21, 0x07,0x01,0x03,0x1e,0x03,0xe5,0x0b,0x07,0xe5,0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe6, 0x04,0xe8,0x04,0xe8,0xe5,0x02,0xe7,0x01,0x03,0xe7,0x01,0x03,0xe8,0xe5,0x02,0xe8, 0xe5,0x02,0xe8,0xe5,0x02,0xe7,0x01,0x03,0xe8,0x04,0xe7,0x08,0xe6,0x05,0xe7,0x01, 0x04,0xe6,0x01,0x05,0xe5,0x01,0x05,0xe6,0x04,0xe7,0x07,0xe6,0x04,0xe7,0x07,0xe5, 0x07,0xe5,0x05,0xe8,0x04,0xe7,0x07,0xe5,0x07,0xe5,0x07,0x02,0x09,0x03,0x01,0x0a, 0x09,0x09,0x06,0x01,0xe5,0x07,0x09,0x09,0x11,0x01,0xe6,0x06,0x11,0x09,0x0b,0x01, 0x04,0x02,0x01,0x09,0x09,0xe5,0x07,0xe6,0x06,0x0a,0xe5,0x06,0x0a,0x10,0x01,0x09, 0x0a,0x19,0xe6,0x0c,0x01,0x0a,0x09,0x09,0x06,0x01,0xe6,0xe5,0x01,0x02,0x01,0x01, 0x02,0xe5,0x02,0x01,0x02,0xe5,0xe5,0xe5,0xe5,0x01,0x04,0x01,0x02,0xe5,0xe5,0x03, 0x01,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02,0xe5,0x02,0x01,0x02,0x02,0x01,0x01, 0x02,0x02,0x01,0x06,0xe5,0x02,0x02,0x01,0xe5,0x02,0x04,0x02,0xe5,0x07,0x0b,0x01, 0x02,0x0a,0x01,0x01,0x01,0x02,0x03,0x04,0x0d,0x01,0x01,0x02,0x04,0x01,0x02,0x03, 0x17,0xe5,0xe6,0x3b,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x02,0x04,0xe5,0xe5, 0xe5,0x02,0x01,0x08,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x0c,0x03,0x01,0x03, 0x04,0x04,0x09,0x09,0x04,0xe5,0x11,0xe5,0x16,0x04,0xe5,0x02,0x04,0xe5,0x1c,0xe7, 0x19,0x09,0x09,0x0c,0x01,0xe5,0x01,0x03,0x01,0xe5,0x05,0x01,0xe5,0x01,0x03,0x01, 0xe5,0x01,0x03,0x01,0xe6,0xe6,0x01,0xe7,0x05,0x01,0xe5,0x01,0x03,0x01,0xe5,0x05, 
0x01,0xe5,0x05,0x01,0xe5,0x04,0x03,0x01,0x07,0xe6,0x01,0x06,0x02,0xe5,0x07,0xe5, 0x0b,0x01,0xe5,0x02,0x06,0x05,0x01,0xe5,0x02,0x02,0x13,0x01,0xe5,0x05,0x01,0xe5, 0x02,0x17,0x02,0xe5,0x1b,0x1a,0xe5,0x05,0x02,0x06,0x09,0xe5,0xe5,0x13,0x05,0x02, 0x32,0xe5,0x07,0xe5,0x1b,0xe5,0x0f,0x02,0x3f,0x0a,0xe7,0xe5,0x0e,0x01,0x07,0x01, 0x07,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x02,0x02,0xe6,0x01,0x01,0x02,0x01, 0x03,0x03,0xe6,0x01,0x04,0xe8,0x03,0xe8,0x02,0x02,0xe6,0xe5,0x04,0x01,0x02,0x04, 0xe6,0xe5,0x04,0x01,0xe5,0x07,0x01,0xe6,0x04,0x01,0x01,0x05,0x01,0x01,0x05,0x01, 0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0xe5,0x05,0x01,0x01,0x05,0x01, 0x02,0x04,0x01,0x07,0x01,0x02,0x04,0x01,0x0a,0x01,0x01,0x14,0x0b,0x02,0x03,0x0f, 0x02,0x01,0x02,0x01,0x01,0xe5,0x05,0x01,0x01,0xe5,0x0d,0xe6,0x09,0x04,0x03,0x07, 0x09,0xe5,0x02,0x07,0x03,0x01,0x03,0xe6,0x05,0x01,0x02,0x04,0x04,0x0b,0x02,0xe5, 0x03,0x03,0xe5,0x07,0xe5,0x02,0x02,0xe5,0x1d,0x01,0x07,0x1a,0x07,0xe5,0xe7,0x0a, 0x0d,0x03,0x0b,0x09,0x02,0x06,0x02,0x09,0x03,0x02,0x0b,0x0c,0x02,0x0e,0xe5,0x01, 0x04,0x0d,0x06,0x02,0x08,0x06,0x02,0x06,0x0c,0x04,0x03,0x07,0x01,0xe5,0x02,0x06, 0x05,0x06,0x02,0x06,0x05,0x05,0x03,0x06,0x1a,0x04,0x02,0x0d,0x11,0x17,0x03,0x04, 0x03,0x05,0x0a,0x06,0x03,0x07,0x0b,0x0f,0x10,0x12,0x17,0x0b,0xe5,0x03,0x05,0x08, 0x19,0x2c,0x02,0x03,0x0d,0x01,0x01,0x09,0x08,0x06,0x01,0xe5,0x01,0x06,0x09,0x04, 0x01,0x02,0x09,0xe5,0x01,0x02,0x01,0x04,0x04,0xe5,0x02,0xe5,0xe5,0x07,0xe7,0x06, 0x01,0xe5,0x05,0xe6,0x03,0x02,0x08,0x01,0x08,0x1f,0x04,0x1c,0x17,0x01,0x19,0x09, 0xe5,0x02,0x0d,0x02,0x1a,0x01,0x03,0x01,0x09,0x07,0x06,0x04,0x0c,0x01,0x04,0x0e, 0xe5,0x01,0x03,0x06,0x01,0x02,0x05,0xe5,0x04,0x01,0x02,0x2e,0x01,0x03,0x03,0x37, 0x1a,0x04,0x03,0xe5,0xe5,0x08,0x05,0xe5,0x0b,0x09,0x0a,0x04,0x02,0x10,0x01,0xe5, 0x05,0x03,0x05,0x03,0x03,0x05,0x05,0x0a,0x03,0x0e,0x07,0x03,0x01,0x01,0x01,0x03, 0x03,0xe5,0x07,0x08,0x0b,0x04,0x08,0xe5,0x0c,0x0b,0x1a,0x1e,0xe5,0x01,0xe5,0x09, 
0x02,0x03,0x12,0x0b,0x04,0x02,0x03,0x05,0x06,0x09,0x09,0x06,0x07,0x04,0x04,0x0b, 0x05,0x03,0x02,0x04,0x06,0x02,0x06,0xe6,0x09,0x03,0x02,0x01,0x11,0x01,0x0c,0x01, 0x05,0x0a,0xe5,0x10,0x03,0x09,0x12,0x0b,0x02,0x03,0x01,0x0b,0xe5,0xe5,0x08,0x1b, 0x12,0xe5,0x03,0x02,0x0b,0x09,0x26,0x04,0xe6,0x0e,0x01,0x07,0x09,0x0b,0x05,0x17, 0x07,0x08,0x2b,0x0a,0xe6,0x04,0x0d,0x08,0x17,0x01,0x07,0x09,0xe6,0x05,0x02,0x13, 0x05,0x03,0x01,0x02,0xe5,0xe7,0x03,0xe5,0xe7,0x07,0xe5,0x07,0x01,0xe6,0x0f,0x10, 0x03,0xe6,0x06,0x01,0x07,0xe5,0x11,0x01,0x07,0xe6,0x37,0x01,0x03,0x01,0x22,0x06, 0x06,0x05,0x0c,0x02,0x05,0x02,0x0c,0x05,0x0b,0x06,0xe5,0x07,0x02,0x04,0xe5,0xe5, 0x03,0x03,0x03,0x1a,0x07,0x04,0x11,0x01,0x1e,0x20,0x14,0xe6,0xe5,0x0e,0x19,0x04, 0x01,0x06,0x03,0x02,0x05,0x01,0x04,0x09,0x02,0xe6,0x03,0xe6,0x0d,0x02,0xe5,0x04, 0x06,0x03,0x01,0x08,0x05,0x06,0x06,0x03,0x02,0xe5,0x03,0x09,0x0e,0x05,0x03,0x02, 0x01,0x06,0x07,0x18,0x03,0x09,0x15,0x08,0x01,0x01,0xe5,0xe5,0xe5,0x09,0x02,0x0b, 0x18,0xe5,0x0b,0xe5,0x03,0x02,0x06,0xe5,0x04,0x02,0x03,0xe5,0x05,0xe5,0x05,0x0a, 0x06,0x01,0x03,0x07,0x09,0x03,0x06,0x07,0xe5,0x02,0x05,0x10,0x09,0x0c,0x1c,0xe5, 0x13,0x14,0x03,0xe7,0x0c,0x01,0x28,0x13,0x09,0x09,0xe5,0x11,0x14,0xe5,0x12,0x02, 0x08,0x05,0x02,0xe5,0x06,0x0a,0x0a,0x0f,0x02,0x09,0x02,0x0e,0x01,0x09,0x20,0xe5, 0x01,0xe6,0x0a,0x09,0x16,0xe5,0x07,0xe5,0x04,0x01,0xe6,0x04,0x02,0x0b,0x01,0xe5, 0x06,0xe5,0x05,0x08,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x11,0x01,0xe5,0xe6,0x01,0x02, 0xe5,0xe6,0x04,0xe6,0xe6,0x03,0x08,0xe6,0x04,0x02,0xe5,0xe6,0x06,0x04,0x17,0x02, 0x01,0xe5,0x08,0x22,0x02,0x01,0xe5,0x01,0x01,0x08,0xe6,0x06,0x17,0xe5,0x05,0xe5, 0x05,0x03,0x05,0xe6,0x15,0xe6,0x05,0x05,0x03,0x09,0xe5,0x03,0xe7,0xe5,0x04,0x09, 0x04,0x01,0xe5,0x01,0xe6,0x01,0xe6,0x02,0xe5,0x01,0xe6,0x08,0xe5,0x0a,0xe7,0xe7, 0x0d,0x02,0x0a,0xe5,0x07,0xe5,0x01,0xe6,0x01,0x06,0x1d,0x01,0x02,0x01,0x01,0x04, 0x05,0x01,0xe5,0xe5,0x06,0x13,0x01,0x04,0x01,0x0a,0x06,0x01,0xe5,0xe5,0x03,0x01, 
0xe5,0x03,0x01,0xe5,0xe5,0x05,0x01,0xe5,0xe5,0x02,0xe5,0x06,0x01,0x01,0xe5,0xe5, 0x03,0xe5,0x07,0xe5,0x06,0xe5,0x0d,0x01,0x01,0x07,0x01,0xe5,0x08,0x08,0x05,0x01, 0x0a,0x06,0x09,0x01,0x0b,0x01,0xe5,0x0b,0x04,0x1a,0x08,0x02,0x07,0x02,0x01,0x07, 0x10,0x02,0x01,0x04,0x0c,0x06,0xe5,0xe5,0x05,0x08,0xe5,0x01,0x09,0x01,0x04,0x0c, 0x01,0x04,0xe5,0x06,0xe5,0x0c,0x0a,0x01,0x09,0x1b,0x01,0x08,0x0a,0x04,0x02,0x01, 0x0e,0x0e,0x18,0xe5,0x04,0x02,0x02,0xe5,0x0a,0x01,0x01,0x02,0xe5,0x01,0xe5,0x10, 0x09,0x07,0x02,0xe5,0x08,0x01,0x03,0x01,0xe5,0x04,0xe5,0x01,0x05,0xe5,0x01,0x05, 0x02,0x07,0x02,0x0a,0x01,0x0c,0xe5,0x01,0x08,0x02,0x0c,0x07,0x01,0x1d,0xe8,0x01, 0xe5,0x06,0xe5,0x0e,0x02,0x01,0x01,0x07,0x08,0x1a,0xe6,0x01,0x02,0x0a,0x03,0x02, 0x01,0xe5,0x02,0x13,0x09,0x02,0x01,0xe5,0x02,0x05,0x01,0x01,0x02,0x01,0x05,0x04, 0x03,0x04,0x04,0x09,0x09,0x05,0x01,0x01,0x09,0x07,0x03,0x09,0x07,0x01,0x05,0x01, 0x01,0x13,0x05,0x02,0xe5,0x06,0x01,0x01,0x01,0xe5,0x03,0x0f,0x01,0x01,0x07,0x01, 0x03,0xe5,0x17,0x02,0xe6,0xe6,0x0b,0x01,0x07,0x13,0x01,0x04,0x07,0x04,0x05,0xe5, 0x01,0x01,0x18,0xe5,0xe5,0xe5,0x10,0x01,0x0c,0x08,0x02,0x09,0x0a,0xe5,0x03,0x07, 0x01,0x04,0x18,0x07,0x09,0x01,0x1b,0x07,0x1b,0xe9,0x0c,0x01,0xe6,0x04,0x13,0x01, 0x05,0x0b,0x09,0x01,0x03,0xe5,0x0b,0x03,0xe5,0x01,0x01,0x01,0x01,0xe5,0x0d,0x01, 0x16,0x01,0xe6,0xe5,0x10,0x04,0x07,0x01,0x03,0xe6,0x10,0xe5,0x01,0x02,0xe5,0x06, 0x04,0x01,0xe5,0xe5,0x0a,0xe5,0x0f,0x1f,0xe5,0x01,0x02,0x05,0xe5,0x08,0x2a,0x1d, 0x06,0x02,0x02,0x03,0x0c,0x1a,0x02,0x02,0x08,0x02,0x08,0x25,0x06,0x02,0x1a,0x02, 0x13,0xe5,0x07,0x0d,0xe5,0x03,0x01,0x02,0x05,0x36,0x01,0x11,0x05,0x05,0x03,0xe5, 0x07,0xe5,0x0d,0x17,0xe5,0x01,0xe5,0x3b,0xe6,0x06,0x01,0x17,0xe5,0x01,0x01,0x11, 0x09,0x01,0x0b,0x05,0xe9,0xe5,0x0b,0x09,0x09,0x06,0x02,0x06,0x02,0x02,0xe5,0x04, 0x06,0x02,0x03,0x05,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0x09,0x09,0x09,0xe6,0x03, 0x04,0x02,0x03,0x02,0x09,0x06,0x02,0x09,0x09,0x04,0x01,0x02,0x03,0x05,0x09,0x09, 
0xe5,0x03,0x03,0x09,0x03,0x05,0x05,0x03,0x0d,0x01,0xe5,0xe5,0x01,0x24,0x02,0x08, 0xe5,0x11,0x0a,0x1a,0x29,0xe5,0xe5,0x07,0xe5,0x0f,0x20,0x4f,0x04,0x02,0xe5,0x01, 0x27,0x0b,0x1c,0x10,0x09,0x2b,0x02,0x08,0x2f,0x25,0x2b,0x09,0xe7,0xe5,0x07,0x16, 0x01,0x04,0x01,0x02,0x07,0x01,0x07,0x01,0x04,0x0c,0x01,0x01,0x04,0x02,0xe7,0xe5, 0x01,0x01,0xe7,0x05,0x11,0x0c,0x01,0x01,0x04,0x01,0x04,0x02,0x06,0x02,0x09,0x01, 0x09,0x04,0x0c,0x01,0xe6,0x04,0x13,0x01,0xe7,0x03,0x13,0x01,0x1a,0x01,0x10,0xe5, 0x0e,0x01,0xe5,0x05,0x02,0x06,0x01,0xe5,0x02,0x02,0x01,0xe5,0x08,0x06,0x01,0xe6, 0x01,0x02,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe5,0x02,0x0c,0x02,0x09,0xe5,0x06, 0x01,0xe5,0x07,0xe5,0x04,0xe5,0x01,0x06,0x01,0xe5,0x04,0xe5,0x01,0x09,0x06,0x01, 0x01,0x05,0x02,0x0d,0x02,0x01,0xe6,0x04,0x02,0x09,0x06,0x01,0xe5,0x16,0x01,0xe6, 0x13,0x0c,0x01,0x09,0x07,0x01,0x07,0x01,0x03,0x0a,0x04,0xe5,0x0f,0x01,0xe5,0x05, 0x01,0x0d,0x03,0x08,0xe5,0xe5,0x07,0x02,0x06,0xe5,0x08,0x09,0x01,0x09,0x03,0x03, 0x01,0x07,0x01,0xe5,0x02,0x02,0x16,0x05,0xe5,0x08,0x01,0x24,0xe6,0x13,0xe6,0x07, 0xe6,0xe5,0x04,0xe6,0x06,0xe6,0x06,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0xe5,0x04,0xe6, 0x01,0x04,0xe6,0xe5,0x04,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0x04,0xe8,0xe5,0x03,0xe6, 0x01,0x01,0x04,0xe6,0x01,0x03,0xe8,0x05,0xe7,0xe5,0x03,0xe7,0x06,0xe6,0xe5,0x02, 0xe8,0xe5,0x04,0xe6,0x04,0xe7,0x07,0xe5,0x07,0xe6,0x04,0xe8,0xe5,0x02,0xe7,0x07, 0xe5,0x07,0xe5,0x06,0xe5,0xe6,0x03,0x08,0x02,0x03,0x01,0x03,0x0f,0x02,0xe6,0x04, 0x01,0xe5,0x08,0xe5,0x07,0x06,0x01,0xe5,0x08,0x10,0x01,0xe5,0x08,0x06,0x05,0xe5, 0x04,0x08,0x01,0xe5,0x05,0x02,0xe5,0xe6,0x01,0x02,0x03,0x02,0x01,0x06,0x02,0xe5, 0x08,0xe5,0xe6,0x01,0x01,0x07,0x02,0xe5,0xe6,0x04,0x06,0x01,0xe5,0x05,0x05,0xe5, 0x07,0xe5,0x01,0x01,0x18,0x03,0x0d,0x04,0x02,0x15,0x09,0x01,0x14,0x06,0x01,0xe5, 0xe5,0x08,0x09,0x04,0x01,0xe5,0x08,0x06,0x08,0x03,0x01,0x03,0x02,0x01,0xe5,0x04, 0x02,0x06,0xe5,0x07,0xe5,0xe5,0x06,0x02,0x0f,0xe5,0xe5,0x01,0x04,0x08,0x03,0x06, 
0x01,0x02,0x04,0x08,0x09,0xe5,0xe5,0x1a,0xe5,0x0c,0x02,0x09,0x12,0x07,0x01,0xe5, 0x19,0x01,0x0a,0x10,0x01,0x11,0x07,0x08,0xe5,0x01,0xe5,0xe5,0x03,0x02,0x07,0x01, 0x02,0x08,0x04,0x04,0x07,0x01,0x05,0x03,0x07,0x07,0x0e,0x06,0x07,0x01,0x01,0x05, 0x1c,0x01,0x01,0x0d,0x02,0x02,0xe5,0x04,0x0d,0x02,0x02,0x06,0x01,0xe5,0x08,0x08, 0xe5,0x05,0x01,0xe5,0x19,0x01,0xe5,0x08,0x06,0x05,0x01,0xe5,0x05,0xe7,0x01,0x01, 0xe5,0x02,0x01,0x02,0xe5,0x01,0x01,0xe6,0x01,0x03,0x01,0xe5,0xe6,0x05,0x02,0x06, 0x01,0xe5,0x01,0x01,0xe6,0xe6,0x05,0x02,0x02,0x01,0xe5,0x02,0x10,0x02,0x03,0xe7, 0xe5,0x03,0xe6,0x1a,0xe5,0xe6,0x0c,0x02,0x06,0x02,0xe5,0x0c,0x09,0x03,0xe5,0x05, 0x02,0x06,0xe5,0xe5,0x03,0x06,0x04,0x03,0x0d,0x01,0x02,0x01,0x02,0x15,0x02,0x04, 0x03,0x02,0x0b,0x04,0x01,0xe5,0x0d,0x06,0x1c,0x09,0x14,0x12,0x16,0xe5,0xe6,0xe5, 0x0b,0x02,0x01,0x04,0x02,0x01,0x02,0x04,0x01,0x07,0x01,0x07,0x01,0x06,0xe5,0xe5, 0x02,0x03,0x01,0x07,0x01,0xe6,0x04,0x01,0x07,0xe6,0xe5,0xe5,0x01,0xe5,0xe5,0xe5, 0x04,0x01,0x01,0x05,0x01,0x01,0x03,0xe6,0xe5,0xe5,0x04,0x01,0x01,0x01,0x05,0x01, 0x07,0x01,0xe6,0x04,0x01,0x01,0x05,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0xe5,0xe5, 0x03,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x0b,0x02, 0x0d,0x01,0xe5,0xe5,0x03,0x01,0xe5,0x01,0x09,0x05,0xe5,0xe5,0x02,0x02,0x01,0x01, 0x01,0x01,0xe6,0x02,0x02,0x15,0xe5,0x02,0x05,0xe5,0x02,0x02,0x01,0x01,0x07,0x04, 0x0e,0xe6,0xe5,0x03,0x02,0xe5,0x01,0xe5,0x08,0x03,0xe6,0x01,0x07,0x01,0x0a,0xe5, 0x13,0x05,0x08,0x03,0x03,0xe5,0x09,0x04,0x06,0x16,0x08,0x02,0xe5,0x0b,0x08,0xe5, 0xe5,0x02,0x09,0xe5,0x01,0x0d,0x04,0x02,0x03,0x07,0x01,0x02,0x03,0x05,0x03,0x07, 0xe5,0x01,0x03,0x02,0x01,0x04,0x0d,0xe5,0x03,0x05,0xe5,0x01,0x02,0x01,0xe6,0xe5, 0x0d,0xe5,0x01,0x03,0x04,0x01,0x02,0x09,0x09,0x1d,0xe5,0x36,0xe5,0xe5,0xe5,0x01, 0x09,0x02,0x1c,0x0a,0x04,0x08,0x06,0x02,0x06,0x02,0x02,0x01,0x07,0x08,0x04,0xe5, 0x01,0xe5,0x01,0x04,0x07,0x01,0x01,0x01,0xe5,0x04,0x17,0x06,0x02,0x06,0x09,0x02, 
0x05,0x0b,0xe5,0x16,0x03,0x15,0x12,0x06,0xe5,0x03,0xe5,0x08,0x03,0x04,0x07,0x03, 0x02,0x01,0x02,0x01,0xe5,0x07,0x02,0x01,0x07,0x03,0x05,0xe6,0x06,0x01,0x07,0x01, 0x07,0x01,0x07,0xe5,0x19,0x01,0xe6,0x06,0xe6,0xe5,0x05,0x03,0x0a,0x05,0x01,0x01, 0x03,0x0e,0xe5,0x03,0xe5,0x18,0x0c,0x1d,0x10,0x03,0xe5,0x07,0x06,0x03,0xe5,0xe5, 0x05,0x02,0xe5,0x01,0x0c,0x01,0x01,0x02,0x09,0x03,0x03,0x06,0x04,0x09,0x04,0x03, 0x10,0x11,0x01,0x07,0x22,0x01,0x01,0x0b,0x01,0x0d,0x04,0x01,0x10,0xe6,0x06,0x09, 0x2c,0x09,0x05,0x2b,0x05,0x03,0x0a,0x02,0xe6,0x02,0x03,0x05,0xe5,0x01,0x05,0x09, 0x09,0xe5,0x0b,0x01,0x03,0x03,0x09,0x03,0x02,0x01,0x08,0x0a,0x04,0x0c,0x03,0x0f, 0x27,0x0a,0x09,0x02,0x06,0xe6,0xe6,0x09,0x02,0xe5,0x0c,0x07,0x0a,0xe5,0x02,0x03, 0x06,0x0e,0x04,0x02,0x05,0x08,0x04,0x0c,0x02,0x03,0x0b,0x01,0x02,0x02,0x04,0xe5, 0x04,0x05,0x0d,0x10,0x04,0x11,0x18,0x09,0x06,0x12,0x14,0x01,0x03,0x01,0xe5,0x0b, 0x02,0x09,0x01,0x09,0x07,0x09,0x01,0x08,0x0d,0x10,0x01,0x05,0x12,0x02,0x0c,0x02, 0x06,0x0a,0x03,0x01,0x01,0x09,0x07,0x0b,0x0b,0x09,0x25,0x09,0x1d,0x01,0xe6,0x0b, 0x01,0x02,0x10,0x11,0xe7,0x05,0x01,0x02,0x04,0x01,0x01,0x06,0xe6,0x04,0x01,0x01, 0x07,0xe5,0x01,0x04,0xe7,0x05,0x05,0x03,0x06,0xe5,0x01,0xe5,0x01,0x05,0x01,0xe5, 0xe5,0x05,0xe5,0x03,0x03,0xe5,0x0f,0x01,0xe6,0xe5,0x04,0x01,0xe5,0x01,0x03,0x01, 0x07,0xe5,0x03,0x03,0x09,0x03,0x05,0xe6,0x01,0xe5,0x02,0xe5,0x02,0x22,0xe5,0xe5, 0x05,0xe5,0x03,0x05,0x01,0x0c,0x01,0x12,0x07,0x0a,0x06,0x03,0xe5,0x01,0x09,0x02, 0xe5,0x09,0x1d,0x02,0x04,0x01,0x02,0x09,0x09,0x0c,0xe5,0x02,0x0c,0x02,0x0f,0xe5, 0x04,0x09,0x06,0x03,0x0b,0xe5,0x08,0x16,0x02,0xe5,0x02,0x04,0x25,0x05,0x09,0x02, 0x01,0x0b,0x09,0x05,0x02,0x04,0x01,0x04,0x01,0x02,0x01,0x04,0x02,0x0a,0x01,0x05, 0x06,0x02,0x02,0x02,0xe5,0x03,0xe7,0xe5,0x01,0x01,0x01,0x08,0x06,0x03,0x05,0x02, 0xe5,0xe5,0x06,0x02,0xe5,0x02,0x04,0x08,0x0c,0x03,0x01,0x06,0x02,0x1b,0x05,0x01, 0xe6,0x01,0xe5,0x0d,0x11,0xe5,0x08,0x04,0x01,0x01,0x07,0x0b,0x0e,0x02,0x03,0x08, 
0x06,0x03,0x05,0x01,0x07,0x09,0x03,0x05,0xe6,0x0a,0x0b,0x10,0xe6,0xe5,0x03,0x07, 0x01,0xe5,0x09,0x0b,0x0e,0x05,0x27,0xe8,0xe5,0x1a,0x08,0xe5,0x12,0x06,0x03,0x08, 0x02,0x0a,0xe5,0x03,0x09,0x03,0x01,0x07,0xe5,0x02,0xe5,0x03,0x0a,0x13,0x05,0x0d, 0x13,0x01,0x06,0x2e,0x26,0x03,0x01,0x0b,0xe5,0x01,0xe5,0x08,0x01,0x09,0x04,0x04, 0x09,0x06,0x0c,0x04,0x04,0x07,0x0a,0x05,0x0e,0x08,0x05,0x03,0x0c,0x09,0x06,0x08, 0x09,0x09,0x09,0x08,0x02,0x08,0x26,0xe5,0x07,0x01,0x10,0xe5,0x03,0xe5,0xe5,0x01, 0x03,0x02,0x05,0x02,0xe5,0x08,0x05,0x03,0x05,0x02,0xe5,0x07,0xe5,0x04,0x02,0xe5, 0x07,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0xe5, 0x09,0xe5,0x07,0x06,0x02,0x07,0x02,0x08,0xe5,0x07,0x0a,0x06,0x01,0xe5,0x07,0xe5, 0x05,0x02,0x08,0xe5,0x07,0x09,0x06,0x0a,0x05,0x01,0xe8,0x07,0xe5,0xe5,0x01,0xe5, 0x07,0xe5,0x04,0x02,0xe5,0xe6,0x04,0xe5,0x04,0x03,0x0a,0xe6,0x03,0x01,0xe5,0x07, 0x05,0x02,0xe5,0xe6,0x05,0x05,0x04,0xe6,0x04,0xe5,0x07,0xe5,0x14,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x0b,0xe5,0x05,0x02,0x01,0x04,0x05,0x02,0xe5,0x08,0x08,0xe5,0x08, 0x02,0xe5,0x05,0x08,0xe5,0x06,0xe5,0x13,0xe5,0x01,0xe6,0x01,0x01,0x08,0xe6,0x0a, 0x05,0x02,0x03,0xe5,0xe6,0x07,0x03,0x05,0x0d,0x01,0xe5,0x05,0x05,0x02,0x02,0xe5, 0x06,0x04,0x06,0x01,0x04,0x05,0xe5,0x01,0x02,0xe5,0x10,0x02,0x01,0x03,0x03,0x01, 0x03,0x06,0xe5,0x07,0xe6,0x03,0x01,0xe6,0x07,0x03,0x05,0xe5,0xe5,0x09,0x02,0x01, 0x07,0x01,0x0d,0x0a,0x11,0x01,0x03,0x02,0x04,0x05,0xe5,0xe5,0x02,0xe5,0x0d,0x0b, 0x01,0x05,0x05,0xe5,0x07,0xe5,0x01,0x01,0x04,0x01,0xe5,0x01,0x06,0x04,0x01,0xe5, 0xe5,0xe5,0xe5,0x04,0x06,0x01,0xe5,0xe5,0x08,0x03,0xe5,0x03,0xe5,0x1b,0x01,0x09, 0x04,0xe5,0xe5,0xe5,0xe5,0x06,0x01,0x05,0x04,0x06,0x15,0x2f,0x05,0x02,0x02,0x07, 0xe5,0xe5,0xe5,0x10,0x09,0x09,0x09,0x06,0x02,0x01,0x07,0xe5,0xe5,0x05,0x06,0xe5, 0xe5,0xe5,0x06,0x09,0x01,0x07,0x06,0x03,0xe6,0x1b,0x01,0x07,0x06,0xe5,0xe5,0xe5, 0x06,0x09,0x09,0x49,0x02,0xe5,0x01,0x0d,0x01,0x11,0x0b,0x03,0x02,0xe5,0x03,0x09, 
0x01,0xe6,0x04,0x04,0x01,0xe5,0xe5,0x03,0x0e,0x04,0x04,0x10,0x05,0xe6,0x03,0x0b, 0x04,0x01,0x02,0x09,0x06,0x03,0x02,0xe5,0xe5,0x01,0x01,0x01,0x02,0x02,0x03,0xe5, 0x0a,0x02,0x01,0x01,0x0f,0x09,0x09,0x1f,0xe6,0xe5,0x01,0x0a,0x01,0x01,0x0f,0x03, 0x05,0x03,0x01,0x02,0xe5,0x02,0x09,0x09,0x05,0x01,0x02,0x04,0x03,0x09,0x05,0x04, 0x08,0x05,0x03,0x03,0x01,0x02,0x12,0x01,0x01,0x09,0x05,0x03,0x01,0x02,0xe5,0xe5, 0xe5,0x04,0x03,0x02,0xe7,0x02,0x05,0x03,0x01,0x02,0x40,0x03,0x02,0xe5,0x0b,0x01, 0x02,0x0e,0x0b,0x07,0x04,0x04,0x03,0x02,0x02,0x01,0x01,0x05,0x09,0x09,0x01,0x01, 0xe5,0xe5,0x01,0x09,0x01,0x09,0x07,0x01,0x01,0xe5,0x0f,0x01,0x07,0x01,0x01,0x07, 0x07,0x01,0x01,0x07,0x07,0x0b,0x04,0x1a,0x02,0x01,0x04,0x02,0x1c,0x02,0x01,0x01, 0xe5,0x09,0x01,0x11,0x09,0x09,0x03,0xe5,0xe5,0x01,0x04,0xe7,0xe5,0xe5,0x01,0x04, 0x04,0x04,0x06,0x01,0xe5,0xe5,0x01,0x02,0x01,0x09,0x01,0x07,0x09,0x01,0xe5,0xe7, 0x07,0xe5,0xe5,0x01,0x01,0x01,0xe5,0x03,0x01,0x02,0x04,0x09,0x01,0x02,0x04,0x03, 0xe5,0x03,0x09,0x03,0xe8,0x0e,0xe5,0x08,0x01,0x01,0x05,0xe7,0x1e,0xe5,0x30,0x06, 0x04,0x04,0x02,0xe5,0x04,0x1d,0x0c,0xe5,0x11,0x02,0x03,0x02,0x08,0x04,0x04,0x09, 0x13,0x02,0x06,0x0c,0x02,0x03,0x13,0x09,0x09,0x22,0x02,0x14,0x1d,0xe6,0x02,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0xe5,0x11,0xe5,0x07,0xe5,0x15,0x05,0xe5,0x03,0x05,0xe5, 0x07,0xe5,0x07,0xe5,0x0b,0x05,0xe5,0x01,0xe6,0x02,0xe5,0x0b,0x05,0xe5,0x11,0xe5, 0x07,0xe5,0x07,0xe5,0x1f,0xe5,0x01,0xe5,0xe5,0x06,0x01,0x02,0x02,0xe6,0xe5,0x01, 0x09,0x09,0x02,0x06,0xe5,0x04,0x02,0xe6,0x06,0xe6,0x03,0x02,0xe6,0x06,0x09,0xe6, 0x06,0xe5,0xe5,0x05,0x09,0x04,0xe6,0x01,0xe5,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02, 0xe6,0x03,0x02,0x03,0x05,0xe6,0x06,0xe5,0x02,0x01,0x02,0x05,0x03,0xe5,0x07,0x09, 0xe5,0x07,0xe6,0x06,0xe6,0x06,0x09,0x06,0x04,0x01,0xe6,0x01,0x02,0x09,0x09,0x24, 0x02,0x10,0x34,0x26,0x02,0x06,0x02,0x6c,0x05,0x02,0x03,0x0b,0x08,0x24,0x03,0x37, 0x0c,0x17,0x04,0x09,0x02,0x0a,0x19,0x13,0x13,0x34,0x02,0xe5,0xe5,0x0a,0x01,0x06, 
0x02,0x11,0x01,0x09,0xe7,0x03,0x01,0xe7,0x02,0xe5,0xe8,0x05,0xe7,0x02,0x02,0x06, 0x02,0xe7,0x0c,0x02,0x07,0x01,0x06,0x03,0x01,0x03,0x04,0xe7,0x05,0xe5,0xe5,0x05, 0xe7,0x03,0x01,0x04,0x02,0x01,0xe7,0x05,0xe7,0x02,0xe5,0xe5,0x09,0xe6,0x10,0x01, 0x06,0xe7,0xe5,0x04,0x01,0x15,0x0d,0x0d,0x01,0xe5,0x05,0x02,0x03,0x0c,0x01,0xe5, 0x07,0xe6,0x03,0xe5,0x01,0xe5,0x04,0x01,0xe6,0x04,0x02,0xe5,0x06,0xe5,0x07,0xe6, 0x01,0x04,0xe6,0x06,0xe5,0x05,0x01,0xe5,0x05,0x02,0xe5,0xe5,0x06,0xe6,0x06,0xe6, 0x04,0x02,0xe5,0x04,0x01,0xe5,0x05,0x01,0xe6,0x01,0x04,0xe6,0x04,0x01,0xe5,0x02, 0x06,0x13,0x04,0x02,0xe6,0x08,0x19,0xe5,0x02,0x01,0x02,0xe5,0x0e,0x09,0x0d,0x0c, 0x02,0x01,0x07,0xe5,0x05,0x01,0xe5,0x04,0x02,0xe5,0x01,0x05,0x09,0xe5,0x05,0x06, 0x04,0x07,0x01,0x07,0x0a,0x02,0x01,0x01,0x05,0x04,0x04,0x07,0x01,0x09,0xe5,0x07, 0xe5,0x05,0x01,0x0b,0x16,0x04,0xe5,0x2a,0xe8,0x0c,0x05,0x01,0xe6,0x06,0xe5,0x07, 0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07,0xe6,0xe5,0x04,0xe6,0x06,0xe5,0x01,0x05,0xe5, 0x01,0x05,0xe6,0xe5,0x04,0xe5,0x01,0x05,0xe6,0xe5,0x04,0xe6,0xe5,0x04,0xe5,0x03, 0x04,0xe6,0x01,0x03,0xe8,0x01,0x03,0xe7,0xe5,0x04,0xe5,0x01,0x05,0xe5,0x01,0x03, 0xe8,0xe5,0x04,0xe5,0x05,0xe7,0x07,0xe5,0x06,0xe6,0x01,0x03,0xe7,0x05,0xe7,0x07, 0xe5,0x07,0xe7,0x02,0x01,0x01,0x01,0x10,0x02,0x05,0x0a,0x06,0x01,0xe5,0x07,0xe5, 0x08,0x1c,0xe6,0x06,0xe6,0x06,0xe5,0x07,0x07,0x01,0x09,0xe5,0x09,0xe5,0x04,0x02, 0x06,0x02,0xe5,0x04,0xe5,0xe6,0x08,0x08,0xe5,0x04,0xe5,0xe6,0x08,0x05,0x03,0x09, 0x05,0x02,0x06,0x03,0xe5,0x03,0x1b,0x02,0xe5,0x0f,0x04,0x01,0x13,0x01,0x09,0xe5, 0xe5,0x08,0x09,0x09,0x06,0x09,0x02,0x04,0x01,0xe5,0xe5,0x05,0x07,0x01,0x07,0x01, 0xe5,0xe5,0x07,0xe5,0xe5,0x02,0x02,0x02,0xe5,0xe7,0x03,0x03,0xe5,0xe5,0x0b,0x06, 0x02,0xe5,0xe7,0xe6,0x0a,0xe5,0xe6,0x02,0x09,0x01,0x06,0x02,0xe5,0xe6,0x04,0xe5, 0xe6,0x1c,0x01,0x0f,0x09,0x0a,0x09,0x08,0x0a,0x1c,0x09,0x13,0x09,0x12,0x02,0x04, 0x04,0x04,0xe5,0x02,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x02,0xe5,0x02,0x0a, 
0x03,0xe5,0x11,0x04,0x04,0xe5,0x07,0xe5,0x1c,0x02,0x0d,0x02,0x02,0x03,0x0c,0x09, 0x08,0x27,0xe5,0x07,0x07,0x0b,0xe5,0x05,0x01,0x09,0x0b,0x04,0x04,0x03,0x03,0x09, 0x01,0xe5,0x07,0x09,0x03,0x03,0x01,0xe5,0x0b,0x06,0x12,0x03,0x09,0x1f,0xe7,0x0f, 0x10,0x01,0xe5,0x0f,0x01,0xe5,0x05,0xe5,0xe5,0xe5,0x08,0x04,0x09,0x01,0xe6,0x09, 0x04,0x01,0xe6,0x18,0xe5,0xe6,0x18,0x0f,0x17,0x01,0x17,0xe5,0x15,0x26,0xe8,0xe5, 0x0e,0x01,0x07,0x01,0x04,0x02,0xe6,0x06,0x01,0x01,0x05,0x01,0x01,0x02,0x02,0x01, 0x01,0x05,0x01,0x01,0x01,0x03,0x01,0xe5,0x02,0x02,0x01,0xe5,0x05,0x01,0x02,0x01, 0x02,0x01,0xe5,0xe5,0x03,0x01,0x01,0x05,0xe7,0x01,0x03,0xe7,0x07,0x01,0xe6,0x04, 0x01,0xe5,0xe5,0x03,0x01,0x06,0xe5,0xe5,0x06,0x01,0x07,0x01,0x02,0x01,0x02,0x01, 0x07,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x01,0x05,0xe6,0x01,0x04, 0x01,0x07,0x01,0x02,0x08,0x02,0x0d,0x05,0x05,0x03,0x05,0x01,0x08,0x03,0x03,0xe6, 0x08,0x08,0xe6,0x01,0x04,0x01,0x09,0x04,0x04,0x07,0xe6,0xe5,0x04,0x04,0x04,0xe5, 0xe5,0x02,0x02,0x02,0x05,0x02,0xe7,0x05,0xe5,0xe5,0x05,0x03,0x05,0xe5,0x01,0xe5, 0x03,0xe5,0x04,0x03,0x02,0x05,0xe5,0x02,0xe5,0x02,0xe5,0x01,0x05,0x09,0xe6,0x0a, 0x05,0xe5,0x07,0xe5,0x16,0x02,0xe6,0x0a,0x0c,0x06,0x02,0x06,0x01,0x07,0xe5,0x04, 0x02,0xe5,0x03,0x04,0x08,0x02,0x06,0x15,0x04,0x05,0x03,0x04,0x03,0x0d,0x03,0x07, 0x01,0xe5,0x09,0xe5,0xe5,0x01,0x02,0x04,0x03,0xe5,0xe5,0x04,0x07,0x03,0x08,0x04, 0x05,0x05,0x01,0x08,0x04,0x0d,0x1b,0x02,0x01,0x26,0x07,0x08,0x13,0x06,0x03,0x01, 0x13,0x06,0x0c,0x06,0x01,0x06,0x03,0x08,0x01,0x01,0x0c,0x02,0x06,0xe5,0x0b,0xe6, 0x08,0x02,0x01,0x02,0xe5,0x05,0x14,0x02,0x02,0xe5,0x09,0x01,0x07,0x01,0x0f,0x08, 0xe5,0xe6,0x07,0x02,0x01,0xe6,0x04,0x05,0x02,0x09,0x18,0x0b,0xe5,0x05,0x04,0x01, 0x14,0x05,0x12,0x04,0xe7,0x05,0x05,0x0b,0xe5,0x0d,0x07,0xe5,0xe5,0x08,0x06,0x01, 0x18,0x0f,0x0a,0x16,0x08,0xe5,0x02,0x02,0x0b,0x17,0x03,0x08,0x0c,0x03,0x0f,0x0e, 0x10,0x04,0x11,0x01,0x05,0x05,0x05,0x16,0x1b,0xe5,0x02,0x01,0x35,0x0b,0x0f,0x02, 
0xe5,0xe6,0x18,0xe5,0x08,0x17,0x04,0x03,0xe5,0x03,0x04,0x08,0x13,0x04,0xe5,0x0c, 0x04,0xe5,0x06,0x07,0x01,0x04,0x04,0x0d,0xe5,0x04,0xe5,0x04,0xe6,0x03,0x02,0x04, 0xe5,0x08,0x03,0x04,0xe5,0x0c,0x01,0x0c,0xe5,0x07,0xe5,0x16,0x09,0x04,0x01,0x09, 0x0a,0xe6,0x20,0x05,0x08,0x0c,0x0c,0x06,0x06,0x02,0x09,0x06,0x06,0x06,0xe5,0x09, 0x10,0x0b,0x03,0x0c,0x09,0x01,0x07,0x04,0x13,0x04,0x09,0x01,0x26,0x12,0x06,0x04, 0x09,0x06,0xe5,0x0a,0x06,0x16,0x07,0x01,0x06,0x01,0x05,0x05,0xe5,0x06,0x04,0x02, 0x0e,0x1c,0x0a,0x05,0x02,0x04,0x02,0x07,0x08,0x1a,0x02,0x04,0x09,0x1f,0xe6,0x01, 0x08,0x02,0x09,0x08,0xe6,0x11,0x01,0x05,0xe6,0x0a,0x02,0x03,0xe5,0x11,0xe6,0x06, 0xe5,0x08,0x01,0x07,0x02,0x02,0x09,0x02,0x04,0x02,0x03,0xe7,0x01,0x03,0xe6,0x02, 0x03,0xe6,0x02,0x03,0xe6,0x02,0x02,0xe6,0x01,0x03,0xe7,0x06,0x09,0xe6,0xe5,0x04, 0xe6,0x0b,0x05,0xe5,0x22,0xe5,0xe6,0x0d,0x09,0x03,0x06,0x0b,0x03,0x03,0x09,0x06, 0x08,0x01,0xe5,0x01,0x12,0x01,0x04,0x02,0xe5,0x06,0x09,0x0e,0x02,0x05,0x08,0xe5, 0x02,0x03,0x08,0x02,0x08,0x06,0xe5,0x03,0x03,0x01,0x0e,0x04,0x07,0xe5,0x14,0x21, 0xe8,0xe5,0xe5,0x0e,0x05,0x18,0x04,0x04,0x03,0x03,0x01,0x04,0x04,0x0b,0x25,0xe7, 0xe5,0x05,0xe5,0xe5,0x07,0x03,0x02,0xe6,0x02,0x02,0xe5,0x06,0x05,0x01,0x06,0x07, 0x01,0x04,0x07,0x01,0x02,0x01,0x0b,0x0d,0x01,0x0d,0x1f,0x04,0x02,0xe5,0x07,0x06, 0x08,0x05,0x0f,0x07,0xe5,0x01,0x06,0x02,0x05,0x25,0x01,0x17,0x05,0xe6,0xe5,0x0d, 0x02,0x0a,0x0a,0x0d,0x07,0xe5,0x03,0xe5,0xe5,0x07,0xe6,0x0e,0x0b,0x01,0xe5,0x12, 0x14,0xe5,0xe5,0x01,0x06,0x10,0x03,0x07,0x01,0x06,0x05,0x01,0x14,0x0a,0x38,0x0f, 0x04,0xe5,0x01,0xe5,0xe5,0x0f,0x19,0x01,0x15,0x06,0x33,0x03,0xe5,0xe8,0x0a,0x02, 0xe5,0xe6,0x04,0xe5,0x01,0x01,0x03,0xe5,0x07,0x03,0xe5,0x04,0x07,0x01,0x02,0xe5, 0x02,0x02,0x08,0xe7,0x04,0x02,0xe5,0x05,0x07,0x03,0xe5,0x05,0xe5,0x06,0xe5,0x07, 0xe5,0x0b,0x07,0xe5,0x07,0xe5,0x01,0xe5,0x07,0xe5,0x04,0x0c,0xe5,0x04,0x12,0xe5, 0x1b,0xe5,0x1d,0x02,0x01,0xe5,0x01,0x01,0x05,0x01,0xe5,0x01,0x0d,0xe5,0x11,0x01, 
0x04,0x09,0x13,0x01,0xe5,0x07,0xe5,0x01,0xe5,0x15,0x05,0x02,0x06,0x03,0x07,0xe5, 0x02,0x04,0x02,0x03,0xe5,0x03,0x01,0x01,0xe5,0xe6,0x03,0xe5,0x01,0xe5,0x07,0x06, 0xe5,0x04,0x05,0x0c,0x1d,0x1c,0x01,0x02,0xe5,0xe6,0x03,0x08,0x0d,0x09,0x02,0x01, 0xe5,0xe5,0x17,0x01,0xe5,0x1e,0x01,0x02,0x01,0xe5,0x01,0x1a,0x0d,0x04,0x01,0xe5, 0x03,0x10,0x09,0x01,0x0a,0x29,0xe5,0xe5,0x05,0xe5,0xe5,0x19,0xe5,0x02,0x04,0xe5, 0xe6,0x01,0x0a,0x1d,0x01,0x18,0x02,0x1d,0x09,0xe5,0x01,0x0f,0x06,0x0e,0x06,0xe5, 0x14,0x09,0x09,0x13,0x1a,0x02,0x06,0x02,0x1a,0x06,0x05,0xe5,0x0c,0x09,0x01,0x20, 0x04,0x01,0x02,0x02,0x06,0xe5,0x01,0x05,0x10,0x02,0x01,0x01,0xe5,0xe6,0xe5,0x08, 0x04,0x05,0x01,0xe5,0x05,0x02,0x04,0x02,0x03,0x07,0x01,0xe5,0xe5,0x02,0x02,0x02, 0x02,0xe5,0x01,0x02,0x02,0x03,0xe5,0x04,0x02,0xe5,0xe5,0x01,0x04,0x1b,0x01,0x07, 0x01,0x1b,0x07,0x01,0xe5,0xe5,0x01,0x0a,0x03,0x05,0x01,0x01,0x13,0x0f,0x01,0x01, 0x05,0x03,0x04,0x14,0x03,0x05,0x01,0x15,0x02,0x01,0xe5,0x02,0x07,0x04,0xe5,0xe7, 0x02,0x09,0x04,0xe5,0x02,0x04,0xe5,0x02,0x02,0xe6,0x03,0x02,0x01,0xe5,0xe5,0xe5, 0x08,0x04,0x14,0x03,0x05,0x03,0x1d,0x02,0x03,0xe5,0x0b,0x04,0x04,0x01,0x11,0x01, 0x0c,0x04,0x01,0x01,0x01,0xe5,0x05,0x03,0x17,0x04,0x01,0xe5,0x05,0x01,0x0a,0x04, 0x04,0x05,0xe5,0x05,0x07,0x06,0x01,0xe5,0xe5,0x01,0x02,0x03,0x04,0xe5,0x02,0x04, 0x02,0x09,0x01,0x07,0x07,0x43,0x02,0x01,0x02,0x0a,0x09,0x01,0x11,0x01,0x04,0x01, 0x04,0xe5,0x03,0x01,0x02,0x01,0x02,0x04,0x01,0x01,0x04,0xe7,0x0b,0x03,0xe5,0xe5, 0x01,0x03,0xe5,0xe5,0x05,0xe5,0x19,0x06,0x01,0x14,0x03,0xe5,0x01,0x01,0x03,0xe5, 0xe6,0x0a,0x01,0x01,0xe5,0x4d,0xe5,0x04,0x01,0x1b,0x1a,0x09,0x04,0x04,0x02,0xe5, 0x04,0x13,0x04,0x09,0x04,0x22,0x1a,0x09,0x04,0x0e,0x20,0x02,0x2e,0xe5,0xe5,0xe5, 0x1d,0x01,0x17,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x11,0xe5,0x07,0xe5,0x07,0xe5, 0x01,0x1b,0xe5,0x03,0x11,0x05,0xe5,0x07,0xe5,0x0b,0x05,0xe5,0x0b,0x13,0x09,0x09, 0x20,0xe6,0xe5,0x0b,0x02,0x01,0x01,0x02,0x03,0xe5,0xe5,0x01,0x09,0x09,0xe5,0x07, 
0xe7,0xe5,0xe5,0x01,0xe6,0x03,0x02,0xe5,0x07,0x04,0x01,0x02,0xe5,0xe5,0xe5,0xe5, 0x01,0xe5,0x07,0xe5,0x01,0x05,0x09,0x0b,0xe5,0x07,0x09,0x03,0x05,0xe5,0x07,0xe5, 0x07,0x05,0x03,0xe5,0x07,0x05,0x03,0x09,0x05,0x03,0x03,0x05,0x05,0x03,0x09,0x04, 0x01,0x06,0xe5,0xe7,0x01,0x45,0x06,0x16,0x09,0x13,0x09,0x15,0x1d,0x09,0x13,0x13, 0x09,0x09,0x1d,0x04,0x02,0x03,0x39,0x0d,0x0f,0x0c,0x06,0x03,0x0f,0x01,0x09,0x0e, 0x04,0x01,0x1b,0x02,0x06,0x01,0x11,0x01,0x13,0x09,0x09,0x1f,0x09,0x01,0xe5,0xe5, 0x14,0x01,0x09,0x13,0xe7,0x06,0xe6,0x03,0x01,0xe5,0xe5,0x05,0xe7,0x0a,0x01,0x02, 0xe7,0xe5,0x03,0xe7,0x03,0x01,0xe7,0x0a,0x0b,0x02,0x03,0x0e,0xe5,0x0a,0xe7,0x05, 0xe7,0x0f,0xe7,0x16,0xe5,0xe5,0x06,0x10,0x1d,0x01,0x01,0x17,0x01,0xe5,0x05,0x02, 0x12,0xe6,0x08,0x04,0xe5,0x01,0xe5,0x01,0x02,0x02,0xe5,0x0e,0x02,0xe5,0x07,0xe5, 0xe5,0x01,0xe5,0xe7,0x01,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0x03,0x06,0xe6,0x06, 0xe5,0x02,0x02,0x02,0x03,0x04,0xe6,0x01,0x02,0x02,0xe5,0x01,0x05,0x05,0x02,0xe6, 0x03,0x03,0x10,0x01,0xe5,0x04,0x02,0xe5,0x08,0x21,0xe5,0xe6,0x16,0x01,0x03,0x05, 0x03,0xe5,0x0d,0x0b,0x02,0x04,0x09,0xe5,0x11,0xe5,0x02,0x01,0x02,0xe5,0x24,0x02, 0x01,0x02,0x08,0xe5,0x03,0x09,0xe5,0x07,0x01,0x07,0x09,0xe5,0x07,0x11,0x01,0x09, 0x09,0x03,0x1f,0x01,0xe5,0x14,0xe6,0x06,0xe6,0x06,0xe5,0x07,0xe5,0x01,0xe5,0x03, 0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe5,0x02,0x04,0xe6, 0x04,0xe6,0xe5,0x04,0xe6,0x04,0x04,0xe6,0x01,0x03,0xe5,0xe5,0x01,0x04,0xe6,0x01, 0x03,0x01,0xe6,0x06,0xe6,0x04,0xe6,0x02,0x03,0x01,0xe6,0x04,0xe6,0x08,0xe6,0xe5, 0x04,0xe6,0x04,0xe5,0xe6,0x04,0xe6,0x08,0xe5,0x07,0xe5,0x06,0x03,0x04,0x12,0x01, 0xe5,0x07,0xe5,0x08,0x08,0xe5,0x08,0x12,0xe6,0x10,0xe6,0x06,0x07,0x01,0xe5,0x07, 0xe5,0x01,0xe6,0x02,0xe6,0xe7,0x02,0x02,0x08,0xe6,0xe6,0x08,0x04,0x09,0xe5,0x07, 0xe5,0x01,0xe6,0x02,0x09,0xe5,0x01,0xe6,0x09,0xe5,0xe6,0x0b,0xe5,0x03,0x03,0xe6, 0x03,0x1a,0xe5,0x16,0x01,0xe5,0x05,0x02,0x12,0xe5,0xe5,0x06,0x01,0x09,0x03,0xe5, 
0x03,0x0e,0x04,0x09,0x04,0x01,0xe5,0xe5,0x0c,0x09,0x02,0x04,0x03,0x09,0x09,0x02, 0x02,0x02,0x01,0x02,0x01,0x04,0x02,0x05,0x02,0x02,0x02,0x01,0x08,0x0a,0x01,0xe5, 0x0e,0xe5,0x07,0x08,0x13,0xe7,0x16,0x01,0x09,0x0a,0x08,0x1d,0x13,0x09,0x13,0xe5, 0x03,0x03,0x05,0x02,0x02,0xe5,0x02,0x01,0x02,0x05,0xe5,0x08,0x02,0x03,0x05,0x0f, 0x03,0x03,0x06,0x04,0x0d,0x0f,0xe5,0x07,0x04,0x18,0x02,0x10,0x06,0x01,0xe5,0x05, 0x02,0x09,0x08,0x18,0x01,0xe5,0x0b,0x06,0x20,0x02,0x06,0x02,0x05,0x06,0x05,0x02, 0x06,0x03,0x04,0x03,0x03,0x0c,0x02,0x05,0x0a,0x02,0x0a,0x02,0x01,0xe5,0x0c,0x09, 0x05,0x02,0x14,0xe8,0x02,0x0b,0xe5,0x32,0x04,0x02,0x24,0x02,0x10,0x09,0x15,0x03, 0xe5,0x03,0x01,0xe5,0x19,0x03,0x0f,0x1b,0x05,0x05,0x25,0x02,0x01,0x0f,0x01,0x07, 0x01,0x07,0x01,0x07,0x01,0xe6,0x04,0x01,0xe5,0xe5,0x03,0x01,0x01,0x05,0x01,0x07, 0x01,0x01,0x05,0x01,0xe5,0xe5,0x03,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0xe5,0x01, 0x03,0x01,0x03,0x03,0x01,0x09,0x01,0xe5,0x02,0x02,0x01,0x04,0x02,0x01,0x01,0x05, 0x01,0xe5,0x05,0x01,0x04,0x02,0x01,0x07,0x01,0x03,0x03,0xe6,0xe5,0x04,0x01,0x07, 0x01,0x01,0x05,0x01,0x03,0x03,0x01,0x01,0x05,0x01,0x07,0x01,0x02,0x04,0x06,0x2d, 0xe5,0x07,0x01,0x07,0xe6,0x01,0x02,0xe6,0x03,0x04,0xe5,0xe5,0x07,0xe5,0x09,0x03, 0xe5,0x09,0x05,0x03,0xe5,0x01,0x05,0x03,0x04,0x02,0xe6,0xe5,0x1e,0x08,0xe5,0x0c, 0x09,0x21,0x10,0x03,0x15,0xe7,0x01,0x08,0x03,0x05,0x13,0xe5,0x07,0x0a,0x08,0x0a, 0x08,0x0d,0x02,0xe5,0x0a,0x02,0x05,0x02,0x05,0x04,0x06,0x03,0x04,0x0b,0x14,0x08, 0x11,0x01,0x0a,0x09,0x03,0x02,0x02,0x09,0x0b,0x13,0x04,0x01,0x02,0x01,0x0a,0x09, 0x06,0x0a,0x04,0x05,0x02,0x02,0xe5,0x05,0xe5,0x07,0x02,0x06,0x02,0x01,0x0b,0x04, 0x0b,0x08,0x05,0x02,0x04,0x07,0x03,0x01,0x17,0x0c,0x06,0x08,0x06,0x0a,0x01,0x08, 0x1e,0x21,0xe5,0xe5,0x26,0x07,0xe5,0x06,0x03,0x05,0x08,0x1f,0x18,0xe5,0x08,0xe5, 0x07,0x03,0xe5,0x02,0xe5,0x03,0xe5,0x04,0xe7,0x02,0x05,0x03,0x0a,0x01,0x02,0x03, 0x05,0xe5,0x04,0x02,0x10,0xe6,0x06,0x06,0x02,0x01,0x0f,0x06,0x01,0x08,0xe5,0x01, 
0xe5,0x1c,0x0b,0x27,0x25,0x09,0x06,0x0a,0x04,0xe5,0x01,0x04,0x02,0x04,0x02,0xe5, 0x01,0x06,0x07,0x01,0x0e,0x13,0x0c,0x04,0x0a,0x0a,0x03,0x01,0x0f,0x08,0x04,0x01, 0x09,0x1d,0x09,0x05,0xe5,0x07,0x04,0x02,0x01,0xe5,0xe5,0x07,0x03,0x04,0xe6,0x04, 0xe5,0x08,0x05,0x0d,0x04,0x01,0x06,0x03,0x17,0x03,0x13,0x05,0x02,0x10,0x02,0x1e, 0x03,0x01,0x0d,0x09,0x09,0x02,0xe6,0x11,0x08,0x12,0x06,0x08,0xe5,0x01,0x06,0x0c, 0x0d,0x02,0x04,0x04,0x04,0x01,0x01,0x07,0x01,0xe7,0x01,0xe5,0x01,0x11,0x01,0x05, 0x06,0xe5,0x07,0x03,0xe5,0x03,0x01,0xe5,0x05,0xe6,0x09,0x06,0xe6,0x02,0x02,0x05, 0x07,0x0f,0xe7,0x05,0x1f,0x03,0x0e,0x1d,0x04,0x02,0x20,0x09,0x05,0x20,0x09,0x04, 0xe5,0x04,0x0b,0x02,0x07,0x01,0x03,0x02,0x09,0x0b,0x23,0x27,0x0f,0x1b,0x08,0x06, 0xe6,0x06,0xe5,0x06,0x10,0x03,0xe6,0xe5,0x01,0x02,0x05,0xe5,0xe7,0xe5,0x01,0x04, 0x0b,0x06,0xe5,0x0d,0x01,0x01,0x01,0xe5,0x06,0xe6,0xe5,0x04,0x08,0x05,0x0b,0x03, 0xe5,0xe6,0xe5,0x02,0xe5,0x01,0x06,0x1c,0xe5,0x2c,0xe5,0x01,0x25,0x07,0x07,0xe5, 0x01,0x0e,0x0d,0x06,0x0b,0x06,0x03,0xe5,0x02,0x0a,0xe5,0xe5,0x02,0x0d,0x03,0x09, 0x01,0x1d,0x05,0x02,0x06,0xe5,0x09,0x18,0x02,0xe5,0x06,0x04,0x1c,0x05,0x01,0x01, 0x01,0x19,0x09,0x09,0x06,0x01,0xe6,0x03,0x03,0x05,0x03,0x0c,0x02,0xe5,0x05,0xe6, 0x01,0x06,0x03,0x01,0x0e,0x09,0x09,0xe5,0x02,0x02,0x01,0x04,0x03,0x01,0x04,0x0e, 0x04,0x01,0x07,0x09,0x05,0x02,0xe5,0x07,0x09,0x09,0x05,0xe5,0x20,0x02,0xe6,0x06, 0x01,0x0f,0x01,0x24,0x01,0xe5,0x05,0x0b,0xe5,0x01,0x02,0xe5,0x01,0x06,0x01,0x07, 0x02,0x0c,0x10,0x03,0x02,0xe6,0xe6,0x11,0x01,0x0e,0xe5,0xe5,0x13,0x07,0x24,0xe6, 0x17,0x06,0x03,0x01,0xe5,0x01,0x0e,0x13,0x1c,0x09,0x0c,0x05,0x09,0x0a,0x0a,0x14, 0x05,0x29,0x16,0x04,0x10,0x07,0x18,0x1f,0xe6,0x07,0xe5,0x02,0xe6,0x0b,0xe5,0x0a, 0x02,0xe6,0x03,0x02,0xe5,0x01,0xe5,0x01,0xe5,0xe6,0x04,0x03,0x01,0xe5,0xe5,0xe7, 0x04,0x02,0xe5,0xe6,0x01,0x03,0x06,0x02,0xe5,0x06,0x02,0xe5,0x05,0x02,0x01,0x06, 0xe5,0x07,0x01,0xe5,0x01,0x03,0x02,0x01,0xe5,0x03,0x01,0xe5,0x03,0xe8,0x04,0xe5, 
0x08,0x09,0x09,0x09,0xe5,0xe6,0x02,0x01,0x02,0xe5,0x04,0x02,0x01,0x04,0x02,0xe5, 0x0a,0x02,0xe5,0x09,0x01,0xe6,0xe5,0x01,0x01,0x05,0xe5,0x1e,0x02,0x06,0x02,0x06, 0x03,0x02,0x01,0xe5,0x08,0x09,0x02,0x02,0xe5,0x01,0xe6,0xe5,0x05,0x02,0x0b,0xe5, 0x04,0x03,0xe5,0x07,0x01,0x01,0xe6,0xe5,0x02,0x01,0xe5,0x01,0x03,0x01,0xe5,0x0e, 0xe5,0x08,0xe5,0x03,0x0a,0x08,0x03,0xe5,0x04,0x09,0x02,0x01,0x03,0x03,0x01,0x03, 0x10,0x03,0x07,0x01,0x02,0xe8,0x03,0x0c,0x19,0x09,0x05,0xe5,0x08,0x01,0x05,0x04, 0x01,0x01,0x05,0x04,0x04,0x01,0x09,0x02,0x26,0x07,0x06,0x01,0x03,0x0a,0xe5,0x07, 0x0f,0x01,0x43,0x0b,0x04,0x02,0xe5,0x01,0x28,0x09,0x09,0x06,0xe5,0xe5,0x08,0x01, 0x07,0x05,0xe5,0x01,0x05,0xe5,0x2a,0xe5,0x08,0x05,0xe5,0x02,0x02,0x20,0xe5,0xe5, 0xe5,0x42,0x0d,0x02,0xe5,0x01,0x01,0x03,0x25,0x09,0x01,0x0c,0xe5,0x01,0x05,0xe5, 0x01,0xe5,0x08,0x09,0x01,0x02,0x09,0xe5,0x01,0x0a,0x01,0x02,0x09,0x08,0x02,0xe5, 0x01,0xe5,0x03,0x07,0x01,0x04,0x01,0x07,0x01,0x07,0x01,0x02,0x09,0x04,0x01,0x02, 0x09,0x09,0x09,0x04,0x01,0x02,0x0e,0x0f,0xe6,0xe6,0x01,0x28,0x03,0x05,0x01,0x01, 0x09,0x09,0x05,0x03,0x05,0x03,0x05,0x01,0x01,0x09,0x0f,0x01,0x01,0x11,0x03,0x05, 0x03,0x08,0x01,0x04,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x09,0x05,0x01,0x01, 0x23,0x01,0x01,0x0f,0x03,0x09,0x02,0xe5,0x01,0x02,0x02,0x0c,0x18,0x09,0x01,0x07, 0x05,0xe5,0x01,0x06,0x02,0x01,0x02,0x04,0x04,0x01,0x02,0x01,0x01,0xe5,0x05,0x04, 0x07,0x04,0x01,0x03,0xe5,0x08,0x06,0x01,0xe5,0x03,0x13,0x01,0x07,0x01,0x03,0xe5, 0x01,0x01,0x03,0xe5,0x01,0x01,0x07,0x01,0x04,0x01,0x18,0x05,0x01,0x04,0x0c,0x09, 0xe5,0x03,0x04,0x2b,0x09,0x01,0x07,0x03,0xe5,0xe5,0xe6,0x02,0xe5,0x01,0x01,0x01, 0x07,0x05,0xe6,0xe5,0xe5,0x01,0x06,0x01,0xe5,0x01,0x08,0x01,0xe5,0xe5,0xe6,0xe5, 0xe5,0x03,0xe8,0x02,0x04,0x04,0x03,0xe5,0x06,0x06,0x01,0x07,0x01,0x04,0x01,0xe5, 0xe5,0xe6,0x03,0x01,0x01,0xe7,0x01,0x01,0x01,0xe5,0xe5,0x05,0xe7,0x05,0xe7,0x06, 0x01,0x02,0x01,0x01,0xe5,0xe7,0x09,0x09,0x07,0xe7,0x08,0x31,0x02,0x03,0x02,0x02, 
0x03,0x02,0x1a,0x09,0x13,0x09,0x0b,0x09,0x0c,0x1a,0x09,0x09,0x04,0x04,0x09,0x09, 0x04,0x04,0x20,0x03,0xe5,0xe5,0x07,0x01,0x2f,0x05,0xe5,0x01,0x05,0xe5,0x03,0x17, 0xe5,0x07,0xe5,0x01,0x0f,0xe5,0x07,0xe5,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x03,0x17, 0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x21,0x01,0xe5, 0x07,0x05,0x09,0x09,0x09,0x09,0x05,0xe5,0x01,0xe5,0x03,0xe5,0x01,0xe5,0x04,0x02, 0x06,0x02,0x09,0xe6,0x03,0x02,0xe5,0x01,0x05,0x09,0xe5,0x02,0x01,0x02,0xe5,0x09, 0xe6,0x03,0x02,0xe5,0x04,0x02,0xe5,0x02,0x01,0x02,0x09,0x09,0xe5,0x04,0x02,0xe5, 0x07,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5,0x07,0x06,0x02,0x02,0x0a, 0x03,0xe5,0x01,0x3a,0xe5,0x07,0xe5,0x07,0x07,0x02,0x12,0xe5,0x2f,0x02,0x08,0xe5, 0x08,0x1a,0x02,0x12,0xe5,0x2f,0xe5,0x08,0x04,0x02,0x03,0x3d,0x05,0x03,0x05,0x0d, 0x13,0x05,0x13,0x09,0x06,0x08,0x05,0x02,0x06,0x03,0x19,0x03,0x05,0x09,0x03,0x05, 0x09,0x13,0x0d,0x07,0x0e,0x35,0x01,0x09,0xe7,0x05,0xe7,0x03,0x01,0x07,0x01,0x07, 0x02,0xe6,0x06,0xe6,0x02,0xe5,0xe5,0x03,0x01,0xe5,0xe8,0xe5,0x03,0xe7,0x07,0xe5, 0xe5,0x05,0xe5,0xe5,0x03,0x02,0xe5,0x03,0x0c,0x04,0x05,0xe6,0x05,0xe7,0xe5,0x04, 0xe6,0x05,0xe7,0x06,0xe6,0x06,0xe6,0xe5,0x03,0xe7,0x0f,0x0f,0xe7,0x34,0x01,0xe5, 0x04,0xe5,0xe7,0x04,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x05,0x02,0xe5,0x07, 0xe5,0x04,0x01,0xe5,0x05,0x01,0xe6,0x04,0x02,0xe5,0x01,0x01,0x04,0xe6,0x03,0xe5, 0x01,0xe5,0x04,0x02,0xe5,0x07,0x08,0xe5,0x09,0x05,0x02,0xe5,0x08,0x05,0x02,0xe5, 0x08,0x09,0x04,0x02,0xe6,0x0e,0x02,0x0d,0xe5,0x01,0xe5,0x33,0x01,0x04,0x06,0x02, 0x09,0x02,0x01,0x04,0x02,0x01,0x06,0xe5,0x01,0x03,0x05,0x06,0x01,0x07,0x01,0x01, 0x07,0x08,0x02,0x03,0x05,0x09,0x13,0x03,0x07,0x04,0x02,0xe5,0x01,0x07,0x04,0x02, 0x01,0x09,0x01,0xe5,0x05,0x07,0x10,0x02,0x03,0x0c,0x01,0xe5,0x07,0x0b,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe6,0xe5,0x04,0xe6,0x06,0xe6,0x01,0x04,0xe6,0xe5,0x04,0xe6, 0xe5,0x04,0xe6,0x06,0xe5,0x07,0xe6,0xe5,0x04,0xe6,0xe5,0x02,0xe5,0xe6,0x01,0x02, 
0xe6,0x04,0x04,0xe7,0x04,0xe8,0x01,0x04,0x09,0xe5,0x01,0x05,0xe5,0x05,0xe8,0x06, 0xe5,0x05,0xe8,0x06,0xe5,0x07,0xe5,0x05,0xe7,0x01,0xe5,0x01,0xe7,0x07,0xe6,0x06, 0xe5,0x06,0xe6,0xe5,0x34,0x01,0x14,0x08,0xe5,0x07,0xe6,0x04,0x0b,0xe6,0x07,0x12, 0xe5,0x03,0x05,0x06,0x02,0x06,0xe5,0x01,0xe5,0x06,0xe5,0x07,0xe6,0x0d,0x02,0xe6, 0x07,0x05,0x02,0x1a,0x02,0xe6,0x03,0x1b,0xe6,0xe5,0x34,0x01,0x0c,0x09,0x04,0x02, 0x06,0x01,0x07,0x04,0x04,0x01,0x02,0x13,0xe5,0x01,0xe5,0x03,0xe5,0x01,0x04,0x02, 0x01,0x01,0xe5,0x03,0xe5,0xe6,0x01,0x02,0x03,0xe5,0x0a,0x0c,0xe5,0xe7,0x03,0x09, 0xe5,0xe7,0x03,0x09,0x09,0xe5,0xe6,0x01,0x02,0x01,0xe6,0x1a,0x01,0xe6,0x4b,0x08, 0x09,0x07,0x16,0x0d,0xe6,0x01,0xe5,0x02,0xe5,0x01,0x02,0x09,0x04,0xe5,0x02,0x09, 0xe5,0x07,0x0e,0xe5,0x02,0x0a,0x03,0xe5,0x02,0x18,0xe5,0x02,0x03,0x01,0x1d,0xe7, 0x01,0x21,0x2e,0x02,0x06,0x01,0xe5,0x05,0x09,0x0c,0x0c,0x03,0x05,0x07,0x04,0x02, 0x05,0x05,0x04,0x02,0x02,0x08,0xe5,0x0b,0x03,0x0f,0x03,0x19,0x05,0x04,0x05,0x19, 0x01,0xe5,0x26,0xe5,0x01,0x01,0x0a,0x05,0x02,0x06,0x02,0x10,0x09,0x01,0xe5,0x05, 0x02,0x15,0x15,0x04,0x02,0x09,0x04,0x2d,0x38,0x01,0xe5,0x0b,0xe6,0xe5,0x10,0x01, 0x07,0x01,0x07,0x01,0x07,0x01,0xe6,0x04,0x01,0xe5,0xe5,0x01,0x01,0x01,0xe6,0x02, 0x01,0x01,0xe5,0x05,0x01,0xe5,0x04,0xe5,0xe5,0x02,0x03,0x01,0xe5,0x03,0x01,0x01, 0xe6,0x04,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x07,0x01,0x01,0x01,0x03,0x01, 0x01,0x05,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x02,0x04,0x01,0x07,0x01, 0x02,0x04,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x02,0x04,0x01,0x06,0xe7,0x0c,0xe5, 0x01,0x37,0x06,0xe5,0x01,0x05,0xe5,0x0b,0x04,0x0a,0x03,0x02,0x02,0xe5,0x01,0x05, 0xe5,0x11,0xe5,0x03,0x02,0x02,0x07,0x01,0x01,0x05,0x01,0xe5,0x07,0x03,0x12,0x06, 0x01,0x03,0x03,0xe5,0x07,0x02,0x07,0xe5,0x15,0x06,0x07,0xe5,0x0e,0xe7,0x0e,0x03, 0x0b,0x13,0x08,0x02,0xe5,0x02,0x02,0xe5,0x02,0x01,0x02,0x06,0x02,0x03,0x09,0x06, 0x05,0x04,0x04,0x0f,0xe5,0x04,0x05,0x10,0x01,0x06,0x09,0x04,0x16,0x03,0x12,0x07, 
0x1b,0xe5,0x0e,0x02,0x02,0xe6,0x1b,0x12,0xe5,0x02,0x06,0x02,0x0c,0x02,0x19,0x04, 0x04,0x03,0x06,0x01,0x0f,0x02,0x05,0x04,0xe5,0x02,0x01,0x01,0x01,0x01,0x09,0x06, 0x0c,0x02,0xe5,0x06,0x03,0x06,0x02,0x06,0x02,0x11,0x1e,0x01,0x0e,0xe6,0xe6,0x11, 0x06,0x09,0xe5,0x10,0x04,0x02,0x01,0x06,0xe5,0xe6,0x02,0x04,0x07,0xe6,0x0a,0x05, 0x01,0xe5,0x02,0xe5,0xe5,0xe6,0x02,0xe5,0xe5,0xe5,0x10,0x0e,0x04,0x03,0x07,0xe5, 0xe5,0x02,0x06,0x01,0x1e,0x06,0x0d,0x1f,0x01,0x0d,0x01,0x02,0x1b,0x0b,0x01,0x0c, 0x04,0x15,0x02,0x04,0x08,0x19,0x01,0x07,0x01,0x11,0x08,0x13,0x1a,0x17,0x24,0x01, 0x0d,0x0c,0x05,0xe5,0x07,0x05,0x0d,0x13,0x0f,0x03,0x04,0xe5,0x02,0x05,0x07,0x06, 0x04,0x03,0x07,0x15,0x07,0x01,0x03,0x04,0x0c,0x01,0x01,0x05,0x09,0x01,0x05,0x0b, 0x0b,0x17,0x0d,0x17,0x0f,0x03,0xe5,0x0c,0x04,0xe5,0x11,0x07,0x0b,0x04,0x09,0x02, 0x06,0x13,0xe7,0x03,0x04,0x09,0x09,0x0f,0x08,0x1a,0x09,0x13,0x0b,0x07,0x04,0x01, 0x18,0x04,0x01,0x07,0x01,0xe5,0x0c,0x03,0x2f,0x0a,0x10,0x0f,0x07,0x14,0x07,0x01, 0x07,0x01,0x07,0x04,0x0f,0x03,0x12,0x07,0x1f,0x07,0x09,0x12,0x19,0x02,0x04,0x02, 0x08,0x30,0x06,0x07,0x06,0x02,0xe7,0x06,0xe5,0x03,0x01,0x02,0x04,0x07,0x01,0x09, 0x03,0xe5,0x03,0x03,0xe5,0x04,0x01,0xe5,0x01,0x04,0xe5,0xe5,0x11,0x02,0x02,0xe5, 0x04,0x06,0x01,0xe5,0x07,0xe5,0x06,0xe5,0xe5,0x05,0x0a,0xe6,0x06,0xe6,0x07,0x10, 0xe5,0x10,0x03,0x03,0x3c,0x10,0x03,0x01,0x04,0xe6,0x01,0x06,0x1a,0xe5,0x02,0x08, 0x01,0x05,0x02,0x0c,0x04,0x04,0x07,0x06,0x03,0x04,0x01,0x02,0x06,0x01,0x04,0x15, 0x02,0x08,0x09,0x05,0x15,0x09,0x01,0xe5,0x01,0x06,0x04,0x02,0x14,0x09,0x07,0x01, 0xe5,0x03,0x03,0x0f,0x06,0x08,0x03,0xe5,0x0e,0x03,0x02,0x03,0xe6,0x02,0x04,0x04, 0x0b,0x07,0x02,0xe5,0x06,0x06,0x1d,0x01,0x1a,0x1d,0xe5,0x07,0x0e,0xe5,0x01,0x01, 0x01,0x0b,0x01,0x2f,0x01,0x11,0x01,0x09,0x04,0x01,0xe5,0x06,0x01,0x01,0x08,0x01, 0x09,0x03,0x05,0x0b,0x11,0x1c,0x16,0x09,0x06,0x1d,0x09,0x0e,0x03,0x10,0x0b,0x08, 0x1c,0x13,0x12,0x01,0x0b,0x08,0x20,0x1b,0x09,0x07,0x17,0x07,0x08,0x01,0x18,0x01, 
0x07,0x01,0x0f,0xe5,0xe7,0x0a,0x19,0xe5,0x04,0xe5,0x01,0xe5,0x03,0x0a,0xe5,0xe6, 0xe5,0x03,0xe6,0x05,0x01,0xe5,0x01,0x02,0xe6,0x03,0x03,0x05,0x01,0xe6,0xe6,0x01, 0x02,0xe5,0xe6,0x01,0x05,0xe5,0x05,0xe5,0x07,0xe9,0x01,0x05,0xe5,0x0b,0x01,0x01, 0x02,0xe5,0xe5,0x02,0x05,0x03,0x08,0xe5,0x08,0x05,0x06,0xe5,0x04,0x01,0xe5,0x05, 0x09,0x1e,0xe6,0xe5,0xe5,0x01,0x01,0x08,0xe5,0x18,0xe5,0x07,0xe5,0x0d,0x03,0x01, 0xe5,0x04,0xe5,0x04,0x02,0xe5,0x01,0x02,0x06,0xe7,0x02,0xe5,0xe6,0x03,0xe5,0x01, 0x05,0xe5,0x01,0x05,0xe5,0x10,0x01,0x01,0x01,0xe5,0x03,0x01,0x01,0x05,0x01,0xe5, 0xe5,0x01,0x03,0xe5,0xe5,0x02,0x05,0x03,0xe5,0x07,0x09,0x05,0x05,0xe5,0x07,0xe5, 0x05,0x09,0x05,0xe5,0x08,0xe5,0x08,0x01,0x02,0xe8,0x03,0x04,0xe5,0x01,0x01,0x0e, 0xe5,0xe5,0xe5,0x03,0x01,0xe5,0xe5,0xe5,0x01,0xe5,0xe5,0xe5,0x01,0x01,0x01,0x02, 0x04,0x01,0x03,0x03,0x01,0x02,0x07,0x05,0xe5,0x01,0x01,0x07,0x01,0x01,0x05,0x03, 0x05,0x04,0xe6,0x01,0x01,0x01,0xe7,0x03,0x01,0x07,0x09,0x01,0x07,0x09,0x09,0x1d, 0x06,0x01,0xe5,0x02,0xe5,0x13,0xe5,0x03,0x09,0x09,0x06,0xe6,0x01,0x07,0x02,0x01, 0x0e,0x02,0x01,0x07,0x01,0x07,0x01,0x04,0x04,0x04,0xe5,0x01,0x02,0x02,0x04,0x07, 0x09,0x01,0x07,0x09,0x09,0x09,0x01,0x09,0x01,0x07,0x06,0xe5,0xe5,0xe5,0x06,0x09, 0x09,0x1d,0x06,0x02,0x19,0xe5,0x01,0x0b,0x0b,0x03,0x02,0x0b,0x01,0x01,0x0f,0x1d, 0x03,0x02,0xe5,0x01,0xe5,0x01,0x01,0x02,0x03,0x02,0xe5,0x01,0xe5,0x0a,0x02,0x01, 0x01,0xe5,0xe5,0x02,0x02,0xe5,0x08,0x04,0x09,0x10,0x04,0x09,0x04,0x04,0x04,0x04, 0x04,0x04,0x13,0x04,0x07,0x06,0x01,0x01,0x05,0x09,0x06,0x02,0xe5,0x01,0xe5,0xe5, 0x01,0x0f,0x02,0x0a,0x01,0x01,0x04,0x0e,0x09,0x09,0x09,0x04,0xe5,0x03,0x08,0x04, 0xe5,0x02,0x05,0x01,0x01,0x04,0xe5,0x02,0x04,0xe5,0x02,0x05,0x03,0x09,0x0b,0x05, 0x03,0x09,0x05,0x03,0x05,0x03,0x05,0x03,0x19,0x03,0x05,0x03,0x01,0x02,0x14,0x04, 0x01,0x01,0xe5,0xe5,0x01,0x08,0x02,0x02,0xe6,0x0b,0x01,0x05,0x0b,0x01,0x07,0x01, 0x07,0x01,0x04,0x04,0x01,0x02,0xe7,0xe5,0x08,0x01,0xe5,0x01,0x01,0x09,0x01,0x04, 
0xe7,0xe5,0x04,0x01,0x04,0x01,0x02,0x09,0x01,0x09,0x01,0x07,0x09,0x01,0x01,0x01, 0xe5,0x01,0x09,0x09,0x06,0x13,0x01,0xe5,0x0c,0x09,0x09,0x02,0x02,0x01,0x07,0x01, 0x0d,0xe9,0x0c,0x01,0x11,0x01,0x07,0x01,0x07,0x01,0x05,0x06,0x01,0x02,0x01,0x02, 0x01,0x07,0x04,0x09,0x01,0x01,0xe7,0x01,0x09,0x05,0xe6,0xe5,0x02,0xe5,0x03,0x01, 0x01,0xe5,0x04,0xe5,0xe5,0x06,0x03,0xe5,0xe5,0xe6,0xe5,0x01,0x01,0x02,0x03,0xe5, 0xe5,0xe6,0x02,0xe5,0x03,0x03,0xe8,0x0e,0xe5,0xe5,0x01,0x09,0x04,0x09,0x09,0x01, 0xe6,0x0b,0x0b,0x02,0xe5,0x01,0x13,0x02,0x2b,0x02,0xe6,0x08,0x04,0x13,0x02,0x02, 0x17,0x09,0x15,0x02,0x06,0x04,0x04,0x09,0x09,0x0c,0xe5,0x04,0x04,0x0e,0x09,0x09, 0x0c,0xe5,0x04,0x02,0x02,0x05,0x01,0x02,0xe5,0xe5,0x11,0xe5,0x25,0x05,0xe5,0x07, 0xe5,0x01,0x05,0xe5,0x01,0xe5,0x04,0x08,0xe5,0x01,0xe5,0x17,0xe5,0x07,0xe5,0x13, 0xe5,0x03,0x03,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x11,0xe5,0x11,0xe5,0x01,0x05, 0xe5,0x07,0xe5,0x0b,0x06,0x02,0x0b,0x01,0xe5,0x08,0x01,0x02,0x04,0x01,0x02,0x09, 0x09,0x09,0x03,0x02,0x02,0xe6,0x06,0xe6,0x02,0x03,0xe6,0x01,0x01,0x02,0x09,0xe5, 0xe5,0x05,0x09,0x09,0xe5,0x07,0xe5,0x06,0x02,0x09,0xe5,0x07,0xe6,0x06,0xe5,0xe5, 0x05,0xe5,0x07,0xe5,0x07,0x09,0xe5,0x07,0x04,0x01,0x02,0xe6,0xe5,0x04,0xe6,0x06, 0xe6,0x06,0x04,0xe6,0x01,0x01,0x03,0x05,0x01,0xe5,0xe5,0xe5,0x01,0x09,0x09,0x26, 0x1e,0x31,0x1f,0x7c,0x04,0x01,0x0a,0x09,0x46,0x05,0x09,0x1d,0x01,0x07,0x06,0x0e, 0x02,0x10,0x09,0x09,0x13,0x48,0xe6,0xe5,0xe5,0x13,0x02,0x1b,0x01,0x0a,0x01,0x03, 0xe5,0xe8,0x02,0x03,0x01,0x0d,0x02,0xe7,0x03,0x01,0x04,0x02,0x01,0x04,0x04,0xe7, 0x05,0xe5,0xe5,0x05,0x01,0x07,0x01,0xe5,0xe5,0xe5,0x03,0xe7,0x03,0x01,0xe7,0x06, 0xe6,0xe5,0x03,0xe7,0x02,0xe5,0xe5,0x03,0x05,0xe6,0x0d,0x01,0xe5,0xe5,0xe5,0x04, 0xe6,0xe5,0x04,0xe6,0x03,0x0c,0x0f,0xe6,0x0f,0x06,0x02,0x17,0x02,0x01,0xe5,0x08, 0xe5,0x04,0x01,0xe6,0x07,0xe5,0x03,0x03,0xe5,0x06,0xe6,0x04,0x01,0xe5,0x05,0x01, 0xe5,0x07,0xe6,0x03,0xe5,0x01,0xe5,0x06,0x01,0xe5,0x02,0x02,0x01,0xe6,0x04,0x02, 
0xe5,0x03,0xe5,0xe7,0x01,0x06,0x04,0x02,0xe6,0x01,0x02,0x01,0xe5,0x09,0x02,0x0c, 0x02,0xe5,0x08,0x09,0x05,0x03,0x02,0x05,0xe5,0x04,0xe5,0x02,0x01,0x02,0xe5,0x0c, 0x0b,0x22,0x13,0x16,0x01,0x01,0x05,0x01,0x03,0x02,0xe5,0xe5,0x08,0xe5,0x01,0x05, 0x06,0x01,0xe5,0xe5,0x05,0x09,0x02,0x03,0x03,0x01,0x01,0x0b,0x05,0xe5,0x01,0x11, 0x0c,0x04,0x03,0x09,0x06,0x03,0x02,0x01,0x03,0x09,0x0a,0xe5,0x01,0x15,0xe6,0x06, 0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x07,0xe6, 0xe5,0x04,0xe6,0xe5,0x04,0xe6,0xe5,0x04,0xe5,0x01,0x03,0xe8,0x04,0xe7,0x01,0x01, 0x04,0xe6,0x05,0xe8,0x01,0x03,0xe7,0xe5,0x04,0xe5,0x02,0x04,0xe5,0x01,0x03,0xe7, 0x07,0xe5,0x05,0xe7,0x07,0xe5,0x07,0xe5,0x02,0x02,0xe7,0x05,0xe8,0xe5,0x04,0xe5, 0x07,0xe7,0x02,0x01,0x03,0x04,0x14,0x1b,0x01,0x07,0x01,0xe6,0x0e,0x01,0x09,0x09, 0xe5,0x07,0xe5,0x08,0xe5,0x06,0x09,0xe5,0x0a,0x06,0x01,0x09,0xe5,0x12,0x08,0xe6, 0x04,0x01,0xe5,0x08,0x09,0x09,0x09,0x09,0xe5,0x06,0x19,0x01,0xe5,0x16,0x1d,0x01, 0x07,0x04,0x09,0x04,0x04,0x04,0x04,0x06,0x02,0x06,0x13,0x02,0xe5,0x01,0xe5,0x01, 0x01,0xe5,0x01,0x09,0x01,0xe5,0xe5,0x01,0xe5,0x01,0xe5,0x03,0x03,0x05,0x07,0x01, 0x06,0x02,0xe5,0x01,0xe5,0xe6,0x08,0x01,0xe5,0x01,0x0d,0x01,0x07,0x01,0xe5,0x01, 0x05,0xe5,0x01,0x02,0x01,0x09,0x0c,0x01,0xe6,0x0b,0x0c,0x27,0x13,0x09,0x0a,0x08, 0xe5,0x11,0x04,0xe5,0x02,0x04,0xe5,0x01,0x03,0x0d,0xe5,0x02,0xe5,0x02,0x18,0x04, 0xe5,0x11,0xe5,0x03,0x17,0xe5,0x07,0xe5,0x0a,0x10,0x01,0xe6,0x16,0x20,0x24,0x15, 0xe5,0x08,0x08,0x03,0x01,0xe6,0x04,0x01,0xe5,0x04,0x03,0x01,0x06,0x01,0xe6,0x07, 0x16,0x03,0x01,0xe5,0x02,0x0c,0x01,0xe5,0x02,0x16,0x01,0xe5,0x05,0x01,0xe5,0x1c, 0x01,0x0d,0x02,0x03,0x13,0x16,0x02,0x04,0x01,0x02,0x06,0x02,0x06,0xe5,0xe6,0x02, 0x01,0x02,0xe5,0x04,0x02,0x06,0x02,0xe6,0x03,0x03,0x1b,0xe5,0xe5,0x0f,0x09,0x09, 0xe5,0xe5,0x19,0x0b,0xe5,0x09,0x0f,0x02,0x03,0x02,0x02,0x04,0x05,0x06,0x01,0x0c, 0x02,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x01,0x02,0x02,0x01, 
0xe5,0x02,0x02,0x01,0xe6,0xe5,0x02,0x01,0xe6,0xe5,0x02,0x01,0x03,0x03,0x01,0x04, 0x02,0x01,0x06,0xe5,0xe5,0x02,0x03,0x01,0x07,0x01,0x09,0x01,0x01,0x01,0x03,0x01, 0x02,0x04,0x01,0xe5,0xe5,0xe5,0x01,0x01,0xe5,0x02,0x02,0x01,0x03,0x03,0x01,0x01, 0x05,0x01,0x07,0x01,0x01,0x02,0x02,0x01,0x07,0xe6,0xe5,0x04,0x01,0x01,0x05,0x01, 0x02,0x03,0xe7,0xe5,0x01,0x02,0x01,0x0a,0xe6,0xe5,0x0c,0x04,0x17,0x14,0x01,0x06, 0xe5,0xe6,0x05,0x01,0x11,0x01,0x03,0x02,0xe5,0xe5,0xe5,0x04,0xe5,0x10,0x02,0x08, 0x02,0xe5,0x01,0xe5,0x03,0xe5,0x07,0x02,0x08,0x01,0x05,0x09,0xe5,0x01,0xe5,0x0d, 0xe5,0x02,0x04,0xe5,0x08,0x08,0xe5,0x07,0xe5,0x05,0xe5,0x01,0x05,0x01,0xe5,0x04, 0x07,0xe5,0x01,0xe5,0x0a,0x04,0x04,0x06,0x02,0x06,0x09,0x02,0x03,0x02,0x02,0x04, 0x01,0x05,0x01,0x04,0x06,0x02,0x06,0x01,0x03,0x03,0x05,0x01,0x08,0x0b,0x01,0x04, 0x02,0x01,0x04,0x01,0x03,0x03,0x07,0x1c,0x04,0x06,0x08,0x03,0x12,0x02,0x07,0x09, 0x10,0x01,0xe5,0xe5,0x0a,0x01,0xe7,0xe6,0x08,0x23,0x0a,0x02,0x01,0x08,0x02,0x01, 0x03,0x01,0x0c,0x13,0xe5,0xe5,0x08,0x0f,0x05,0x0a,0x02,0x02,0xe5,0x05,0x0d,0x05, 0x02,0x04,0x04,0x0e,0x09,0x01,0x11,0x09,0x17,0x0c,0xe5,0x01,0x10,0xe5,0xe5,0x05, 0x09,0x09,0x1c,0x07,0xe5,0x06,0x02,0x05,0x03,0xe5,0x05,0x01,0x07,0xe6,0x09,0x0b, 0x06,0x01,0xe5,0x01,0x02,0x03,0x09,0x06,0x06,0x06,0x0d,0x21,0x1d,0x07,0x01,0xe5, 0x07,0xe5,0x02,0xe5,0x01,0x25,0x01,0x01,0x07,0x2b,0x06,0x02,0x0b,0x04,0x09,0x09, 0x0c,0xe5,0xe6,0x0e,0x09,0x04,0x04,0x0a,0x27,0xe5,0x0e,0x0b,0x07,0x01,0x01,0x01, 0x0d,0xe6,0x01,0x13,0x1f,0x0c,0xe5,0x02,0x03,0x03,0x0a,0x0a,0x0d,0x01,0x07,0x03, 0x01,0x09,0x09,0xe5,0x01,0x03,0x0e,0x04,0x09,0x0e,0x04,0x0d,0x05,0x03,0x09,0x04, 0xe5,0x1a,0x0a,0xe5,0x0c,0x01,0xe5,0xe5,0xe5,0x0a,0x01,0x03,0xe5,0x06,0x09,0x01, 0x07,0x09,0x04,0x2c,0x04,0x04,0x07,0x09,0x09,0x03,0x04,0x0c,0x03,0x01,0xe5,0x02, 0x05,0x02,0x0b,0x04,0x01,0x04,0x01,0x03,0x03,0x01,0xe5,0xe5,0x05,0x0a,0x01,0x02, 0x09,0x07,0x10,0x02,0x0c,0x02,0x01,0x06,0x01,0xe5,0x28,0x0d,0x01,0x02,0x11,0x0a, 
0x02,0x0e,0x02,0x12,0xe5,0x0b,0x11,0x03,0x02,0x05,0x07,0x07,0x07,0x13,0xe5,0x09, 0x1e,0x0f,0x0c,0x01,0x03,0x22,0x0f,0x05,0x05,0xe6,0x07,0xe5,0x01,0xe5,0x0d,0xe5, 0x0d,0x03,0xe7,0x01,0x0a,0x02,0xe6,0xe5,0x03,0xe7,0x05,0xe7,0x03,0x03,0x09,0xe5, 0x08,0xe6,0x06,0xe5,0x06,0x06,0xe5,0x16,0x26,0xe5,0xe5,0x15,0xe7,0xe5,0xe5,0x01, 0x1d,0x16,0x07,0x03,0x02,0xe6,0x01,0xe5,0x0d,0x02,0xe6,0x0f,0x14,0x07,0x08,0x01, 0x13,0x0c,0x01,0x04,0x02,0x03,0x21,0x29,0x03,0x13,0x03,0x01,0x01,0x06,0x02,0x01, 0x09,0x0d,0x08,0xe5,0x05,0x02,0x02,0x01,0xe5,0x0c,0x06,0xe5,0x03,0xe5,0xe5,0x03, 0x0e,0x04,0x01,0x06,0x02,0x0a,0x01,0x08,0x02,0x04,0x12,0xe5,0x03,0x03,0x02,0x06, 0x02,0x02,0xe5,0x01,0x05,0x09,0x11,0x01,0x1c,0x01,0xe5,0xe5,0x05,0x09,0xe6,0xea, 0x0d,0x09,0x27,0x0b,0x04,0x0c,0x0c,0x03,0x02,0x0b,0x09,0xe5,0x01,0x03,0xe5,0xe5, 0x07,0x01,0x1b,0x01,0x0f,0x01,0x07,0x01,0x07,0x01,0x15,0x18,0x07,0x10,0x02,0x03, 0x05,0x04,0x04,0x0a,0x0d,0x13,0x02,0x01,0x0b,0x01,0x0f,0x01,0x02,0x06,0x08,0x1b, 0x09,0x03,0x15,0x07,0x10,0x08,0xe5,0x07,0xe5,0x07,0x0f,0xe5,0x24,0x0c,0x02,0xe7, 0x1b,0xe5,0x03,0xe5,0x01,0xe5,0xe5,0x01,0xe6,0x06,0x0a,0x08,0xe5,0x04,0x02,0xe5, 0x07,0xe5,0x07,0xe5,0xe6,0x05,0x08,0xe5,0x01,0xe5,0x03,0xe5,0x01,0x01,0x04,0xe5, 0xe6,0x03,0x02,0x05,0x02,0xe5,0x04,0x02,0xe5,0x06,0x01,0xe5,0xe5,0xe5,0x01,0xe6, 0x09,0x01,0xe5,0x02,0x13,0x07,0x01,0x04,0x01,0xe5,0x01,0xe5,0x04,0x0c,0xe5,0xe6, 0x01,0x0e,0xe5,0xe6,0xe5,0x01,0x01,0x0a,0x0e,0xe5,0x0a,0x09,0xe5,0x07,0xe5,0x01, 0x08,0x06,0x03,0xe5,0x03,0xe5,0xe5,0x02,0xe5,0xe6,0x01,0x01,0xe5,0x01,0x03,0x08, 0x03,0x01,0xe6,0x02,0xe6,0x0c,0xe5,0x01,0x06,0x09,0x02,0x01,0x07,0x03,0xe5,0x03, 0x06,0x06,0xe5,0x0a,0xe5,0x11,0x06,0x01,0x02,0x09,0x0a,0x03,0xe5,0x02,0x09,0x01, 0x02,0x02,0xe5,0x03,0x0d,0x12,0x05,0x03,0x07,0x02,0x01,0x01,0x07,0xe5,0xe5,0xe5, 0x06,0x0b,0x09,0x0e,0x01,0xe5,0xe5,0x08,0x01,0x05,0x01,0x01,0xe5,0x0f,0x03,0x05, 0x06,0x01,0x02,0x0e,0x01,0xe5,0x0f,0xe5,0xe5,0x12,0x09,0x03,0x05,0x10,0x01,0xe5, 
0x0a,0x08,0x02,0x06,0xe5,0x1f,0x09,0x06,0xe5,0x01,0x05,0x02,0x01,0x07,0x09,0x09, 0x09,0x09,0x01,0x07,0x06,0xe5,0xe5,0xe5,0x07,0xe5,0x08,0x09,0x06,0x04,0x0e,0x02, 0x01,0x0e,0xe5,0xe5,0x12,0x0b,0x09,0x13,0x0b,0x02,0x01,0x01,0x13,0x02,0x14,0x0b, 0x06,0x02,0xe5,0xe5,0x01,0x0b,0x0b,0x09,0x0c,0x03,0x0c,0x06,0x05,0x0b,0xe5,0x01, 0xe5,0x08,0x01,0x05,0x03,0x02,0x04,0x01,0x05,0x01,0x04,0x0e,0x04,0x03,0x0a,0x01, 0x07,0x05,0x02,0xe5,0x05,0x01,0x0a,0x04,0x01,0x01,0x07,0x03,0x02,0x0e,0x19,0x03, 0x05,0x03,0x02,0xe7,0x07,0x04,0x05,0x03,0x05,0x03,0x05,0x03,0x04,0x04,0x02,0xe6, 0x03,0x05,0x03,0x03,0x05,0x0b,0x02,0x01,0xe5,0x02,0x05,0x01,0x01,0x09,0x01,0x02, 0xe5,0xe5,0xe5,0x04,0x04,0x01,0x01,0x0a,0x04,0x02,0xe5,0x09,0x01,0x01,0x07,0x01, 0x02,0x01,0x02,0x01,0x01,0x02,0x0a,0x01,0x01,0x01,0x02,0x04,0x04,0xe7,0x24,0x04, 0x0b,0x04,0x01,0xe5,0xe5,0x04,0x01,0x01,0x02,0x04,0x0b,0x09,0x05,0x01,0x09,0x01, 0x09,0x07,0x01,0x01,0x07,0x09,0x04,0x04,0x01,0x02,0x06,0x01,0x05,0x01,0x02,0x04, 0x13,0x04,0x02,0x0b,0x01,0x07,0x09,0x10,0x01,0x04,0x0b,0xe5,0x01,0xe5,0xe6,0x27, 0x09,0x05,0xe5,0x01,0x01,0x01,0xe5,0x03,0x01,0x07,0x05,0xe5,0x01,0x09,0x09,0x03, 0xe5,0x03,0x01,0x07,0x09,0x01,0x02,0xe6,0x02,0xe5,0x02,0xe5,0x03,0x09,0x01,0x0c, 0x01,0x01,0xe5,0xe5,0x06,0x13,0x13,0x01,0x09,0x09,0x0e,0x01,0xe5,0xe5,0x0d,0xe5, 0x01,0x74,0x1d,0x0b,0x02,0xe5,0x07,0x02,0x06,0x02,0x03,0x02,0xe5,0x11,0x02,0x10, 0x02,0x44,0x02,0x14,0x09,0x23,0xe5,0x2f,0xe5,0x01,0xe5,0x17,0xe5,0x09,0xe5,0x01, 0xe5,0x07,0xe5,0x07,0x05,0xe5,0x15,0xe5,0x11,0x45,0xe9,0x0d,0x05,0xe5,0x01,0x05, 0x03,0x09,0x02,0x06,0x06,0x02,0xe5,0x04,0x02,0x09,0x09,0x09,0x02,0x03,0x02,0xe5, 0x07,0x06,0x02,0x09,0xe6,0x05,0x02,0xe5,0x07,0x09,0x05,0x03,0xe6,0x06,0x09,0x09, 0x06,0x02,0x03,0x05,0x09,0x09,0x09,0x09,0x06,0x02,0x0d,0xe9,0x01,0x12,0xe5,0x23, 0x09,0x02,0x26,0xe5,0x11,0x48,0x12,0xe5,0x1c,0x1c,0xe5,0x0d,0x04,0x01,0x15,0x30, 0x28,0x05,0x24,0x04,0x2b,0x13,0x1d,0x1d,0x12,0xe5,0xe6,0x0c,0x01,0x06,0xe5,0xe5, 
0x17,0x09,0x01,0x02,0xe7,0x05,0x04,0x02,0x01,0x09,0x07,0x01,0x09,0xe7,0x0f,0x04, 0x04,0xe7,0x07,0xe7,0x03,0x01,0x07,0x01,0x0a,0x01,0x04,0x01,0x04,0x04,0x09,0x07, 0x01,0x07,0x01,0x07,0x01,0x07,0x1a,0x02,0x0e,0x01,0xe6,0xe5,0x0c,0x01,0xe5,0x05, 0x01,0xe5,0x0c,0x19,0xe5,0xe5,0x02,0x02,0x06,0x01,0xe5,0x07,0xe5,0x05,0x01,0xe5, 0x04,0xe5,0xe7,0x01,0x0c,0x02,0x08,0xe6,0x01,0x01,0x02,0x01,0xe6,0x01,0x02,0x01, 0xe5,0x02,0x02,0x01,0xe5,0x04,0x03,0xe5,0x04,0x01,0xe5,0x05,0x03,0x04,0x02,0xe5, 0x05,0x01,0x01,0x05,0x01,0xe5,0x05,0x01,0xe5,0x07,0xe5,0x08,0x12,0x01,0x0c,0x02, 0xe6,0x0a,0xe5,0xe5,0x06,0x01,0x27,0x01,0x01,0x05,0x07,0x01,0x09,0x04,0x02,0x01, 0x03,0xe5,0x12,0x04,0x09,0xe5,0x06,0x06,0x0d,0x01,0x04,0x0c,0x01,0x09,0x04,0x04, 0x09,0x07,0x01,0x07,0x01,0x07,0x10,0x09,0x02,0x10,0xe9,0x07,0x03,0x01,0x05,0xe6, 0xe5,0x04,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe6,0xe5,0x04,0xe5, 0x01,0x05,0xe6,0xe5,0x04,0xe6,0x01,0x04,0xe5,0x07,0xe6,0x06,0xe5,0x01,0x04,0xe5, 0x02,0x02,0x03,0xe5,0x08,0xe6,0xe5,0x03,0xe6,0x07,0xe6,0xe5,0x04,0xe6,0x04,0xe5, 0xe5,0x01,0x05,0xe6,0xe5,0x02,0xe6,0xe5,0xe5,0x04,0xe6,0xe5,0x02,0x01,0xe5,0x01, 0x03,0xe7,0x05,0xe5,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07,0x02,0x04,0x04,0x05,0x07, 0x01,0x0a,0x09,0x09,0x08,0x09,0x0a,0xe5,0x06,0x14,0x09,0x08,0xe5,0x07,0xe6,0x01, 0x07,0x06,0x01,0xe5,0x05,0x01,0xe5,0x02,0x02,0x01,0xe6,0x0e,0x01,0x09,0x14,0x08, 0xe5,0x05,0x05,0xe5,0xe6,0xe5,0x02,0xe5,0x04,0xe5,0x06,0x10,0x01,0x0f,0x07,0x01, 0x0a,0x1f,0x04,0x15,0x14,0x01,0x0e,0x02,0x08,0x02,0x03,0x07,0x03,0xe5,0xe5,0x06, 0x01,0x06,0xe5,0x03,0x0e,0x04,0x01,0x01,0x02,0x0c,0x01,0xe6,0x02,0x08,0x03,0x01, 0x03,0x05,0xe6,0xe5,0x05,0x0a,0x03,0x0d,0xe7,0x0c,0x01,0x07,0x16,0x09,0x08,0x09, 0x13,0x1e,0x08,0x09,0x08,0x03,0x03,0x05,0x09,0x05,0x02,0x13,0x03,0x01,0xe5,0x01, 0x0d,0x01,0x0d,0xe5,0x05,0x07,0x01,0x01,0x05,0xe5,0x0b,0x0e,0x02,0xe5,0x0c,0x01, 0x07,0x01,0x0a,0x09,0x09,0x10,0x0c,0x08,0x0a,0x13,0x06,0x02,0x08,0x04,0x0b,0x05, 
0x09,0x03,0x0f,0x0d,0x04,0xe5,0x05,0x06,0x05,0x08,0xe5,0x01,0x05,0x04,0x02,0x01, 0x04,0x05,0x06,0x12,0x01,0xe5,0xe5,0x09,0xe5,0xe5,0x19,0x01,0xe6,0x0c,0x13,0x01, 0x01,0xe6,0x12,0x05,0x02,0x26,0x02,0xe5,0x04,0x13,0x15,0xe5,0x0f,0xe5,0xe5,0x0d, 0x01,0x02,0x0a,0x09,0x0c,0x05,0x0e,0x02,0x0d,0x03,0x01,0x07,0x01,0x01,0x05,0x01, 0x02,0x01,0x02,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0x02,0x04,0x01,0xe5,0x02,0x02, 0x01,0x02,0x04,0x01,0x07,0x01,0x03,0x02,0xe5,0xe5,0x06,0x01,0x07,0x01,0xe5,0x05, 0x01,0x07,0x01,0x01,0x04,0x02,0x01,0x07,0x01,0x02,0x04,0x01,0xe5,0x05,0x01,0x07, 0x01,0xe5,0x05,0x01,0xe5,0x02,0x02,0x01,0xe5,0x05,0x01,0xe5,0x02,0x02,0x01,0x07, 0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x04,0xe5,0xe5,0x05,0x01,0x01,0x02,0xe5, 0x2c,0xe5,0x07,0x03,0x06,0x12,0x02,0x10,0xe5,0x05,0x01,0xe5,0x04,0x02,0xe5,0x01, 0x08,0x0b,0x03,0x02,0xe5,0x07,0xe5,0x02,0x01,0x02,0xe5,0x01,0x04,0x04,0x0a,0x08, 0x02,0x09,0x04,0x0f,0x02,0x03,0x0a,0x15,0x11,0x01,0xe6,0x09,0x09,0x13,0x01,0x01, 0x05,0x03,0x06,0x01,0x02,0x0d,0x03,0x01,0x03,0x07,0x04,0xe5,0x05,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x08,0x06,0xe5,0xe5,0x02,0x04,0xe5,0x01,0xe5,0xe5,0xe5,0xe5,0x02, 0x02,0x04,0x04,0x03,0x06,0x02,0x1b,0x01,0x09,0x02,0x03,0x2a,0x08,0x02,0x01,0x01, 0x0a,0x09,0x19,0x1e,0x11,0x0e,0x06,0x01,0x0f,0x09,0x09,0x12,0x03,0x0a,0x02,0x0f, 0x17,0x0e,0x17,0x05,0x16,0x01,0xe6,0x0c,0x19,0x1a,0x0f,0xe7,0x0b,0x01,0x02,0x01, 0xe5,0x05,0xe5,0x16,0x01,0x01,0x05,0x06,0x01,0x02,0x0e,0x01,0x07,0x01,0x04,0x02, 0x03,0x03,0x09,0x0d,0x03,0x03,0x01,0x07,0x01,0x0a,0x08,0x05,0x0b,0xe5,0x0e,0x01, 0x01,0x0f,0x2e,0x09,0x0b,0x05,0x0b,0x01,0x01,0x03,0x09,0x06,0x03,0x14,0x01,0x07, 0x0f,0x03,0x01,0x03,0x04,0x03,0x26,0xe5,0x04,0x03,0x11,0x11,0x10,0x01,0x03,0x09, 0x18,0xe5,0x02,0x16,0x07,0x05,0x0c,0x01,0x07,0x04,0xe5,0xe6,0x06,0x04,0x0d,0x11, 0x03,0x01,0x0f,0x09,0x01,0x09,0x03,0x0f,0x07,0x05,0x05,0x07,0x01,0x20,0x12,0x06, 0x01,0x0f,0xe5,0x10,0x09,0x01,0x09,0x01,0x02,0x04,0x0e,0x13,0x01,0x07,0x05,0x08, 
0x04,0xe5,0x02,0x03,0x03,0x05,0x02,0xe5,0x02,0x04,0x02,0x02,0x07,0x05,0x04,0x04, 0x0c,0x06,0x02,0x0e,0x01,0x02,0x06,0x38,0x01,0xe6,0x05,0x22,0x17,0x02,0x09,0x04, 0x03,0xe5,0x11,0xe5,0x03,0x03,0xe5,0xe5,0x08,0x04,0x01,0x02,0x0c,0x03,0x01,0xe5, 0x03,0x03,0x02,0x1f,0x04,0x01,0x02,0x0f,0x09,0x20,0xe5,0x0e,0x08,0x03,0x02,0x02, 0x0d,0x1a,0xe5,0x08,0x0c,0x02,0x0b,0x06,0x0a,0x01,0x06,0x02,0xe7,0x02,0x0d,0xe6, 0xe6,0x02,0x01,0x06,0xe6,0x09,0xe6,0x10,0x01,0x07,0xe8,0x06,0x01,0x11,0x0c,0x06, 0x08,0x01,0x07,0xe6,0x21,0xe5,0xe6,0x05,0x1f,0x04,0x01,0x08,0x06,0x05,0x03,0xe5, 0x03,0x01,0xe5,0x09,0xe5,0x03,0x09,0x06,0x10,0xe5,0x01,0x0a,0x01,0x04,0x07,0x03, 0x02,0x11,0x04,0x05,0x04,0x03,0x13,0x13,0x13,0x18,0x0a,0xe5,0xe6,0x08,0x05,0x11, 0x01,0x03,0x09,0x08,0xe5,0x05,0x0f,0x06,0x01,0x06,0xe5,0x01,0x02,0x03,0x03,0x01, 0x06,0x1d,0x0c,0x01,0x03,0x01,0x01,0x01,0x02,0xe6,0x03,0x06,0x02,0x09,0x09,0x03, 0x09,0x05,0x01,0x02,0x04,0x04,0x1c,0x14,0xe9,0x08,0x03,0xe5,0x02,0x0f,0xe5,0x1f, 0x07,0xe5,0x17,0xe6,0x01,0x04,0x01,0x07,0x01,0x18,0x09,0xe5,0x04,0x09,0x09,0x01, 0xe5,0x05,0xe5,0x07,0x01,0x07,0x13,0x01,0x04,0x02,0x04,0x2f,0xe5,0xe5,0x01,0x01, 0x06,0x05,0x13,0x0d,0x1d,0x1e,0x12,0x26,0x09,0x0a,0x09,0x06,0x01,0x01,0x04,0x03, 0x13,0xe5,0x07,0x0c,0x1b,0x0a,0x01,0xe5,0x01,0xe6,0x17,0xe5,0x04,0x03,0x08,0xe5, 0xe6,0x04,0xe5,0x0e,0x02,0xe5,0x04,0x03,0x05,0x06,0xe5,0x04,0xe5,0x03,0x03,0x09, 0x09,0x02,0x01,0x04,0x06,0x04,0x01,0xe5,0x05,0x01,0xe5,0x01,0x03,0xe5,0x07,0xe5, 0x05,0x01,0xe5,0xe6,0xe5,0x02,0x02,0xe5,0x04,0x07,0x01,0xe5,0xe6,0x04,0x0a,0x0b, 0x01,0x04,0x05,0x06,0x01,0x0f,0x02,0xe7,0x01,0x01,0x16,0x01,0xe5,0x01,0x0c,0x02, 0xe5,0x06,0x04,0xe5,0x04,0xe5,0xe5,0x08,0x03,0x05,0x06,0x01,0xe5,0x08,0x02,0x06, 0x03,0x09,0x02,0x01,0xe6,0x01,0x01,0xe5,0x0a,0x01,0x04,0x02,0x01,0xe5,0x02,0xe5, 0xe6,0x04,0x0b,0xe5,0x01,0x02,0x09,0x09,0x03,0x01,0x03,0x17,0xe5,0x0a,0x05,0xe5, 0x0d,0x01,0x03,0xe7,0x03,0x1c,0x03,0x0c,0xe5,0xe5,0xe5,0xe5,0x0e,0x03,0x01,0xe5, 
0x0b,0x0d,0x05,0x06,0xe5,0x02,0x13,0x09,0x03,0x0f,0x06,0xe5,0xe5,0x05,0xe5,0xe5, 0x02,0x05,0x06,0x01,0xe5,0x0f,0xe5,0xe5,0x05,0xe5,0x02,0x15,0x05,0x10,0xe5,0x02, 0x06,0xe6,0x01,0x1e,0x10,0x02,0x01,0x04,0x0c,0x13,0x09,0x09,0x06,0xe5,0xe5,0xe5, 0x10,0x05,0xe5,0x03,0x13,0x06,0x02,0x06,0x02,0x09,0x06,0x02,0x10,0x02,0x06,0x04, 0x1b,0x0f,0xe5,0x05,0x04,0x01,0x23,0x02,0x0c,0x09,0x03,0x02,0xe5,0x01,0xe5,0x08, 0x09,0x0b,0x04,0x01,0x02,0x02,0x01,0x01,0x05,0xe5,0x01,0xe5,0xe5,0x01,0x06,0x02, 0x03,0x01,0xe5,0xe5,0x01,0x04,0x01,0x02,0x04,0x04,0x02,0x06,0x02,0x06,0xe5,0x01, 0xe5,0x03,0x02,0x06,0x01,0x01,0x05,0x02,0x06,0xe5,0xe6,0x01,0x02,0xe5,0x01,0x05, 0x09,0x04,0x19,0x01,0xe7,0x01,0x1e,0x04,0x03,0x0e,0x05,0x04,0x03,0xe5,0x02,0x05, 0x09,0x03,0x05,0x01,0x07,0x04,0x04,0x04,0x04,0x01,0x01,0x05,0x03,0x03,0xe5,0x01, 0x01,0x02,0x04,0x01,0x01,0x05,0x03,0x05,0x03,0x05,0x03,0x02,0x01,0xe5,0x02,0x05, 0x03,0x01,0x02,0x0a,0x03,0x09,0x01,0x02,0x14,0x03,0x13,0x02,0x01,0x01,0xe5,0x21, 0x02,0x02,0x0b,0x01,0x02,0x04,0x01,0x04,0xe5,0xe5,0x08,0x01,0x07,0x0b,0x09,0x09, 0x07,0x01,0x03,0xe5,0x03,0x02,0x02,0x03,0x09,0x01,0x07,0x03,0x01,0xe5,0x05,0x09, 0x05,0x06,0x06,0x0b,0xe5,0x05,0x07,0x01,0x09,0x02,0x01,0x04,0x02,0x01,0xe5,0x05, 0x01,0x10,0x01,0x01,0xe5,0x20,0x04,0x0e,0x01,0x07,0x01,0x02,0xe7,0xe5,0x08,0x01, 0x02,0x04,0x09,0x01,0x02,0x04,0x04,0x04,0x01,0x02,0x04,0x01,0x01,0xe5,0xe5,0xe6, 0x02,0xe5,0x05,0x04,0x04,0x01,0x01,0xe5,0x03,0x04,0x01,0x01,0xe5,0x03,0x04,0x04, 0x04,0x03,0xe5,0xe6,0xe5,0x03,0x08,0xe5,0xe5,0x01,0x04,0x09,0x01,0x01,0x05,0x01, 0x07,0x04,0x05,0xe5,0x13,0xe5,0xe5,0x44,0x13,0x27,0x06,0x09,0x15,0x09,0x09,0x02, 0xe5,0x04,0x02,0xe5,0x04,0x04,0x04,0x09,0x04,0x04,0x09,0x02,0xe5,0x04,0x04,0x04, 0x20,0x01,0x02,0x46,0xe6,0x0d,0x04,0x0e,0x16,0x01,0x03,0xe5,0x07,0xe5,0x13,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x01, 0x05,0xe5,0x07,0xe5,0x07,0xe5,0x20,0xe8,0x0d,0x09,0x09,0x09,0x09,0x06,0x02,0x09, 
0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x09,0x03,0x05,0xe5,0x07,0xe5,0x02,0x01, 0x04,0x09,0xe5,0xe5,0x05,0xe6,0x01,0x01,0x02,0xe6,0x06,0xe6,0x06,0xe5,0x02,0x01, 0x02,0xe6,0xe5,0x04,0xe5,0x02,0x01,0x02,0xe6,0xe5,0x01,0x02,0xe6,0x06,0xe6,0x06, 0xe6,0x06,0x09,0x04,0x01,0x06,0x03,0xe5,0x01,0x1d,0x1a,0x13,0x09,0x02,0x06,0x02, 0x06,0x2a,0x51,0x06,0x02,0x31,0x04,0x06,0x1e,0x3a,0x09,0x25,0x09,0x03,0x02,0x0e, 0x27,0x13,0x01,0x0a,0x32,0x0c,0x0f,0x13,0x02,0x0e,0x01,0x07,0x01,0x02,0x01,0x02, 0x01,0x07,0x01,0x02,0x06,0x04,0x04,0x02,0x06,0x02,0x06,0x02,0x04,0x01,0xe7,0x03, 0x01,0xe7,0xe5,0x03,0x01,0x02,0x06,0xe7,0xe5,0x03,0xe7,0xe5,0xe6,0xe8,0xe5,0x01, 0x01,0xe7,0x05,0xe7,0x03,0x01,0xe7,0x03,0x01,0xe5,0xe5,0xe5,0x01,0x01,0xe7,0xe5, 0x04,0x01,0x07,0xe6,0x06,0xe6,0xe5,0x1e,0xe6,0x0e,0xe5,0x0f,0x01,0x01,0x0f,0x01, 0xe5,0x05,0x01,0x01,0x05,0x01,0xe5,0x02,0x02,0x01,0x01,0x01,0x03,0x02,0x08,0x01, 0x01,0xe5,0x03,0x01,0x07,0x01,0x05,0x01,0xe6,0x04,0x01,0xe6,0x08,0x01,0x07,0xe6, 0x06,0xe6,0x04,0x02,0xe5,0x04,0x02,0xe5,0x06,0xe6,0x04,0x02,0xe5,0x04,0x01,0xe6, 0x04,0x02,0xe5,0x03,0x03,0xe5,0x08,0x09,0x20,0xe5,0x01,0xe5,0x0d,0x0d,0xe5,0x03, 0x03,0x0d,0x01,0x04,0x04,0x07,0x01,0x03,0x05,0x04,0x04,0x04,0x04,0x09,0x03,0x05, 0x03,0x03,0x01,0x01,0x05,0x01,0xe5,0x06,0xe5,0xe5,0x08,0xe5,0x07,0x09,0x09,0xe5, 0x07,0x01,0x02,0x04,0x03,0x03,0x01,0x09,0x03,0x10,0x0a,0x0c,0x15,0x01,0x0f,0x05, 0xe5,0x05,0x01,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0xe5,0x04,0xe6,0x04,0xe6,0xe5,0xe5, 0x04,0xe6,0x06,0xe6,0x06,0xe5,0x01,0x05,0x03,0x05,0xe5,0x01,0x05,0xe6,0xe5,0x02, 0xe8,0xe5,0x02,0xe7,0x03,0x05,0xe5,0x01,0x03,0xe7,0x01,0xe5,0x02,0xe6,0x02,0x04, 0xe5,0x07,0xe5,0x01,0x03,0xe7,0x02,0x03,0xe7,0xe5,0x02,0xe7,0x02,0x04,0xe5,0x07, 0xe5,0x05,0xe7,0x05,0xe7,0x07,0xe5,0x07,0xe5,0x07,0x02,0x0f,0x28,0x0e,0x04,0x06, 0x0b,0xe5,0x07,0x09,0x07,0x01,0x0a,0x14,0x09,0x09,0xe6,0x07,0x09,0x08,0x0a,0x03, 0x01,0x03,0xe5,0x07,0x06,0x01,0xe5,0x08,0x09,0x09,0xe5,0x18,0xe5,0x0e,0x0f,0x01, 
0x04,0x11,0x0b,0xe5,0x01,0x0c,0x02,0x04,0x09,0x04,0x03,0xe5,0x03,0x04,0x01,0x02, 0x07,0x01,0xe5,0x01,0x05,0xe5,0x01,0x07,0x03,0x02,0x02,0xe5,0x01,0x02,0x02,0x03, 0x03,0x01,0x09,0x06,0x02,0xe5,0x01,0x05,0x09,0xe5,0x01,0x05,0x04,0x02,0x01,0x09, 0xe5,0x01,0x03,0x01,0xe5,0x01,0x1c,0x01,0xe6,0x0d,0x0d,0x23,0x04,0xe5,0x03,0x12, 0xe5,0x07,0x09,0x18,0xe5,0x07,0xe5,0x01,0x02,0x09,0x04,0xe5,0x02,0x04,0x0f,0x08, 0x04,0xe5,0x03,0x0d,0xe5,0x03,0x08,0x0a,0x03,0xe5,0x07,0xe5,0x1c,0x02,0x03,0x0b, 0x0d,0x03,0x16,0x06,0x05,0x06,0x10,0x02,0x06,0x06,0x02,0x19,0x01,0xe5,0x05,0x01, 0xe5,0x08,0x01,0x02,0x03,0x01,0xe5,0x01,0x06,0x16,0x03,0x01,0xe5,0x0f,0x01,0xe5, 0x19,0x01,0xe5,0x05,0x01,0xe5,0x02,0x09,0x11,0x02,0x39,0x0c,0x04,0x04,0x01,0xe5, 0x05,0x02,0x01,0x04,0xe5,0xe5,0x05,0x02,0x06,0xe5,0xe5,0x03,0x19,0xe5,0x0f,0x02, 0x06,0x09,0x01,0xe5,0x05,0xe5,0xe5,0x05,0x01,0xe7,0x0d,0x01,0xe5,0x05,0x01,0xe5, 0x19,0x02,0x17,0xe5,0x01,0x10,0xe5,0xe5,0x06,0x01,0x07,0x01,0x01,0x05,0x01,0x07, 0xe6,0x06,0x01,0xe6,0xe5,0x02,0x01,0xe6,0x01,0x02,0x01,0xe5,0xe5,0x02,0xe5,0xe5, 0x06,0x01,0x04,0x02,0x01,0x01,0x05,0x01,0x07,0xe7,0x04,0xe5,0xe6,0xe5,0x05,0x01, 0x01,0x05,0x01,0x07,0x01,0x04,0x02,0x01,0x01,0x02,0x02,0x01,0x04,0x02,0x01,0x01, 0x02,0x02,0x01,0x01,0xe5,0x03,0x01,0x01,0x02,0x02,0x01,0x04,0x02,0x01,0x01,0x05, 0x01,0x01,0x05,0x01,0x02,0x02,0x01,0x01,0x01,0x05,0x01,0x0a,0xe5,0xe6,0x09,0x0e, 0xe5,0xe5,0x07,0x0a,0x09,0x08,0x07,0x0a,0x02,0x03,0xe7,0x06,0x08,0x03,0xe5,0x01, 0x02,0x02,0x01,0xe5,0x03,0x01,0x04,0x02,0x01,0x06,0x0c,0x04,0x02,0xe6,0x08,0xe5, 0x01,0x06,0x0b,0x06,0xe5,0x03,0x03,0x09,0xe5,0x12,0x14,0x16,0x03,0xe5,0x14,0xe5, 0xe5,0x05,0x03,0x05,0x08,0x08,0x01,0x0c,0x01,0x04,0x01,0x09,0x02,0x03,0x0a,0x09, 0x05,0x06,0x01,0x07,0x01,0x01,0x0a,0xe5,0x04,0x01,0x01,0x05,0xe5,0x06,0x08,0x03, 0x04,0x05,0x03,0x01,0x0a,0x02,0x0a,0x09,0x04,0x04,0x09,0x14,0x08,0xe7,0xe5,0x1a, 0x04,0x09,0x05,0x03,0x06,0x06,0x05,0x06,0x0c,0x02,0x02,0x09,0x08,0xe5,0x07,0x0a, 
0x0a,0x0e,0x06,0x0e,0x07,0x06,0x0f,0x11,0x01,0x10,0x31,0x01,0x21,0x01,0x0a,0x0c, 0x03,0x01,0x01,0x02,0x02,0x09,0xe6,0x03,0x05,0x03,0xe5,0xe7,0x03,0x02,0xe6,0x06, 0xe6,0x06,0x0b,0x04,0xe5,0xe7,0xe5,0x02,0xe5,0x09,0x0b,0x06,0x02,0x13,0x09,0xe5, 0x09,0x07,0x01,0x04,0x1a,0x13,0x01,0xe6,0x01,0x02,0x20,0x19,0x03,0x05,0x04,0x05, 0x01,0x06,0x07,0x0b,0x13,0x03,0x07,0x05,0x02,0xe5,0x03,0x16,0x01,0x03,0x01,0x08, 0x03,0x0a,0x04,0x0e,0x09,0x01,0x01,0x03,0x1f,0x12,0xe6,0xe5,0x08,0x09,0x05,0xe5, 0x05,0x01,0x03,0x05,0x03,0x05,0x04,0x02,0x01,0x01,0x01,0x05,0x09,0x03,0x05,0x07, 0x01,0x07,0x01,0x03,0xe6,0xe5,0xe5,0x02,0xe5,0x03,0x05,0x03,0x07,0x01,0x0f,0x13, 0x05,0x03,0x0a,0x0e,0x03,0x03,0x18,0x10,0x02,0x08,0xe5,0x09,0xe9,0x17,0x09,0x02, 0x06,0x04,0x04,0x09,0x01,0x04,0xe5,0xe5,0xe5,0x06,0x09,0x06,0x02,0x09,0x09,0x01, 0x07,0x01,0x07,0x03,0x06,0x01,0xe5,0x05,0x02,0x02,0x03,0x01,0x01,0x08,0x04,0x01, 0x1d,0x02,0x09,0x04,0x14,0x03,0x06,0xe6,0x15,0x03,0xe6,0x02,0x0e,0x0c,0x10,0x0a, 0x0a,0x09,0x07,0xe5,0x07,0x13,0x01,0x09,0x07,0x01,0x07,0x12,0x01,0x02,0x14,0x64, 0x0b,0x0c,0x09,0xe7,0x02,0x02,0xe6,0x03,0x02,0x09,0xe6,0x01,0xe5,0x02,0xe5,0x03, 0xe5,0x02,0xe5,0x02,0x05,0x10,0xe7,0x02,0x03,0xe6,0x06,0xe6,0x07,0xe6,0x05,0x0a, 0x07,0x11,0xe5,0x02,0xe7,0x02,0x01,0xe7,0x05,0xe5,0xe5,0x01,0xe5,0x02,0xe6,0x01, 0xe5,0x0d,0xe6,0xe5,0x18,0xe5,0x0f,0xe5,0x10,0xe8,0xe5,0x01,0x04,0x07,0x02,0x05, 0x06,0xe5,0x0e,0x05,0x06,0x04,0x07,0x01,0x02,0xe5,0x02,0x03,0xe6,0x0c,0x03,0x02, 0xe5,0x07,0xe5,0x02,0x02,0xe5,0x02,0x02,0x09,0x03,0x20,0x05,0x02,0x01,0x08,0x08, 0x02,0x0a,0x07,0x03,0x1a,0x21,0xe5,0x01,0x02,0x10,0x05,0x03,0x02,0x02,0x03,0x05, 0x03,0x04,0xe5,0x08,0x01,0x01,0x02,0x01,0xe5,0x08,0x04,0x04,0x05,0xe5,0xe6,0x08, 0x04,0x01,0x02,0x03,0x05,0x04,0x04,0xe5,0x02,0x01,0x04,0x04,0xe5,0x06,0xe5,0x03, 0x03,0xe5,0x03,0x03,0x05,0x03,0x02,0x06,0x03,0x01,0x03,0x05,0x18,0x04,0x04,0x03, 0x02,0x02,0x05,0x0b,0x0d,0x01,0xe5,0x16,0xe5,0x07,0xe5,0xe5,0x05,0xe5,0x07,0xe5, 
0xe5,0x05,0xe5,0x07,0xe5,0xe5,0xe5,0x03,0xe6,0xe5,0x01,0x02,0xe5,0x01,0x02,0x02, 0xe6,0x03,0x02,0xe6,0x03,0x02,0xe5,0x07,0xe5,0x01,0x02,0x02,0xe5,0x02,0x07,0xe5, 0x07,0xe5,0x04,0x02,0x09,0x01,0x07,0x13,0x09,0x0c,0x09,0x05,0xe5,0x01,0x29,0xe8, 0x17,0x09,0x02,0x06,0x09,0x02,0x06,0x09,0x09,0x02,0x06,0x09,0x09,0x02,0x06,0x02, 0x01,0x02,0x01,0x09,0x03,0x06,0x01,0x02,0x06,0x07,0x01,0x0a,0x09,0x0a,0x07,0x0a, 0xe5,0x01,0x0b,0x0b,0x02,0x09,0x20,0xe5,0x01,0x01,0xe5,0x13,0x03,0x05,0x03,0x02, 0xe5,0xe5,0x05,0xe5,0xe5,0x02,0x05,0x03,0x05,0x03,0x05,0x03,0x02,0xe5,0xe5,0x02, 0x02,0xe5,0xe5,0x02,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x02,0x01,0x02,0x03, 0xe5,0xe6,0xe5,0x05,0xe5,0x0a,0x03,0xe5,0x08,0x07,0xe5,0x01,0xe5,0x03,0xe5,0xe6, 0x05,0xe5,0x06,0xe5,0x01,0xe5,0x0c,0x07,0x02,0xe5,0x07,0xe5,0x13,0x01,0x01,0xe7, 0xe5,0xe5,0x13,0x01,0x07,0x01,0x07,0x02,0x06,0xe5,0x07,0x01,0x02,0x01,0x02,0xe5, 0x07,0x01,0x01,0xe5,0x03,0x01,0x07,0xe5,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01, 0x09,0x04,0x01,0x02,0x04,0x01,0x01,0xe5,0x06,0x03,0x06,0x03,0x08,0x02,0xe5,0x03, 0x07,0x03,0x08,0x02,0xe5,0x11,0x01,0xe5,0x01,0x0a,0x11,0x01,0x02,0x03,0x02,0x01, 0x0f,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x02,0x01, 0x04,0x04,0x04,0x02,0x01,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x02,0x01,0x06,0x01, 0x02,0x04,0x02,0x01,0x02,0x04,0x04,0x06,0x01,0x02,0x04,0x01,0xe5,0xe5,0x01,0x0b, 0x01,0x02,0x01,0x23,0x1f,0x04,0xe6,0xe5,0x01,0x11,0x01,0xe5,0x05,0x01,0xe5,0x05, 0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0x02,0x06,0x01,0xe5,0x05,0x01,0xe5,0x08,0x06,0xe5,0x0a,0x01,0x0e,0xe5, 0x28,0x21,0x03,0x01,0xe7,0x16,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x06, 0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x06,0x02,0x01, 0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x06,0x02,0xe5,0x01,0x04,0x02,0x06,0x02,0x04, 
0x04,0x04,0x04,0x09,0xe5,0x01,0x19,0xe5,0x01,0x1e,0x09,0x01,0x17,0x04,0x02,0x06, 0x02,0x0a,0x03,0x01,0x02,0xe5,0x02,0x01,0x02,0xe5,0x02,0x01,0x02,0xe5,0x02,0x05, 0x03,0x02,0x01,0xe5,0x02,0x01,0x02,0xe5,0x02,0x01,0x01,0xe6,0x02,0x01,0x01,0xe6, 0x02,0x05,0x03,0x01,0x02,0xe5,0x02,0x01,0x02,0xe5,0x02,0x01,0x02,0xe5,0x02,0x02, 0x01,0x02,0x03,0x04,0xe5,0x02,0x05,0x03,0x05,0x04,0x08,0x09,0x13,0x23,0x03,0x05, 0x01,0x01,0x13,0x04,0xe6,0x01,0x14,0x03,0xe5,0x03,0x03,0xe5,0x03,0x04,0x01,0x01, 0xe5,0x02,0x05,0x03,0xe5,0xe5,0xe6,0x02,0x05,0x03,0xe5,0xe5,0x01,0x03,0x01,0xe5, 0x01,0x09,0x09,0x04,0x04,0x06,0x02,0x0b,0x03,0x02,0xe5,0xe5,0x05,0x04,0x04,0x01, 0xe5,0x05,0x01,0x02,0x04,0x02,0x01,0x13,0x0c,0x18,0x09,0x01,0x11,0xe5,0x03,0x02, 0xe6,0x18,0x02,0x06,0x02,0xe6,0x03,0x01,0xe5,0xe5,0xe5,0x01,0x02,0x06,0x02,0x01, 0x01,0x02,0x02,0x06,0x02,0x01,0xe5,0x02,0x02,0x01,0x04,0x01,0xe5,0x05,0x01,0xe7, 0x03,0x01,0xe7,0x03,0x01,0xe5,0xe6,0x02,0x01,0xe5,0xe5,0xe5,0x03,0x02,0xe7,0x02, 0x01,0xe5,0xe6,0x02,0x01,0xe5,0xe5,0xe6,0x03,0x01,0x04,0x01,0xe8,0xe5,0xe5,0x12, 0x01,0xe5,0xe5,0x1f,0x09,0x01,0x11,0x08,0xe7,0x15,0x02,0x02,0x02,0xe5,0x01,0x02, 0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x01, 0x01,0x02,0x02,0x01,0x01,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02, 0x01,0x01,0x02,0x02,0x05,0x02,0x02,0x03,0x02,0x02,0x01,0x04,0x02,0x09,0xe6,0x03, 0x02,0x1a,0x04,0x43,0x01,0xe7,0xe5,0x17,0xe5,0x07,0xe5,0xe6,0x04,0xe5,0xe6,0x04, 0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5, 0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x06,0xe5,0xe6, 0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0x01,0xe5,0x03,0xe5,0x01,0x01,0x17,0xe5, 0x01,0x46,0xe5,0xe6,0x0d,0x04,0x01,0xe5,0xe7,0x04,0x01,0xe6,0x04,0x01,0xe5,0x05, 0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe6,0x04,0x01,0xe7,0x03,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0xe5,0x03,0x01,0xe5,0x07,0x01, 
0xe7,0x03,0x01,0xe5,0x05,0x01,0xe5,0x04,0x02,0xe5,0xe5,0x05,0xe5,0x03,0x03,0x02, 0x06,0x06,0x02,0xe5,0x01,0x05,0x09,0x09,0x09,0x09,0x09,0x0d,0x02,0x01,0x16,0xe5, 0x08,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x0b,0x09,0x09,0x05, 0x02,0x13,0x12,0xe5,0x49,0x02,0xe5,0x01,0x15,0x01,0x09,0x02,0x06,0x02,0xe5,0x04, 0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0x06,0x02,0xe5,0x04,0x02,0xe5,0x04, 0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x05,0xe5,0x01,0x06,0x02,0xe5,0x04,0x02, 0xe5,0x02,0x05,0x09,0x03,0x13,0x05,0x4c,0x08,0x10,0xe7,0x03,0x02,0xe6,0x03,0x02, 0xe6,0x03,0x02,0xe6,0x02,0xe5,0x01,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02, 0xe6,0xe5,0x01,0x02,0xe6,0x03,0x02,0xe6,0x02,0xe5,0x01,0xe6,0x02,0xe5,0x01,0xe6, 0x03,0x02,0xe6,0x05,0x02,0x01,0x04,0x02,0xe6,0x03,0x02,0xe6,0x03,0x01,0x01,0xe5, 0x06,0x01,0x06,0x09,0x0a,0x01,0x1a,0x2d,0x01,0xe5,0x16,0x02,0xe5,0x03,0xe5,0x01, 0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5, 0x01,0x02,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5, 0x01,0x02,0x02,0xe5,0x01,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x01, 0xe6,0x01,0x05,0xe5,0x04,0x02,0x08,0xe5,0x08,0xe5,0x1a,0xe5,0x2e,0x1b,0x02,0x04, 0xe5,0x07,0x09,0x03,0x05,0x01,0x07,0x03,0x05,0x01,0x04,0x02,0x01,0x01,0x05,0x03, 0xe5,0x03,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x01,0x02,0x02,0x06,0x01,0x02,0x04,0x04, 0x06,0x02,0x03,0x03,0x01,0xe5,0x11,0x03,0x05,0x04,0x13,0x0e,0x2d,0xe8,0x13,0xe5, 0x07,0xe5,0x01,0x05,0xe5,0x01,0xe5,0x03,0xe5,0x01,0xe5,0x01,0xe7,0x01,0x03,0x01, 0xe5,0x01,0xe5,0x01,0xe7,0x01,0x05,0xe5,0x01,0x04,0xe6,0x01,0xe5,0x02,0xe6,0x01, 0x04,0xe6,0x01,0x03,0x01,0xe5,0x01,0x05,0xe5,0x01,0xe5,0x02,0xe6,0x03,0x05,0xe5, 0x01,0xe5,0x02,0xe6,0x01,0xe5,0x03,0xe6,0xe5,0x04,0xe5,0x07,0xe6,0x04,0xe7,0x01, 0x05,0xe5,0x06,0xe6,0x07,0xe5,0x07,0xe5,0x01,0x03,0xe7,0x05,0xe7,0x07,0xe5,0x07, 0xe5,0x07,0xe5,0xe5,0x16,0x01,0xe5,0x07,0xe6,0x06,0xe5,0x07,0xe5,0x07,0xe5,0x07, 
0xe5,0x07,0xe5,0x07,0x09,0xe5,0x07,0xe6,0x06,0xe6,0x06,0xe6,0x06,0xe6,0x08,0xe6, 0x06,0xe5,0x07,0xe5,0x05,0x01,0xe6,0x06,0xe5,0x07,0xe5,0x02,0x04,0x09,0xe6,0x07, 0x12,0x03,0x09,0x1e,0xe5,0xe6,0x16,0x01,0xe5,0xe5,0x05,0x02,0x02,0x03,0x02,0x06, 0x02,0x03,0x02,0xe5,0xe5,0x02,0x02,0x02,0x03,0x02,0xe5,0xe5,0x02,0x02,0x02,0x03, 0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x04, 0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0xe5,0xe5,0x01,0x04,0x01,0xe5,0xe5,0x03, 0x02,0x02,0x02,0x02,0x07,0x01,0x02,0x03,0x03,0x12,0x06,0x09,0x14,0xe5,0x03,0xe5, 0x02,0x17,0x09,0x09,0x02,0x06,0x02,0x02,0xe6,0xe5,0x04,0x01,0x01,0x02,0x02,0xe6, 0xe5,0x06,0x01,0x06,0xe5,0xe5,0x01,0x03,0xe5,0xe5,0x05,0xe5,0xe5,0x04,0x01,0x01, 0x07,0x01,0x06,0xe5,0x01,0xe5,0x08,0x02,0x03,0xe5,0xe5,0x01,0x06,0x01,0x13,0x05, 0x03,0x0e,0xe5,0x16,0x04,0xe5,0x07,0xe5,0x14,0x07,0x02,0x19,0x07,0x05,0x03,0x09, 0x05,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8,0x04,0xe8, 0x04,0xe8,0x04,0xe7,0x01,0x06,0x01,0xe5,0x04,0xe8,0x05,0x01,0x02,0x07,0x09,0x02, 0x02,0xe7,0x01,0xe5,0x05,0x05,0xe7,0x02,0x09,0x08,0x03,0xe7,0x02,0x04,0xe5,0x14, 0x03,0x02,0x02,0xe5,0x01,0x2b,0xe5,0x26,0x02,0x37,0x1b,0x09,0x01,0xe5,0x05,0x03, 0x1b,0xe6,0x23,0x01,0x0a,0x03,0x12,0x03,0x03,0x0d,0x01,0x06,0xe5,0xe5,0x01,0x04, 0x01,0xe5,0x05,0x01,0xe5,0xe5,0x03,0x01,0x07,0x01,0x07,0x01,0x02,0x04,0x01,0x02, 0x04,0x01,0x07,0x01,0x01,0x05,0x01,0x02,0x04,0x01,0xe5,0xe5,0x03,0x01,0x07,0x01, 0x02,0x06,0x01,0x01,0x05,0xe6,0xe5,0xe5,0x02,0xe6,0x01,0x04,0x01,0xe6,0x04,0x01, 0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x07,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x07, 0x01,0x07,0x01,0x06,0xe7,0x0a,0xe7,0x18,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5,0x01, 0x02,0x02,0xe6,0x01,0xe5,0x02,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x01, 0x05,0xe5,0x04,0x02,0xe5,0x01,0x01,0x03,0xe5,0x07,0xe5,0x01,0xe5,0x02,0x02,0xe5, 0xe5,0x05,0xe5,0xe5,0x05,0xe5,0x06,0x02,0x06,0x01,0x08,0x0e,0x04,0xe5,0x01,0x05, 
0xe5,0x1d,0x07,0x1a,0x01,0xe5,0x14,0x03,0x09,0x05,0xe5,0x06,0x02,0xe6,0x0b,0x02, 0xe5,0x01,0x02,0x01,0x04,0x04,0x0a,0x01,0xe7,0x03,0x03,0xe5,0x0c,0x09,0x04,0x04, 0x03,0x01,0x06,0x01,0xe7,0x03,0x01,0xe5,0x0a,0xe6,0x01,0x01,0x04,0x01,0x07,0x01, 0x07,0xe5,0x02,0xe5,0xe5,0xe5,0x01,0xe5,0x01,0x05,0x07,0x0a,0x07,0xe5,0x07,0xe5, 0x0a,0x06,0x03,0xe5,0x16,0x01,0xe5,0x05,0x01,0x03,0x05,0xe5,0x0b,0x03,0x01,0x01, 0xe5,0x02,0x02,0x03,0x03,0x01,0x04,0x02,0x01,0x02,0x01,0x09,0x02,0x01,0xe5,0x05, 0x01,0x02,0x03,0xe5,0xe5,0x06,0x01,0x05,0xe5,0x08,0x08,0xe5,0x1a,0x01,0x03,0x0c, 0x04,0x09,0x01,0x27,0x02,0x16,0x03,0xe5,0xe5,0x0d,0x05,0xe6,0x08,0x0a,0x03,0x0c, 0x01,0x03,0x03,0xe6,0x04,0x01,0x09,0x01,0x07,0x01,0x13,0x07,0x01,0x06,0xe5,0x08, 0x01,0x1a,0x02,0x01,0x03,0x03,0x01,0x06,0x07,0x07,0x04,0x01,0x10,0x03,0x03,0x05, 0xe5,0x07,0x09,0x17,0x03,0x13,0x16,0x09,0x07,0x01,0x03,0x02,0x04,0x04,0x05,0x09, 0x05,0x01,0x01,0x0b,0x0f,0x01,0x0a,0x06,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x07,0x04, 0x09,0x03,0x0a,0x0b,0x02,0x04,0x11,0x02,0x07,0x14,0x0a,0x04,0x02,0xe6,0xe6,0x06, 0x0e,0x03,0x05,0x04,0x05,0xe5,0x02,0x04,0xe5,0x02,0x01,0x01,0x0b,0x04,0x08,0x03, 0x0c,0x01,0x02,0x02,0xe5,0x01,0x01,0x02,0x0d,0x03,0x08,0x01,0xe5,0x0f,0x08,0xe5, 0x03,0x09,0x04,0x07,0x05,0x07,0x0b,0x0c,0x09,0x14,0x18,0xe7,0x2a,0x09,0x01,0x01, 0x04,0xe5,0xe5,0x10,0x02,0x03,0x05,0x06,0x13,0x01,0x07,0x01,0xe5,0x05,0x02,0x02, 0x08,0x02,0x03,0x09,0x0e,0x04,0x01,0x14,0x09,0x03,0x05,0x06,0x02,0x04,0x01,0x02, 0x03,0x02,0x04,0x04,0x01,0x0a,0x0d,0xe5,0xe6,0x05,0x0d,0x0a,0x0d,0x02,0x01,0x06, 0x01,0xe5,0xe5,0x04,0xe5,0x05,0x08,0x02,0xe5,0x07,0xe5,0x01,0x02,0x03,0x02,0x05, 0xe5,0x01,0x14,0x04,0xe5,0x02,0x06,0x02,0x01,0x06,0x01,0xe5,0x02,0xe5,0x16,0x04, 0xe5,0x08,0x1a,0xe5,0x11,0x1e,0x04,0xe5,0xe5,0x05,0x14,0x05,0xe5,0x03,0x03,0xe5, 0x0c,0x0c,0x04,0xe5,0x01,0x01,0x04,0x05,0xe5,0x05,0x01,0xe5,0x0f,0x08,0x09,0x01, 0x09,0xe5,0x06,0x20,0x06,0x04,0x18,0x09,0xe5,0x0b,0x1c,0x0a,0x03,0x02,0xe5,0x01, 
0x03,0x06,0x18,0x04,0x04,0x01,0x09,0xe5,0x1e,0x01,0x02,0x04,0x04,0x02,0x01,0x09, 0x01,0x08,0x06,0x09,0x04,0x0a,0x1b,0x06,0x06,0x0c,0x23,0x2e,0xe5,0x01,0xe5,0x01, 0x10,0x0a,0x02,0x08,0xe6,0x03,0x02,0x01,0xe5,0x08,0x05,0x09,0x02,0x08,0x01,0xe5, 0x04,0xe7,0x08,0x08,0x03,0x09,0x0c,0x08,0x06,0x04,0x04,0x09,0x08,0x04,0x09,0x08, 0xe5,0x04,0x0c,0x09,0xe5,0x03,0x13,0xe5,0x19,0xe5,0x02,0x18,0x09,0x06,0x02,0xe6, 0x03,0x05,0xe6,0xe5,0x04,0x0c,0xe5,0x01,0x18,0x01,0x08,0x05,0x02,0x01,0x09,0x0e, 0x04,0x01,0x02,0x04,0x01,0xe5,0x03,0x07,0x05,0x0e,0x05,0x03,0x07,0x06,0xe5,0xe5, 0x0f,0x09,0x08,0x18,0xe6,0xe6,0x07,0x09,0x08,0x01,0x0f,0x09,0x01,0x13,0x02,0x03, 0x02,0x27,0x09,0xe5,0x04,0x32,0x01,0x24,0x02,0x0c,0x07,0x08,0x24,0x01,0x01,0xe5, 0x09,0x16,0xe6,0x07,0x0b,0xe5,0x01,0x04,0x08,0xe5,0x03,0x03,0x05,0x03,0x09,0xe5, 0x07,0x02,0xe5,0x04,0xe5,0x03,0x03,0x05,0x03,0x0b,0x0b,0xe5,0x05,0x05,0x02,0xe5, 0x12,0x02,0x01,0xe5,0x01,0x0a,0x02,0x01,0x03,0xe6,0x11,0xe5,0x12,0x13,0x01,0xe9, 0xe5,0xe5,0x07,0x0e,0x01,0x01,0x03,0x06,0x02,0x0c,0xe5,0x03,0x02,0x01,0x01,0x0a, 0x03,0x08,0x06,0xe5,0xe5,0x05,0xe5,0x01,0x02,0xe5,0x0a,0x03,0x09,0x07,0xe5,0x01, 0x0b,0xe5,0x08,0xe5,0xe5,0x02,0x0f,0x02,0x02,0xe5,0x01,0x09,0x03,0x02,0xe5,0x0f, 0x01,0x01,0x03,0x26,0x01,0x03,0x02,0x02,0x01,0x08,0x06,0xe5,0xe5,0xe5,0x17,0x01, 0xe5,0xe5,0x03,0xe5,0xe5,0xe5,0x03,0xe5,0xe5,0x02,0x01,0xe5,0x08,0x01,0xe5,0xe5, 0x0d,0xe5,0x07,0x01,0x02,0x01,0x02,0xe5,0x0a,0x05,0xe5,0x03,0x09,0x01,0x07,0x01, 0x01,0x05,0x06,0x01,0x0c,0x07,0x06,0x01,0x02,0x0b,0x0c,0xe5,0x02,0x0c,0x1a,0x04, 0x01,0x01,0x02,0x0a,0x09,0x01,0x11,0x06,0x02,0x01,0x07,0x01,0x07,0x13,0x05,0xe5, 0x07,0xe5,0x0d,0x04,0x18,0x09,0x01,0x07,0x01,0x07,0x06,0x0c,0x01,0x07,0x09,0x01, 0x18,0x04,0x0e,0x13,0x06,0x02,0xe6,0xe7,0x0a,0x1d,0x04,0xe5,0xe6,0x01,0x02,0x09, 0xe5,0x01,0xe5,0xe5,0x01,0x04,0x04,0x04,0x05,0x02,0x02,0x02,0xe5,0x01,0x02,0x02, 0x04,0x04,0x02,0x01,0x09,0x01,0x02,0x01,0x01,0x04,0x02,0x01,0x01,0x02,0x02,0x03, 
0x02,0x02,0x03,0xe5,0x06,0x01,0x01,0x0c,0x04,0x12,0x0a,0x01,0x05,0x03,0x02,0x03, 0x02,0x04,0xe6,0x05,0x02,0x08,0x07,0x02,0x01,0x02,0x09,0xe5,0x02,0x09,0x0f,0x01, 0x02,0x01,0x01,0xe5,0x03,0x03,0xe5,0xe5,0xe5,0x03,0xe5,0xe5,0x01,0x04,0x04,0x0e, 0x04,0x03,0xe5,0x03,0x01,0x01,0xe5,0x03,0x04,0x09,0x04,0x03,0x02,0x04,0xe5,0x01, 0xe5,0x03,0x01,0xe7,0x03,0x02,0xe6,0x02,0x05,0x01,0x07,0x04,0x04,0x03,0x05,0x07, 0xe5,0x09,0x01,0x01,0x09,0x02,0xe7,0x02,0x01,0x02,0x04,0x02,0x01,0x0e,0x02,0xe5, 0x01,0x02,0x08,0x01,0x09,0x01,0x11,0x0b,0x04,0xe5,0xe5,0xe5,0xe5,0xe7,0xe5,0x08, 0x01,0x07,0x01,0x02,0x04,0x01,0x05,0x01,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x05, 0x03,0x04,0x06,0x09,0x05,0x01,0x15,0x07,0x06,0x04,0x05,0x0b,0x01,0x09,0x01,0x07, 0x07,0x19,0x02,0x01,0x02,0x0a,0x09,0x01,0x03,0xe5,0x0b,0x01,0x02,0x04,0x04,0x01, 0xe7,0xe5,0x01,0x04,0x04,0x04,0x01,0x02,0x04,0x04,0x04,0x01,0x02,0x04,0x01,0x02, 0x04,0x01,0x02,0x04,0x01,0x02,0x04,0x04,0x06,0x04,0x04,0x04,0x04,0x04,0x04,0x13, 0x01,0x02,0x04,0x06,0xe5,0xe5,0xe5,0x01,0x0e,0x01,0x0c,0x04,0x0b,0x15,0xe5,0xe5, 0x02,0xe6,0x06,0x45,0x27,0xe5,0x07,0x02,0x1c,0x13,0x02,0x1a,0x02,0x08,0x07,0xe5, 0x18,0x02,0xe5,0x11,0x02,0x06,0x09,0x01,0x02,0x01,0x08,0x01,0x25,0x1f,0x07,0xe5, 0x04,0x02,0x24,0x0c,0xe5,0x09,0xe6,0x06,0xe5,0x07,0x10,0x1d,0x02,0x19,0xe5,0x0b, 0x09,0x0b,0x07,0x02,0xe6,0x07,0x05,0x09,0x09,0x09,0x03,0xe5,0xe5,0x01,0x06,0x02, 0x06,0x02,0x06,0x02,0x09,0x03,0xe5,0xe5,0x01,0x09,0x09,0x09,0x09,0x06,0x04,0x09, 0x09,0x03,0x02,0x02,0x02,0x03,0x02,0x09,0x09,0x04,0x01,0x02,0x03,0x05,0x09,0x09, 0xe6,0x06,0x04,0xe6,0x01,0x05,0xe5,0x01,0x0d,0xe5,0x01,0xe5,0x33,0x08,0xe5,0x07, 0xe5,0x05,0x0c,0x09,0x1d,0x10,0x0e,0x09,0x06,0x02,0x06,0x0c,0x3b,0x09,0x08,0x0f, 0x02,0x01,0x01,0x31,0x09,0x09,0x15,0x09,0x1d,0x1e,0x0a,0x07,0x14,0x3c,0x09,0x1f, 0xe5,0x13,0x09,0x02,0x01,0x04,0x02,0x01,0x02,0x01,0x04,0x02,0x01,0x04,0x02,0x04, 0x01,0x02,0x01,0x02,0x01,0x02,0x04,0x01,0x02,0x01,0x04,0x02,0x04,0x01,0x02,0x04, 
0x01,0x02,0x04,0x01,0x02,0x06,0x02,0x05,0xe5,0xe5,0x01,0x04,0x01,0x02,0x06,0x02, 0x06,0x11,0x01,0x02,0x06,0x04,0x02,0x01,0x02,0x15,0x05,0xe6,0x0a,0x02,0x01,0x1c, 0x21,0x01,0xe5,0x02,0x02,0x01,0x01,0x02,0x02,0x01,0x01,0x05,0x01,0x01,0x02,0x02, 0x01,0x01,0x07,0x01,0x05,0x01,0x01,0x01,0x03,0x01,0x01,0x05,0x01,0x01,0x05,0x01, 0x01,0x07,0x01,0x01,0x05,0x01,0x02,0x04,0x01,0x01,0x02,0x02,0x03,0x02,0x02,0x01, 0x01,0x05,0x02,0x06,0x03,0x01,0xe5,0x01,0x01,0x01,0x05,0x02,0x06,0x01,0x01,0x01, 0x06,0x0f,0x03,0xe5,0x06,0xe5,0x05,0x01,0x01,0x04,0xe5,0x01,0x0f,0x01,0xe5,0x08, 0x16,0x01,0x09,0x09,0x09,0x09,0x03,0x05,0x03,0xe5,0x08,0x04,0x04,0x02,0x0b,0x03, 0x03,0x01,0x03,0xe5,0x01,0x01,0x03,0x07,0x03,0x05,0x13,0x04,0x12,0x05,0x03,0xe5, 0x30,0x02,0x01,0x1c,0xe5,0x13,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x01,0x05,0xe6,0xe5, 0x02,0x01,0xe6,0xe5,0x02,0x01,0xe6,0x04,0xe6,0x06,0xe5,0xe5,0x07,0xe6,0xe5,0x02, 0xe8,0x04,0xe6,0x02,0x03,0xe6,0x02,0x03,0xe6,0xe5,0xe5,0x02,0xe6,0x01,0xe5,0x05, 0xe5,0x07,0xe7,0x06,0x01,0x07,0xe6,0x05,0xe6,0x05,0xe6,0xe5,0x06,0xe5,0x05,0xe6, 0x08,0xe5,0x07,0xe5,0x02,0x03,0xe6,0x06,0xe7,0xe5,0x04,0xe5,0x07,0xe5,0x08,0x01, 0x09,0x06,0x09,0xe5,0x2c,0x07,0x01,0x01,0x09,0x13,0x05,0x09,0x03,0x05,0x03,0x05, 0x03,0x09,0x01,0x09,0x09,0x10,0x02,0x05,0x03,0x09,0x05,0x03,0x11,0x01,0xe5,0x0f, 0x01,0x18,0xe8,0x32,0x03,0x06,0x02,0x06,0x02,0x04,0x01,0x02,0x04,0x01,0xe5,0x07, 0x02,0x06,0x02,0x03,0x02,0x07,0x01,0x06,0x02,0x02,0x04,0x01,0x01,0x02,0x04,0x01, 0xe5,0x07,0xe5,0xe5,0x05,0xe5,0x0d,0x01,0xe5,0xe5,0x05,0x01,0xe5,0x08,0x05,0x02, 0x02,0x11,0x08,0x02,0x06,0xe5,0xe5,0x01,0x17,0x01,0xe5,0x0e,0x1c,0x0d,0x01,0x07, 0x01,0x07,0x01,0x03,0x03,0x01,0xe5,0x01,0x05,0x07,0xe6,0x07,0xe5,0x02,0x03,0x01, 0x03,0x04,0xe5,0x06,0x01,0x0f,0x04,0xe5,0x0c,0x13,0x03,0x01,0x03,0x09,0x04,0xe5, 0x02,0x13,0xe5,0x02,0xe5,0x07,0xe5,0x02,0x19,0x01,0xe5,0x0f,0x09,0x09,0x08,0x02, 0xe5,0x04,0x02,0xe8,0x01,0x02,0xe8,0x04,0x01,0x07,0x01,0x02,0x04,0xe8,0x01,0x02, 
0xe8,0x04,0xe5,0x04,0x02,0x01,0x07,0xe5,0x07,0x01,0x09,0x01,0x07,0xe9,0x03,0x01, 0x02,0x02,0x0c,0xe5,0x01,0x04,0x01,0x02,0x02,0x06,0x04,0xe5,0x04,0x16,0xe8,0x05, 0xe8,0xe5,0x18,0x02,0xe5,0x01,0x09,0xe5,0xe5,0x0d,0x0b,0xe5,0xe5,0x0f,0x0c,0xe5, 0x04,0x02,0x01,0x12,0x0a,0x04,0xe5,0xe5,0x05,0x02,0x01,0x04,0x02,0x07,0x03,0xe5, 0x12,0x05,0xe5,0xe5,0x09,0x05,0x09,0x09,0xe5,0xe5,0x19,0x11,0x1c,0xe5,0xe6,0x0a, 0x01,0x03,0x01,0x07,0x01,0x07,0x01,0x01,0x01,0x03,0x01,0x07,0x01,0x04,0x02,0x01, 0xe5,0x05,0x01,0x03,0x03,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x04,0x02,0xe6,0x02, 0x03,0x01,0x03,0x03,0xe6,0x08,0x01,0x02,0x04,0x01,0x07,0x01,0x04,0x02,0x01,0xe5, 0x05,0xe6,0xe5,0x04,0x01,0xe5,0x02,0x02,0x01,0x04,0x01,0xe8,0x05,0x01,0xe6,0x04, 0x01,0x03,0x03,0x01,0x07,0x01,0xe5,0xe5,0x03,0x01,0x07,0x01,0x0e,0x0f,0x2b,0x0d, 0x05,0xe5,0x01,0x01,0x0d,0x0b,0x03,0xe6,0x01,0x06,0x04,0xe5,0xe7,0x04,0x01,0xe5, 0x04,0x02,0x01,0xe5,0x02,0x11,0x06,0x12,0x0c,0x01,0x05,0x01,0x1b,0x2c,0xe5,0xe7, 0x12,0x0f,0xe5,0x0c,0x04,0x03,0x05,0x04,0x07,0x02,0x02,0x0e,0x0a,0xe6,0x03,0x01, 0xe6,0x09,0x08,0x01,0x05,0x05,0x01,0x04,0x1b,0x06,0x05,0x0e,0x13,0x0d,0x0a,0x12, 0x0a,0x06,0x02,0x01,0x04,0x06,0xe5,0xe5,0x15,0x03,0xe5,0x02,0x0a,0x02,0x01,0x03, 0x13,0xe6,0x02,0x10,0x02,0x02,0x06,0x05,0x03,0x02,0x06,0x01,0x03,0x03,0x03,0x1e, 0x02,0x17,0x05,0x03,0x01,0x04,0x14,0x01,0x01,0x0b,0x22,0x01,0xe6,0x08,0x10,0x05, 0xe6,0x06,0x01,0xe5,0x02,0x02,0xe6,0x10,0x02,0x09,0x06,0x01,0x05,0x03,0x07,0x01, 0x02,0x04,0x05,0xe5,0x01,0x01,0x02,0x04,0x01,0xe5,0x06,0xe5,0xe5,0x02,0x06,0x03, 0x05,0x03,0x01,0x0a,0x01,0x04,0x02,0x01,0x02,0x10,0x01,0x04,0xe5,0xe5,0xe6,0x06, 0x06,0x01,0xe5,0x08,0xe5,0x04,0x06,0x02,0xe6,0x0c,0xe6,0xe6,0x07,0x16,0x07,0x05, 0x08,0x05,0x0a,0x04,0x10,0x02,0x07,0x0a,0x05,0x03,0x09,0x05,0x03,0x05,0x01,0x01, 0x02,0x26,0x01,0x08,0x0d,0x03,0x0a,0x11,0x01,0x13,0x05,0x02,0x0f,0xe9,0x07,0x09, 0x09,0x03,0x09,0x05,0x06,0x13,0x01,0x0a,0xe5,0x03,0xe5,0x08,0x01,0x04,0xe6,0x08, 
0xe6,0x04,0x02,0x06,0xe6,0x02,0x03,0x07,0x09,0x0f,0x07,0x01,0x03,0x03,0xe7,0x01, 0x17,0x01,0x05,0x03,0x0b,0x07,0x03,0x05,0xe5,0x15,0x01,0xe7,0xe5,0x13,0xe5,0xe7, 0x09,0x07,0x13,0x02,0x01,0x0b,0x06,0x02,0xe5,0x04,0xe5,0xe5,0xe5,0x03,0x07,0x03, 0x05,0x0e,0x01,0x07,0x04,0x09,0x0a,0xe5,0x0d,0x01,0xe5,0x05,0x0e,0x0a,0x03,0x02, 0x06,0x0c,0x06,0xe5,0x02,0x04,0xe5,0x07,0x11,0xe5,0xe5,0x05,0x01,0x1d,0xe6,0x05, 0xe5,0x07,0x01,0x02,0x0f,0x01,0x01,0x02,0x02,0x02,0x03,0x10,0x04,0x08,0x15,0x04, 0x05,0x09,0xe5,0xe5,0x0e,0xe5,0x07,0x02,0x05,0x02,0x09,0x0e,0x02,0x08,0xe6,0xe5, 0x08,0xe5,0x01,0x07,0x01,0x03,0xe5,0x05,0x10,0x01,0x06,0x23,0x0a,0xe5,0x04,0x02, 0xe5,0x06,0x13,0xe5,0x01,0x09,0x07,0x0e,0x0a,0x0c,0x0a,0xe5,0x09,0x01,0x19,0x12, 0x02,0x01,0x02,0x09,0x0d,0xe6,0x05,0xe5,0x01,0xe5,0x03,0x09,0x07,0x05,0x07,0x35, 0xe5,0x0a,0x02,0xe5,0x15,0x05,0x08,0x06,0x04,0x1e,0x01,0x0b,0x0b,0x03,0x07,0x02, 0x22,0x04,0x07,0x03,0x0b,0x08,0x05,0x1e,0xe9,0x01,0x1d,0x01,0xe5,0x03,0x08,0x0d, 0x01,0x05,0x02,0x10,0x02,0xe5,0x07,0x01,0x05,0x01,0x06,0x03,0x03,0x31,0x07,0x0d, 0x01,0xe5,0xe6,0x02,0x01,0x02,0x09,0x07,0x01,0x09,0x06,0x02,0x01,0x02,0x04,0x04, 0x04,0x05,0x04,0x0e,0x02,0x03,0x1f,0x0a,0xe5,0x03,0x09,0x02,0xe5,0x06,0xe5,0xe5, 0x06,0x05,0x03,0x06,0x02,0x06,0x02,0x10,0x02,0x05,0xe5,0x07,0x05,0xe5,0xe6,0x09, 0xe5,0x0d,0x05,0xe5,0xe5,0x01,0x09,0x06,0x1d,0x02,0x06,0x09,0x09,0xe5,0x18,0xe6, 0x01,0x20,0x1d,0x09,0x09,0x07,0xe5,0x07,0x01,0x06,0x02,0x0b,0x06,0x02,0x0b,0x07, 0x06,0x05,0x17,0x0e,0x05,0xe5,0x02,0x04,0x13,0x01,0x07,0x01,0x07,0x01,0x07,0xe5, 0x04,0x10,0x06,0x01,0xe6,0x1f,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x0e,0x0e,0x07,0x06, 0x0c,0xe5,0x14,0xe5,0x05,0x0b,0x08,0x09,0xe5,0x0e,0x03,0x08,0xe5,0x04,0x03,0x08, 0xe5,0x01,0xe5,0x03,0xe5,0x11,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x01,0x03,0xe5,0x06, 0x02,0x01,0x03,0xe5,0xe5,0x1a,0x01,0xe6,0x0b,0x05,0x02,0x03,0x01,0xe5,0x05,0x01, 0xe5,0x03,0x01,0x01,0x0f,0x01,0x01,0x07,0xe5,0x03,0x01,0x04,0x09,0x05,0xe5,0x01, 
0x0a,0xe6,0x01,0x05,0xe5,0x0a,0x07,0x05,0xe5,0xe5,0x08,0xe5,0x01,0x02,0x01,0xe5, 0x08,0x02,0x10,0x04,0x07,0x06,0x02,0x0a,0x01,0xe5,0x0f,0x03,0xe5,0xe5,0x01,0x01, 0x1e,0x01,0x05,0x01,0x07,0x06,0x01,0xe5,0x08,0x06,0xe5,0xe5,0x01,0x10,0x01,0x02, 0x15,0xe5,0x02,0x0b,0x07,0x09,0x01,0x13,0x04,0x01,0x04,0x07,0x04,0x01,0x02,0x01, 0x11,0x01,0x0b,0xe5,0x01,0x01,0x16,0x08,0x0c,0x01,0x01,0xe5,0xe5,0x1d,0x06,0x02, 0x01,0x07,0x06,0x02,0xe6,0x06,0x06,0xe5,0x01,0xe5,0x0d,0xe5,0x02,0x11,0x06,0xe5, 0xe5,0xe5,0x06,0x05,0xe5,0x03,0x09,0x01,0x25,0x06,0xe5,0xe5,0x08,0x06,0xe5,0xe5, 0x05,0xe5,0xe5,0x05,0x02,0x01,0x07,0x10,0x0d,0x02,0x01,0x03,0xe7,0x14,0x01,0x09, 0x03,0x01,0xe5,0x09,0x07,0x06,0x03,0xe5,0x03,0x08,0xe5,0x03,0x01,0x07,0x01,0x02, 0xe5,0x01,0xe5,0xe5,0x06,0x05,0x02,0x05,0x03,0xe5,0x03,0xe5,0x01,0x01,0x02,0x02, 0x06,0x06,0x11,0x01,0xe5,0x03,0x04,0x01,0x03,0x02,0xe5,0x03,0x03,0x13,0x06,0x01, 0xe5,0x0f,0x02,0x02,0x05,0x11,0xea,0xe5,0x13,0x01,0x01,0x05,0x03,0x02,0x01,0x04, 0x05,0x03,0x05,0x02,0x01,0x02,0xe6,0x0d,0x01,0x01,0xe5,0xe5,0xe5,0x04,0x03,0x02, 0x01,0xe5,0xe5,0xe5,0x04,0x01,0x06,0xe5,0x03,0x02,0xe6,0xe5,0x01,0x06,0x04,0x03, 0xe5,0x02,0x01,0x01,0xe5,0x10,0x01,0xe5,0x03,0x04,0x01,0x01,0x05,0x04,0x02,0x05, 0x09,0x01,0x01,0xe6,0x02,0x02,0x01,0x04,0x09,0x02,0x01,0xe5,0x02,0x04,0x04,0x09, 0x01,0x01,0x01,0xe5,0x01,0x14,0x01,0x09,0x07,0x01,0x04,0x02,0x06,0x02,0x07,0x01, 0x03,0xe5,0x0d,0x01,0x04,0x01,0x02,0x01,0x05,0x01,0x07,0x06,0xe6,0x01,0x04,0xe7, 0x0a,0x07,0x01,0x01,0x0e,0x01,0x05,0x04,0x01,0x07,0x01,0x09,0x13,0x02,0x0e,0x01, 0x13,0x05,0x01,0x01,0x0d,0xe6,0xe7,0x15,0x01,0x07,0x06,0x01,0xe5,0xe5,0x04,0x01, 0x07,0x01,0x01,0xe5,0xe7,0x01,0x10,0x02,0x01,0x03,0xe5,0x08,0x01,0xe5,0xe5,0x06, 0x01,0x02,0x01,0x02,0x01,0x02,0x01,0x02,0x01,0x02,0x06,0x04,0x04,0x01,0x03,0xe5, 0x08,0x01,0x0a,0x01,0x02,0x04,0x01,0x07,0x04,0x22,0x01,0x11,0x09,0x01,0x0d,0xe5, 0x01,0xe5,0x08,0x02,0x35,0x16,0x01,0xe5,0x0c,0x16,0xe6,0x08,0x07,0xe5,0x09,0x02, 
0x1a,0x02,0x24,0x02,0x06,0x27,0x14,0xe5,0x01,0x0a,0x37,0xe5,0x01,0x05,0xe5,0x01, 0xe5,0x07,0x0f,0xe5,0x01,0x10,0x02,0xe5,0x31,0x06,0x02,0x27,0x0b,0x11,0x09,0x0b, 0x13,0xe7,0x09,0x03,0x09,0x09,0x09,0x09,0x06,0x02,0xe5,0xe7,0xe5,0x01,0xe5,0x07, 0x03,0x05,0x04,0x01,0x02,0xe6,0xe5,0x04,0x09,0x09,0x09,0x04,0x01,0x04,0x09,0x09, 0x05,0x03,0x05,0x03,0x06,0x02,0x06,0x02,0x09,0x05,0xe5,0x01,0x04,0x01,0x02,0x09, 0x04,0xe6,0x01,0x03,0x02,0x02,0x09,0x02,0x09,0xea,0x3a,0x02,0x09,0x1d,0x1d,0x09, 0x09,0x29,0x06,0x02,0x08,0x13,0xe5,0x08,0x13,0x08,0xe5,0x08,0x09,0x04,0x06,0x3d, 0x05,0x01,0x1d,0x1d,0x09,0x0b,0x27,0x0a,0x1e,0x09,0x13,0x07,0x09,0x0b,0x08,0xe5, 0xe6,0x22,0x11,0x01,0x07,0x02,0x01,0x03,0xe5,0x08,0x01,0xe5,0x04,0x02,0x06,0xe5, 0xe7,0x03,0x02,0x07,0x01,0x02,0x06,0x02,0x01,0x02,0x01,0x02,0x01,0x06,0x02,0x04, 0x01,0x04,0x02,0x01,0x07,0x01,0x07,0x01,0x02,0x06,0x07,0x01,0x02,0x04,0x13,0x01, 0x09,0x07,0x08,0x02,0x06,0x02,0x10,0x01,0x1e,0x04,0xe5,0x0c,0x02,0x01,0xe5,0x02, 0x04,0xe6,0x07,0xe5,0xe5,0x01,0xe5,0xe7,0x01,0x04,0xe5,0x02,0x04,0xe6,0x06,0xe5, 0x05,0x01,0x01,0x01,0xe5,0x01,0x03,0x05,0x01,0x01,0x07,0x01,0x01,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x01,0x01,0x01,0x05,0x01,0x05,0x01,0xe5,0x07,0x01,0x05,0x03, 0x09,0x05,0x01,0x01,0x05,0x01,0xe5,0x05,0x01,0xe5,0x08,0x06,0x02,0x0d,0x01,0x01, 0xe5,0x21,0x04,0x0b,0xe5,0xe5,0x08,0x07,0x01,0x07,0x0b,0x07,0x01,0x01,0x02,0x04, 0x04,0x0e,0x03,0x05,0x03,0x0f,0x01,0x11,0x01,0x03,0x03,0x01,0x03,0x05,0x07,0x01, 0x03,0x03,0x0f,0xe5,0x01,0x01,0x09,0x1d,0x0e,0x03,0xe5,0x13,0xe5,0x07,0xe5,0x01, 0x04,0xe6,0x07,0xe6,0xe5,0x03,0xe5,0x02,0x05,0xe6,0x06,0xe6,0x04,0x01,0xe5,0x01, 0x05,0xe6,0x06,0xe5,0x01,0x05,0xe5,0x07,0xe6,0x05,0xe7,0x04,0x01,0xe5,0x03,0x05, 0xe6,0xe5,0x02,0xe7,0x06,0xe7,0xe5,0x04,0xe5,0x01,0x05,0xe6,0xe5,0x02,0xe6,0x02, 0x05,0xe6,0x04,0x01,0xe5,0x07,0xe6,0xe5,0x04,0xe6,0x04,0xe6,0x06,0xe6,0x08,0xe6, 0x06,0xe5,0x08,0xe6,0x22,0xe6,0x03,0x03,0xe5,0x07,0xe5,0x04,0x09,0x01,0x09,0x09, 
0xe5,0x05,0x01,0x09,0x13,0x15,0x09,0xe5,0x01,0xe5,0xe6,0xe7,0x01,0x0e,0x09,0x03, 0xe6,0x02,0x07,0x0b,0x07,0x01,0x09,0xe5,0x01,0x06,0x04,0x1c,0xe6,0xe5,0x22,0x18, 0x01,0xe5,0x0a,0x09,0x03,0x02,0x02,0xe5,0x07,0x09,0x13,0x06,0x0b,0xe5,0xe5,0x05, 0x02,0xe5,0x03,0xe6,0xe5,0x05,0x14,0x01,0x06,0x0a,0x08,0x0a,0x01,0x07,0x07,0xe5, 0x05,0x02,0x1c,0xe8,0x21,0x04,0x18,0x09,0x05,0x01,0x01,0x05,0x03,0x07,0x01,0x09, 0x0b,0x03,0x03,0x01,0x02,0x04,0x03,0x01,0x0f,0x05,0x0d,0x01,0x07,0x09,0x05,0x03, 0x0b,0x01,0x01,0x05,0x07,0x01,0x07,0xe5,0x02,0xe5,0x03,0x03,0xe5,0x1b,0xe6,0xe5, 0x02,0x1f,0xe5,0x02,0x05,0x09,0x03,0x02,0x04,0x09,0xe5,0xe5,0xe5,0xe6,0xe9,0x01, 0xe5,0x05,0x04,0x06,0xe5,0x0b,0x01,0x03,0x04,0x01,0xe5,0x03,0xe5,0xe5,0x04,0x02, 0x01,0x01,0x02,0xe5,0x01,0x06,0x03,0x01,0x02,0x0c,0x04,0x01,0x03,0x08,0x0b,0xe6, 0x09,0x01,0x07,0x02,0x09,0x02,0x1e,0xe6,0xe5,0x37,0x06,0xe5,0xe5,0x05,0x01,0xe5, 0x1c,0x10,0x09,0x02,0x07,0x01,0x07,0xe5,0x1c,0x02,0x01,0x04,0x02,0x10,0x09,0x09, 0x01,0xe5,0x05,0x02,0x0b,0x04,0x02,0xe5,0x08,0x18,0xe6,0x10,0x01,0x01,0x05,0x01, 0x07,0x01,0x02,0x04,0x01,0x07,0x01,0x03,0x03,0x01,0xe6,0x01,0x02,0xe6,0x01,0x04, 0x01,0xe6,0x04,0x01,0x07,0xe6,0xe5,0x04,0xe7,0x01,0x03,0x01,0x06,0xe5,0xe5,0x06, 0x01,0x06,0x02,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0xe5,0xe5, 0x01,0x01,0xe5,0xe5,0x03,0x01,0x02,0x01,0x02,0x01,0x04,0x02,0x01,0x01,0x02,0x02, 0x01,0x01,0x04,0xe5,0xe5,0xe5,0x04,0x01,0x02,0xe5,0x02,0x01,0x01,0x05,0xe6,0x06, 0x01,0x0c,0x01,0x0c,0x21,0x01,0x04,0x05,0x05,0xe7,0x01,0x03,0x05,0x03,0xe5,0x0b, 0xe5,0xe6,0x0e,0x02,0x09,0xe6,0x03,0x11,0x03,0x13,0xe5,0x01,0xe5,0x01,0x05,0x03, 0x07,0xe5,0x0b,0xe5,0x0f,0x05,0xe5,0x01,0x04,0x02,0x01,0x05,0x01,0x04,0x04,0xe5, 0x16,0x01,0xe7,0x14,0x07,0x0c,0x08,0x06,0x01,0xe5,0xe5,0x06,0x0b,0x07,0x06,0x07, 0x04,0x07,0x05,0x03,0x08,0x01,0x07,0x0b,0x09,0x01,0x07,0xe5,0x09,0x0b,0x02,0x02, 0x09,0x09,0x02,0x09,0x08,0x05,0x04,0x04,0x04,0x02,0x06,0x10,0x01,0xe7,0xe5,0x2f, 
0x09,0x01,0x01,0x07,0x02,0x09,0xe5,0xe5,0x02,0x0f,0x01,0x06,0xe5,0x06,0x03,0x07, 0x09,0x03,0x04,0x29,0xe5,0x04,0x07,0x05,0x08,0xe5,0xe5,0xe5,0x08,0x0b,0x07,0x01, 0x04,0x03,0x1d,0xe6,0x06,0x01,0x1c,0x01,0x0b,0x01,0x07,0x02,0x06,0x18,0x02,0x13, 0x05,0x02,0x04,0xe5,0x03,0x01,0x01,0xe5,0x06,0x08,0x01,0x01,0x13,0x01,0xe5,0x05, 0x03,0xe5,0x05,0x02,0x02,0x01,0xe5,0x04,0xe5,0x08,0x0b,0xe5,0x03,0x01,0x01,0x07, 0x0b,0x07,0x01,0x0d,0xe6,0x01,0x33,0x03,0x04,0xe5,0x02,0x05,0x04,0x02,0x01,0x11, 0x0b,0x0a,0x09,0x06,0xe5,0xe5,0xe5,0xe5,0x03,0x01,0x05,0x01,0x05,0x04,0x09,0xe5, 0x06,0xe6,0x01,0x03,0x0a,0x05,0x03,0x05,0x05,0x03,0x08,0x04,0x03,0x01,0x0a,0x06, 0x05,0x05,0x03,0x0a,0x01,0xe7,0x1b,0x08,0x0e,0x01,0x03,0x05,0xe5,0x01,0x05,0x08, 0xe5,0x02,0x04,0xe5,0x08,0x02,0x06,0x03,0x05,0x03,0x03,0x01,0x03,0x05,0x03,0x02, 0xe5,0x02,0x03,0x03,0x01,0x02,0x06,0x07,0x01,0x09,0x09,0x07,0x0b,0x09,0x09,0x0d, 0xe5,0x07,0xe5,0x02,0xe5,0x01,0x04,0x0f,0x01,0xe7,0x19,0x04,0x0e,0x06,0x02,0x06, 0x07,0x01,0x04,0xe5,0x02,0x02,0x06,0x06,0x02,0x02,0x06,0x09,0x02,0xe5,0x04,0x09, 0x02,0x05,0xe5,0xe5,0x08,0x02,0x06,0x03,0x05,0x04,0x04,0x02,0x06,0x0e,0x04,0x02, 0x0b,0x07,0x06,0x02,0x04,0x01,0x01,0x07,0x01,0x03,0x03,0x01,0x0e,0xe6,0xe5,0x2f, 0x06,0xe5,0x01,0x03,0x02,0x02,0x01,0x03,0xe5,0xe7,0xe5,0x01,0x06,0x02,0x09,0xe5, 0x07,0xe5,0x01,0x05,0xe5,0x02,0x04,0xe5,0x02,0x04,0xe5,0x05,0x01,0x02,0x06,0x02, 0x06,0x02,0x01,0x04,0x02,0x03,0x02,0x02,0x01,0x03,0xe5,0x01,0x0a,0xe5,0x03,0x02, 0x03,0x05,0xe6,0x06,0x08,0x02,0x02,0x0f,0xe5,0x10,0x03,0x03,0x16,0x09,0x14,0xe5, 0x05,0xe5,0x03,0x03,0xe5,0xe5,0x0d,0x01,0x08,0x01,0x09,0xe6,0x06,0x0b,0x04,0x03, 0xe6,0x02,0xe5,0x01,0x02,0x07,0xe6,0xe5,0x05,0xe6,0x05,0xe5,0x07,0xe6,0x07,0xe5, 0x06,0x10,0x09,0x01,0xe5,0x0a,0x19,0x14,0xe5,0xe5,0x02,0x07,0x19,0x0f,0x01,0x0e, 0x03,0x26,0xe5,0x12,0x03,0x06,0x07,0x02,0x08,0x01,0x09,0x07,0x09,0xe5,0x07,0x01, 0x03,0x09,0x0b,0x09,0x06,0x01,0xe5,0x02,0x06,0x25,0x01,0xe7,0x07,0x09,0x09,0x06, 
0x10,0x01,0x06,0x06,0x02,0xe5,0xe5,0x03,0x04,0x03,0x05,0x06,0xe5,0xe5,0x04,0xe6, 0xe5,0x05,0xe5,0xe5,0x02,0xe5,0xe6,0xe5,0x03,0x01,0xe5,0xe5,0x0f,0x09,0x09,0x09, 0x09,0xe5,0x09,0x10,0xe5,0x07,0x05,0xe5,0x11,0x07,0x01,0x0a,0x0e,0x01,0xe5,0x09, 0x2a,0x01,0x01,0x07,0x02,0x04,0x01,0xe5,0x04,0x01,0xe5,0x05,0x01,0xe5,0xe5,0x04, 0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x02,0x03,0x04,0x02,0x06,0xe5,0x01, 0x02,0x02,0x09,0x09,0xe5,0x02,0xe6,0x01,0x0a,0x0f,0x02,0x08,0xe5,0x08,0x09,0x03, 0x05,0x0b,0x0c,0x02,0xe5,0x05,0x39,0x14,0x09,0x0b,0x02,0x13,0x19,0x09,0x09,0x09, 0x09,0x09,0x0c,0x09,0x06,0x0a,0x09,0x07,0x01,0x06,0x02,0x18,0xe6,0xe5,0xe6,0x0c, 0x09,0xe6,0x06,0xe5,0x0b,0xe5,0xe5,0x02,0x01,0xe5,0x01,0x03,0x02,0x01,0xe5,0x02, 0x02,0xe5,0xe5,0x05,0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x08,0x03,0x05,0x09,0x09,0x06, 0x01,0x01,0xe5,0xe5,0xe5,0x01,0xe5,0xe5,0x02,0x02,0x01,0xe5,0x02,0x02,0x01,0xe5, 0x02,0xe5,0xe5,0xe6,0x02,0xe5,0x07,0x08,0xe5,0x07,0x09,0x03,0xe5,0x28,0x02,0xe5, 0x09,0x02,0xe5,0xe6,0xe5,0xe5,0x0e,0xe5,0x07,0xe5,0x04,0x05,0xe5,0x04,0x01,0x02, 0xe5,0x07,0x01,0x02,0x01,0x02,0x04,0xe5,0x02,0x02,0x01,0xe5,0x02,0x01,0x07,0x09, 0x01,0x02,0xe5,0x02,0x04,0xe5,0x02,0x02,0x06,0x02,0x08,0x09,0xe5,0x02,0x01,0x02, 0x04,0x01,0x02,0x04,0x01,0x02,0x08,0xe6,0x19,0xe5,0x2c,0x01,0xe5,0x0a,0x01,0x02, 0xe5,0xe6,0x01,0x01,0x16,0x11,0x04,0x04,0x04,0x04,0x02,0x01,0x04,0x01,0xe5,0xe5, 0x03,0x04,0x04,0x04,0x04,0x02,0x01,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x06, 0x01,0x02,0x04,0x02,0x01,0x04,0x01,0xe5,0xe5,0x03,0x01,0x02,0x04,0x01,0x02,0x02, 0x09,0x01,0x01,0xe5,0xe5,0xe5,0x0b,0x02,0x09,0xe6,0x01,0x06,0x01,0x06,0xe5,0x01, 0x01,0x07,0x03,0x02,0xe5,0x02,0x04,0xe5,0xe6,0x01,0x28,0x06,0x01,0xe5,0x05,0x01, 0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0x01,0xe5,0x05,0x01,0xe5, 0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x04,0xe5,0x01,0x01, 
0x04,0x0c,0x06,0x0c,0x13,0x01,0x07,0x06,0x06,0x02,0xe6,0xe7,0x09,0x20,0x03,0x02, 0x02,0x02,0x06,0x02,0x06,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01, 0x02,0x02,0xe5,0x01,0x02,0x02,0x06,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02, 0xe5,0x01,0x04,0x02,0xe5,0x01,0x02,0x02,0xe5,0x01,0x02,0x02,0x06,0x02,0x03,0x02, 0x02,0x01,0x01,0xe5,0x03,0x0c,0x01,0x0f,0x01,0xe6,0x11,0xe5,0x01,0x01,0x07,0x01, 0x0c,0x07,0x02,0xe6,0x01,0x06,0x01,0xe5,0x1d,0x03,0x04,0xe5,0x02,0x05,0x03,0x05, 0x03,0x05,0x03,0x02,0x01,0xe5,0x02,0x02,0x01,0xe5,0x02,0x05,0x03,0x05,0x03,0x02, 0x01,0xe5,0x02,0x04,0xe5,0x02,0x02,0x01,0x02,0x03,0x02,0x01,0xe5,0x02,0x05,0x03, 0x04,0xe5,0x02,0x01,0x01,0xe6,0x02,0x01,0x02,0xe5,0x02,0x09,0x05,0x03,0x09,0x02, 0x01,0xe5,0x02,0x01,0x02,0x04,0x01,0x01,0xe6,0x01,0x06,0x02,0x0a,0x09,0x02,0x03, 0x02,0x2a,0x04,0xe7,0x02,0x02,0x02,0x03,0x02,0x02,0x09,0x06,0x01,0xe5,0x02,0x05, 0x03,0x02,0x01,0xe5,0x02,0x02,0x02,0x06,0x01,0xe5,0x05,0xe7,0x05,0x01,0x02,0x09, 0x09,0x04,0x02,0x01,0x06,0x02,0x0b,0x01,0x01,0xe5,0x01,0x01,0x18,0x01,0xe5,0x03, 0x04,0x09,0x06,0x02,0x01,0x02,0x14,0xe5,0xe7,0x12,0xe5,0x15,0x0b,0x02,0x01,0xe6, 0x01,0x02,0x02,0x03,0x01,0xe5,0x05,0x01,0xe5,0xe5,0x03,0x02,0x01,0x01,0x02,0x02, 0x01,0x01,0x02,0x02,0x02,0x03,0x01,0xe5,0xe5,0xe5,0x01,0x01,0xe5,0xe5,0xe5,0x01, 0x01,0xe5,0x07,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe7,0x03,0x01,0xe8,0x02,0x01, 0xe7,0x03,0x02,0x04,0x01,0x17,0xe5,0x18,0xe5,0x01,0xe7,0xe7,0x15,0x02,0xe6,0x03, 0xe5,0x2d,0x02,0x02,0x02,0xe5,0x01,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x01,0x01, 0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02, 0x02,0x05,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02, 0x06,0x02,0x13,0x02,0x10,0xe5,0x07,0x06,0x02,0x02,0x03,0x16,0x02,0x01,0x05,0x32, 0xe5,0x07,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6, 0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x06,0xe5,0xe6,0x04, 
0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x04,0xe5,0xe6,0x1c,0x09, 0xe5,0x07,0x01,0x03,0x0a,0x02,0xe5,0x11,0x02,0xe6,0x0d,0x09,0x09,0x09,0x02,0x03, 0xe5,0xe7,0x04,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe6,0x04,0x01, 0xe6,0x04,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x07,0x01,0xe5, 0xe5,0x03,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe6, 0x06,0x06,0x02,0x09,0x03,0x05,0x04,0x01,0x02,0x02,0xe6,0xe5,0x01,0xe5,0x04,0x02, 0xe6,0x01,0x01,0x02,0x04,0x01,0x06,0xe5,0x01,0xe5,0x32,0x01,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0x09,0x09,0x0b,0x09,0x09,0x09,0x09,0x09,0x11,0xe5,0x08,0x09,0x09, 0x10,0x02,0x09,0x09,0x04,0x06,0x35,0x09,0x02,0x06,0x02,0xe5,0x04,0x02,0xe5,0x04, 0x02,0x06,0x02,0x06,0x02,0x06,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x06,0x02, 0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0x0e, 0x08,0x0a,0x07,0x14,0x08,0x0b,0x08,0x02,0xe5,0x1d,0x02,0x01,0x04,0x0e,0xe7,0x03, 0x02,0xe6,0x03,0x02,0x01,0x04,0x02,0xe6,0x03,0x02,0xe6,0x02,0xe5,0x01,0xe6,0x03, 0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0x01,0x05,0xe5,0x01,0x01,0x04, 0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x03,0x02,0xe6,0x02,0xe5, 0xe5,0x03,0x02,0x13,0x01,0x07,0x01,0x0a,0xe5,0x01,0x05,0x19,0xe6,0x20,0x01,0xe5, 0x0f,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x04, 0x02,0xe5,0x03,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x03,0xe5,0x01,0xe5,0x03,0xe5,0x01, 0xe5,0x06,0x02,0xe5,0x04,0x02,0xe5,0x03,0xe5,0x01,0xe5,0x04,0x02,0xe5,0x04,0x02, 0xe5,0x01,0x01,0xe5,0x01,0xe5,0x04,0x01,0xe5,0x07,0x01,0x07,0xe5,0x02,0x02,0x01, 0x01,0x05,0x01,0xe5,0x07,0x01,0x05,0x03,0x04,0x03,0x0d,0xe5,0x01,0xe5,0x11,0x0d, 0x01,0x15,0x07,0x09,0x09,0x09,0x09,0x09,0xe5,0x07,0x09,0xe5,0x07,0x0b,0x09,0x09, 0x01,0x07,0x01,0x07,0xe5,0x07,0x01,0x01,0xe5,0x01,0x01,0x04,0x04,0x04,0x04,0x07, 0x01,0x13,0x04,0x1d,0x01,0x01,0x15,0xe5,0x07,0xe6,0xe5,0x04,0xe5,0x07,0xe5,0x05, 
0xe7,0x01,0xe5,0x01,0xe7,0x01,0x03,0xe7,0x01,0xe5,0x01,0xe7,0x01,0xe5,0x01,0xe7, 0x01,0xe5,0x01,0xe7,0x01,0x03,0xe7,0x01,0xe5,0x01,0xe7,0x01,0x03,0xe7,0x01,0x03, 0xe7,0xe5,0x01,0x04,0xe6,0x01,0xe5,0x01,0xe7,0x01,0x04,0xe6,0x01,0x05,0xe5,0x01, 0x05,0xe5,0x01,0x05,0xe6,0xe5,0x04,0xe6,0x04,0xe7,0x01,0x05,0xe6,0xe5,0x04,0xe5, 0x01,0x03,0xe7,0x01,0xe5,0x01,0x01,0xe5,0x07,0xe5,0x07,0xe5,0x06,0xe5,0x01,0x2e, 0xe5,0x04,0x01,0x06,0x02,0xe6,0x03,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe6,0x03, 0x02,0xe5,0x04,0x02,0xe6,0x03,0x02,0xe5,0x04,0x02,0x06,0x02,0xe5,0x04,0x04,0xe5, 0x04,0x02,0xe5,0x04,0x02,0xe5,0x04,0x02,0xe5,0x07,0xe6,0x06,0xe5,0x04,0x03,0x0f, 0x03,0x06,0x01,0x07,0x01,0xe5,0x04,0x03,0xe5,0x03,0x02,0x09,0x12,0x35,0x01,0x03, 0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0xe5,0x01,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01, 0x03,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01,0x03,0x01,0xe5,0x01, 0x03,0x01,0xe5,0x01,0xe5,0x01,0x01,0xe5,0x03,0xe5,0x04,0x02,0x03,0x01,0xe5,0x01, 0xe5,0x04,0x02,0xe5,0x07,0x09,0x05,0xe5,0x0e,0x02,0xe5,0x09,0x01,0x02,0x04,0x01, 0xe5,0x01,0x01,0xe5,0x04,0xe5,0xe7,0x03,0x04,0x14,0xe5,0x33,0x03,0x02,0xe5,0xe5, 0x02,0x02,0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x01,0xe5,0x01,0xe5,0xe5,0x02,0x02,0xe5, 0xe5,0x01,0xe5,0x01,0xe5,0xe5,0x02,0x02,0xe5,0xe5,0x01,0xe5,0x01,0xe5,0xe5,0x02, 0x02,0xe5,0xe5,0x02,0x02,0xe5,0x01,0xe5,0x02,0x02,0x02,0x02,0xe5,0x01,0xe5,0xe5, 0x02,0x02,0x02,0x03,0x05,0x03,0x05,0x02,0xe5,0x02,0x04,0x0c,0xe6,0x03,0x08,0x0e, 0xe5,0x08,0x0d,0x0f,0x01,0xe5,0x23,0x09,0x08,0x03,0x03,0x05,0x03,0x05,0x03,0x05, 0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x03,0x05,0x05,0x09,0x05,0x03, 0x06,0x02,0x09,0x09,0x05,0xe5,0x04,0x0c,0x06,0x06,0x01,0xe5,0x08,0x02,0xe5,0x04, 0x02,0xe5,0x0b,0x10,0xe6,0xe5,0x45,0x10,0x20,0x06,0x0c,0x07,0x05,0x08,0x2c,0x0d, 0x02,0xe5,0xe5,0x0f,0xe5,0xe5,0x08,0x06,0xe5,0xe5,0x05,0x02,0x06,0xe5,0xe5,0x09, 0x04,0x01,0x0b,0x05,0x01,0x07,0x01,0xe5,0x05,0x01,0xe5,0xe5,0x03,0x01,0x01,0x05, 
0x01,0x07,0x01,0x02,0x04,0x01,0x07,0x01,0xe5,0x04,0xe5,0xe5,0x06,0xe7,0xe5,0x03, 0xe7,0xe5,0x03,0x01,0x01,0x05,0x01,0xe5,0x05,0x01,0xe7,0x05,0x01,0x01,0x05,0xe6, 0xe5,0x04,0x01,0x01,0x05,0x01,0x02,0x04,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x02, 0xe5,0x01,0xe5,0xe5,0x01,0x04,0x01,0x03,0x03,0x01,0x01,0x05,0x01,0x01,0xe6,0x02, 0x01,0x02,0x01,0x02,0x01,0x02,0xe5,0x02,0x01,0x0b,0x02,0x02,0x13,0x09,0x20,0xe5, 0x02,0x05,0x03,0x01,0x02,0xe5,0xe5,0x05,0x04,0x04,0x09,0xe5,0x07,0xe5,0xe5,0xe5, 0x0d,0x0c,0x02,0x05,0xe5,0x13,0x01,0x0d,0x02,0x08,0xe5,0x11,0xe5,0x0f,0x10,0x02, 0x1c,0xe7,0x1c,0x21,0x09,0x02,0x02,0x02,0x03,0xe5,0x02,0x02,0x02,0x03,0x0b,0x03, 0x04,0x01,0x06,0x05,0xe5,0xe5,0x02,0x0a,0x02,0x06,0xe5,0x09,0x0c,0x01,0x07,0x01, 0x01,0x05,0x03,0x10,0x02,0x01,0x06,0x05,0x03,0x04,0xe5,0x1f,0x07,0x01,0xe7,0x02, 0xe5,0x28,0x03,0x05,0x03,0x04,0x0a,0x05,0x02,0x0a,0x08,0x02,0x01,0x04,0x01,0x0d, 0x05,0x0a,0x0b,0x0c,0x05,0x0a,0x0d,0x05,0x05,0x03,0x09,0x03,0x03,0x28,0x02,0x13, 0xe5,0xe6,0x01,0x01,0x03,0x14,0x05,0x13,0x05,0x03,0x01,0x0f,0x04,0x06,0x01,0xe5, 0x03,0xe8,0x06,0x02,0x02,0x03,0xe6,0x02,0x0b,0x01,0x01,0x04,0x05,0x06,0x01,0x06, 0xe7,0x06,0x01,0xe5,0x01,0x01,0x01,0x01,0xe5,0x05,0xe5,0x06,0xe5,0xe6,0x05,0xe7, 0x07,0xe5,0x03,0x03,0x01,0x07,0x02,0x04,0xe6,0x06,0x09,0xe6,0x0c,0xe5,0x02,0xe5, 0x01,0x1d,0x05,0x0a,0x08,0xe5,0x0f,0x03,0x04,0x0b,0x05,0x11,0xe5,0x03,0x0e,0x01, 0x02,0x05,0x07,0x01,0xe5,0xe5,0x01,0x05,0x03,0xe5,0xe5,0xe5,0xe5,0x07,0x01,0x0b, 0x05,0xe5,0x02,0x04,0x03,0xe5,0xe5,0xe5,0x08,0x05,0x03,0x05,0x04,0x04,0x05,0x08, 0x10,0x02,0xe5,0x1b,0x12,0x04,0x09,0x05,0x01,0xe5,0x01,0x04,0x05,0x06,0x02,0x01, 0x01,0x05,0x06,0x02,0x01,0x09,0x01,0x05,0x04,0x03,0x05,0x01,0x03,0x02,0x05,0xe5, 0x02,0x13,0x04,0xe5,0x12,0x05,0x02,0xe5,0x08,0x01,0xe5,0x0e,0xe5,0x03,0x07,0x1e, 0x02,0xe7,0x10,0x1e,0x17,0xe5,0x1f,0x20,0x0c,0x01,0x05,0x03,0x01,0x14,0x06,0x03, 0x0a,0x04,0xe5,0x07,0x02,0x06,0xe5,0x0a,0x35,0xe6,0xe5,0x02,0x06,0x10,0x2a,0x02, 
0x10,0x01,0x05,0x01,0x01,0x06,0x1e,0x05,0x06,0x10,0x09,0x0e,0x0d,0x04,0xe5,0x05, 0x04,0x09,0x16,0x0d,0x05,0x08,0x0d,0xe5,0xe6,0x16,0x0c,0x0f,0xe6,0x0a,0x04,0x08, 0xe8,0x11,0x01,0x11,0x02,0x02,0x16,0xe8,0x0a,0x0f,0x03,0x0e,0x01,0x01,0x03,0x08, 0x09,0x06,0xe5,0x0c,0xe7,0x04,0x01,0x08,0x0e,0x0e,0xe5,0xe5,0x17,0x17,0x05,0x02, 0x1a,0x05,0x06,0x06,0x14,0x1b,0x03,0x04,0x01,0x14,0x06,0x06,0x02,0x03,0x0d,0x1e, 0x05,0x2d,0xe8,0x23,0x01,0x09,0x09,0x06,0x01,0x08,0x04,0x02,0x0a,0x06,0x01,0x28, 0x05,0x04,0xe5,0x01,0x06,0x02,0x06,0x08,0x09,0xe5,0x15,0xe5,0x0b,0x01,0xe5,0x02, 0x04,0x08,0x06,0xe5,0x25,0x01,0xe5,0x2e,0x0a,0x12,0x07,0x02,0x09,0x06,0x04,0x09, 0x14,0x0c,0x02,0xe5,0x05,0x01,0x06,0x0b,0x02,0xe5,0x04,0x01,0xe6,0x01,0x03,0x03, 0x0d,0x01,0x06,0x02,0x01,0x02,0x01,0x02,0x09,0x07,0x01,0x0c,0x15,0xe8,0x30,0x21, 0xe5,0x02,0x0f,0x1b,0x11,0x07,0x14,0x1a,0x16,0x07,0x07,0xe5,0x09,0x0c,0x08,0xe5, 0x14,0xe6,0xe7,0x17,0xe5,0x0e,0x16,0xe5,0x11,0x06,0x02,0xe5,0x07,0xe8,0x01,0x02, 0xe5,0x07,0x03,0xe5,0x04,0x05,0x0c,0x02,0x0a,0x07,0x09,0xe5,0xe6,0x04,0x09,0xe5, 0x07,0xe6,0x09,0xe5,0x04,0x03,0xe5,0x0d,0xe5,0x15,0x01,0x03,0xe5,0x09,0xe6,0xe7, 0x01,0x01,0x15,0x10,0x10,0x01,0x04,0x02,0xe5,0x14,0x03,0x05,0xe5,0x01,0x02,0xe5, 0xe5,0x02,0x05,0xe5,0x04,0x01,0xe6,0x01,0x05,0x0a,0x01,0x01,0x0d,0x01,0xe6,0x07, 0xe5,0xe5,0x12,0x01,0xe6,0x06,0x04,0xe5,0x03,0xe5,0x08,0xe5,0x0e,0x05,0xe5,0x0d, 0xe5,0x0d,0x01,0x04,0xe6,0x03,0x26,0x13,0x01,0x1b,0x0b,0x02,0x04,0x0b,0x09,0x07, 0x05,0xe5,0x03,0x01,0x02,0xe6,0x03,0x09,0x09,0x04,0x01,0x05,0x01,0xe5,0x02,0x0b, 0x07,0x04,0x01,0x0a,0x01,0x04,0x01,0x06,0xe5,0x01,0x01,0x09,0x0b,0x07,0x04,0xe5, 0xe6,0x01,0x28,0x10,0x02,0x01,0x1b,0x09,0x09,0x09,0x09,0x09,0x05,0xe5,0x03,0x06, 0xe5,0xe5,0x08,0x09,0x10,0xe5,0xe5,0x05,0xe5,0x0a,0x10,0xe5,0xe5,0xe5,0x0d,0x02, 0x01,0x04,0xe5,0xe5,0x16,0x03,0x02,0x2b,0x11,0x0b,0x01,0x11,0x0b,0x04,0x01,0xe5, 0x0a,0x04,0x01,0x02,0x02,0x01,0x01,0x05,0x0b,0x04,0x04,0x01,0x01,0x02,0x03,0x02, 
0x02,0x0a,0x19,0xe5,0xe5,0x03,0x01,0x1c,0xe5,0x01,0x0d,0x02,0x11,0x02,0xe5,0x01, 0x28,0x03,0x13,0x05,0x01,0x01,0x0f,0x03,0x05,0x03,0x01,0x02,0xe5,0x02,0x05,0x03, 0x01,0x02,0xe5,0x02,0x01,0x02,0x04,0x06,0x04,0x07,0x02,0x03,0xe5,0x02,0x02,0x01, 0xe5,0x02,0x01,0x02,0xe5,0x02,0x04,0x04,0x01,0x02,0x04,0x01,0x01,0xe6,0xe5,0xe5, 0x08,0x09,0x09,0x02,0xe7,0x02,0x01,0x02,0x04,0x13,0x02,0x02,0xe6,0x26,0x02,0x13, 0x01,0x07,0x01,0x11,0x06,0x01,0x02,0x07,0x04,0x06,0x03,0xe5,0x03,0x01,0x09,0x0e, 0x02,0x01,0x05,0x03,0x03,0xe5,0x03,0x02,0x0c,0x03,0x07,0x01,0x07,0x01,0x07,0x06, 0x02,0x01,0x09,0x02,0x04,0x01,0x09,0x17,0xe6,0x01,0x27,0xe6,0xe5,0x12,0x01,0x07, 0x01,0x0d,0xe5,0x01,0x06,0x01,0xe5,0x08,0x09,0x06,0x02,0x04,0x01,0x01,0x0b,0x04, 0xe5,0xe6,0xe5,0xe5,0x01,0xe6,0x01,0x06,0x02,0x05,0xe5,0x04,0x06,0x0b,0x03,0xe5, 0x01,0x01,0x07,0x06,0xe5,0xe5,0xe5,0x06,0x09,0x01,0x07,0x17,0xe5,0xe5,0xe7,0x44, 0x09,0x33,0x04,0x0c,0x08,0x02,0x01,0xe5,0x05,0x0b,0x0e,0x02,0x02,0x06,0x02,0x06, 0x15,0x07,0x02,0x1a,0x02,0x12,0xe7,0x45,0xe6,0x06,0x01,0x25,0x0f,0xe5,0x0b,0x01, 0x05,0xe5,0x01,0x09,0xe6,0x16,0x03,0xe5,0x07,0xe5,0x09,0x1b,0x31,0xe5,0xe7,0x0d, 0x09,0x09,0x09,0x09,0x09,0x09,0x03,0x05,0x09,0x09,0x09,0x05,0x03,0x09,0xe6,0x03, 0x02,0x03,0x04,0x02,0xe5,0x01,0x05,0x04,0x01,0x02,0x09,0x09,0xe5,0x02,0x01,0x02, 0x04,0x01,0x02,0x06,0x02,0x09,0x06,0x02,0x02,0x02,0x03,0x02,0x06,0x04,0x01,0x02, 0x09,0x0d,0x02,0x01,0x02,0x88,0x0c,0x15,0x13,0x09,0x09,0x06,0x0c,0x06,0x0c,0x09, 0x09,0x18,0x06,0x97,0x02,0x04,0x0d,0x11,0x09,0x09,0x13,0x15,0x08,0x08,0x1e,0xe5, 0xe6,0xe5,0x11,0x09,0x1d,0x01,0xe5,0xe5,0x03,0x02,0x01,0x09,0x0e,0x04,0x07,0x01, 0x07,0x01,0x07,0x01,0xe7,0x0a,0x04,0x02,0xe6,0xe5,0x01,0x01,0x02,0x06,0x04,0x04, 0x09,0xe6,0x01,0x02,0x01,0x04,0x0e,0x04,0x04,0x09,0x09,0x09,0x09,0x16,0x03,0xe6, 0x3e,0x01,0xe5,0x05,0x01,0xe5,0x04,0x02,0xe5,0x11,0xe5,0x05,0x01,0xe5,0x02,0x02, 0x01,0xe5,0x05,0x01,0xe6,0x08,0x04,0x02,0x02,0xe5,0x04,0x01,0x01,0x02,0x02,0x02, 
0x08,0xe5,0x0c,0x02,0x01,0x01,0x02,0x05,0x06,0x01,0xe5,0x08,0x09,0x05,0x03,0x03, 0x02,0x01,0xe5,0x05,0x02,0x17,0xe8,0x45,0x09,0x04,0x04,0x0b,0x02,0x04,0x0c,0x01, 0x07,0x01,0x01,0x01,0x0e,0x04,0x02,0x0e,0x09,0x13,0x06,0x02,0x09,0x09,0x04,0x01, 0x0c,0x09,0x22,0x01,0x01,0x14,0xe6,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x05,0x01,0xe5, 0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x01,0x03,0x01,0xe5,0x05,0x01,0x01, 0x01,0x05,0xe6,0xe5,0x02,0x01,0xe5,0x07,0xe5,0x09,0xe5,0x05,0xe6,0xe5,0x06,0xe5, 0x01,0x05,0xe5,0x02,0x04,0xe6,0xe5,0x02,0xe8,0x06,0xe6,0xe5,0x02,0xe5,0xe5,0x07, 0xe5,0x07,0xe5,0x05,0x01,0xe6,0xe5,0x02,0xe7,0x07,0xe5,0x07,0xe5,0x06,0x03,0x16, 0x03,0x09,0xe5,0x11,0x04,0x01,0x01,0x07,0x01,0x09,0xe5,0x08,0xe5,0x06,0xe5,0x03, 0x01,0x01,0x03,0x01,0x04,0x0c,0x01,0x01,0x02,0x0b,0x08,0x05,0x03,0x09,0xe6,0x0e, 0x01,0x0a,0xe5,0x06,0x03,0xe5,0x03,0x13,0xe5,0x03,0xe5,0x01,0xe5,0x01,0xe5,0xe5, 0x01,0x19,0x01,0xe5,0x37,0x02,0x03,0x01,0x07,0x01,0x09,0xe5,0x11,0xe5,0x01,0x03, 0x01,0x06,0x03,0x0a,0x07,0x02,0x06,0x04,0x06,0x02,0x01,0x01,0xe5,0x0a,0x11,0x01, 0x01,0x04,0x0a,0x01,0x06,0x17,0x02,0x02,0xe5,0xe5,0x04,0xe6,0x19,0x03,0x14,0x05, 0x22,0x17,0x13,0x05,0x09,0x13,0x04,0x07,0x03,0x08,0x03,0x01,0x03,0x09,0x13,0x03, 0xe6,0x0c,0x05,0xe5,0x01,0x13,0x05,0x03,0xe5,0x03,0x1e,0x01,0x0c,0x07,0x05,0x09, 0x13,0x02,0x06,0x09,0x08,0xe5,0x08,0x08,0xe5,0x01,0x06,0x02,0xe5,0x04,0x0c,0x0f, 0x0b,0x04,0x02,0x02,0x08,0xe5,0x11,0x03,0xe7,0x02,0x06,0x01,0x03,0x06,0x09,0x09, 0x02,0xe5,0x01,0x01,0xe5,0x02,0x21,0x1e,0x06,0x05,0x01,0xe5,0x2d,0xe5,0xe6,0x0e, 0x01,0xe5,0x19,0x02,0x07,0xe5,0x01,0x0b,0x18,0x02,0xe5,0x02,0x0b,0xe5,0xe5,0x0f, 0xe5,0xe5,0x27,0x13,0x02,0xe6,0xe5,0xe5,0x0e,0x01,0x07,0x01,0x07,0x01,0x04,0x02, 0x01,0x07,0x01,0x01,0x04,0xe5,0xe5,0x06,0x01,0x01,0x05,0x01,0x01,0x01,0x03,0x01, 0x07,0x01,0x03,0x03,0x01,0xe5,0x05,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x01,0x07, 0x01,0x01,0x04,0xe5,0xe5,0xe5,0x04,0x01,0x01,0x04,0xe5,0xe5,0x06,0x01,0x01,0x01, 
0x03,0x01,0x06,0xe5,0xe5,0x02,0x03,0x01,0x07,0x01,0xe6,0x04,0x01,0xe5,0x05,0x01, 0xe6,0x04,0x01,0x07,0x01,0x01,0x06,0x04,0xe5,0x19,0x19,0x18,0x0b,0x01,0x0d,0xe5, 0x02,0x04,0xe5,0xe5,0x02,0x06,0xe5,0xe5,0x01,0xe6,0x04,0xe8,0x02,0x05,0x25,0xe5, 0x03,0x02,0x04,0xe5,0x1f,0x0e,0x08,0x22,0x03,0xe5,0x15,0x10,0x1d,0x08,0x08,0xe5, 0x01,0x08,0x01,0x01,0x04,0x01,0xe5,0x0c,0x01,0xe5,0x02,0xe5,0x05,0xe5,0x06,0x08, 0xe5,0xe5,0x09,0x0e,0x0b,0x13,0x09,0x07,0xe5,0xe5,0x0b,0x0b,0x21,0xe6,0xe6,0x3d, 0x09,0x05,0x0c,0x09,0x04,0x01,0x05,0x07,0x09,0x05,0x03,0x01,0x01,0x02,0x02,0x03, 0x02,0x01,0x20,0x09,0x09,0x58,0x02,0x05,0x03,0x42,0x13,0x09,0x0d,0x02,0x02,0x06, 0x02,0xe5,0x04,0x01,0x04,0x04,0x01,0x03,0x03,0x01,0xe5,0x08,0x0b,0x06,0xe5,0x05, 0x09,0xe6,0x0a,0x09,0x02,0x04,0xe5,0x0e,0x26,0xe7,0x04,0x02,0x35,0x12,0x0a,0x09, 0x0f,0x06,0x06,0x02,0x0a,0x04,0x07,0x04,0x02,0x0f,0x18,0x07,0x10,0x07,0x03,0x3b, 0xe5,0x12,0x09,0x09,0x27,0x0f,0x03,0x0e,0xe5,0x17,0x02,0x09,0x03,0x07,0x01,0x12, 0xe5,0x01,0x04,0xe6,0x09,0x28,0x15,0x1f,0x02,0x01,0xe5,0x01,0x06,0x07,0x05,0x09, 0x09,0x09,0x09,0x04,0x06,0x07,0x06,0xe6,0x06,0x01,0x02,0xe5,0x0f,0x0a,0x08,0x08, 0xe5,0xe5,0x05,0x01,0x02,0x0c,0x0b,0x01,0x01,0x12,0x06,0x01,0xe5,0x12,0x09,0x2b, 0x01,0x01,0x64,0x0a,0x05,0x1f,0x03,0x01,0x0c,0x05,0x11,0x1b,0x0d,0x08,0xe5,0x03, 0x2e,0x05,0x01,0x01,0x08,0x0d,0x09,0x09,0x09,0x09,0x09,0xe7,0x04,0xe5,0x09,0xe5, 0x0c,0x18,0xe5,0x04,0x01,0x09,0xe5,0xe6,0xe5,0x05,0x01,0x02,0x04,0xe5,0x07,0xe5, 0x06,0x08,0xe7,0x07,0xe6,0x06,0xe5,0x1e,0x06,0xe6,0x06,0xe5,0x0c,0x05,0x05,0xe5, 0x01,0x01,0x07,0x42,0xe5,0xe5,0x0c,0xe5,0x02,0x09,0x18,0xe5,0x11,0x04,0x05,0x0a, 0x02,0xe5,0x04,0x02,0x0a,0x05,0x02,0x07,0x0b,0x27,0x06,0x01,0x01,0x10,0x06,0xe5, 0xe5,0x09,0x05,0x0c,0x09,0x09,0x09,0x01,0x08,0x02,0x01,0x0d,0x03,0x01,0x09,0x07, 0x01,0x0a,0x17,0x05,0x03,0xe6,0x01,0x01,0x03,0xe5,0x04,0x03,0x04,0xe5,0x11,0x03, 0x05,0x08,0xe5,0x08,0xe5,0xe5,0x1e,0x09,0x06,0x0f,0xe6,0x07,0xe5,0x53,0x13,0x02, 
0x09,0x03,0x05,0x09,0x12,0x06,0x08,0x03,0x01,0x09,0x07,0x01,0x02,0x04,0x05,0x03, 0x09,0x0c,0x0b,0x24,0x01,0x01,0xe5,0xe5,0x44,0x18,0x07,0xe5,0x09,0x27,0x09,0x02, 0x10,0x03,0x13,0xe5,0x07,0x09,0x05,0xe6,0x1a,0x1b,0x07,0x01,0xe5,0xe8,0x0d,0xe5, 0x0a,0xe5,0x04,0xe5,0x2d,0x0c,0x05,0x06,0xe5,0xe5,0x08,0x05,0xe5,0x05,0x0b,0xe5, 0x07,0x09,0x0c,0x01,0x04,0x08,0x06,0x0d,0x09,0x02,0xe5,0x03,0xe8,0x12,0x01,0x04, 0x1e,0x01,0x01,0x01,0x02,0x01,0x0c,0x0c,0xe5,0x2d,0x0e,0xe5,0xe5,0x05,0xe5,0x02, 0x01,0x01,0xe5,0x12,0xe5,0x04,0x01,0x05,0x05,0x03,0x09,0x0b,0xe6,0x15,0x0c,0x03, 0x01,0x03,0x03,0xe5,0x11,0x01,0x07,0x01,0x20,0x01,0x03,0x02,0x04,0x20,0x20,0xe5, 0xe5,0xe5,0x0c,0xe5,0x0b,0x01,0x02,0x01,0x01,0xe5,0xe6,0x02,0x01,0xe5,0xe5,0x08, 0x01,0x05,0x01,0x18,0x01,0x01,0xe5,0xe5,0x0a,0x0f,0x1a,0xe5,0xe5,0xe5,0xe5,0x1c, 0x15,0xe5,0x03,0x04,0xe5,0x01,0x02,0x46,0x01,0x1b,0x01,0x04,0x02,0xe6,0x06,0x09, 0x05,0xe5,0x01,0x01,0x1d,0x01,0x0e,0x02,0x09,0x1a,0x02,0x01,0x3d,0x04,0xe6,0xe5, 0x5c,0x02,0x03,0xe5,0xe5,0x04,0xe5,0x02,0xe6,0x03,0x08,0x02,0x02,0x09,0x06,0x01, 0x02,0x04,0x01,0x02,0x09,0x04,0x01,0x02,0x02,0x0b,0x04,0x09,0x04,0x01,0x02,0x02, 0x10,0x09,0x09,0x09,0x14,0xe6,0xe6,0x01,0x4a,0x0f,0x04,0xe5,0xe7,0xe5,0xe5,0xe5, 0xe6,0x02,0x0d,0xe7,0x02,0x02,0x01,0x04,0x06,0xe5,0xe5,0xe5,0x04,0x01,0x01,0x09, 0x05,0x01,0x01,0x09,0x01,0x02,0xe5,0x02,0x0f,0x01,0x01,0x09,0x3b,0x03,0xe5,0xe5, 0x01,0x46,0x01,0x11,0x01,0x07,0x01,0x07,0x01,0xe5,0x02,0x01,0xe5,0xe5,0x08,0x07, 0x01,0x04,0x04,0x01,0x07,0x01,0x04,0x02,0x01,0x01,0x02,0x01,0xe5,0xe5,0xe5,0x09, 0xe6,0x01,0x06,0x01,0x04,0x05,0x01,0x07,0x01,0x0d,0xe5,0x05,0x02,0x01,0x04,0x02, 0x08,0xe5,0x11,0x04,0x49,0x01,0x0e,0x01,0xe5,0xe5,0x01,0x04,0x01,0x07,0x01,0x01, 0x02,0x01,0xe5,0x08,0x03,0xe7,0x01,0x01,0x01,0xe5,0xe7,0x01,0x01,0x01,0xe5,0x03, 0x01,0x01,0xe5,0x01,0x01,0x01,0x02,0xe6,0x01,0x01,0x02,0x0a,0xe5,0x01,0x03,0xe5, 0xe5,0xe5,0x04,0x01,0x01,0xe5,0xe5,0xe6,0x03,0x01,0x0b,0xe5,0x08,0x01,0x01,0x05, 
0x01,0xe5,0x04,0xe7,0x12,0x01,0x01,0x09,0x63,0x02,0x03,0x13,0x02,0x02,0x03,0x0b, 0x09,0x09,0x09,0x13,0x09,0x09,0x13,0x09,0x09,0x04,0x04,0x16,0x02,0xe6,0x09,0x01, 0x57,0x09,0xe5,0x03,0xe5,0x08,0x08,0xe5,0x07,0xe5,0x09,0xe5,0x07,0xe5,0x07,0xe5, 0x07,0xe5,0x11,0xe5,0x07,0xe5,0x07,0xe5,0x11,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5, 0x15,0xe9,0x07,0x05,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x06,0x02,0x05,0x03,0x06, 0x02,0xe5,0x04,0x02,0x09,0xe5,0x07,0xe5,0x04,0x04,0xe5,0x07,0xe5,0x07,0xe6,0x06, 0xe6,0x03,0x02,0x09,0xe5,0x07,0xe6,0x06,0xe5,0x07,0x09,0xe5,0x07,0xe6,0x06,0xe6, 0x06,0xe5,0x07,0x0d,0xe5,0x01,0xe5,0x01,0x4f,0x06,0x0c,0x08,0xe5,0x05,0x1d,0x29, 0x02,0x63,0x04,0x03,0x01,0xe5,0x50,0x12,0x0a,0x19,0x09,0x0b,0x09,0x16,0x10,0x13, 0x13,0x1d,0x0d,0x0a,0x01,0x01,0x44,0x04,0x09,0x04,0x02,0x01,0x02,0x04,0x01,0x0a, 0xe5,0x06,0x07,0x01,0xe7,0x03,0x01,0xe7,0x04,0x02,0xe7,0x05,0xe7,0x03,0x01,0xe7, 0x02,0x02,0xe7,0xe5,0x0d,0xe7,0x05,0xe7,0x05,0xe7,0x05,0x0a,0x01,0x07,0x01,0x07, 0xe6,0xe5,0x04,0x01,0x16,0xe7,0x4a,0xe5,0x02,0x02,0x01,0xe5,0x02,0x04,0x01,0x07, 0xe5,0x02,0x06,0x01,0x03,0x03,0x01,0x03,0x01,0xe6,0x03,0xe5,0xe7,0x06,0x02,0xe5, 0x06,0xe6,0x01,0x02,0x01,0xe6,0x06,0xe6,0x07,0xe5,0x03,0x02,0xe6,0x06,0xe6,0x06, 0xe6,0x06,0xe5,0x09,0x09,0x09,0x09,0x17,0x01,0xe5,0x07,0x3f,0x02,0x09,0x09,0x03, 0x03,0x01,0x0d,0xe5,0x03,0x03,0x07,0x05,0x01,0x0b,0x09,0x01,0x05,0x01,0xe5,0x07, 0xe5,0x11,0xe5,0x04,0x02,0x01,0x01,0x02,0x02,0x09,0x0d,0x11,0x20,0x03,0x15,0xe5, 0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe6,0xe5,0x04,0xe5, 0x05,0x01,0xe5,0x01,0x05,0xe5,0x02,0x04,0xe6,0x06,0xe5,0x05,0xe6,0xe5,0xe6,0x01, 0xe5,0x02,0xe5,0x01,0x03,0xe5,0x02,0x03,0xe8,0xe5,0x03,0xe6,0x01,0x05,0xe5,0x07, 0xe5,0x01,0x03,0xe7,0x01,0x05,0xe5,0x01,0xe5,0x01,0xe7,0x01,0x05,0xe5,0x07,0xe5, 0x05,0xe7,0x05,0xe7,0x07,0xe5,0x07,0xe5,0x06,0xe6,0xe5,0x03,0x46,0xe5,0x05,0x01, 0xe6,0x04,0x01,0x05,0x0b,0x0b,0x07,0x01,0xe5,0x03,0x04,0x0a,0xe5,0x07,0xe5,0x08, 
0x03,0x04,0xe5,0x07,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x07,0x1e,0x09,0x17,0xe6,0xe5, 0x4a,0xe5,0x05,0x01,0x09,0x03,0x19,0x02,0x04,0x01,0xe5,0xe6,0x01,0x05,0xe5,0x01, 0x01,0xe5,0x03,0x03,0x02,0x02,0xe5,0x01,0x05,0x03,0x02,0x02,0x04,0x04,0x06,0x02, 0xe5,0x01,0x02,0x02,0x06,0x02,0xe5,0x01,0x02,0x0c,0x09,0xe5,0x01,0x03,0x01,0xe5, 0x01,0x03,0x01,0x15,0xe5,0x01,0x09,0x41,0x07,0x01,0x09,0x05,0x01,0x01,0x07,0x0b, 0x0e,0xe5,0x03,0x03,0xe5,0x04,0xe5,0x02,0x04,0xe5,0x02,0xe5,0x03,0x08,0xe5,0x07, 0x09,0xe5,0x02,0xe5,0x02,0xe5,0x07,0xe5,0x02,0xe5,0x02,0x18,0xe5,0x07,0xe5,0x1c, 0xe7,0x4a,0xe5,0x05,0x01,0xe5,0x0a,0xe5,0x04,0x0c,0x06,0x0d,0x09,0x05,0x0b,0x03, 0x01,0xe5,0x06,0x01,0x02,0x07,0x0b,0x03,0x01,0xe5,0x01,0x09,0x03,0x01,0xe5,0x01, 0x17,0x01,0xe5,0x05,0x01,0xe5,0x1e,0xe5,0x58,0x0c,0x09,0x02,0x06,0x02,0x1b,0x04, 0x0c,0x10,0x05,0x02,0x07,0x01,0x0a,0x3e,0x14,0xe7,0x01,0x0e,0x01,0x07,0x01,0x07, 0x01,0x07,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x02,0x04,0x01,0x07,0x01,0x01,0x05, 0x01,0x04,0x02,0x01,0x07,0x01,0x03,0x03,0x01,0x07,0x01,0x01,0x05,0x01,0xe6,0x06, 0x01,0xe5,0x05,0x01,0xe6,0x04,0x01,0x01,0x05,0x01,0x07,0x01,0x07,0xe6,0xe5,0x04, 0x01,0x01,0x05,0x01,0x01,0x05,0xe6,0xe5,0x04,0x01,0x07,0x01,0x01,0x05,0x01,0x07, 0x01,0x06,0xe5,0xe5,0x09,0x03,0x10,0x0f,0x09,0x1c,0x19,0x0b,0xe5,0x01,0xe5,0x01, 0x01,0x03,0x08,0x0c,0x03,0x07,0xe5,0x08,0x08,0xe6,0x06,0xe5,0x05,0xe5,0x01,0x13, 0x07,0x13,0xe6,0x06,0x13,0xe6,0x02,0x12,0x01,0x01,0xe5,0x07,0x04,0x38,0x06,0x01, 0x07,0x0c,0x07,0x09,0x01,0x07,0x07,0x01,0x07,0x01,0x06,0x04,0x01,0x02,0x02,0x06, 0x04,0x03,0x04,0x07,0x16,0x09,0x0d,0x06,0x16,0x06,0x07,0x0a,0xe6,0x01,0x0c,0x45, 0x03,0x05,0x03,0x0b,0x1d,0x03,0x02,0x01,0xe5,0x02,0x04,0x03,0x0f,0x08,0x02,0x08, 0x1b,0x04,0x0e,0x09,0x06,0x0c,0x1a,0xe5,0xe6,0x4c,0x04,0x01,0x02,0x03,0x03,0x02, 0x03,0x01,0xe5,0x05,0x01,0xe5,0x05,0x01,0xe5,0x02,0x09,0x02,0xe7,0x02,0x11,0x04, 0x03,0xe5,0x02,0x0e,0x01,0x08,0x09,0x02,0x14,0x3a,0xe5,0xe7,0x12,0x09,0x49,0x01, 
0x09,0x02,0x07,0x03,0xe5,0x08,0x0b,0x05,0x0f,0x05,0x02,0x10,0x09,0x5b,0x02,0xe5, 0x0f,0x1c,0x0c,0x09,0x1c,0x03,0x0a,0x01,0xe5,0x05,0x05,0x0d,0x01,0x02,0x04,0x04, 0x01,0x06,0x14,0x06,0x0b,0x09,0x04,0x01,0x0f,0x16,0x25,0x02,0x01,0x08,0x09,0x11, 0x13,0x09,0x04,0x04,0x0f,0x0d,0xe5,0x03,0xe5,0x0b,0x09,0x06,0xe6,0x12,0x02,0x1a, 0x01,0x02,0x03,0xe5,0x08,0x02,0x02,0x1a,0x13,0x13,0x0d,0x01,0x01,0x06,0x0e,0x50, 0x05,0x01,0x04,0xe5,0x02,0x11,0x08,0x05,0x0b,0x02,0x01,0x15,0x09,0x02,0x02,0x08, 0xe5,0x1f,0x34,0xe6,0x07,0x0a,0x2a,0x09,0x09,0x04,0x02,0x01,0x03,0x06,0xe6,0x0b, 0x0d,0x0b,0x08,0xe5,0x02,0xe5,0x03,0xe6,0x10,0x01,0x02,0xe5,0x03,0xe6,0x0b,0x0a, 0x0d,0x03,0x04,0x05,0x03,0x09,0x23,0x01,0xe5,0x01,0x6a,0xe5,0x1e,0x01,0xe5,0x01, 0x03,0x03,0xe5,0x07,0x01,0xe5,0x14,0x05,0x02,0x01,0x16,0x0c,0x42,0xe6,0xe6,0x36, 0x02,0x09,0x01,0x07,0x14,0x0e,0xe6,0x09,0x01,0x01,0x07,0x09,0x09,0xe7,0x03,0x09, 0x0c,0xe5,0x07,0x02,0x06,0xe5,0x01,0x01,0x2b,0x06,0x02,0x1f,0x01,0xe5,0x57,0x19, 0xe6,0x02,0x01,0x03,0xe5,0x02,0x03,0x02,0x0a,0x02,0x11,0x09,0x07,0x12,0x07,0x01, 0x0b,0x04,0x05,0x3f,0x01,0x4d,0x25,0x13,0x10,0x06,0x2a,0x03,0x03,0x08,0x02,0x1a, 0x35,0x01,0xe5,0xe6,0x05,0xe5,0x04,0xe5,0x25,0xe5,0x25,0x01,0x04,0x09,0x04,0xe6, 0x01,0x04,0x09,0x0a,0xe5,0x03,0x02,0x09,0x02,0xe5,0x04,0xe5,0x04,0x02,0xe5,0x07, 0xe5,0x15,0xe5,0x03,0xe5,0x15,0xe5,0x03,0xe5,0xe6,0x04,0x1f,0x01,0xe5,0x01,0xe5, 0xe5,0xe5,0x04,0x01,0x2c,0x2a,0x01,0x05,0x04,0xe6,0x06,0x09,0x10,0xe5,0x01,0xe5, 0x0d,0xe6,0x0b,0x02,0x01,0x01,0x01,0x03,0x10,0xe5,0x0c,0x1f,0xe5,0x20,0x01,0x03, 0xe5,0xe5,0x01,0x01,0x0c,0x55,0x01,0x09,0x07,0x06,0x01,0x02,0x13,0x17,0x05,0x0e, 0x01,0x01,0x06,0xe5,0x0b,0x01,0x01,0x1d,0x02,0x01,0xe5,0xe5,0x01,0x15,0x01,0x02, 0x05,0x02,0x02,0x61,0xe5,0xe5,0x08,0x01,0x07,0x06,0x02,0x01,0x13,0x1d,0x10,0x09, 0x09,0xe5,0xe5,0xe5,0x24,0x01,0x17,0xe5,0x05,0x05,0xe7,0x64,0x04,0x01,0x01,0x05, 0xe5,0x01,0xe5,0x03,0x02,0x0a,0xe5,0xe5,0x01,0x05,0xe5,0x08,0x01,0x11,0x09,0x01, 
0x05,0x03,0x02,0x01,0xe6,0x06,0x01,0x4c,0xe6,0x01,0x02,0x06,0x02,0x5e,0x01,0x02, 0x05,0x01,0x01,0xe5,0x02,0x05,0x07,0xe6,0xe5,0xe5,0x06,0x03,0x05,0x01,0x01,0x0f, 0x03,0x05,0x01,0x01,0x05,0x04,0x01,0x01,0x07,0x01,0x04,0x27,0x1d,0x03,0xe7,0x01, 0x60,0xe5,0x01,0x01,0x01,0x0f,0x06,0x04,0x05,0x01,0x01,0x04,0x04,0x06,0x01,0xe5, 0xe5,0x03,0x0c,0x09,0x01,0x1b,0x01,0x22,0x01,0xe5,0xe5,0x0d,0x01,0x10,0x03,0xe5, 0xe6,0x5f,0xe5,0x01,0x04,0x0e,0x03,0xe5,0x01,0x01,0x01,0x07,0x01,0x01,0xe5,0x01, 0x03,0x09,0x01,0x04,0xe6,0x09,0x09,0x01,0x1b,0x01,0x25,0x01,0x03,0xe5,0x07,0xe5, 0x14,0xe7,0x68,0x13,0x02,0x09,0xe5,0x04,0x0e,0xe5,0x1b,0x09,0x02,0x06,0xe5,0x07, 0xe5,0x4d,0x01,0x01,0xe5,0x59,0xe5,0x07,0x05,0xe5,0x01,0x0f,0xe5,0x03,0x04,0x08, 0xe5,0x01,0xe5,0x29,0x11,0x06,0x02,0x31,0x1f,0xe7,0x0d,0x09,0x09,0x09,0x09,0x09, 0x09,0x09,0x09,0x05,0x03,0xe6,0x02,0xe5,0x01,0x09,0xe5,0x07,0x09,0xe5,0x09,0x02, 0x06,0x04,0x01,0x02,0x09,0x09,0x06,0x02,0x03,0xe5,0xe5,0x01,0x03,0xe5,0xe5,0x01, 0x09,0x09,0x09,0x09,0x05,0x03,0x09,0x0d,0x04,0x66,0x08,0xe5,0x3c,0x1c,0xe5,0x08, 0x09,0x4a,0x02,0xe8,0x62,0x0b,0x0f,0x13,0x19,0x1d,0x08,0x08,0x50,0xe8,0x31,0x20, 0x01,0x07,0x01,0x09,0xe7,0xe5,0x0b,0x01,0xe5,0xe5,0xe5,0x03,0x07,0x01,0xe5,0xe5, 0x05,0x01,0x07,0x01,0x09,0x07,0x01,0x07,0x01,0x07,0x15,0x25,0x01,0x22,0x01,0x01, 0x46,0x0c,0x01,0xe5,0x05,0x01,0xe5,0x05,0x02,0xe5,0x06,0x01,0x04,0xe5,0xe7,0x06, 0x01,0x01,0x03,0x01,0xe6,0x06,0x01,0xe5,0x05,0x01,0xe5,0x05,0x02,0x05,0xe5,0xe6, 0x02,0x02,0x01,0xe5,0x02,0x06,0x09,0x01,0x03,0x02,0x03,0x1d,0x02,0x01,0xe5,0x21, 0x01,0xe6,0x11,0x53,0x01,0xe5,0x01,0x09,0x0a,0x04,0x03,0x02,0x07,0x01,0x0b,0xe5, 0xe5,0x05,0x02,0x03,0x02,0x09,0x06,0x09,0x03,0x0b,0x03,0x14,0x08,0x28,0x01,0x01, 0x15,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5, 0x06,0xe5,0xe5,0xe5,0x04,0xe5,0x01,0x05,0xe5,0x07,0x03,0x05,0xe5,0x07,0xe5,0x09, 0xe6,0xe5,0x04,0xe6,0x06,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe6,0x06,0xe5, 
0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x06,0xe8,0x37,0x13, 0x06,0x01,0x07,0x01,0x04,0x04,0x11,0x01,0x07,0x01,0x07,0x01,0xe5,0x07,0x01,0x0a, 0x08,0xe5,0x05,0x01,0xe5,0x05,0x01,0x07,0x15,0xe5,0x08,0x13,0xe5,0x04,0x01,0x0a, 0xe5,0x15,0x01,0x01,0x38,0x1a,0x01,0x07,0x01,0x06,0xe5,0x0a,0x07,0x01,0x06,0xe5, 0xe5,0x06,0x01,0xe5,0x06,0xe5,0xe5,0x09,0x06,0x09,0x01,0x07,0x01,0x1b,0x0c,0x1a, 0x01,0x22,0xe6,0xe5,0x4b,0x32,0x09,0x09,0x1d,0xe5,0x08,0x1a,0x0b,0xe5,0x4c,0x38, 0x13,0x17,0x07,0x06,0x02,0x0b,0x1b,0x09,0x06,0x02,0x09,0x09,0x0b,0x09,0x04,0x02, 0x09,0x13,0x09,0x09,0x18,0xe8,0x65,0x03,0x05,0xe5,0xe5,0x05,0x02,0x06,0x14,0x01, 0x01,0x17,0x02,0xe5,0xe5,0xe5,0x12,0x27,0x1c,0x1a,0x11,0x01,0x07,0x01,0x01,0x05, 0x01,0x07,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x02,0x04, 0x01,0x01,0x02,0x02,0x01,0x03,0x03,0x01,0x02,0xe5,0x02,0x01,0x01,0x05,0x01,0xe5, 0xe5,0x03,0xe7,0xe5,0x05,0x01,0x02,0x04,0x01,0x01,0x05,0x01,0x01,0x01,0x03,0x01, 0x07,0x01,0x07,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01,0x01,0x05,0x01,0x07,0x01, 0x01,0x05,0x01,0x01,0x05,0x01,0x07,0x01,0x0b,0xe5,0xe5,0x0b,0x1c,0xe5,0x0b,0xe5, 0x0d,0x03,0x02,0x2c,0x01,0xe6,0x05,0x0a,0xe6,0x1c,0x01,0x07,0xe5,0x0e,0x0e,0x07, 0xe5,0x29,0x20,0xe7,0x34,0x10,0x06,0x0a,0x09,0x02,0x04,0x08,0x04,0x01,0x13,0x07, 0x11,0x07,0x03,0x03,0x03,0x02,0x15,0x06,0x02,0x01,0x01,0x02,0x09,0x02,0x08,0x0f, 0x0d,0x08,0x08,0xe5,0x01,0xe5,0x35,0x13,0x21,0x0f,0x05,0x06,0xe5,0x01,0xe5,0xe5, 0x23,0x02,0x01,0x0b,0x19,0x01,0x2f,0x09,0x15,0x04,0x29,0x34,0x03,0x01,0xe5,0x09, 0x02,0x0a,0x01,0x01,0x04,0x09,0x0c,0x01,0x01,0xe5,0x0c,0x05,0xe5,0x07,0xe5,0x07, 0x02,0x08,0x0c,0x04,0x03,0x07,0x07,0x04,0x21,0x01,0xe5,0x04,0xe5,0x65,0x01,0x0d, 0x07,0x08,0x02,0x01,0x09,0x01,0xe5,0x05,0x03,0x01,0x0f,0x09,0x09,0x0a,0x05,0x21, 0x09,0x24,0xe5,0x02,0xe5,0x01,0x0c,0x09,0x09,0x01,0x28,0x06,0x04,0x05,0x08,0x01, 0x06,0x0a,0x02,0x03,0x01,0x08,0xe5,0x06,0x03,0x09,0x03,0xe6,0x02,0x02,0x06,0xe5, 
0x07,0x09,0x09,0x02,0x06,0x13,0x09,0x09,0x03,0x11,0x08,0x02,0xe5,0xe9,0x1d,0x0d, 0x09,0x09,0x2e,0xe5,0x1e,0xe5,0x01,0xe5,0xe5,0x01,0x05,0x03,0x01,0xe5,0x01,0x03, 0x06,0x02,0x09,0x01,0x02,0x04,0x01,0x07,0x05,0x03,0x01,0x11,0x01,0x07,0x05,0x03, 0x01,0x0e,0x09,0x07,0x01,0x01,0xe5,0x07,0x15,0x52,0x0f,0x06,0x02,0x0b,0x01,0x05, 0xe5,0x08,0x08,0x04,0x05,0x1e,0x01,0x07,0x12,0x01,0x31,0xe6,0xe6,0x48,0x18,0x03, 0xe5,0x03,0x0a,0x06,0x0f,0xe6,0x01,0x03,0xe5,0xe7,0x06,0xe6,0x06,0xe5,0x08,0x04, 0xe6,0x01,0x07,0xe7,0x05,0xe5,0xe5,0x05,0xe6,0x01,0x0e,0xe5,0xe5,0x06,0xe5,0x21, 0x09,0xe5,0x01,0xe5,0x05,0x43,0x0c,0x0c,0x02,0xe5,0x23,0x04,0x02,0x03,0xe5,0x01, 0x06,0x03,0x06,0x02,0x09,0x09,0x08,0x11,0x02,0x19,0xe5,0x01,0x2b,0x01,0xe7,0x07, 0x0f,0x53,0x09,0x01,0x08,0x05,0x03,0x04,0x04,0x04,0x01,0x03,0xe5,0x01,0x01,0x03, 0xe5,0x01,0x01,0x07,0x01,0x06,0xe5,0xe6,0x04,0x1d,0x03,0x01,0xe5,0x04,0x03,0xe5, 0x03,0xe5,0x05,0x01,0xe5,0x1a,0x06,0xe5,0xe5,0x12,0x04,0xe5,0x0d,0x41,0x15,0x09, 0x02,0x02,0xe5,0x08,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x02, 0x04,0x01,0x01,0xe5,0x03,0x01,0xe5,0x10,0xe5,0xe5,0x05,0xe5,0x05,0xe5,0xe5,0x1a, 0x07,0xe5,0xe5,0xe5,0x15,0x5c,0x09,0x11,0x17,0x13,0x39,0x02,0x06,0x15,0x10,0x07, 0xe8,0x06,0xe5,0x04,0xe5,0x01,0x01,0x32,0x02,0xe5,0x14,0xe5,0x05,0x08,0xe5,0x01, 0x01,0xe5,0x01,0xe5,0x01,0xe5,0x19,0xe5,0x07,0xe5,0x08,0xe7,0x05,0x02,0x01,0x04, 0x01,0xe5,0x04,0xe5,0x08,0xe5,0x06,0x01,0x07,0xe5,0x0a,0xe5,0x05,0x01,0xe5,0x04, 0x02,0xe5,0x04,0xe5,0x13,0x05,0x02,0x01,0x05,0xe5,0x06,0xe5,0x12,0x01,0x1e,0x02, 0x20,0xe5,0x07,0xe5,0x01,0x06,0xe5,0x07,0x01,0x0b,0x03,0x09,0x09,0x02,0xe5,0x04, 0x03,0xe5,0x03,0x03,0xe5,0x03,0x09,0x14,0x0b,0xe5,0x08,0x01,0x07,0xe5,0x16,0x01, 0x02,0x02,0xe5,0x03,0x0c,0x37,0x04,0x1c,0x09,0x05,0x03,0x02,0xe5,0xe5,0xe5,0x07, 0x05,0xe5,0x03,0x09,0x01,0x07,0x01,0x03,0x01,0x01,0x01,0x07,0x01,0x03,0xe5,0x01, 0x01,0x03,0x01,0x01,0x01,0x03,0x01,0x01,0x01,0x11,0x04,0xe5,0x02,0x01,0xe5,0x01, 
0xe5,0x01,0x01,0x0a,0x0b,0xe5,0x03,0x04,0xe5,0xe6,0x01,0x46,0x31,0x06,0x02,0x01, 0x0e,0x0a,0xe5,0x05,0x02,0xe5,0x07,0xe5,0x04,0x01,0xe5,0x07,0xe5,0x04,0x0a,0x09, 0x0b,0xe5,0x08,0xe5,0x07,0xe5,0x03,0x16,0xe5,0x05,0x02,0x03,0x01,0x47,0x22,0x04, 0x01,0x07,0x07,0x06,0x09,0x02,0x1b,0xe5,0x08,0x08,0xe5,0x12,0xe5,0x01,0x1b,0x01, 0x04,0xe5,0x09,0x02,0x1a,0x04,0x02,0x46,0x03,0x23,0x01,0x01,0x05,0x03,0x09,0x09, 0x04,0x01,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x02,0x01,0x04, 0x04,0x04,0x13,0x04,0x04,0x09,0x02,0x01,0x18,0x02,0x01,0x01,0x02,0x46,0x04,0x0b, 0x01,0x14,0x01,0x02,0x01,0x01,0xe5,0x03,0x01,0x01,0x04,0x06,0x02,0xe5,0x01,0x04, 0x06,0x02,0x05,0xe6,0xe5,0x01,0x01,0xe6,0x01,0x06,0x02,0x02,0xe8,0x01,0x09,0x06, 0x02,0x04,0xe5,0x0c,0x06,0x02,0x04,0xe5,0x02,0x22,0xe5,0xe5,0xe5,0x48,0x21,0xe5, 0x03,0x01,0x07,0x18,0x01,0x01,0x13,0x01,0x05,0x08,0xe5,0xe5,0xe5,0x04,0xe6,0x0f, 0xe5,0x09,0xe5,0x19,0xe5,0xe5,0xe5,0x06,0x01,0x1c,0xe7,0x08,0x02,0x5d,0x16,0xe5, 0x07,0x05,0xe5,0x01,0xe5,0x05,0x03,0x02,0x02,0x03,0xe5,0x03,0xe5,0x07,0xe5,0x01, 0x02,0x02,0xe5,0x01,0x02,0x02,0x03,0x02,0x02,0xe5,0x01,0x02,0x02,0x13,0x03,0x02, 0x02,0xe5,0x01,0x02,0x02,0x03,0xe5,0x1b,0x04,0x0a,0xe5,0x5d,0xe5,0x15,0x0b,0x03, 0xe5,0x0d,0x0f,0xe5,0x01,0x05,0xe5,0x01,0x05,0xe5,0x11,0xe5,0x0b,0x13,0x05,0xe5, 0x0b,0xe5,0x1d,0xe7,0x0d,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x04,0x01,0x02, 0xe5,0x04,0x02,0x09,0x02,0x02,0x03,0x06,0x02,0xe6,0x03,0x04,0x05,0xe5,0x01,0x06, 0x02,0xe6,0x02,0xe5,0x01,0xe5,0x02,0xe6,0x01,0xe6,0x03,0x02,0x04,0x01,0x02,0xe5, 0x02,0x01,0x02,0x05,0x03,0x06,0x02,0x05,0x03,0xe5,0x04,0x02,0x09,0x09,0x0d,0x01, 0x01,0xe5,0x01,0x63,0x08,0xe5,0x12,0x06,0x02,0x08,0xe5,0x09,0xe5,0x07,0xe5,0x05, 0x13,0x02,0x13,0x10,0x02,0x09,0x06,0x02,0x1d,0x04,0x02,0x02,0xe5,0x62,0x07,0x02, 0x13,0x0a,0x03,0x05,0x05,0x05,0x03,0x05,0x03,0x09,0x01,0x07,0x04,0x04,0x09,0x01, 0x01,0x05,0x0e,0x04,0x05,0x03,0x01,0x02,0x04,0x17,0x0a,0xe5,0x01,0x01,0x06,0x10, 
0x36,0x18,0xe5,0xe5,0xe5,0x03,0x07,0x01,0x04,0x0f,0xe6,0x04,0x17,0xe6,0x06,0xe6, 0x06,0x01,0x11,0xe6,0x24,0x01,0x2b,0x01,0x19,0xe5,0x4b,0x02,0xe5,0x04,0x02,0x03, 0x02,0x01,0xe5,0x09,0x09,0x0b,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x13,0x09,0x09, 0x20,0x01,0xe6,0x12,0xe5,0x03,0x4a,0x04,0x09,0x4b,0xe5,0x6c,0x01,0x01,0x15,0xe5, 0x01,0x05,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07, 0xe6,0x06,0xe6,0x06,0xe5,0x07,0xe5,0x07,0xe5,0x02,0x02,0x01,0xe5,0xe5,0x06,0xe5, 0x06,0xe7,0x02,0x03,0xe6,0x02,0x04,0xe5,0x07,0xe5,0x05,0xe7,0x02,0x04,0xe5,0x05, 0xe7,0x07,0xe5,0x07,0xe5,0x05,0xe7,0x05,0x01,0xe5,0x07,0xe5,0x07,0xe5,0x06,0x02, 0xe5,0x03,0x14,0x3c,0xe5,0x10,0xe5,0x07,0xe6,0x04,0x01,0xe6,0x11,0x02,0x0c,0x05, 0xe5,0x01,0x05,0x03,0x05,0xe5,0x11,0x03,0x05,0xe5,0x07,0x03,0x0f,0x09,0x03,0x05, 0x02,0x1f,0x01,0xe5,0x18,0x4d,0x02,0x06,0x09,0x01,0x10,0x05,0x03,0x01,0x05,0x03, 0x05,0x03,0x05,0x03,0x05,0x09,0x09,0x03,0x05,0x07,0x01,0x03,0x0f,0x07,0x01,0x03, 0x05,0x03,0x1b,0x01,0xe6,0x18,0x4f,0x09,0x13,0x05,0x04,0x04,0x13,0x01,0x04,0x1d, 0x02,0x01,0x11,0x01,0x0e,0x0c,0x01,0x04,0x04,0x1d,0x02,0x19,0xe5,0x12,0x27,0x10, 0x04,0x04,0x02,0x09,0x06,0x04,0x02,0xe5,0x07,0xe5,0x13,0xe5,0x08,0x1c,0xe5,0x0f, 0x01,0xe5,0x19,0x01,0xe5,0x07,0xe5,0x1b,0x01,0xe6,0x31,0x15,0x06,0x28,0x01,0x0d, 0x05,0x02,0x07,0xe5,0x01,0x03,0x02,0x02,0xe5,0x04,0x02,0x06,0x02,0x06,0x02,0x03, 0x02,0x02,0x06,0x02,0x06,0x02,0x10,0x02,0x06,0x02,0x06,0x02,0x21,0xe5,0xe6,0x10, 0x01,0x07,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0x01,0x05,0x01,0xe5, 0xe5,0xe5,0x01,0x01,0xe5,0x05,0x01,0x01,0x05,0xe6,0x01,0x04,0x01,0x02,0x04,0x01, 0x02,0x01,0x02,0x01,0x01,0x05,0xe6,0x04,0x01,0x01,0x01,0x05,0x01,0x01,0x01,0x03, 0x01,0x01,0x05,0x01,0x01,0x01,0x03,0x01,0x01,0x01,0x03,0x01,0x01,0x01,0xe5,0x01, 0x01,0x01,0x02,0x02,0x01,0x01,0x01,0xe5,0x01,0x01,0x01,0x07,0x01,0x01,0x03,0x01, 0x01,0x05,0x01,0x01,0x02,0x02,0x01,0x01,0x07,0x01,0x01,0x05,0x01,0x0d,0xe5,0x27, 
0x06,0x3a,0x15,0x0d,0x02,0xe6,0x01,0x17,0x02,0xe6,0x08,0x07,0xe5,0x07,0xe5,0x03, 0x07,0x0f,0xe5,0x07,0x05,0x04,0x2b,0x01,0x01,0xe5,0x19,0x15,0x09,0x09,0x06,0x02, 0x01,0x07,0x0e,0x0b,0x04,0x15,0x02,0x07,0x0f,0x04,0x08,0x06,0x02,0xe5,0x01,0x02, 0x03,0x04,0x18,0x13,0x05,0x09,0x0c,0x0d,0x02,0xe7,0xe5,0x02,0x37,0x01,0x13,0x11, 0x08,0x02,0x08,0x11,0x05,0x02,0x1c,0x13,0x05,0x03,0x1e,0x04,0x03,0x01,0x35,0xe5, 0x01,0xe5,0x0f,0x09,0x3a,0x05,0x2a,0x04,0x02,0x01,0x09,0x01,0xe5,0x05,0x01,0x0f, 0x01,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0xe5,0x01,0x06,0x06,0x01,0xe5, 0x05,0x01,0x02,0x04,0x01,0x21,0x01,0xe7,0x15,0x27,0x22,0x04,0x09,0x14,0x09,0x01, 0x07,0xe5,0xe5,0xe5,0x06,0x01,0x11,0x01,0x02,0x01,0x02,0x01,0x07,0x01,0x02,0x04, 0x01,0x02,0x04,0x01,0x11,0x01,0x04,0x02,0x01,0x07,0x01,0x25,0xe5,0x0d,0x09,0x09, 0x13,0x03,0x04,0xe5,0x02,0x04,0xe5,0x05,0x02,0x01,0x07,0x09,0x03,0x0e,0xe5,0x09, 0x04,0x04,0x01,0x02,0x01,0x07,0x29,0xe5,0xe6,0x08,0x09,0x13,0x04,0x01,0x0c,0x08, 0xe5,0x16,0xe5,0x01,0xe5,0x0c,0xe5,0x07,0xe5,0x07,0x13,0x01,0x14,0x07,0x01,0x03, 0x02,0x02,0x09,0x10,0x17,0x02,0x07,0x09,0x09,0x0a,0x08,0x06,0x09,0xe5,0x07,0xe5, 0xe5,0x0f,0x02,0x02,0x0d,0xe5,0x05,0x01,0x1c,0xe6,0x0e,0x04,0x02,0x09,0x02,0x13, 0x26,0xe5,0x08,0x12,0x15,0x05,0xe5,0x19,0x17,0x09,0x09,0x13,0x02,0x04,0x0b,0x09, 0x1a,0x01,0x01,0x01,0x1c,0x25,0x26,0x04,0x1c,0xe5,0x08,0xe5,0x43,0x28,0x26,0x01, 0xe6,0x44,0xe5,0x2d,0x04,0x04,0x03,0x15,0xe5,0x92,0x02,0xe6,0x17,0x09,0x0b,0x07, 0x09,0x0e,0x04,0x09,0x1d,0x03,0x07,0xe6,0x06,0xe6,0x07,0xe5,0x01,0x08,0x08,0xe5, 0x08,0x09,0x03,0x01,0x02,0xe5,0x08,0x14,0x04,0x05,0x07,0x0f,0x18,0xe8,0x17,0x17, 0x04,0xe6,0xe6,0x0f,0x08,0xe5,0x02,0x04,0xe5,0x02,0x05,0x02,0x09,0x09,0x08,0x10, 0x04,0x2f,0x07,0x01,0x07,0x01,0x07,0x01,0x14,0x08,0x09,0x19,0x02,0x21,0x19,0x03, 0x0b,0x09,0x09,0x09,0x11,0x2f,0x12,0x10,0x0b,0x0a,0x09,0x01,0x02,0x15,0x07,0xe5, 0x19,0xe7,0x02,0x0b,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0xe5,0x01,0x03, 
0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x07, 0xe5,0x07,0xe5,0x09,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x08, 0x06,0xe7,0x03,0x01,0xe5,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x08,0x08,0xe5,0x07,0xe5, 0x0b,0x03,0xe5,0x01,0x0b,0x01,0x05,0x01,0x01,0x02,0x05,0xe5,0x08,0x08,0xe5,0x08, 0x09,0x08,0xe5,0x07,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x01,0x02,0x02,0xe5, 0x04,0x02,0xe5,0x04,0x01,0x03,0x08,0xe6,0x06,0xe5,0x04,0x03,0x07,0xe6,0x01,0x06, 0x08,0x08,0xe6,0x06,0xe5,0x05,0x02,0xe5,0x03,0x04,0x09,0x09,0x09,0x0f,0x01,0x02, 0xe5,0x03,0x06,0xe5,0x03,0x03,0xe5,0xe5,0xe5,0xe5,0x01,0xe5,0x07,0x06,0x02,0xe6, 0x06,0xe5,0x02,0x04,0xe6,0xe5,0x01,0x02,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x07,0x02, 0x01,0x04,0xe6,0x06,0x09,0x01,0x09,0x01,0xe5,0x05,0x02,0x04,0xe6,0x08,0x02,0x03, 0x02,0x07,0x01,0x01,0x04,0x02,0x06,0xe5,0xe6,0x07,0x09,0xe5,0x06,0xe5,0x06,0x01, 0x09,0x09,0x10,0xe6,0x23,0x0b,0x25,0x09,0x09,0x16,0x05,0x02,0xe5,0x07,0x0c,0x05, 0x03,0x06,0x13,0x12,0x07,0x02,0x08,0x02,0x03,0x02,0x0d,0x2b,0xe5,0x0e,0x05,0x03, 0x09,0x09,0x09,0xe5,0x02,0x04,0x09,0x09,0x04,0x04,0x09,0x04,0x04,0xe5,0x07,0xe5, 0x07,0x01,0x01,0x05,0xe5,0x09,0x09,0x08,0xe5,0x08,0x09,0x02,0x04,0x01,0x09,0x10, 0x02,0x13,0x08,0xe5,0x08,0x09,0x0e,0x01,0x01,0x0d,0xe5,0x01,0x03,0x01,0xe5,0x07, 0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x01,0x05,0xe5,0x07,0xe5,0x02,0x04,0xe5,0x02, 0x03,0xe6,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x09,0xe5,0x07,0xe5,0x02,0x05, 0x03,0x04,0xe5,0x07,0xe5,0x02,0x04,0xe5,0x07,0xe5,0x04,0x02,0xe5,0x07,0xe5,0x01, 0x05,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5,0x10,0xe6,0x0c,0x02,0x04,0x01, 0x02,0x06,0x02,0x06,0x09,0x02,0x05,0xe5,0x01,0x05,0xe5,0x01,0x06,0x02,0x02,0x03, 0x02,0x02,0x03,0x02,0x01,0xe5,0x02,0x09,0x02,0x06,0x06,0x02,0x08,0x01,0xe5,0x06, 0x01,0x09,0x05,0x03,0x09,0x0a,0x02,0x05,0x02,0x01,0xe5,0x03,0x04,0x03,0x02,0x06, 0x05,0x04,0x09,0x08,0x09,0x11,0x01,0xe5,0x0d,0xe5,0x07,0x09,0x09,0xe5,0xe5,0x05, 
0x09,0x09,0x09,0x09,0x09,0x09,0xe5,0xe5,0x05,0x09,0xe5,0xe5,0x05,0xe5,0xe5,0x07, 0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5,0xe5,0x05,0xe5, 0xe5,0x05,0x09,0x09,0x09,0xe5,0x07,0xe5,0xe5,0x05,0x02,0x06,0xe5,0xe5,0x05,0xe5, 0xe5,0x0b,0xe5,0xe6,0x09,0x03,0x02,0x06,0x01,0x07,0x01,0x07,0x09,0x01,0x03,0x03, 0x01,0x03,0x03,0x01,0x07,0x01,0x07,0x01,0x07,0x01,0x07,0x04,0x04,0x01,0x07,0x04, 0x04,0x04,0x06,0x05,0x03,0x05,0x03,0x09,0x09,0x13,0x13,0x04,0x04,0x03,0x11,0x07, 0x03,0x05,0x0f,0x02,0xe5,0x18,0x09,0x04,0x0e,0x09,0x01,0x02,0x04,0x01,0x07,0x09, 0x06,0x02,0x01,0x11,0x01,0xe5,0xe5,0x1f,0x06,0x17,0x05,0x01,0x02,0x03,0x08,0x06, 0x02,0x09,0x10,0x04,0x04,0x24,0xe6,0xe5,0x0d,0x05,0x03,0x03,0x05,0x02,0x02,0x06, 0x06,0x09,0x09,0x03,0x05,0x02,0x06,0x02,0x02,0xe5,0x01,0x09,0x09,0x09,0x05,0xe5, 0x01,0x08,0x02,0x09,0x02,0x06,0x0d,0x05,0x0d,0x05,0x02,0x04,0x04,0xe5,0x04,0x09, 0x02,0x1a,0x09,0x0f,0xe8,0x2b,0x11,0x1d,0x1d,0x18,0x04,0xe5,0x09,0x13,0x01,0x1a, 0xe5,0x09,0xe5,0xe5,0x04,0x1f,0x25,0xe6,0x0f,0x03,0x05,0x09,0x09,0x09,0x09,0x01, 0x07,0x09,0x09,0x09,0xe5,0x07,0x05,0x03,0x09,0x01,0x07,0x06,0xe5,0x02,0x09,0x09, 0x01,0x03,0x03,0x05,0x03,0x09,0xe5,0x07,0x09,0x09,0x01,0x07,0x09,0x07,0x01,0x09, 0x09,0x0d,0x01,0x01,0x04,0x27,0x39,0x25,0x09,0x0b,0x09,0x09,0x02,0xe5,0x0e,0x02, 0x07,0x0b,0x1d,0x2e,0xe5,0xe6,0x50,0xe5,0x1b,0x13,0x04,0x04,0x04,0x06,0x04,0x09, 0x04,0x04,0x13,0x0e,0x13,0x1d,0x27,0xe7,0x21,0xc6,0x40,0x03,0x03,0x07,0xe3,0x09, 0x3a,0x03,0x07,0x29,0x1f,0x25,0x0b,0x3b,0x15,0xe5,0x2d,0x01,0x27,0xe7,0x6e,0x16, 0x05,0xe5,0x01,0x05,0x02,0x02,0x05,0x03,0x05,0xe5,0x01,0x05,0x0d,0x05,0x5c,0x06, 0x01,0x01,0x05,0x93,0x59,0x34,0x05,0xe5,0x01,0x04,0xe5,0xe3,0x34,0x10,0x02,0xe5, 0x13,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x0b,0x09, 0x09,0x09,0x09,0x09,0x09,0x09,0x09,0xe6,0x06,0x09,0x09,0x09,0x03,0x05,0x0a,0xe5, 0xe5,0x04,0x09,0x08,0xe5,0x07,0xe5,0x08,0x08,0xe5,0x07,0xe5,0x07,0xe5,0x07,0xe5, 
0x07,0xe5,0x07,0xe5,0x08,0x08,0xe5,0x08,0x09,0x07,0x03,0x09,0x09,0x09,0x09,0x09, 0x04,0x03,0xe5,0x07,0xe5,0x07,0xe5,0x05,0x01,0xe5,0x08,0x08,0xe5,0x08,0x07,0xe6, 0x11,0x16,0x09,0x09,0x0f,0x03,0x09,0x09,0x05,0x03,0x09,0x05,0x03,0x13,0x04,0x09, 0x05,0x05,0x09,0x09,0x13,0x14,0x03,0x09,0x09,0x05,0x03,0x2f,0xe5,0x01,0x0d,0x03, 0x03,0x05,0x03,0x05,0x03,0x01,0x0d,0x03,0x05,0x03,0x09,0x05,0x03,0x09,0x09,0x01, 0x11,0x01,0x04,0x04,0x04,0x06,0x04,0x04,0x04,0x04,0x04,0x04,0x09,0x04,0x06,0x0b, 0x03,0x05,0x03,0x09,0x09,0x01,0x0d,0x03,0x03,0x05,0x03,0x0e,0xe8,0x0a,0x06,0x01, 0x02,0x01,0x02,0x01,0x02,0x01,0x01,0xe5,0xe5,0x02,0x07,0xe5,0xe5,0x03,0x02,0x01, 0x01,0xe5,0xe5,0x01,0x01,0x01,0xe5,0xe5,0x01,0x01,0x04,0x02,0x01,0x01,0xe5,0xe5, 0x01,0x01,0x01,0x02,0x03,0x07,0xe5,0xe5,0xe6,0xe5,0x04,0x02,0x02,0x03,0x02,0x02, 0x05,0x02,0x02,0x03,0x02,0x02,0x03,0x02,0x02,0x01,0x07,0x03,0x02,0x02,0x02,0x04, 0x02,0x01,0x04,0x02,0x01,0x02,0x01,0x02,0x01,0x01,0xe5,0x05,0x04,0x03,0x07,0xe5, 0xe5,0x03,0x04,0x04,0x0b,0x02,0xe5,0x01,0xe5,0x01,0xe5,0xff,0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
gpl-2.0
ajopanoor/mic_card_os
fs/hpfs/buffer.c
232
4391
/* * linux/fs/hpfs/buffer.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * general buffer i/o */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/blkdev.h> #include "hpfs_fn.h" void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n) { struct buffer_head *bh; struct blk_plug plug; if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size)) return; bh = sb_find_get_block(s, secno); if (bh) { if (buffer_uptodate(bh)) { brelse(bh); return; } brelse(bh); }; blk_start_plug(&plug); while (n > 0) { if (unlikely(secno >= hpfs_sb(s)->sb_fs_size)) break; sb_breadahead(s, secno); secno++; n--; } blk_finish_plug(&plug); } /* Map a sector into a buffer and return pointers to it and to the buffer. */ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp, int ahead) { struct buffer_head *bh; hpfs_lock_assert(s); hpfs_prefetch_sectors(s, secno, ahead); cond_resched(); *bhp = bh = sb_bread(s, secno); if (bh != NULL) return bh->b_data; else { printk("HPFS: hpfs_map_sector: read error\n"); return NULL; } } /* Like hpfs_map_sector but don't read anything */ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp) { struct buffer_head *bh; /*return hpfs_map_sector(s, secno, bhp, 0);*/ hpfs_lock_assert(s); cond_resched(); if ((*bhp = bh = sb_getblk(s, secno)) != NULL) { if (!buffer_uptodate(bh)) wait_on_buffer(bh); set_buffer_uptodate(bh); return bh->b_data; } else { printk("HPFS: hpfs_get_sector: getblk failed\n"); return NULL; } } /* Map 4 sectors into a 4buffer and return pointers to it and to the buffer. 
*/ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh, int ahead) { struct buffer_head *bh; char *data; hpfs_lock_assert(s); cond_resched(); if (secno & 3) { printk("HPFS: hpfs_map_4sectors: unaligned read\n"); return NULL; } hpfs_prefetch_sectors(s, secno, 4 + ahead); qbh->data = data = kmalloc(2048, GFP_NOFS); if (!data) { printk("HPFS: hpfs_map_4sectors: out of memory\n"); goto bail; } qbh->bh[0] = bh = sb_bread(s, secno); if (!bh) goto bail0; memcpy(data, bh->b_data, 512); qbh->bh[1] = bh = sb_bread(s, secno + 1); if (!bh) goto bail1; memcpy(data + 512, bh->b_data, 512); qbh->bh[2] = bh = sb_bread(s, secno + 2); if (!bh) goto bail2; memcpy(data + 2 * 512, bh->b_data, 512); qbh->bh[3] = bh = sb_bread(s, secno + 3); if (!bh) goto bail3; memcpy(data + 3 * 512, bh->b_data, 512); return data; bail3: brelse(qbh->bh[2]); bail2: brelse(qbh->bh[1]); bail1: brelse(qbh->bh[0]); bail0: kfree(data); printk("HPFS: hpfs_map_4sectors: read error\n"); bail: return NULL; } /* Don't read sectors */ void *hpfs_get_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh) { cond_resched(); hpfs_lock_assert(s); if (secno & 3) { printk("HPFS: hpfs_get_4sectors: unaligned read\n"); return NULL; } /*return hpfs_map_4sectors(s, secno, qbh, 0);*/ if (!(qbh->data = kmalloc(2048, GFP_NOFS))) { printk("HPFS: hpfs_get_4sectors: out of memory\n"); return NULL; } if (!(hpfs_get_sector(s, secno, &qbh->bh[0]))) goto bail0; if (!(hpfs_get_sector(s, secno + 1, &qbh->bh[1]))) goto bail1; if (!(hpfs_get_sector(s, secno + 2, &qbh->bh[2]))) goto bail2; if (!(hpfs_get_sector(s, secno + 3, &qbh->bh[3]))) goto bail3; memcpy(qbh->data, qbh->bh[0]->b_data, 512); memcpy(qbh->data + 512, qbh->bh[1]->b_data, 512); memcpy(qbh->data + 2*512, qbh->bh[2]->b_data, 512); memcpy(qbh->data + 3*512, qbh->bh[3]->b_data, 512); return qbh->data; bail3: brelse(qbh->bh[2]); bail2: brelse(qbh->bh[1]); bail1: brelse(qbh->bh[0]); bail0: return NULL; } void 
hpfs_brelse4(struct quad_buffer_head *qbh) { brelse(qbh->bh[3]); brelse(qbh->bh[2]); brelse(qbh->bh[1]); brelse(qbh->bh[0]); kfree(qbh->data); } void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh) { memcpy(qbh->bh[0]->b_data, qbh->data, 512); memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512); memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512); memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512); mark_buffer_dirty(qbh->bh[0]); mark_buffer_dirty(qbh->bh[1]); mark_buffer_dirty(qbh->bh[2]); mark_buffer_dirty(qbh->bh[3]); }
gpl-2.0
stefanstrogin/linux
arch/arm64/kernel/cpu_ops.c
488
2191
/* * CPU kernel entry/exit control * * Copyright (C) 2013 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <asm/cpu_ops.h> #include <asm/smp_plat.h> #include <linux/errno.h> #include <linux/of.h> #include <linux/string.h> extern const struct cpu_operations smp_spin_table_ops; extern const struct cpu_operations cpu_psci_ops; const struct cpu_operations *cpu_ops[NR_CPUS]; static const struct cpu_operations *supported_cpu_ops[] __initconst = { #ifdef CONFIG_SMP &smp_spin_table_ops, #endif &cpu_psci_ops, NULL, }; static const struct cpu_operations * __init cpu_get_ops(const char *name) { const struct cpu_operations **ops = supported_cpu_ops; while (*ops) { if (!strcmp(name, (*ops)->name)) return *ops; ops++; } return NULL; } /* * Read a cpu's enable method from the device tree and record it in cpu_ops. */ int __init cpu_read_ops(struct device_node *dn, int cpu) { const char *enable_method = of_get_property(dn, "enable-method", NULL); if (!enable_method) { /* * The boot CPU may not have an enable method (e.g. when * spin-table is used for secondaries). Don't warn spuriously. 
*/ if (cpu != 0) pr_err("%s: missing enable-method property\n", dn->full_name); return -ENOENT; } cpu_ops[cpu] = cpu_get_ops(enable_method); if (!cpu_ops[cpu]) { pr_warn("%s: unsupported enable-method property: %s\n", dn->full_name, enable_method); return -EOPNOTSUPP; } return 0; } void __init cpu_read_bootcpu_ops(void) { struct device_node *dn = of_get_cpu_node(0, NULL); if (!dn) { pr_err("Failed to find device node for boot cpu\n"); return; } cpu_read_ops(dn, 0); }
gpl-2.0
Jaiglissechef-i9100/android_kernel_samsung_smdk4412
drivers/misc/mpu3050/accel/mantis.c
744
8033
/* $License: Copyright (C) 2010 InvenSense Corporation, All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. $ */ /** * @defgroup ACCELDL (Motion Library - Accelerometer Driver Layer) * @brief Provides the interface to setup and handle an accelerometers * connected to the secondary I2C interface of the gyroscope. * * @{ * @file lis331.c * @brief Accelerometer setup and handling methods for Invensense MANTIS */ /* ------------------ */ /* - Include Files. - */ /* ------------------ */ #ifdef __KERNEL__ #include <linux/module.h> #endif #include "mpu.h" #include "mlsl.h" #include "mlos.h" #include <log.h> #undef MPL_LOG_TAG #define MPL_LOG_TAG "MPL-acc" /* --------------------- */ /* - Variables. - */ /* --------------------- */ struct mantis_config { unsigned int odr; /* output data rate 1/1000 Hz*/ unsigned int fsr; /* full scale range mg */ unsigned int ths; /* Motion no-motion thseshold mg */ unsigned int dur; /* Motion no-motion duration ms */ }; struct mantis_private_data { struct mantis_config suspend; struct mantis_config resume; }; /***************************************** *Accelerometer Initialization Functions *****************************************/ /** * Record the odr for use in computing duration values. 
* * @param config Config to set, suspend or resume structure * @param odr output data rate in 1/1000 hz */ void mantis_set_odr(struct mantis_config *config, long odr) { config->odr = odr; } void mantis_set_ths(struct mantis_config *config, long ths) { if (ths < 0) ths = 0; config->ths = ths; MPL_LOGV("THS: %d\n", config->ths); } void mantis_set_dur(struct mantis_config *config, long dur) { if (dur < 0) dur = 0; config->dur = dur; MPL_LOGV("DUR: %d\n", config->dur); } static void mantis_set_fsr( struct mantis_config *config, long fsr) { if (fsr <= 2000) config->fsr = 2000; else if (fsr <= 4000) config->fsr = 4000; else config->fsr = 8000; MPL_LOGV("FSR: %d\n", config->fsr); } static int mantis_init(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata) { struct mantis_private_data *private_data; private_data = (struct mantis_private_data *) MLOSMalloc(sizeof(struct mantis_private_data)); if (!private_data) return ML_ERROR_MEMORY_EXAUSTED; pdata->private_data = private_data; mantis_set_odr(&private_data->suspend, 0); mantis_set_odr(&private_data->resume, 200000); mantis_set_fsr(&private_data->suspend, 2000); mantis_set_fsr(&private_data->resume, 2000); mantis_set_ths(&private_data->suspend, 80); mantis_set_ths(&private_data->resume, 40); mantis_set_dur(&private_data->suspend, 1000); mantis_set_dur(&private_data->resume, 2540); return ML_SUCCESS; } static int mantis_exit(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata) { if (pdata->private_data) return MLOSFree(pdata->private_data); else return ML_SUCCESS; } int mantis_suspend(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata) { unsigned char reg; int result; result = MLSLSerialRead(mlsl_handle, pdata->address, MPUREG_PWR_MGMT_2, 1, &reg); ERROR_CHECK(result); reg |= (BIT_STBY_XA | BIT_STBY_YA | BIT_STBY_ZA); result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, MPUREG_PWR_MGMT_2, reg); 
ERROR_CHECK(result); return ML_SUCCESS; } int mantis_resume(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata) { int result = ML_SUCCESS; unsigned char reg; struct mantis_private_data *private_data; private_data = (struct mantis_private_data *) pdata->private_data; MLSLSerialRead(mlsl_handle, pdata->address, MPUREG_PWR_MGMT_2, 1, &reg); reg &= ~(BIT_STBY_XA | BIT_STBY_YA | BIT_STBY_ZA); result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, MPUREG_PWR_MGMT_2, reg); ERROR_CHECK(result); if (slave->range.mantissa == 2) reg = 0; else if (slave->range.mantissa == 4) reg = 1 << 3; else if (slave->range.mantissa == 8) reg = 2 << 3; else if (slave->range.mantissa == 16) reg = 3 << 3; else return ML_ERROR; result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, MPUREG_ACCEL_CONFIG, reg); ERROR_CHECK(result); reg = (unsigned char) private_data->suspend.ths / ACCEL_MOT_THR_LSB; result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, MPUREG_ACCEL_MOT_THR, reg); ERROR_CHECK(result); reg = (unsigned char) ACCEL_ZRMOT_THR_LSB_CONVERSION(private_data->resume.ths); result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, MPUREG_ACCEL_ZRMOT_THR, reg); ERROR_CHECK(result); reg = (unsigned char) private_data->suspend.ths / ACCEL_MOT_DUR_LSB; result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, MPUREG_ACCEL_MOT_DUR, reg); ERROR_CHECK(result); reg = (unsigned char) private_data->resume.ths / ACCEL_ZRMOT_DUR_LSB; result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, MPUREG_ACCEL_ZRMOT_DUR, reg); ERROR_CHECK(result); return result; } int mantis_read(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, unsigned char *data) { int result; result = MLSLSerialRead(mlsl_handle, pdata->address, slave->reg, slave->len, data); return result; } static int mantis_config(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, struct ext_slave_config *data) { struct 
mantis_private_data *private_data = pdata->private_data; if (!data->data) return ML_ERROR_INVALID_PARAMETER; switch (data->key) { case MPU_SLAVE_CONFIG_ODR_SUSPEND: mantis_set_odr(&private_data->suspend, *((long *)data->data)); break; case MPU_SLAVE_CONFIG_ODR_RESUME: mantis_set_odr(&private_data->resume, *((long *)data->data)); break; case MPU_SLAVE_CONFIG_FSR_SUSPEND: mantis_set_fsr(&private_data->suspend, *((long *)data->data)); break; case MPU_SLAVE_CONFIG_FSR_RESUME: mantis_set_fsr(&private_data->resume, *((long *)data->data)); break; case MPU_SLAVE_CONFIG_MOT_THS: mantis_set_ths(&private_data->suspend, (*((long *)data->data))); break; case MPU_SLAVE_CONFIG_NMOT_THS: mantis_set_ths(&private_data->resume, (*((long *)data->data))); break; case MPU_SLAVE_CONFIG_MOT_DUR: mantis_set_dur(&private_data->suspend, (*((long *)data->data))); break; case MPU_SLAVE_CONFIG_NMOT_DUR: mantis_set_dur(&private_data->resume, (*((long *)data->data))); break; default: return ML_ERROR_FEATURE_NOT_IMPLEMENTED; }; return ML_SUCCESS; } struct ext_slave_descr mantis_descr = { /*.init = */ mantis_init, /*.exit = */ mantis_exit, /*.suspend = */ mantis_suspend, /*.resume = */ mantis_resume, /*.read = */ mantis_read, /*.config = */ mantis_config, /*.get_config = */ NULL, /*.name = */ "mantis", /*.type = */ EXT_SLAVE_TYPE_ACCELEROMETER, /*.id = */ ACCEL_ID_MPU6000, /*.reg = */ 0xA8, /*.len = */ 6, /*.endian = */ EXT_SLAVE_BIG_ENDIAN, /*.range = */ {2, 0}, }; struct ext_slave_descr *mantis_get_slave_descr(void) { return &mantis_descr; } EXPORT_SYMBOL(mantis_get_slave_descr); /** * @} */
gpl-2.0
adeepv/android-kernel-zte-v9a
drivers/net/wireless/ath/ath5k/initvals.c
1000
53594
/* * Initial register settings functions * * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com> * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ #include "ath5k.h" #include "reg.h" #include "debug.h" #include "base.h" /* * Mode-independent initial register writes */ struct ath5k_ini { u16 ini_register; u32 ini_value; enum { AR5K_INI_WRITE = 0, /* Default */ AR5K_INI_READ = 1, /* Cleared on read */ } ini_mode; }; /* * Mode specific initial register values */ struct ath5k_ini_mode { u16 mode_register; u32 mode_value[5]; }; /* Initial register settings for AR5210 */ static const struct ath5k_ini ar5210_ini[] = { /* PCU and MAC registers */ { AR5K_NOQCU_TXDP0, 0 }, { AR5K_NOQCU_TXDP1, 0 }, { AR5K_RXDP, 0 }, { AR5K_CR, 0 }, { AR5K_ISR, 0, AR5K_INI_READ }, { AR5K_IMR, 0 }, { AR5K_IER, AR5K_IER_DISABLE }, { AR5K_BSR, 0, AR5K_INI_READ }, { AR5K_TXCFG, AR5K_DMASIZE_128B }, { AR5K_RXCFG, AR5K_DMASIZE_128B }, { AR5K_CFG, AR5K_INIT_CFG }, { AR5K_TOPS, 8 }, { AR5K_RXNOFRM, 8 }, { AR5K_RPGTO, 0 }, { AR5K_TXNOFRM, 0 }, { AR5K_SFR, 0 }, { AR5K_MIBC, 0 }, { AR5K_MISC, 0 }, { AR5K_RX_FILTER_5210, 0 }, { AR5K_MCAST_FILTER0_5210, 0 }, { AR5K_MCAST_FILTER1_5210, 0 }, { AR5K_TX_MASK0, 0 }, { 
AR5K_TX_MASK1, 0 }, { AR5K_CLR_TMASK, 0 }, { AR5K_TRIG_LVL, AR5K_TUNE_MIN_TX_FIFO_THRES }, { AR5K_DIAG_SW_5210, 0 }, { AR5K_RSSI_THR, AR5K_TUNE_RSSI_THRES }, { AR5K_TSF_L32_5210, 0 }, { AR5K_TIMER0_5210, 0 }, { AR5K_TIMER1_5210, 0xffffffff }, { AR5K_TIMER2_5210, 0xffffffff }, { AR5K_TIMER3_5210, 1 }, { AR5K_CFP_DUR_5210, 0 }, { AR5K_CFP_PERIOD_5210, 0 }, /* PHY registers */ { AR5K_PHY(0), 0x00000047 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY(3), 0x09848ea6 }, { AR5K_PHY(4), 0x3d32e000 }, { AR5K_PHY(5), 0x0000076b }, { AR5K_PHY_ACT, AR5K_PHY_ACT_DISABLE }, { AR5K_PHY(8), 0x02020200 }, { AR5K_PHY(9), 0x00000e0e }, { AR5K_PHY(10), 0x0a020201 }, { AR5K_PHY(11), 0x00036ffc }, { AR5K_PHY(12), 0x00000000 }, { AR5K_PHY(13), 0x00000e0e }, { AR5K_PHY(14), 0x00000007 }, { AR5K_PHY(15), 0x00020100 }, { AR5K_PHY(16), 0x89630000 }, { AR5K_PHY(17), 0x1372169c }, { AR5K_PHY(18), 0x0018b633 }, { AR5K_PHY(19), 0x1284613c }, { AR5K_PHY(20), 0x0de8b8e0 }, { AR5K_PHY(21), 0x00074859 }, { AR5K_PHY(22), 0x7e80beba }, { AR5K_PHY(23), 0x313a665e }, { AR5K_PHY_AGCCTL, 0x00001d08 }, { AR5K_PHY(25), 0x0001ce00 }, { AR5K_PHY(26), 0x409a4190 }, { AR5K_PHY(28), 0x0000000f }, { AR5K_PHY(29), 0x00000080 }, { AR5K_PHY(30), 0x00000004 }, { AR5K_PHY(31), 0x00000018 }, /* 0x987c */ { AR5K_PHY(64), 0x00000000 }, /* 0x9900 */ { AR5K_PHY(65), 0x00000000 }, { AR5K_PHY(66), 0x00000000 }, { AR5K_PHY(67), 0x00800000 }, { AR5K_PHY(68), 0x00000003 }, /* BB gain table (64bytes) */ { AR5K_BB_GAIN(0), 0x00000000 }, { AR5K_BB_GAIN(1), 0x00000020 }, { AR5K_BB_GAIN(2), 0x00000010 }, { AR5K_BB_GAIN(3), 0x00000030 }, { AR5K_BB_GAIN(4), 0x00000008 }, { AR5K_BB_GAIN(5), 0x00000028 }, { AR5K_BB_GAIN(6), 0x00000028 }, { AR5K_BB_GAIN(7), 0x00000004 }, { AR5K_BB_GAIN(8), 0x00000024 }, { AR5K_BB_GAIN(9), 0x00000014 }, { AR5K_BB_GAIN(10), 0x00000034 }, { AR5K_BB_GAIN(11), 0x0000000c }, { AR5K_BB_GAIN(12), 0x0000002c }, { AR5K_BB_GAIN(13), 0x00000002 }, { AR5K_BB_GAIN(14), 0x00000022 }, { AR5K_BB_GAIN(15), 0x00000012 }, { 
AR5K_BB_GAIN(16), 0x00000032 }, { AR5K_BB_GAIN(17), 0x0000000a }, { AR5K_BB_GAIN(18), 0x0000002a }, { AR5K_BB_GAIN(19), 0x00000001 }, { AR5K_BB_GAIN(20), 0x00000021 }, { AR5K_BB_GAIN(21), 0x00000011 }, { AR5K_BB_GAIN(22), 0x00000031 }, { AR5K_BB_GAIN(23), 0x00000009 }, { AR5K_BB_GAIN(24), 0x00000029 }, { AR5K_BB_GAIN(25), 0x00000005 }, { AR5K_BB_GAIN(26), 0x00000025 }, { AR5K_BB_GAIN(27), 0x00000015 }, { AR5K_BB_GAIN(28), 0x00000035 }, { AR5K_BB_GAIN(29), 0x0000000d }, { AR5K_BB_GAIN(30), 0x0000002d }, { AR5K_BB_GAIN(31), 0x00000003 }, { AR5K_BB_GAIN(32), 0x00000023 }, { AR5K_BB_GAIN(33), 0x00000013 }, { AR5K_BB_GAIN(34), 0x00000033 }, { AR5K_BB_GAIN(35), 0x0000000b }, { AR5K_BB_GAIN(36), 0x0000002b }, { AR5K_BB_GAIN(37), 0x00000007 }, { AR5K_BB_GAIN(38), 0x00000027 }, { AR5K_BB_GAIN(39), 0x00000017 }, { AR5K_BB_GAIN(40), 0x00000037 }, { AR5K_BB_GAIN(41), 0x0000000f }, { AR5K_BB_GAIN(42), 0x0000002f }, { AR5K_BB_GAIN(43), 0x0000002f }, { AR5K_BB_GAIN(44), 0x0000002f }, { AR5K_BB_GAIN(45), 0x0000002f }, { AR5K_BB_GAIN(46), 0x0000002f }, { AR5K_BB_GAIN(47), 0x0000002f }, { AR5K_BB_GAIN(48), 0x0000002f }, { AR5K_BB_GAIN(49), 0x0000002f }, { AR5K_BB_GAIN(50), 0x0000002f }, { AR5K_BB_GAIN(51), 0x0000002f }, { AR5K_BB_GAIN(52), 0x0000002f }, { AR5K_BB_GAIN(53), 0x0000002f }, { AR5K_BB_GAIN(54), 0x0000002f }, { AR5K_BB_GAIN(55), 0x0000002f }, { AR5K_BB_GAIN(56), 0x0000002f }, { AR5K_BB_GAIN(57), 0x0000002f }, { AR5K_BB_GAIN(58), 0x0000002f }, { AR5K_BB_GAIN(59), 0x0000002f }, { AR5K_BB_GAIN(60), 0x0000002f }, { AR5K_BB_GAIN(61), 0x0000002f }, { AR5K_BB_GAIN(62), 0x0000002f }, { AR5K_BB_GAIN(63), 0x0000002f }, /* 5110 RF gain table (64btes) */ { AR5K_RF_GAIN(0), 0x0000001d }, { AR5K_RF_GAIN(1), 0x0000005d }, { AR5K_RF_GAIN(2), 0x0000009d }, { AR5K_RF_GAIN(3), 0x000000dd }, { AR5K_RF_GAIN(4), 0x0000011d }, { AR5K_RF_GAIN(5), 0x00000021 }, { AR5K_RF_GAIN(6), 0x00000061 }, { AR5K_RF_GAIN(7), 0x000000a1 }, { AR5K_RF_GAIN(8), 0x000000e1 }, { AR5K_RF_GAIN(9), 0x00000031 }, { 
AR5K_RF_GAIN(10), 0x00000071 }, { AR5K_RF_GAIN(11), 0x000000b1 }, { AR5K_RF_GAIN(12), 0x0000001c }, { AR5K_RF_GAIN(13), 0x0000005c }, { AR5K_RF_GAIN(14), 0x00000029 }, { AR5K_RF_GAIN(15), 0x00000069 }, { AR5K_RF_GAIN(16), 0x000000a9 }, { AR5K_RF_GAIN(17), 0x00000020 }, { AR5K_RF_GAIN(18), 0x00000019 }, { AR5K_RF_GAIN(19), 0x00000059 }, { AR5K_RF_GAIN(20), 0x00000099 }, { AR5K_RF_GAIN(21), 0x00000030 }, { AR5K_RF_GAIN(22), 0x00000005 }, { AR5K_RF_GAIN(23), 0x00000025 }, { AR5K_RF_GAIN(24), 0x00000065 }, { AR5K_RF_GAIN(25), 0x000000a5 }, { AR5K_RF_GAIN(26), 0x00000028 }, { AR5K_RF_GAIN(27), 0x00000068 }, { AR5K_RF_GAIN(28), 0x0000001f }, { AR5K_RF_GAIN(29), 0x0000001e }, { AR5K_RF_GAIN(30), 0x00000018 }, { AR5K_RF_GAIN(31), 0x00000058 }, { AR5K_RF_GAIN(32), 0x00000098 }, { AR5K_RF_GAIN(33), 0x00000003 }, { AR5K_RF_GAIN(34), 0x00000004 }, { AR5K_RF_GAIN(35), 0x00000044 }, { AR5K_RF_GAIN(36), 0x00000084 }, { AR5K_RF_GAIN(37), 0x00000013 }, { AR5K_RF_GAIN(38), 0x00000012 }, { AR5K_RF_GAIN(39), 0x00000052 }, { AR5K_RF_GAIN(40), 0x00000092 }, { AR5K_RF_GAIN(41), 0x000000d2 }, { AR5K_RF_GAIN(42), 0x0000002b }, { AR5K_RF_GAIN(43), 0x0000002a }, { AR5K_RF_GAIN(44), 0x0000006a }, { AR5K_RF_GAIN(45), 0x000000aa }, { AR5K_RF_GAIN(46), 0x0000001b }, { AR5K_RF_GAIN(47), 0x0000001a }, { AR5K_RF_GAIN(48), 0x0000005a }, { AR5K_RF_GAIN(49), 0x0000009a }, { AR5K_RF_GAIN(50), 0x000000da }, { AR5K_RF_GAIN(51), 0x00000006 }, { AR5K_RF_GAIN(52), 0x00000006 }, { AR5K_RF_GAIN(53), 0x00000006 }, { AR5K_RF_GAIN(54), 0x00000006 }, { AR5K_RF_GAIN(55), 0x00000006 }, { AR5K_RF_GAIN(56), 0x00000006 }, { AR5K_RF_GAIN(57), 0x00000006 }, { AR5K_RF_GAIN(58), 0x00000006 }, { AR5K_RF_GAIN(59), 0x00000006 }, { AR5K_RF_GAIN(60), 0x00000006 }, { AR5K_RF_GAIN(61), 0x00000006 }, { AR5K_RF_GAIN(62), 0x00000006 }, { AR5K_RF_GAIN(63), 0x00000006 }, /* PHY activation */ { AR5K_PHY(53), 0x00000020 }, { AR5K_PHY(51), 0x00000004 }, { AR5K_PHY(50), 0x00060106 }, { AR5K_PHY(39), 0x0000006d }, { AR5K_PHY(48), 
0x00000000 }, { AR5K_PHY(52), 0x00000014 }, { AR5K_PHY_ACT, AR5K_PHY_ACT_ENABLE }, }; /* Initial register settings for AR5211 */ static const struct ath5k_ini ar5211_ini[] = { { AR5K_RXDP, 0x00000000 }, { AR5K_RTSD0, 0x84849c9c }, { AR5K_RTSD1, 0x7c7c7c7c }, { AR5K_RXCFG, 0x00000005 }, { AR5K_MIBC, 0x00000000 }, { AR5K_TOPS, 0x00000008 }, { AR5K_RXNOFRM, 0x00000008 }, { AR5K_TXNOFRM, 0x00000010 }, { AR5K_RPGTO, 0x00000000 }, { AR5K_RFCNT, 0x0000001f }, { AR5K_QUEUE_TXDP(0), 0x00000000 }, { AR5K_QUEUE_TXDP(1), 0x00000000 }, { AR5K_QUEUE_TXDP(2), 0x00000000 }, { AR5K_QUEUE_TXDP(3), 0x00000000 }, { AR5K_QUEUE_TXDP(4), 0x00000000 }, { AR5K_QUEUE_TXDP(5), 0x00000000 }, { AR5K_QUEUE_TXDP(6), 0x00000000 }, { AR5K_QUEUE_TXDP(7), 0x00000000 }, { AR5K_QUEUE_TXDP(8), 0x00000000 }, { AR5K_QUEUE_TXDP(9), 0x00000000 }, { AR5K_DCU_FP, 0x00000000 }, { AR5K_STA_ID1, 0x00000000 }, { AR5K_BSS_ID0, 0x00000000 }, { AR5K_BSS_ID1, 0x00000000 }, { AR5K_RSSI_THR, 0x00000000 }, { AR5K_CFP_PERIOD_5211, 0x00000000 }, { AR5K_TIMER0_5211, 0x00000030 }, { AR5K_TIMER1_5211, 0x0007ffff }, { AR5K_TIMER2_5211, 0x01ffffff }, { AR5K_TIMER3_5211, 0x00000031 }, { AR5K_CFP_DUR_5211, 0x00000000 }, { AR5K_RX_FILTER_5211, 0x00000000 }, { AR5K_MCAST_FILTER0_5211, 0x00000000 }, { AR5K_MCAST_FILTER1_5211, 0x00000002 }, { AR5K_DIAG_SW_5211, 0x00000000 }, { AR5K_ADDAC_TEST, 0x00000000 }, { AR5K_DEFAULT_ANTENNA, 0x00000000 }, /* PHY registers */ { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY(3), 0x2d849093 }, { AR5K_PHY(4), 0x7d32e000 }, { AR5K_PHY(5), 0x00000f6b }, { AR5K_PHY_ACT, 0x00000000 }, { AR5K_PHY(11), 0x00026ffe }, { AR5K_PHY(12), 0x00000000 }, { AR5K_PHY(15), 0x00020100 }, { AR5K_PHY(16), 0x206a017a }, { AR5K_PHY(19), 0x1284613c }, { AR5K_PHY(21), 0x00000859 }, { AR5K_PHY(26), 0x409a4190 }, /* 0x9868 */ { AR5K_PHY(27), 0x050cb081 }, { AR5K_PHY(28), 0x0000000f }, { AR5K_PHY(29), 0x00000080 }, { AR5K_PHY(30), 0x0000000c }, { AR5K_PHY(64), 0x00000000 }, { AR5K_PHY(65), 0x00000000 }, { AR5K_PHY(66), 0x00000000 
}, { AR5K_PHY(67), 0x00800000 }, { AR5K_PHY(68), 0x00000001 }, { AR5K_PHY(71), 0x0000092a }, { AR5K_PHY_IQ, 0x00000000 }, { AR5K_PHY(73), 0x00058a05 }, { AR5K_PHY(74), 0x00000001 }, { AR5K_PHY(75), 0x00000000 }, { AR5K_PHY_PAPD_PROBE, 0x00000000 }, { AR5K_PHY(77), 0x00000000 }, /* 0x9934 */ { AR5K_PHY(78), 0x00000000 }, /* 0x9938 */ { AR5K_PHY(79), 0x0000003f }, /* 0x993c */ { AR5K_PHY(80), 0x00000004 }, { AR5K_PHY(82), 0x00000000 }, { AR5K_PHY(83), 0x00000000 }, { AR5K_PHY(84), 0x00000000 }, { AR5K_PHY_RADAR, 0x5d50f14c }, { AR5K_PHY(86), 0x00000018 }, { AR5K_PHY(87), 0x004b6a8e }, /* Initial Power table (32bytes) * common on all cards/modes. * Note: Table is rewritten during * txpower setup later using calibration * data etc. so next write is non-common */ { AR5K_PHY_PCDAC_TXPOWER(1), 0x06ff05ff }, { AR5K_PHY_PCDAC_TXPOWER(2), 0x07ff07ff }, { AR5K_PHY_PCDAC_TXPOWER(3), 0x08ff08ff }, { AR5K_PHY_PCDAC_TXPOWER(4), 0x09ff09ff }, { AR5K_PHY_PCDAC_TXPOWER(5), 0x0aff0aff }, { AR5K_PHY_PCDAC_TXPOWER(6), 0x0bff0bff }, { AR5K_PHY_PCDAC_TXPOWER(7), 0x0cff0cff }, { AR5K_PHY_PCDAC_TXPOWER(8), 0x0dff0dff }, { AR5K_PHY_PCDAC_TXPOWER(9), 0x0fff0eff }, { AR5K_PHY_PCDAC_TXPOWER(10), 0x12ff12ff }, { AR5K_PHY_PCDAC_TXPOWER(11), 0x14ff13ff }, { AR5K_PHY_PCDAC_TXPOWER(12), 0x16ff15ff }, { AR5K_PHY_PCDAC_TXPOWER(13), 0x19ff17ff }, { AR5K_PHY_PCDAC_TXPOWER(14), 0x1bff1aff }, { AR5K_PHY_PCDAC_TXPOWER(15), 0x1eff1dff }, { AR5K_PHY_PCDAC_TXPOWER(16), 0x23ff20ff }, { AR5K_PHY_PCDAC_TXPOWER(17), 0x27ff25ff }, { AR5K_PHY_PCDAC_TXPOWER(18), 0x2cff29ff }, { AR5K_PHY_PCDAC_TXPOWER(19), 0x31ff2fff }, { AR5K_PHY_PCDAC_TXPOWER(20), 0x37ff34ff }, { AR5K_PHY_PCDAC_TXPOWER(21), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(22), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(23), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(24), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(25), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(26), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(27), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(28), 0x3aff3aff }, { 
AR5K_PHY_PCDAC_TXPOWER(29), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(30), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(31), 0x3aff3aff }, { AR5K_PHY_CCKTXCTL, 0x00000000 }, { AR5K_PHY(642), 0x503e4646 }, { AR5K_PHY_GAIN_2GHZ, 0x6480416c }, { AR5K_PHY(644), 0x0199a003 }, { AR5K_PHY(645), 0x044cd610 }, { AR5K_PHY(646), 0x13800040 }, { AR5K_PHY(647), 0x1be00060 }, { AR5K_PHY(648), 0x0c53800a }, { AR5K_PHY(649), 0x0014df3b }, { AR5K_PHY(650), 0x000001b5 }, { AR5K_PHY(651), 0x00000020 }, }; /* Initial mode-specific settings for AR5211 * 5211 supports OFDM-only g (draft g) but we * need to test it ! */ static const struct ath5k_ini_mode ar5211_ini_mode[] = { { AR5K_TXCFG, /* a aTurbo b g (OFDM) */ { 0x00000015, 0x00000015, 0x0000001d, 0x00000015 } }, { AR5K_QUEUE_DFS_LOCAL_IFS(0), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(1), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(2), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(3), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(4), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(5), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(6), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(7), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(8), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(9), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_DCU_GBL_IFS_SLOT, { 0x00000168, 0x000001e0, 0x000001b8, 0x00000168 } }, { AR5K_DCU_GBL_IFS_SIFS, { 0x00000230, 0x000001e0, 0x000000b0, 0x00000230 } }, { AR5K_DCU_GBL_IFS_EIFS, { 0x00000d98, 0x00001180, 0x00001f48, 0x00000d98 } }, { AR5K_DCU_GBL_IFS_MISC, { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000a0e0 } }, { AR5K_TIME_OUT, { 0x04000400, 0x08000800, 0x20003000, 0x04000400 } }, { 
AR5K_USEC_5211, { 0x0e8d8fa7, 0x0e8d8fcf, 0x01608f95, 0x0e8d8fa7 } }, { AR5K_PHY_TURBO, { 0x00000000, 0x00000003, 0x00000000, 0x00000000 } }, { AR5K_PHY(8), { 0x02020200, 0x02020200, 0x02010200, 0x02020200 } }, { AR5K_PHY(9), { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e } }, { AR5K_PHY(10), { 0x0a020001, 0x0a020001, 0x05010000, 0x0a020001 } }, { AR5K_PHY(13), { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY(14), { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b } }, { AR5K_PHY(17), { 0x1372169c, 0x137216a5, 0x137216a8, 0x1372169c } }, { AR5K_PHY(18), { 0x0018ba67, 0x0018ba67, 0x0018ba69, 0x0018ba69 } }, { AR5K_PHY(20), { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } }, { AR5K_PHY_SIG, { 0x7e800d2e, 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } }, { AR5K_PHY_AGCCOARSE, { 0x31375d5e, 0x31375d5e, 0x313a5d5e, 0x31375d5e } }, { AR5K_PHY_AGCCTL, { 0x0000bd10, 0x0000bd10, 0x0000bd38, 0x0000bd10 } }, { AR5K_PHY_NF, { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, { AR5K_PHY_RX_DELAY, { 0x00002710, 0x00002710, 0x0000157c, 0x00002710 } }, { AR5K_PHY(70), { 0x00000190, 0x00000190, 0x00000084, 0x00000190 } }, { AR5K_PHY_FRAME_CTL_5211, { 0x6fe01020, 0x6fe01020, 0x6fe00920, 0x6fe01020 } }, { AR5K_PHY_PCDAC_TXPOWER_BASE, { 0x05ff14ff, 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } }, { AR5K_RF_BUFFER_CONTROL_4, { 0x00000010, 0x00000014, 0x00000010, 0x00000010 } }, }; /* Initial register settings for AR5212 */ static const struct ath5k_ini ar5212_ini_common_start[] = { { AR5K_RXDP, 0x00000000 }, { AR5K_RXCFG, 0x00000005 }, { AR5K_MIBC, 0x00000000 }, { AR5K_TOPS, 0x00000008 }, { AR5K_RXNOFRM, 0x00000008 }, { AR5K_TXNOFRM, 0x00000010 }, { AR5K_RPGTO, 0x00000000 }, { AR5K_RFCNT, 0x0000001f }, { AR5K_QUEUE_TXDP(0), 0x00000000 }, { AR5K_QUEUE_TXDP(1), 0x00000000 }, { AR5K_QUEUE_TXDP(2), 0x00000000 }, { AR5K_QUEUE_TXDP(3), 0x00000000 }, { AR5K_QUEUE_TXDP(4), 0x00000000 }, { AR5K_QUEUE_TXDP(5), 0x00000000 }, { AR5K_QUEUE_TXDP(6), 0x00000000 }, { AR5K_QUEUE_TXDP(7), 0x00000000 }, 
{ AR5K_QUEUE_TXDP(8), 0x00000000 }, { AR5K_QUEUE_TXDP(9), 0x00000000 }, { AR5K_DCU_FP, 0x00000000 }, { AR5K_DCU_TXP, 0x00000000 }, /* Tx filter table 0 (32 entries) */ { AR5K_DCU_TX_FILTER_0(0), 0x00000000 }, /* DCU 0 */ { AR5K_DCU_TX_FILTER_0(1), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(2), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(3), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(4), 0x00000000 }, /* DCU 1 */ { AR5K_DCU_TX_FILTER_0(5), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(6), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(7), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(8), 0x00000000 }, /* DCU 2 */ { AR5K_DCU_TX_FILTER_0(9), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(10), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(11), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(12), 0x00000000 }, /* DCU 3 */ { AR5K_DCU_TX_FILTER_0(13), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(14), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(15), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(16), 0x00000000 }, /* DCU 4 */ { AR5K_DCU_TX_FILTER_0(17), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(18), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(19), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(20), 0x00000000 }, /* DCU 5 */ { AR5K_DCU_TX_FILTER_0(21), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(22), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(23), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(24), 0x00000000 }, /* DCU 6 */ { AR5K_DCU_TX_FILTER_0(25), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(26), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(27), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(28), 0x00000000 }, /* DCU 7 */ { AR5K_DCU_TX_FILTER_0(29), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(30), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(31), 0x00000000 }, /* Tx filter table 1 (16 entries) */ { AR5K_DCU_TX_FILTER_1(0), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(1), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(2), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(3), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(4), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(5), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(6), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(7), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(8), 
0x00000000 }, { AR5K_DCU_TX_FILTER_1(9), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(10), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(11), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(12), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(13), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(14), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(15), 0x00000000 }, { AR5K_DCU_TX_FILTER_CLR, 0x00000000 }, { AR5K_DCU_TX_FILTER_SET, 0x00000000 }, { AR5K_STA_ID1, 0x00000000 }, { AR5K_BSS_ID0, 0x00000000 }, { AR5K_BSS_ID1, 0x00000000 }, { AR5K_BEACON_5211, 0x00000000 }, { AR5K_CFP_PERIOD_5211, 0x00000000 }, { AR5K_TIMER0_5211, 0x00000030 }, { AR5K_TIMER1_5211, 0x0007ffff }, { AR5K_TIMER2_5211, 0x01ffffff }, { AR5K_TIMER3_5211, 0x00000031 }, { AR5K_CFP_DUR_5211, 0x00000000 }, { AR5K_RX_FILTER_5211, 0x00000000 }, { AR5K_DIAG_SW_5211, 0x00000000 }, { AR5K_ADDAC_TEST, 0x00000000 }, { AR5K_DEFAULT_ANTENNA, 0x00000000 }, { AR5K_FRAME_CTL_QOSM, 0x000fc78f }, { AR5K_XRMODE, 0x2a82301a }, { AR5K_XRDELAY, 0x05dc01e0 }, { AR5K_XRTIMEOUT, 0x1f402710 }, { AR5K_XRCHIRP, 0x01f40000 }, { AR5K_XRSTOMP, 0x00001e1c }, { AR5K_SLEEP0, 0x0002aaaa }, { AR5K_SLEEP1, 0x02005555 }, { AR5K_SLEEP2, 0x00000000 }, { AR_BSSMSKL, 0xffffffff }, { AR_BSSMSKU, 0x0000ffff }, { AR5K_TXPC, 0x00000000 }, { AR5K_PROFCNT_TX, 0x00000000 }, { AR5K_PROFCNT_RX, 0x00000000 }, { AR5K_PROFCNT_RXCLR, 0x00000000 }, { AR5K_PROFCNT_CYCLE, 0x00000000 }, { AR5K_QUIET_CTL1, 0x00000088 }, /* Initial rate duration table (32 entries )*/ { AR5K_RATE_DUR(0), 0x00000000 }, { AR5K_RATE_DUR(1), 0x0000008c }, { AR5K_RATE_DUR(2), 0x000000e4 }, { AR5K_RATE_DUR(3), 0x000002d5 }, { AR5K_RATE_DUR(4), 0x00000000 }, { AR5K_RATE_DUR(5), 0x00000000 }, { AR5K_RATE_DUR(6), 0x000000a0 }, { AR5K_RATE_DUR(7), 0x000001c9 }, { AR5K_RATE_DUR(8), 0x0000002c }, { AR5K_RATE_DUR(9), 0x0000002c }, { AR5K_RATE_DUR(10), 0x00000030 }, { AR5K_RATE_DUR(11), 0x0000003c }, { AR5K_RATE_DUR(12), 0x0000002c }, { AR5K_RATE_DUR(13), 0x0000002c }, { AR5K_RATE_DUR(14), 0x00000030 }, { AR5K_RATE_DUR(15), 0x0000003c }, { 
AR5K_RATE_DUR(16), 0x00000000 }, { AR5K_RATE_DUR(17), 0x00000000 }, { AR5K_RATE_DUR(18), 0x00000000 }, { AR5K_RATE_DUR(19), 0x00000000 }, { AR5K_RATE_DUR(20), 0x00000000 }, { AR5K_RATE_DUR(21), 0x00000000 }, { AR5K_RATE_DUR(22), 0x00000000 }, { AR5K_RATE_DUR(23), 0x00000000 }, { AR5K_RATE_DUR(24), 0x000000d5 }, { AR5K_RATE_DUR(25), 0x000000df }, { AR5K_RATE_DUR(26), 0x00000102 }, { AR5K_RATE_DUR(27), 0x0000013a }, { AR5K_RATE_DUR(28), 0x00000075 }, { AR5K_RATE_DUR(29), 0x0000007f }, { AR5K_RATE_DUR(30), 0x000000a2 }, { AR5K_RATE_DUR(31), 0x00000000 }, { AR5K_QUIET_CTL2, 0x00010002 }, { AR5K_TSF_PARM, 0x00000001 }, { AR5K_QOS_NOACK, 0x000000c0 }, { AR5K_PHY_ERR_FIL, 0x00000000 }, { AR5K_XRLAT_TX, 0x00000168 }, { AR5K_ACKSIFS, 0x00000000 }, /* Rate -> db table * notice ...03<-02<-01<-00 ! */ { AR5K_RATE2DB(0), 0x03020100 }, { AR5K_RATE2DB(1), 0x07060504 }, { AR5K_RATE2DB(2), 0x0b0a0908 }, { AR5K_RATE2DB(3), 0x0f0e0d0c }, { AR5K_RATE2DB(4), 0x13121110 }, { AR5K_RATE2DB(5), 0x17161514 }, { AR5K_RATE2DB(6), 0x1b1a1918 }, { AR5K_RATE2DB(7), 0x1f1e1d1c }, /* Db -> Rate table */ { AR5K_DB2RATE(0), 0x03020100 }, { AR5K_DB2RATE(1), 0x07060504 }, { AR5K_DB2RATE(2), 0x0b0a0908 }, { AR5K_DB2RATE(3), 0x0f0e0d0c }, { AR5K_DB2RATE(4), 0x13121110 }, { AR5K_DB2RATE(5), 0x17161514 }, { AR5K_DB2RATE(6), 0x1b1a1918 }, { AR5K_DB2RATE(7), 0x1f1e1d1c }, /* PHY registers (Common settings * for all chips/modes) */ { AR5K_PHY(3), 0xad848e19 }, { AR5K_PHY(4), 0x7d28e000 }, { AR5K_PHY_TIMING_3, 0x9c0a9f6b }, { AR5K_PHY_ACT, 0x00000000 }, { AR5K_PHY(16), 0x206a017a }, { AR5K_PHY(21), 0x00000859 }, { AR5K_PHY_BIN_MASK_1, 0x00000000 }, { AR5K_PHY_BIN_MASK_2, 0x00000000 }, { AR5K_PHY_BIN_MASK_3, 0x00000000 }, { AR5K_PHY_BIN_MASK_CTL, 0x00800000 }, { AR5K_PHY_ANT_CTL, 0x00000001 }, /*{ AR5K_PHY(71), 0x0000092a },*/ /* Old value */ { AR5K_PHY_MAX_RX_LEN, 0x00000c80 }, { AR5K_PHY_IQ, 0x05100000 }, { AR5K_PHY_WARM_RESET, 0x00000001 }, { AR5K_PHY_CTL, 0x00000004 }, { AR5K_PHY_TXPOWER_RATE1, 0x1e1f2022 
}, { AR5K_PHY_TXPOWER_RATE2, 0x0a0b0c0d }, { AR5K_PHY_TXPOWER_RATE_MAX, 0x0000003f }, { AR5K_PHY(82), 0x9280b212 }, { AR5K_PHY_RADAR, 0x5d50e188 }, /*{ AR5K_PHY(86), 0x000000ff },*/ { AR5K_PHY(87), 0x004b6a8e }, { AR5K_PHY_NFTHRES, 0x000003ce }, { AR5K_PHY_RESTART, 0x192fb515 }, { AR5K_PHY(94), 0x00000001 }, { AR5K_PHY_RFBUS_REQ, 0x00000000 }, /*{ AR5K_PHY(644), 0x0080a333 },*/ /* Old value */ /*{ AR5K_PHY(645), 0x00206c10 },*/ /* Old value */ { AR5K_PHY(644), 0x00806333 }, { AR5K_PHY(645), 0x00106c10 }, { AR5K_PHY(646), 0x009c4060 }, /* { AR5K_PHY(647), 0x1483800a }, */ /* { AR5K_PHY(648), 0x01831061 }, */ /* Old value */ { AR5K_PHY(648), 0x018830c6 }, { AR5K_PHY(649), 0x00000400 }, /*{ AR5K_PHY(650), 0x000001b5 },*/ { AR5K_PHY(651), 0x00000000 }, { AR5K_PHY_TXPOWER_RATE3, 0x20202020 }, { AR5K_PHY_TXPOWER_RATE4, 0x20202020 }, /*{ AR5K_PHY(655), 0x13c889af },*/ { AR5K_PHY(656), 0x38490a20 }, { AR5K_PHY(657), 0x00007bb6 }, { AR5K_PHY(658), 0x0fff3ffc }, }; /* Initial mode-specific settings for AR5212 (Written before ar5212_ini) */ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = { { AR5K_QUEUE_DFS_LOCAL_IFS(0), /* a/XR aTurbo b g (DYN) gTurbo */ { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(1), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(2), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(3), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(4), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(5), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(6), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(7), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(8), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 
0x002ffc0f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(9), { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, { AR5K_DCU_GBL_IFS_SIFS, { 0x00000230, 0x000001e0, 0x000000b0, 0x00000160, 0x000001e0 } }, { AR5K_DCU_GBL_IFS_SLOT, { 0x00000168, 0x000001e0, 0x000001b8, 0x0000018c, 0x000001e0 } }, { AR5K_DCU_GBL_IFS_EIFS, { 0x00000e60, 0x00001180, 0x00001f1c, 0x00003e38, 0x00001180 } }, { AR5K_DCU_GBL_IFS_MISC, { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000b0e0, 0x00014068 } }, { AR5K_TIME_OUT, { 0x03e803e8, 0x06e006e0, 0x04200420, 0x08400840, 0x06e006e0 } }, { AR5K_PHY_TURBO, { 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000003 } }, { AR5K_PHY(8), { 0x02020200, 0x02020200, 0x02010200, 0x02020200, 0x02020200 } }, { AR5K_PHY_RF_CTL2, { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY_SETTLING, { 0x1372161c, 0x13721c25, 0x13721722, 0x137216a2, 0x13721c25 } }, { AR5K_PHY_AGCCTL, { 0x00009d10, 0x00009d10, 0x00009d18, 0x00009d18, 0x00009d10 } }, { AR5K_PHY_NF, { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, { AR5K_PHY_WEAK_OFDM_HIGH_THR, { 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 } }, { AR5K_PHY(70), { 0x000001b8, 0x000001b8, 0x00000084, 0x00000108, 0x000001b8 } }, { AR5K_PHY_OFDM_SELFCORR, { 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05 } }, { 0xa230, { 0x00000000, 0x00000000, 0x00000000, 0x00000108, 0x00000000 } }, }; /* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = { { AR5K_TXCFG, /* a/XR aTurbo b g (DYN) gTurbo */ { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } }, { AR5K_USEC_5211, { 0x128d8fa7, 0x09880fcf, 0x04e00f95, 0x12e00fab, 0x09880fcf } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x0a020001, 0x05010100, 0x0a020001, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY_PA_CTL, { 0x00000007, 0x00000007, 0x0000000b, 
0x0000000b, 0x0000000b } }, { AR5K_PHY_GAIN, { 0x0018da5a, 0x0018da5a, 0x0018ca69, 0x0018ca69, 0x0018ca69 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, { AR5K_PHY_SIG, { 0x7e800d2e, 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e, 0x7e800d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137615e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb080, 0x050cb080 } }, { AR5K_PHY_RX_DELAY, { 0x00002710, 0x00002710, 0x0000157c, 0x00002af8, 0x00002710 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81020, 0xf7b81020, 0xf7b80d20, 0xf7b81020, 0xf7b81020 } }, { AR5K_PHY_GAIN_2GHZ, { 0x642c416a, 0x642c416a, 0x6440416a, 0x6440416a, 0x6440416a } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } }, }; static const struct ath5k_ini rf5111_ini_common_end[] = { { AR5K_DCU_FP, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY_ADC_CTL, 0x00022ffe }, { 0x983c, 0x00020100 }, { AR5K_PHY_GAIN_OFFSET, 0x1284613c }, { AR5K_PHY_PAPD_PROBE, 0x00004883 }, { 0x9940, 0x00000004 }, { 0x9958, 0x000000ff }, { 0x9974, 0x00000000 }, { AR5K_PHY_SPENDING, 0x00000018 }, { AR5K_PHY_CCKTXCTL, 0x00000000 }, { AR5K_PHY_CCK_CROSSCORR, 0xd03e6788 }, { AR5K_PHY_DAG_CCK_CTL, 0x000001b5 }, { 0xa23c, 0x13c889af }, }; /* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5112_ini_mode_end[] = { { AR5K_TXCFG, /* a/XR aTurbo b g (DYN) gTurbo */ { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } }, { AR5K_USEC_5211, { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY_PA_CTL, { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, { AR5K_PHY_GAIN, { 0x0018da6d, 0x0018da6d, 0x0018ca75, 0x0018ca75, 
0x0018ca75 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, { AR5K_PHY_SIG, { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e, 0x7e800d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, { AR5K_PHY_RX_DELAY, { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81020, 0xf7b81020, 0xf7b80d10, 0xf7b81010, 0xf7b81010 } }, { AR5K_PHY_CCKTXCTL, { 0x00000000, 0x00000000, 0x00000008, 0x00000008, 0x00000008 } }, { AR5K_PHY_CCK_CROSSCORR, { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, { AR5K_PHY_GAIN_2GHZ, { 0x642c0140, 0x642c0140, 0x6442c160, 0x6442c160, 0x6442c160 } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } }, }; static const struct ath5k_ini rf5112_ini_common_end[] = { { AR5K_DCU_FP, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY_ADC_CTL, 0x00022ffe }, { 0x983c, 0x00020100 }, { AR5K_PHY_GAIN_OFFSET, 0x1284613c }, { AR5K_PHY_PAPD_PROBE, 0x00004882 }, { 0x9940, 0x00000004 }, { 0x9958, 0x000000ff }, { 0x9974, 0x00000000 }, { AR5K_PHY_DAG_CCK_CTL, 0x000001b5 }, { 0xa23c, 0x13c889af }, }; /* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5413_ini_mode_end[] = { { AR5K_TXCFG, /* a/XR aTurbo b g (DYN) gTurbo */ { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } }, { AR5K_USEC_5211, { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY_PA_CTL, { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, { AR5K_PHY_GAIN, { 0x0018fa61, 0x0018fa61, 0x001a1a63, 0x001a1a63, 0x001a1a63 } }, { 
AR5K_PHY_DESIRED_SIZE, { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } }, { AR5K_PHY_SIG, { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, { AR5K_PHY_RX_DELAY, { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, { AR5K_PHY_CCKTXCTL, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, { AR5K_PHY_CCK_CROSSCORR, { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, { AR5K_PHY_GAIN_2GHZ, { 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, { 0xa300, { 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 } }, { 0xa304, { 0x30032602, 0x30032602, 0x30032602, 0x30032602, 0x30032602 } }, { 0xa308, { 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06 } }, { 0xa30c, { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } }, { 0xa310, { 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f } }, { 0xa314, { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } }, { 0xa318, { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } }, { 0xa31c, { 0x90cf865b, 0x90cf865b, 0x8ecf865b, 0x8ecf865b, 0x8ecf865b } }, { 0xa320, { 0x9d4f970f, 0x9d4f970f, 0x9b4f970f, 0x9b4f970f, 0x9b4f970f } }, { 0xa324, { 0xa7cfa38f, 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f, 0xa3cf9f8f } }, { 0xa328, { 0xb55faf1f, 0xb55faf1f, 0xb35faf1f, 0xb35faf1f, 0xb35faf1f } }, { 0xa32c, { 0xbddfb99f, 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f, 0xbbdfb99f } }, { 0xa330, { 0xcb7fc53f, 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f, 0xcb7fc73f } }, { 0xa334, { 0xd5ffd1bf, 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } }, }; static const struct ath5k_ini 
rf5413_ini_common_end[] = { { AR5K_DCU_FP, 0x000003e0 }, { AR5K_5414_CBCFG, 0x00000010 }, { AR5K_SEQ_MASK, 0x0000000f }, { 0x809c, 0x00000000 }, { 0x80a0, 0x00000000 }, { AR5K_MIC_QOS_CTL, 0x00000000 }, { AR5K_MIC_QOS_SEL, 0x00000000 }, { AR5K_MISC_MODE, 0x00000000 }, { AR5K_OFDM_FIL_CNT, 0x00000000 }, { AR5K_CCK_FIL_CNT, 0x00000000 }, { AR5K_PHYERR_CNT1, 0x00000000 }, { AR5K_PHYERR_CNT1_MASK, 0x00000000 }, { AR5K_PHYERR_CNT2, 0x00000000 }, { AR5K_PHYERR_CNT2_MASK, 0x00000000 }, { AR5K_TSF_THRES, 0x00000000 }, { 0x8140, 0x800003f9 }, { 0x8144, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY_ADC_CTL, 0x0000a000 }, { 0x983c, 0x00200400 }, { AR5K_PHY_GAIN_OFFSET, 0x1284233c }, { AR5K_PHY_SCR, 0x0000001f }, { AR5K_PHY_SLMT, 0x00000080 }, { AR5K_PHY_SCAL, 0x0000000e }, { 0x9958, 0x00081fff }, { AR5K_PHY_TIMING_7, 0x00000000 }, { AR5K_PHY_TIMING_8, 0x02800000 }, { AR5K_PHY_TIMING_11, 0x00000000 }, { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 }, { 0x99e4, 0xaaaaaaaa }, { 0x99e8, 0x3c466478 }, { 0x99ec, 0x000000aa }, { AR5K_PHY_SCLOCK, 0x0000000c }, { AR5K_PHY_SDELAY, 0x000000ff }, { AR5K_PHY_SPENDING, 0x00000014 }, { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 }, { 0xa23c, 0x93c889af }, { AR5K_PHY_FAST_ADC, 0x00000001 }, { 0xa250, 0x0000a000 }, { AR5K_PHY_BLUETOOTH, 0x00000000 }, { AR5K_PHY_TPC_RG1, 0x0cc75380 }, { 0xa25c, 0x0f0f0f01 }, { 0xa260, 0x5f690f01 }, { 0xa264, 0x00418a11 }, { 0xa268, 0x00000000 }, { AR5K_PHY_TPC_RG5, 0x0c30c16a }, { 0xa270, 0x00820820 }, { 0xa274, 0x081b7caa }, { 0xa278, 0x1ce739ce }, { 0xa27c, 0x051701ce }, { 0xa338, 0x00000000 }, { 0xa33c, 0x00000000 }, { 0xa340, 0x00000000 }, { 0xa344, 0x00000000 }, { 0xa348, 0x3fffffff }, { 0xa34c, 0x3fffffff }, { 0xa350, 0x3fffffff }, { 0xa354, 0x0003ffff }, { 0xa358, 0x79a8aa1f }, { 0xa35c, 0x066c420f }, { 0xa360, 0x0f282207 }, { 0xa364, 0x17601685 }, { 0xa368, 0x1f801104 }, { 0xa36c, 0x37a00c03 }, { 0xa370, 0x3fc40883 }, { 0xa374, 0x57c00803 }, { 0xa378, 0x5fd80682 }, { 0xa37c, 0x7fe00482 }, { 0xa380, 0x7f3c7bba 
}, { 0xa384, 0xf3307ff0 }, }; /* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */ /* XXX: a mode ? */ static const struct ath5k_ini_mode rf2413_ini_mode_end[] = { { AR5K_TXCFG, /* a/XR aTurbo b g (DYN) gTurbo */ { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } }, { AR5K_USEC_5211, { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x0a020001, 0x05020000, 0x0a020001, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00 } }, { AR5K_PHY_PA_CTL, { 0x00000002, 0x00000002, 0x0000000a, 0x0000000a, 0x0000000a } }, { AR5K_PHY_GAIN, { 0x0018da6d, 0x0018da6d, 0x001a6a64, 0x001a6a64, 0x001a6a64 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da, 0x0de8b0da } }, { AR5K_PHY_SIG, { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e, 0x7e800d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3137665e, 0x3137665e, 0x3137665e, 0x3139605e, 0x3137665e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, { AR5K_PHY_RX_DELAY, { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, { AR5K_PHY_CCKTXCTL, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, { AR5K_PHY_CCK_CROSSCORR, { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, { AR5K_PHY_GAIN_2GHZ, { 0x002c0140, 0x002c0140, 0x0042c140, 0x0042c140, 0x0042c140 } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, }; static const struct ath5k_ini rf2413_ini_common_end[] = { { AR5K_DCU_FP, 0x000003e0 }, { AR5K_SEQ_MASK, 0x0000000f }, { AR5K_MIC_QOS_CTL, 0x00000000 }, { AR5K_MIC_QOS_SEL, 0x00000000 }, { AR5K_MISC_MODE, 0x00000000 }, { AR5K_OFDM_FIL_CNT, 0x00000000 }, { AR5K_CCK_FIL_CNT, 0x00000000 }, { AR5K_PHYERR_CNT1, 0x00000000 }, { AR5K_PHYERR_CNT1_MASK, 0x00000000 }, { 
AR5K_PHYERR_CNT2, 0x00000000 }, { AR5K_PHYERR_CNT2_MASK, 0x00000000 }, { AR5K_TSF_THRES, 0x00000000 }, { 0x8140, 0x800000a8 }, { 0x8144, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY_ADC_CTL, 0x0000a000 }, { 0x983c, 0x00200400 }, { AR5K_PHY_GAIN_OFFSET, 0x1284233c }, { AR5K_PHY_SCR, 0x0000001f }, { AR5K_PHY_SLMT, 0x00000080 }, { AR5K_PHY_SCAL, 0x0000000e }, { 0x9958, 0x000000ff }, { AR5K_PHY_TIMING_7, 0x00000000 }, { AR5K_PHY_TIMING_8, 0x02800000 }, { AR5K_PHY_TIMING_11, 0x00000000 }, { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 }, { 0x99e4, 0xaaaaaaaa }, { 0x99e8, 0x3c466478 }, { 0x99ec, 0x000000aa }, { AR5K_PHY_SCLOCK, 0x0000000c }, { AR5K_PHY_SDELAY, 0x000000ff }, { AR5K_PHY_SPENDING, 0x00000014 }, { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 }, { 0xa23c, 0x93c889af }, { AR5K_PHY_FAST_ADC, 0x00000001 }, { 0xa250, 0x0000a000 }, { AR5K_PHY_BLUETOOTH, 0x00000000 }, { AR5K_PHY_TPC_RG1, 0x0cc75380 }, { 0xa25c, 0x0f0f0f01 }, { 0xa260, 0x5f690f01 }, { 0xa264, 0x00418a11 }, { 0xa268, 0x00000000 }, { AR5K_PHY_TPC_RG5, 0x0c30c16a }, { 0xa270, 0x00820820 }, { 0xa274, 0x001b7caa }, { 0xa278, 0x1ce739ce }, { 0xa27c, 0x051701ce }, { 0xa300, 0x18010000 }, { 0xa304, 0x30032602 }, { 0xa308, 0x48073e06 }, { 0xa30c, 0x560b4c0a }, { 0xa310, 0x641a600f }, { 0xa314, 0x784f6e1b }, { 0xa318, 0x868f7c5a }, { 0xa31c, 0x8ecf865b }, { 0xa320, 0x9d4f970f }, { 0xa324, 0xa5cfa18f }, { 0xa328, 0xb55faf1f }, { 0xa32c, 0xbddfb99f }, { 0xa330, 0xcd7fc73f }, { 0xa334, 0xd5ffd1bf }, { 0xa338, 0x00000000 }, { 0xa33c, 0x00000000 }, { 0xa340, 0x00000000 }, { 0xa344, 0x00000000 }, { 0xa348, 0x3fffffff }, { 0xa34c, 0x3fffffff }, { 0xa350, 0x3fffffff }, { 0xa354, 0x0003ffff }, { 0xa358, 0x79a8aa1f }, { 0xa35c, 0x066c420f }, { 0xa360, 0x0f282207 }, { 0xa364, 0x17601685 }, { 0xa368, 0x1f801104 }, { 0xa36c, 0x37a00c03 }, { 0xa370, 0x3fc40883 }, { 0xa374, 0x57c00803 }, { 0xa378, 0x5fd80682 }, { 0xa37c, 0x7fe00482 }, { 0xa380, 0x7f3c7bba }, { 0xa384, 0xf3307ff0 }, }; /* Initial mode-specific settings for RF2425 
(Written after ar5212_ini) */ /* XXX: a mode ? */ static const struct ath5k_ini_mode rf2425_ini_mode_end[] = { { AR5K_TXCFG, /* a/XR aTurbo b g (DYN) gTurbo */ { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } }, { AR5K_USEC_5211, { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, { AR5K_PHY_TURBO, { 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000001 } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY_PA_CTL, { 0x00000003, 0x00000003, 0x0000000b, 0x0000000b, 0x0000000b } }, { AR5K_PHY_SETTLING, { 0x1372161c, 0x13721c25, 0x13721722, 0x13721422, 0x13721c25 } }, { AR5K_PHY_GAIN, { 0x0018fa61, 0x0018fa61, 0x00199a65, 0x00199a65, 0x00199a65 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } }, { AR5K_PHY_SIG, { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, { AR5K_PHY_RX_DELAY, { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, { AR5K_PHY_CCKTXCTL, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, { AR5K_PHY_CCK_CROSSCORR, { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, { AR5K_PHY_GAIN_2GHZ, { 0x00000140, 0x00000140, 0x0052c140, 0x0052c140, 0x0052c140 } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, { 0xa324, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, { 0xa328, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, { 0xa32c, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, { 0xa330, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 
0xa7cfa7cf, 0xa7cfa7cf } }, { 0xa334, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, }; static const struct ath5k_ini rf2425_ini_common_end[] = { { AR5K_DCU_FP, 0x000003e0 }, { AR5K_SEQ_MASK, 0x0000000f }, { 0x809c, 0x00000000 }, { 0x80a0, 0x00000000 }, { AR5K_MIC_QOS_CTL, 0x00000000 }, { AR5K_MIC_QOS_SEL, 0x00000000 }, { AR5K_MISC_MODE, 0x00000000 }, { AR5K_OFDM_FIL_CNT, 0x00000000 }, { AR5K_CCK_FIL_CNT, 0x00000000 }, { AR5K_PHYERR_CNT1, 0x00000000 }, { AR5K_PHYERR_CNT1_MASK, 0x00000000 }, { AR5K_PHYERR_CNT2, 0x00000000 }, { AR5K_PHYERR_CNT2_MASK, 0x00000000 }, { AR5K_TSF_THRES, 0x00000000 }, { 0x8140, 0x800003f9 }, { 0x8144, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY_ADC_CTL, 0x0000a000 }, { 0x983c, 0x00200400 }, { AR5K_PHY_GAIN_OFFSET, 0x1284233c }, { AR5K_PHY_SCR, 0x0000001f }, { AR5K_PHY_SLMT, 0x00000080 }, { AR5K_PHY_SCAL, 0x0000000e }, { 0x9958, 0x00081fff }, { AR5K_PHY_TIMING_7, 0x00000000 }, { AR5K_PHY_TIMING_8, 0x02800000 }, { AR5K_PHY_TIMING_11, 0x00000000 }, { 0x99dc, 0xfebadbe8 }, { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 }, { 0x99e4, 0xaaaaaaaa }, { 0x99e8, 0x3c466478 }, { 0x99ec, 0x000000aa }, { AR5K_PHY_SCLOCK, 0x0000000c }, { AR5K_PHY_SDELAY, 0x000000ff }, { AR5K_PHY_SPENDING, 0x00000014 }, { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 }, { AR5K_PHY_TXPOWER_RATE3, 0x20202020 }, { AR5K_PHY_TXPOWER_RATE4, 0x20202020 }, { 0xa23c, 0x93c889af }, { AR5K_PHY_FAST_ADC, 0x00000001 }, { 0xa250, 0x0000a000 }, { AR5K_PHY_BLUETOOTH, 0x00000000 }, { AR5K_PHY_TPC_RG1, 0x0cc75380 }, { 0xa25c, 0x0f0f0f01 }, { 0xa260, 0x5f690f01 }, { 0xa264, 0x00418a11 }, { 0xa268, 0x00000000 }, { AR5K_PHY_TPC_RG5, 0x0c30c166 }, { 0xa270, 0x00820820 }, { 0xa274, 0x081a3caa }, { 0xa278, 0x1ce739ce }, { 0xa27c, 0x051701ce }, { 0xa300, 0x16010000 }, { 0xa304, 0x2c032402 }, { 0xa308, 0x48433e42 }, { 0xa30c, 0x5a0f500b }, { 0xa310, 0x6c4b624a }, { 0xa314, 0x7e8b748a }, { 0xa318, 0x96cf8ccb }, { 0xa31c, 0xa34f9d0f }, { 0xa320, 0xa7cfa58f }, { 0xa348, 0x3fffffff }, { 
0xa34c, 0x3fffffff }, { 0xa350, 0x3fffffff }, { 0xa354, 0x0003ffff }, { 0xa358, 0x79a8aa1f }, { 0xa35c, 0x066c420f }, { 0xa360, 0x0f282207 }, { 0xa364, 0x17601685 }, { 0xa368, 0x1f801104 }, { 0xa36c, 0x37a00c03 }, { 0xa370, 0x3fc40883 }, { 0xa374, 0x57c00803 }, { 0xa378, 0x5fd80682 }, { 0xa37c, 0x7fe00482 }, { 0xa380, 0x7f3c7bba }, { 0xa384, 0xf3307ff0 }, }; /* * Initial BaseBand Gain settings for RF5111/5112 (AR5210 comes with * RF5110 only so initial BB Gain settings are included in AR5K_AR5210_INI) */ /* RF5111 Initial BaseBand Gain settings */ static const struct ath5k_ini rf5111_ini_bbgain[] = { { AR5K_BB_GAIN(0), 0x00000000 }, { AR5K_BB_GAIN(1), 0x00000020 }, { AR5K_BB_GAIN(2), 0x00000010 }, { AR5K_BB_GAIN(3), 0x00000030 }, { AR5K_BB_GAIN(4), 0x00000008 }, { AR5K_BB_GAIN(5), 0x00000028 }, { AR5K_BB_GAIN(6), 0x00000004 }, { AR5K_BB_GAIN(7), 0x00000024 }, { AR5K_BB_GAIN(8), 0x00000014 }, { AR5K_BB_GAIN(9), 0x00000034 }, { AR5K_BB_GAIN(10), 0x0000000c }, { AR5K_BB_GAIN(11), 0x0000002c }, { AR5K_BB_GAIN(12), 0x00000002 }, { AR5K_BB_GAIN(13), 0x00000022 }, { AR5K_BB_GAIN(14), 0x00000012 }, { AR5K_BB_GAIN(15), 0x00000032 }, { AR5K_BB_GAIN(16), 0x0000000a }, { AR5K_BB_GAIN(17), 0x0000002a }, { AR5K_BB_GAIN(18), 0x00000006 }, { AR5K_BB_GAIN(19), 0x00000026 }, { AR5K_BB_GAIN(20), 0x00000016 }, { AR5K_BB_GAIN(21), 0x00000036 }, { AR5K_BB_GAIN(22), 0x0000000e }, { AR5K_BB_GAIN(23), 0x0000002e }, { AR5K_BB_GAIN(24), 0x00000001 }, { AR5K_BB_GAIN(25), 0x00000021 }, { AR5K_BB_GAIN(26), 0x00000011 }, { AR5K_BB_GAIN(27), 0x00000031 }, { AR5K_BB_GAIN(28), 0x00000009 }, { AR5K_BB_GAIN(29), 0x00000029 }, { AR5K_BB_GAIN(30), 0x00000005 }, { AR5K_BB_GAIN(31), 0x00000025 }, { AR5K_BB_GAIN(32), 0x00000015 }, { AR5K_BB_GAIN(33), 0x00000035 }, { AR5K_BB_GAIN(34), 0x0000000d }, { AR5K_BB_GAIN(35), 0x0000002d }, { AR5K_BB_GAIN(36), 0x00000003 }, { AR5K_BB_GAIN(37), 0x00000023 }, { AR5K_BB_GAIN(38), 0x00000013 }, { AR5K_BB_GAIN(39), 0x00000033 }, { AR5K_BB_GAIN(40), 0x0000000b }, { 
AR5K_BB_GAIN(41), 0x0000002b }, { AR5K_BB_GAIN(42), 0x0000002b }, { AR5K_BB_GAIN(43), 0x0000002b }, { AR5K_BB_GAIN(44), 0x0000002b }, { AR5K_BB_GAIN(45), 0x0000002b }, { AR5K_BB_GAIN(46), 0x0000002b }, { AR5K_BB_GAIN(47), 0x0000002b }, { AR5K_BB_GAIN(48), 0x0000002b }, { AR5K_BB_GAIN(49), 0x0000002b }, { AR5K_BB_GAIN(50), 0x0000002b }, { AR5K_BB_GAIN(51), 0x0000002b }, { AR5K_BB_GAIN(52), 0x0000002b }, { AR5K_BB_GAIN(53), 0x0000002b }, { AR5K_BB_GAIN(54), 0x0000002b }, { AR5K_BB_GAIN(55), 0x0000002b }, { AR5K_BB_GAIN(56), 0x0000002b }, { AR5K_BB_GAIN(57), 0x0000002b }, { AR5K_BB_GAIN(58), 0x0000002b }, { AR5K_BB_GAIN(59), 0x0000002b }, { AR5K_BB_GAIN(60), 0x0000002b }, { AR5K_BB_GAIN(61), 0x0000002b }, { AR5K_BB_GAIN(62), 0x00000002 }, { AR5K_BB_GAIN(63), 0x00000016 }, }; /* RF5112 Initial BaseBand Gain settings (Same for RF5413/5414+) */ static const struct ath5k_ini rf5112_ini_bbgain[] = { { AR5K_BB_GAIN(0), 0x00000000 }, { AR5K_BB_GAIN(1), 0x00000001 }, { AR5K_BB_GAIN(2), 0x00000002 }, { AR5K_BB_GAIN(3), 0x00000003 }, { AR5K_BB_GAIN(4), 0x00000004 }, { AR5K_BB_GAIN(5), 0x00000005 }, { AR5K_BB_GAIN(6), 0x00000008 }, { AR5K_BB_GAIN(7), 0x00000009 }, { AR5K_BB_GAIN(8), 0x0000000a }, { AR5K_BB_GAIN(9), 0x0000000b }, { AR5K_BB_GAIN(10), 0x0000000c }, { AR5K_BB_GAIN(11), 0x0000000d }, { AR5K_BB_GAIN(12), 0x00000010 }, { AR5K_BB_GAIN(13), 0x00000011 }, { AR5K_BB_GAIN(14), 0x00000012 }, { AR5K_BB_GAIN(15), 0x00000013 }, { AR5K_BB_GAIN(16), 0x00000014 }, { AR5K_BB_GAIN(17), 0x00000015 }, { AR5K_BB_GAIN(18), 0x00000018 }, { AR5K_BB_GAIN(19), 0x00000019 }, { AR5K_BB_GAIN(20), 0x0000001a }, { AR5K_BB_GAIN(21), 0x0000001b }, { AR5K_BB_GAIN(22), 0x0000001c }, { AR5K_BB_GAIN(23), 0x0000001d }, { AR5K_BB_GAIN(24), 0x00000020 }, { AR5K_BB_GAIN(25), 0x00000021 }, { AR5K_BB_GAIN(26), 0x00000022 }, { AR5K_BB_GAIN(27), 0x00000023 }, { AR5K_BB_GAIN(28), 0x00000024 }, { AR5K_BB_GAIN(29), 0x00000025 }, { AR5K_BB_GAIN(30), 0x00000028 }, { AR5K_BB_GAIN(31), 0x00000029 }, { 
AR5K_BB_GAIN(32), 0x0000002a }, { AR5K_BB_GAIN(33), 0x0000002b }, { AR5K_BB_GAIN(34), 0x0000002c }, { AR5K_BB_GAIN(35), 0x0000002d }, { AR5K_BB_GAIN(36), 0x00000030 }, { AR5K_BB_GAIN(37), 0x00000031 }, { AR5K_BB_GAIN(38), 0x00000032 }, { AR5K_BB_GAIN(39), 0x00000033 }, { AR5K_BB_GAIN(40), 0x00000034 }, { AR5K_BB_GAIN(41), 0x00000035 }, { AR5K_BB_GAIN(42), 0x00000035 }, { AR5K_BB_GAIN(43), 0x00000035 }, { AR5K_BB_GAIN(44), 0x00000035 }, { AR5K_BB_GAIN(45), 0x00000035 }, { AR5K_BB_GAIN(46), 0x00000035 }, { AR5K_BB_GAIN(47), 0x00000035 }, { AR5K_BB_GAIN(48), 0x00000035 }, { AR5K_BB_GAIN(49), 0x00000035 }, { AR5K_BB_GAIN(50), 0x00000035 }, { AR5K_BB_GAIN(51), 0x00000035 }, { AR5K_BB_GAIN(52), 0x00000035 }, { AR5K_BB_GAIN(53), 0x00000035 }, { AR5K_BB_GAIN(54), 0x00000035 }, { AR5K_BB_GAIN(55), 0x00000035 }, { AR5K_BB_GAIN(56), 0x00000035 }, { AR5K_BB_GAIN(57), 0x00000035 }, { AR5K_BB_GAIN(58), 0x00000035 }, { AR5K_BB_GAIN(59), 0x00000035 }, { AR5K_BB_GAIN(60), 0x00000035 }, { AR5K_BB_GAIN(61), 0x00000035 }, { AR5K_BB_GAIN(62), 0x00000010 }, { AR5K_BB_GAIN(63), 0x0000001a }, }; /* * Write initial register dump */ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, const struct ath5k_ini *ini_regs, bool change_channel) { unsigned int i; /* Write initial registers */ for (i = 0; i < size; i++) { /* On channel change there is * no need to mess with PCU */ if (change_channel && ini_regs[i].ini_register >= AR5K_PCU_MIN && ini_regs[i].ini_register <= AR5K_PCU_MAX) continue; switch (ini_regs[i].ini_mode) { case AR5K_INI_READ: /* Cleared on read */ ath5k_hw_reg_read(ah, ini_regs[i].ini_register); break; case AR5K_INI_WRITE: default: AR5K_REG_WAIT(i); ath5k_hw_reg_write(ah, ini_regs[i].ini_value, ini_regs[i].ini_register); } } } static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, unsigned int size, const struct ath5k_ini_mode *ini_mode, u8 mode) { unsigned int i; for (i = 0; i < size; i++) { AR5K_REG_WAIT(i); ath5k_hw_reg_write(ah, 
ini_mode[i].mode_value[mode], (u32)ini_mode[i].mode_register); } } int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel) { /* * Write initial register settings */ /* For AR5212 and combatible */ if (ah->ah_version == AR5K_AR5212) { /* First set of mode-specific settings */ ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(ar5212_ini_mode_start), ar5212_ini_mode_start, mode); /* * Write initial settings common for all modes */ ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5212_ini_common_start), ar5212_ini_common_start, change_channel); /* Second set of mode-specific settings */ switch (ah->ah_radio) { case AR5K_RF5111: ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(rf5111_ini_mode_end), rf5111_ini_mode_end, mode); ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_common_end), rf5111_ini_common_end, change_channel); /* Baseband gain table */ ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain), rf5111_ini_bbgain, change_channel); break; case AR5K_RF5112: ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(rf5112_ini_mode_end), rf5112_ini_mode_end, mode); ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5112_ini_common_end), rf5112_ini_common_end, change_channel); ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5112_ini_bbgain), rf5112_ini_bbgain, change_channel); break; case AR5K_RF5413: ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(rf5413_ini_mode_end), rf5413_ini_mode_end, mode); ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5413_ini_common_end), rf5413_ini_common_end, change_channel); ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5112_ini_bbgain), rf5112_ini_bbgain, change_channel); break; case AR5K_RF2316: case AR5K_RF2413: ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(rf2413_ini_mode_end), rf2413_ini_mode_end, mode); ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf2413_ini_common_end), rf2413_ini_common_end, change_channel); /* Override settings from rf2413_ini_common_end */ if (ah->ah_radio == AR5K_RF2316) { ath5k_hw_reg_write(ah, 0x00004000, AR5K_PHY_AGC); ath5k_hw_reg_write(ah, 0x081b7caa, 0xa274); 
} ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5112_ini_bbgain), rf5112_ini_bbgain, change_channel); break; case AR5K_RF2317: case AR5K_RF2425: ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(rf2425_ini_mode_end), rf2425_ini_mode_end, mode); ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf2425_ini_common_end), rf2425_ini_common_end, change_channel); ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5112_ini_bbgain), rf5112_ini_bbgain, change_channel); break; default: return -EINVAL; } /* For AR5211 */ } else if (ah->ah_version == AR5K_AR5211) { /* AR5K_MODE_11B */ if (mode > 2) { ATH5K_ERR(ah->ah_sc, "unsupported channel mode: %d\n", mode); return -EINVAL; } /* Mode-specific settings */ ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(ar5211_ini_mode), ar5211_ini_mode, mode); /* * Write initial settings common for all modes */ ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5211_ini), ar5211_ini, change_channel); /* AR5211 only comes with 5111 */ /* Baseband gain table */ ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain), rf5111_ini_bbgain, change_channel); /* For AR5210 (for mode settings check out ath5k_hw_reset_tx_queue) */ } else if (ah->ah_version == AR5K_AR5210) { ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5210_ini), ar5210_ini, change_channel); } return 0; }
gpl-2.0
zombi-x/android_kernel_asus_flo
arch/arm/mach-msm/spm.c
1000
7789
/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <mach/msm_iomap.h> #include "spm.h" enum { MSM_SPM_DEBUG_SHADOW = 1U << 0, MSM_SPM_DEBUG_VCTL = 1U << 1, }; static int msm_spm_debug_mask; module_param_named( debug_mask, msm_spm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP ); #define MSM_SPM_PMIC_STATE_IDLE 0 static uint32_t msm_spm_reg_offsets[MSM_SPM_REG_NR] = { [MSM_SPM_REG_SAW_AVS_CTL] = 0x04, [MSM_SPM_REG_SAW_VCTL] = 0x08, [MSM_SPM_REG_SAW_STS] = 0x0C, [MSM_SPM_REG_SAW_CFG] = 0x10, [MSM_SPM_REG_SAW_SPM_CTL] = 0x14, [MSM_SPM_REG_SAW_SPM_SLP_TMR_DLY] = 0x18, [MSM_SPM_REG_SAW_SPM_WAKE_TMR_DLY] = 0x1C, [MSM_SPM_REG_SAW_SPM_PMIC_CTL] = 0x20, [MSM_SPM_REG_SAW_SLP_CLK_EN] = 0x24, [MSM_SPM_REG_SAW_SLP_HSFS_PRECLMP_EN] = 0x28, [MSM_SPM_REG_SAW_SLP_HSFS_POSTCLMP_EN] = 0x2C, [MSM_SPM_REG_SAW_SLP_CLMP_EN] = 0x30, [MSM_SPM_REG_SAW_SLP_RST_EN] = 0x34, [MSM_SPM_REG_SAW_SPM_MPM_CFG] = 0x38, }; struct msm_spm_device { void __iomem *reg_base_addr; uint32_t reg_shadow[MSM_SPM_REG_NR]; uint8_t awake_vlevel; uint8_t retention_vlevel; uint8_t collapse_vlevel; uint8_t retention_mid_vlevel; uint8_t collapse_mid_vlevel; uint32_t vctl_timeout_us; unsigned int low_power_mode; bool notify_rpm; bool dirty; }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_spm_devices); /****************************************************************************** * Internal helper functions 
*****************************************************************************/ static inline void msm_spm_set_vctl( struct msm_spm_device *dev, uint32_t vlevel) { dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0xFF; dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= vlevel; } static inline void msm_spm_set_spm_ctl(struct msm_spm_device *dev, uint32_t rpm_bypass, uint32_t mode_encoding) { dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] &= ~0x0F; dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= rpm_bypass << 3; dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= mode_encoding; } static inline void msm_spm_set_pmic_ctl(struct msm_spm_device *dev, uint32_t awake_vlevel, uint32_t mid_vlevel, uint32_t sleep_vlevel) { dev->reg_shadow[MSM_SPM_REG_SAW_SPM_PMIC_CTL] = (mid_vlevel << 16) | (awake_vlevel << 8) | (sleep_vlevel); } static inline void msm_spm_set_slp_rst_en( struct msm_spm_device *dev, uint32_t slp_rst_en) { dev->reg_shadow[MSM_SPM_REG_SAW_SLP_RST_EN] = slp_rst_en; } static inline void msm_spm_flush_shadow( struct msm_spm_device *dev, unsigned int reg_index) { __raw_writel(dev->reg_shadow[reg_index], dev->reg_base_addr + msm_spm_reg_offsets[reg_index]); } static inline void msm_spm_load_shadow( struct msm_spm_device *dev, unsigned int reg_index) { dev->reg_shadow[reg_index] = __raw_readl(dev->reg_base_addr + msm_spm_reg_offsets[reg_index]); } static inline uint32_t msm_spm_get_sts_pmic_state(struct msm_spm_device *dev) { return (dev->reg_shadow[MSM_SPM_REG_SAW_STS] >> 20) & 0x03; } static inline uint32_t msm_spm_get_sts_curr_pmic_data( struct msm_spm_device *dev) { return (dev->reg_shadow[MSM_SPM_REG_SAW_STS] >> 10) & 0xFF; } /****************************************************************************** * Public functions *****************************************************************************/ int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm) { struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices); uint32_t rpm_bypass = notify_rpm ? 
0x00 : 0x01; if (mode == dev->low_power_mode && notify_rpm == dev->notify_rpm && !dev->dirty) return 0; switch (mode) { case MSM_SPM_MODE_CLOCK_GATING: msm_spm_set_spm_ctl(dev, rpm_bypass, 0x00); msm_spm_set_slp_rst_en(dev, 0x00); break; case MSM_SPM_MODE_POWER_RETENTION: msm_spm_set_spm_ctl(dev, rpm_bypass, 0x02); msm_spm_set_pmic_ctl(dev, dev->awake_vlevel, dev->retention_mid_vlevel, dev->retention_vlevel); msm_spm_set_slp_rst_en(dev, 0x00); break; case MSM_SPM_MODE_POWER_COLLAPSE: msm_spm_set_spm_ctl(dev, rpm_bypass, 0x02); msm_spm_set_pmic_ctl(dev, dev->awake_vlevel, dev->collapse_mid_vlevel, dev->collapse_vlevel); msm_spm_set_slp_rst_en(dev, 0x01); break; default: BUG(); } msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL); msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_PMIC_CTL); msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SLP_RST_EN); /* Ensure that the registers are written before returning */ mb(); dev->low_power_mode = mode; dev->notify_rpm = notify_rpm; dev->dirty = false; if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) { int i; for (i = 0; i < MSM_SPM_REG_NR; i++) pr_info("%s: reg %02x = 0x%08x\n", __func__, msm_spm_reg_offsets[i], dev->reg_shadow[i]); } return 0; } int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel) { struct msm_spm_device *dev; uint32_t timeout_us; dev = &per_cpu(msm_spm_devices, cpu); if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: requesting cpu %u vlevel 0x%x\n", __func__, cpu, vlevel); msm_spm_set_vctl(dev, vlevel); msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL); /* Wait for PMIC state to return to idle or until timeout */ timeout_us = dev->vctl_timeout_us; msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS); while (msm_spm_get_sts_pmic_state(dev) != MSM_SPM_PMIC_STATE_IDLE) { if (!timeout_us) goto set_vdd_bail; if (timeout_us > 10) { udelay(10); timeout_us -= 10; } else { udelay(timeout_us); timeout_us = 0; } msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS); } if (msm_spm_get_sts_curr_pmic_data(dev) != vlevel) goto 
set_vdd_bail; dev->awake_vlevel = vlevel; dev->dirty = true; if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: cpu %u done, remaining timeout %uus\n", __func__, cpu, timeout_us); return 0; set_vdd_bail: pr_err("%s: cpu %u failed, remaining timeout %uus, vlevel 0x%x\n", __func__, cpu, timeout_us, msm_spm_get_sts_curr_pmic_data(dev)); return -EIO; } void msm_spm_reinit(void) { struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices); int i; for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++) msm_spm_flush_shadow(dev, i); /* Ensure that the registers are written before returning */ mb(); } int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs) { unsigned int cpu; BUG_ON(nr_devs < num_possible_cpus()); for_each_possible_cpu(cpu) { struct msm_spm_device *dev = &per_cpu(msm_spm_devices, cpu); int i; dev->reg_base_addr = data[cpu].reg_base_addr; memcpy(dev->reg_shadow, data[cpu].reg_init_values, sizeof(data[cpu].reg_init_values)); dev->awake_vlevel = data[cpu].awake_vlevel; dev->retention_vlevel = data[cpu].retention_vlevel; dev->collapse_vlevel = data[cpu].collapse_vlevel; dev->retention_mid_vlevel = data[cpu].retention_mid_vlevel; dev->collapse_mid_vlevel = data[cpu].collapse_mid_vlevel; dev->vctl_timeout_us = data[cpu].vctl_timeout_us; for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++) msm_spm_flush_shadow(dev, i); /* Ensure that the registers are written before returning */ mb(); dev->low_power_mode = MSM_SPM_MODE_CLOCK_GATING; dev->notify_rpm = false; dev->dirty = true; } return 0; }
gpl-2.0
ac100-ru/picasso-kernel
drivers/input/tablet/kbtab.c
1768
5204
#include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #include <asm/unaligned.h> /* * Version Information * v0.0.1 - Original, extremely basic version, 2.4.xx only * v0.0.2 - Updated, works with 2.5.62 and 2.4.20; * - added pressure-threshold modules param code from * Alex Perry <alex.perry@ieee.org> */ #define DRIVER_VERSION "v0.0.2" #define DRIVER_AUTHOR "Josh Myer <josh@joshisanerd.com>" #define DRIVER_DESC "USB KB Gear JamStudio Tablet driver" #define DRIVER_LICENSE "GPL" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); #define USB_VENDOR_ID_KBGEAR 0x084e static int kb_pressure_click = 0x10; module_param(kb_pressure_click, int, 0); MODULE_PARM_DESC(kb_pressure_click, "pressure threshold for clicks"); struct kbtab { unsigned char *data; dma_addr_t data_dma; struct input_dev *dev; struct usb_device *usbdev; struct usb_interface *intf; struct urb *irq; char phys[32]; }; static void kbtab_irq(struct urb *urb) { struct kbtab *kbtab = urb->context; unsigned char *data = kbtab->data; struct input_dev *dev = kbtab->dev; int pressure; int retval; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&kbtab->intf->dev, "%s - urb shutting down with status: %d\n", __func__, urb->status); return; default: dev_dbg(&kbtab->intf->dev, "%s - nonzero urb status received: %d\n", __func__, urb->status); goto exit; } input_report_key(dev, BTN_TOOL_PEN, 1); input_report_abs(dev, ABS_X, get_unaligned_le16(&data[1])); input_report_abs(dev, ABS_Y, get_unaligned_le16(&data[3])); /*input_report_key(dev, BTN_TOUCH , data[0] & 0x01);*/ input_report_key(dev, BTN_RIGHT, data[0] & 0x02); pressure = data[5]; if (kb_pressure_click == -1) input_report_abs(dev, ABS_PRESSURE, pressure); else input_report_key(dev, BTN_LEFT, pressure > kb_pressure_click ? 
1 : 0); input_sync(dev); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&kbtab->intf->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static struct usb_device_id kbtab_ids[] = { { USB_DEVICE(USB_VENDOR_ID_KBGEAR, 0x1001), .driver_info = 0 }, { } }; MODULE_DEVICE_TABLE(usb, kbtab_ids); static int kbtab_open(struct input_dev *dev) { struct kbtab *kbtab = input_get_drvdata(dev); kbtab->irq->dev = kbtab->usbdev; if (usb_submit_urb(kbtab->irq, GFP_KERNEL)) return -EIO; return 0; } static void kbtab_close(struct input_dev *dev) { struct kbtab *kbtab = input_get_drvdata(dev); usb_kill_urb(kbtab->irq); } static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct kbtab *kbtab; struct input_dev *input_dev; int error = -ENOMEM; kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) goto fail1; kbtab->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &kbtab->data_dma); if (!kbtab->data) goto fail1; kbtab->irq = usb_alloc_urb(0, GFP_KERNEL); if (!kbtab->irq) goto fail2; kbtab->usbdev = dev; kbtab->intf = intf; kbtab->dev = input_dev; usb_make_path(dev, kbtab->phys, sizeof(kbtab->phys)); strlcat(kbtab->phys, "/input0", sizeof(kbtab->phys)); input_dev->name = "KB Gear Tablet"; input_dev->phys = kbtab->phys; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, kbtab); input_dev->open = kbtab_open; input_dev->close = kbtab_close; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 0, 0x2000, 4, 0); input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0); input_set_abs_params(input_dev, 
ABS_PRESSURE, 0, 0xff, 0, 0); endpoint = &intf->cur_altsetting->endpoint[0].desc; usb_fill_int_urb(kbtab->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), kbtab->data, 8, kbtab_irq, kbtab, endpoint->bInterval); kbtab->irq->transfer_dma = kbtab->data_dma; kbtab->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = input_register_device(kbtab->dev); if (error) goto fail3; usb_set_intfdata(intf, kbtab); return 0; fail3: usb_free_urb(kbtab->irq); fail2: usb_free_coherent(dev, 8, kbtab->data, kbtab->data_dma); fail1: input_free_device(input_dev); kfree(kbtab); return error; } static void kbtab_disconnect(struct usb_interface *intf) { struct kbtab *kbtab = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); input_unregister_device(kbtab->dev); usb_free_urb(kbtab->irq); usb_free_coherent(kbtab->usbdev, 8, kbtab->data, kbtab->data_dma); kfree(kbtab); } static struct usb_driver kbtab_driver = { .name = "kbtab", .probe = kbtab_probe, .disconnect = kbtab_disconnect, .id_table = kbtab_ids, }; module_usb_driver(kbtab_driver);
gpl-2.0
quadcores/linux
drivers/gpu/drm/radeon/radeon_mem.c
1768
7412
/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */ /* * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. * * The Weather Channel (TM) funded Tungsten Graphics to develop the * initial release of the Radeon 8500 driver under the XFree86 license. * This notice must be preserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Keith Whitwell <keith@tungstengraphics.com> * * ------------------------ This file is DEPRECATED! ------------------------- */ #include <drm/drmP.h> #include <drm/radeon_drm.h> #include "radeon_drv.h" /* Very simple allocator for GART memory, working on a static range * already mapped into each client's address space. 
*/

/*
 * Carve a region [start, start+size) out of free block @p and mark it as
 * owned by @file_priv.  Splits @p into up to three blocks (leading free
 * remainder, allocation, trailing free remainder).  On kmalloc failure the
 * split is abandoned partway and the (possibly oversized) current block is
 * handed out anyway — the "out:" path.
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock = kmalloc(sizeof(*newblock),
						     GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock = kmalloc(sizeof(*newblock),
						     GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}

/*
 * First-fit search for a free block that can hold @size bytes aligned to
 * 2^align2.
 * NOTE(review): align2 reaches here from the ioctl unclamped above ~12; a
 * shift of >= 31 in (1 << align2) would be undefined — worth verifying the
 * caller-supplied range.
 */
static struct mem_block *alloc_block(struct mem_block *heap, int size,
				     int align2, struct drm_file *file_priv)
{
	struct mem_block *p;
	int mask = (1 << align2) - 1;

	list_for_each(p, heap) {
		int start = (p->start + mask) & ~mask;
		if (p->file_priv == NULL && start + size <= p->start + p->size)
			return split_block(p, start, size, file_priv);
	}

	return NULL;
}

/* Find the block whose start offset is exactly @start, or NULL. */
static struct mem_block *find_block(struct mem_block *heap, int start)
{
	struct mem_block *p;

	list_for_each(p, heap)
	    if (p->start == start)
		return p;

	return NULL;
}

/*
 * Release a block and coalesce with free neighbours.  The heap sentinel's
 * non-NULL file_priv ((struct drm_file *)-1, set in init_heap) prevents the
 * sentinel itself from being merged away.
 */
static void free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		kfree(q);
	}

	if (p->prev->file_priv == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		kfree(p);
	}
}

/* Initialize.  How to check for an uninitialized heap?
 */
/*
 * Create a heap: a sentinel node (*heap) plus one free block covering
 * [start, start+size), linked circularly.
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);

	if (!blocks)
		return -ENOMEM;

	*heap = kzalloc(sizeof(**heap), GFP_KERNEL);
	if (!*heap) {
		kfree(blocks);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	/* Sentinel owner: never NULL, so free_block() cannot merge it. */
	(*heap)->file_priv = (struct drm_file *) - 1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

/* Free all blocks associated with the releasing file.
 */
void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	/* Pass 1: orphan every block owned by the departing file. */
	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	/* Pass 2: coalesce runs of adjacent free blocks. */
	list_for_each(p, heap) {
		while (p->file_priv == NULL && p->next->file_priv == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			kfree(q);
		}
	}
}

/* Shutdown.
 */
/* Free every node of the heap, sentinel included, and NULL the pointer. */
void radeon_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		kfree(q);
	}

	kfree(*heap);
	*heap = NULL;
}

/* IOCTL HANDLERS */

/* Map a userspace region id to the matching heap pointer, or NULL. */
static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
{
	switch (region) {
	case RADEON_MEM_REGION_GART:
		return &dev_priv->gart_heap;
	case RADEON_MEM_REGION_FB:
		return &dev_priv->fb_heap;
	default:
		return NULL;
	}
}

/*
 * DRM_IOCTL_RADEON_ALLOC: allocate a block (>= 4k aligned) and copy its
 * start offset back to userspace.
 */
int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_alloc_t *alloc = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, alloc->region);
	if (!heap || !*heap)
		return -EFAULT;

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc->alignment < 12)
		alloc->alignment = 12;

	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);

	if (!block)
		return -ENOMEM;

	if (copy_to_user(alloc->region_offset, &block->start,
			 sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/*
 * DRM_IOCTL_RADEON_FREE: release a block previously handed out by
 * radeon_mem_alloc; only the owning file may free it (-EPERM otherwise).
 */
int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_free_t *memfree = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, memfree->region);
	if (!heap || !*heap)
		return -EFAULT;

	block = find_block(*heap, memfree->region_offset);
	if (!block)
		return -EFAULT;

	if (block->file_priv != file_priv)
		return -EPERM;

	free_block(block);
	return 0;
}

/*
 * DRM_IOCTL_RADEON_INIT_HEAP: one-time heap setup per region; rejects a
 * second initialization of the same heap.
 */
int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_init_heap_t *initheap = data;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, initheap->region);
	if (!heap)
		return -EFAULT;

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return -EFAULT;
	}

	return init_heap(heap, initheap->start, initheap->size);
}
gpl-2.0
HazyTeam/android_kernel_moto_shamu
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
2280
5379
/*******************************************************************************
  PTP 1588 clock using the STMMAC.

  Copyright (C) 2013  Vayavya Labs Pvt Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
*******************************************************************************/

#include "stmmac.h"
#include "stmmac_ptp.h"

/**
 * stmmac_adjust_freq
 *
 * @ptp: pointer to ptp_clock_info structure
 * @ppb: desired period change in parts ber billion
 *
 * Description: this function will adjust the frequency of hardware clock.
 */
static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct stmmac_priv *priv =
	    container_of(ptp, struct stmmac_priv, ptp_clock_ops);
	unsigned long flags;
	u32 diff, addend;
	int neg_adj = 0;
	u64 adj;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	/* Scale the nominal addend by ppb/1e9 and apply the delta. */
	addend = priv->default_addend;
	adj = addend;
	adj *= ppb;
	diff = div_u64(adj, 1000000000ULL);
	addend = neg_adj ? (addend - diff) : (addend + diff);

	spin_lock_irqsave(&priv->ptp_lock, flags);

	priv->hw->ptp->config_addend(priv->ioaddr, addend);

	/* Fix: release the lock that was taken (ptp_lock); the original
	 * unlocked priv->lock here, leaving ptp_lock held forever and
	 * corrupting priv->lock's state.
	 */
	spin_unlock_irqrestore(&priv->ptp_lock, flags);

	return 0;
}

/**
 * stmmac_adjust_time
 *
 * @ptp: pointer to ptp_clock_info structure
 * @delta: desired change in nanoseconds
 *
 * Description: this function will shift/adjust the hardware clock time.
 */
static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
{
	struct stmmac_priv *priv =
	    container_of(ptp, struct stmmac_priv, ptp_clock_ops);
	unsigned long flags;
	u32 sec, nsec;
	u32 quotient, reminder;
	int neg_adj = 0;

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}

	quotient = div_u64_rem(delta, 1000000000ULL, &reminder);
	sec = quotient;
	nsec = reminder;

	spin_lock_irqsave(&priv->ptp_lock, flags);

	priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);

	/* Fix: same lock mismatch as stmmac_adjust_freq — unlock ptp_lock,
	 * not priv->lock.
	 */
	spin_unlock_irqrestore(&priv->ptp_lock, flags);

	return 0;
}

/**
 * stmmac_get_time
 *
 * @ptp: pointer to ptp_clock_info structure
 * @ts: pointer to hold time/result
 *
 * Description: this function will read the current time from the
 * hardware clock and store it in @ts.
 */
static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
{
	struct stmmac_priv *priv =
	    container_of(ptp, struct stmmac_priv, ptp_clock_ops);
	unsigned long flags;
	u64 ns;
	u32 reminder;

	spin_lock_irqsave(&priv->ptp_lock, flags);

	ns = priv->hw->ptp->get_systime(priv->ioaddr);

	spin_unlock_irqrestore(&priv->ptp_lock, flags);

	ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder);
	ts->tv_nsec = reminder;

	return 0;
}

/**
 * stmmac_set_time
 *
 * @ptp: pointer to ptp_clock_info structure
 * @ts: time value to set
 *
 * Description: this function will set the current time on the
 * hardware clock.
 */
static int stmmac_set_time(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	struct stmmac_priv *priv =
	    container_of(ptp, struct stmmac_priv, ptp_clock_ops);
	unsigned long flags;

	spin_lock_irqsave(&priv->ptp_lock, flags);

	priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);

	spin_unlock_irqrestore(&priv->ptp_lock, flags);

	return 0;
}

/* Ancillary features (alarms, external timestamps, periodic outputs) are
 * not supported by this hardware. */
static int stmmac_enable(struct ptp_clock_info *ptp,
			 struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

/* structure describing a PTP hardware clock */
static struct ptp_clock_info stmmac_ptp_clock_ops = {
	.owner = THIS_MODULE,
	.name = "stmmac_ptp_clock",
	.max_adj = 62500000,
	.n_alarm = 0,
	.n_ext_ts = 0,
	.n_per_out = 0,
	.pps = 0,
	.adjfreq = stmmac_adjust_freq,
	.adjtime = stmmac_adjust_time,
	.gettime = stmmac_get_time,
	.settime = stmmac_set_time,
	.enable = stmmac_enable,
};

/**
 * stmmac_ptp_register
 * @priv: driver private structure
 * Description: this function will register the ptp clock driver
 * to kernel. It also does some house keeping work.
 */
int stmmac_ptp_register(struct stmmac_priv *priv)
{
	spin_lock_init(&priv->ptp_lock);
	priv->ptp_clock_ops = stmmac_ptp_clock_ops;

	priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
					     priv->device);
	if (IS_ERR(priv->ptp_clock)) {
		priv->ptp_clock = NULL;
		pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
	} else
		pr_debug("Added PTP HW clock successfully on %s\n",
			 priv->dev->name);

	return 0;
}

/**
 * stmmac_ptp_unregister
 * @priv: driver private structure
 * Description: this function will remove/unregister the ptp clock driver
 * from the kernel.
 */
void stmmac_ptp_unregister(struct stmmac_priv *priv)
{
	if (priv->ptp_clock) {
		ptp_clock_unregister(priv->ptp_clock);
		pr_debug("Removed PTP HW clock successfully on %s\n",
			 priv->dev->name);
	}
}
gpl-2.0
TeamWin/android_kernel_samsung_goyave
arch/arm/mach-s3c24xx/mach-nexcoder.c
2280
3961
/* linux/arch/arm/mach-s3c2440/mach-nexcoder.c * * Copyright (c) 2004 Nex Vision * Guillaume GOURAT <guillaume.gourat@nexvision.tv> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Modifications: * 15-10-2004 GG Created initial version * 12-03-2005 BJD Updated for release */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/string.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/mtd/map.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/setup.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach-types.h> //#include <asm/debug-ll.h> #include <mach/regs-gpio.h> #include <plat/regs-serial.h> #include <linux/platform_data/i2c-s3c2410.h> #include <plat/gpio-cfg.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/samsung-time.h> #include "common.h" static struct map_desc nexcoder_iodesc[] __initdata = { /* nothing here yet */ }; #define UCON S3C2410_UCON_DEFAULT #define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB #define UFCON S3C2410_UFCON_RXTRIG12 | S3C2410_UFCON_FIFOMODE static struct s3c2410_uartcfg nexcoder_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [1] = { .hwport = 1, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [2] = { .hwport = 2, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, } }; /* NOR Flash on NexVision NexCoder 2440 board */ static struct resource nexcoder_nor_resource[] = { [0] = DEFINE_RES_MEM(S3C2410_CS0, SZ_8M), }; static struct map_info nexcoder_nor_map = { .bankwidth = 2, }; static struct platform_device 
nexcoder_device_nor = { .name = "mtd-flash", .id = -1, .num_resources = ARRAY_SIZE(nexcoder_nor_resource), .resource = nexcoder_nor_resource, .dev = { .platform_data = &nexcoder_nor_map, } }; /* Standard Nexcoder devices */ static struct platform_device *nexcoder_devices[] __initdata = { &s3c_device_ohci, &s3c_device_lcd, &s3c_device_wdt, &s3c_device_i2c0, &s3c_device_iis, &s3c_device_rtc, &s3c_device_camif, &s3c_device_spi0, &s3c_device_spi1, &nexcoder_device_nor, }; static void __init nexcoder_sensorboard_init(void) { /* Initialize SCCB bus */ gpio_request_one(S3C2410_GPE(14), GPIOF_OUT_INIT_HIGH, NULL); gpio_free(S3C2410_GPE(14)); /* IICSCL */ gpio_request_one(S3C2410_GPE(15), GPIOF_OUT_INIT_HIGH, NULL); gpio_free(S3C2410_GPE(15)); /* IICSDA */ /* Power up the sensor board */ gpio_request_one(S3C2410_GPF(1), GPIOF_OUT_INIT_HIGH, NULL); gpio_free(S3C2410_GPF(1)); /* CAM_GPIO7 => nLDO_PWRDN */ gpio_request_one(S3C2410_GPF(2), GPIOF_OUT_INIT_LOW, NULL); gpio_free(S3C2410_GPF(2)); /* CAM_GPIO6 => CAM_PWRDN */ } static void __init nexcoder_map_io(void) { s3c24xx_init_io(nexcoder_iodesc, ARRAY_SIZE(nexcoder_iodesc)); s3c24xx_init_clocks(0); s3c24xx_init_uarts(nexcoder_uartcfgs, ARRAY_SIZE(nexcoder_uartcfgs)); samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4); nexcoder_sensorboard_init(); } static void __init nexcoder_init(void) { s3c_i2c0_set_platdata(NULL); platform_add_devices(nexcoder_devices, ARRAY_SIZE(nexcoder_devices)); }; MACHINE_START(NEXCODER_2440, "NexVision - Nexcoder 2440") /* Maintainer: Guillaume GOURAT <guillaume.gourat@nexvision.tv> */ .atag_offset = 0x100, .map_io = nexcoder_map_io, .init_machine = nexcoder_init, .init_irq = s3c2440_init_irq, .init_time = samsung_timer_init, .restart = s3c244x_restart, MACHINE_END
gpl-2.0
ktoonsez/SiyahD
drivers/target/target_core_fabric_lib.c
2536
12758
/******************************************************************************* * Filename: target_core_fabric_lib.c * * This file contains generic high level protocol identifier and PR * handlers for TCM fabric modules * * Copyright (c) 2010 Rising Tide Systems, Inc. * Copyright (c) 2010 Linux-iSCSI.org * * Nicholas A. Bellinger <nab@linux-iscsi.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* ******************************************************************************/ #include <linux/string.h> #include <linux/ctype.h> #include <linux/spinlock.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <target/target_core_base.h> #include <target/target_core_device.h> #include <target/target_core_transport.h> #include <target/target_core_fabric_lib.h> #include <target/target_core_fabric_ops.h> #include <target/target_core_configfs.h> #include "target_core_hba.h" #include "target_core_pr.h" /* * Handlers for Serial Attached SCSI (SAS) */ u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg) { /* * Return a SAS Serial SCSI Protocol identifier for loopback operations * This is defined in section 7.5.1 Table 362 in spc4r17 */ return 0x6; } EXPORT_SYMBOL(sas_get_fabric_proto_ident); u32 sas_get_pr_transport_id( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code, unsigned char *buf) { unsigned char binary, *ptr; int i; u32 off = 4; /* * Set PROTOCOL IDENTIFIER to 6h for SAS */ buf[0] = 0x06; /* * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI * over SAS Serial SCSI Protocol */ ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. 
prefix */ for (i = 0; i < 16; i += 2) { binary = transport_asciihex_to_binaryhex(&ptr[i]); buf[off++] = binary; } /* * The SAS Transport ID is a hardcoded 24-byte length */ return 24; } EXPORT_SYMBOL(sas_get_pr_transport_id); u32 sas_get_pr_transport_id_len( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code) { *format_code = 0; /* * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI * over SAS Serial SCSI Protocol * * The SAS Transport ID is a hardcoded 24-byte length */ return 24; } EXPORT_SYMBOL(sas_get_pr_transport_id_len); /* * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. */ char *sas_parse_pr_out_transport_id( struct se_portal_group *se_tpg, const char *buf, u32 *out_tid_len, char **port_nexus_ptr) { /* * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID * for initiator ports using SCSI over SAS Serial SCSI Protocol * * The TransportID for a SAS Initiator Port is of fixed size of * 24 bytes, and SAS does not contain a I_T nexus identifier, * so we return the **port_nexus_ptr set to NULL. 
*/ *port_nexus_ptr = NULL; *out_tid_len = 24; return (char *)&buf[4]; } EXPORT_SYMBOL(sas_parse_pr_out_transport_id); /* * Handlers for Fibre Channel Protocol (FCP) */ u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg) { return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */ } EXPORT_SYMBOL(fc_get_fabric_proto_ident); u32 fc_get_pr_transport_id_len( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code) { *format_code = 0; /* * The FC Transport ID is a hardcoded 24-byte length */ return 24; } EXPORT_SYMBOL(fc_get_pr_transport_id_len); u32 fc_get_pr_transport_id( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code, unsigned char *buf) { unsigned char binary, *ptr; int i; u32 off = 8; /* * PROTOCOL IDENTIFIER is 0h for FCP-2 * * From spc4r17, 7.5.4.2 TransportID for initiator ports using * SCSI over Fibre Channel * * We convert the ASCII formatted N Port name into a binary * encoded TransportID. */ ptr = &se_nacl->initiatorname[0]; for (i = 0; i < 24; ) { if (!(strncmp(&ptr[i], ":", 1))) { i++; continue; } binary = transport_asciihex_to_binaryhex(&ptr[i]); buf[off++] = binary; i += 2; } /* * The FC Transport ID is a hardcoded 24-byte length */ return 24; } EXPORT_SYMBOL(fc_get_pr_transport_id); char *fc_parse_pr_out_transport_id( struct se_portal_group *se_tpg, const char *buf, u32 *out_tid_len, char **port_nexus_ptr) { /* * The TransportID for a FC N Port is of fixed size of * 24 bytes, and FC does not contain a I_T nexus identifier, * so we return the **port_nexus_ptr set to NULL. 
*/ *port_nexus_ptr = NULL; *out_tid_len = 24; return (char *)&buf[8]; } EXPORT_SYMBOL(fc_parse_pr_out_transport_id); /* * Handlers for Internet Small Computer Systems Interface (iSCSI) */ u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) { /* * This value is defined for "Internet SCSI (iSCSI)" * in spc4r17 section 7.5.1 Table 362 */ return 0x5; } EXPORT_SYMBOL(iscsi_get_fabric_proto_ident); u32 iscsi_get_pr_transport_id( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code, unsigned char *buf) { u32 off = 4, padding = 0; u16 len = 0; spin_lock_irq(&se_nacl->nacl_sess_lock); /* * Set PROTOCOL IDENTIFIER to 5h for iSCSI */ buf[0] = 0x05; /* * From spc4r17 Section 7.5.4.6: TransportID for initiator * ports using SCSI over iSCSI. * * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field * shall contain the iSCSI name of an iSCSI initiator node (see * RFC 3720). The first ISCSI NAME field byte containing an ASCII * null character terminates the ISCSI NAME field without regard for * the specified length of the iSCSI TransportID or the contents of * the ADDITIONAL LENGTH field. */ len = sprintf(&buf[off], "%s", se_nacl->initiatorname); /* * Add Extra byte for NULL terminator */ len++; /* * If there is ISID present with the registration and *format code == 1 * 1, use iSCSI Initiator port TransportID format. * * Otherwise use iSCSI Initiator device TransportID format that * does not contain the ASCII encoded iSCSI Initiator iSID value * provied by the iSCSi Initiator during the iSCSI login process. */ if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) { /* * Set FORMAT CODE 01b for iSCSI Initiator port TransportID * format. */ buf[0] |= 0x40; /* * From spc4r17 Section 7.5.4.6: TransportID for initiator * ports using SCSI over iSCSI. Table 390 * * The SEPARATOR field shall contain the five ASCII * characters ",i,0x". 
* * The null-terminated, null-padded ISCSI INITIATOR SESSION ID * field shall contain the iSCSI initiator session identifier * (see RFC 3720) in the form of ASCII characters that are the * hexadecimal digits converted from the binary iSCSI initiator * session identifier value. The first ISCSI INITIATOR SESSION * ID field byte containing an ASCII null character */ buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ buf[off+len] = 0x69; off++; /* ASCII Character: "i" */ buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ buf[off+len] = 0x30; off++; /* ASCII Character: "0" */ buf[off+len] = 0x78; off++; /* ASCII Character: "x" */ len += 5; buf[off+len] = pr_reg->pr_reg_isid[0]; off++; buf[off+len] = pr_reg->pr_reg_isid[1]; off++; buf[off+len] = pr_reg->pr_reg_isid[2]; off++; buf[off+len] = pr_reg->pr_reg_isid[3]; off++; buf[off+len] = pr_reg->pr_reg_isid[4]; off++; buf[off+len] = pr_reg->pr_reg_isid[5]; off++; buf[off+len] = '\0'; off++; len += 7; } spin_unlock_irq(&se_nacl->nacl_sess_lock); /* * The ADDITIONAL LENGTH field specifies the number of bytes that follow * in the TransportID. The additional length shall be at least 20 and * shall be a multiple of four. 
*/ padding = ((-len) & 3); if (padding != 0) len += padding; buf[2] = ((len >> 8) & 0xff); buf[3] = (len & 0xff); /* * Increment value for total payload + header length for * full status descriptor */ len += 4; return len; } EXPORT_SYMBOL(iscsi_get_pr_transport_id); u32 iscsi_get_pr_transport_id_len( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code) { u32 len = 0, padding = 0; spin_lock_irq(&se_nacl->nacl_sess_lock); len = strlen(se_nacl->initiatorname); /* * Add extra byte for NULL terminator */ len++; /* * If there is ISID present with the registration, use format code: * 01b: iSCSI Initiator port TransportID format * * If there is not an active iSCSI session, use format code: * 00b: iSCSI Initiator device TransportID format */ if (pr_reg->isid_present_at_reg) { len += 5; /* For ",i,0x" ASCII seperator */ len += 7; /* For iSCSI Initiator Session ID + Null terminator */ *format_code = 1; } else *format_code = 0; spin_unlock_irq(&se_nacl->nacl_sess_lock); /* * The ADDITIONAL LENGTH field specifies the number of bytes that follow * in the TransportID. The additional length shall be at least 20 and * shall be a multiple of four. */ padding = ((-len) & 3); if (padding != 0) len += padding; /* * Increment value for total payload + header length for * full status descriptor */ len += 4; return len; } EXPORT_SYMBOL(iscsi_get_pr_transport_id_len); char *iscsi_parse_pr_out_transport_id( struct se_portal_group *se_tpg, const char *buf, u32 *out_tid_len, char **port_nexus_ptr) { char *p; u32 tid_len, padding; int i; u16 add_len; u8 format_code = (buf[0] & 0xc0); /* * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6: * * TransportID for initiator ports using SCSI over iSCSI, * from Table 388 -- iSCSI TransportID formats. * * 00b Initiator port is identified using the world wide unique * SCSI device name of the iSCSI initiator * device containing the initiator port (see table 389). 
* 01b Initiator port is identified using the world wide unique * initiator port identifier (see table 390).10b to 11b * Reserved */ if ((format_code != 0x00) && (format_code != 0x40)) { printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" " Initiator Transport ID\n", format_code); return NULL; } /* * If the caller wants the TransportID Length, we set that value for the * entire iSCSI Tarnsport ID now. */ if (out_tid_len != NULL) { add_len = ((buf[2] >> 8) & 0xff); add_len |= (buf[3] & 0xff); tid_len = strlen((char *)&buf[4]); tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ tid_len += 1; /* Add one byte for NULL terminator */ padding = ((-tid_len) & 3); if (padding != 0) tid_len += padding; if ((add_len + 4) != tid_len) { printk(KERN_INFO "LIO-Target Extracted add_len: %hu " "does not match calculated tid_len: %u," " using tid_len instead\n", add_len+4, tid_len); *out_tid_len = tid_len; } else *out_tid_len = (add_len + 4); } /* * Check for ',i,0x' seperator between iSCSI Name and iSCSI Initiator * Session ID as defined in Table 390 - iSCSI initiator port TransportID * format. */ if (format_code == 0x40) { p = strstr((char *)&buf[4], ",i,0x"); if (!(p)) { printk(KERN_ERR "Unable to locate \",i,0x\" seperator" " for Initiator port identifier: %s\n", (char *)&buf[4]); return NULL; } *p = '\0'; /* Terminate iSCSI Name */ p += 5; /* Skip over ",i,0x" seperator */ *port_nexus_ptr = p; /* * Go ahead and do the lower case conversion of the received * 12 ASCII characters representing the ISID in the TransportID * for comparison against the running iSCSI session's ISID from * iscsi_target.c:lio_sess_get_initiator_sid() */ for (i = 0; i < 12; i++) { if (isdigit(*p)) { p++; continue; } *p = tolower(*p); p++; } } return (char *)&buf[4]; } EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
gpl-2.0
bestmjh47/Ultimate-Kernel-EF34K
net/netfilter/xt_AUDIT.c
2536
5058
/* * Creates audit record for dropped/accepted packets * * (C) 2010-2011 Thomas Graf <tgraf@redhat.com> * (C) 2010-2011 Red Hat, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/audit.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/if_arp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_AUDIT.h> #include <linux/netfilter_bridge/ebtables.h> #include <net/ipv6.h> #include <net/ip.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>"); MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets"); MODULE_ALIAS("ipt_AUDIT"); MODULE_ALIAS("ip6t_AUDIT"); MODULE_ALIAS("ebt_AUDIT"); MODULE_ALIAS("arpt_AUDIT"); static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb, unsigned int proto, unsigned int offset) { switch (proto) { case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_UDPLITE: { const __be16 *pptr; __be16 _ports[2]; pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports); if (pptr == NULL) { audit_log_format(ab, " truncated=1"); return; } audit_log_format(ab, " sport=%hu dport=%hu", ntohs(pptr[0]), ntohs(pptr[1])); } break; case IPPROTO_ICMP: case IPPROTO_ICMPV6: { const u8 *iptr; u8 _ih[2]; iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih); if (iptr == NULL) { audit_log_format(ab, " truncated=1"); return; } audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu", iptr[0], iptr[1]); } break; } } static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb) { struct iphdr _iph; const struct iphdr *ih; ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (!ih) { audit_log_format(ab, " truncated=1"); return; } audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu", &ih->saddr, &ih->daddr, 
ntohs(ih->id), ih->protocol); if (ntohs(ih->frag_off) & IP_OFFSET) { audit_log_format(ab, " frag=1"); return; } audit_proto(ab, skb, ih->protocol, ih->ihl * 4); } static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb) { struct ipv6hdr _ip6h; const struct ipv6hdr *ih; u8 nexthdr; int offset; ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h); if (!ih) { audit_log_format(ab, " truncated=1"); return; } nexthdr = ih->nexthdr; offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), &nexthdr); audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu", &ih->saddr, &ih->daddr, nexthdr); if (offset) audit_proto(ab, skb, nexthdr, offset); } static unsigned int audit_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_audit_info *info = par->targinfo; struct audit_buffer *ab; ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); if (ab == NULL) goto errout; audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s", info->type, par->hooknum, skb->len, par->in ? par->in->name : "?", par->out ? 
par->out->name : "?"); if (skb->mark) audit_log_format(ab, " mark=%#x", skb->mark); if (skb->dev && skb->dev->type == ARPHRD_ETHER) { audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x", eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, ntohs(eth_hdr(skb)->h_proto)); if (par->family == NFPROTO_BRIDGE) { switch (eth_hdr(skb)->h_proto) { case __constant_htons(ETH_P_IP): audit_ip4(ab, skb); break; case __constant_htons(ETH_P_IPV6): audit_ip6(ab, skb); break; } } } switch (par->family) { case NFPROTO_IPV4: audit_ip4(ab, skb); break; case NFPROTO_IPV6: audit_ip6(ab, skb); break; } audit_log_end(ab); errout: return XT_CONTINUE; } static unsigned int audit_tg_ebt(struct sk_buff *skb, const struct xt_action_param *par) { audit_tg(skb, par); return EBT_CONTINUE; } static int audit_tg_check(const struct xt_tgchk_param *par) { const struct xt_audit_info *info = par->targinfo; if (info->type > XT_AUDIT_TYPE_MAX) { pr_info("Audit type out of range (valid range: 0..%hhu)\n", XT_AUDIT_TYPE_MAX); return -ERANGE; } return 0; } static struct xt_target audit_tg_reg[] __read_mostly = { { .name = "AUDIT", .family = NFPROTO_UNSPEC, .target = audit_tg, .targetsize = sizeof(struct xt_audit_info), .checkentry = audit_tg_check, .me = THIS_MODULE, }, { .name = "AUDIT", .family = NFPROTO_BRIDGE, .target = audit_tg_ebt, .targetsize = sizeof(struct xt_audit_info), .checkentry = audit_tg_check, .me = THIS_MODULE, }, }; static int __init audit_tg_init(void) { return xt_register_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg)); } static void __exit audit_tg_exit(void) { xt_unregister_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg)); } module_init(audit_tg_init); module_exit(audit_tg_exit);
gpl-2.0
jonypx09/new_kernel_kylessopen
drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
2536
28403
/****************************************************************************** Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The full GNU General Public License is included in this distribution in the file called LICENSE. Contact Information: James P. Ketrenos <ipw2100-admin@linux.intel.com> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by Andrea Merello <andreamrl@tiscali.it> A special thanks goes to Realtek for their support ! ******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/version.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <asm/uaccess.h> #include <linux/if_vlan.h> #include "ieee80211.h" /* 802.11 Data Frame 802.11 frame_contorl for data frames - 2 bytes ,-----------------------------------------------------------------------------------------. 
bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e | |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------| val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x | |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------| desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep | | | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | | '-----------------------------------------------------------------------------------------' /\ | 802.11 Data Frame | ,--------- 'ctrl' expands to >-----------' | ,--'---,-------------------------------------------------------------. Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 | |------|------|---------|---------|---------|------|---------|------| Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs | | | tion | (BSSID) | | | ence | data | | `--------------------------------------------------| |------' Total: 28 non-data bytes `----.----' | .- 'Frame data' expands to <---------------------------' | V ,---------------------------------------------------. Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 | |------|------|---------|----------|------|---------| Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | | DSAP | SSAP | | | | Packet | | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | `-----------------------------------------| | Total: 8 non-data bytes `----.----' | .- 'IP Packet' expands, if WEP enabled, to <--' | V ,-----------------------. Bytes | 4 | 0-2296 | 4 | |-----|-----------|-----| Desc. | IV | Encrypted | ICV | | | IP Packet | | `-----------------------' Total: 8 non-data bytes 802.3 Ethernet Data Frame ,-----------------------------------------. Bytes | 6 | 6 | 2 | Variable | 4 | |-------|-------|------|-----------|------| Desc. | Dest. 
| Source| Type | IP Packet | fcs | | MAC | MAC | | | | `-----------------------------------------' Total: 18 non-data bytes In the event that fragmentation is required, the incoming payload is split into N parts of size ieee->fts. The first fragment contains the SNAP header and the remaining packets are just data. If encryption is enabled, each fragment payload size is reduced by enough space to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP) So if you have 1500 bytes of payload with ieee->fts set to 500 without encryption it will take 3 frames. With WEP it will take 4 frames as the payload of each frame is reduced to 492 bytes. * SKB visualization * * ,- skb->data * | * | ETHERNET HEADER ,-<-- PAYLOAD * | | 14 bytes from skb->data * | 2 bytes for Type --> ,T. | (sizeof ethhdr) * | | | | * |,-Dest.--. ,--Src.---. | | | * | 6 bytes| | 6 bytes | | | | * v | | | | | | * 0 | v 1 | v | v 2 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 * ^ | ^ | ^ | * | | | | | | * | | | | `T' <---- 2 bytes for Type * | | | | * | | '---SNAP--' <-------- 6 bytes for SNAP * | | * `-IV--' <-------------------- 4 bytes for IV (WEP) * * SNAP HEADER * */ static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; static inline int ieee80211_put_snap(u8 *data, u16 h_proto) { struct ieee80211_snap_hdr *snap; u8 *oui; snap = (struct ieee80211_snap_hdr *)data; snap->dsap = 0xaa; snap->ssap = 0xaa; snap->ctrl = 0x03; if (h_proto == 0x8137 || h_proto == 0x80f3) oui = P802_1H_OUI; else oui = RFC1042_OUI; snap->oui[0] = oui[0]; snap->oui[1] = oui[1]; snap->oui[2] = oui[2]; *(u16 *)(data + SNAP_SIZE) = htons(h_proto); return SNAP_SIZE + sizeof(u16); } int ieee80211_encrypt_fragment( struct ieee80211_device *ieee, struct sk_buff *frag, int hdr_len) { struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx]; int res; if (!(crypt && crypt->ops)) { printk("=========>%s(), crypt is null\n", 
__FUNCTION__); return -1; } #ifdef CONFIG_IEEE80211_CRYPT_TKIP struct ieee80211_hdr *header; if (ieee->tkip_countermeasures && crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) { header = (struct ieee80211_hdr *) frag->data; if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " "TX packet to %pM\n", ieee->dev->name, header->addr1); } return -1; } #endif /* To encrypt, frame format is: * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption. /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so * call both MSDU and MPDU encryption functions from here. */ atomic_inc(&crypt->refcnt); res = 0; if (crypt->ops->encrypt_msdu) res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv); if (res == 0 && crypt->ops->encrypt_mpdu) res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_INFO "%s: Encryption failed: len=%d.\n", ieee->dev->name, frag->len); ieee->ieee_stats.tx_discards++; return -1; } return 0; } void ieee80211_txb_free(struct ieee80211_txb *txb) { if (unlikely(!txb)) return; kfree(txb); } struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, int gfp_mask) { struct ieee80211_txb *txb; int i; txb = kmalloc( sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags), gfp_mask); if (!txb) return NULL; memset(txb, 0, sizeof(struct ieee80211_txb)); txb->nr_frags = nr_frags; txb->frag_size = txb_size; for (i = 0; i < nr_frags; i++) { txb->fragments[i] = dev_alloc_skb(txb_size); if (unlikely(!txb->fragments[i])) { i--; break; } memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb)); } if (unlikely(i != nr_frags)) { while (i >= 0) dev_kfree_skb_any(txb->fragments[i--]); kfree(txb); return NULL; } return txb; } // Classify the to-be send data packet // Need to acquire the sent queue index. 
static int ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network) { struct ethhdr *eth; struct iphdr *ip; eth = (struct ethhdr *)skb->data; if (eth->h_proto != htons(ETH_P_IP)) return 0; ip = ip_hdr(skb); switch (ip->tos & 0xfc) { case 0x20: return 2; case 0x40: return 1; case 0x60: return 3; case 0x80: return 4; case 0xa0: return 5; case 0xc0: return 6; case 0xe0: return 7; default: return 0; } } #define SN_LESS(a, b) (((a-b)&0x800)!=0) void ieee80211_tx_query_agg_cap(struct ieee80211_device* ieee, struct sk_buff* skb, cb_desc* tcb_desc) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; PTX_TS_RECORD pTxTs = NULL; struct ieee80211_hdr_1addr* hdr = (struct ieee80211_hdr_1addr*)skb->data; if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT) return; if (!IsQoSDataFrame(skb->data)) return; if (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr1)) return; //check packet and mode later #ifdef TO_DO_LIST if(pTcb->PacketLength >= 4096) return; // For RTL819X, if pairwisekey = wep/tkip, we don't aggrregation. if(!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter)) return; #endif if(tcb_desc->bdhcp)// || ieee->CntAfterLink<2) { return; } #if 1 if (!ieee->GetNmodeSupportBySecCfg(ieee)) { return; } #endif if(pHTInfo->bCurrentAMPDUEnable) { if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true)) { printk("===>can't get TS\n"); return; } if (pTxTs->TxAdmittedBARecord.bValid == false) { //as some AP will refuse our action frame until key handshake has been finished. 
WB if (ieee->wpa_ie_len && (ieee->pairwise_key_type == KEY_TYPE_NA)) ; else TsStartAddBaProcess(ieee, pTxTs); goto FORCED_AGG_SETTING; } else if (pTxTs->bUsingBa == false) { if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096)) pTxTs->bUsingBa = true; else goto FORCED_AGG_SETTING; } if (ieee->iw_mode == IW_MODE_INFRA) { tcb_desc->bAMPDUEnable = true; tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor; tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity; } } FORCED_AGG_SETTING: switch(pHTInfo->ForcedAMPDUMode ) { case HT_AGG_AUTO: break; case HT_AGG_FORCE_ENABLE: tcb_desc->bAMPDUEnable = true; tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity; tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor; break; case HT_AGG_FORCE_DISABLE: tcb_desc->bAMPDUEnable = false; tcb_desc->ampdu_density = 0; tcb_desc->ampdu_factor = 0; break; } return; } extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device* ieee, cb_desc* tcb_desc) { tcb_desc->bUseShortPreamble = false; if (tcb_desc->data_rate == 2) {//// 1M can only use Long Preamble. 
11B spec return; } else if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) { tcb_desc->bUseShortPreamble = true; } return; } extern void ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; tcb_desc->bUseShortGI = false; if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT) return; if(pHTInfo->bForcedShortGI) { tcb_desc->bUseShortGI = true; return; } if((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz) tcb_desc->bUseShortGI = true; else if((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz) tcb_desc->bUseShortGI = true; } void ieee80211_query_BandwidthMode(struct ieee80211_device* ieee, cb_desc *tcb_desc) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; tcb_desc->bPacketBW = false; if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT) return; if(tcb_desc->bMulticast || tcb_desc->bBroadcast) return; if((tcb_desc->data_rate & 0x80)==0) // If using legacy rate, it shall use 20MHz channel. return; //BandWidthAutoSwitch is for auto switch to 20 or 40 in long distance if(pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz) tcb_desc->bPacketBW = true; return; } void ieee80211_query_protectionmode(struct ieee80211_device* ieee, cb_desc* tcb_desc, struct sk_buff* skb) { // Common Settings tcb_desc->bRTSSTBC = false; tcb_desc->bRTSUseShortGI = false; // Since protection frames are always sent by legacy rate, ShortGI will never be used. tcb_desc->bCTSEnable = false; // Most of protection using RTS/CTS tcb_desc->RTSSC = 0; // 20MHz: Don't care; 40MHz: Duplicate. tcb_desc->bRTSBW = false; // RTS frame bandwidth is always 20MHz if(tcb_desc->bBroadcast || tcb_desc->bMulticast)//only unicast frame will use rts/cts return; if (is_broadcast_ether_addr(skb->data+16)) //check addr3 as infrastructure add3 is DA. return; if (ieee->mode < IEEE_N_24G) //b, g mode { // (1) RTS_Threshold is compared to the MPDU, not MSDU. 
// (2) If there are more than one frag in this MSDU, only the first frag uses protection frame. // Other fragments are protected by previous fragment. // So we only need to check the length of first fragment. if (skb->len > ieee->rts) { tcb_desc->bRTSEnable = true; tcb_desc->rts_rate = MGN_24M; } else if (ieee->current_network.buseprotection) { // Use CTS-to-SELF in protection mode. tcb_desc->bRTSEnable = true; tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; } //otherwise return; return; } else {// 11n High throughput case. PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; while (true) { //check ERP protection if (ieee->current_network.buseprotection) {// CTS-to-SELF tcb_desc->bRTSEnable = true; tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; break; } //check HT op mode if(pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) { u8 HTOpMode = pHTInfo->CurrentOpMode; if((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) || (!pHTInfo->bCurBW40MHz && HTOpMode == 3) ) { tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps. tcb_desc->bRTSEnable = true; break; } } //check rts if (skb->len > ieee->rts) { tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps. tcb_desc->bRTSEnable = true; break; } //to do list: check MIMO power save condition. //check AMPDU aggregation for TXOP if(tcb_desc->bAMPDUEnable) { tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps. // According to 8190 design, firmware sends CF-End only if RTS/CTS is enabled. However, it degrads // throughput around 10M, so we disable of this mechanism. 2007.08.03 by Emily tcb_desc->bRTSEnable = false; break; } //check IOT action if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) { tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; break; } // Totally no protection case!! 
goto NO_PROTECTION; } } // For test , CTS replace with RTS if( 0 ) { tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; } if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) tcb_desc->bUseShortPreamble = true; if (ieee->mode == IW_MODE_MASTER) goto NO_PROTECTION; return; NO_PROTECTION: tcb_desc->bRTSEnable = false; tcb_desc->bCTSEnable = false; tcb_desc->rts_rate = 0; tcb_desc->RTSSC = 0; tcb_desc->bRTSBW = false; } void ieee80211_txrate_selectmode(struct ieee80211_device* ieee, cb_desc* tcb_desc) { #ifdef TO_DO_LIST if(!IsDataFrame(pFrame)) { pTcb->bTxDisableRateFallBack = TRUE; pTcb->bTxUseDriverAssingedRate = TRUE; pTcb->RATRIndex = 7; return; } if(pMgntInfo->ForcedDataRate!= 0) { pTcb->bTxDisableRateFallBack = TRUE; pTcb->bTxUseDriverAssingedRate = TRUE; return; } #endif if(ieee->bTxDisableRateFallBack) tcb_desc->bTxDisableRateFallBack = true; if(ieee->bTxUseDriverAssingedRate) tcb_desc->bTxUseDriverAssingedRate = true; if(!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate) { if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) tcb_desc->RATRIndex = 0; } } void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u8* dst) { if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst)) return; if (IsQoSDataFrame(skb->data)) //we deal qos data only { PTX_TS_RECORD pTS = NULL; if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTS), dst, skb->priority, TX_DIR, true)) { return; } pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096; } } int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_device *ieee = netdev_priv(dev); struct ieee80211_txb *txb = NULL; struct ieee80211_hdr_3addrqos *frag_hdr; int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; unsigned long flags; struct net_device_stats *stats = &ieee->stats; int ether_type = 0, encrypt; int bytes, fc, qos_ctl = 0, hdr_len; struct sk_buff *skb_frag; struct ieee80211_hdr_3addrqos 
header = { /* Ensure zero initialized */ .duration_id = 0, .seq_ctl = 0, .qos_ctl = 0 }; u8 dest[ETH_ALEN], src[ETH_ALEN]; int qos_actived = ieee->current_network.qos_data.active; struct ieee80211_crypt_data* crypt; bool bdhcp =false; cb_desc *tcb_desc; spin_lock_irqsave(&ieee->lock, flags); /* If there is no driver handler to take the TXB, dont' bother * creating it... */ if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))|| ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) { printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name); goto success; } if(likely(ieee->raw_tx == 0)){ if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) { printk(KERN_WARNING "%s: skb too small (%d).\n", ieee->dev->name, skb->len); goto success; } memset(skb->cb, 0, sizeof(skb->cb)); ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto); crypt = ieee->crypt[ieee->tx_keyidx]; encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && ieee->host_encrypt && crypt && crypt->ops; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != ETH_P_PAE) { stats->tx_dropped++; goto success; } #ifdef CONFIG_IEEE80211_DEBUG if (crypt && !encrypt && ether_type == ETH_P_PAE) { struct eapol *eap = (struct eapol *)(skb->data + sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16)); IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n", eap_get_type(eap->type)); } #endif // The following is for DHCP and ARP packet, we use cck1M to tx these packets and let LPS awake some time // to prevent DHCP protocol fail if (skb->len > 282){//MINIMUM_DHCP_PACKET_SIZE) { if (ETH_P_IP == ether_type) {// IP header const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); if (IPPROTO_UDP == ip->protocol) {//FIXME windows is 11 but here UDP in linux kernel is 17. 
struct udphdr *udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); if(((((u8 *)udp)[1] == 68) && (((u8 *)udp)[3] == 67)) || ((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) { // 68 : UDP BOOTP client // 67 : UDP BOOTP server printk("DHCP pkt src port:%d, dest port:%d!!\n", ((u8 *)udp)[1],((u8 *)udp)[3]); bdhcp = true; #ifdef _RTL8192_EXT_PATCH_ ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2; //AMY,090701 #else ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2; #endif } } }else if(ETH_P_ARP == ether_type){// IP ARP packet printk("=================>DHCP Protocol start tx ARP pkt!!\n"); bdhcp = true; ieee->LPSDelayCnt = ieee->current_network.tim.tim_count; } } /* Save source and destination addresses */ memcpy(&dest, skb->data, ETH_ALEN); memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN); /* Advance the SKB to the start of the payload */ skb_pull(skb, sizeof(struct ethhdr)); /* Determine total amount of storage required for TXB packets */ bytes = skb->len + SNAP_SIZE + sizeof(u16); if (encrypt) fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP; else fc = IEEE80211_FTYPE_DATA; if(qos_actived) fc |= IEEE80211_STYPE_QOS_DATA; else fc |= IEEE80211_STYPE_DATA; if (ieee->iw_mode == IW_MODE_INFRA) { fc |= IEEE80211_FCTL_TODS; /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN); memcpy(&header.addr2, &src, ETH_ALEN); memcpy(&header.addr3, &dest, ETH_ALEN); } else if (ieee->iw_mode == IW_MODE_ADHOC) { /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ memcpy(&header.addr1, dest, ETH_ALEN); memcpy(&header.addr2, src, ETH_ALEN); memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN); } header.frame_ctl = cpu_to_le16(fc); /* Determine fragmentation size based on destination (multicast * and broadcast are not fragmented) */ if (is_multicast_ether_addr(header.addr1) || is_broadcast_ether_addr(header.addr1)) { frag_size = MAX_FRAG_THRESHOLD; qos_ctl |= QOS_CTL_NOTCONTAIN_ACK; } else { frag_size = 
ieee->fts;//default:392 qos_ctl = 0; } if(qos_actived) { hdr_len = IEEE80211_3ADDR_LEN + 2; skb->priority = ieee80211_classify(skb, &ieee->current_network); qos_ctl |= skb->priority; //set in the ieee80211_classify header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID); } else { hdr_len = IEEE80211_3ADDR_LEN; } /* Determine amount of payload per fragment. Regardless of if * this stack is providing the full 802.11 header, one will * eventually be affixed to this fragment -- so we must account for * it when determining the amount of payload space. */ bytes_per_frag = frag_size - hdr_len; if (ieee->config & (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) bytes_per_frag -= IEEE80211_FCS_LEN; /* Each fragment may need to have room for encryptiong pre/postfix */ if (encrypt) bytes_per_frag -= crypt->ops->extra_prefix_len + crypt->ops->extra_postfix_len; /* Number of fragments is the total bytes_per_frag / * payload_per_fragment */ nr_frags = bytes / bytes_per_frag; bytes_last_frag = bytes % bytes_per_frag; if (bytes_last_frag) nr_frags++; else bytes_last_frag = bytes_per_frag; /* When we allocate the TXB we allocate enough space for the reserve * and full fragment bytes (bytes_per_frag doesn't include prefix, * postfix, header, FCS, etc.) 
*/ txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC); if (unlikely(!txb)) { printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); goto failed; } txb->encrypted = encrypt; txb->payload_size = bytes; if(qos_actived) { txb->queue_index = UP2AC(skb->priority); } else { txb->queue_index = WME_AC_BK; } for (i = 0; i < nr_frags; i++) { skb_frag = txb->fragments[i]; tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE); if(qos_actived){ skb_frag->priority = skb->priority;//UP2AC(skb->priority); tcb_desc->queue_index = UP2AC(skb->priority); } else { skb_frag->priority = WME_AC_BK; tcb_desc->queue_index = WME_AC_BK; } skb_reserve(skb_frag, ieee->tx_headroom); if (encrypt){ if (ieee->hwsec_active) tcb_desc->bHwSec = 1; else tcb_desc->bHwSec = 0; skb_reserve(skb_frag, crypt->ops->extra_prefix_len); } else { tcb_desc->bHwSec = 0; } frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len); memcpy(frag_hdr, &header, hdr_len); /* If this is not the last fragment, then add the MOREFRAGS * bit to the frame control */ if (i != nr_frags - 1) { frag_hdr->frame_ctl = cpu_to_le16( fc | IEEE80211_FCTL_MOREFRAGS); bytes = bytes_per_frag; } else { /* The last fragment takes the remaining length */ bytes = bytes_last_frag; } if(qos_actived) { // add 1 only indicate to corresponding seq number control 2006/7/12 frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i); } else { frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i); } /* Put a SNAP header on the first fragment */ if (i == 0) { ieee80211_put_snap( skb_put(skb_frag, SNAP_SIZE + sizeof(u16)), ether_type); bytes -= SNAP_SIZE + sizeof(u16); } memcpy(skb_put(skb_frag, bytes), skb->data, bytes); /* Advance the SKB... 
*/ skb_pull(skb, bytes); /* Encryption routine will move the header forward in order * to insert the IV between the header and the payload */ if (encrypt) ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len); if (ieee->config & (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) skb_put(skb_frag, 4); } if(qos_actived) { if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF) ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0; else ieee->seq_ctrl[UP2AC(skb->priority) + 1]++; } else { if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; } }else{ if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) { printk(KERN_WARNING "%s: skb too small (%d).\n", ieee->dev->name, skb->len); goto success; } txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC); if(!txb){ printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); goto failed; } txb->encrypted = 0; txb->payload_size = skb->len; memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len); } success: //WB add to fill data tcb_desc here. only first fragment is considered, need to change, and you may remove to other place. 
if (txb) { cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE); tcb_desc->bTxEnableFwCalcDur = 1; if (is_multicast_ether_addr(header.addr1)) tcb_desc->bMulticast = 1; if (is_broadcast_ether_addr(header.addr1)) tcb_desc->bBroadcast = 1; ieee80211_txrate_selectmode(ieee, tcb_desc); if ( tcb_desc->bMulticast || tcb_desc->bBroadcast) tcb_desc->data_rate = ieee->basic_rate; else tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate); if(bdhcp == true){ tcb_desc->data_rate = MGN_1M; tcb_desc->bTxDisableRateFallBack = 1; tcb_desc->RATRIndex = 7; tcb_desc->bTxUseDriverAssingedRate = 1; tcb_desc->bdhcp = 1; } ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc); ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc); ieee80211_query_HTCapShortGI(ieee, tcb_desc); ieee80211_query_BandwidthMode(ieee, tcb_desc); ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]); ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1); } spin_unlock_irqrestore(&ieee->lock, flags); dev_kfree_skb_any(skb); if (txb) { if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){ ieee80211_softmac_xmit(txb, ieee); }else{ if ((*ieee->hard_start_xmit)(txb, ieee) == 0) { stats->tx_packets++; stats->tx_bytes += txb->payload_size; return 0; } ieee80211_txb_free(txb); } } return 0; failed: spin_unlock_irqrestore(&ieee->lock, flags); netif_stop_queue(dev); stats->tx_errors++; return 1; }
gpl-2.0
DragunKorr/dragun-android_kernel_htc_pyramid
drivers/pcmcia/pxa2xx_colibri.c
2536
5157
/* * linux/drivers/pcmcia/pxa2xx_colibri.c * * Driver for Toradex Colibri PXA270 CF socket * * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include "soc_common.h" #define COLIBRI270_RESET_GPIO 53 #define COLIBRI270_PPEN_GPIO 107 #define COLIBRI270_BVD1_GPIO 83 #define COLIBRI270_BVD2_GPIO 82 #define COLIBRI270_DETECT_GPIO 84 #define COLIBRI270_READY_GPIO 1 #define COLIBRI320_RESET_GPIO 77 #define COLIBRI320_PPEN_GPIO 57 #define COLIBRI320_BVD1_GPIO 53 #define COLIBRI320_BVD2_GPIO 79 #define COLIBRI320_DETECT_GPIO 81 #define COLIBRI320_READY_GPIO 29 enum { DETECT = 0, READY = 1, BVD1 = 2, BVD2 = 3, PPEN = 4, RESET = 5, }; /* Contents of this array are configured on-the-fly in init function */ static struct gpio colibri_pcmcia_gpios[] = { { 0, GPIOF_IN, "PCMCIA Detect" }, { 0, GPIOF_IN, "PCMCIA Ready" }, { 0, GPIOF_IN, "PCMCIA BVD1" }, { 0, GPIOF_IN, "PCMCIA BVD2" }, { 0, GPIOF_INIT_LOW, "PCMCIA PPEN" }, { 0, GPIOF_INIT_HIGH,"PCMCIA Reset" }, }; static struct pcmcia_irqs colibri_irqs[] = { { .sock = 0, .str = "PCMCIA CD" }, }; static int colibri_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { int ret; ret = gpio_request_array(colibri_pcmcia_gpios, ARRAY_SIZE(colibri_pcmcia_gpios)); if (ret) goto err1; colibri_irqs[0].irq = gpio_to_irq(colibri_pcmcia_gpios[DETECT].gpio); skt->socket.pci_irq = gpio_to_irq(colibri_pcmcia_gpios[READY].gpio); ret = soc_pcmcia_request_irqs(skt, colibri_irqs, ARRAY_SIZE(colibri_irqs)); if (ret) goto err2; return ret; err2: gpio_free_array(colibri_pcmcia_gpios, ARRAY_SIZE(colibri_pcmcia_gpios)); err1: return ret; } static void colibri_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) { 
gpio_free_array(colibri_pcmcia_gpios, ARRAY_SIZE(colibri_pcmcia_gpios)); } static void colibri_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { state->detect = !!gpio_get_value(colibri_pcmcia_gpios[DETECT].gpio); state->ready = !!gpio_get_value(colibri_pcmcia_gpios[READY].gpio); state->bvd1 = !!gpio_get_value(colibri_pcmcia_gpios[BVD1].gpio); state->bvd2 = !!gpio_get_value(colibri_pcmcia_gpios[BVD2].gpio); state->wrprot = 0; state->vs_3v = 1; state->vs_Xv = 0; } static int colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { gpio_set_value(colibri_pcmcia_gpios[PPEN].gpio, !(state->Vcc == 33 && state->Vpp < 50)); gpio_set_value(colibri_pcmcia_gpios[RESET].gpio, state->flags & SS_RESET); return 0; } static void colibri_pcmcia_socket_init(struct soc_pcmcia_socket *skt) { } static void colibri_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) { } static struct pcmcia_low_level colibri_pcmcia_ops = { .owner = THIS_MODULE, .first = 0, .nr = 1, .hw_init = colibri_pcmcia_hw_init, .hw_shutdown = colibri_pcmcia_hw_shutdown, .socket_state = colibri_pcmcia_socket_state, .configure_socket = colibri_pcmcia_configure_socket, .socket_init = colibri_pcmcia_socket_init, .socket_suspend = colibri_pcmcia_socket_suspend, }; static struct platform_device *colibri_pcmcia_device; static int __init colibri_pcmcia_init(void) { int ret; if (!machine_is_colibri() && !machine_is_colibri320()) return -ENODEV; colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!colibri_pcmcia_device) return -ENOMEM; /* Colibri PXA270 */ if (machine_is_colibri()) { colibri_pcmcia_gpios[RESET].gpio = COLIBRI270_RESET_GPIO; colibri_pcmcia_gpios[PPEN].gpio = COLIBRI270_PPEN_GPIO; colibri_pcmcia_gpios[BVD1].gpio = COLIBRI270_BVD1_GPIO; colibri_pcmcia_gpios[BVD2].gpio = COLIBRI270_BVD2_GPIO; colibri_pcmcia_gpios[DETECT].gpio = COLIBRI270_DETECT_GPIO; colibri_pcmcia_gpios[READY].gpio = COLIBRI270_READY_GPIO; /* Colibri 
PXA320 */ } else if (machine_is_colibri320()) { colibri_pcmcia_gpios[RESET].gpio = COLIBRI320_RESET_GPIO; colibri_pcmcia_gpios[PPEN].gpio = COLIBRI320_PPEN_GPIO; colibri_pcmcia_gpios[BVD1].gpio = COLIBRI320_BVD1_GPIO; colibri_pcmcia_gpios[BVD2].gpio = COLIBRI320_BVD2_GPIO; colibri_pcmcia_gpios[DETECT].gpio = COLIBRI320_DETECT_GPIO; colibri_pcmcia_gpios[READY].gpio = COLIBRI320_READY_GPIO; } ret = platform_device_add_data(colibri_pcmcia_device, &colibri_pcmcia_ops, sizeof(colibri_pcmcia_ops)); if (!ret) ret = platform_device_add(colibri_pcmcia_device); if (ret) platform_device_put(colibri_pcmcia_device); return ret; } static void __exit colibri_pcmcia_exit(void) { platform_device_unregister(colibri_pcmcia_device); } module_init(colibri_pcmcia_init); module_exit(colibri_pcmcia_exit); MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); MODULE_DESCRIPTION("PCMCIA support for Toradex Colibri PXA270/PXA320"); MODULE_ALIAS("platform:pxa2xx-pcmcia"); MODULE_LICENSE("GPL");
gpl-2.0
Marvell-Semi/PXA168_kernel
arch/arm/mach-omap2/clock2430.c
3816
1788
/* * clock2430.c - OMAP2430-specific clock integration code * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include "soc.h" #include "iomap.h" #include "clock.h" #include "clock2xxx.h" #include "cm2xxx.h" #include "cm-regbits-24xx.h" /** * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS * @clk: struct clk * being enabled * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator * * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function * passes back the correct CM_IDLEST register address for I2CHS * modules. No return value. */ static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk, void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) { *idlest_reg = OMAP2430_CM_REGADDR(CORE_MOD, CM_IDLEST); *idlest_bit = clk->enable_bit; *idlest_val = OMAP24XX_CM_IDLEST_VAL; } /* 2430 I2CHS has non-standard IDLEST register */ const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait = { .find_idlest = omap2430_clk_i2chs_find_idlest, .find_companion = omap2_clk_dflt_find_companion, };
gpl-2.0
zeroblade1984/armani_kernel
drivers/hwmon/lm75.c
4584
12300
/* * lm75.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring * Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include "lm75.h" /* * This driver handles the LM75 and compatible digital temperature sensors. 
*/ enum lm75_type { /* keep sorted in alphabetical order */ adt75, ds1775, ds75, lm75, lm75a, max6625, max6626, mcp980x, stds75, tcn75, tmp100, tmp101, tmp105, tmp175, tmp275, tmp75, }; /* Addresses scanned */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; /* The LM75 registers */ #define LM75_REG_CONF 0x01 static const u8 LM75_REG_TEMP[3] = { 0x00, /* input */ 0x03, /* max */ 0x02, /* hyst */ }; /* Each client has this additional data */ struct lm75_data { struct device *hwmon_dev; struct mutex update_lock; u8 orig_conf; char valid; /* !=0 if registers are valid */ unsigned long last_updated; /* In jiffies */ u16 temp[3]; /* Register values, 0 = input 1 = max 2 = hyst */ }; static int lm75_read_value(struct i2c_client *client, u8 reg); static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value); static struct lm75_data *lm75_update_device(struct device *dev); /*-----------------------------------------------------------------------*/ /* sysfs attributes for hwmon */ static ssize_t show_temp(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct lm75_data *data = lm75_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->temp[attr->index])); } static ssize_t set_temp(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); struct lm75_data *data = i2c_get_clientdata(client); int nr = attr->index; long temp; int error; error = kstrtol(buf, 10, &temp); if (error) return error; mutex_lock(&data->update_lock); data->temp[nr] = LM75_TEMP_TO_REG(temp); lm75_write_value(client, LM75_REG_TEMP[nr], data->temp[nr]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, 
set_temp, 1); static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_temp, set_temp, 2); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); static struct attribute *lm75_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, NULL }; static const struct attribute_group lm75_group = { .attrs = lm75_attributes, }; /*-----------------------------------------------------------------------*/ /* device probe and removal */ static int lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lm75_data *data; int status; u8 set_mask, clr_mask; int new; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -EIO; data = kzalloc(sizeof(struct lm75_data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Set to LM75 resolution (9 bits, 1/2 degree C) and range. * Then tweak to be more precise when appropriate. */ set_mask = 0; clr_mask = (1 << 0) /* continuous conversions */ | (1 << 6) | (1 << 5); /* 9-bit mode */ /* configure as specified */ status = lm75_read_value(client, LM75_REG_CONF); if (status < 0) { dev_dbg(&client->dev, "Can't read config? 
%d\n", status); goto exit_free; } data->orig_conf = status; new = status & ~clr_mask; new |= set_mask; if (status != new) lm75_write_value(client, LM75_REG_CONF, new); dev_dbg(&client->dev, "Config %02x\n", new); /* Register sysfs hooks */ status = sysfs_create_group(&client->dev.kobj, &lm75_group); if (status) goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { status = PTR_ERR(data->hwmon_dev); goto exit_remove; } dev_info(&client->dev, "%s: sensor '%s'\n", dev_name(data->hwmon_dev), client->name); return 0; exit_remove: sysfs_remove_group(&client->dev.kobj, &lm75_group); exit_free: kfree(data); return status; } static int lm75_remove(struct i2c_client *client) { struct lm75_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm75_group); lm75_write_value(client, LM75_REG_CONF, data->orig_conf); kfree(data); return 0; } static const struct i2c_device_id lm75_ids[] = { { "adt75", adt75, }, { "ds1775", ds1775, }, { "ds75", ds75, }, { "lm75", lm75, }, { "lm75a", lm75a, }, { "max6625", max6625, }, { "max6626", max6626, }, { "mcp980x", mcp980x, }, { "stds75", stds75, }, { "tcn75", tcn75, }, { "tmp100", tmp100, }, { "tmp101", tmp101, }, { "tmp105", tmp105, }, { "tmp175", tmp175, }, { "tmp275", tmp275, }, { "tmp75", tmp75, }, { /* LIST END */ } }; MODULE_DEVICE_TABLE(i2c, lm75_ids); #define LM75A_ID 0xA1 /* Return 0 if detection is successful, -ENODEV otherwise */ static int lm75_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; int i; int conf, hyst, os; bool is_lm75a = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; /* * Now, we do the remaining detection. 
There is no identification- * dedicated register so we have to rely on several tricks: * unused bits, registers cycling over 8-address boundaries, * addresses 0x04-0x07 returning the last read value. * The cycling+unused addresses combination is not tested, * since it would significantly slow the detection down and would * hardly add any value. * * The National Semiconductor LM75A is different than earlier * LM75s. It has an ID byte of 0xaX (where X is the chip * revision, with 1 being the only revision in existence) in * register 7, and unused registers return 0xff rather than the * last read value. * * Note that this function only detects the original National * Semiconductor LM75 and the LM75A. Clones from other vendors * aren't detected, on purpose, because they are typically never * found on PC hardware. They are found on embedded designs where * they can be instantiated explicitly so detection is not needed. * The absence of identification registers on all these clones * would make their exhaustive detection very difficult and weak, * and odds are that the driver would bind to unsupported devices. */ /* Unused bits */ conf = i2c_smbus_read_byte_data(new_client, 1); if (conf & 0xe0) return -ENODEV; /* First check for LM75A */ if (i2c_smbus_read_byte_data(new_client, 7) == LM75A_ID) { /* LM75A returns 0xff on unused registers so just to be sure we check for that too. 
*/ if (i2c_smbus_read_byte_data(new_client, 4) != 0xff || i2c_smbus_read_byte_data(new_client, 5) != 0xff || i2c_smbus_read_byte_data(new_client, 6) != 0xff) return -ENODEV; is_lm75a = 1; hyst = i2c_smbus_read_byte_data(new_client, 2); os = i2c_smbus_read_byte_data(new_client, 3); } else { /* Traditional style LM75 detection */ /* Unused addresses */ hyst = i2c_smbus_read_byte_data(new_client, 2); if (i2c_smbus_read_byte_data(new_client, 4) != hyst || i2c_smbus_read_byte_data(new_client, 5) != hyst || i2c_smbus_read_byte_data(new_client, 6) != hyst || i2c_smbus_read_byte_data(new_client, 7) != hyst) return -ENODEV; os = i2c_smbus_read_byte_data(new_client, 3); if (i2c_smbus_read_byte_data(new_client, 4) != os || i2c_smbus_read_byte_data(new_client, 5) != os || i2c_smbus_read_byte_data(new_client, 6) != os || i2c_smbus_read_byte_data(new_client, 7) != os) return -ENODEV; } /* Addresses cycling */ for (i = 8; i <= 248; i += 40) { if (i2c_smbus_read_byte_data(new_client, i + 1) != conf || i2c_smbus_read_byte_data(new_client, i + 2) != hyst || i2c_smbus_read_byte_data(new_client, i + 3) != os) return -ENODEV; if (is_lm75a && i2c_smbus_read_byte_data(new_client, i + 7) != LM75A_ID) return -ENODEV; } strlcpy(info->type, is_lm75a ? "lm75a" : "lm75", I2C_NAME_SIZE); return 0; } #ifdef CONFIG_PM static int lm75_suspend(struct device *dev) { int status; struct i2c_client *client = to_i2c_client(dev); status = lm75_read_value(client, LM75_REG_CONF); if (status < 0) { dev_dbg(&client->dev, "Can't read config? %d\n", status); return status; } status = status | LM75_SHUTDOWN; lm75_write_value(client, LM75_REG_CONF, status); return 0; } static int lm75_resume(struct device *dev) { int status; struct i2c_client *client = to_i2c_client(dev); status = lm75_read_value(client, LM75_REG_CONF); if (status < 0) { dev_dbg(&client->dev, "Can't read config? 
%d\n", status); return status; } status = status & ~LM75_SHUTDOWN; lm75_write_value(client, LM75_REG_CONF, status); return 0; } static const struct dev_pm_ops lm75_dev_pm_ops = { .suspend = lm75_suspend, .resume = lm75_resume, }; #define LM75_DEV_PM_OPS (&lm75_dev_pm_ops) #else #define LM75_DEV_PM_OPS NULL #endif /* CONFIG_PM */ static struct i2c_driver lm75_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "lm75", .pm = LM75_DEV_PM_OPS, }, .probe = lm75_probe, .remove = lm75_remove, .id_table = lm75_ids, .detect = lm75_detect, .address_list = normal_i2c, }; /*-----------------------------------------------------------------------*/ /* register access */ /* * All registers are word-sized, except for the configuration register. * LM75 uses a high-byte first convention, which is exactly opposite to * the SMBus standard. */ static int lm75_read_value(struct i2c_client *client, u8 reg) { if (reg == LM75_REG_CONF) return i2c_smbus_read_byte_data(client, reg); else return i2c_smbus_read_word_swapped(client, reg); } static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value) { if (reg == LM75_REG_CONF) return i2c_smbus_write_byte_data(client, reg, value); else return i2c_smbus_write_word_swapped(client, reg, value); } static struct lm75_data *lm75_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct lm75_data *data = i2c_get_clientdata(client); struct lm75_data *ret = data; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { int i; dev_dbg(&client->dev, "Starting lm75 update\n"); for (i = 0; i < ARRAY_SIZE(data->temp); i++) { int status; status = lm75_read_value(client, LM75_REG_TEMP[i]); if (unlikely(status < 0)) { dev_dbg(dev, "LM75: Failed to read value: reg %d, error %d\n", LM75_REG_TEMP[i], status); ret = ERR_PTR(status); data->valid = 0; goto abort; } data->temp[i] = status; } data->last_updated = jiffies; data->valid = 1; } abort: 
mutex_unlock(&data->update_lock); return ret; } module_i2c_driver(lm75_driver); MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); MODULE_DESCRIPTION("LM75 driver"); MODULE_LICENSE("GPL");
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHW-M290S
drivers/isdn/hisax/hfcscard.c
4840
8107
/* $Id: hfcscard.c,v 1.10.2.4 2004/01/14 16:04:48 keil Exp $ * * low level stuff for hfcs based cards (Teles3c, ACER P10) * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include <linux/isapnp.h> #include "hisax.h" #include "hfc_2bds0.h" #include "isdnl1.h" static const char *hfcs_revision = "$Revision: 1.10.2.4 $"; static irqreturn_t hfcs_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val, stat; u_long flags; spin_lock_irqsave(&cs->lock, flags); if ((HFCD_ANYINT | HFCD_BUSY_NBUSY) & (stat = cs->BC_Read_Reg(cs, HFCD_DATA, HFCD_STAT))) { val = cs->BC_Read_Reg(cs, HFCD_DATA, HFCD_INT_S1); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFCS: stat(%02x) s1(%02x)", stat, val); hfc2bds0_interrupt(cs, val); } else { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFCS: irq_no_irq stat(%02x)", stat); } spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void hfcs_Timer(struct IsdnCardState *cs) { cs->hw.hfcD.timer.expires = jiffies + 75; /* WD RESET */ /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt | 0x80); add_timer(&cs->hw.hfcD.timer); */ } static void release_io_hfcs(struct IsdnCardState *cs) { release2bds0(cs); del_timer(&cs->hw.hfcD.timer); if (cs->hw.hfcD.addr) release_region(cs->hw.hfcD.addr, 2); } static void reset_hfcs(struct IsdnCardState *cs) { printk(KERN_INFO "HFCS: resetting card\n"); cs->hw.hfcD.cirm = HFCD_RESET; if (cs->typ == ISDN_CTYPE_TELES3C) cs->hw.hfcD.cirm |= HFCD_MEM8K; cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CIRM, cs->hw.hfcD.cirm); /* Reset On */ mdelay(10); cs->hw.hfcD.cirm = 0; if (cs->typ == ISDN_CTYPE_TELES3C) cs->hw.hfcD.cirm |= HFCD_MEM8K; cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CIRM, cs->hw.hfcD.cirm); /* Reset Off */ mdelay(10); if (cs->typ == ISDN_CTYPE_TELES3C) cs->hw.hfcD.cirm |= HFCD_INTB; else if 
(cs->typ == ISDN_CTYPE_ACERP10) cs->hw.hfcD.cirm |= HFCD_INTA; cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CIRM, cs->hw.hfcD.cirm); cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CLKDEL, 0x0e); cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_TEST, HFCD_AUTO_AWAKE); /* S/T Auto awake */ cs->hw.hfcD.ctmt = HFCD_TIM25 | HFCD_AUTO_TIMER; cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt); cs->hw.hfcD.int_m2 = HFCD_IRQ_ENABLE; cs->hw.hfcD.int_m1 = HFCD_INTS_B1TRANS | HFCD_INTS_B2TRANS | HFCD_INTS_DTRANS | HFCD_INTS_B1REC | HFCD_INTS_B2REC | HFCD_INTS_DREC | HFCD_INTS_L1STATE; cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_INT_M1, cs->hw.hfcD.int_m1); cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_INT_M2, cs->hw.hfcD.int_m2); cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_STATES, HFCD_LOAD_STATE | 2); /* HFC ST 2 */ udelay(10); cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_STATES, 2); /* HFC ST 2 */ cs->hw.hfcD.mst_m = HFCD_MASTER; cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_MST_MODE, cs->hw.hfcD.mst_m); /* HFC Master */ cs->hw.hfcD.sctrl = 0; cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_SCTRL, cs->hw.hfcD.sctrl); } static int hfcs_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; int delay; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFCS: card_msg %x", mt); switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_hfcs(cs); spin_unlock_irqrestore(&cs->lock, flags); return(0); case CARD_RELEASE: release_io_hfcs(cs); return(0); case CARD_INIT: delay = (75*HZ)/100 +1; mod_timer(&cs->hw.hfcD.timer, jiffies + delay); spin_lock_irqsave(&cs->lock, flags); reset_hfcs(cs); init2bds0(cs); spin_unlock_irqrestore(&cs->lock, flags); delay = (80*HZ)/1000 +1; msleep(80); spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcD.ctmt |= HFCD_TIM800; cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt); cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_MST_MODE, cs->hw.hfcD.mst_m); spin_unlock_irqrestore(&cs->lock, flags); return(0); case CARD_TEST: return(0); } return(0); } #ifdef __ISAPNP__ static struct isapnp_device_id hfc_ids[] 
__devinitdata = { { ISAPNP_VENDOR('A', 'N', 'X'), ISAPNP_FUNCTION(0x1114), ISAPNP_VENDOR('A', 'N', 'X'), ISAPNP_FUNCTION(0x1114), (unsigned long) "Acer P10" }, { ISAPNP_VENDOR('B', 'I', 'L'), ISAPNP_FUNCTION(0x0002), ISAPNP_VENDOR('B', 'I', 'L'), ISAPNP_FUNCTION(0x0002), (unsigned long) "Billion 2" }, { ISAPNP_VENDOR('B', 'I', 'L'), ISAPNP_FUNCTION(0x0001), ISAPNP_VENDOR('B', 'I', 'L'), ISAPNP_FUNCTION(0x0001), (unsigned long) "Billion 1" }, { ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x7410), ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x7410), (unsigned long) "IStar PnP" }, { ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2610), ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2610), (unsigned long) "Teles 16.3c" }, { ISAPNP_VENDOR('S', 'F', 'M'), ISAPNP_FUNCTION(0x0001), ISAPNP_VENDOR('S', 'F', 'M'), ISAPNP_FUNCTION(0x0001), (unsigned long) "Tornado Tipa C" }, { ISAPNP_VENDOR('K', 'Y', 'E'), ISAPNP_FUNCTION(0x0001), ISAPNP_VENDOR('K', 'Y', 'E'), ISAPNP_FUNCTION(0x0001), (unsigned long) "Genius Speed Surfer" }, { 0, } }; static struct isapnp_device_id *ipid __devinitdata = &hfc_ids[0]; static struct pnp_card *pnp_c __devinitdata = NULL; #endif int __devinit setup_hfcs(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; strcpy(tmp, hfcs_revision); printk(KERN_INFO "HiSax: HFC-S driver Rev. 
%s\n", HiSax_getrev(tmp)); #ifdef __ISAPNP__ if (!card->para[1] && isapnp_present()) { struct pnp_dev *pnp_d; while(ipid->card_vendor) { if ((pnp_c = pnp_find_card(ipid->card_vendor, ipid->card_device, pnp_c))) { pnp_d = NULL; if ((pnp_d = pnp_find_dev(pnp_c, ipid->vendor, ipid->function, pnp_d))) { int err; printk(KERN_INFO "HiSax: %s detected\n", (char *)ipid->driver_data); pnp_disable_dev(pnp_d); err = pnp_activate_dev(pnp_d); if (err<0) { printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n", __func__, err); return(0); } card->para[1] = pnp_port_start(pnp_d, 0); card->para[0] = pnp_irq(pnp_d, 0); if (!card->para[0] || !card->para[1]) { printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n", card->para[0], card->para[1]); pnp_disable_dev(pnp_d); return(0); } break; } else { printk(KERN_ERR "HFC PnP: PnP error card found, no device\n"); } } ipid++; pnp_c = NULL; } if (!ipid->card_vendor) { printk(KERN_INFO "HFC PnP: no ISAPnP card found\n"); return(0); } } #endif cs->hw.hfcD.addr = card->para[1] & 0xfffe; cs->irq = card->para[0]; cs->hw.hfcD.cip = 0; cs->hw.hfcD.int_s1 = 0; cs->hw.hfcD.send = NULL; cs->bcs[0].hw.hfc.send = NULL; cs->bcs[1].hw.hfc.send = NULL; cs->hw.hfcD.dfifosize = 512; cs->dc.hfcd.ph_state = 0; cs->hw.hfcD.fifo = 255; if (cs->typ == ISDN_CTYPE_TELES3C) { cs->hw.hfcD.bfifosize = 1024 + 512; } else if (cs->typ == ISDN_CTYPE_ACERP10) { cs->hw.hfcD.bfifosize = 7*1024 + 512; } else return (0); if (!request_region(cs->hw.hfcD.addr, 2, "HFCS isdn")) { printk(KERN_WARNING "HiSax: %s config port %x-%x already in use\n", CardType[card->typ], cs->hw.hfcD.addr, cs->hw.hfcD.addr + 2); return (0); } printk(KERN_INFO "HFCS: defined at 0x%x IRQ %d HZ %d\n", cs->hw.hfcD.addr, cs->irq, HZ); if (cs->typ == ISDN_CTYPE_TELES3C) { /* Teles 16.3c IO ADR is 0x200 | YY0U (YY Bit 15/14 address) */ outb(0x00, cs->hw.hfcD.addr); outb(0x56, cs->hw.hfcD.addr | 1); } else if (cs->typ == ISDN_CTYPE_ACERP10) { /* Acer P10 IO ADR is 0x300 */ outb(0x00, cs->hw.hfcD.addr); 
outb(0x57, cs->hw.hfcD.addr | 1); } set_cs_func(cs); cs->hw.hfcD.timer.function = (void *) hfcs_Timer; cs->hw.hfcD.timer.data = (long) cs; init_timer(&cs->hw.hfcD.timer); cs->cardmsg = &hfcs_card_msg; cs->irq_func = &hfcs_interrupt; return (1); }
gpl-2.0
NoelMacwan/SXDHuashan
drivers/mmc/host/wbsd.c
5352
40733
/* * linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver * * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * * Warning! * * Changes to the FIFO system should be done with extreme care since * the hardware is full of bugs related to the FIFO. Known issues are: * * - FIFO size field in FSR is always zero. * * - FIFO interrupts tend not to work as they should. Interrupts are * triggered only for full/empty events, not for threshold values. * * - On APIC systems the FIFO empty interrupt is sometimes lost. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/pnp.h> #include <linux/highmem.h> #include <linux/mmc/host.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/dma.h> #include "wbsd.h" #define DRIVER_NAME "wbsd" #define DBG(x...) \ pr_debug(DRIVER_NAME ": " x) #define DBGF(f, x...) 
\ pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x) /* * Device resources */ #ifdef CONFIG_PNP static const struct pnp_device_id pnp_dev_table[] = { { "WEC0517", 0 }, { "WEC0518", 0 }, { "", 0 }, }; MODULE_DEVICE_TABLE(pnp, pnp_dev_table); #endif /* CONFIG_PNP */ static const int config_ports[] = { 0x2E, 0x4E }; static const int unlock_codes[] = { 0x83, 0x87 }; static const int valid_ids[] = { 0x7112, }; #ifdef CONFIG_PNP static unsigned int param_nopnp = 0; #else static const unsigned int param_nopnp = 1; #endif static unsigned int param_io = 0x248; static unsigned int param_irq = 6; static int param_dma = 2; /* * Basic functions */ static inline void wbsd_unlock_config(struct wbsd_host *host) { BUG_ON(host->config == 0); outb(host->unlock_code, host->config); outb(host->unlock_code, host->config); } static inline void wbsd_lock_config(struct wbsd_host *host) { BUG_ON(host->config == 0); outb(LOCK_CODE, host->config); } static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value) { BUG_ON(host->config == 0); outb(reg, host->config); outb(value, host->config + 1); } static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg) { BUG_ON(host->config == 0); outb(reg, host->config); return inb(host->config + 1); } static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value) { outb(index, host->base + WBSD_IDXR); outb(value, host->base + WBSD_DATAR); } static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index) { outb(index, host->base + WBSD_IDXR); return inb(host->base + WBSD_DATAR); } /* * Common routines */ static void wbsd_init_device(struct wbsd_host *host) { u8 setup, ier; /* * Reset chip (SD/MMC part) and fifo. */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET; wbsd_write_index(host, WBSD_IDX_SETUP, setup); /* * Set DAT3 to input */ setup &= ~WBSD_DAT3_H; wbsd_write_index(host, WBSD_IDX_SETUP, setup); host->flags &= ~WBSD_FIGNORE_DETECT; /* * Read back default clock. 
*/ host->clk = wbsd_read_index(host, WBSD_IDX_CLK); /* * Power down port. */ outb(WBSD_POWER_N, host->base + WBSD_CSR); /* * Set maximum timeout. */ wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F); /* * Test for card presence */ if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT) host->flags |= WBSD_FCARD_PRESENT; else host->flags &= ~WBSD_FCARD_PRESENT; /* * Enable interesting interrupts. */ ier = 0; ier |= WBSD_EINT_CARD; ier |= WBSD_EINT_FIFO_THRE; ier |= WBSD_EINT_CRC; ier |= WBSD_EINT_TIMEOUT; ier |= WBSD_EINT_TC; outb(ier, host->base + WBSD_EIR); /* * Clear interrupts. */ inb(host->base + WBSD_ISR); } static void wbsd_reset(struct wbsd_host *host) { u8 setup; pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc)); /* * Soft reset of chip (SD/MMC part). */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); setup |= WBSD_SOFT_RESET; wbsd_write_index(host, WBSD_IDX_SETUP, setup); } static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq) { unsigned long dmaflags; if (host->dma >= 0) { /* * Release ISA DMA controller. */ dmaflags = claim_dma_lock(); disable_dma(host->dma); clear_dma_ff(host->dma); release_dma_lock(dmaflags); /* * Disable DMA on host. */ wbsd_write_index(host, WBSD_IDX_DMA, 0); } host->mrq = NULL; /* * MMC layer might call back into the driver so first unlock. */ spin_unlock(&host->lock); mmc_request_done(host->mmc, mrq); spin_lock(&host->lock); } /* * Scatter/gather functions */ static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data) { /* * Get info. about SG list from data structure. */ host->cur_sg = data->sg; host->num_sg = data->sg_len; host->offset = 0; host->remain = host->cur_sg->length; } static inline int wbsd_next_sg(struct wbsd_host *host) { /* * Skip to next SG entry. */ host->cur_sg++; host->num_sg--; /* * Any entries left? 
*/ if (host->num_sg > 0) { host->offset = 0; host->remain = host->cur_sg->length; } return host->num_sg; } static inline char *wbsd_sg_to_buffer(struct wbsd_host *host) { return sg_virt(host->cur_sg); } static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) { unsigned int len, i; struct scatterlist *sg; char *dmabuf = host->dma_buffer; char *sgbuf; sg = data->sg; len = data->sg_len; for (i = 0; i < len; i++) { sgbuf = sg_virt(&sg[i]); memcpy(dmabuf, sgbuf, sg[i].length); dmabuf += sg[i].length; } } static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data) { unsigned int len, i; struct scatterlist *sg; char *dmabuf = host->dma_buffer; char *sgbuf; sg = data->sg; len = data->sg_len; for (i = 0; i < len; i++) { sgbuf = sg_virt(&sg[i]); memcpy(sgbuf, dmabuf, sg[i].length); dmabuf += sg[i].length; } } /* * Command handling */ static inline void wbsd_get_short_reply(struct wbsd_host *host, struct mmc_command *cmd) { /* * Correct response type? */ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) { cmd->error = -EILSEQ; return; } cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24; cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16; cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8; cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0; cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24; } static inline void wbsd_get_long_reply(struct wbsd_host *host, struct mmc_command *cmd) { int i; /* * Correct response type? 
*/ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) { cmd->error = -EILSEQ; return; } for (i = 0; i < 4; i++) { cmd->resp[i] = wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24; cmd->resp[i] |= wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16; cmd->resp[i] |= wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8; cmd->resp[i] |= wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0; } } static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd) { int i; u8 status, isr; /* * Clear accumulated ISR. The interrupt routine * will fill this one with events that occur during * transfer. */ host->isr = 0; /* * Send the command (CRC calculated by host). */ outb(cmd->opcode, host->base + WBSD_CMDR); for (i = 3; i >= 0; i--) outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR); cmd->error = 0; /* * Wait for the request to complete. */ do { status = wbsd_read_index(host, WBSD_IDX_STATUS); } while (status & WBSD_CARDTRAFFIC); /* * Do we expect a reply? */ if (cmd->flags & MMC_RSP_PRESENT) { /* * Read back status. */ isr = host->isr; /* Card removed? */ if (isr & WBSD_INT_CARD) cmd->error = -ENOMEDIUM; /* Timeout? */ else if (isr & WBSD_INT_TIMEOUT) cmd->error = -ETIMEDOUT; /* CRC? */ else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC)) cmd->error = -EILSEQ; /* All ok */ else { if (cmd->flags & MMC_RSP_136) wbsd_get_long_reply(host, cmd); else wbsd_get_short_reply(host, cmd); } } } /* * Data functions */ static void wbsd_empty_fifo(struct wbsd_host *host) { struct mmc_data *data = host->mrq->cmd->data; char *buffer; int i, fsr, fifo; /* * Handle excessive data. */ if (host->num_sg == 0) return; buffer = wbsd_sg_to_buffer(host) + host->offset; /* * Drain the fifo. This has a tendency to loop longer * than the FIFO length (usually one block). */ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) { /* * The size field in the FSR is broken so we have to * do some guessing. 
*/ if (fsr & WBSD_FIFO_FULL) fifo = 16; else if (fsr & WBSD_FIFO_FUTHRE) fifo = 8; else fifo = 1; for (i = 0; i < fifo; i++) { *buffer = inb(host->base + WBSD_DFR); buffer++; host->offset++; host->remain--; data->bytes_xfered++; /* * End of scatter list entry? */ if (host->remain == 0) { /* * Get next entry. Check if last. */ if (!wbsd_next_sg(host)) return; buffer = wbsd_sg_to_buffer(host); } } } /* * This is a very dirty hack to solve a * hardware problem. The chip doesn't trigger * FIFO threshold interrupts properly. */ if ((data->blocks * data->blksz - data->bytes_xfered) < 16) tasklet_schedule(&host->fifo_tasklet); } static void wbsd_fill_fifo(struct wbsd_host *host) { struct mmc_data *data = host->mrq->cmd->data; char *buffer; int i, fsr, fifo; /* * Check that we aren't being called after the * entire buffer has been transferred. */ if (host->num_sg == 0) return; buffer = wbsd_sg_to_buffer(host) + host->offset; /* * Fill the fifo. This has a tendency to loop longer * than the FIFO length (usually one block). */ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) { /* * The size field in the FSR is broken so we have to * do some guessing. */ if (fsr & WBSD_FIFO_EMPTY) fifo = 0; else if (fsr & WBSD_FIFO_EMTHRE) fifo = 8; else fifo = 15; for (i = 16; i > fifo; i--) { outb(*buffer, host->base + WBSD_DFR); buffer++; host->offset++; host->remain--; data->bytes_xfered++; /* * End of scatter list entry? */ if (host->remain == 0) { /* * Get next entry. Check if last. */ if (!wbsd_next_sg(host)) return; buffer = wbsd_sg_to_buffer(host); } } } /* * The controller stops sending interrupts for * 'FIFO empty' under certain conditions. So we * need to be a bit more pro-active. */ tasklet_schedule(&host->fifo_tasklet); } static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) { u16 blksize; u8 setup; unsigned long dmaflags; unsigned int size; /* * Calculate size. */ size = data->blocks * data->blksz; /* * Check timeout values for overflow. 
* (Yes, some cards cause this value to overflow). */ if (data->timeout_ns > 127000000) wbsd_write_index(host, WBSD_IDX_TAAC, 127); else { wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns / 1000000); } if (data->timeout_clks > 255) wbsd_write_index(host, WBSD_IDX_NSAC, 255); else wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks); /* * Inform the chip of how large blocks will be * sent. It needs this to determine when to * calculate CRC. * * Space for CRC must be included in the size. * Two bytes are needed for each data line. */ if (host->bus_width == MMC_BUS_WIDTH_1) { blksize = data->blksz + 2; wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); } else if (host->bus_width == MMC_BUS_WIDTH_4) { blksize = data->blksz + 2 * 4; wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH); wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); } else { data->error = -EINVAL; return; } /* * Clear the FIFO. This is needed even for DMA * transfers since the chip still uses the FIFO * internally. */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); setup |= WBSD_FIFO_RESET; wbsd_write_index(host, WBSD_IDX_SETUP, setup); /* * DMA transfer? */ if (host->dma >= 0) { /* * The buffer for DMA is only 64 kB. */ BUG_ON(size > 0x10000); if (size > 0x10000) { data->error = -EINVAL; return; } /* * Transfer data from the SG list to * the DMA buffer. */ if (data->flags & MMC_DATA_WRITE) wbsd_sg_to_dma(host, data); /* * Initialise the ISA DMA controller. */ dmaflags = claim_dma_lock(); disable_dma(host->dma); clear_dma_ff(host->dma); if (data->flags & MMC_DATA_READ) set_dma_mode(host->dma, DMA_MODE_READ & ~0x40); else set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40); set_dma_addr(host->dma, host->dma_addr); set_dma_count(host->dma, size); enable_dma(host->dma); release_dma_lock(dmaflags); /* * Enable DMA on the host. 
*/ wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE); } else { /* * This flag is used to keep printk * output to a minimum. */ host->firsterr = 1; /* * Initialise the SG list. */ wbsd_init_sg(host, data); /* * Turn off DMA. */ wbsd_write_index(host, WBSD_IDX_DMA, 0); /* * Set up FIFO threshold levels (and fill * buffer if doing a write). */ if (data->flags & MMC_DATA_READ) { wbsd_write_index(host, WBSD_IDX_FIFOEN, WBSD_FIFOEN_FULL | 8); } else { wbsd_write_index(host, WBSD_IDX_FIFOEN, WBSD_FIFOEN_EMPTY | 8); wbsd_fill_fifo(host); } } data->error = 0; } static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data) { unsigned long dmaflags; int count; u8 status; WARN_ON(host->mrq == NULL); /* * Send a stop command if needed. */ if (data->stop) wbsd_send_command(host, data->stop); /* * Wait for the controller to leave data * transfer state. */ do { status = wbsd_read_index(host, WBSD_IDX_STATUS); } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE)); /* * DMA transfer? */ if (host->dma >= 0) { /* * Disable DMA on the host. */ wbsd_write_index(host, WBSD_IDX_DMA, 0); /* * Turn of ISA DMA controller. */ dmaflags = claim_dma_lock(); disable_dma(host->dma); clear_dma_ff(host->dma); count = get_dma_residue(host->dma); release_dma_lock(dmaflags); data->bytes_xfered = host->mrq->data->blocks * host->mrq->data->blksz - count; data->bytes_xfered -= data->bytes_xfered % data->blksz; /* * Any leftover data? */ if (count) { pr_err("%s: Incomplete DMA transfer. " "%d bytes left.\n", mmc_hostname(host->mmc), count); if (!data->error) data->error = -EIO; } else { /* * Transfer data from DMA buffer to * SG list. 
*/ if (data->flags & MMC_DATA_READ) wbsd_dma_to_sg(host, data); } if (data->error) { if (data->bytes_xfered) data->bytes_xfered -= data->blksz; } } wbsd_request_end(host, host->mrq); } /*****************************************************************************\ * * * MMC layer callbacks * * * \*****************************************************************************/ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct wbsd_host *host = mmc_priv(mmc); struct mmc_command *cmd; /* * Disable tasklets to avoid a deadlock. */ spin_lock_bh(&host->lock); BUG_ON(host->mrq != NULL); cmd = mrq->cmd; host->mrq = mrq; /* * Check that there is actually a card in the slot. */ if (!(host->flags & WBSD_FCARD_PRESENT)) { cmd->error = -ENOMEDIUM; goto done; } if (cmd->data) { /* * The hardware is so delightfully stupid that it has a list * of "data" commands. If a command isn't on this list, it'll * just go back to the idle state and won't send any data * interrupts. */ switch (cmd->opcode) { case 11: case 17: case 18: case 20: case 24: case 25: case 26: case 27: case 30: case 42: case 56: break; /* ACMDs. We don't keep track of state, so we just treat them * like any other command. */ case 51: break; default: #ifdef CONFIG_MMC_DEBUG pr_warning("%s: Data command %d is not " "supported by this controller.\n", mmc_hostname(host->mmc), cmd->opcode); #endif cmd->error = -EINVAL; goto done; }; } /* * Does the request include data? */ if (cmd->data) { wbsd_prepare_data(host, cmd->data); if (cmd->data->error) goto done; } wbsd_send_command(host, cmd); /* * If this is a data transfer the request * will be finished after the data has * transferred. */ if (cmd->data && !cmd->error) { /* * Dirty fix for hardware bug. 
*/ if (host->dma == -1) tasklet_schedule(&host->fifo_tasklet); spin_unlock_bh(&host->lock); return; } done: wbsd_request_end(host, mrq); spin_unlock_bh(&host->lock); } static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct wbsd_host *host = mmc_priv(mmc); u8 clk, setup, pwr; spin_lock_bh(&host->lock); /* * Reset the chip on each power off. * Should clear out any weird states. */ if (ios->power_mode == MMC_POWER_OFF) wbsd_init_device(host); if (ios->clock >= 24000000) clk = WBSD_CLK_24M; else if (ios->clock >= 16000000) clk = WBSD_CLK_16M; else if (ios->clock >= 12000000) clk = WBSD_CLK_12M; else clk = WBSD_CLK_375K; /* * Only write to the clock register when * there is an actual change. */ if (clk != host->clk) { wbsd_write_index(host, WBSD_IDX_CLK, clk); host->clk = clk; } /* * Power up card. */ if (ios->power_mode != MMC_POWER_OFF) { pwr = inb(host->base + WBSD_CSR); pwr &= ~WBSD_POWER_N; outb(pwr, host->base + WBSD_CSR); } /* * MMC cards need to have pin 1 high during init. * It wreaks havoc with the card detection though so * that needs to be disabled. */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); if (ios->chip_select == MMC_CS_HIGH) { BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1); setup |= WBSD_DAT3_H; host->flags |= WBSD_FIGNORE_DETECT; } else { if (setup & WBSD_DAT3_H) { setup &= ~WBSD_DAT3_H; /* * We cannot resume card detection immediately * because of capacitance and delays in the chip. */ mod_timer(&host->ignore_timer, jiffies + HZ / 100); } } wbsd_write_index(host, WBSD_IDX_SETUP, setup); /* * Store bus width for later. Will be used when * setting up the data transfer. 
*/ host->bus_width = ios->bus_width; spin_unlock_bh(&host->lock); } static int wbsd_get_ro(struct mmc_host *mmc) { struct wbsd_host *host = mmc_priv(mmc); u8 csr; spin_lock_bh(&host->lock); csr = inb(host->base + WBSD_CSR); csr |= WBSD_MSLED; outb(csr, host->base + WBSD_CSR); mdelay(1); csr = inb(host->base + WBSD_CSR); csr &= ~WBSD_MSLED; outb(csr, host->base + WBSD_CSR); spin_unlock_bh(&host->lock); return !!(csr & WBSD_WRPT); } static const struct mmc_host_ops wbsd_ops = { .request = wbsd_request, .set_ios = wbsd_set_ios, .get_ro = wbsd_get_ro, }; /*****************************************************************************\ * * * Interrupt handling * * * \*****************************************************************************/ /* * Helper function to reset detection ignore */ static void wbsd_reset_ignore(unsigned long data) { struct wbsd_host *host = (struct wbsd_host *)data; BUG_ON(host == NULL); DBG("Resetting card detection ignore\n"); spin_lock_bh(&host->lock); host->flags &= ~WBSD_FIGNORE_DETECT; /* * Card status might have changed during the * blackout. 
*/ tasklet_schedule(&host->card_tasklet); spin_unlock_bh(&host->lock); } /* * Tasklets */ static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host) { WARN_ON(!host->mrq); if (!host->mrq) return NULL; WARN_ON(!host->mrq->cmd); if (!host->mrq->cmd) return NULL; WARN_ON(!host->mrq->cmd->data); if (!host->mrq->cmd->data) return NULL; return host->mrq->cmd->data; } static void wbsd_tasklet_card(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; u8 csr; int delay = -1; spin_lock(&host->lock); if (host->flags & WBSD_FIGNORE_DETECT) { spin_unlock(&host->lock); return; } csr = inb(host->base + WBSD_CSR); WARN_ON(csr == 0xff); if (csr & WBSD_CARDPRESENT) { if (!(host->flags & WBSD_FCARD_PRESENT)) { DBG("Card inserted\n"); host->flags |= WBSD_FCARD_PRESENT; delay = 500; } } else if (host->flags & WBSD_FCARD_PRESENT) { DBG("Card removed\n"); host->flags &= ~WBSD_FCARD_PRESENT; if (host->mrq) { pr_err("%s: Card removed during transfer!\n", mmc_hostname(host->mmc)); wbsd_reset(host); host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } delay = 0; } /* * Unlock first since we might get a call back. */ spin_unlock(&host->lock); if (delay != -1) mmc_detect_change(host->mmc, msecs_to_jiffies(delay)); } static void wbsd_tasklet_fifo(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; struct mmc_data *data; spin_lock(&host->lock); if (!host->mrq) goto end; data = wbsd_get_data(host); if (!data) goto end; if (data->flags & MMC_DATA_WRITE) wbsd_fill_fifo(host); else wbsd_empty_fifo(host); /* * Done? 
*/ if (host->num_sg == 0) { wbsd_write_index(host, WBSD_IDX_FIFOEN, 0); tasklet_schedule(&host->finish_tasklet); } end: spin_unlock(&host->lock); } static void wbsd_tasklet_crc(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; struct mmc_data *data; spin_lock(&host->lock); if (!host->mrq) goto end; data = wbsd_get_data(host); if (!data) goto end; DBGF("CRC error\n"); data->error = -EILSEQ; tasklet_schedule(&host->finish_tasklet); end: spin_unlock(&host->lock); } static void wbsd_tasklet_timeout(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; struct mmc_data *data; spin_lock(&host->lock); if (!host->mrq) goto end; data = wbsd_get_data(host); if (!data) goto end; DBGF("Timeout\n"); data->error = -ETIMEDOUT; tasklet_schedule(&host->finish_tasklet); end: spin_unlock(&host->lock); } static void wbsd_tasklet_finish(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; struct mmc_data *data; spin_lock(&host->lock); WARN_ON(!host->mrq); if (!host->mrq) goto end; data = wbsd_get_data(host); if (!data) goto end; wbsd_finish_data(host, data); end: spin_unlock(&host->lock); } /* * Interrupt handling */ static irqreturn_t wbsd_irq(int irq, void *dev_id) { struct wbsd_host *host = dev_id; int isr; isr = inb(host->base + WBSD_ISR); /* * Was it actually our hardware that caused the interrupt? */ if (isr == 0xff || isr == 0x00) return IRQ_NONE; host->isr |= isr; /* * Schedule tasklets as needed. 
*/ if (isr & WBSD_INT_CARD) tasklet_schedule(&host->card_tasklet); if (isr & WBSD_INT_FIFO_THRE) tasklet_schedule(&host->fifo_tasklet); if (isr & WBSD_INT_CRC) tasklet_hi_schedule(&host->crc_tasklet); if (isr & WBSD_INT_TIMEOUT) tasklet_hi_schedule(&host->timeout_tasklet); if (isr & WBSD_INT_TC) tasklet_schedule(&host->finish_tasklet); return IRQ_HANDLED; } /*****************************************************************************\ * * * Device initialisation and shutdown * * * \*****************************************************************************/ /* * Allocate/free MMC structure. */ static int __devinit wbsd_alloc_mmc(struct device *dev) { struct mmc_host *mmc; struct wbsd_host *host; /* * Allocate MMC structure. */ mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev); if (!mmc) return -ENOMEM; host = mmc_priv(mmc); host->mmc = mmc; host->dma = -1; /* * Set host parameters. */ mmc->ops = &wbsd_ops; mmc->f_min = 375000; mmc->f_max = 24000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA; spin_lock_init(&host->lock); /* * Set up timers */ init_timer(&host->ignore_timer); host->ignore_timer.data = (unsigned long)host; host->ignore_timer.function = wbsd_reset_ignore; /* * Maximum number of segments. Worst case is one sector per segment * so this will be 64kB/512. */ mmc->max_segs = 128; /* * Maximum request size. Also limited by 64KiB buffer. */ mmc->max_req_size = 65536; /* * Maximum segment size. Could be one segment with the maximum number * of bytes. */ mmc->max_seg_size = mmc->max_req_size; /* * Maximum block size. We have 12 bits (= 4095) but have to subtract * space for CRC. So the maximum is 4095 - 4*2 = 4087. */ mmc->max_blk_size = 4087; /* * Maximum block count. There is no real limit so the maximum * request size will be the only restriction. 
*/ mmc->max_blk_count = mmc->max_req_size; dev_set_drvdata(dev, mmc); return 0; } static void wbsd_free_mmc(struct device *dev) { struct mmc_host *mmc; struct wbsd_host *host; mmc = dev_get_drvdata(dev); if (!mmc) return; host = mmc_priv(mmc); BUG_ON(host == NULL); del_timer_sync(&host->ignore_timer); mmc_free_host(mmc); dev_set_drvdata(dev, NULL); } /* * Scan for known chip id:s */ static int __devinit wbsd_scan(struct wbsd_host *host) { int i, j, k; int id; /* * Iterate through all ports, all codes to * find hardware that is in our known list. */ for (i = 0; i < ARRAY_SIZE(config_ports); i++) { if (!request_region(config_ports[i], 2, DRIVER_NAME)) continue; for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) { id = 0xFFFF; host->config = config_ports[i]; host->unlock_code = unlock_codes[j]; wbsd_unlock_config(host); outb(WBSD_CONF_ID_HI, config_ports[i]); id = inb(config_ports[i] + 1) << 8; outb(WBSD_CONF_ID_LO, config_ports[i]); id |= inb(config_ports[i] + 1); wbsd_lock_config(host); for (k = 0; k < ARRAY_SIZE(valid_ids); k++) { if (id == valid_ids[k]) { host->chip_id = id; return 0; } } if (id != 0xFFFF) { DBG("Unknown hardware (id %x) found at %x\n", id, config_ports[i]); } } release_region(config_ports[i], 2); } host->config = 0; host->unlock_code = 0; return -ENODEV; } /* * Allocate/free io port ranges */ static int __devinit wbsd_request_region(struct wbsd_host *host, int base) { if (base & 0x7) return -EINVAL; if (!request_region(base, 8, DRIVER_NAME)) return -EIO; host->base = base; return 0; } static void wbsd_release_regions(struct wbsd_host *host) { if (host->base) release_region(host->base, 8); host->base = 0; if (host->config) release_region(host->config, 2); host->config = 0; } /* * Allocate/free DMA port and buffer */ static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma) { if (dma < 0) return; if (request_dma(dma, DRIVER_NAME)) goto err; /* * We need to allocate a special buffer in * order for ISA to be able to DMA to it. 
*/ host->dma_buffer = kmalloc(WBSD_DMA_SIZE, GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN); if (!host->dma_buffer) goto free; /* * Translate the address to a physical address. */ host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); /* * ISA DMA must be aligned on a 64k basis. */ if ((host->dma_addr & 0xffff) != 0) goto kfree; /* * ISA cannot access memory above 16 MB. */ else if (host->dma_addr >= 0x1000000) goto kfree; host->dma = dma; return; kfree: /* * If we've gotten here then there is some kind of alignment bug */ BUG_ON(1); dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); host->dma_addr = 0; kfree(host->dma_buffer); host->dma_buffer = NULL; free: free_dma(dma); err: pr_warning(DRIVER_NAME ": Unable to allocate DMA %d. " "Falling back on FIFO.\n", dma); } static void wbsd_release_dma(struct wbsd_host *host) { if (host->dma_addr) { dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); } kfree(host->dma_buffer); if (host->dma >= 0) free_dma(host->dma); host->dma = -1; host->dma_buffer = NULL; host->dma_addr = 0; } /* * Allocate/free IRQ. */ static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq) { int ret; /* * Set up tasklets. Must be done before requesting interrupt. */ tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host); tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host); tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host); tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host); tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host); /* * Allocate interrupt. 
*/ ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host); if (ret) return ret; host->irq = irq; return 0; } static void wbsd_release_irq(struct wbsd_host *host) { if (!host->irq) return; free_irq(host->irq, host); host->irq = 0; tasklet_kill(&host->card_tasklet); tasklet_kill(&host->fifo_tasklet); tasklet_kill(&host->crc_tasklet); tasklet_kill(&host->timeout_tasklet); tasklet_kill(&host->finish_tasklet); } /* * Allocate all resources for the host. */ static int __devinit wbsd_request_resources(struct wbsd_host *host, int base, int irq, int dma) { int ret; /* * Allocate I/O ports. */ ret = wbsd_request_region(host, base); if (ret) return ret; /* * Allocate interrupt. */ ret = wbsd_request_irq(host, irq); if (ret) return ret; /* * Allocate DMA. */ wbsd_request_dma(host, dma); return 0; } /* * Release all resources for the host. */ static void wbsd_release_resources(struct wbsd_host *host) { wbsd_release_dma(host); wbsd_release_irq(host); wbsd_release_regions(host); } /* * Configure the resources the chip should use. */ static void wbsd_chip_config(struct wbsd_host *host) { wbsd_unlock_config(host); /* * Reset the chip. */ wbsd_write_config(host, WBSD_CONF_SWRST, 1); wbsd_write_config(host, WBSD_CONF_SWRST, 0); /* * Select SD/MMC function. */ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); /* * Set up card detection. */ wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11); /* * Configure chip */ wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8); wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff); wbsd_write_config(host, WBSD_CONF_IRQ, host->irq); if (host->dma >= 0) wbsd_write_config(host, WBSD_CONF_DRQ, host->dma); /* * Enable and power up chip. */ wbsd_write_config(host, WBSD_CONF_ENABLE, 1); wbsd_write_config(host, WBSD_CONF_POWER, 0x20); wbsd_lock_config(host); } /* * Check that configured resources are correct. 
*/ static int wbsd_chip_validate(struct wbsd_host *host) { int base, irq, dma; wbsd_unlock_config(host); /* * Select SD/MMC function. */ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); /* * Read configuration. */ base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8; base |= wbsd_read_config(host, WBSD_CONF_PORT_LO); irq = wbsd_read_config(host, WBSD_CONF_IRQ); dma = wbsd_read_config(host, WBSD_CONF_DRQ); wbsd_lock_config(host); /* * Validate against given configuration. */ if (base != host->base) return 0; if (irq != host->irq) return 0; if ((dma != host->dma) && (host->dma != -1)) return 0; return 1; } /* * Powers down the SD function */ static void wbsd_chip_poweroff(struct wbsd_host *host) { wbsd_unlock_config(host); wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); wbsd_write_config(host, WBSD_CONF_ENABLE, 0); wbsd_lock_config(host); } /*****************************************************************************\ * * * Devices setup and shutdown * * * \*****************************************************************************/ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma, int pnp) { struct wbsd_host *host = NULL; struct mmc_host *mmc = NULL; int ret; ret = wbsd_alloc_mmc(dev); if (ret) return ret; mmc = dev_get_drvdata(dev); host = mmc_priv(mmc); /* * Scan for hardware. */ ret = wbsd_scan(host); if (ret) { if (pnp && (ret == -ENODEV)) { pr_warning(DRIVER_NAME ": Unable to confirm device presence. You may " "experience lock-ups.\n"); } else { wbsd_free_mmc(dev); return ret; } } /* * Request resources. */ ret = wbsd_request_resources(host, base, irq, dma); if (ret) { wbsd_release_resources(host); wbsd_free_mmc(dev); return ret; } /* * See if chip needs to be configured. */ if (pnp) { if ((host->config != 0) && !wbsd_chip_validate(host)) { pr_warning(DRIVER_NAME ": PnP active but chip not configured! " "You probably have a buggy BIOS. 
" "Configuring chip manually.\n"); wbsd_chip_config(host); } } else wbsd_chip_config(host); /* * Power Management stuff. No idea how this works. * Not tested. */ #ifdef CONFIG_PM if (host->config) { wbsd_unlock_config(host); wbsd_write_config(host, WBSD_CONF_PME, 0xA0); wbsd_lock_config(host); } #endif /* * Allow device to initialise itself properly. */ mdelay(5); /* * Reset the chip into a known state. */ wbsd_init_device(host); mmc_add_host(mmc); pr_info("%s: W83L51xD", mmc_hostname(mmc)); if (host->chip_id != 0) printk(" id %x", (int)host->chip_id); printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); if (host->dma >= 0) printk(" dma %d", (int)host->dma); else printk(" FIFO"); if (pnp) printk(" PnP"); printk("\n"); return 0; } static void __devexit wbsd_shutdown(struct device *dev, int pnp) { struct mmc_host *mmc = dev_get_drvdata(dev); struct wbsd_host *host; if (!mmc) return; host = mmc_priv(mmc); mmc_remove_host(mmc); /* * Power down the SD/MMC function. */ if (!pnp) wbsd_chip_poweroff(host); wbsd_release_resources(host); wbsd_free_mmc(dev); } /* * Non-PnP */ static int __devinit wbsd_probe(struct platform_device *dev) { /* Use the module parameters for resources */ return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); } static int __devexit wbsd_remove(struct platform_device *dev) { wbsd_shutdown(&dev->dev, 0); return 0; } /* * PnP */ #ifdef CONFIG_PNP static int __devinit wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id) { int io, irq, dma; /* * Get resources from PnP layer. 
*/ io = pnp_port_start(pnpdev, 0); irq = pnp_irq(pnpdev, 0); if (pnp_dma_valid(pnpdev, 0)) dma = pnp_dma(pnpdev, 0); else dma = -1; DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma); return wbsd_init(&pnpdev->dev, io, irq, dma, 1); } static void __devexit wbsd_pnp_remove(struct pnp_dev *dev) { wbsd_shutdown(&dev->dev, 1); } #endif /* CONFIG_PNP */ /* * Power management */ #ifdef CONFIG_PM static int wbsd_suspend(struct wbsd_host *host, pm_message_t state) { BUG_ON(host == NULL); return mmc_suspend_host(host->mmc); } static int wbsd_resume(struct wbsd_host *host) { BUG_ON(host == NULL); wbsd_init_device(host); return mmc_resume_host(host->mmc); } static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(dev); struct wbsd_host *host; int ret; if (mmc == NULL) return 0; DBGF("Suspending...\n"); host = mmc_priv(mmc); ret = wbsd_suspend(host, state); if (ret) return ret; wbsd_chip_poweroff(host); return 0; } static int wbsd_platform_resume(struct platform_device *dev) { struct mmc_host *mmc = platform_get_drvdata(dev); struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Resuming...\n"); host = mmc_priv(mmc); wbsd_chip_config(host); /* * Allow device to initialise itself properly. */ mdelay(5); return wbsd_resume(host); } #ifdef CONFIG_PNP static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state) { struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Suspending...\n"); host = mmc_priv(mmc); return wbsd_suspend(host, state); } static int wbsd_pnp_resume(struct pnp_dev *pnp_dev) { struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Resuming...\n"); host = mmc_priv(mmc); /* * See if chip needs to be configured. */ if (host->config != 0) { if (!wbsd_chip_validate(host)) { pr_warning(DRIVER_NAME ": PnP active but chip not configured! 
" "You probably have a buggy BIOS. " "Configuring chip manually.\n"); wbsd_chip_config(host); } } /* * Allow device to initialise itself properly. */ mdelay(5); return wbsd_resume(host); } #endif /* CONFIG_PNP */ #else /* CONFIG_PM */ #define wbsd_platform_suspend NULL #define wbsd_platform_resume NULL #define wbsd_pnp_suspend NULL #define wbsd_pnp_resume NULL #endif /* CONFIG_PM */ static struct platform_device *wbsd_device; static struct platform_driver wbsd_driver = { .probe = wbsd_probe, .remove = __devexit_p(wbsd_remove), .suspend = wbsd_platform_suspend, .resume = wbsd_platform_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; #ifdef CONFIG_PNP static struct pnp_driver wbsd_pnp_driver = { .name = DRIVER_NAME, .id_table = pnp_dev_table, .probe = wbsd_pnp_probe, .remove = __devexit_p(wbsd_pnp_remove), .suspend = wbsd_pnp_suspend, .resume = wbsd_pnp_resume, }; #endif /* CONFIG_PNP */ /* * Module loading/unloading */ static int __init wbsd_drv_init(void) { int result; pr_info(DRIVER_NAME ": Winbond W83L51xD SD/MMC card interface driver\n"); pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); #ifdef CONFIG_PNP if (!param_nopnp) { result = pnp_register_driver(&wbsd_pnp_driver); if (result < 0) return result; } #endif /* CONFIG_PNP */ if (param_nopnp) { result = platform_driver_register(&wbsd_driver); if (result < 0) return result; wbsd_device = platform_device_alloc(DRIVER_NAME, -1); if (!wbsd_device) { platform_driver_unregister(&wbsd_driver); return -ENOMEM; } result = platform_device_add(wbsd_device); if (result) { platform_device_put(wbsd_device); platform_driver_unregister(&wbsd_driver); return result; } } return 0; } static void __exit wbsd_drv_exit(void) { #ifdef CONFIG_PNP if (!param_nopnp) pnp_unregister_driver(&wbsd_pnp_driver); #endif /* CONFIG_PNP */ if (param_nopnp) { platform_device_unregister(wbsd_device); platform_driver_unregister(&wbsd_driver); } DBG("unloaded\n"); } module_init(wbsd_drv_init); module_exit(wbsd_drv_exit); 
#ifdef CONFIG_PNP module_param_named(nopnp, param_nopnp, uint, 0444); #endif module_param_named(io, param_io, uint, 0444); module_param_named(irq, param_irq, uint, 0444); module_param_named(dma, param_dma, int, 0444); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver"); #ifdef CONFIG_PNP MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)"); #endif MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)"); MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)"); MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
gpl-2.0
NoelMacwan/Kernel-Honami-Togari-14.2.A.1.142
arch/sh/boards/board-edosk7760.c
7400
4620
/* * Renesas Europe EDOSK7760 Board Support * * Copyright (C) 2008 SPES Societa' Progettazione Elettronica e Software Ltd. * Author: Luca Santini <luca.santini@spesonline.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/smc91x.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/mtd/physmap.h> #include <asm/machvec.h> #include <asm/io.h> #include <asm/addrspace.h> #include <asm/delay.h> #include <asm/i2c-sh7760.h> #include <asm/sizes.h> /* Bus state controller registers for CS4 area */ #define BSC_CS4BCR 0xA4FD0010 #define BSC_CS4WCR 0xA4FD0030 #define SMC_IOBASE 0xA2000000 #define SMC_IO_OFFSET 0x300 #define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET) #define ETHERNET_IRQ 5 /* NOR flash */ static struct mtd_partition edosk7760_nor_flash_partitions[] = { { .name = "bootloader", .offset = 0, .size = SZ_256K, .mask_flags = MTD_WRITEABLE, /* Read-only */ }, { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_2M, }, { .name = "fs", .offset = MTDPART_OFS_APPEND, .size = (26 << 20), }, { .name = "other", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct physmap_flash_data edosk7760_nor_flash_data = { .width = 4, .parts = edosk7760_nor_flash_partitions, .nr_parts = 
ARRAY_SIZE(edosk7760_nor_flash_partitions), }; static struct resource edosk7760_nor_flash_resources[] = { [0] = { .name = "NOR Flash", .start = 0x00000000, .end = 0x00000000 + SZ_32M - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device edosk7760_nor_flash_device = { .name = "physmap-flash", .resource = edosk7760_nor_flash_resources, .num_resources = ARRAY_SIZE(edosk7760_nor_flash_resources), .dev = { .platform_data = &edosk7760_nor_flash_data, }, }; /* i2c initialization functions */ static struct sh7760_i2c_platdata i2c_pd = { .speed_khz = 400, }; static struct resource sh7760_i2c1_res[] = { { .start = SH7760_I2C1_MMIO, .end = SH7760_I2C1_MMIOEND, .flags = IORESOURCE_MEM, },{ .start = SH7760_I2C1_IRQ, .end = SH7760_I2C1_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct platform_device sh7760_i2c1_dev = { .dev = { .platform_data = &i2c_pd, }, .name = SH7760_I2C_DEVNAME, .id = 1, .resource = sh7760_i2c1_res, .num_resources = ARRAY_SIZE(sh7760_i2c1_res), }; static struct resource sh7760_i2c0_res[] = { { .start = SH7760_I2C0_MMIO, .end = SH7760_I2C0_MMIOEND, .flags = IORESOURCE_MEM, }, { .start = SH7760_I2C0_IRQ, .end = SH7760_I2C0_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct platform_device sh7760_i2c0_dev = { .dev = { .platform_data = &i2c_pd, }, .name = SH7760_I2C_DEVNAME, .id = 0, .resource = sh7760_i2c0_res, .num_resources = ARRAY_SIZE(sh7760_i2c0_res), }; /* eth initialization functions */ static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL, }; static struct resource smc91x_res[] = { [0] = { .start = SMC_IOADDR, .end = SMC_IOADDR + SZ_32 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = ETHERNET_IRQ, .end = ETHERNET_IRQ, .flags = IORESOURCE_IRQ , } }; static struct platform_device smc91x_dev = { .name = "smc91x", .id = -1, .num_resources = ARRAY_SIZE(smc91x_res), .resource = smc91x_res, .dev = { .platform_data = &smc91x_info, }, }; /* platform init code */ static struct 
platform_device *edosk7760_devices[] __initdata = { &smc91x_dev, &edosk7760_nor_flash_device, &sh7760_i2c0_dev, &sh7760_i2c1_dev, }; static int __init init_edosk7760_devices(void) { plat_irq_setup_pins(IRQ_MODE_IRQ); return platform_add_devices(edosk7760_devices, ARRAY_SIZE(edosk7760_devices)); } device_initcall(init_edosk7760_devices); /* * The Machine Vector */ struct sh_machine_vector mv_edosk7760 __initmv = { .mv_name = "EDOSK7760", .mv_nr_irqs = 128, };
gpl-2.0
wolverine2k/android_kernel_oppo_n1
net/netrom/nr_in.c
7912
7118
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/netrom.h> static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) { struct sk_buff *skbo, *skbn = skb; struct nr_sock *nr = nr_sk(sk); skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); nr_start_idletimer(sk); if (more) { nr->fraglen += skb->len; skb_queue_tail(&nr->frag_queue, skb); return 0; } if (!more && nr->fraglen > 0) { /* End of fragment */ nr->fraglen += skb->len; skb_queue_tail(&nr->frag_queue, skb); if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL) return 1; skb_reset_transport_header(skbn); while ((skbo = skb_dequeue(&nr->frag_queue)) != NULL) { skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len), skbo->len); kfree_skb(skbo); } nr->fraglen = 0; } return sock_queue_rcv_skb(sk, skbn); } /* * State machine for state 1, Awaiting Connection State. * The handling of the timer(s) is in file nr_timer.c. * Handling of state 0 and connection release is in netrom.c. 
*/ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) { switch (frametype) { case NR_CONNACK: { struct nr_sock *nr = nr_sk(sk); nr_stop_t1timer(sk); nr_start_idletimer(sk); nr->your_index = skb->data[17]; nr->your_id = skb->data[18]; nr->vs = 0; nr->va = 0; nr->vr = 0; nr->vl = 0; nr->state = NR_STATE_3; nr->n2count = 0; nr->window = skb->data[20]; sk->sk_state = TCP_ESTABLISHED; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); break; } case NR_CONNACK | NR_CHOKE_FLAG: nr_disconnect(sk, ECONNREFUSED); break; case NR_RESET: if (sysctl_netrom_reset_circuit) nr_disconnect(sk, ECONNRESET); break; default: break; } return 0; } /* * State machine for state 2, Awaiting Release State. * The handling of the timer(s) is in file nr_timer.c * Handling of state 0 and connection release is in netrom.c. */ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype) { switch (frametype) { case NR_CONNACK | NR_CHOKE_FLAG: nr_disconnect(sk, ECONNRESET); break; case NR_DISCREQ: nr_write_internal(sk, NR_DISCACK); case NR_DISCACK: nr_disconnect(sk, 0); break; case NR_RESET: if (sysctl_netrom_reset_circuit) nr_disconnect(sk, ECONNRESET); break; default: break; } return 0; } /* * State machine for state 3, Connected State. * The handling of the timer(s) is in file nr_timer.c * Handling of state 0 and connection release is in netrom.c. 
*/ static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype) { struct nr_sock *nrom = nr_sk(sk); struct sk_buff_head temp_queue; struct sk_buff *skbn; unsigned short save_vr; unsigned short nr, ns; int queued = 0; nr = skb->data[18]; ns = skb->data[17]; switch (frametype) { case NR_CONNREQ: nr_write_internal(sk, NR_CONNACK); break; case NR_DISCREQ: nr_write_internal(sk, NR_DISCACK); nr_disconnect(sk, 0); break; case NR_CONNACK | NR_CHOKE_FLAG: case NR_DISCACK: nr_disconnect(sk, ECONNRESET); break; case NR_INFOACK: case NR_INFOACK | NR_CHOKE_FLAG: case NR_INFOACK | NR_NAK_FLAG: case NR_INFOACK | NR_NAK_FLAG | NR_CHOKE_FLAG: if (frametype & NR_CHOKE_FLAG) { nrom->condition |= NR_COND_PEER_RX_BUSY; nr_start_t4timer(sk); } else { nrom->condition &= ~NR_COND_PEER_RX_BUSY; nr_stop_t4timer(sk); } if (!nr_validate_nr(sk, nr)) { break; } if (frametype & NR_NAK_FLAG) { nr_frames_acked(sk, nr); nr_send_nak_frame(sk); } else { if (nrom->condition & NR_COND_PEER_RX_BUSY) { nr_frames_acked(sk, nr); } else { nr_check_iframes_acked(sk, nr); } } break; case NR_INFO: case NR_INFO | NR_NAK_FLAG: case NR_INFO | NR_CHOKE_FLAG: case NR_INFO | NR_MORE_FLAG: case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG: case NR_INFO | NR_CHOKE_FLAG | NR_MORE_FLAG: case NR_INFO | NR_NAK_FLAG | NR_MORE_FLAG: case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG | NR_MORE_FLAG: if (frametype & NR_CHOKE_FLAG) { nrom->condition |= NR_COND_PEER_RX_BUSY; nr_start_t4timer(sk); } else { nrom->condition &= ~NR_COND_PEER_RX_BUSY; nr_stop_t4timer(sk); } if (nr_validate_nr(sk, nr)) { if (frametype & NR_NAK_FLAG) { nr_frames_acked(sk, nr); nr_send_nak_frame(sk); } else { if (nrom->condition & NR_COND_PEER_RX_BUSY) { nr_frames_acked(sk, nr); } else { nr_check_iframes_acked(sk, nr); } } } queued = 1; skb_queue_head(&nrom->reseq_queue, skb); if (nrom->condition & NR_COND_OWN_RX_BUSY) break; skb_queue_head_init(&temp_queue); do { save_vr = nrom->vr; while ((skbn = skb_dequeue(&nrom->reseq_queue)) != NULL) { ns = 
skbn->data[17]; if (ns == nrom->vr) { if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) { nrom->vr = (nrom->vr + 1) % NR_MODULUS; } else { nrom->condition |= NR_COND_OWN_RX_BUSY; skb_queue_tail(&temp_queue, skbn); } } else if (nr_in_rx_window(sk, ns)) { skb_queue_tail(&temp_queue, skbn); } else { kfree_skb(skbn); } } while ((skbn = skb_dequeue(&temp_queue)) != NULL) { skb_queue_tail(&nrom->reseq_queue, skbn); } } while (save_vr != nrom->vr); /* * Window is full, ack it immediately. */ if (((nrom->vl + nrom->window) % NR_MODULUS) == nrom->vr) { nr_enquiry_response(sk); } else { if (!(nrom->condition & NR_COND_ACK_PENDING)) { nrom->condition |= NR_COND_ACK_PENDING; nr_start_t2timer(sk); } } break; case NR_RESET: if (sysctl_netrom_reset_circuit) nr_disconnect(sk, ECONNRESET); break; default: break; } return queued; } /* Higher level upcall for a LAPB frame - called with sk locked */ int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb) { struct nr_sock *nr = nr_sk(sk); int queued = 0, frametype; if (nr->state == NR_STATE_0) return 0; frametype = skb->data[19]; switch (nr->state) { case NR_STATE_1: queued = nr_state1_machine(sk, skb, frametype); break; case NR_STATE_2: queued = nr_state2_machine(sk, skb, frametype); break; case NR_STATE_3: queued = nr_state3_machine(sk, skb, frametype); break; } nr_kick(sk); return queued; }
gpl-2.0
agat63/E4GT_ICS_kernel
arch/score/mm/cache.c
8680
7027
/* * arch/score/mm/cache.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Lennox Wu <lennox.wu@sunplusct.com> * Chen Liqin <liqin.chen@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/linkage.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/fs.h> #include <asm/mmu_context.h> /* Just flush entire Dcache!! You must ensure the page doesn't include instructions, because the function will not flush the Icache. The addr must be cache aligned. */ static void flush_data_cache_page(unsigned long addr) { unsigned int i; for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) { __asm__ __volatile__( "cache 0x0e, [%0, 0]\n" "cache 0x1a, [%0, 0]\n" "nop\n" : : "r" (addr)); addr += L1_CACHE_BYTES; } } void flush_dcache_page(struct page *page) { struct address_space *mapping = page_mapping(page); unsigned long addr; if (PageHighMem(page)) return; if (mapping && !mapping_mapped(mapping)) { set_bit(PG_dcache_dirty, &(page)->flags); return; } /* * We could delay the flush for the !page_mapping case too. But that * case is for exec env/arg pages and those are %99 certainly going to * get faulted into the tlb (and thus flushed) anyways. 
*/ addr = (unsigned long) page_address(page); flush_data_cache_page(addr); } /* called by update_mmu_cache. */ void __update_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct page *page; unsigned long pfn, addr; int exec = (vma->vm_flags & VM_EXEC); pfn = pte_pfn(pte); if (unlikely(!pfn_valid(pfn))) return; page = pfn_to_page(pfn); if (page_mapping(page) && test_bit(PG_dcache_dirty, &(page)->flags)) { addr = (unsigned long) page_address(page); if (exec) flush_data_cache_page(addr); clear_bit(PG_dcache_dirty, &(page)->flags); } } static inline void setup_protection_map(void) { protection_map[0] = PAGE_NONE; protection_map[1] = PAGE_READONLY; protection_map[2] = PAGE_COPY; protection_map[3] = PAGE_COPY; protection_map[4] = PAGE_READONLY; protection_map[5] = PAGE_READONLY; protection_map[6] = PAGE_COPY; protection_map[7] = PAGE_COPY; protection_map[8] = PAGE_NONE; protection_map[9] = PAGE_READONLY; protection_map[10] = PAGE_SHARED; protection_map[11] = PAGE_SHARED; protection_map[12] = PAGE_READONLY; protection_map[13] = PAGE_READONLY; protection_map[14] = PAGE_SHARED; protection_map[15] = PAGE_SHARED; } void __devinit cpu_cache_init(void) { setup_protection_map(); } void flush_icache_all(void) { __asm__ __volatile__( "la r8, flush_icache_all\n" "cache 0x10, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" : : : "r8"); } void flush_dcache_all(void) { __asm__ __volatile__( "la r8, flush_dcache_all\n" "cache 0x1f, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" "cache 0x1a, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" : : : "r8"); } void flush_cache_all(void) { __asm__ __volatile__( "la r8, flush_cache_all\n" "cache 0x10, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" "cache 0x1f, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" "cache 0x1a, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" : : : "r8"); } void flush_cache_mm(struct mm_struct *mm) { if (!(mm->context)) return; flush_cache_all(); } /*if we flush a range precisely , the processing may be very long. 
We must check each page in the range whether present. If the page is present, we can flush the range in the page. Be careful, the range may be cross two page, a page is present and another is not present. */ /* The interface is provided in hopes that the port can find a suitably efficient method for removing multiple page sized regions from the cache. */ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; int exec = vma->vm_flags & VM_EXEC; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; if (!(mm->context)) return; pgdp = pgd_offset(mm, start); pudp = pud_offset(pgdp, start); pmdp = pmd_offset(pudp, start); ptep = pte_offset(pmdp, start); while (start <= end) { unsigned long tmpend; pgdp = pgd_offset(mm, start); pudp = pud_offset(pgdp, start); pmdp = pmd_offset(pudp, start); ptep = pte_offset(pmdp, start); if (!(pte_val(*ptep) & _PAGE_PRESENT)) { start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); continue; } tmpend = (start | (PAGE_SIZE-1)) > end ? end : (start | (PAGE_SIZE-1)); flush_dcache_range(start, tmpend); if (exec) flush_icache_range(start, tmpend); start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); } } void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { int exec = vma->vm_flags & VM_EXEC; unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT); flush_dcache_range(kaddr, kaddr + PAGE_SIZE); if (exec) flush_icache_range(kaddr, kaddr + PAGE_SIZE); } void flush_cache_sigtramp(unsigned long addr) { __asm__ __volatile__( "cache 0x02, [%0, 0]\n" "nop\nnop\nnop\nnop\nnop\n" "cache 0x02, [%0, 0x4]\n" "nop\nnop\nnop\nnop\nnop\n" "cache 0x0d, [%0, 0]\n" "nop\nnop\nnop\nnop\nnop\n" "cache 0x0d, [%0, 0x4]\n" "nop\nnop\nnop\nnop\nnop\n" "cache 0x1a, [%0, 0]\n" "nop\nnop\nnop\nnop\nnop\n" : : "r" (addr)); } /* 1. WB and invalid a cache line of Dcache 2. 
Drain Write Buffer the range must be smaller than PAGE_SIZE */ void flush_dcache_range(unsigned long start, unsigned long end) { int size, i; start = start & ~(L1_CACHE_BYTES - 1); end = end & ~(L1_CACHE_BYTES - 1); size = end - start; /* flush dcache to ram, and invalidate dcache lines. */ for (i = 0; i < size; i += L1_CACHE_BYTES) { __asm__ __volatile__( "cache 0x0e, [%0, 0]\n" "nop\nnop\nnop\nnop\nnop\n" "cache 0x1a, [%0, 0]\n" "nop\nnop\nnop\nnop\nnop\n" : : "r" (start)); start += L1_CACHE_BYTES; } } void flush_icache_range(unsigned long start, unsigned long end) { int size, i; start = start & ~(L1_CACHE_BYTES - 1); end = end & ~(L1_CACHE_BYTES - 1); size = end - start; /* invalidate icache lines. */ for (i = 0; i < size; i += L1_CACHE_BYTES) { __asm__ __volatile__( "cache 0x02, [%0, 0]\n" "nop\nnop\nnop\nnop\nnop\n" : : "r" (start)); start += L1_CACHE_BYTES; } }
gpl-2.0
ktoonsez/MIUIv4-I777
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
11240
12348
/* * Linux driver attachment glue for PCI based controllers. * * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c#47 $ */ #include "aic7xxx_osm.h" #include "aic7xxx_pci.h" /* Define the macro locally since it's different for different class of chips. */ #define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI) static const struct pci_device_id ahc_linux_pci_id_table[] = { /* aic7850 based controllers */ ID(ID_AHA_2902_04_10_15_20C_30C), /* aic7860 based controllers */ ID(ID_AHA_2930CU), ID(ID_AHA_1480A & ID_DEV_VENDOR_MASK), ID(ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK), ID(ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK), ID(ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK), /* aic7870 based controllers */ ID(ID_AHA_2940), ID(ID_AHA_3940), ID(ID_AHA_398X), ID(ID_AHA_2944), ID(ID_AHA_3944), ID(ID_AHA_4944), /* aic7880 based controllers */ ID(ID_AHA_2940U & ID_DEV_VENDOR_MASK), ID(ID_AHA_3940U & ID_DEV_VENDOR_MASK), ID(ID_AHA_2944U & ID_DEV_VENDOR_MASK), ID(ID_AHA_3944U & ID_DEV_VENDOR_MASK), ID(ID_AHA_398XU & ID_DEV_VENDOR_MASK), ID(ID_AHA_4944U & ID_DEV_VENDOR_MASK), ID(ID_AHA_2930U & ID_DEV_VENDOR_MASK), ID(ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK), ID(ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK), /* aic7890 based controllers */ ID(ID_AHA_2930U2), ID(ID_AHA_2940U2B), ID(ID_AHA_2940U2_OEM), ID(ID_AHA_2940U2), ID(ID_AHA_2950U2B), ID16(ID_AIC7890_ARO & ID_AIC7895_ARO_MASK), ID(ID_AAA_131U2), /* aic7890 based controllers */ ID(ID_AHA_29160), ID(ID_AHA_29160_CPQ), ID(ID_AHA_29160N), ID(ID_AHA_29160C), ID(ID_AHA_29160B), ID(ID_AHA_19160B), ID(ID_AIC7892_ARO), /* aic7892 based controllers */ ID(ID_AHA_2940U_DUAL), ID(ID_AHA_3940AU), ID(ID_AHA_3944AU), ID(ID_AIC7895_ARO), ID(ID_AHA_3950U2B_0), ID(ID_AHA_3950U2B_1), ID(ID_AHA_3950U2D_0), ID(ID_AHA_3950U2D_1), ID(ID_AIC7896_ARO), /* aic7899 based controllers */ ID(ID_AHA_3960D), ID(ID_AHA_3960D_CPQ), ID(ID_AIC7899_ARO), /* Generic chip probes for devices we don't know exactly. 
*/ ID(ID_AIC7850 & ID_DEV_VENDOR_MASK), ID(ID_AIC7855 & ID_DEV_VENDOR_MASK), ID(ID_AIC7859 & ID_DEV_VENDOR_MASK), ID(ID_AIC7860 & ID_DEV_VENDOR_MASK), ID(ID_AIC7870 & ID_DEV_VENDOR_MASK), ID(ID_AIC7880 & ID_DEV_VENDOR_MASK), ID16(ID_AIC7890 & ID_9005_GENERIC_MASK), ID16(ID_AIC7892 & ID_9005_GENERIC_MASK), ID(ID_AIC7895 & ID_DEV_VENDOR_MASK), ID16(ID_AIC7896 & ID_9005_GENERIC_MASK), ID16(ID_AIC7899 & ID_9005_GENERIC_MASK), ID(ID_AIC7810 & ID_DEV_VENDOR_MASK), ID(ID_AIC7815 & ID_DEV_VENDOR_MASK), { 0 } }; MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table); #ifdef CONFIG_PM static int ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ahc_softc *ahc = pci_get_drvdata(pdev); int rc; if ((rc = ahc_suspend(ahc))) return rc; pci_save_state(pdev); pci_disable_device(pdev); if (mesg.event & PM_EVENT_SLEEP) pci_set_power_state(pdev, PCI_D3hot); return rc; } static int ahc_linux_pci_dev_resume(struct pci_dev *pdev) { struct ahc_softc *ahc = pci_get_drvdata(pdev); int rc; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if ((rc = pci_enable_device(pdev))) { dev_printk(KERN_ERR, &pdev->dev, "failed to enable device after resume (%d)\n", rc); return rc; } pci_set_master(pdev); ahc_pci_resume(ahc); return (ahc_resume(ahc)); } #endif static void ahc_linux_pci_dev_remove(struct pci_dev *pdev) { struct ahc_softc *ahc = pci_get_drvdata(pdev); u_long s; if (ahc->platform_data && ahc->platform_data->host) scsi_remove_host(ahc->platform_data->host); ahc_lock(ahc, &s); ahc_intr_enable(ahc, FALSE); ahc_unlock(ahc, &s); ahc_free(ahc); } static void ahc_linux_pci_inherit_flags(struct ahc_softc *ahc) { struct pci_dev *pdev = ahc->dev_softc, *master_pdev; unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); master_pdev = pci_get_slot(pdev->bus, master_devfn); if (master_pdev) { struct ahc_softc *master = pci_get_drvdata(master_pdev); if (master) { ahc->flags &= ~AHC_BIOS_ENABLED; ahc->flags |= master->flags & AHC_BIOS_ENABLED; ahc->flags &= 
~AHC_PRIMARY_CHANNEL; ahc->flags |= master->flags & AHC_PRIMARY_CHANNEL; } else printk(KERN_ERR "aic7xxx: no multichannel peer found!\n"); pci_dev_put(master_pdev); } } static int ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { char buf[80]; const uint64_t mask_39bit = 0x7FFFFFFFFFULL; struct ahc_softc *ahc; ahc_dev_softc_t pci; const struct ahc_pci_identity *entry; char *name; int error; struct device *dev = &pdev->dev; pci = pdev; entry = ahc_find_pci_device(pci); if (entry == NULL) return (-ENODEV); /* * Allocate a softc for this card and * set it up for attachment by our * common detect routine. */ sprintf(buf, "ahc_pci:%d:%d:%d", ahc_get_pci_bus(pci), ahc_get_pci_slot(pci), ahc_get_pci_function(pci)); name = kmalloc(strlen(buf) + 1, GFP_ATOMIC); if (name == NULL) return (-ENOMEM); strcpy(name, buf); ahc = ahc_alloc(NULL, name); if (ahc == NULL) return (-ENOMEM); if (pci_enable_device(pdev)) { ahc_free(ahc); return (-ENODEV); } pci_set_master(pdev); if (sizeof(dma_addr_t) > 4 && ahc->features & AHC_LARGE_SCBS && dma_set_mask(dev, mask_39bit) == 0 && dma_get_required_mask(dev) > DMA_BIT_MASK(32)) { ahc->flags |= AHC_39BIT_ADDRESSING; } else { if (dma_set_mask(dev, DMA_BIT_MASK(32))) { ahc_free(ahc); printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); return (-ENODEV); } } ahc->dev_softc = pci; error = ahc_pci_config(ahc, entry); if (error != 0) { ahc_free(ahc); return (-error); } /* * Second Function PCI devices need to inherit some * settings from function 0. 
*/ if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0) ahc_linux_pci_inherit_flags(ahc); pci_set_drvdata(pdev, ahc); ahc_linux_register_host(ahc, &aic7xxx_driver_template); return (0); } /******************************* PCI Routines *********************************/ uint32_t ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width) { switch (width) { case 1: { uint8_t retval; pci_read_config_byte(pci, reg, &retval); return (retval); } case 2: { uint16_t retval; pci_read_config_word(pci, reg, &retval); return (retval); } case 4: { uint32_t retval; pci_read_config_dword(pci, reg, &retval); return (retval); } default: panic("ahc_pci_read_config: Read size too big"); /* NOTREACHED */ return (0); } } void ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width) { switch (width) { case 1: pci_write_config_byte(pci, reg, value); break; case 2: pci_write_config_word(pci, reg, value); break; case 4: pci_write_config_dword(pci, reg, value); break; default: panic("ahc_pci_write_config: Write size too big"); /* NOTREACHED */ } } static struct pci_driver aic7xxx_pci_driver = { .name = "aic7xxx", .probe = ahc_linux_pci_dev_probe, #ifdef CONFIG_PM .suspend = ahc_linux_pci_dev_suspend, .resume = ahc_linux_pci_dev_resume, #endif .remove = ahc_linux_pci_dev_remove, .id_table = ahc_linux_pci_id_table }; int ahc_linux_pci_init(void) { return pci_register_driver(&aic7xxx_pci_driver); } void ahc_linux_pci_exit(void) { pci_unregister_driver(&aic7xxx_pci_driver); } static int ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base) { if (aic7xxx_allow_memio == 0) return (ENOMEM); *base = pci_resource_start(ahc->dev_softc, 0); if (*base == 0) return (ENOMEM); if (!request_region(*base, 256, "aic7xxx")) return (ENOMEM); return (0); } static int ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc, resource_size_t *bus_addr, uint8_t __iomem **maddr) { resource_size_t start; int error; error = 0; start = 
pci_resource_start(ahc->dev_softc, 1); if (start != 0) { *bus_addr = start; if (!request_mem_region(start, 0x1000, "aic7xxx")) error = ENOMEM; if (error == 0) { *maddr = ioremap_nocache(start, 256); if (*maddr == NULL) { error = ENOMEM; release_mem_region(start, 0x1000); } } } else error = ENOMEM; return (error); } int ahc_pci_map_registers(struct ahc_softc *ahc) { uint32_t command; resource_size_t base; uint8_t __iomem *maddr; int error; /* * If its allowed, we prefer memory mapped access. */ command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, 4); command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN); base = 0; maddr = NULL; error = ahc_linux_pci_reserve_mem_region(ahc, &base, &maddr); if (error == 0) { ahc->platform_data->mem_busaddr = base; ahc->tag = BUS_SPACE_MEMIO; ahc->bsh.maddr = maddr; ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command | PCIM_CMD_MEMEN, 4); /* * Do a quick test to see if memory mapped * I/O is functioning correctly. */ if (ahc_pci_test_register_access(ahc) != 0) { printk("aic7xxx: PCI Device %d:%d:%d " "failed memory mapped test. Using PIO.\n", ahc_get_pci_bus(ahc->dev_softc), ahc_get_pci_slot(ahc->dev_softc), ahc_get_pci_function(ahc->dev_softc)); iounmap(maddr); release_mem_region(ahc->platform_data->mem_busaddr, 0x1000); ahc->bsh.maddr = NULL; maddr = NULL; } else command |= PCIM_CMD_MEMEN; } else { printk("aic7xxx: PCI%d:%d:%d MEM region 0x%llx " "unavailable. Cannot memory map device.\n", ahc_get_pci_bus(ahc->dev_softc), ahc_get_pci_slot(ahc->dev_softc), ahc_get_pci_function(ahc->dev_softc), (unsigned long long)base); } /* * We always prefer memory mapped access. */ if (maddr == NULL) { error = ahc_linux_pci_reserve_io_region(ahc, &base); if (error == 0) { ahc->tag = BUS_SPACE_PIO; ahc->bsh.ioport = (u_long)base; command |= PCIM_CMD_PORTEN; } else { printk("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] " "unavailable. 
Cannot map device.\n", ahc_get_pci_bus(ahc->dev_softc), ahc_get_pci_slot(ahc->dev_softc), ahc_get_pci_function(ahc->dev_softc), (unsigned long long)base); } } ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4); return (error); } int ahc_pci_map_int(struct ahc_softc *ahc) { int error; error = request_irq(ahc->dev_softc->irq, ahc_linux_isr, IRQF_SHARED, "aic7xxx", ahc); if (error == 0) ahc->platform_data->irq = ahc->dev_softc->irq; return (-error); }
gpl-2.0
Nick73/King_Kernel
arch/ia64/sn/kernel/iomv.c
13800
2281
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000-2003, 2006 Silicon Graphics, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/acpi.h> #include <asm/io.h> #include <asm/delay.h> #include <asm/vga.h> #include <asm/sn/nodepda.h> #include <asm/sn/simulator.h> #include <asm/sn/pda.h> #include <asm/sn/sn_cpuid.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/acpi.h> #define IS_LEGACY_VGA_IOPORT(p) \ (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df)) /** * sn_io_addr - convert an in/out port to an i/o address * @port: port to convert * * Legacy in/out instructions are converted to ld/st instructions * on IA64. This routine will convert a port number into a valid * SN i/o address. Used by sn_in*() and sn_out*(). */ void *sn_io_addr(unsigned long port) { if (!IS_RUNNING_ON_SIMULATOR()) { if (IS_LEGACY_VGA_IOPORT(port)) return (__ia64_mk_io_addr(port)); /* On sn2, legacy I/O ports don't point at anything */ if (port < (64 * 1024)) return NULL; if (SN_ACPI_BASE_SUPPORT()) return (__ia64_mk_io_addr(port)); else return ((void *)(port | __IA64_UNCACHED_OFFSET)); } else { /* but the simulator uses them... */ unsigned long addr; /* * word align port, but need more than 10 bits * for accessing registers in bedrock local block * (so we don't do port&0xfff) */ addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12); if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7) addr |= port; return (void *)addr; } } EXPORT_SYMBOL(sn_io_addr); /** * __sn_mmiowb - I/O space memory barrier * * See arch/ia64/include/asm/io.h and Documentation/DocBook/deviceiobook.tmpl * for details. * * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear. * See PV 871084 for details about the WAR about zero value. 
* */ void __sn_mmiowb(void) { volatile unsigned long *adr = pda->pio_write_status_addr; unsigned long val = pda->pio_write_status_val; while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val) cpu_relax(); } EXPORT_SYMBOL(__sn_mmiowb);
gpl-2.0
madmack/i747_kernel_ics
drivers/media/video/msm_zsl/mt9p012_bam.c
745
34644
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/delay.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/kernel.h> #include <media/msm_camera.h> #include <mach/gpio.h> #include <mach/camera.h> #include "mt9p012.h" /*============================================================= SENSOR REGISTER DEFINES ==============================================================*/ #define MT9P012_REG_MODEL_ID 0x0000 #define MT9P012_MODEL_ID 0x2801 #define REG_GROUPED_PARAMETER_HOLD 0x0104 #define GROUPED_PARAMETER_HOLD 0x0100 #define GROUPED_PARAMETER_UPDATE 0x0000 #define REG_COARSE_INT_TIME 0x3012 #define REG_VT_PIX_CLK_DIV 0x0300 #define REG_VT_SYS_CLK_DIV 0x0302 #define REG_PRE_PLL_CLK_DIV 0x0304 #define REG_PLL_MULTIPLIER 0x0306 #define REG_OP_PIX_CLK_DIV 0x0308 #define REG_OP_SYS_CLK_DIV 0x030A #define REG_SCALE_M 0x0404 #define REG_FRAME_LENGTH_LINES 0x300A #define REG_LINE_LENGTH_PCK 0x300C #define REG_X_ADDR_START 0x3004 #define REG_Y_ADDR_START 0x3002 #define REG_X_ADDR_END 0x3008 #define REG_Y_ADDR_END 0x3006 #define REG_X_OUTPUT_SIZE 0x034C #define REG_Y_OUTPUT_SIZE 0x034E #define REG_FINE_INTEGRATION_TIME 0x3014 #define REG_ROW_SPEED 0x3016 #define MT9P012_REG_RESET_REGISTER 0x301A #define MT9P012_RESET_REGISTER_PWON 0x10CC #define MT9P012_RESET_REGISTER_PWOFF 0x10C8 #define REG_READ_MODE 0x3040 #define REG_GLOBAL_GAIN 0x305E #define REG_TEST_PATTERN_MODE 0x3070 #define MT9P012_REV_7 enum mt9p012_test_mode { TEST_OFF, 
TEST_1, TEST_2, TEST_3 }; enum mt9p012_resolution { QTR_SIZE, FULL_SIZE, INVALID_SIZE }; enum mt9p012_reg_update { /* Sensor egisters that need to be updated during initialization */ REG_INIT, /* Sensor egisters that needs periodic I2C writes */ UPDATE_PERIODIC, /* All the sensor Registers will be updated */ UPDATE_ALL, /* Not valid update */ UPDATE_INVALID }; enum mt9p012_setting { RES_PREVIEW, RES_CAPTURE }; /* actuator's Slave Address */ #define MT9P012_AF_I2C_ADDR 0x0A /* AF Total steps parameters */ #define MT9P012_STEPS_NEAR_TO_CLOSEST_INF 20 #define MT9P012_TOTAL_STEPS_NEAR_TO_FAR 20 #define MT9P012_MU5M0_PREVIEW_DUMMY_PIXELS 0 #define MT9P012_MU5M0_PREVIEW_DUMMY_LINES 0 /* Time in milisecs for waiting for the sensor to reset.*/ #define MT9P012_RESET_DELAY_MSECS 66 /* for 20 fps preview */ #define MT9P012_DEFAULT_CLOCK_RATE 24000000 #define MT9P012_DEFAULT_MAX_FPS 26 /* ???? */ struct mt9p012_work { struct work_struct work; }; static struct mt9p012_work *mt9p012_sensorw; static struct i2c_client *mt9p012_client; struct mt9p012_ctrl { const struct msm_camera_sensor_info *sensordata; int sensormode; uint32_t fps_divider; /* init to 1 * 0x00000400 */ uint32_t pict_fps_divider; /* init to 1 * 0x00000400 */ uint16_t curr_lens_pos; uint16_t init_curr_lens_pos; uint16_t my_reg_gain; uint32_t my_reg_line_count; enum mt9p012_resolution prev_res; enum mt9p012_resolution pict_res; enum mt9p012_resolution curr_res; enum mt9p012_test_mode set_test; }; static uint16_t bam_macro, bam_infinite; static uint16_t bam_step_lookup_table[MT9P012_TOTAL_STEPS_NEAR_TO_FAR + 1]; static uint16_t update_type = UPDATE_PERIODIC; static struct mt9p012_ctrl *mt9p012_ctrl; static DECLARE_WAIT_QUEUE_HEAD(mt9p012_wait_queue); DEFINE_MUTEX(mt9p012_mut); /*=============================================================*/ static int mt9p012_i2c_rxdata(unsigned short saddr, int slength, unsigned char *rxdata, int rxlength) { struct i2c_msg msgs[] = { { .addr = saddr, .flags = 0, .len = slength, 
.buf = rxdata, }, { .addr = saddr, .flags = I2C_M_RD, .len = rxlength, .buf = rxdata, }, }; if (i2c_transfer(mt9p012_client->adapter, msgs, 2) < 0) { CDBG("mt9p012_i2c_rxdata failed!\n"); return -EIO; } return 0; } static int32_t mt9p012_i2c_read_b(unsigned short saddr, unsigned char raddr, unsigned short *rdata) { int32_t rc = 0; if (!rdata) return -EIO; rc = mt9p012_i2c_rxdata(saddr, 1, &raddr, 1); if (rc < 0) return rc; *rdata = raddr; if (rc < 0) CDBG("mt9p012_i2c_read_b failed!\n"); return rc; } static int32_t mt9p012_i2c_read_w(unsigned short saddr, unsigned short raddr, unsigned short *rdata) { int32_t rc = 0; unsigned char buf[4]; if (!rdata) return -EIO; memset(buf, 0, sizeof(buf)); buf[0] = (raddr & 0xFF00) >> 8; buf[1] = (raddr & 0x00FF); rc = mt9p012_i2c_rxdata(saddr, 2, buf, 2); if (rc < 0) return rc; *rdata = buf[0] << 8 | buf[1]; if (rc < 0) CDBG("mt9p012_i2c_read failed!\n"); return rc; } static int32_t mt9p012_i2c_txdata(unsigned short saddr, unsigned char *txdata, int length) { struct i2c_msg msg[] = { { .addr = saddr, .flags = 0, .len = length, .buf = txdata, }, }; if (i2c_transfer(mt9p012_client->adapter, msg, 1) < 0) { CDBG("mt9p012_i2c_txdata failed\n"); return -EIO; } return 0; } static int32_t mt9p012_i2c_write_b(unsigned short saddr, unsigned short baddr, unsigned short bdata) { int32_t rc = -EIO; unsigned char buf[2]; memset(buf, 0, sizeof(buf)); buf[0] = baddr; buf[1] = bdata; rc = mt9p012_i2c_txdata(saddr, buf, 2); if (rc < 0) CDBG("i2c_write failed, saddr = 0x%x addr = 0x%x, val =0x%x!\n", saddr, baddr, bdata); return rc; } static int32_t mt9p012_i2c_write_w(unsigned short saddr, unsigned short waddr, unsigned short wdata) { int32_t rc = -EIO; unsigned char buf[4]; memset(buf, 0, sizeof(buf)); buf[0] = (waddr & 0xFF00) >> 8; buf[1] = (waddr & 0x00FF); buf[2] = (wdata & 0xFF00) >> 8; buf[3] = (wdata & 0x00FF); rc = mt9p012_i2c_txdata(saddr, buf, 4); if (rc < 0) CDBG("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n", waddr, wdata); return 
rc; } static int32_t mt9p012_i2c_write_w_table(struct mt9p012_i2c_reg_conf const *reg_conf_tbl, int num) { int i; int32_t rc = -EIO; for (i = 0; i < num; i++) { rc = mt9p012_i2c_write_w(mt9p012_client->addr, reg_conf_tbl->waddr, reg_conf_tbl->wdata); if (rc < 0) break; reg_conf_tbl++; } return rc; } static int32_t mt9p012_test(enum mt9p012_test_mode mo) { int32_t rc = 0; rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); if (rc < 0) return rc; if (mo == TEST_OFF) return 0; else { rc = mt9p012_i2c_write_w_table(mt9p012_regs.ttbl, mt9p012_regs.ttbl_size); if (rc < 0) return rc; rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_TEST_PATTERN_MODE, (uint16_t) mo); if (rc < 0) return rc; } rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); if (rc < 0) return rc; return rc; } static int32_t mt9p012_lens_shading_enable(uint8_t is_enable) { int32_t rc = 0; CDBG("%s: entered. enable = %d\n", __func__, is_enable); rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); if (rc < 0) return rc; rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x3780, ((uint16_t) is_enable) << 15); if (rc < 0) return rc; rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); CDBG("%s: exiting. 
rc = %d\n", __func__, rc); return rc; } static int32_t mt9p012_set_lc(void) { int32_t rc; rc = mt9p012_i2c_write_w_table(mt9p012_regs.rftbl, mt9p012_regs.rftbl_size); return rc; } static void mt9p012_get_pict_fps(uint16_t fps, uint16_t *pfps) { /* input fps is preview fps in Q8 format */ uint32_t divider; /*Q10 */ uint32_t pclk_mult; /*Q10 */ if (mt9p012_ctrl->prev_res == QTR_SIZE) { divider = (uint32_t) (((mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines * mt9p012_regs.reg_pat[RES_PREVIEW].line_length_pck) * 0x00000400) / (mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines * mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck)); pclk_mult = (uint32_t) ((mt9p012_regs.reg_pat[RES_CAPTURE]. pll_multiplier * 0x00000400) / (mt9p012_regs.reg_pat[RES_PREVIEW]. pll_multiplier)); } else { /* full size resolution used for preview. */ divider = 0x00000400; /*1.0 */ pclk_mult = 0x00000400; /*1.0 */ } /* Verify PCLK settings and frame sizes. */ *pfps = (uint16_t) (fps * divider * pclk_mult / 0x00000400 / 0x00000400); } static uint16_t mt9p012_get_prev_lines_pf(void) { if (mt9p012_ctrl->prev_res == QTR_SIZE) return mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines; else return mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines; } static uint16_t mt9p012_get_prev_pixels_pl(void) { if (mt9p012_ctrl->prev_res == QTR_SIZE) return mt9p012_regs.reg_pat[RES_PREVIEW].line_length_pck; else return mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck; } static uint16_t mt9p012_get_pict_lines_pf(void) { return mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines; } static uint16_t mt9p012_get_pict_pixels_pl(void) { return mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck; } static uint32_t mt9p012_get_pict_max_exp_lc(void) { uint16_t snapshot_lines_per_frame; if (mt9p012_ctrl->pict_res == QTR_SIZE) snapshot_lines_per_frame = mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines - 1; else snapshot_lines_per_frame = mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines - 1; return 
snapshot_lines_per_frame * 24; } static int32_t mt9p012_set_fps(struct fps_cfg *fps) { /* input is new fps in Q10 format */ int32_t rc = 0; enum mt9p012_setting setting; mt9p012_ctrl->fps_divider = fps->fps_div; mt9p012_ctrl->pict_fps_divider = fps->pict_fps_div; rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); if (rc < 0) return -EBUSY; if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE) setting = RES_PREVIEW; else setting = RES_CAPTURE; rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_FRAME_LENGTH_LINES, (mt9p012_regs.reg_pat[setting].frame_length_lines * fps->fps_div / 0x00000400)); if (rc < 0) return rc; rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); return rc; } static int32_t mt9p012_write_exp_gain(uint16_t gain, uint32_t line) { uint16_t max_legal_gain = 0x01FF; uint32_t line_length_ratio = 0x00000400; enum mt9p012_setting setting; int32_t rc = 0; CDBG("Line:%d mt9p012_write_exp_gain \n", __LINE__); if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE) { mt9p012_ctrl->my_reg_gain = gain; mt9p012_ctrl->my_reg_line_count = (uint16_t) line; } if (gain > max_legal_gain) { CDBG("Max legal gain Line:%d \n", __LINE__); gain = max_legal_gain; } /* Verify no overflow */ if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE) { line = (uint32_t) (line * mt9p012_ctrl->fps_divider / 0x00000400); setting = RES_PREVIEW; } else { line = (uint32_t) (line * mt9p012_ctrl->pict_fps_divider / 0x00000400); setting = RES_CAPTURE; } /* Set digital gain to 1 */ #ifdef MT9P012_REV_7 gain |= 0x1000; #else gain |= 0x0200; #endif if ((mt9p012_regs.reg_pat[setting].frame_length_lines - 1) < line) { line_length_ratio = (uint32_t) (line * 0x00000400) / (mt9p012_regs.reg_pat[setting].frame_length_lines - 1); } else line_length_ratio = 0x00000400; rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GLOBAL_GAIN, gain); if (rc < 0) { CDBG("mt9p012_i2c_write_w failed... 
Line:%d \n", __LINE__); return rc; } rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_COARSE_INT_TIME, line); if (rc < 0) { CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__); return rc; } CDBG("mt9p012_write_exp_gain: gain = %d, line = %d\n", gain, line); return rc; } static int32_t mt9p012_set_pict_exp_gain(uint16_t gain, uint32_t line) { int32_t rc = 0; CDBG("Line:%d mt9p012_set_pict_exp_gain \n", __LINE__); rc = mt9p012_write_exp_gain(gain, line); if (rc < 0) { CDBG("Line:%d mt9p012_set_pict_exp_gain failed... \n", __LINE__); return rc; } rc = mt9p012_i2c_write_w(mt9p012_client->addr, MT9P012_REG_RESET_REGISTER, 0x10CC | 0x0002); if (rc < 0) { CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__); return rc; } mdelay(5); /* camera_timed_wait(snapshot_wait*exposure_ratio); */ return rc; } static int32_t mt9p012_setting(enum mt9p012_reg_update rupdate, enum mt9p012_setting rt) { int32_t rc = 0; switch (rupdate) { case UPDATE_PERIODIC: if (rt == RES_PREVIEW || rt == RES_CAPTURE) { struct mt9p012_i2c_reg_conf ppc_tbl[] = { {REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD}, {REG_ROW_SPEED, mt9p012_regs.reg_pat[rt].row_speed}, {REG_X_ADDR_START, mt9p012_regs.reg_pat[rt].x_addr_start}, {REG_X_ADDR_END, mt9p012_regs.reg_pat[rt].x_addr_end}, {REG_Y_ADDR_START, mt9p012_regs.reg_pat[rt].y_addr_start}, {REG_Y_ADDR_END, mt9p012_regs.reg_pat[rt].y_addr_end}, {REG_READ_MODE, mt9p012_regs.reg_pat[rt].read_mode}, {REG_SCALE_M, mt9p012_regs.reg_pat[rt].scale_m}, {REG_X_OUTPUT_SIZE, mt9p012_regs.reg_pat[rt].x_output_size}, {REG_Y_OUTPUT_SIZE, mt9p012_regs.reg_pat[rt].y_output_size}, {REG_LINE_LENGTH_PCK, mt9p012_regs.reg_pat[rt].line_length_pck}, {REG_FRAME_LENGTH_LINES, (mt9p012_regs.reg_pat[rt].frame_length_lines * mt9p012_ctrl->fps_divider / 0x00000400)}, {REG_COARSE_INT_TIME, mt9p012_regs.reg_pat[rt].coarse_int_time}, {REG_FINE_INTEGRATION_TIME, mt9p012_regs.reg_pat[rt].fine_int_time}, {REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE}, }; if 
(update_type == REG_INIT) { update_type = rupdate; return rc; } rc = mt9p012_i2c_write_w_table(&ppc_tbl[0], ARRAY_SIZE(ppc_tbl)); if (rc < 0) return rc; rc = mt9p012_test(mt9p012_ctrl->set_test); if (rc < 0) return rc; rc = mt9p012_i2c_write_w(mt9p012_client->addr, MT9P012_REG_RESET_REGISTER, MT9P012_RESET_REGISTER_PWON | 0x0002); if (rc < 0) return rc; mdelay(5); /* 15? wait for sensor to transition */ return rc; } break; /* UPDATE_PERIODIC */ case REG_INIT: if (rt == RES_PREVIEW || rt == RES_CAPTURE) { struct mt9p012_i2c_reg_conf ipc_tbl1[] = { {MT9P012_REG_RESET_REGISTER, MT9P012_RESET_REGISTER_PWOFF}, {REG_VT_PIX_CLK_DIV, mt9p012_regs.reg_pat[rt].vt_pix_clk_div}, {REG_VT_SYS_CLK_DIV, mt9p012_regs.reg_pat[rt].vt_sys_clk_div}, {REG_PRE_PLL_CLK_DIV, mt9p012_regs.reg_pat[rt].pre_pll_clk_div}, {REG_PLL_MULTIPLIER, mt9p012_regs.reg_pat[rt].pll_multiplier}, {REG_OP_PIX_CLK_DIV, mt9p012_regs.reg_pat[rt].op_pix_clk_div}, {REG_OP_SYS_CLK_DIV, mt9p012_regs.reg_pat[rt].op_sys_clk_div}, #ifdef MT9P012_REV_7 {0x30B0, 0x0001}, {0x308E, 0xE060}, {0x3092, 0x0A52}, {0x3094, 0x4656}, {0x3096, 0x5652}, {0x30CA, 0x8006}, {0x312A, 0xDD02}, {0x312C, 0x00E4}, {0x3170, 0x299A}, #endif /* optimized settings for noise */ {0x3088, 0x6FF6}, {0x3154, 0x0282}, {0x3156, 0x0381}, {0x3162, 0x04CE}, {0x0204, 0x0010}, {0x0206, 0x0010}, {0x0208, 0x0010}, {0x020A, 0x0010}, {0x020C, 0x0010}, {MT9P012_REG_RESET_REGISTER, MT9P012_RESET_REGISTER_PWON}, }; struct mt9p012_i2c_reg_conf ipc_tbl2[] = { {MT9P012_REG_RESET_REGISTER, MT9P012_RESET_REGISTER_PWOFF}, {REG_VT_PIX_CLK_DIV, mt9p012_regs.reg_pat[rt].vt_pix_clk_div}, {REG_VT_SYS_CLK_DIV, mt9p012_regs.reg_pat[rt].vt_sys_clk_div}, {REG_PRE_PLL_CLK_DIV, mt9p012_regs.reg_pat[rt].pre_pll_clk_div}, {REG_PLL_MULTIPLIER, mt9p012_regs.reg_pat[rt].pll_multiplier}, {REG_OP_PIX_CLK_DIV, mt9p012_regs.reg_pat[rt].op_pix_clk_div}, {REG_OP_SYS_CLK_DIV, mt9p012_regs.reg_pat[rt].op_sys_clk_div}, #ifdef MT9P012_REV_7 {0x30B0, 0x0001}, {0x308E, 0xE060}, {0x3092, 0x0A52}, 
{0x3094, 0x4656}, {0x3096, 0x5652}, {0x30CA, 0x8006}, {0x312A, 0xDD02}, {0x312C, 0x00E4}, {0x3170, 0x299A}, #endif /* optimized settings for noise */ {0x3088, 0x6FF6}, {0x3154, 0x0282}, {0x3156, 0x0381}, {0x3162, 0x04CE}, {0x0204, 0x0010}, {0x0206, 0x0010}, {0x0208, 0x0010}, {0x020A, 0x0010}, {0x020C, 0x0010}, {MT9P012_REG_RESET_REGISTER, MT9P012_RESET_REGISTER_PWON}, }; struct mt9p012_i2c_reg_conf ipc_tbl3[] = { {REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD}, /* Set preview or snapshot mode */ {REG_ROW_SPEED, mt9p012_regs.reg_pat[rt].row_speed}, {REG_X_ADDR_START, mt9p012_regs.reg_pat[rt].x_addr_start}, {REG_X_ADDR_END, mt9p012_regs.reg_pat[rt].x_addr_end}, {REG_Y_ADDR_START, mt9p012_regs.reg_pat[rt].y_addr_start}, {REG_Y_ADDR_END, mt9p012_regs.reg_pat[rt].y_addr_end}, {REG_READ_MODE, mt9p012_regs.reg_pat[rt].read_mode}, {REG_SCALE_M, mt9p012_regs.reg_pat[rt].scale_m}, {REG_X_OUTPUT_SIZE, mt9p012_regs.reg_pat[rt].x_output_size}, {REG_Y_OUTPUT_SIZE, mt9p012_regs.reg_pat[rt].y_output_size}, {REG_LINE_LENGTH_PCK, mt9p012_regs.reg_pat[rt].line_length_pck}, {REG_FRAME_LENGTH_LINES, mt9p012_regs.reg_pat[rt].frame_length_lines}, {REG_COARSE_INT_TIME, mt9p012_regs.reg_pat[rt].coarse_int_time}, {REG_FINE_INTEGRATION_TIME, mt9p012_regs.reg_pat[rt].fine_int_time}, {REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE}, }; /* reset fps_divider */ mt9p012_ctrl->fps_divider = 1 * 0x0400; rc = mt9p012_i2c_write_w_table(&ipc_tbl1[0], ARRAY_SIZE(ipc_tbl1)); if (rc < 0) return rc; rc = mt9p012_i2c_write_w_table(&ipc_tbl2[0], ARRAY_SIZE(ipc_tbl2)); if (rc < 0) return rc; mdelay(5); rc = mt9p012_i2c_write_w_table(&ipc_tbl3[0], ARRAY_SIZE(ipc_tbl3)); if (rc < 0) return rc; /* load lens shading */ rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); if (rc < 0) return rc; rc = mt9p012_set_lc(); if (rc < 0) return rc; rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); if (rc < 0) 
return rc; } update_type = rupdate; break; /* case REG_INIT: */ default: rc = -EINVAL; break; } /* switch (rupdate) */ return rc; } static int32_t mt9p012_video_config(int mode, int res) { int32_t rc; switch (res) { case QTR_SIZE: rc = mt9p012_setting(UPDATE_PERIODIC, RES_PREVIEW); if (rc < 0) return rc; CDBG("mt9p012 sensor configuration done!\n"); break; case FULL_SIZE: rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE); if (rc < 0) return rc; break; default: return 0; } /* switch */ mt9p012_ctrl->prev_res = res; mt9p012_ctrl->curr_res = res; mt9p012_ctrl->sensormode = mode; rc = mt9p012_write_exp_gain(mt9p012_ctrl->my_reg_gain, mt9p012_ctrl->my_reg_line_count); rc = mt9p012_i2c_write_w(mt9p012_client->addr, MT9P012_REG_RESET_REGISTER, 0x10cc | 0x0002); return rc; } static int32_t mt9p012_snapshot_config(int mode) { int32_t rc = 0; rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE); if (rc < 0) return rc; mt9p012_ctrl->curr_res = mt9p012_ctrl->pict_res; mt9p012_ctrl->sensormode = mode; return rc; } static int32_t mt9p012_raw_snapshot_config(int mode) { int32_t rc = 0; rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE); if (rc < 0) return rc; mt9p012_ctrl->curr_res = mt9p012_ctrl->pict_res; mt9p012_ctrl->sensormode = mode; return rc; } static int32_t mt9p012_power_down(void) { int32_t rc = 0; rc = mt9p012_i2c_write_w(mt9p012_client->addr, MT9P012_REG_RESET_REGISTER, MT9P012_RESET_REGISTER_PWOFF); mdelay(5); return rc; } static int32_t mt9p012_move_focus(int direction, int32_t num_steps) { int32_t rc; int16_t step_direction; int16_t actual_step; int16_t next_position; uint8_t code_val; uint8_t time_out; uint8_t temp_pos; uint16_t actual_position_target; if (num_steps > MT9P012_TOTAL_STEPS_NEAR_TO_FAR) num_steps = MT9P012_TOTAL_STEPS_NEAR_TO_FAR; else if (num_steps == 0) { CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__); return -EINVAL; } if (direction == MOVE_NEAR) step_direction = -1; else if (direction == MOVE_FAR) step_direction = 1; else { 
CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__); return -EINVAL; } if (mt9p012_ctrl->curr_lens_pos < mt9p012_ctrl->init_curr_lens_pos) mt9p012_ctrl->curr_lens_pos = mt9p012_ctrl->init_curr_lens_pos; actual_step = (int16_t) (step_direction * (int16_t) num_steps); next_position = (int16_t) (mt9p012_ctrl->curr_lens_pos + actual_step); if (next_position > MT9P012_TOTAL_STEPS_NEAR_TO_FAR) next_position = MT9P012_TOTAL_STEPS_NEAR_TO_FAR; else if (next_position < 0) next_position = 0; if (num_steps >= 10) time_out = 100; else time_out = 30; code_val = next_position; actual_position_target = bam_step_lookup_table[code_val]; rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x29); if (rc < 0) return rc; temp_pos = (uint8_t) (actual_position_target >> 8); rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x05, temp_pos); if (rc < 0) return rc; temp_pos = (uint8_t) (actual_position_target & 0x00FF); /* code_val_lsb |= mode_mask; */ rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x06, temp_pos); if (rc < 0) return rc; rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0B, time_out); if (rc < 0) return rc; rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x27); if (rc < 0) return rc; mdelay(time_out); /* Storing the current lens Position */ mt9p012_ctrl->curr_lens_pos = next_position; return rc; } static int32_t mt9p012_set_default_focus(void) { int32_t rc = 0; uint8_t temp_pos; /* Write the digital code for current to the actuator */ rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x29); if (rc < 0) return rc; temp_pos = (uint8_t) (bam_infinite >> 8); rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x05, temp_pos); if (rc < 0) return rc; temp_pos = (uint8_t) (bam_infinite & 0x00FF); rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x06, temp_pos); if (rc < 0) return rc; rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0B, 0x64); if (rc < 0) return rc; rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x27); if (rc < 
0) return rc; mdelay(140); mt9p012_ctrl->curr_lens_pos = MT9P012_TOTAL_STEPS_NEAR_TO_FAR; return rc; } static int mt9p012_probe_init_done(const struct msm_camera_sensor_info *data) { gpio_direction_output(data->sensor_reset, 0); gpio_free(data->sensor_reset); return 0; } static int mt9p012_probe_init_sensor(const struct msm_camera_sensor_info *data) { int32_t rc; uint16_t chipid; rc = gpio_request(data->sensor_reset, "mt9p012"); if (!rc) gpio_direction_output(data->sensor_reset, 1); else goto init_probe_done; msleep(20); /* RESET the sensor image part via I2C command */ rc = mt9p012_i2c_write_w(mt9p012_client->addr, MT9P012_REG_RESET_REGISTER, 0x10CC | 0x0001); if (rc < 0) { CDBG("sensor reset failed. rc = %d\n", rc); goto init_probe_fail; } msleep(MT9P012_RESET_DELAY_MSECS); /* 3. Read sensor Model ID: */ rc = mt9p012_i2c_read_w(mt9p012_client->addr, MT9P012_REG_MODEL_ID, &chipid); if (rc < 0) goto init_probe_fail; /* 4. Compare sensor ID to MT9T012VC ID: */ if (chipid != MT9P012_MODEL_ID) { rc = -ENODEV; goto init_probe_fail; } rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x306E, 0x9000); if (rc < 0) { CDBG("REV_7 write failed. rc = %d\n", rc); goto init_probe_fail; } /* RESET_REGISTER, enable parallel interface and disable serialiser */ rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x301A, 0x10CC); if (rc < 0) { CDBG("enable parallel interface failed. rc = %d\n", rc); goto init_probe_fail; } /* To disable the 2 extra lines */ rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x3064, 0x0805); if (rc < 0) { CDBG("disable the 2 extra lines failed. 
rc = %d\n", rc); goto init_probe_fail; } goto init_probe_done; init_probe_fail: mt9p012_probe_init_done(data); init_probe_done: return rc; } static int mt9p012_sensor_open_init(const struct msm_camera_sensor_info *data) { int32_t rc; unsigned short temp_pos; uint8_t i; uint16_t temp; mt9p012_ctrl = kzalloc(sizeof(struct mt9p012_ctrl), GFP_KERNEL); if (!mt9p012_ctrl) { CDBG("mt9p012_init failed!\n"); rc = -ENOMEM; goto init_done; } mt9p012_ctrl->fps_divider = 1 * 0x00000400; mt9p012_ctrl->pict_fps_divider = 1 * 0x00000400; mt9p012_ctrl->set_test = TEST_OFF; mt9p012_ctrl->prev_res = QTR_SIZE; mt9p012_ctrl->pict_res = FULL_SIZE; if (data) mt9p012_ctrl->sensordata = data; msm_camio_camif_pad_reg_reset(); mdelay(20); rc = mt9p012_probe_init_sensor(data); if (rc < 0) goto init_fail1; if (mt9p012_ctrl->prev_res == QTR_SIZE) rc = mt9p012_setting(REG_INIT, RES_PREVIEW); else rc = mt9p012_setting(REG_INIT, RES_CAPTURE); if (rc < 0) { CDBG("mt9p012_setting failed. rc = %d\n", rc); goto init_fail1; } /* sensor : output enable */ rc = mt9p012_i2c_write_w(mt9p012_client->addr, MT9P012_REG_RESET_REGISTER, MT9P012_RESET_REGISTER_PWON); if (rc < 0) { CDBG("sensor output enable failed. 
rc = %d\n", rc); goto init_fail1; } /* enable AF actuator */ rc = gpio_request(mt9p012_ctrl->sensordata->vcm_pwd, "mt9p012"); if (!rc) gpio_direction_output(mt9p012_ctrl->sensordata->vcm_pwd, 1); else { CDBG("mt9p012_ctrl gpio request failed!\n"); goto init_fail1; } mdelay(20); bam_infinite = 0; bam_macro = 0; /*initialize AF actuator */ mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x09); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x2E); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0A, 0x01); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x17, 0x06); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x16, 0x0A); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x29); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x05, 0x00); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x06, 0x00); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0B, 0x64); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x27); mdelay(140); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x29); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x05, 0x03); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x06, 0xFF); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0B, 0x64); mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x27); mdelay(140); if (mt9p012_i2c_read_b(MT9P012_AF_I2C_ADDR >> 1, 0x12, &temp_pos) >= 0) { bam_infinite = (uint16_t) temp_pos; if (mt9p012_i2c_read_b (MT9P012_AF_I2C_ADDR >> 1, 0x13, &temp_pos) >= 0) bam_infinite = (bam_infinite << 8) | ((uint16_t) temp_pos); } else { bam_infinite = 100; } if (mt9p012_i2c_read_b(MT9P012_AF_I2C_ADDR >> 1, 0x14, &temp_pos) >= 0) { bam_macro = (uint16_t) temp_pos; if (mt9p012_i2c_read_b (MT9P012_AF_I2C_ADDR >> 1, 0x15, &temp_pos) >= 0) bam_macro = (bam_macro << 8) | ((uint16_t) temp_pos); } temp = (bam_infinite - bam_macro) / MT9P012_TOTAL_STEPS_NEAR_TO_FAR; for (i = 0; i < MT9P012_TOTAL_STEPS_NEAR_TO_FAR; i++) bam_step_lookup_table[i] = bam_macro + temp * i; bam_step_lookup_table[MT9P012_TOTAL_STEPS_NEAR_TO_FAR] = 
bam_infinite; rc = mt9p012_set_default_focus(); if (rc >= 0) goto init_done; init_fail1: mt9p012_probe_init_done(data); kfree(mt9p012_ctrl); init_done: return rc; } static int mt9p012_init_client(struct i2c_client *client) { /* Initialize the MSM_CAMI2C Chip */ init_waitqueue_head(&mt9p012_wait_queue); return 0; } static int32_t mt9p012_set_sensor_mode(int mode, int res) { int32_t rc = 0; switch (mode) { case SENSOR_PREVIEW_MODE: rc = mt9p012_video_config(mode, res); break; case SENSOR_SNAPSHOT_MODE: rc = mt9p012_snapshot_config(mode); break; case SENSOR_RAW_SNAPSHOT_MODE: rc = mt9p012_raw_snapshot_config(mode); break; default: rc = -EINVAL; break; } return rc; } int mt9p012_sensor_config(void __user *argp) { struct sensor_cfg_data cdata; int rc = 0; if (copy_from_user(&cdata, (void *)argp, sizeof(struct sensor_cfg_data))) return -EFAULT; mutex_lock(&mt9p012_mut); CDBG("%s: cfgtype = %d\n", __func__, cdata.cfgtype); switch (cdata.cfgtype) { case CFG_GET_PICT_FPS: mt9p012_get_pict_fps(cdata.cfg.gfps.prevfps, &(cdata.cfg.gfps.pictfps)); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PREV_L_PF: cdata.cfg.prevl_pf = mt9p012_get_prev_lines_pf(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PREV_P_PL: cdata.cfg.prevp_pl = mt9p012_get_prev_pixels_pl(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PICT_L_PF: cdata.cfg.pictl_pf = mt9p012_get_pict_lines_pf(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PICT_P_PL: cdata.cfg.pictp_pl = mt9p012_get_pict_pixels_pl(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PICT_MAX_EXP_LC: cdata.cfg.pict_max_exp_lc = mt9p012_get_pict_max_exp_lc(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; 
case CFG_SET_FPS: case CFG_SET_PICT_FPS: rc = mt9p012_set_fps(&(cdata.cfg.fps)); break; case CFG_SET_EXP_GAIN: rc = mt9p012_write_exp_gain(cdata.cfg.exp_gain.gain, cdata.cfg.exp_gain.line); break; case CFG_SET_PICT_EXP_GAIN: CDBG("Line:%d CFG_SET_PICT_EXP_GAIN \n", __LINE__); rc = mt9p012_set_pict_exp_gain(cdata.cfg.exp_gain.gain, cdata.cfg.exp_gain.line); break; case CFG_SET_MODE: rc = mt9p012_set_sensor_mode(cdata.mode, cdata.rs); break; case CFG_PWR_DOWN: rc = mt9p012_power_down(); break; case CFG_MOVE_FOCUS: CDBG("mt9p012_ioctl: CFG_MOVE_FOCUS: dir=%d steps=%d\n", cdata.cfg.focus.dir, cdata.cfg.focus.steps); rc = mt9p012_move_focus(cdata.cfg.focus.dir, cdata.cfg.focus.steps); break; case CFG_SET_DEFAULT_FOCUS: rc = mt9p012_set_default_focus(); break; case CFG_SET_EFFECT: rc = mt9p012_set_default_focus(); break; case CFG_SET_LENS_SHADING: CDBG("%s: CFG_SET_LENS_SHADING\n", __func__); rc = mt9p012_lens_shading_enable(cdata.cfg.lens_shading); break; case CFG_GET_AF_MAX_STEPS: cdata.max_steps = MT9P012_STEPS_NEAR_TO_CLOSEST_INF; if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; default: rc = -EINVAL; break; } mutex_unlock(&mt9p012_mut); return rc; } int mt9p012_sensor_release(void) { int rc = -EBADF; mutex_lock(&mt9p012_mut); mt9p012_power_down(); gpio_direction_output(mt9p012_ctrl->sensordata->sensor_reset, 0); gpio_free(mt9p012_ctrl->sensordata->sensor_reset); gpio_direction_output(mt9p012_ctrl->sensordata->vcm_pwd, 0); gpio_free(mt9p012_ctrl->sensordata->vcm_pwd); kfree(mt9p012_ctrl); mt9p012_ctrl = NULL; CDBG("mt9p012_release completed\n"); mutex_unlock(&mt9p012_mut); return rc; } static int mt9p012_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; CDBG("mt9p012_probe called!\n"); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { CDBG("i2c_check_functionality failed\n"); goto probe_failure; } mt9p012_sensorw = kzalloc(sizeof(struct mt9p012_work), GFP_KERNEL); if 
(!mt9p012_sensorw) { CDBG("kzalloc failed.\n"); rc = -ENOMEM; goto probe_failure; } i2c_set_clientdata(client, mt9p012_sensorw); mt9p012_init_client(client); mt9p012_client = client; mdelay(50); CDBG("mt9p012_probe successed! rc = %d\n", rc); return 0; probe_failure: CDBG("mt9p012_probe failed! rc = %d\n", rc); return rc; } static int __exit mt9p012_remove(struct i2c_client *client) { struct mt9p012_work_t *sensorw = i2c_get_clientdata(client); free_irq(client->irq, sensorw); mt9p012_client = NULL; kfree(sensorw); return 0; } static const struct i2c_device_id mt9p012_i2c_id[] = { {"mt9p012", 0} }; static struct i2c_driver mt9p012_i2c_driver = { .id_table = mt9p012_i2c_id, .probe = mt9p012_i2c_probe, .remove = __exit_p(mt9p012_i2c_remove), .driver = { .name = "mt9p012", }, }; static int mt9p012_sensor_probe(const struct msm_camera_sensor_info *info, struct msm_sensor_ctrl *s) { int rc = i2c_add_driver(&mt9p012_i2c_driver); if (rc < 0 || mt9p012_client == NULL) { rc = -ENOTSUPP; goto probe_done; } msm_camio_clk_rate_set(MT9P012_DEFAULT_CLOCK_RATE); mdelay(20); rc = mt9p012_probe_init_sensor(info); if (rc < 0) goto probe_done; s->s_init = mt9p012_sensor_open_init; s->s_release = mt9p012_sensor_release; s->s_config = mt9p012_sensor_config; s->s_mount_angle = 0; mt9p012_probe_init_done(info); probe_done: CDBG("%s %s:%d\n", __FILE__, __func__, __LINE__); return rc; } static int __mt9p012_probe(struct platform_device *pdev) { return msm_camera_drv_start(pdev, mt9p012_sensor_probe); } static struct platform_driver msm_camera_driver = { .probe = __mt9p012_probe, .driver = { .name = "msm_camera_mt9p012", .owner = THIS_MODULE, }, }; static int __init mt9p012_init(void) { return platform_driver_register(&msm_camera_driver); } module_init(mt9p012_init); void mt9p012_exit(void) { i2c_del_driver(&mt9p012_i2c_driver); }
gpl-2.0
DirtyUnicorns/android_kernel_lge_gee
net/dcb/dcbnl.c
1513
57044
/* * Copyright (c) 2008-2011, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Author: Lucy Liu <lucy.liu@intel.com> */ #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/slab.h> #include <net/netlink.h> #include <net/rtnetlink.h> #include <linux/dcbnl.h> #include <net/dcbevent.h> #include <linux/rtnetlink.h> #include <linux/module.h> #include <net/sock.h> /** * Data Center Bridging (DCB) is a collection of Ethernet enhancements * intended to allow network traffic with differing requirements * (highly reliable, no drops vs. best effort vs. low latency) to operate * and co-exist on Ethernet. Current DCB features are: * * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a * framework for assigning bandwidth guarantees to traffic classes. * * Priority-based Flow Control (PFC) - provides a flow control mechanism which * can work independently for each 802.1p priority. * * Congestion Notification - provides a mechanism for end-to-end congestion * control for protocols which do not have built-in congestion management. * * More information about the emerging standards for these Ethernet features * can be found at: http://www.ieee802.org/1/pages/dcbridges.html * * This file implements an rtnetlink interface to allow configuration of DCB * features for capable devices. 
*/ MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>"); MODULE_DESCRIPTION("Data Center Bridging netlink interface"); MODULE_LICENSE("GPL"); /**************** DCB attribute policies *************************************/ /* DCB netlink attributes policy */ static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = { [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1}, [DCB_ATTR_STATE] = {.type = NLA_U8}, [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED}, [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED}, [DCB_ATTR_SET_ALL] = {.type = NLA_U8}, [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG}, [DCB_ATTR_CAP] = {.type = NLA_NESTED}, [DCB_ATTR_PFC_STATE] = {.type = NLA_U8}, [DCB_ATTR_BCN] = {.type = NLA_NESTED}, [DCB_ATTR_APP] = {.type = NLA_NESTED}, [DCB_ATTR_IEEE] = {.type = NLA_NESTED}, [DCB_ATTR_DCBX] = {.type = NLA_U8}, [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED}, }; /* DCB priority flow control to User Priority nested attributes */ static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = { [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG}, }; /* DCB priority grouping nested attributes */ static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = { [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED}, [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_2] 
= {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG}, }; /* DCB traffic class nested attributes. */ static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = { [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG}, }; /* DCB capabilities nested attributes. */ static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = { [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_CAP_ATTR_PG] = {.type = NLA_U8}, [DCB_CAP_ATTR_PFC] = {.type = NLA_U8}, [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8}, [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8}, [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8}, [DCB_CAP_ATTR_GSP] = {.type = NLA_U8}, [DCB_CAP_ATTR_BCN] = {.type = NLA_U8}, [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8}, }; /* DCB capabilities nested attributes. */ static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = { [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8}, [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8}, }; /* DCB BCN nested attributes. 
*/ static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = { [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG}, [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32}, [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32}, [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32}, [DCB_BCN_ATTR_BETA] = {.type = NLA_U32}, [DCB_BCN_ATTR_GD] = {.type = NLA_U32}, [DCB_BCN_ATTR_GI] = {.type = NLA_U32}, [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32}, [DCB_BCN_ATTR_TD] = {.type = NLA_U32}, [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32}, [DCB_BCN_ATTR_W] = {.type = NLA_U32}, [DCB_BCN_ATTR_RD] = {.type = NLA_U32}, [DCB_BCN_ATTR_RU] = {.type = NLA_U32}, [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32}, [DCB_BCN_ATTR_RI] = {.type = NLA_U32}, [DCB_BCN_ATTR_C] = {.type = NLA_U32}, [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG}, }; /* DCB APP nested attributes. */ static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = { [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8}, [DCB_APP_ATTR_ID] = {.type = NLA_U16}, [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8}, }; /* IEEE 802.1Qaz nested attributes. */ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = { [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)}, [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)}, [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED}, }; static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = { [DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)}, }; /* DCB number of traffic classes nested attributes. 
*/ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = { [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8}, }; static LIST_HEAD(dcb_app_list); static DEFINE_SPINLOCK(dcb_lock); /* standard netlink reply call */ static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid, u32 seq, u16 flags) { struct sk_buff *dcbnl_skb; struct dcbmsg *dcb; struct nlmsghdr *nlh; int ret = -EINVAL; dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!dcbnl_skb) return ret; nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags); dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = cmd; dcb->dcb_pad = 0; ret = nla_put_u8(dcbnl_skb, attr, value); if (ret) goto err; /* end the message, assign the nlmsg_len. */ nlmsg_end(dcbnl_skb, nlh); ret = rtnl_unicast(dcbnl_skb, &init_net, pid); if (ret) return -EINVAL; return 0; nlmsg_failure: err: kfree_skb(dcbnl_skb); return ret; } static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { int ret = -EINVAL; /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ if (!netdev->dcbnl_ops->getstate) return ret; ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB, DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags); return ret; } static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct sk_buff *dcbnl_skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest; u8 value; int ret = -EINVAL; int i; int getall = 0; if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg) return ret; ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest); if (ret) goto err_out; dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!dcbnl_skb) goto err_out; nlh = NLMSG_NEW(dcbnl_skb, pid, seq, 
RTM_GETDCB, sizeof(*dcb), flags); dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = DCB_CMD_PFC_GCFG; nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG); if (!nest) goto err; if (data[DCB_PFC_UP_ATTR_ALL]) getall = 1; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (!getall && !data[i]) continue; netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); ret = nla_put_u8(dcbnl_skb, i, value); if (ret) { nla_nest_cancel(dcbnl_skb, nest); goto err; } } nla_nest_end(dcbnl_skb, nest); nlmsg_end(dcbnl_skb, nlh); ret = rtnl_unicast(dcbnl_skb, &init_net, pid); if (ret) goto err_out; return 0; nlmsg_failure: err: kfree_skb(dcbnl_skb); err_out: return -EINVAL; } static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct sk_buff *dcbnl_skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; u8 perm_addr[MAX_ADDR_LEN]; int ret = -EINVAL; if (!netdev->dcbnl_ops->getpermhwaddr) return ret; dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!dcbnl_skb) goto err_out; nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = DCB_CMD_GPERM_HWADDR; memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); nlmsg_end(dcbnl_skb, nlh); ret = rtnl_unicast(dcbnl_skb, &init_net, pid); if (ret) goto err_out; return 0; nlmsg_failure: kfree_skb(dcbnl_skb); err_out: return -EINVAL; } static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct sk_buff *dcbnl_skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest; u8 value; int ret = -EINVAL; int i; int getall = 0; if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap) return ret; ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], dcbnl_cap_nest); if (ret) goto 
err_out; dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!dcbnl_skb) goto err_out; nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = DCB_CMD_GCAP; nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP); if (!nest) goto err; if (data[DCB_CAP_ATTR_ALL]) getall = 1; for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) { if (!getall && !data[i]) continue; if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) { ret = nla_put_u8(dcbnl_skb, i, value); if (ret) { nla_nest_cancel(dcbnl_skb, nest); goto err; } } } nla_nest_end(dcbnl_skb, nest); nlmsg_end(dcbnl_skb, nlh); ret = rtnl_unicast(dcbnl_skb, &init_net, pid); if (ret) goto err_out; return 0; nlmsg_failure: err: kfree_skb(dcbnl_skb); err_out: return -EINVAL; } static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct sk_buff *dcbnl_skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest; u8 value; int ret = -EINVAL; int i; int getall = 0; if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs) return ret; ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest); if (ret) { ret = -EINVAL; goto err_out; } dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!dcbnl_skb) { ret = -EINVAL; goto err_out; } nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = DCB_CMD_GNUMTCS; nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS); if (!nest) { ret = -EINVAL; goto err; } if (data[DCB_NUMTCS_ATTR_ALL]) getall = 1; for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (!getall && !data[i]) continue; ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value); if (!ret) { ret = nla_put_u8(dcbnl_skb, i, value); if (ret) { nla_nest_cancel(dcbnl_skb, nest); ret = -EINVAL; goto err; } } else { goto err; } } 
nla_nest_end(dcbnl_skb, nest); nlmsg_end(dcbnl_skb, nlh); ret = rtnl_unicast(dcbnl_skb, &init_net, pid); if (ret) { ret = -EINVAL; goto err_out; } return 0; nlmsg_failure: err: kfree_skb(dcbnl_skb); err_out: return ret; } static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1]; int ret = -EINVAL; u8 value; int i; if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs) return ret; ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest); if (ret) { ret = -EINVAL; goto err; } for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value); if (ret) goto operr; } operr: ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS, DCB_ATTR_NUMTCS, pid, seq, flags); err: return ret; } static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { int ret = -EINVAL; if (!netdev->dcbnl_ops->getpfcstate) return ret; ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB, DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE, pid, seq, flags); return ret; } static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { int ret = -EINVAL; u8 value; if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate) return ret; value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); netdev->dcbnl_ops->setpfcstate(netdev, value); ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE, pid, seq, flags); return ret; } static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct sk_buff *dcbnl_skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; struct nlattr *app_nest; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; u16 id; u8 up, idtype; int ret = -EINVAL; if (!tb[DCB_ATTR_APP]) goto out; ret = nla_parse_nested(app_tb, 
DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest); if (ret) goto out; ret = -EINVAL; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID])) goto out; /* either by eth type or by socket number */ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) goto out; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); if (netdev->dcbnl_ops->getapp) { up = netdev->dcbnl_ops->getapp(netdev, idtype, id); } else { struct dcb_app app = { .selector = idtype, .protocol = id, }; up = dcb_getapp(netdev, &app); } /* send this back */ dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!dcbnl_skb) goto out; nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = DCB_CMD_GAPP; app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); if (!app_nest) goto out_cancel; ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); if (ret) goto out_cancel; ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id); if (ret) goto out_cancel; ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up); if (ret) goto out_cancel; nla_nest_end(dcbnl_skb, app_nest); nlmsg_end(dcbnl_skb, nlh); ret = rtnl_unicast(dcbnl_skb, &init_net, pid); if (ret) goto nlmsg_failure; goto out; out_cancel: nla_nest_cancel(dcbnl_skb, app_nest); nlmsg_failure: kfree_skb(dcbnl_skb); out: return ret; } static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { int err, ret = -EINVAL; u16 id; u8 up, idtype; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; if (!tb[DCB_ATTR_APP]) goto out; ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest); if (ret) goto out; ret = -EINVAL; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID]) || (!app_tb[DCB_APP_ATTR_PRIORITY])) goto out; /* either by eth type or by socket number */ idtype = 
nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) goto out; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); if (netdev->dcbnl_ops->setapp) { err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up); } else { struct dcb_app app; app.selector = idtype; app.protocol = id; app.priority = up; err = dcb_setapp(netdev, &app); } ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP, pid, seq, flags); out: return ret; } static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags, int dir) { struct sk_buff *dcbnl_skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; struct nlattr *pg_nest, *param_nest, *data; struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; u8 prio, pgid, tc_pct, up_map; int ret = -EINVAL; int getall = 0; int i; if (!tb[DCB_ATTR_PG_CFG] || !netdev->dcbnl_ops->getpgtccfgtx || !netdev->dcbnl_ops->getpgtccfgrx || !netdev->dcbnl_ops->getpgbwgcfgtx || !netdev->dcbnl_ops->getpgbwgcfgrx) return ret; ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); if (ret) goto err_out; dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!dcbnl_skb) goto err_out; nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = (dir) ? 
DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG; pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG); if (!pg_nest) goto err; if (pg_tb[DCB_PG_ATTR_TC_ALL]) getall = 1; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!getall && !pg_tb[i]) continue; if (pg_tb[DCB_PG_ATTR_TC_ALL]) data = pg_tb[DCB_PG_ATTR_TC_ALL]; else data = pg_tb[i]; ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, data, dcbnl_tc_param_nest); if (ret) goto err_pg; param_nest = nla_nest_start(dcbnl_skb, i); if (!param_nest) goto err_pg; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } else { /* Tx */ netdev->dcbnl_ops->getpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } if (param_tb[DCB_TC_ATTR_PARAM_PGID] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_PGID, pgid); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); if (ret) goto err_param; } nla_nest_end(dcbnl_skb, param_nest); } if (pg_tb[DCB_PG_ATTR_BW_ID_ALL]) getall = 1; else getall = 0; for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!getall && !pg_tb[i]) continue; tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } else { /* Tx */ netdev->dcbnl_ops->getpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } ret = 
nla_put_u8(dcbnl_skb, i, tc_pct); if (ret) goto err_pg; } nla_nest_end(dcbnl_skb, pg_nest); nlmsg_end(dcbnl_skb, nlh); ret = rtnl_unicast(dcbnl_skb, &init_net, pid); if (ret) goto err_out; return 0; err_param: nla_nest_cancel(dcbnl_skb, param_nest); err_pg: nla_nest_cancel(dcbnl_skb, pg_nest); nlmsg_failure: err: kfree_skb(dcbnl_skb); err_out: ret = -EINVAL; return ret; } static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0); } static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1); } static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { int ret = -EINVAL; u8 value; if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate) return ret; value = nla_get_u8(tb[DCB_ATTR_STATE]); ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value), RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE, pid, seq, flags); return ret; } static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1]; int i; int ret = -EINVAL; u8 value; if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg) return ret; ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest); if (ret) goto err; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); netdev->dcbnl_ops->setpfccfg(netdev, data[i]->nla_type - DCB_PFC_UP_ATTR_0, value); } ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG, pid, seq, flags); err: return ret; } static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { int ret = -EINVAL; if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall) return ret; ret = 
dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB, DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags); return ret; } static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags, int dir) { struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; int ret = -EINVAL; int i; u8 pgid; u8 up_map; u8 prio; u8 tc_pct; if (!tb[DCB_ATTR_PG_CFG] || !netdev->dcbnl_ops->setpgtccfgtx || !netdev->dcbnl_ops->setpgtccfgrx || !netdev->dcbnl_ops->setpgbwgcfgtx || !netdev->dcbnl_ops->setpgbwgcfgrx) return ret; ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); if (ret) goto err; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!pg_tb[i]) continue; ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, pg_tb[i], dcbnl_tc_param_nest); if (ret) goto err; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]) prio = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]); if (param_tb[DCB_TC_ATTR_PARAM_PGID]) pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]); if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT]) tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]); if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]) up_map = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } else { /* Tx */ netdev->dcbnl_ops->setpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!pg_tb[i]) continue; tc_pct = nla_get_u8(pg_tb[i]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } else { /* Tx */ netdev->dcbnl_ops->setpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } } 
ret = dcbnl_reply(0, RTM_SETDCB, (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG), DCB_ATTR_PG_CFG, pid, seq, flags); err: return ret; } static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0); } static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1); } static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct sk_buff *dcbnl_skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; struct nlattr *bcn_nest; struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1]; u8 value_byte; u32 value_integer; int ret = -EINVAL; bool getall = false; int i; if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp || !netdev->dcbnl_ops->getbcncfg) return ret; ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_bcn_nest); if (ret) goto err_out; dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!dcbnl_skb) goto err_out; nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = DCB_CMD_BCN_GCFG; bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN); if (!bcn_nest) goto err; if (bcn_tb[DCB_BCN_ATTR_ALL]) getall = true; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0, &value_byte); ret = nla_put_u8(dcbnl_skb, i, value_byte); if (ret) goto err_bcn; } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcncfg(netdev, i, &value_integer); ret = nla_put_u32(dcbnl_skb, i, value_integer); if (ret) goto err_bcn; } nla_nest_end(dcbnl_skb, bcn_nest); nlmsg_end(dcbnl_skb, nlh); ret = rtnl_unicast(dcbnl_skb, &init_net, pid); if (ret) goto err_out; return 0; err_bcn: 
nla_nest_cancel(dcbnl_skb, bcn_nest); nlmsg_failure: err: kfree_skb(dcbnl_skb); err_out: ret = -EINVAL; return ret; } static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct nlattr *data[DCB_BCN_ATTR_MAX + 1]; int i; int ret = -EINVAL; u8 value_byte; u32 value_int; if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg || !netdev->dcbnl_ops->setbcnrp) return ret; ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest); if (ret) goto err; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (data[i] == NULL) continue; value_byte = nla_get_u8(data[i]); netdev->dcbnl_ops->setbcnrp(netdev, data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte); } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (data[i] == NULL) continue; value_int = nla_get_u32(data[i]); netdev->dcbnl_ops->setbcncfg(netdev, i, value_int); } ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN, pid, seq, flags); err: return ret; } static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb, int app_nested_type, int app_info_type, int app_entry_type) { struct dcb_peer_app_info info; struct dcb_app *table = NULL; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; u16 app_count; int err; /** * retrieve the peer app configuration form the driver. 
If the driver * handlers fail exit without doing anything */ err = ops->peer_getappinfo(netdev, &info, &app_count); if (!err && app_count) { table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL); if (!table) return -ENOMEM; err = ops->peer_getapptable(netdev, table); } if (!err) { u16 i; struct nlattr *app; /** * build the message, from here on the only possible failure * is due to the skb size */ err = -EMSGSIZE; app = nla_nest_start(skb, app_nested_type); if (!app) goto nla_put_failure; if (app_info_type) NLA_PUT(skb, app_info_type, sizeof(info), &info); for (i = 0; i < app_count; i++) NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app), &table[i]); nla_nest_end(skb, app); } err = 0; nla_put_failure: kfree(table); return err; } /* Handle IEEE 802.1Qaz GET commands. */ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) { struct nlattr *ieee, *app; struct dcb_app_type *itr; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; int dcbx; int err = -EMSGSIZE; NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); ieee = nla_nest_start(skb, DCB_ATTR_IEEE); if (!ieee) goto nla_put_failure; if (ops->ieee_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_getets(netdev, &ets); if (!err) NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets); } if (ops->ieee_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_getpfc(netdev, &pfc); if (!err) NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc); } app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE); if (!app) goto nla_put_failure; spin_lock(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), &itr->app); if (err) { spin_unlock(&dcb_lock); goto nla_put_failure; } } } if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock(&dcb_lock); nla_nest_end(skb, app); /* get peer info if available */ if 
(ops->ieee_peer_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_peer_getets(netdev, &ets); if (!err) NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets); } if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_peer_getpfc(netdev, &pfc); if (!err) NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc); } if (ops->peer_getappinfo && ops->peer_getapptable) { err = dcbnl_build_peer_app(netdev, skb, DCB_ATTR_IEEE_PEER_APP, DCB_ATTR_IEEE_APP_UNSPEC, DCB_ATTR_IEEE_APP); if (err) goto nla_put_failure; } nla_nest_end(skb, ieee); if (dcbx >= 0) { err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx); if (err) goto nla_put_failure; } return 0; nla_put_failure: return err; } static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, int dir) { u8 pgid, up_map, prio, tc_pct; const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG; struct nlattr *pg = nla_nest_start(skb, i); if (!pg) goto nla_put_failure; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { struct nlattr *tc_nest = nla_nest_start(skb, i); if (!tc_nest) goto nla_put_failure; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); else ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid); NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); nla_nest_end(skb, tc_nest); } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); else ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); NLA_PUT_U8(skb, i, tc_pct); } nla_nest_end(skb, 
pg); return 0; nla_put_failure: return -EMSGSIZE; } static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) { struct nlattr *cee, *app; struct dcb_app_type *itr; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; int dcbx, i, err = -EMSGSIZE; u8 value; NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); cee = nla_nest_start(skb, DCB_ATTR_CEE); if (!cee) goto nla_put_failure; /* local pg */ if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) { err = dcbnl_cee_pg_fill(skb, netdev, 1); if (err) goto nla_put_failure; } if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) { err = dcbnl_cee_pg_fill(skb, netdev, 0); if (err) goto nla_put_failure; } /* local pfc */ if (ops->getpfccfg) { struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC); if (!pfc_nest) goto nla_put_failure; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); NLA_PUT_U8(skb, i, value); } nla_nest_end(skb, pfc_nest); } /* local app */ spin_lock(&dcb_lock); app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); if (!app) goto dcb_unlock; list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { struct nlattr *app_nest = nla_nest_start(skb, DCB_ATTR_APP); if (!app_nest) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, itr->app.selector); if (err) goto dcb_unlock; err = nla_put_u16(skb, DCB_APP_ATTR_ID, itr->app.protocol); if (err) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, itr->app.priority); if (err) goto dcb_unlock; nla_nest_end(skb, app_nest); } } nla_nest_end(skb, app); if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock(&dcb_lock); /* features flags */ if (ops->getfeatcfg) { struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT); if (!feat) goto nla_put_failure; for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) if (!ops->getfeatcfg(netdev, i, &value)) NLA_PUT_U8(skb, i, value); 
nla_nest_end(skb, feat); } /* peer info if available */ if (ops->cee_peer_getpg) { struct cee_pg pg; memset(&pg, 0, sizeof(pg)); err = ops->cee_peer_getpg(netdev, &pg); if (!err) NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg); } if (ops->cee_peer_getpfc) { struct cee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->cee_peer_getpfc(netdev, &pfc); if (!err) NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc); } if (ops->peer_getappinfo && ops->peer_getapptable) { err = dcbnl_build_peer_app(netdev, skb, DCB_ATTR_CEE_PEER_APP_TABLE, DCB_ATTR_CEE_PEER_APP_INFO, DCB_ATTR_CEE_PEER_APP); if (err) goto nla_put_failure; } nla_nest_end(skb, cee); /* DCBX state */ if (dcbx >= 0) { err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx); if (err) goto nla_put_failure; } return 0; dcb_unlock: spin_unlock(&dcb_lock); nla_put_failure: return err; } static int dcbnl_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 pid, int dcbx_ver) { struct net *net = dev_net(dev); struct sk_buff *skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; int err; if (!ops) return -EOPNOTSUPP; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0); if (nlh == NULL) { nlmsg_free(skb); return -EMSGSIZE; } dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = cmd; if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE) err = dcbnl_ieee_fill(skb, dev); else err = dcbnl_cee_fill(skb, dev); if (err < 0) { /* Report error to broadcast listeners */ nlmsg_cancel(skb, nlh); kfree_skb(skb); rtnl_set_sk_err(net, RTNLGRP_DCB, err); } else { /* End nlmsg and notify broadcast listeners */ nlmsg_end(skb, nlh); rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL); } return err; } int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 pid) { return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE); } EXPORT_SYMBOL(dcbnl_ieee_notify); int dcbnl_cee_notify(struct 
net_device *dev, int event, int cmd, u32 seq, u32 pid) { return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE); } EXPORT_SYMBOL(dcbnl_cee_notify); /* Handle IEEE 802.1Qaz SET commands. If any requested operation can not * be completed the entire msg is aborted and error value is returned. * No attempt is made to reconcile the case where only part of the * cmd can be completed. */ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; int err = -EOPNOTSUPP; if (!ops) return err; if (!tb[DCB_ATTR_IEEE]) return -EINVAL; err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); if (err) return err; if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) { struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]); err = ops->ieee_setets(netdev, ets); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); err = ops->ieee_setpfc(netdev, pfc); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { struct nlattr *attr; int rem; nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) { struct dcb_app *app_data; if (nla_type(attr) != DCB_ATTR_IEEE_APP) continue; app_data = nla_data(attr); if (ops->ieee_setapp) err = ops->ieee_setapp(netdev, app_data); else err = dcb_ieee_setapp(netdev, app_data); if (err) goto err; } } err: dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE, pid, seq, flags); dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0); return err; } static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct net *net = dev_net(netdev); struct sk_buff *skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; int err; if (!ops) return -EOPNOTSUPP; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, 
GFP_KERNEL); if (!skb) return -ENOBUFS; nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); if (nlh == NULL) { nlmsg_free(skb); return -EMSGSIZE; } dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = DCB_CMD_IEEE_GET; err = dcbnl_ieee_fill(skb, netdev); if (err < 0) { nlmsg_cancel(skb, nlh); kfree_skb(skb); } else { nlmsg_end(skb, nlh); err = rtnl_unicast(skb, net, pid); } return err; } static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; int err = -EOPNOTSUPP; if (!ops) return -EOPNOTSUPP; if (!tb[DCB_ATTR_IEEE]) return -EINVAL; err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); if (err) return err; if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { struct nlattr *attr; int rem; nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) { struct dcb_app *app_data; if (nla_type(attr) != DCB_ATTR_IEEE_APP) continue; app_data = nla_data(attr); if (ops->ieee_delapp) err = ops->ieee_delapp(netdev, app_data); else err = dcb_ieee_delapp(netdev, app_data); if (err) goto err; } } err: dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE, pid, seq, flags); dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0); return err; } /* DCBX configuration */ static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { int ret; if (!netdev->dcbnl_ops->getdcbx) return -EOPNOTSUPP; ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB, DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags); return ret; } static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { int ret; u8 value; if (!netdev->dcbnl_ops->setdcbx) return -EOPNOTSUPP; if (!tb[DCB_ATTR_DCBX]) return -EINVAL; value = nla_get_u8(tb[DCB_ATTR_DCBX]); ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value), RTM_SETDCB, 
DCB_CMD_SDCBX, DCB_ATTR_DCBX, pid, seq, flags); return ret; } static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct sk_buff *dcbnl_skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest; u8 value; int ret, i; int getall = 0; if (!netdev->dcbnl_ops->getfeatcfg) return -EOPNOTSUPP; if (!tb[DCB_ATTR_FEATCFG]) return -EINVAL; ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest); if (ret) goto err_out; dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!dcbnl_skb) { ret = -ENOBUFS; goto err_out; } nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = DCB_CMD_GFEATCFG; nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG); if (!nest) { ret = -EMSGSIZE; goto nla_put_failure; } if (data[DCB_FEATCFG_ATTR_ALL]) getall = 1; for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { if (!getall && !data[i]) continue; ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value); if (!ret) ret = nla_put_u8(dcbnl_skb, i, value); if (ret) { nla_nest_cancel(dcbnl_skb, nest); goto nla_put_failure; } } nla_nest_end(dcbnl_skb, nest); nlmsg_end(dcbnl_skb, nlh); return rtnl_unicast(dcbnl_skb, &init_net, pid); nla_put_failure: nlmsg_cancel(dcbnl_skb, nlh); nlmsg_failure: kfree_skb(dcbnl_skb); err_out: return ret; } static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1]; int ret, i; u8 value; if (!netdev->dcbnl_ops->setfeatcfg) return -ENOTSUPP; if (!tb[DCB_ATTR_FEATCFG]) return -EINVAL; ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest); if (ret) goto err; for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); ret = netdev->dcbnl_ops->setfeatcfg(netdev, 
i, value); if (ret) goto err; } err: dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG, pid, seq, flags); return ret; } /* Handle CEE DCBX GET commands. */ static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb, u32 pid, u32 seq, u16 flags) { struct net *net = dev_net(netdev); struct sk_buff *skb; struct nlmsghdr *nlh; struct dcbmsg *dcb; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; int err; if (!ops) return -EOPNOTSUPP; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); if (nlh == NULL) { nlmsg_free(skb); return -EMSGSIZE; } dcb = NLMSG_DATA(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = DCB_CMD_CEE_GET; err = dcbnl_cee_fill(skb, netdev); if (err < 0) { nlmsg_cancel(skb, nlh); nlmsg_free(skb); } else { nlmsg_end(skb, nlh); err = rtnl_unicast(skb, net, pid); } return err; } static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct net_device *netdev; struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh); struct nlattr *tb[DCB_ATTR_MAX + 1]; u32 pid = skb ? 
NETLINK_CB(skb).pid : 0; int ret = -EINVAL; if (!net_eq(net, &init_net)) return -EINVAL; ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, dcbnl_rtnl_policy); if (ret < 0) return ret; if (!tb[DCB_ATTR_IFNAME]) return -EINVAL; netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME])); if (!netdev) return -EINVAL; if (!netdev->dcbnl_ops) goto errout; switch (dcb->cmd) { case DCB_CMD_GSTATE: ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_PFC_GCFG: ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_GPERM_HWADDR: ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_PGTX_GCFG: ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_PGRX_GCFG: ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_BCN_GCFG: ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_SSTATE: ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_PFC_SCFG: ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_SET_ALL: ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_PGTX_SCFG: ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_PGRX_SCFG: ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_GCAP: ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_GNUMTCS: ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_SNUMTCS: ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_PFC_GSTATE: ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; 
case DCB_CMD_PFC_SSTATE: ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_BCN_SCFG: ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_GAPP: ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_SAPP: ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_IEEE_SET: ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_IEEE_GET: ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_IEEE_DEL: ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_GDCBX: ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_SDCBX: ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_GFEATCFG: ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_SFEATCFG: ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; case DCB_CMD_CEE_GET: ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq, nlh->nlmsg_flags); goto out; default: goto errout; } errout: ret = -EINVAL; out: dev_put(netdev); return ret; } /** * dcb_getapp - retrieve the DCBX application user priority * * On success returns a non-zero 802.1p user priority bitmap * otherwise returns 0 as the invalid user priority bitmap to * indicate an error. 
*/ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) { struct dcb_app_type *itr; u8 prio = 0; spin_lock(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->app.selector == app->selector && itr->app.protocol == app->protocol && itr->ifindex == dev->ifindex) { prio = itr->app.priority; break; } } spin_unlock(&dcb_lock); return prio; } EXPORT_SYMBOL(dcb_getapp); /** * dcb_setapp - add CEE dcb application data to app list * * Priority 0 is an invalid priority in CEE spec. This routine * removes applications from the app list if the priority is * set to zero. */ int dcb_setapp(struct net_device *dev, struct dcb_app *new) { struct dcb_app_type *itr; struct dcb_app_type event; event.ifindex = dev->ifindex; memcpy(&event.app, new, sizeof(event.app)); if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); spin_lock(&dcb_lock); /* Search for existing match and replace */ list_for_each_entry(itr, &dcb_app_list, list) { if (itr->app.selector == new->selector && itr->app.protocol == new->protocol && itr->ifindex == dev->ifindex) { if (new->priority) itr->app.priority = new->priority; else { list_del(&itr->list); kfree(itr); } goto out; } } /* App type does not exist add new application type */ if (new->priority) { struct dcb_app_type *entry; entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC); if (!entry) { spin_unlock(&dcb_lock); return -ENOMEM; } memcpy(&entry->app, new, sizeof(*new)); entry->ifindex = dev->ifindex; list_add(&entry->list, &dcb_app_list); } out: spin_unlock(&dcb_lock); call_dcbevent_notifiers(DCB_APP_EVENT, &event); return 0; } EXPORT_SYMBOL(dcb_setapp); /** * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority * * Helper routine which on success returns a non-zero 802.1Qaz user * priority bitmap otherwise returns 0 to indicate the dcb_app was * not found in APP list. 
*/ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) { struct dcb_app_type *itr; u8 prio = 0; spin_lock(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->app.selector == app->selector && itr->app.protocol == app->protocol && itr->ifindex == dev->ifindex) { prio |= 1 << itr->app.priority; } } spin_unlock(&dcb_lock); return prio; } EXPORT_SYMBOL(dcb_ieee_getapp_mask); /** * dcb_ieee_setapp - add IEEE dcb application data to app list * * This adds Application data to the list. Multiple application * entries may exists for the same selector and protocol as long * as the priorities are different. */ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) { struct dcb_app_type *itr, *entry; struct dcb_app_type event; int err = 0; event.ifindex = dev->ifindex; memcpy(&event.app, new, sizeof(event.app)); if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); spin_lock(&dcb_lock); /* Search for existing match and abort if found */ list_for_each_entry(itr, &dcb_app_list, list) { if (itr->app.selector == new->selector && itr->app.protocol == new->protocol && itr->app.priority == new->priority && itr->ifindex == dev->ifindex) { err = -EEXIST; goto out; } } /* App entry does not exist add new entry */ entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC); if (!entry) { err = -ENOMEM; goto out; } memcpy(&entry->app, new, sizeof(*new)); entry->ifindex = dev->ifindex; list_add(&entry->list, &dcb_app_list); out: spin_unlock(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; } EXPORT_SYMBOL(dcb_ieee_setapp); /** * dcb_ieee_delapp - delete IEEE dcb application data from list * * This removes a matching APP data from the APP list */ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) { struct dcb_app_type *itr; struct dcb_app_type event; int err = -ENOENT; event.ifindex = dev->ifindex; memcpy(&event.app, del, sizeof(event.app)); if (dev->dcbnl_ops->getdcbx) event.dcbx = 
dev->dcbnl_ops->getdcbx(dev); spin_lock(&dcb_lock); /* Search for existing match and remove it. */ list_for_each_entry(itr, &dcb_app_list, list) { if (itr->app.selector == del->selector && itr->app.protocol == del->protocol && itr->app.priority == del->priority && itr->ifindex == dev->ifindex) { list_del(&itr->list); kfree(itr); err = 0; goto out; } } out: spin_unlock(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; } EXPORT_SYMBOL(dcb_ieee_delapp); static void dcb_flushapp(void) { struct dcb_app_type *app; struct dcb_app_type *tmp; spin_lock(&dcb_lock); list_for_each_entry_safe(app, tmp, &dcb_app_list, list) { list_del(&app->list); kfree(app); } spin_unlock(&dcb_lock); } static int __init dcbnl_init(void) { INIT_LIST_HEAD(&dcb_app_list); rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL); return 0; } module_init(dcbnl_init); static void __exit dcbnl_exit(void) { rtnl_unregister(PF_UNSPEC, RTM_GETDCB); rtnl_unregister(PF_UNSPEC, RTM_SETDCB); dcb_flushapp(); } module_exit(dcbnl_exit);
gpl-2.0
IADcodes/cubebone_kernel_shw-m250s
net/sctp/transport.c
1769
19888
/* SCTP kernel implementation * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 International Business Machines Corp. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * This module provides the abstraction for an SCTP tranport representing * a remote transport address. For local transport addresses, we just use * union sctp_addr. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Hui Huang <hui.huang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. 
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.
 * Returns @peer on success.  The transport starts with refcnt == 1,
 * owned by the caller and released via sctp_transport_put().
 */
static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address. */
	peer->ipaddr = *addr;
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	/* No source address is known yet; it is filled in later when the
	 * transport is routed (see sctp_transport_route()).
	 */
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(sctp_rto_initial);

	peer->last_time_heard = jiffies;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans. */
	peer->pathmaxrxt = sctp_max_retrans_path;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	/* Timers are armed later (e.g. sctp_transport_reset_timers());
	 * here they are only initialized with their callbacks.
	 */
	setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
			(unsigned long)peer);
	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
			(unsigned long)peer);
	setup_timer(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, (unsigned long)peer);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	/* Initial reference belongs to the caller. */
	atomic_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.
 */
struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = t_new(struct sctp_transport, gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(transport, addr, gfp))
		goto fail_init;

	/* Mark as heap-allocated so teardown knows to kfree() it. */
	transport->malloced = 1;
	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}

/* This transport is no longer needed.  Free up if possible, or
 * delay until it last reference count.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	transport->dead = 1;

	/* Try to delete the heartbeat timer.  A successful del_timer()
	 * means the timer was pending, so drop the reference that was
	 * taken on its behalf when it was armed (see
	 * sctp_transport_reset_timers()).
	 */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting
	 * structure hang around in memory since we know
	 * the tranport is going away.
	 */
	if (timer_pending(&transport->T3_rtx_timer) &&
	    del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active.
	 * Note: this timer holds a reference on the *association*,
	 * not the transport, hence sctp_association_put() here.
	 */
	if (timer_pending(&transport->proto_unreach_timer) &&
	    del_timer(&transport->proto_unreach_timer))
		sctp_association_put(transport->asoc);

	/* Drop the caller's reference; may trigger destruction. */
	sctp_transport_put(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	SCTP_ASSERT(transport->dead, "Transport is not dead", return);

	/* Release the association reference taken in
	 * sctp_transport_set_owner(), if any.
	 */
	if (transport->asoc)
		sctp_association_put(transport->asoc);

	sctp_packet_free(&transport->packet);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_timers(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address(including a
	 * retransmission), if the T3-rtx timer of that address is not running
	 * start it running so that it will expire after the RTO of that
	 * address.
	 */

	/* mod_timer() returns 0 when the timer was not already pending;
	 * in that case take a reference on behalf of the newly armed
	 * timer (dropped again when the timer fires or is deleted,
	 * see sctp_transport_free()).
	 */
	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);

	/* When a data chunk is sent, reset the heartbeat interval.  */
	if (!mod_timer(&transport->hb_timer,
		       sctp_transport_timeout(transport)))
		sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	/* Take a reference on the association for the lifetime of this
	 * transport; released in sctp_transport_destroy().
	 */
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport.
 */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete > 1) {
		dst_release(transport->dst);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	/* Without a route, fall back to the protocol default segment size. */
	if (transport->dst) {
		transport->pathmtu = dst_mtu(transport->dst);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* this is a complete rip-off from __sk_dst_check
 * the cookie is always 0 since this is how it's used in the
 * pmtu code
 */
static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
	struct dst_entry *dst = t->dst;

	/* Drop and forget a cached route the routing layer declares stale. */
	if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
		dst_release(t->dst);
		t->dst = NULL;
		return NULL;
	}

	return dst;
}

void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
	struct dst_entry *dst;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
			__func__, pmtu,
			SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment size and disable
		 * pmtu discovery on this transport.
		 *
		 * NOTE(review): only the cached pathmtu is clamped here;
		 * param_flags is not modified, so PMTU discovery does not
		 * appear to actually be disabled by this path — confirm
		 * against callers.
		 */
		t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
	} else {
		t->pathmtu = pmtu;
	}

	/* Propagate the new PMTU into the (still valid) cached route. */
	dst = sctp_transport_dst_check(t);
	if (dst)
		dst->ops->update_pmtu(dst, pmtu);
}

/* Caches the dst entry and source address for a transport's destination
 * address.
*/ void sctp_transport_route(struct sctp_transport *transport, union sctp_addr *saddr, struct sctp_sock *opt) { struct sctp_association *asoc = transport->asoc; struct sctp_af *af = transport->af_specific; af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt)); if (saddr) memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); else af->get_saddr(opt, transport, &transport->fl); if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) { return; } if (transport->dst) { transport->pathmtu = dst_mtu(transport->dst); /* Initialize sk->sk_rcv_saddr, if the transport is the * association's active path for getsockname(). */ if (asoc && (!asoc->peer.primary_path || (transport == asoc->peer.active_path))) opt->pf->af->to_sk_saddr(&transport->saddr, asoc->base.sk); } else transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } /* Hold a reference to a transport. */ void sctp_transport_hold(struct sctp_transport *transport) { atomic_inc(&transport->refcnt); } /* Release a reference to a transport and clean up * if there are no more references. */ void sctp_transport_put(struct sctp_transport *transport) { if (atomic_dec_and_test(&transport->refcnt)) sctp_transport_destroy(transport); } /* Update transport's RTO based on the newly calculated RTT. */ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) { /* Check for valid transport. */ SCTP_ASSERT(tp, "NULL transport", return); /* We should not be doing any RTO updates unless rto_pending is set. */ SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return); if (tp->rttvar || tp->srtt) { /* 6.3.1 C3) When a new RTT measurement R' is made, set * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' */ /* Note: The above algorithm has been rewritten to * express rto_beta and rto_alpha as inverse powers * of two. * For example, assuming the default value of RTO.Alpha of * 1/8, rto_alpha would be expressed as 3. 
*/ tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta) + ((abs(tp->srtt - rtt)) >> sctp_rto_beta); tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha) + (rtt >> sctp_rto_alpha); } else { /* 6.3.1 C2) When the first RTT measurement R is made, set * SRTT <- R, RTTVAR <- R/2. */ tp->srtt = rtt; tp->rttvar = rtt >> 1; } /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY. */ if (tp->rttvar == 0) tp->rttvar = SCTP_CLOCK_GRANULARITY; /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */ tp->rto = tp->srtt + (tp->rttvar << 2); /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min * seconds then it is rounded up to RTO.Min seconds. */ if (tp->rto < tp->asoc->rto_min) tp->rto = tp->asoc->rto_min; /* 6.3.1 C7) A maximum value may be placed on RTO provided it is * at least RTO.max seconds. */ if (tp->rto > tp->asoc->rto_max) tp->rto = tp->asoc->rto_max; tp->rtt = rtt; /* Reset rto_pending so that a new RTT measurement is started when a * new data chunk is sent. */ tp->rto_pending = 0; SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " "rttvar: %d, rto: %ld\n", __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto); } /* This routine updates the transport's cwnd and partial_bytes_acked * parameters based on the bytes acked in the received SACK. */ void sctp_transport_raise_cwnd(struct sctp_transport *transport, __u32 sack_ctsn, __u32 bytes_acked) { struct sctp_association *asoc = transport->asoc; __u32 cwnd, ssthresh, flight_size, pba, pmtu; cwnd = transport->cwnd; flight_size = transport->flight_size; /* See if we need to exit Fast Recovery first */ if (asoc->fast_recovery && TSN_lte(asoc->fast_recovery_exit, sack_ctsn)) asoc->fast_recovery = 0; /* The appropriate cwnd increase algorithm is performed if, and only * if the cumulative TSN whould advanced and the congestion window is * being fully utilized. 
*/ if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) || (flight_size < cwnd)) return; ssthresh = transport->ssthresh; pba = transport->partial_bytes_acked; pmtu = transport->asoc->pathmtu; if (cwnd <= ssthresh) { /* RFC 4960 7.2.1 * o When cwnd is less than or equal to ssthresh, an SCTP * endpoint MUST use the slow-start algorithm to increase * cwnd only if the current congestion window is being fully * utilized, an incoming SACK advances the Cumulative TSN * Ack Point, and the data sender is not in Fast Recovery. * Only when these three conditions are met can the cwnd be * increased; otherwise, the cwnd MUST not be increased. * If these conditions are met, then cwnd MUST be increased * by, at most, the lesser of 1) the total size of the * previously outstanding DATA chunk(s) acknowledged, and * 2) the destination's path MTU. This upper bound protects * against the ACK-Splitting attack outlined in [SAVAGE99]. */ if (asoc->fast_recovery) return; if (bytes_acked > pmtu) cwnd += pmtu; else cwnd += bytes_acked; SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, " "bytes_acked: %d, cwnd: %d, ssthresh: %d, " "flight_size: %d, pba: %d\n", __func__, transport, bytes_acked, cwnd, ssthresh, flight_size, pba); } else { /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, * upon each SACK arrival that advances the Cumulative TSN Ack * Point, increase partial_bytes_acked by the total number of * bytes of all new chunks acknowledged in that SACK including * chunks acknowledged by the new Cumulative TSN Ack and by * Gap Ack Blocks. * * When partial_bytes_acked is equal to or greater than cwnd * and before the arrival of the SACK the sender had cwnd or * more bytes of data outstanding (i.e., before arrival of the * SACK, flightsize was greater than or equal to cwnd), * increase cwnd by MTU, and reset partial_bytes_acked to * (partial_bytes_acked - cwnd). */ pba += bytes_acked; if (pba >= cwnd) { cwnd += pmtu; pba = ((cwnd < pba) ? 
(pba - cwnd) : 0); } SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: " "transport: %p, bytes_acked: %d, cwnd: %d, " "ssthresh: %d, flight_size: %d, pba: %d\n", __func__, transport, bytes_acked, cwnd, ssthresh, flight_size, pba); } transport->cwnd = cwnd; transport->partial_bytes_acked = pba; } /* This routine is used to lower the transport's cwnd when congestion is * detected. */ void sctp_transport_lower_cwnd(struct sctp_transport *transport, sctp_lower_cwnd_t reason) { struct sctp_association *asoc = transport->asoc; switch (reason) { case SCTP_LOWER_CWND_T3_RTX: /* RFC 2960 Section 7.2.3, sctpimpguide * When the T3-rtx timer expires on an address, SCTP should * perform slow start by: * ssthresh = max(cwnd/2, 4*MTU) * cwnd = 1*MTU * partial_bytes_acked = 0 */ transport->ssthresh = max(transport->cwnd/2, 4*asoc->pathmtu); transport->cwnd = asoc->pathmtu; /* T3-rtx also clears fast recovery */ asoc->fast_recovery = 0; break; case SCTP_LOWER_CWND_FAST_RTX: /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the * destination address(es) to which the missing DATA chunks * were last sent, according to the formula described in * Section 7.2.3. * * RFC 2960 7.2.3, sctpimpguide Upon detection of packet * losses from SACK (see Section 7.2.4), An endpoint * should do the following: * ssthresh = max(cwnd/2, 4*MTU) * cwnd = ssthresh * partial_bytes_acked = 0 */ if (asoc->fast_recovery) return; /* Mark Fast recovery */ asoc->fast_recovery = 1; asoc->fast_recovery_exit = asoc->next_tsn - 1; transport->ssthresh = max(transport->cwnd/2, 4*asoc->pathmtu); transport->cwnd = transport->ssthresh; break; case SCTP_LOWER_CWND_ECNE: /* RFC 2481 Section 6.1.2. * If the sender receives an ECN-Echo ACK packet * then the sender knows that congestion was encountered in the * network on the path from the sender to the receiver. The * indication of congestion should be treated just as a * congestion loss in non-ECN Capable TCP. 
That is, the TCP * source halves the congestion window "cwnd" and reduces the * slow start threshold "ssthresh". * A critical condition is that TCP does not react to * congestion indications more than once every window of * data (or more loosely more than once every round-trip time). */ if (time_after(jiffies, transport->last_time_ecne_reduced + transport->rtt)) { transport->ssthresh = max(transport->cwnd/2, 4*asoc->pathmtu); transport->cwnd = transport->ssthresh; transport->last_time_ecne_reduced = jiffies; } break; case SCTP_LOWER_CWND_INACTIVE: /* RFC 2960 Section 7.2.1, sctpimpguide * When the endpoint does not transmit data on a given * transport address, the cwnd of the transport address * should be adjusted to max(cwnd/2, 4*MTU) per RTO. * NOTE: Although the draft recommends that this check needs * to be done every RTO interval, we do it every hearbeat * interval. */ transport->cwnd = max(transport->cwnd/2, 4*asoc->pathmtu); break; } transport->partial_bytes_acked = 0; SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " "%d ssthresh: %d\n", __func__, transport, reason, transport->cwnd, transport->ssthresh); } /* Apply Max.Burst limit to the congestion window: * sctpimpguide-05 2.14.2 * D) When the time comes for the sender to * transmit new DATA chunks, the protocol parameter Max.Burst MUST * first be applied to limit how many new DATA chunks may be sent. * The limit is applied by adjusting cwnd as follows: * if ((flightsize+ Max.Burst * MTU) < cwnd) * cwnd = flightsize + Max.Burst * MTU */ void sctp_transport_burst_limited(struct sctp_transport *t) { struct sctp_association *asoc = t->asoc; u32 old_cwnd = t->cwnd; u32 max_burst_bytes; if (t->burst_limited) return; max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); if (max_burst_bytes < old_cwnd) { t->cwnd = max_burst_bytes; t->burst_limited = old_cwnd; } } /* Restore the old cwnd congestion window, after the burst had it's * desired effect. 
*/ void sctp_transport_burst_reset(struct sctp_transport *t) { if (t->burst_limited) { t->cwnd = t->burst_limited; t->burst_limited = 0; } } /* What is the next timeout value for this transport? */ unsigned long sctp_transport_timeout(struct sctp_transport *t) { unsigned long timeout; timeout = t->rto + sctp_jitter(t->rto); if (t->state != SCTP_UNCONFIRMED) timeout += t->hbinterval; timeout += jiffies; return timeout; } /* Reset transport variables to their initial values */ void sctp_transport_reset(struct sctp_transport *t) { struct sctp_association *asoc = t->asoc; /* RFC 2960 (bis), Section 5.2.4 * All the congestion control parameters (e.g., cwnd, ssthresh) * related to this peer MUST be reset to their initial values * (see Section 6.2.1) */ t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); t->burst_limited = 0; t->ssthresh = asoc->peer.i.a_rwnd; t->rto = asoc->rto_initial; t->rtt = 0; t->srtt = 0; t->rttvar = 0; /* Reset these additional varibles so that we have a clean * slate. */ t->partial_bytes_acked = 0; t->flight_size = 0; t->error_count = 0; t->rto_pending = 0; t->hb_sent = 0; /* Initialize the state information for SFR-CACC */ t->cacc.changeover_active = 0; t->cacc.cycling_changeover = 0; t->cacc.next_tsn_at_change = 0; t->cacc.cacc_saw_newack = 0; }
gpl-2.0
CyanogenMod/android_kernel_google_steelhead
arch/arm/mach-davinci/da850.c
2281
29882
/* * TI DA850/OMAP-L138 chip specific setup * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ * * Derived from: arch/arm/mach-davinci/da830.c * Original Copyrights follow: * * 2009 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/init.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/cpufreq.h> #include <linux/regulator/consumer.h> #include <asm/mach/map.h> #include <mach/psc.h> #include <mach/irqs.h> #include <mach/cputype.h> #include <mach/common.h> #include <mach/time.h> #include <mach/da8xx.h> #include <mach/cpufreq.h> #include <mach/pm.h> #include <mach/gpio.h> #include "clock.h" #include "mux.h" /* SoC specific clock flags */ #define DA850_CLK_ASYNC3 BIT(16) #define DA850_PLL1_BASE 0x01e1a000 #define DA850_TIMER64P2_BASE 0x01f0c000 #define DA850_TIMER64P3_BASE 0x01f0d000 #define DA850_REF_FREQ 24000000 #define CFGCHIP3_ASYNC3_CLKSRC BIT(4) #define CFGCHIP3_PLL1_MASTER_LOCK BIT(5) #define CFGCHIP0_PLL_MASTER_LOCK BIT(4) static int da850_set_armrate(struct clk *clk, unsigned long rate); static int da850_round_armrate(struct clk *clk, unsigned long rate); static int da850_set_pll0rate(struct clk *clk, unsigned long armrate); static struct pll_data pll0_data = { .num = 1, .phys_base = DA8XX_PLL0_BASE, .flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV, }; static struct clk ref_clk = { .name = "ref_clk", .rate = DA850_REF_FREQ, }; static struct clk pll0_clk = { .name = "pll0", .parent = &ref_clk, .pll_data = &pll0_data, .flags = CLK_PLL, .set_rate = da850_set_pll0rate, }; static struct clk pll0_aux_clk = { .name = "pll0_aux_clk", .parent = &pll0_clk, .flags = CLK_PLL | PRE_PLL, }; static struct clk pll0_sysclk2 = { .name = "pll0_sysclk2", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV2, }; static struct clk pll0_sysclk3 = { .name = 
"pll0_sysclk3", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV3, .set_rate = davinci_set_sysclk_rate, .maxrate = 100000000, }; static struct clk pll0_sysclk4 = { .name = "pll0_sysclk4", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV4, }; static struct clk pll0_sysclk5 = { .name = "pll0_sysclk5", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV5, }; static struct clk pll0_sysclk6 = { .name = "pll0_sysclk6", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV6, }; static struct clk pll0_sysclk7 = { .name = "pll0_sysclk7", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV7, }; static struct pll_data pll1_data = { .num = 2, .phys_base = DA850_PLL1_BASE, .flags = PLL_HAS_POSTDIV, }; static struct clk pll1_clk = { .name = "pll1", .parent = &ref_clk, .pll_data = &pll1_data, .flags = CLK_PLL, }; static struct clk pll1_aux_clk = { .name = "pll1_aux_clk", .parent = &pll1_clk, .flags = CLK_PLL | PRE_PLL, }; static struct clk pll1_sysclk2 = { .name = "pll1_sysclk2", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV2, }; static struct clk pll1_sysclk3 = { .name = "pll1_sysclk3", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV3, }; static struct clk pll1_sysclk4 = { .name = "pll1_sysclk4", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV4, }; static struct clk pll1_sysclk5 = { .name = "pll1_sysclk5", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV5, }; static struct clk pll1_sysclk6 = { .name = "pll0_sysclk6", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV6, }; static struct clk pll1_sysclk7 = { .name = "pll1_sysclk7", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV7, }; static struct clk i2c0_clk = { .name = "i2c0", .parent = &pll0_aux_clk, }; static struct clk timerp64_0_clk = { .name = "timer0", .parent = &pll0_aux_clk, }; static struct clk timerp64_1_clk = { .name = "timer1", .parent = &pll0_aux_clk, }; static struct clk arm_rom_clk = { .name = "arm_rom", .parent = &pll0_sysclk2, 
.lpsc = DA8XX_LPSC0_ARM_RAM_ROM, .flags = ALWAYS_ENABLED, }; static struct clk tpcc0_clk = { .name = "tpcc0", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_TPCC, .flags = ALWAYS_ENABLED | CLK_PSC, }; static struct clk tptc0_clk = { .name = "tptc0", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_TPTC0, .flags = ALWAYS_ENABLED, }; static struct clk tptc1_clk = { .name = "tptc1", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_TPTC1, .flags = ALWAYS_ENABLED, }; static struct clk tpcc1_clk = { .name = "tpcc1", .parent = &pll0_sysclk2, .lpsc = DA850_LPSC1_TPCC1, .gpsc = 1, .flags = CLK_PSC | ALWAYS_ENABLED, }; static struct clk tptc2_clk = { .name = "tptc2", .parent = &pll0_sysclk2, .lpsc = DA850_LPSC1_TPTC2, .gpsc = 1, .flags = ALWAYS_ENABLED, }; static struct clk uart0_clk = { .name = "uart0", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_UART0, }; static struct clk uart1_clk = { .name = "uart1", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_UART1, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; static struct clk uart2_clk = { .name = "uart2", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_UART2, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; static struct clk aintc_clk = { .name = "aintc", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC0_AINTC, .flags = ALWAYS_ENABLED, }; static struct clk gpio_clk = { .name = "gpio", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC1_GPIO, .gpsc = 1, }; static struct clk i2c1_clk = { .name = "i2c1", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC1_I2C, .gpsc = 1, }; static struct clk emif3_clk = { .name = "emif3", .parent = &pll0_sysclk5, .lpsc = DA8XX_LPSC1_EMIF3C, .gpsc = 1, .flags = ALWAYS_ENABLED, }; static struct clk arm_clk = { .name = "arm", .parent = &pll0_sysclk6, .lpsc = DA8XX_LPSC0_ARM, .flags = ALWAYS_ENABLED, .set_rate = da850_set_armrate, .round_rate = da850_round_armrate, }; static struct clk rmii_clk = { .name = "rmii", .parent = &pll0_sysclk7, }; static struct clk emac_clk = { .name = "emac", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC1_CPGMAC, .gpsc = 
1, }; static struct clk mcasp_clk = { .name = "mcasp", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_McASP0, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; static struct clk lcdc_clk = { .name = "lcdc", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_LCDC, .gpsc = 1, }; static struct clk mmcsd0_clk = { .name = "mmcsd0", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_MMC_SD, }; static struct clk mmcsd1_clk = { .name = "mmcsd1", .parent = &pll0_sysclk2, .lpsc = DA850_LPSC1_MMC_SD1, .gpsc = 1, }; static struct clk aemif_clk = { .name = "aemif", .parent = &pll0_sysclk3, .lpsc = DA8XX_LPSC0_EMIF25, .flags = ALWAYS_ENABLED, }; static struct clk usb11_clk = { .name = "usb11", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC1_USB11, .gpsc = 1, }; static struct clk usb20_clk = { .name = "usb20", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_USB20, .gpsc = 1, }; static struct clk spi0_clk = { .name = "spi0", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_SPI0, }; static struct clk spi1_clk = { .name = "spi1", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_SPI1, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; static struct clk_lookup da850_clks[] = { CLK(NULL, "ref", &ref_clk), CLK(NULL, "pll0", &pll0_clk), CLK(NULL, "pll0_aux", &pll0_aux_clk), CLK(NULL, "pll0_sysclk2", &pll0_sysclk2), CLK(NULL, "pll0_sysclk3", &pll0_sysclk3), CLK(NULL, "pll0_sysclk4", &pll0_sysclk4), CLK(NULL, "pll0_sysclk5", &pll0_sysclk5), CLK(NULL, "pll0_sysclk6", &pll0_sysclk6), CLK(NULL, "pll0_sysclk7", &pll0_sysclk7), CLK(NULL, "pll1", &pll1_clk), CLK(NULL, "pll1_aux", &pll1_aux_clk), CLK(NULL, "pll1_sysclk2", &pll1_sysclk2), CLK(NULL, "pll1_sysclk3", &pll1_sysclk3), CLK(NULL, "pll1_sysclk4", &pll1_sysclk4), CLK(NULL, "pll1_sysclk5", &pll1_sysclk5), CLK(NULL, "pll1_sysclk6", &pll1_sysclk6), CLK(NULL, "pll1_sysclk7", &pll1_sysclk7), CLK("i2c_davinci.1", NULL, &i2c0_clk), CLK(NULL, "timer0", &timerp64_0_clk), CLK("watchdog", NULL, &timerp64_1_clk), CLK(NULL, "arm_rom", &arm_rom_clk), CLK(NULL, "tpcc0", &tpcc0_clk), CLK(NULL, 
"tptc0", &tptc0_clk), CLK(NULL, "tptc1", &tptc1_clk), CLK(NULL, "tpcc1", &tpcc1_clk), CLK(NULL, "tptc2", &tptc2_clk), CLK(NULL, "uart0", &uart0_clk), CLK(NULL, "uart1", &uart1_clk), CLK(NULL, "uart2", &uart2_clk), CLK(NULL, "aintc", &aintc_clk), CLK(NULL, "gpio", &gpio_clk), CLK("i2c_davinci.2", NULL, &i2c1_clk), CLK(NULL, "emif3", &emif3_clk), CLK(NULL, "arm", &arm_clk), CLK(NULL, "rmii", &rmii_clk), CLK("davinci_emac.1", NULL, &emac_clk), CLK("davinci-mcasp.0", NULL, &mcasp_clk), CLK("da8xx_lcdc.0", NULL, &lcdc_clk), CLK("davinci_mmc.0", NULL, &mmcsd0_clk), CLK("davinci_mmc.1", NULL, &mmcsd1_clk), CLK(NULL, "aemif", &aemif_clk), CLK(NULL, "usb11", &usb11_clk), CLK(NULL, "usb20", &usb20_clk), CLK("spi_davinci.0", NULL, &spi0_clk), CLK("spi_davinci.1", NULL, &spi1_clk), CLK(NULL, NULL, NULL), }; /* * Device specific mux setup * * soc description mux mode mode mux dbg * reg offset mask mode */ static const struct mux_config da850_pins[] = { #ifdef CONFIG_DAVINCI_MUX /* UART0 function */ MUX_CFG(DA850, NUART0_CTS, 3, 24, 15, 2, false) MUX_CFG(DA850, NUART0_RTS, 3, 28, 15, 2, false) MUX_CFG(DA850, UART0_RXD, 3, 16, 15, 2, false) MUX_CFG(DA850, UART0_TXD, 3, 20, 15, 2, false) /* UART1 function */ MUX_CFG(DA850, UART1_RXD, 4, 24, 15, 2, false) MUX_CFG(DA850, UART1_TXD, 4, 28, 15, 2, false) /* UART2 function */ MUX_CFG(DA850, UART2_RXD, 4, 16, 15, 2, false) MUX_CFG(DA850, UART2_TXD, 4, 20, 15, 2, false) /* I2C1 function */ MUX_CFG(DA850, I2C1_SCL, 4, 16, 15, 4, false) MUX_CFG(DA850, I2C1_SDA, 4, 20, 15, 4, false) /* I2C0 function */ MUX_CFG(DA850, I2C0_SDA, 4, 12, 15, 2, false) MUX_CFG(DA850, I2C0_SCL, 4, 8, 15, 2, false) /* EMAC function */ MUX_CFG(DA850, MII_TXEN, 2, 4, 15, 8, false) MUX_CFG(DA850, MII_TXCLK, 2, 8, 15, 8, false) MUX_CFG(DA850, MII_COL, 2, 12, 15, 8, false) MUX_CFG(DA850, MII_TXD_3, 2, 16, 15, 8, false) MUX_CFG(DA850, MII_TXD_2, 2, 20, 15, 8, false) MUX_CFG(DA850, MII_TXD_1, 2, 24, 15, 8, false) MUX_CFG(DA850, MII_TXD_0, 2, 28, 15, 8, false) 
MUX_CFG(DA850, MII_RXCLK, 3, 0, 15, 8, false) MUX_CFG(DA850, MII_RXDV, 3, 4, 15, 8, false) MUX_CFG(DA850, MII_RXER, 3, 8, 15, 8, false) MUX_CFG(DA850, MII_CRS, 3, 12, 15, 8, false) MUX_CFG(DA850, MII_RXD_3, 3, 16, 15, 8, false) MUX_CFG(DA850, MII_RXD_2, 3, 20, 15, 8, false) MUX_CFG(DA850, MII_RXD_1, 3, 24, 15, 8, false) MUX_CFG(DA850, MII_RXD_0, 3, 28, 15, 8, false) MUX_CFG(DA850, MDIO_CLK, 4, 0, 15, 8, false) MUX_CFG(DA850, MDIO_D, 4, 4, 15, 8, false) MUX_CFG(DA850, RMII_TXD_0, 14, 12, 15, 8, false) MUX_CFG(DA850, RMII_TXD_1, 14, 8, 15, 8, false) MUX_CFG(DA850, RMII_TXEN, 14, 16, 15, 8, false) MUX_CFG(DA850, RMII_CRS_DV, 15, 4, 15, 8, false) MUX_CFG(DA850, RMII_RXD_0, 14, 24, 15, 8, false) MUX_CFG(DA850, RMII_RXD_1, 14, 20, 15, 8, false) MUX_CFG(DA850, RMII_RXER, 14, 28, 15, 8, false) MUX_CFG(DA850, RMII_MHZ_50_CLK, 15, 0, 15, 0, false) /* McASP function */ MUX_CFG(DA850, ACLKR, 0, 0, 15, 1, false) MUX_CFG(DA850, ACLKX, 0, 4, 15, 1, false) MUX_CFG(DA850, AFSR, 0, 8, 15, 1, false) MUX_CFG(DA850, AFSX, 0, 12, 15, 1, false) MUX_CFG(DA850, AHCLKR, 0, 16, 15, 1, false) MUX_CFG(DA850, AHCLKX, 0, 20, 15, 1, false) MUX_CFG(DA850, AMUTE, 0, 24, 15, 1, false) MUX_CFG(DA850, AXR_15, 1, 0, 15, 1, false) MUX_CFG(DA850, AXR_14, 1, 4, 15, 1, false) MUX_CFG(DA850, AXR_13, 1, 8, 15, 1, false) MUX_CFG(DA850, AXR_12, 1, 12, 15, 1, false) MUX_CFG(DA850, AXR_11, 1, 16, 15, 1, false) MUX_CFG(DA850, AXR_10, 1, 20, 15, 1, false) MUX_CFG(DA850, AXR_9, 1, 24, 15, 1, false) MUX_CFG(DA850, AXR_8, 1, 28, 15, 1, false) MUX_CFG(DA850, AXR_7, 2, 0, 15, 1, false) MUX_CFG(DA850, AXR_6, 2, 4, 15, 1, false) MUX_CFG(DA850, AXR_5, 2, 8, 15, 1, false) MUX_CFG(DA850, AXR_4, 2, 12, 15, 1, false) MUX_CFG(DA850, AXR_3, 2, 16, 15, 1, false) MUX_CFG(DA850, AXR_2, 2, 20, 15, 1, false) MUX_CFG(DA850, AXR_1, 2, 24, 15, 1, false) MUX_CFG(DA850, AXR_0, 2, 28, 15, 1, false) /* LCD function */ MUX_CFG(DA850, LCD_D_7, 16, 8, 15, 2, false) MUX_CFG(DA850, LCD_D_6, 16, 12, 15, 2, false) MUX_CFG(DA850, LCD_D_5, 16, 16, 
15, 2, false) MUX_CFG(DA850, LCD_D_4, 16, 20, 15, 2, false) MUX_CFG(DA850, LCD_D_3, 16, 24, 15, 2, false) MUX_CFG(DA850, LCD_D_2, 16, 28, 15, 2, false) MUX_CFG(DA850, LCD_D_1, 17, 0, 15, 2, false) MUX_CFG(DA850, LCD_D_0, 17, 4, 15, 2, false) MUX_CFG(DA850, LCD_D_15, 17, 8, 15, 2, false) MUX_CFG(DA850, LCD_D_14, 17, 12, 15, 2, false) MUX_CFG(DA850, LCD_D_13, 17, 16, 15, 2, false) MUX_CFG(DA850, LCD_D_12, 17, 20, 15, 2, false) MUX_CFG(DA850, LCD_D_11, 17, 24, 15, 2, false) MUX_CFG(DA850, LCD_D_10, 17, 28, 15, 2, false) MUX_CFG(DA850, LCD_D_9, 18, 0, 15, 2, false) MUX_CFG(DA850, LCD_D_8, 18, 4, 15, 2, false) MUX_CFG(DA850, LCD_PCLK, 18, 24, 15, 2, false) MUX_CFG(DA850, LCD_HSYNC, 19, 0, 15, 2, false) MUX_CFG(DA850, LCD_VSYNC, 19, 4, 15, 2, false) MUX_CFG(DA850, NLCD_AC_ENB_CS, 19, 24, 15, 2, false) /* MMC/SD0 function */ MUX_CFG(DA850, MMCSD0_DAT_0, 10, 8, 15, 2, false) MUX_CFG(DA850, MMCSD0_DAT_1, 10, 12, 15, 2, false) MUX_CFG(DA850, MMCSD0_DAT_2, 10, 16, 15, 2, false) MUX_CFG(DA850, MMCSD0_DAT_3, 10, 20, 15, 2, false) MUX_CFG(DA850, MMCSD0_CLK, 10, 0, 15, 2, false) MUX_CFG(DA850, MMCSD0_CMD, 10, 4, 15, 2, false) /* EMIF2.5/EMIFA function */ MUX_CFG(DA850, EMA_D_7, 9, 0, 15, 1, false) MUX_CFG(DA850, EMA_D_6, 9, 4, 15, 1, false) MUX_CFG(DA850, EMA_D_5, 9, 8, 15, 1, false) MUX_CFG(DA850, EMA_D_4, 9, 12, 15, 1, false) MUX_CFG(DA850, EMA_D_3, 9, 16, 15, 1, false) MUX_CFG(DA850, EMA_D_2, 9, 20, 15, 1, false) MUX_CFG(DA850, EMA_D_1, 9, 24, 15, 1, false) MUX_CFG(DA850, EMA_D_0, 9, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_1, 12, 24, 15, 1, false) MUX_CFG(DA850, EMA_A_2, 12, 20, 15, 1, false) MUX_CFG(DA850, NEMA_CS_3, 7, 4, 15, 1, false) MUX_CFG(DA850, NEMA_CS_4, 7, 8, 15, 1, false) MUX_CFG(DA850, NEMA_WE, 7, 16, 15, 1, false) MUX_CFG(DA850, NEMA_OE, 7, 20, 15, 1, false) MUX_CFG(DA850, EMA_A_0, 12, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_3, 12, 16, 15, 1, false) MUX_CFG(DA850, EMA_A_4, 12, 12, 15, 1, false) MUX_CFG(DA850, EMA_A_5, 12, 8, 15, 1, false) MUX_CFG(DA850, EMA_A_6, 12, 
4, 15, 1, false) MUX_CFG(DA850, EMA_A_7, 12, 0, 15, 1, false) MUX_CFG(DA850, EMA_A_8, 11, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_9, 11, 24, 15, 1, false) MUX_CFG(DA850, EMA_A_10, 11, 20, 15, 1, false) MUX_CFG(DA850, EMA_A_11, 11, 16, 15, 1, false) MUX_CFG(DA850, EMA_A_12, 11, 12, 15, 1, false) MUX_CFG(DA850, EMA_A_13, 11, 8, 15, 1, false) MUX_CFG(DA850, EMA_A_14, 11, 4, 15, 1, false) MUX_CFG(DA850, EMA_A_15, 11, 0, 15, 1, false) MUX_CFG(DA850, EMA_A_16, 10, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_17, 10, 24, 15, 1, false) MUX_CFG(DA850, EMA_A_18, 10, 20, 15, 1, false) MUX_CFG(DA850, EMA_A_19, 10, 16, 15, 1, false) MUX_CFG(DA850, EMA_A_20, 10, 12, 15, 1, false) MUX_CFG(DA850, EMA_A_21, 10, 8, 15, 1, false) MUX_CFG(DA850, EMA_A_22, 10, 4, 15, 1, false) MUX_CFG(DA850, EMA_A_23, 10, 0, 15, 1, false) MUX_CFG(DA850, EMA_D_8, 8, 28, 15, 1, false) MUX_CFG(DA850, EMA_D_9, 8, 24, 15, 1, false) MUX_CFG(DA850, EMA_D_10, 8, 20, 15, 1, false) MUX_CFG(DA850, EMA_D_11, 8, 16, 15, 1, false) MUX_CFG(DA850, EMA_D_12, 8, 12, 15, 1, false) MUX_CFG(DA850, EMA_D_13, 8, 8, 15, 1, false) MUX_CFG(DA850, EMA_D_14, 8, 4, 15, 1, false) MUX_CFG(DA850, EMA_D_15, 8, 0, 15, 1, false) MUX_CFG(DA850, EMA_BA_1, 5, 24, 15, 1, false) MUX_CFG(DA850, EMA_CLK, 6, 0, 15, 1, false) MUX_CFG(DA850, EMA_WAIT_1, 6, 24, 15, 1, false) MUX_CFG(DA850, NEMA_CS_2, 7, 0, 15, 1, false) /* GPIO function */ MUX_CFG(DA850, GPIO2_4, 6, 12, 15, 8, false) MUX_CFG(DA850, GPIO2_6, 6, 4, 15, 8, false) MUX_CFG(DA850, GPIO2_8, 5, 28, 15, 8, false) MUX_CFG(DA850, GPIO2_15, 5, 0, 15, 8, false) MUX_CFG(DA850, GPIO3_12, 7, 12, 15, 8, false) MUX_CFG(DA850, GPIO3_13, 7, 8, 15, 8, false) MUX_CFG(DA850, GPIO4_0, 10, 28, 15, 8, false) MUX_CFG(DA850, GPIO4_1, 10, 24, 15, 8, false) MUX_CFG(DA850, GPIO6_13, 13, 8, 15, 8, false) MUX_CFG(DA850, RTC_ALARM, 0, 28, 15, 2, false) #endif }; const short da850_i2c0_pins[] __initdata = { DA850_I2C0_SDA, DA850_I2C0_SCL, -1 }; const short da850_i2c1_pins[] __initdata = { DA850_I2C1_SCL, DA850_I2C1_SDA, 
-1 }; const short da850_lcdcntl_pins[] __initdata = { DA850_LCD_D_0, DA850_LCD_D_1, DA850_LCD_D_2, DA850_LCD_D_3, DA850_LCD_D_4, DA850_LCD_D_5, DA850_LCD_D_6, DA850_LCD_D_7, DA850_LCD_D_8, DA850_LCD_D_9, DA850_LCD_D_10, DA850_LCD_D_11, DA850_LCD_D_12, DA850_LCD_D_13, DA850_LCD_D_14, DA850_LCD_D_15, DA850_LCD_PCLK, DA850_LCD_HSYNC, DA850_LCD_VSYNC, DA850_NLCD_AC_ENB_CS, -1 }; /* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */ static u8 da850_default_priorities[DA850_N_CP_INTC_IRQ] = { [IRQ_DA8XX_COMMTX] = 7, [IRQ_DA8XX_COMMRX] = 7, [IRQ_DA8XX_NINT] = 7, [IRQ_DA8XX_EVTOUT0] = 7, [IRQ_DA8XX_EVTOUT1] = 7, [IRQ_DA8XX_EVTOUT2] = 7, [IRQ_DA8XX_EVTOUT3] = 7, [IRQ_DA8XX_EVTOUT4] = 7, [IRQ_DA8XX_EVTOUT5] = 7, [IRQ_DA8XX_EVTOUT6] = 7, [IRQ_DA8XX_EVTOUT7] = 7, [IRQ_DA8XX_CCINT0] = 7, [IRQ_DA8XX_CCERRINT] = 7, [IRQ_DA8XX_TCERRINT0] = 7, [IRQ_DA8XX_AEMIFINT] = 7, [IRQ_DA8XX_I2CINT0] = 7, [IRQ_DA8XX_MMCSDINT0] = 7, [IRQ_DA8XX_MMCSDINT1] = 7, [IRQ_DA8XX_ALLINT0] = 7, [IRQ_DA8XX_RTC] = 7, [IRQ_DA8XX_SPINT0] = 7, [IRQ_DA8XX_TINT12_0] = 7, [IRQ_DA8XX_TINT34_0] = 7, [IRQ_DA8XX_TINT12_1] = 7, [IRQ_DA8XX_TINT34_1] = 7, [IRQ_DA8XX_UARTINT0] = 7, [IRQ_DA8XX_KEYMGRINT] = 7, [IRQ_DA850_MPUADDRERR0] = 7, [IRQ_DA8XX_CHIPINT0] = 7, [IRQ_DA8XX_CHIPINT1] = 7, [IRQ_DA8XX_CHIPINT2] = 7, [IRQ_DA8XX_CHIPINT3] = 7, [IRQ_DA8XX_TCERRINT1] = 7, [IRQ_DA8XX_C0_RX_THRESH_PULSE] = 7, [IRQ_DA8XX_C0_RX_PULSE] = 7, [IRQ_DA8XX_C0_TX_PULSE] = 7, [IRQ_DA8XX_C0_MISC_PULSE] = 7, [IRQ_DA8XX_C1_RX_THRESH_PULSE] = 7, [IRQ_DA8XX_C1_RX_PULSE] = 7, [IRQ_DA8XX_C1_TX_PULSE] = 7, [IRQ_DA8XX_C1_MISC_PULSE] = 7, [IRQ_DA8XX_MEMERR] = 7, [IRQ_DA8XX_GPIO0] = 7, [IRQ_DA8XX_GPIO1] = 7, [IRQ_DA8XX_GPIO2] = 7, [IRQ_DA8XX_GPIO3] = 7, [IRQ_DA8XX_GPIO4] = 7, [IRQ_DA8XX_GPIO5] = 7, [IRQ_DA8XX_GPIO6] = 7, [IRQ_DA8XX_GPIO7] = 7, [IRQ_DA8XX_GPIO8] = 7, [IRQ_DA8XX_I2CINT1] = 7, [IRQ_DA8XX_LCDINT] = 7, [IRQ_DA8XX_UARTINT1] = 7, [IRQ_DA8XX_MCASPINT] = 7, [IRQ_DA8XX_ALLINT1] = 7, [IRQ_DA8XX_SPINT1] = 7, [IRQ_DA8XX_UHPI_INT1] = 7, 
[IRQ_DA8XX_USB_INT] = 7, [IRQ_DA8XX_IRQN] = 7, [IRQ_DA8XX_RWAKEUP] = 7, [IRQ_DA8XX_UARTINT2] = 7, [IRQ_DA8XX_DFTSSINT] = 7, [IRQ_DA8XX_EHRPWM0] = 7, [IRQ_DA8XX_EHRPWM0TZ] = 7, [IRQ_DA8XX_EHRPWM1] = 7, [IRQ_DA8XX_EHRPWM1TZ] = 7, [IRQ_DA850_SATAINT] = 7, [IRQ_DA850_TINTALL_2] = 7, [IRQ_DA8XX_ECAP0] = 7, [IRQ_DA8XX_ECAP1] = 7, [IRQ_DA8XX_ECAP2] = 7, [IRQ_DA850_MMCSDINT0_1] = 7, [IRQ_DA850_MMCSDINT1_1] = 7, [IRQ_DA850_T12CMPINT0_2] = 7, [IRQ_DA850_T12CMPINT1_2] = 7, [IRQ_DA850_T12CMPINT2_2] = 7, [IRQ_DA850_T12CMPINT3_2] = 7, [IRQ_DA850_T12CMPINT4_2] = 7, [IRQ_DA850_T12CMPINT5_2] = 7, [IRQ_DA850_T12CMPINT6_2] = 7, [IRQ_DA850_T12CMPINT7_2] = 7, [IRQ_DA850_T12CMPINT0_3] = 7, [IRQ_DA850_T12CMPINT1_3] = 7, [IRQ_DA850_T12CMPINT2_3] = 7, [IRQ_DA850_T12CMPINT3_3] = 7, [IRQ_DA850_T12CMPINT4_3] = 7, [IRQ_DA850_T12CMPINT5_3] = 7, [IRQ_DA850_T12CMPINT6_3] = 7, [IRQ_DA850_T12CMPINT7_3] = 7, [IRQ_DA850_RPIINT] = 7, [IRQ_DA850_VPIFINT] = 7, [IRQ_DA850_CCINT1] = 7, [IRQ_DA850_CCERRINT1] = 7, [IRQ_DA850_TCERRINT2] = 7, [IRQ_DA850_TINTALL_3] = 7, [IRQ_DA850_MCBSP0RINT] = 7, [IRQ_DA850_MCBSP0XINT] = 7, [IRQ_DA850_MCBSP1RINT] = 7, [IRQ_DA850_MCBSP1XINT] = 7, [IRQ_DA8XX_ARMCLKSTOPREQ] = 7, }; static struct map_desc da850_io_desc[] = { { .virtual = IO_VIRT, .pfn = __phys_to_pfn(IO_PHYS), .length = IO_SIZE, .type = MT_DEVICE }, { .virtual = DA8XX_CP_INTC_VIRT, .pfn = __phys_to_pfn(DA8XX_CP_INTC_BASE), .length = DA8XX_CP_INTC_SIZE, .type = MT_DEVICE }, { .virtual = SRAM_VIRT, .pfn = __phys_to_pfn(DA8XX_ARM_RAM_BASE), .length = SZ_8K, .type = MT_DEVICE }, }; static u32 da850_psc_bases[] = { DA8XX_PSC0_BASE, DA8XX_PSC1_BASE }; /* Contents of JTAG ID register used to identify exact cpu type */ static struct davinci_id da850_ids[] = { { .variant = 0x0, .part_no = 0xb7d1, .manufacturer = 0x017, /* 0x02f >> 1 */ .cpu_id = DAVINCI_CPU_ID_DA850, .name = "da850/omap-l138", }, { .variant = 0x1, .part_no = 0xb7d1, .manufacturer = 0x017, /* 0x02f >> 1 */ .cpu_id = DAVINCI_CPU_ID_DA850, .name = 
"da850/omap-l138/am18x", }, }; static struct davinci_timer_instance da850_timer_instance[4] = { { .base = DA8XX_TIMER64P0_BASE, .bottom_irq = IRQ_DA8XX_TINT12_0, .top_irq = IRQ_DA8XX_TINT34_0, }, { .base = DA8XX_TIMER64P1_BASE, .bottom_irq = IRQ_DA8XX_TINT12_1, .top_irq = IRQ_DA8XX_TINT34_1, }, { .base = DA850_TIMER64P2_BASE, .bottom_irq = IRQ_DA850_TINT12_2, .top_irq = IRQ_DA850_TINT34_2, }, { .base = DA850_TIMER64P3_BASE, .bottom_irq = IRQ_DA850_TINT12_3, .top_irq = IRQ_DA850_TINT34_3, }, }; /* * T0_BOT: Timer 0, bottom : Used for clock_event * T0_TOP: Timer 0, top : Used for clocksource * T1_BOT, T1_TOP: Timer 1, bottom & top: Used for watchdog timer */ static struct davinci_timer_info da850_timer_info = { .timers = da850_timer_instance, .clockevent_id = T0_BOT, .clocksource_id = T0_TOP, }; static void da850_set_async3_src(int pllnum) { struct clk *clk, *newparent = pllnum ? &pll1_sysclk2 : &pll0_sysclk2; struct clk_lookup *c; unsigned int v; int ret; for (c = da850_clks; c->clk; c++) { clk = c->clk; if (clk->flags & DA850_CLK_ASYNC3) { ret = clk_set_parent(clk, newparent); WARN(ret, "DA850: unable to re-parent clock %s", clk->name); } } v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG)); if (pllnum) v |= CFGCHIP3_ASYNC3_CLKSRC; else v &= ~CFGCHIP3_ASYNC3_CLKSRC; __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG)); } #ifdef CONFIG_CPU_FREQ /* * Notes: * According to the TRM, minimum PLLM results in maximum power savings. * The OPP definitions below should keep the PLLM as low as possible. * * The output of the PLLM must be between 300 to 600 MHz. 
*/ struct da850_opp { unsigned int freq; /* in KHz */ unsigned int prediv; unsigned int mult; unsigned int postdiv; unsigned int cvdd_min; /* in uV */ unsigned int cvdd_max; /* in uV */ }; static const struct da850_opp da850_opp_456 = { .freq = 456000, .prediv = 1, .mult = 19, .postdiv = 1, .cvdd_min = 1300000, .cvdd_max = 1350000, }; static const struct da850_opp da850_opp_408 = { .freq = 408000, .prediv = 1, .mult = 17, .postdiv = 1, .cvdd_min = 1300000, .cvdd_max = 1350000, }; static const struct da850_opp da850_opp_372 = { .freq = 372000, .prediv = 2, .mult = 31, .postdiv = 1, .cvdd_min = 1200000, .cvdd_max = 1320000, }; static const struct da850_opp da850_opp_300 = { .freq = 300000, .prediv = 1, .mult = 25, .postdiv = 2, .cvdd_min = 1200000, .cvdd_max = 1320000, }; static const struct da850_opp da850_opp_200 = { .freq = 200000, .prediv = 1, .mult = 25, .postdiv = 3, .cvdd_min = 1100000, .cvdd_max = 1160000, }; static const struct da850_opp da850_opp_96 = { .freq = 96000, .prediv = 1, .mult = 20, .postdiv = 5, .cvdd_min = 1000000, .cvdd_max = 1050000, }; #define OPP(freq) \ { \ .index = (unsigned int) &da850_opp_##freq, \ .frequency = freq * 1000, \ } static struct cpufreq_frequency_table da850_freq_table[] = { OPP(456), OPP(408), OPP(372), OPP(300), OPP(200), OPP(96), { .index = 0, .frequency = CPUFREQ_TABLE_END, }, }; #ifdef CONFIG_REGULATOR static int da850_set_voltage(unsigned int index); static int da850_regulator_init(void); #endif static struct davinci_cpufreq_config cpufreq_info = { .freq_table = da850_freq_table, #ifdef CONFIG_REGULATOR .init = da850_regulator_init, .set_voltage = da850_set_voltage, #endif }; #ifdef CONFIG_REGULATOR static struct regulator *cvdd; static int da850_set_voltage(unsigned int index) { struct da850_opp *opp; if (!cvdd) return -ENODEV; opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max); } static int da850_regulator_init(void) { cvdd = 
regulator_get(NULL, "cvdd"); if (WARN(IS_ERR(cvdd), "Unable to obtain voltage regulator for CVDD;" " voltage scaling unsupported\n")) { return PTR_ERR(cvdd); } return 0; } #endif static struct platform_device da850_cpufreq_device = { .name = "cpufreq-davinci", .dev = { .platform_data = &cpufreq_info, }, .id = -1, }; unsigned int da850_max_speed = 300000; int __init da850_register_cpufreq(char *async_clk) { int i; /* cpufreq driver can help keep an "async" clock constant */ if (async_clk) clk_add_alias("async", da850_cpufreq_device.name, async_clk, NULL); for (i = 0; i < ARRAY_SIZE(da850_freq_table); i++) { if (da850_freq_table[i].frequency <= da850_max_speed) { cpufreq_info.freq_table = &da850_freq_table[i]; break; } } return platform_device_register(&da850_cpufreq_device); } static int da850_round_armrate(struct clk *clk, unsigned long rate) { int i, ret = 0, diff; unsigned int best = (unsigned int) -1; struct cpufreq_frequency_table *table = cpufreq_info.freq_table; rate /= 1000; /* convert to kHz */ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { diff = table[i].frequency - rate; if (diff < 0) diff = -diff; if (diff < best) { best = diff; ret = table[i].frequency; } } return ret * 1000; } static int da850_set_armrate(struct clk *clk, unsigned long index) { struct clk *pllclk = &pll0_clk; return clk_set_rate(pllclk, index); } static int da850_set_pll0rate(struct clk *clk, unsigned long index) { unsigned int prediv, mult, postdiv; struct da850_opp *opp; struct pll_data *pll = clk->pll_data; int ret; opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; prediv = opp->prediv; mult = opp->mult; postdiv = opp->postdiv; ret = davinci_set_pllrate(pll, prediv, mult, postdiv); if (WARN_ON(ret)) return ret; return 0; } #else int __init da850_register_cpufreq(char *async_clk) { return 0; } static int da850_set_armrate(struct clk *clk, unsigned long rate) { return -EINVAL; } static int da850_set_pll0rate(struct clk *clk, unsigned long armrate) { return 
-EINVAL; } static int da850_round_armrate(struct clk *clk, unsigned long rate) { return clk->rate; } #endif int da850_register_pm(struct platform_device *pdev) { int ret; struct davinci_pm_config *pdata = pdev->dev.platform_data; ret = davinci_cfg_reg(DA850_RTC_ALARM); if (ret) return ret; pdata->ddr2_ctlr_base = da8xx_get_mem_ctlr(); pdata->deepsleep_reg = DA8XX_SYSCFG1_VIRT(DA8XX_DEEPSLEEP_REG); pdata->ddrpsc_num = DA8XX_LPSC1_EMIF3C; pdata->cpupll_reg_base = ioremap(DA8XX_PLL0_BASE, SZ_4K); if (!pdata->cpupll_reg_base) return -ENOMEM; pdata->ddrpll_reg_base = ioremap(DA850_PLL1_BASE, SZ_4K); if (!pdata->ddrpll_reg_base) { ret = -ENOMEM; goto no_ddrpll_mem; } pdata->ddrpsc_reg_base = ioremap(DA8XX_PSC1_BASE, SZ_4K); if (!pdata->ddrpsc_reg_base) { ret = -ENOMEM; goto no_ddrpsc_mem; } return platform_device_register(pdev); no_ddrpsc_mem: iounmap(pdata->ddrpll_reg_base); no_ddrpll_mem: iounmap(pdata->cpupll_reg_base); return ret; } static struct davinci_soc_info davinci_soc_info_da850 = { .io_desc = da850_io_desc, .io_desc_num = ARRAY_SIZE(da850_io_desc), .jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG, .ids = da850_ids, .ids_num = ARRAY_SIZE(da850_ids), .cpu_clks = da850_clks, .psc_bases = da850_psc_bases, .psc_bases_num = ARRAY_SIZE(da850_psc_bases), .pinmux_base = DA8XX_SYSCFG0_BASE + 0x120, .pinmux_pins = da850_pins, .pinmux_pins_num = ARRAY_SIZE(da850_pins), .intc_base = DA8XX_CP_INTC_BASE, .intc_type = DAVINCI_INTC_TYPE_CP_INTC, .intc_irq_prios = da850_default_priorities, .intc_irq_num = DA850_N_CP_INTC_IRQ, .timer_info = &da850_timer_info, .gpio_type = GPIO_TYPE_DAVINCI, .gpio_base = DA8XX_GPIO_BASE, .gpio_num = 144, .gpio_irq = IRQ_DA8XX_GPIO0, .serial_dev = &da8xx_serial_device, .emac_pdata = &da8xx_emac_pdata, .sram_dma = DA8XX_ARM_RAM_BASE, .sram_len = SZ_8K, .reset_device = &da8xx_wdt_device, }; void __init da850_init(void) { unsigned int v; davinci_common_init(&davinci_soc_info_da850); da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K); if 
(WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module")) return; da8xx_syscfg1_base = ioremap(DA8XX_SYSCFG1_BASE, SZ_4K); if (WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module")) return; /* * Move the clock source of Async3 domain to PLL1 SYSCLK2. * This helps keeping the peripherals on this domain insulated * from CPU frequency changes caused by DVFS. The firmware sets * both PLL0 and PLL1 to the same frequency so, there should not * be any noticeable change even in non-DVFS use cases. */ da850_set_async3_src(1); /* Unlock writing to PLL0 registers */ v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP0_REG)); v &= ~CFGCHIP0_PLL_MASTER_LOCK; __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP0_REG)); /* Unlock writing to PLL1 registers */ v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG)); v &= ~CFGCHIP3_PLL1_MASTER_LOCK; __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG)); }
gpl-2.0
binarybishop/android_kernel_msm8660-common
drivers/media/video/pvrusb2/pvrusb2-v4l2.c
2537
32574
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/version.h> #include "pvrusb2-context.h" #include "pvrusb2-hdw.h" #include "pvrusb2.h" #include "pvrusb2-debug.h" #include "pvrusb2-v4l2.h" #include "pvrusb2-ioread.h" #include <linux/videodev2.h> #include <media/v4l2-dev.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> struct pvr2_v4l2_dev; struct pvr2_v4l2_fh; struct pvr2_v4l2; struct pvr2_v4l2_dev { struct video_device devbase; /* MUST be first! 
*/ struct pvr2_v4l2 *v4lp; struct pvr2_context_stream *stream; /* Information about this device: */ enum pvr2_config config; /* Expected stream format */ int v4l_type; /* V4L defined type for this device node */ enum pvr2_v4l_type minor_type; /* pvr2-understood minor device type */ }; struct pvr2_v4l2_fh { struct pvr2_channel channel; struct pvr2_v4l2_dev *pdi; enum v4l2_priority prio; struct pvr2_ioread *rhp; struct file *file; struct pvr2_v4l2 *vhead; struct pvr2_v4l2_fh *vnext; struct pvr2_v4l2_fh *vprev; wait_queue_head_t wait_data; int fw_mode_flag; /* Map contiguous ordinal value to input id */ unsigned char *input_map; unsigned int input_cnt; }; struct pvr2_v4l2 { struct pvr2_channel channel; struct pvr2_v4l2_fh *vfirst; struct pvr2_v4l2_fh *vlast; struct v4l2_prio_state prio; /* streams - Note that these must be separately, individually, * allocated pointers. This is because the v4l core is going to * manage their deletion - separately, individually... */ struct pvr2_v4l2_dev *dev_video; struct pvr2_v4l2_dev *dev_radio; }; static int video_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1}; module_param_array(video_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr, "Offset for device's video dev minor"); static int radio_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1}; module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_nr, "Offset for device's radio dev minor"); static int vbi_nr[PVR_NUM] = {[0 ... 
PVR_NUM-1] = -1}; module_param_array(vbi_nr, int, NULL, 0444); MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor"); static struct v4l2_capability pvr_capability ={ .driver = "pvrusb2", .card = "Hauppauge WinTV pvr-usb2", .bus_info = "usb", .version = KERNEL_VERSION(0, 9, 0), .capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | V4L2_CAP_READWRITE), .reserved = {0,0,0,0} }; static struct v4l2_fmtdesc pvr_fmtdesc [] = { { .index = 0, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = V4L2_FMT_FLAG_COMPRESSED, .description = "MPEG1/2", // This should really be V4L2_PIX_FMT_MPEG, but xawtv // breaks when I do that. .pixelformat = 0, // V4L2_PIX_FMT_MPEG, .reserved = { 0, 0, 0, 0 } } }; #define PVR_FORMAT_PIX 0 #define PVR_FORMAT_VBI 1 static struct v4l2_format pvr_format [] = { [PVR_FORMAT_PIX] = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt = { .pix = { .width = 720, .height = 576, // This should really be V4L2_PIX_FMT_MPEG, // but xawtv breaks when I do that. .pixelformat = 0, // V4L2_PIX_FMT_MPEG, .field = V4L2_FIELD_INTERLACED, .bytesperline = 0, // doesn't make sense // here //FIXME : Don't know what to put here... .sizeimage = (32*1024), .colorspace = 0, // doesn't make sense here .priv = 0 } } }, [PVR_FORMAT_VBI] = { .type = V4L2_BUF_TYPE_VBI_CAPTURE, .fmt = { .vbi = { .sampling_rate = 27000000, .offset = 248, .samples_per_line = 1443, .sample_format = V4L2_PIX_FMT_GREY, .start = { 0, 0 }, .count = { 0, 0 }, .flags = 0, .reserved = { 0, 0 } } } } }; /* * pvr_ioctl() * * This is part of Video 4 Linux API. The procedure handles ioctl() calls. 
* */ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) { struct pvr2_v4l2_fh *fh = file->private_data; struct pvr2_v4l2 *vp = fh->vhead; struct pvr2_v4l2_dev *pdi = fh->pdi; struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; long ret = -EINVAL; if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),cmd); } if (!pvr2_hdw_dev_ok(hdw)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "ioctl failed - bad or no context"); return -EFAULT; } /* check priority */ switch (cmd) { case VIDIOC_S_CTRL: case VIDIOC_S_STD: case VIDIOC_S_INPUT: case VIDIOC_S_TUNER: case VIDIOC_S_FREQUENCY: ret = v4l2_prio_check(&vp->prio, fh->prio); if (ret) return ret; } switch (cmd) { case VIDIOC_QUERYCAP: { struct v4l2_capability *cap = arg; memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability)); strlcpy(cap->bus_info,pvr2_hdw_get_bus_info(hdw), sizeof(cap->bus_info)); strlcpy(cap->card,pvr2_hdw_get_desc(hdw),sizeof(cap->card)); ret = 0; break; } case VIDIOC_G_PRIORITY: { enum v4l2_priority *p = arg; *p = v4l2_prio_max(&vp->prio); ret = 0; break; } case VIDIOC_S_PRIORITY: { enum v4l2_priority *prio = arg; ret = v4l2_prio_change(&vp->prio, &fh->prio, *prio); break; } case VIDIOC_ENUMSTD: { struct v4l2_standard *vs = (struct v4l2_standard *)arg; int idx = vs->index; ret = pvr2_hdw_get_stdenum_value(hdw,vs,idx+1); break; } case VIDIOC_G_STD: { int val = 0; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),&val); *(v4l2_std_id *)arg = val; break; } case VIDIOC_S_STD: { ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR), *(v4l2_std_id *)arg); break; } case VIDIOC_ENUMINPUT: { struct pvr2_ctrl *cptr; struct v4l2_input *vi = (struct v4l2_input *)arg; struct v4l2_input tmp; unsigned int cnt; int val; cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); memset(&tmp,0,sizeof(tmp)); tmp.index = vi->index; ret = 0; if (vi->index >= fh->input_cnt) { ret = -EINVAL; break; } val = fh->input_map[vi->index]; switch 
(val) { case PVR2_CVAL_INPUT_TV: case PVR2_CVAL_INPUT_DTV: case PVR2_CVAL_INPUT_RADIO: tmp.type = V4L2_INPUT_TYPE_TUNER; break; case PVR2_CVAL_INPUT_SVIDEO: case PVR2_CVAL_INPUT_COMPOSITE: tmp.type = V4L2_INPUT_TYPE_CAMERA; break; default: ret = -EINVAL; break; } if (ret < 0) break; cnt = 0; pvr2_ctrl_get_valname(cptr,val, tmp.name,sizeof(tmp.name)-1,&cnt); tmp.name[cnt] = 0; /* Don't bother with audioset, since this driver currently always switches the audio whenever the video is switched. */ /* Handling std is a tougher problem. It doesn't make sense in cases where a device might be multi-standard. We could just copy out the current value for the standard, but it can change over time. For now just leave it zero. */ memcpy(vi, &tmp, sizeof(tmp)); ret = 0; break; } case VIDIOC_G_INPUT: { unsigned int idx; struct pvr2_ctrl *cptr; struct v4l2_input *vi = (struct v4l2_input *)arg; int val; cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); val = 0; ret = pvr2_ctrl_get_value(cptr,&val); vi->index = 0; for (idx = 0; idx < fh->input_cnt; idx++) { if (fh->input_map[idx] == val) { vi->index = idx; break; } } break; } case VIDIOC_S_INPUT: { struct v4l2_input *vi = (struct v4l2_input *)arg; if (vi->index >= fh->input_cnt) { ret = -ERANGE; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT), fh->input_map[vi->index]); break; } case VIDIOC_ENUMAUDIO: { /* pkt: FIXME: We are returning one "fake" input here which could very well be called "whatever_we_like". This is for apps that want to see an audio input just to feel comfortable, as well as to test if it can do stereo or sth. There is actually no guarantee that the actual audio input cannot change behind the app's back, but most applications should not mind that either. 
Hopefully, mplayer people will work with us on this (this whole mess is to support mplayer pvr://), or Hans will come up with a more standard way to say "we have inputs but we don 't want you to change them independent of video" which will sort this mess. */ struct v4l2_audio *vin = arg; ret = -EINVAL; if (vin->index > 0) break; strncpy(vin->name, "PVRUSB2 Audio",14); vin->capability = V4L2_AUDCAP_STEREO; ret = 0; break; break; } case VIDIOC_G_AUDIO: { /* pkt: FIXME: see above comment (VIDIOC_ENUMAUDIO) */ struct v4l2_audio *vin = arg; memset(vin,0,sizeof(*vin)); vin->index = 0; strncpy(vin->name, "PVRUSB2 Audio",14); vin->capability = V4L2_AUDCAP_STEREO; ret = 0; break; } case VIDIOC_S_AUDIO: { ret = -EINVAL; break; } case VIDIOC_G_TUNER: { struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; if (vt->index != 0) break; /* Only answer for the 1st tuner */ pvr2_hdw_execute_tuner_poll(hdw); ret = pvr2_hdw_get_tuner_status(hdw,vt); break; } case VIDIOC_S_TUNER: { struct v4l2_tuner *vt=(struct v4l2_tuner *)arg; if (vt->index != 0) break; ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE), vt->audmode); break; } case VIDIOC_S_FREQUENCY: { const struct v4l2_frequency *vf = (struct v4l2_frequency *)arg; unsigned long fv; struct v4l2_tuner vt; int cur_input; struct pvr2_ctrl *ctrlp; ret = pvr2_hdw_get_tuner_status(hdw,&vt); if (ret != 0) break; ctrlp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); ret = pvr2_ctrl_get_value(ctrlp,&cur_input); if (ret != 0) break; if (vf->type == V4L2_TUNER_RADIO) { if (cur_input != PVR2_CVAL_INPUT_RADIO) { pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_RADIO); } } else { if (cur_input == PVR2_CVAL_INPUT_RADIO) { pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_TV); } } fv = vf->frequency; if (vt.capability & V4L2_TUNER_CAP_LOW) { fv = (fv * 125) / 2; } else { fv = fv * 62500; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),fv); break; } case VIDIOC_G_FREQUENCY: { struct v4l2_frequency *vf = (struct 
v4l2_frequency *)arg; int val = 0; int cur_input; struct v4l2_tuner vt; ret = pvr2_hdw_get_tuner_status(hdw,&vt); if (ret != 0) break; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY), &val); if (ret != 0) break; pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT), &cur_input); if (cur_input == PVR2_CVAL_INPUT_RADIO) { vf->type = V4L2_TUNER_RADIO; } else { vf->type = V4L2_TUNER_ANALOG_TV; } if (vt.capability & V4L2_TUNER_CAP_LOW) { val = (val * 2) / 125; } else { val /= 62500; } vf->frequency = val; break; } case VIDIOC_ENUM_FMT: { struct v4l2_fmtdesc *fd = (struct v4l2_fmtdesc *)arg; /* Only one format is supported : mpeg.*/ if (fd->index != 0) break; memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc)); ret = 0; break; } case VIDIOC_G_FMT: { struct v4l2_format *vf = (struct v4l2_format *)arg; int val; switch(vf->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: memcpy(vf, &pvr_format[PVR_FORMAT_PIX], sizeof(struct v4l2_format)); val = 0; pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES), &val); vf->fmt.pix.width = val; val = 0; pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES), &val); vf->fmt.pix.height = val; ret = 0; break; case V4L2_BUF_TYPE_VBI_CAPTURE: // ????? 
Still need to figure out to do VBI correctly ret = -EINVAL; break; default: ret = -EINVAL; break; } break; } case VIDIOC_TRY_FMT: case VIDIOC_S_FMT: { struct v4l2_format *vf = (struct v4l2_format *)arg; ret = 0; switch(vf->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: { int lmin,lmax,ldef; struct pvr2_ctrl *hcp,*vcp; int h = vf->fmt.pix.height; int w = vf->fmt.pix.width; hcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES); vcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES); lmin = pvr2_ctrl_get_min(hcp); lmax = pvr2_ctrl_get_max(hcp); pvr2_ctrl_get_def(hcp, &ldef); if (w == -1) { w = ldef; } else if (w < lmin) { w = lmin; } else if (w > lmax) { w = lmax; } lmin = pvr2_ctrl_get_min(vcp); lmax = pvr2_ctrl_get_max(vcp); pvr2_ctrl_get_def(vcp, &ldef); if (h == -1) { h = ldef; } else if (h < lmin) { h = lmin; } else if (h > lmax) { h = lmax; } memcpy(vf, &pvr_format[PVR_FORMAT_PIX], sizeof(struct v4l2_format)); vf->fmt.pix.width = w; vf->fmt.pix.height = h; if (cmd == VIDIOC_S_FMT) { pvr2_ctrl_set_value(hcp,vf->fmt.pix.width); pvr2_ctrl_set_value(vcp,vf->fmt.pix.height); } } break; case V4L2_BUF_TYPE_VBI_CAPTURE: // ????? Still need to figure out to do VBI correctly ret = -EINVAL; break; default: ret = -EINVAL; break; } break; } case VIDIOC_STREAMON: { if (!fh->pdi->stream) { /* No stream defined for this node. This means that we're not currently allowed to stream from this node. */ ret = -EPERM; break; } ret = pvr2_hdw_set_stream_type(hdw,pdi->config); if (ret < 0) return ret; ret = pvr2_hdw_set_streaming(hdw,!0); break; } case VIDIOC_STREAMOFF: { if (!fh->pdi->stream) { /* No stream defined for this node. This means that we're not currently allowed to stream from this node. 
*/ ret = -EPERM; break; } ret = pvr2_hdw_set_streaming(hdw,0); break; } case VIDIOC_QUERYCTRL: { struct pvr2_ctrl *cptr; int val; struct v4l2_queryctrl *vc = (struct v4l2_queryctrl *)arg; ret = 0; if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) { cptr = pvr2_hdw_get_ctrl_nextv4l( hdw,(vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL)); if (cptr) vc->id = pvr2_ctrl_get_v4lid(cptr); } else { cptr = pvr2_hdw_get_ctrl_v4l(hdw,vc->id); } if (!cptr) { pvr2_trace(PVR2_TRACE_V4LIOCTL, "QUERYCTRL id=0x%x not implemented here", vc->id); ret = -EINVAL; break; } pvr2_trace(PVR2_TRACE_V4LIOCTL, "QUERYCTRL id=0x%x mapping name=%s (%s)", vc->id,pvr2_ctrl_get_name(cptr), pvr2_ctrl_get_desc(cptr)); strlcpy(vc->name,pvr2_ctrl_get_desc(cptr),sizeof(vc->name)); vc->flags = pvr2_ctrl_get_v4lflags(cptr); pvr2_ctrl_get_def(cptr, &val); vc->default_value = val; switch (pvr2_ctrl_get_type(cptr)) { case pvr2_ctl_enum: vc->type = V4L2_CTRL_TYPE_MENU; vc->minimum = 0; vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1; vc->step = 1; break; case pvr2_ctl_bool: vc->type = V4L2_CTRL_TYPE_BOOLEAN; vc->minimum = 0; vc->maximum = 1; vc->step = 1; break; case pvr2_ctl_int: vc->type = V4L2_CTRL_TYPE_INTEGER; vc->minimum = pvr2_ctrl_get_min(cptr); vc->maximum = pvr2_ctrl_get_max(cptr); vc->step = 1; break; default: pvr2_trace(PVR2_TRACE_V4LIOCTL, "QUERYCTRL id=0x%x name=%s not mappable", vc->id,pvr2_ctrl_get_name(cptr)); ret = -EINVAL; break; } break; } case VIDIOC_QUERYMENU: { struct v4l2_querymenu *vm = (struct v4l2_querymenu *)arg; unsigned int cnt = 0; ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw,vm->id), vm->index, vm->name,sizeof(vm->name)-1, &cnt); vm->name[cnt] = 0; break; } case VIDIOC_G_CTRL: { struct v4l2_control *vc = (struct v4l2_control *)arg; int val = 0; ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id), &val); vc->value = val; break; } case VIDIOC_S_CTRL: { struct v4l2_control *vc = (struct v4l2_control *)arg; ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id), vc->value); break; } case 
VIDIOC_G_EXT_CTRLS: { struct v4l2_ext_controls *ctls = (struct v4l2_ext_controls *)arg; struct v4l2_ext_control *ctrl; unsigned int idx; int val; ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),&val); if (ret) { ctls->error_idx = idx; break; } /* Ensure that if read as a 64 bit value, the user will still get a hopefully sane value */ ctrl->value64 = 0; ctrl->value = val; } break; } case VIDIOC_S_EXT_CTRLS: { struct v4l2_ext_controls *ctls = (struct v4l2_ext_controls *)arg; struct v4l2_ext_control *ctrl; unsigned int idx; ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id), ctrl->value); if (ret) { ctls->error_idx = idx; break; } } break; } case VIDIOC_TRY_EXT_CTRLS: { struct v4l2_ext_controls *ctls = (struct v4l2_ext_controls *)arg; struct v4l2_ext_control *ctrl; struct pvr2_ctrl *pctl; unsigned int idx; /* For the moment just validate that the requested control actually exists. 
*/ ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; pctl = pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id); if (!pctl) { ret = -EINVAL; ctls->error_idx = idx; break; } } break; } case VIDIOC_CROPCAP: { struct v4l2_cropcap *cap = (struct v4l2_cropcap *)arg; if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = -EINVAL; break; } ret = pvr2_hdw_get_cropcap(hdw, cap); cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* paranoia */ break; } case VIDIOC_G_CROP: { struct v4l2_crop *crop = (struct v4l2_crop *)arg; int val = 0; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = -EINVAL; break; } ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.left = val; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.top = val; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.width = val; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.height = val; } case VIDIOC_S_CROP: { struct v4l2_crop *crop = (struct v4l2_crop *)arg; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), crop->c.left); if (ret != 0) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), crop->c.top); if (ret != 0) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), crop->c.width); if (ret != 0) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), crop->c.height); if (ret != 0) { ret = -EINVAL; break; } } case VIDIOC_LOG_STATUS: { pvr2_hdw_trigger_module_log(hdw); ret = 0; break; } #ifdef CONFIG_VIDEO_ADV_DEBUG case VIDIOC_DBG_S_REGISTER: case VIDIOC_DBG_G_REGISTER: { 
u64 val; struct v4l2_dbg_register *req = (struct v4l2_dbg_register *)arg; if (cmd == VIDIOC_DBG_S_REGISTER) val = req->val; ret = pvr2_hdw_register_access( hdw, &req->match, req->reg, cmd == VIDIOC_DBG_S_REGISTER, &val); if (cmd == VIDIOC_DBG_G_REGISTER) req->val = val; break; } #endif default : ret = -EINVAL; break; } pvr2_hdw_commit_ctl(hdw); if (ret < 0) { if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { pvr2_trace(PVR2_TRACE_V4LIOCTL, "pvr2_v4l2_do_ioctl failure, ret=%ld", ret); } else { if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { pvr2_trace(PVR2_TRACE_V4LIOCTL, "pvr2_v4l2_do_ioctl failure, ret=%ld" " command was:", ret); v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw), cmd); } } } else { pvr2_trace(PVR2_TRACE_V4LIOCTL, "pvr2_v4l2_do_ioctl complete, ret=%ld (0x%lx)", ret, ret); } return ret; } static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip) { struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw; enum pvr2_config cfg = dip->config; char msg[80]; unsigned int mcnt; /* Construct the unregistration message *before* we actually perform the unregistration step. By doing it this way we don't have to worry about potentially touching deleted resources. */ mcnt = scnprintf(msg, sizeof(msg) - 1, "pvrusb2: unregistered device %s [%s]", video_device_node_name(&dip->devbase), pvr2_config_get_name(cfg)); msg[mcnt] = 0; pvr2_hdw_v4l_store_minor_number(hdw,dip->minor_type,-1); /* Paranoia */ dip->v4lp = NULL; dip->stream = NULL; /* Actual deallocation happens later when all internal references are gone. 
*/ video_unregister_device(&dip->devbase); printk(KERN_INFO "%s\n", msg); } static void pvr2_v4l2_dev_disassociate_parent(struct pvr2_v4l2_dev *dip) { if (!dip) return; if (!dip->devbase.parent) return; dip->devbase.parent = NULL; device_move(&dip->devbase.dev, NULL, DPM_ORDER_NONE); } static void pvr2_v4l2_destroy_no_lock(struct pvr2_v4l2 *vp) { if (vp->dev_video) { pvr2_v4l2_dev_destroy(vp->dev_video); vp->dev_video = NULL; } if (vp->dev_radio) { pvr2_v4l2_dev_destroy(vp->dev_radio); vp->dev_radio = NULL; } pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_v4l2 id=%p",vp); pvr2_channel_done(&vp->channel); kfree(vp); } static void pvr2_video_device_release(struct video_device *vdev) { struct pvr2_v4l2_dev *dev; dev = container_of(vdev,struct pvr2_v4l2_dev,devbase); kfree(dev); } static void pvr2_v4l2_internal_check(struct pvr2_channel *chp) { struct pvr2_v4l2 *vp; vp = container_of(chp,struct pvr2_v4l2,channel); if (!vp->channel.mc_head->disconnect_flag) return; pvr2_v4l2_dev_disassociate_parent(vp->dev_video); pvr2_v4l2_dev_disassociate_parent(vp->dev_radio); if (vp->vfirst) return; pvr2_v4l2_destroy_no_lock(vp); } static long pvr2_v4l2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return video_usercopy(file, cmd, arg, pvr2_v4l2_do_ioctl); } static int pvr2_v4l2_release(struct file *file) { struct pvr2_v4l2_fh *fhp = file->private_data; struct pvr2_v4l2 *vp = fhp->vhead; struct pvr2_hdw *hdw = fhp->channel.mc_head->hdw; pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_release"); if (fhp->rhp) { struct pvr2_stream *sp; pvr2_hdw_set_streaming(hdw,0); sp = pvr2_ioread_get_stream(fhp->rhp); if (sp) pvr2_stream_set_callback(sp,NULL,NULL); pvr2_ioread_destroy(fhp->rhp); fhp->rhp = NULL; } v4l2_prio_close(&vp->prio, fhp->prio); file->private_data = NULL; if (fhp->vnext) { fhp->vnext->vprev = fhp->vprev; } else { vp->vlast = fhp->vprev; } if (fhp->vprev) { fhp->vprev->vnext = fhp->vnext; } else { vp->vfirst = fhp->vnext; } fhp->vnext = NULL; fhp->vprev = NULL; 
fhp->vhead = NULL; pvr2_channel_done(&fhp->channel); pvr2_trace(PVR2_TRACE_STRUCT, "Destroying pvr_v4l2_fh id=%p",fhp); if (fhp->input_map) { kfree(fhp->input_map); fhp->input_map = NULL; } kfree(fhp); if (vp->channel.mc_head->disconnect_flag && !vp->vfirst) { pvr2_v4l2_destroy_no_lock(vp); } return 0; } static int pvr2_v4l2_open(struct file *file) { struct pvr2_v4l2_dev *dip; /* Our own context pointer */ struct pvr2_v4l2_fh *fhp; struct pvr2_v4l2 *vp; struct pvr2_hdw *hdw; unsigned int input_mask = 0; unsigned int input_cnt,idx; int ret = 0; dip = container_of(video_devdata(file),struct pvr2_v4l2_dev,devbase); vp = dip->v4lp; hdw = vp->channel.hdw; pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_open"); if (!pvr2_hdw_dev_ok(hdw)) { pvr2_trace(PVR2_TRACE_OPEN_CLOSE, "pvr2_v4l2_open: hardware not ready"); return -EIO; } fhp = kzalloc(sizeof(*fhp),GFP_KERNEL); if (!fhp) { return -ENOMEM; } init_waitqueue_head(&fhp->wait_data); fhp->pdi = dip; pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp); pvr2_channel_init(&fhp->channel,vp->channel.mc_head); if (dip->v4l_type == VFL_TYPE_RADIO) { /* Opening device as a radio, legal input selection subset is just the radio. */ input_mask = (1 << PVR2_CVAL_INPUT_RADIO); } else { /* Opening the main V4L device, legal input selection subset includes all analog inputs. 
*/ input_mask = ((1 << PVR2_CVAL_INPUT_RADIO) | (1 << PVR2_CVAL_INPUT_TV) | (1 << PVR2_CVAL_INPUT_COMPOSITE) | (1 << PVR2_CVAL_INPUT_SVIDEO)); } ret = pvr2_channel_limit_inputs(&fhp->channel,input_mask); if (ret) { pvr2_channel_done(&fhp->channel); pvr2_trace(PVR2_TRACE_STRUCT, "Destroying pvr_v4l2_fh id=%p (input mask error)", fhp); kfree(fhp); return ret; } input_mask &= pvr2_hdw_get_input_available(hdw); input_cnt = 0; for (idx = 0; idx < (sizeof(input_mask) << 3); idx++) { if (input_mask & (1 << idx)) input_cnt++; } fhp->input_cnt = input_cnt; fhp->input_map = kzalloc(input_cnt,GFP_KERNEL); if (!fhp->input_map) { pvr2_channel_done(&fhp->channel); pvr2_trace(PVR2_TRACE_STRUCT, "Destroying pvr_v4l2_fh id=%p (input map failure)", fhp); kfree(fhp); return -ENOMEM; } input_cnt = 0; for (idx = 0; idx < (sizeof(input_mask) << 3); idx++) { if (!(input_mask & (1 << idx))) continue; fhp->input_map[input_cnt++] = idx; } fhp->vnext = NULL; fhp->vprev = vp->vlast; if (vp->vlast) { vp->vlast->vnext = fhp; } else { vp->vfirst = fhp; } vp->vlast = fhp; fhp->vhead = vp; fhp->file = file; file->private_data = fhp; v4l2_prio_open(&vp->prio, &fhp->prio); fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw); return 0; } static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp) { wake_up(&fhp->wait_data); } static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh) { int ret; struct pvr2_stream *sp; struct pvr2_hdw *hdw; if (fh->rhp) return 0; if (!fh->pdi->stream) { /* No stream defined for this node. This means that we're not currently allowed to stream from this node. */ return -EPERM; } /* First read() attempt. Try to claim the stream and start it... 
*/ if ((ret = pvr2_channel_claim_stream(&fh->channel, fh->pdi->stream)) != 0) { /* Someone else must already have it */ return ret; } fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream); if (!fh->rhp) { pvr2_channel_claim_stream(&fh->channel,NULL); return -ENOMEM; } hdw = fh->channel.mc_head->hdw; sp = fh->pdi->stream->stream; pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh); pvr2_hdw_set_stream_type(hdw,fh->pdi->config); if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret; return pvr2_ioread_set_enabled(fh->rhp,!0); } static ssize_t pvr2_v4l2_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) { struct pvr2_v4l2_fh *fh = file->private_data; int ret; if (fh->fw_mode_flag) { struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; char *tbuf; int c1,c2; int tcnt = 0; unsigned int offs = *ppos; tbuf = kmalloc(PAGE_SIZE,GFP_KERNEL); if (!tbuf) return -ENOMEM; while (count) { c1 = count; if (c1 > PAGE_SIZE) c1 = PAGE_SIZE; c2 = pvr2_hdw_cpufw_get(hdw,offs,tbuf,c1); if (c2 < 0) { tcnt = c2; break; } if (!c2) break; if (copy_to_user(buff,tbuf,c2)) { tcnt = -EFAULT; break; } offs += c2; tcnt += c2; buff += c2; count -= c2; *ppos += c2; } kfree(tbuf); return tcnt; } if (!fh->rhp) { ret = pvr2_v4l2_iosetup(fh); if (ret) { return ret; } } for (;;) { ret = pvr2_ioread_read(fh->rhp,buff,count); if (ret >= 0) break; if (ret != -EAGAIN) break; if (file->f_flags & O_NONBLOCK) break; /* Doing blocking I/O. Wait here. 
*/ ret = wait_event_interruptible( fh->wait_data, pvr2_ioread_avail(fh->rhp) >= 0); if (ret < 0) break; } return ret; } static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; struct pvr2_v4l2_fh *fh = file->private_data; int ret; if (fh->fw_mode_flag) { mask |= POLLIN | POLLRDNORM; return mask; } if (!fh->rhp) { ret = pvr2_v4l2_iosetup(fh); if (ret) return POLLERR; } poll_wait(file,&fh->wait_data,wait); if (pvr2_ioread_avail(fh->rhp) >= 0) { mask |= POLLIN | POLLRDNORM; } return mask; } static const struct v4l2_file_operations vdev_fops = { .owner = THIS_MODULE, .open = pvr2_v4l2_open, .release = pvr2_v4l2_release, .read = pvr2_v4l2_read, .ioctl = pvr2_v4l2_ioctl, .poll = pvr2_v4l2_poll, }; static struct video_device vdev_template = { .fops = &vdev_fops, }; static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip, struct pvr2_v4l2 *vp, int v4l_type) { struct usb_device *usbdev; int mindevnum; int unit_number; int *nr_ptr = NULL; dip->v4lp = vp; usbdev = pvr2_hdw_get_dev(vp->channel.mc_head->hdw); dip->v4l_type = v4l_type; switch (v4l_type) { case VFL_TYPE_GRABBER: dip->stream = &vp->channel.mc_head->video_stream; dip->config = pvr2_config_mpeg; dip->minor_type = pvr2_v4l_type_video; nr_ptr = video_nr; if (!dip->stream) { pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l video dev" " due to missing stream instance\n"); return; } break; case VFL_TYPE_VBI: dip->config = pvr2_config_vbi; dip->minor_type = pvr2_v4l_type_vbi; nr_ptr = vbi_nr; break; case VFL_TYPE_RADIO: dip->stream = &vp->channel.mc_head->video_stream; dip->config = pvr2_config_mpeg; dip->minor_type = pvr2_v4l_type_radio; nr_ptr = radio_nr; break; default: /* Bail out (this should be impossible) */ pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l dev" " due to unrecognized config\n"); return; } memcpy(&dip->devbase,&vdev_template,sizeof(vdev_template)); dip->devbase.release = pvr2_video_device_release; mindevnum = -1; unit_number = 
pvr2_hdw_get_unit_number(vp->channel.mc_head->hdw); if (nr_ptr && (unit_number >= 0) && (unit_number < PVR_NUM)) { mindevnum = nr_ptr[unit_number]; } dip->devbase.parent = &usbdev->dev; if ((video_register_device(&dip->devbase, dip->v4l_type, mindevnum) < 0) && (video_register_device(&dip->devbase, dip->v4l_type, -1) < 0)) { pr_err(KBUILD_MODNAME ": Failed to register pvrusb2 v4l device\n"); } printk(KERN_INFO "pvrusb2: registered device %s [%s]\n", video_device_node_name(&dip->devbase), pvr2_config_get_name(dip->config)); pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw, dip->minor_type,dip->devbase.minor); } struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp) { struct pvr2_v4l2 *vp; vp = kzalloc(sizeof(*vp),GFP_KERNEL); if (!vp) return vp; pvr2_channel_init(&vp->channel,mnp); pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_v4l2 id=%p",vp); vp->channel.check_func = pvr2_v4l2_internal_check; /* register streams */ vp->dev_video = kzalloc(sizeof(*vp->dev_video),GFP_KERNEL); if (!vp->dev_video) goto fail; pvr2_v4l2_dev_init(vp->dev_video,vp,VFL_TYPE_GRABBER); if (pvr2_hdw_get_input_available(vp->channel.mc_head->hdw) & (1 << PVR2_CVAL_INPUT_RADIO)) { vp->dev_radio = kzalloc(sizeof(*vp->dev_radio),GFP_KERNEL); if (!vp->dev_radio) goto fail; pvr2_v4l2_dev_init(vp->dev_radio,vp,VFL_TYPE_RADIO); } return vp; fail: pvr2_trace(PVR2_TRACE_STRUCT,"Failure creating pvr2_v4l2 id=%p",vp); pvr2_v4l2_destroy_no_lock(vp); return NULL; } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 75 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
vmobi-gogh/android_kernel_samsung_gogh
sound/core/isadma.c
4073
3072
/* * ISA DMA support functions * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * Defining following add some delay. Maybe this helps for some broken * ISA DMA controllers. */ #undef HAVE_REALLY_SLOW_DMA_CONTROLLER #include <sound/core.h> #include <asm/dma.h> /** * snd_dma_program - program an ISA DMA transfer * @dma: the dma number * @addr: the physical address of the buffer * @size: the DMA transfer size * @mode: the DMA transfer mode, DMA_MODE_XXX * * Programs an ISA DMA transfer for the given buffer. */ void snd_dma_program(unsigned long dma, unsigned long addr, unsigned int size, unsigned short mode) { unsigned long flags; flags = claim_dma_lock(); disable_dma(dma); clear_dma_ff(dma); set_dma_mode(dma, mode); set_dma_addr(dma, addr); set_dma_count(dma, size); if (!(mode & DMA_MODE_NO_ENABLE)) enable_dma(dma); release_dma_lock(flags); } EXPORT_SYMBOL(snd_dma_program); /** * snd_dma_disable - stop the ISA DMA transfer * @dma: the dma number * * Stops the ISA DMA transfer. 
*/ void snd_dma_disable(unsigned long dma) { unsigned long flags; flags = claim_dma_lock(); clear_dma_ff(dma); disable_dma(dma); release_dma_lock(flags); } EXPORT_SYMBOL(snd_dma_disable); /** * snd_dma_pointer - return the current pointer to DMA transfer buffer in bytes * @dma: the dma number * @size: the dma transfer size * * Returns the current pointer in DMA tranfer buffer in bytes */ unsigned int snd_dma_pointer(unsigned long dma, unsigned int size) { unsigned long flags; unsigned int result, result1; flags = claim_dma_lock(); clear_dma_ff(dma); if (!isa_dma_bridge_buggy) disable_dma(dma); result = get_dma_residue(dma); /* * HACK - read the counter again and choose higher value in order to * avoid reading during counter lower byte roll over if the * isa_dma_bridge_buggy is set. */ result1 = get_dma_residue(dma); if (!isa_dma_bridge_buggy) enable_dma(dma); release_dma_lock(flags); if (unlikely(result < result1)) result = result1; #ifdef CONFIG_SND_DEBUG if (result > size) snd_printk(KERN_ERR "pointer (0x%x) for DMA #%ld is greater than transfer size (0x%x)\n", result, dma, size); #endif if (result >= size || result == 0) return 0; else return size - result; } EXPORT_SYMBOL(snd_dma_pointer);
gpl-2.0
TREX-ROM/android_kernel_lge_hammerheadcaf
arch/powerpc/sysdev/qe_lib/ucc.c
4841
5577
/* * arch/powerpc/sysdev/qe_lib/ucc.c * * QE UCC API Set - UCC specific routines implementations. * * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/export.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/immap_qe.h> #include <asm/qe.h> #include <asm/ucc.h> int ucc_set_qe_mux_mii_mng(unsigned int ucc_num) { unsigned long flags; if (ucc_num > UCC_MAX_NUM - 1) return -EINVAL; spin_lock_irqsave(&cmxgcr_lock, flags); clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); spin_unlock_irqrestore(&cmxgcr_lock, flags); return 0; } EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng); /* Configure the UCC to either Slow or Fast. * * A given UCC can be figured to support either "slow" devices (e.g. UART) * or "fast" devices (e.g. Ethernet). * * 'ucc_num' is the UCC number, from 0 - 7. * * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit * must always be set to 1. */ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed) { u8 __iomem *guemr; /* The GUEMR register is at the same location for both slow and fast devices, so we just use uccX.slow.guemr. 
*/ switch (ucc_num) { case 0: guemr = &qe_immr->ucc1.slow.guemr; break; case 1: guemr = &qe_immr->ucc2.slow.guemr; break; case 2: guemr = &qe_immr->ucc3.slow.guemr; break; case 3: guemr = &qe_immr->ucc4.slow.guemr; break; case 4: guemr = &qe_immr->ucc5.slow.guemr; break; case 5: guemr = &qe_immr->ucc6.slow.guemr; break; case 6: guemr = &qe_immr->ucc7.slow.guemr; break; case 7: guemr = &qe_immr->ucc8.slow.guemr; break; default: return -EINVAL; } clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, UCC_GUEMR_SET_RESERVED3 | speed); return 0; } static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr, unsigned int *reg_num, unsigned int *shift) { unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3); *reg_num = cmx + 1; *cmxucr = &qe_immr->qmx.cmxucr[cmx]; *shift = 16 - 8 * (ucc_num & 2); } int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask) { __be32 __iomem *cmxucr; unsigned int reg_num; unsigned int shift; /* check if the UCC number is in range. */ if (ucc_num > UCC_MAX_NUM - 1) return -EINVAL; get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift); if (set) setbits32(cmxucr, mask << shift); else clrbits32(cmxucr, mask << shift); return 0; } int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, enum comm_dir mode) { __be32 __iomem *cmxucr; unsigned int reg_num; unsigned int shift; u32 clock_bits = 0; /* check if the UCC number is in range. 
*/ if (ucc_num > UCC_MAX_NUM - 1) return -EINVAL; /* The communications direction must be RX or TX */ if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) return -EINVAL; get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift); switch (reg_num) { case 1: switch (clock) { case QE_BRG1: clock_bits = 1; break; case QE_BRG2: clock_bits = 2; break; case QE_BRG7: clock_bits = 3; break; case QE_BRG8: clock_bits = 4; break; case QE_CLK9: clock_bits = 5; break; case QE_CLK10: clock_bits = 6; break; case QE_CLK11: clock_bits = 7; break; case QE_CLK12: clock_bits = 8; break; case QE_CLK15: clock_bits = 9; break; case QE_CLK16: clock_bits = 10; break; default: break; } break; case 2: switch (clock) { case QE_BRG5: clock_bits = 1; break; case QE_BRG6: clock_bits = 2; break; case QE_BRG7: clock_bits = 3; break; case QE_BRG8: clock_bits = 4; break; case QE_CLK13: clock_bits = 5; break; case QE_CLK14: clock_bits = 6; break; case QE_CLK19: clock_bits = 7; break; case QE_CLK20: clock_bits = 8; break; case QE_CLK15: clock_bits = 9; break; case QE_CLK16: clock_bits = 10; break; default: break; } break; case 3: switch (clock) { case QE_BRG9: clock_bits = 1; break; case QE_BRG10: clock_bits = 2; break; case QE_BRG15: clock_bits = 3; break; case QE_BRG16: clock_bits = 4; break; case QE_CLK3: clock_bits = 5; break; case QE_CLK4: clock_bits = 6; break; case QE_CLK17: clock_bits = 7; break; case QE_CLK18: clock_bits = 8; break; case QE_CLK7: clock_bits = 9; break; case QE_CLK8: clock_bits = 10; break; case QE_CLK16: clock_bits = 11; break; default: break; } break; case 4: switch (clock) { case QE_BRG13: clock_bits = 1; break; case QE_BRG14: clock_bits = 2; break; case QE_BRG15: clock_bits = 3; break; case QE_BRG16: clock_bits = 4; break; case QE_CLK5: clock_bits = 5; break; case QE_CLK6: clock_bits = 6; break; case QE_CLK21: clock_bits = 7; break; case QE_CLK22: clock_bits = 8; break; case QE_CLK7: clock_bits = 9; break; case QE_CLK8: clock_bits = 10; break; case QE_CLK16: clock_bits = 11; 
break; default: break; } break; default: break; } /* Check for invalid combination of clock and UCC number */ if (!clock_bits) return -ENOENT; if (mode == COMM_DIR_RX) shift += 4; clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, clock_bits << shift); return 0; }
gpl-2.0
coderzstas/android_kernel_asus_grouper
drivers/parport/parport_cs.c
8169
5882
/*====================================================================== A driver for PCMCIA parallel port adapters (specifically, for the Quatech SPP-100 EPP card: other cards will probably require driver tweaks) parport_cs.c 1.29 2002/10/11 06:57:41 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/major.h> #include <linux/interrupt.h> #include <linux/parport.h> #include <linux/parport_pc.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> /*====================================================================*/ /* Module parameters */ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("PCMCIA parallel port card driver"); MODULE_LICENSE("Dual MPL/GPL"); #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) INT_MODULE_PARM(epp_mode, 1); /*====================================================================*/ #define FORCE_EPP_MODE 0x08 typedef struct parport_info_t { struct pcmcia_device *p_dev; int ndev; struct parport *port; } parport_info_t; static void parport_detach(struct pcmcia_device *p_dev); static int parport_config(struct pcmcia_device *link); static void parport_cs_release(struct pcmcia_device *); static int parport_probe(struct pcmcia_device *link) { parport_info_t *info; dev_dbg(&link->dev, "parport_attach()\n"); /* Create new parport device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; link->priv = info; info->p_dev = link; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; return parport_config(link); } /* parport_attach */ static void parport_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "parport_detach\n"); parport_cs_release(link); kfree(link->priv); } /* parport_detach */ static int parport_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; 
p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; return pcmcia_request_io(p_dev); } static int parport_config(struct pcmcia_device *link) { parport_info_t *info = link->priv; struct parport *p; int ret; dev_dbg(&link->dev, "parport_config\n"); if (epp_mode) link->config_index |= FORCE_EPP_MODE; ret = pcmcia_loop_config(link, parport_config_check, NULL); if (ret) goto failed; if (!link->irq) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; p = parport_pc_probe_port(link->resource[0]->start, link->resource[1]->start, link->irq, PARPORT_DMA_NONE, &link->dev, IRQF_SHARED); if (p == NULL) { printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at " "0x%3x, irq %u failed\n", (unsigned int) link->resource[0]->start, link->irq); goto failed; } p->modes |= PARPORT_MODE_PCSPP; if (epp_mode) p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP; info->ndev = 1; info->port = p; return 0; failed: parport_cs_release(link); return -ENODEV; } /* parport_config */ static void parport_cs_release(struct pcmcia_device *link) { parport_info_t *info = link->priv; dev_dbg(&link->dev, "parport_release\n"); if (info->ndev) { struct parport *p = info->port; parport_pc_unregister_port(p); } info->ndev = 0; pcmcia_disable_device(link); } /* parport_cs_release */ static const struct pcmcia_device_id parport_ids[] = { PCMCIA_DEVICE_FUNC_ID(3), PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc), PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, parport_ids); static struct pcmcia_driver parport_cs_driver = { .owner = THIS_MODULE, .name = "parport_cs", .probe = parport_probe, .remove = parport_detach, .id_table = parport_ids, }; static int __init init_parport_cs(void) { return pcmcia_register_driver(&parport_cs_driver); } static void __exit exit_parport_cs(void) { pcmcia_unregister_driver(&parport_cs_driver); } module_init(init_parport_cs); module_exit(exit_parport_cs);
gpl-2.0