repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
cafecongnghe/android_kernel_lge_mako | drivers/mmc/host/of_mmc_spi.c | 5137 | 4311 | /*
* OpenFirmware bindings for the MMC-over-SPI driver
*
* Copyright (c) MontaVista Software, Inc. 2008.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
#ifndef NO_IRQ
#define NO_IRQ 0
#endif
MODULE_LICENSE("GPL");
/* Indices into struct of_mmc_spi's gpios[]/alow_gpios[] arrays. */
enum {
	CD_GPIO = 0,	/* card-detect GPIO */
	WP_GPIO,	/* write-protect GPIO */
	NUM_GPIOS,	/* array-size marker */
};
/*
 * Driver state wrapped around the generated mmc_spi platform data;
 * recovered from dev->platform_data via to_of_mmc_spi().
 */
struct of_mmc_spi {
	int gpios[NUM_GPIOS];		/* GPIO numbers; failed requests become -EINVAL */
	bool alow_gpios[NUM_GPIOS];	/* true when the matching GPIO is active-low */
	int detect_irq;			/* card-detect IRQ, NO_IRQ when unmapped */
	struct mmc_spi_platform_data pdata;	/* handed to the mmc_spi driver */
};
/* Recover the wrapping of_mmc_spi from a device's platform data. */
static struct of_mmc_spi *to_of_mmc_spi(struct device *dev)
{
	struct mmc_spi_platform_data *pdata = dev->platform_data;

	return container_of(pdata, struct of_mmc_spi, pdata);
}
/*
 * Sample one of the stored GPIOs, folding in its active-low polarity so
 * the result is 1 when the line is logically asserted.
 */
static int of_mmc_spi_read_gpio(struct device *dev, int gpio_num)
{
	struct of_mmc_spi *oms = to_of_mmc_spi(dev);
	bool raw = gpio_get_value(oms->gpios[gpio_num]);

	return raw ^ oms->alow_gpios[gpio_num];
}
/* mmc_spi .get_cd hook: sample the card-detect GPIO. */
static int of_mmc_spi_get_cd(struct device *dev)
{
	return of_mmc_spi_read_gpio(dev, CD_GPIO);
}
/* mmc_spi .get_ro hook: sample the write-protect GPIO. */
static int of_mmc_spi_get_ro(struct device *dev)
{
	return of_mmc_spi_read_gpio(dev, WP_GPIO);
}
/*
 * mmc_spi .init hook: attach @irqhandler as a threaded handler for the
 * card-detect interrupt, with the mmc host as its cookie.
 */
static int of_mmc_spi_init(struct device *dev,
			   irqreturn_t (*irqhandler)(int, void *), void *mmc)
{
	struct of_mmc_spi *state = to_of_mmc_spi(dev);

	return request_threaded_irq(state->detect_irq, NULL, irqhandler, 0,
				    dev_name(dev), mmc);
}
static void of_mmc_spi_exit(struct device *dev, void *mmc)
{
struct of_mmc_spi *oms = to_of_mmc_spi(dev);
free_irq(oms->detect_irq, mmc);
}
/*
 * Build mmc_spi platform data from the SPI device's OF node.
 *
 * Returns the device's existing platform data untouched if it already
 * has some (or has no OF node), the freshly built pdata on success, or
 * NULL on allocation/parse failure.  Balanced by mmc_spi_put_pdata().
 */
struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct device_node *np = dev->of_node;
	struct of_mmc_spi *oms;
	const u32 *voltage_ranges;
	int num_ranges;
	int i;
	int ret = -EINVAL;

	if (dev->platform_data || !np)
		return dev->platform_data;

	oms = kzalloc(sizeof(*oms), GFP_KERNEL);
	if (!oms)
		return NULL;

	/*
	 * of_get_property() leaves *lenp untouched when the property is
	 * absent, so num_ranges is only meaningful after the pointer has
	 * been checked.  The old code divided the (possibly uninitialized)
	 * length first.
	 */
	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	if (!voltage_ranges) {
		dev_err(dev, "OF: voltage-ranges unspecified\n");
		goto err_ocr;
	}
	/* Each range is a pair of u32 cells: <min-mV max-mV>. */
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!num_ranges) {
		dev_err(dev, "OF: voltage-ranges unspecified\n");
		goto err_ocr;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 mask;

		mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]),
					       be32_to_cpu(voltage_ranges[j + 1]));
		if (!mask) {
			ret = -EINVAL;
			dev_err(dev, "OF: voltage-range #%d is invalid\n", i);
			goto err_ocr;
		}
		oms->pdata.ocr_mask |= mask;
	}

	/*
	 * Optional card-detect / write-protect GPIOs; a failed request
	 * simply leaves the corresponding callback unset.
	 */
	for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) {
		enum of_gpio_flags gpio_flags;

		oms->gpios[i] = of_get_gpio_flags(np, i, &gpio_flags);
		if (!gpio_is_valid(oms->gpios[i]))
			continue;

		ret = gpio_request(oms->gpios[i], dev_name(dev));
		if (ret < 0) {
			oms->gpios[i] = -EINVAL;
			continue;
		}

		if (gpio_flags & OF_GPIO_ACTIVE_LOW)
			oms->alow_gpios[i] = true;
	}

	if (gpio_is_valid(oms->gpios[CD_GPIO]))
		oms->pdata.get_cd = of_mmc_spi_get_cd;
	if (gpio_is_valid(oms->gpios[WP_GPIO]))
		oms->pdata.get_ro = of_mmc_spi_get_ro;

	/* Without a card-detect IRQ the MMC core must poll for insertion. */
	oms->detect_irq = irq_of_parse_and_map(np, 0);
	if (oms->detect_irq != NO_IRQ) {
		oms->pdata.init = of_mmc_spi_init;
		oms->pdata.exit = of_mmc_spi_exit;
	} else {
		oms->pdata.caps |= MMC_CAP_NEEDS_POLL;
	}

	dev->platform_data = &oms->pdata;
	return dev->platform_data;
err_ocr:
	kfree(oms);
	return NULL;
}
EXPORT_SYMBOL(mmc_spi_get_pdata);
/*
 * Release everything mmc_spi_get_pdata() acquired: free any requested
 * GPIOs, free the of_mmc_spi wrapper, and clear dev->platform_data.
 * A no-op when the device has no OF-built platform data.
 */
void mmc_spi_put_pdata(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct device_node *np = dev->of_node;
	struct of_mmc_spi *oms;
	int i;

	if (!dev->platform_data || !np)
		return;

	/*
	 * Only derive the container pointer once platform_data is known
	 * to be non-NULL; the old code ran container_of() on a possibly
	 * NULL pointer before the guard.
	 */
	oms = to_of_mmc_spi(dev);

	for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) {
		if (gpio_is_valid(oms->gpios[i]))
			gpio_free(oms->gpios[i]);
	}

	kfree(oms);
	dev->platform_data = NULL;
}
EXPORT_SYMBOL(mmc_spi_put_pdata);
| gpl-2.0 |
maxfu/legacy_android_kernel_exynos4210 | arch/arm/mach-vt8500/devices-vt8500.c | 7953 | 2689 | /* linux/arch/arm/mach-vt8500/devices-vt8500.c
*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <mach/vt8500_regs.h>
#include <mach/vt8500_irqs.h>
#include <mach/i8042.h>
#include "devices.h"
/*
 * Attach MMIO and IRQ resources to the VT8500 on-chip peripheral
 * platform devices declared in devices.h.
 */
void __init vt8500_set_resources(void)
{
	struct platform_device *const uarts[] = {
		&vt8500_device_uart0,
		&vt8500_device_uart1,
		&vt8500_device_uart2,
		&vt8500_device_uart3,
	};
	const unsigned long uart_bases[] = {
		VT8500_UART0_BASE,
		VT8500_UART1_BASE,
		VT8500_UART2_BASE,
		VT8500_UART3_BASE,
	};
	const int uart_irqs[] = { IRQ_UART0, IRQ_UART1, IRQ_UART2, IRQ_UART3 };
	struct resource tmp[3];
	int i;

	tmp[0] = wmt_mmio_res(VT8500_LCDC_BASE, SZ_1K);
	tmp[1] = wmt_irq_res(IRQ_LCDC);
	wmt_res_add(&vt8500_device_lcdc, tmp, 2);

	/* All four UARTs share the same register window size. */
	for (i = 0; i < ARRAY_SIZE(uarts); i++) {
		tmp[0] = wmt_mmio_res(uart_bases[i], 0x1040);
		tmp[1] = wmt_irq_res(uart_irqs[i]);
		wmt_res_add(uarts[i], tmp, 2);
	}

	tmp[0] = wmt_mmio_res(VT8500_EHCI_BASE, SZ_512);
	tmp[1] = wmt_irq_res(IRQ_EHCI);
	wmt_res_add(&vt8500_device_ehci, tmp, 2);

	tmp[0] = wmt_mmio_res(VT8500_GEGEA_BASE, SZ_256);
	wmt_res_add(&vt8500_device_ge_rops, tmp, 1);

	tmp[0] = wmt_mmio_res(VT8500_PWM_BASE, 0x44);
	wmt_res_add(&vt8500_device_pwm, tmp, 1);

	tmp[0] = wmt_mmio_res(VT8500_RTC_BASE, 0x2c);
	tmp[1] = wmt_irq_res(IRQ_RTC);
	tmp[2] = wmt_irq_res(IRQ_RTCSM);
	wmt_res_add(&vt8500_device_rtc, tmp, 3);
}
/* Fill in the non-resource globals consumed by the shared wmt code. */
static void __init vt8500_set_externs(void)
{
	const int ext_irqs[] = {
		IRQ_EXT0, IRQ_EXT1, IRQ_EXT2, IRQ_EXT3,
		IRQ_EXT4, IRQ_EXT5, IRQ_EXT6, IRQ_EXT7,
	};
	int i;

	/* Non-resource-aware stuff */
	wmt_ic_base = VT8500_IC_BASE;
	wmt_gpio_base = VT8500_GPIO_BASE;
	wmt_pmc_base = VT8500_PMC_BASE;
	wmt_i8042_base = VT8500_PS2_BASE;

	wmt_nr_irqs = VT8500_NR_IRQS;
	wmt_timer_irq = IRQ_PMCOS0;

	/* Eight external GPIO interrupt lines. */
	for (i = 0; i < ARRAY_SIZE(ext_irqs); i++)
		wmt_gpio_ext_irq[i] = ext_irqs[i];

	wmt_i8042_kbd_irq = IRQ_PS2KBD;
	wmt_i8042_aux_irq = IRQ_PS2MOUSE;
}
/* Establish the static MMIO mappings and the shared wmt globals. */
void __init vt8500_map_io(void)
{
	iotable_init(wmt_io_desc, ARRAY_SIZE(wmt_io_desc));

	/* Must run before interrupt and timer initialization. */
	vt8500_set_externs();
}
| gpl-2.0 |
madprogrammer/linux-aurora | drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 11281 | 10619 | /*
* Linux driver attachment glue for PCI based U320 controllers.
*
* Copyright (c) 2000-2001 Adaptec Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm_pci.c#25 $
*/
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
#include "aic79xx_pci.h"
/* Define the macro locally since it's different for different class of chips.
*/
#define ID(x) \
ID2C(x), \
ID2C(IDIROC(x))
/*
 * PCI IDs this driver binds to.  The ID() macro adds both the plain
 * device and its IROC (RAID-mode) variant; the trailing masked entries
 * catch chip variants whose exact subsystem IDs are not listed above.
 */
static const struct pci_device_id ahd_linux_pci_id_table[] = {
	/* aic7901 based controllers */
	ID(ID_AHA_29320A),
	ID(ID_AHA_29320ALP),
	ID(ID_AHA_29320LPE),
	/* aic7902 based controllers */
	ID(ID_AHA_29320),
	ID(ID_AHA_29320B),
	ID(ID_AHA_29320LP),
	ID(ID_AHA_39320),
	ID(ID_AHA_39320_B),
	ID(ID_AHA_39320A),
	ID(ID_AHA_39320D),
	ID(ID_AHA_39320D_HP),
	ID(ID_AHA_39320D_B),
	ID(ID_AHA_39320D_B_HP),
	/* Generic chip probes for devices we don't know exactly. */
	ID16(ID_AIC7901 & ID_9005_GENERIC_MASK),
	ID(ID_AIC7901A & ID_DEV_VENDOR_MASK),
	ID16(ID_AIC7902 & ID_9005_GENERIC_MASK),
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table);
#ifdef CONFIG_PM
/*
 * PCI .suspend: quiesce the controller, save PCI config space, and
 * drop to D3hot when the target system state is a sleep state.
 */
static int
ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ahd_softc *ahd = pci_get_drvdata(pdev);
	int rc = ahd_suspend(ahd);

	if (rc)
		return rc;

	ahd_pci_suspend(ahd);
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);

	return rc;
}
/*
 * PCI .resume: restore power state and config space, re-enable the
 * device, then bring the controller back up.
 */
static int
ahd_linux_pci_dev_resume(struct pci_dev *pdev)
{
	struct ahd_softc *ahd = pci_get_drvdata(pdev);
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	ahd_pci_resume(ahd);
	ahd_resume(ahd);
	return rc;
}
#endif
/*
 * PCI .remove: detach the SCSI host, mask controller interrupts under
 * the ahd lock, then free the softc.
 */
static void
ahd_linux_pci_dev_remove(struct pci_dev *pdev)
{
	struct ahd_softc *ahd = pci_get_drvdata(pdev);
	u_long s;

	if (ahd->platform_data && ahd->platform_data->host)
		scsi_remove_host(ahd->platform_data->host);

	/* Quiesce the hardware before tearing the softc down. */
	ahd_lock(ahd, &s);
	ahd_intr_enable(ahd, FALSE);
	ahd_unlock(ahd, &s);

	ahd_free(ahd);
}
static void
ahd_linux_pci_inherit_flags(struct ahd_softc *ahd)
{
struct pci_dev *pdev = ahd->dev_softc, *master_pdev;
unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
master_pdev = pci_get_slot(pdev->bus, master_devfn);
if (master_pdev) {
struct ahd_softc *master = pci_get_drvdata(master_pdev);
if (master) {
ahd->flags &= ~AHD_BIOS_ENABLED;
ahd->flags |= master->flags & AHD_BIOS_ENABLED;
} else
printk(KERN_ERR "aic79xx: no multichannel peer found!\n");
pci_dev_put(master_pdev);
}
}
/*
 * PCI .probe: allocate and configure a softc for a recognized U320
 * controller, pick a DMA mask, and register the SCSI host.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int
ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	char buf[80];
	struct ahd_softc *ahd;
	ahd_dev_softc_t pci;
	const struct ahd_pci_identity *entry;
	char *name;
	int error;
	struct device *dev = &pdev->dev;

	pci = pdev;
	entry = ahd_find_pci_device(pci);
	if (entry == NULL)
		return (-ENODEV);

	/*
	 * Allocate a softc for this card and
	 * set it up for attachment by our
	 * common detect routine.
	 */
	sprintf(buf, "ahd_pci:%d:%d:%d",
		ahd_get_pci_bus(pci),
		ahd_get_pci_slot(pci),
		ahd_get_pci_function(pci));
	name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
	if (name == NULL)
		return (-ENOMEM);
	strcpy(name, buf);
	ahd = ahd_alloc(NULL, name);
	if (ahd == NULL)
		return (-ENOMEM);

	if (pci_enable_device(pdev)) {
		ahd_free(ahd);
		return (-ENODEV);
	}
	pci_set_master(pdev);

	/*
	 * Choose the widest DMA mask the platform needs: 64-bit when the
	 * required mask exceeds 39 bits, then 39-bit, else plain 32-bit.
	 */
	if (sizeof(dma_addr_t) > 4) {
		const u64 required_mask = dma_get_required_mask(dev);

		if (required_mask > DMA_BIT_MASK(39) &&
		    dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
			ahd->flags |= AHD_64BIT_ADDRESSING;
		else if (required_mask > DMA_BIT_MASK(32) &&
			 dma_set_mask(dev, DMA_BIT_MASK(39)) == 0)
			ahd->flags |= AHD_39BIT_ADDRESSING;
		else
			dma_set_mask(dev, DMA_BIT_MASK(32));
	} else {
		dma_set_mask(dev, DMA_BIT_MASK(32));
	}
	ahd->dev_softc = pci;
	error = ahd_pci_config(ahd, entry);
	if (error != 0) {
		ahd_free(ahd);
		return (-error);
	}

	/*
	 * Second Function PCI devices need to inherit some
	 * settings from function 0.
	 */
	if ((ahd->features & AHD_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
		ahd_linux_pci_inherit_flags(ahd);

	pci_set_drvdata(pdev, ahd);

	ahd_linux_register_host(ahd, &aic79xx_driver_template);
	return (0);
}
/* PCI driver glue; PM entry points are compiled in only with CONFIG_PM. */
static struct pci_driver aic79xx_pci_driver = {
	.name		= "aic79xx",
	.probe		= ahd_linux_pci_dev_probe,
#ifdef CONFIG_PM
	.suspend	= ahd_linux_pci_dev_suspend,
	.resume		= ahd_linux_pci_dev_resume,
#endif
	.remove		= ahd_linux_pci_dev_remove,
	.id_table	= ahd_linux_pci_id_table
};
/* Register the aic79xx driver with the PCI core. */
int
ahd_linux_pci_init(void)
{
	return pci_register_driver(&aic79xx_pci_driver);
}
/* Unregister the aic79xx driver from the PCI core. */
void
ahd_linux_pci_exit(void)
{
	pci_unregister_driver(&aic79xx_pci_driver);
}
/*
 * Reserve the two 256-byte PIO windows (BARs 0 and 3) used when memory
 * mapped access is unavailable.  Returns 0 or a positive ENOMEM,
 * matching the BSD-style conventions used throughout this driver.
 */
static int
ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, resource_size_t *base,
				 resource_size_t *base2)
{
	*base = pci_resource_start(ahd->dev_softc, 0);
	/*
	 * This is really the 3rd bar and should be at index 2,
	 * but the Linux PCI code doesn't know how to "count" 64bit
	 * bars.
	 */
	*base2 = pci_resource_start(ahd->dev_softc, 3);
	if (*base == 0 || *base2 == 0)
		return (ENOMEM);

	if (request_region(*base, 256, "aic79xx") == NULL)
		return (ENOMEM);
	if (request_region(*base2, 256, "aic79xx") == NULL) {
		/* Give back the first window before failing. */
		release_region(*base, 256);
		return (ENOMEM);
	}
	return (0);
}
/*
 * Reserve and map the controller's MMIO register window (BAR 1).
 * On success *bus_addr holds the bus address and *maddr a virtual
 * pointer adjusted for any sub-page offset of the BAR.  Returns 0 or
 * a positive ENOMEM (BSD-style) when MMIO is disallowed, buggy on
 * this chip, or unavailable.
 */
static int
ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
				 resource_size_t *bus_addr,
				 uint8_t __iomem **maddr)
{
	resource_size_t	start;
	resource_size_t	base_page;
	u_long	base_offset;
	int	error = 0;

	if (aic79xx_allow_memio == 0)
		return (ENOMEM);

	if ((ahd->bugs & AHD_PCIX_MMAPIO_BUG) != 0)
		return (ENOMEM);

	start = pci_resource_start(ahd->dev_softc, 1);
	/* Map from the page boundary; re-add the offset afterwards. */
	base_page = start & PAGE_MASK;
	base_offset = start - base_page;
	if (start != 0) {
		*bus_addr = start;
		if (!request_mem_region(start, 0x1000, "aic79xx"))
			error = ENOMEM;
		if (!error) {
			*maddr = ioremap_nocache(base_page, base_offset + 512);
			if (*maddr == NULL) {
				error = ENOMEM;
				release_mem_region(start, 0x1000);
			} else
				*maddr += base_offset;
		}
	} else
		error = ENOMEM;
	return (error);
}
/*
 * Map the controller's register banks, preferring MMIO and falling
 * back to PIO if the memory-mapped window fails a register-access
 * self test.  Updates the PCI command register to enable whichever
 * access method ends up in use.  Returns 0 or a positive ENOMEM.
 */
int
ahd_pci_map_registers(struct ahd_softc *ahd)
{
	uint32_t command;
	resource_size_t base;
	uint8_t	__iomem *maddr;
	int	error;

	/*
	 * If its allowed, we prefer memory mapped access.
	 */
	command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, 4);
	/* Start with both access methods disabled; re-enable below. */
	command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN);
	base = 0;
	maddr = NULL;
	error = ahd_linux_pci_reserve_mem_region(ahd, &base, &maddr);
	if (error == 0) {
		ahd->platform_data->mem_busaddr = base;
		ahd->tags[0] = BUS_SPACE_MEMIO;
		ahd->bshs[0].maddr = maddr;
		ahd->tags[1] = BUS_SPACE_MEMIO;
		ahd->bshs[1].maddr = maddr + 0x100;
		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     command | PCIM_CMD_MEMEN, 4);
		/* Verify MMIO actually works before committing to it. */
		if (ahd_pci_test_register_access(ahd) != 0) {
			printk("aic79xx: PCI Device %d:%d:%d "
			       "failed memory mapped test.  Using PIO.\n",
			       ahd_get_pci_bus(ahd->dev_softc),
			       ahd_get_pci_slot(ahd->dev_softc),
			       ahd_get_pci_function(ahd->dev_softc));
			iounmap(maddr);
			release_mem_region(ahd->platform_data->mem_busaddr,
					   0x1000);
			ahd->bshs[0].maddr = NULL;
			maddr = NULL;
		} else
			command |= PCIM_CMD_MEMEN;
	} else if (bootverbose) {
		printk("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
		       "unavailable. Cannot memory map device.\n",
		       ahd_get_pci_bus(ahd->dev_softc),
		       ahd_get_pci_slot(ahd->dev_softc),
		       ahd_get_pci_function(ahd->dev_softc),
		       (unsigned long long)base);
	}

	/* MMIO failed or was refused: fall back to I/O ports. */
	if (maddr == NULL) {
		resource_size_t base2;

		error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2);
		if (error == 0) {
			ahd->tags[0] = BUS_SPACE_PIO;
			ahd->tags[1] = BUS_SPACE_PIO;
			ahd->bshs[0].ioport = (u_long)base;
			ahd->bshs[1].ioport = (u_long)base2;
			command |= PCIM_CMD_PORTEN;
		} else {
			printk("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "
			       "0x%llx unavailable. Cannot map device.\n",
			       ahd_get_pci_bus(ahd->dev_softc),
			       ahd_get_pci_slot(ahd->dev_softc),
			       ahd_get_pci_function(ahd->dev_softc),
			       (unsigned long long)base,
			       (unsigned long long)base2);
		}
	}
	ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4);
	return (error);
}
int
ahd_pci_map_int(struct ahd_softc *ahd)
{
int error;
error = request_irq(ahd->dev_softc->irq, ahd_linux_isr,
IRQF_SHARED, "aic79xx", ahd);
if (!error)
ahd->platform_data->irq = ahd->dev_softc->irq;
return (-error);
}
/*
 * Set the controller's PCI power state.
 * NOTE(review): new_state is an ahd_power_state but pci_set_power_state()
 * takes a pci_power_t; this relies on the two enumerations sharing
 * values — confirm against the aic79xx headers.
 */
void
ahd_power_state_change(struct ahd_softc *ahd, ahd_power_state new_state)
{
	pci_set_power_state(ahd->dev_softc, new_state);
}
| gpl-2.0 |
shukiz/VAR-SOM-AM33-SDK7-TI-Kernel-3-12-y | fs/ocfs2/inode.c | 18 | 40477 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* inode.c
*
* vfs' aops, fops, dops and iops
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <asm/byteorder.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dir.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "namei.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "sysfile.h"
#include "uptodate.h"
#include "xattr.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
/* Lookup/init arguments threaded through iget5_locked() and friends. */
struct ocfs2_find_inode_args
{
	u64		fi_blkno;		/* on-disk block number of the dinode */
	unsigned long	fi_ino;			/* inode-hash value derived from fi_blkno */
	unsigned int	fi_flags;		/* OCFS2_FI_FLAG_* lookup modifiers */
	unsigned int	fi_sysfile_type;	/* nonzero for system-file inodes */
};
static struct lock_class_key ocfs2_sysfile_lock_key[NUM_SYSTEM_INODES];
static int ocfs2_read_locked_inode(struct inode *inode,
struct ocfs2_find_inode_args *args);
static int ocfs2_init_locked_inode(struct inode *inode, void *opaque);
static int ocfs2_find_actor(struct inode *inode, void *opaque);
static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
struct inode *inode,
struct buffer_head *fe_bh);
/* Propagate OCFS2_*_FL attribute bits from ip_attr into VFS i_flags. */
void ocfs2_set_inode_flags(struct inode *inode)
{
	unsigned int attr = OCFS2_I(inode)->ip_attr;
	unsigned int vfs_flags = 0;

	if (attr & OCFS2_IMMUTABLE_FL)
		vfs_flags |= S_IMMUTABLE;
	if (attr & OCFS2_SYNC_FL)
		vfs_flags |= S_SYNC;
	if (attr & OCFS2_APPEND_FL)
		vfs_flags |= S_APPEND;
	if (attr & OCFS2_NOATIME_FL)
		vfs_flags |= S_NOATIME;
	if (attr & OCFS2_DIRSYNC_FL)
		vfs_flags |= S_DIRSYNC;

	/* Replace only the bits we manage; leave the rest of i_flags. */
	inode->i_flags = (inode->i_flags &
			  ~(S_IMMUTABLE | S_SYNC | S_APPEND |
			    S_NOATIME | S_DIRSYNC)) | vfs_flags;
}
/* Propagate flags from i_flags to OCFS2_I(inode)->ip_attr */
void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi)
{
	unsigned int vfs_flags = oi->vfs_inode.i_flags;
	unsigned int attr = 0;

	if (vfs_flags & S_SYNC)
		attr |= OCFS2_SYNC_FL;
	if (vfs_flags & S_APPEND)
		attr |= OCFS2_APPEND_FL;
	if (vfs_flags & S_IMMUTABLE)
		attr |= OCFS2_IMMUTABLE_FL;
	if (vfs_flags & S_NOATIME)
		attr |= OCFS2_NOATIME_FL;
	if (vfs_flags & S_DIRSYNC)
		attr |= OCFS2_DIRSYNC_FL;

	/* Replace only the bits we manage; leave the rest of ip_attr. */
	oi->ip_attr = (oi->ip_attr &
		       ~(OCFS2_SYNC_FL | OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL |
			 OCFS2_NOATIME_FL | OCFS2_DIRSYNC_FL)) | attr;
}
/*
 * Look up an inode by block number in the inode cache only; does not
 * read from disk.  Returns NULL when the inode is not cached.
 */
struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno)
{
	struct ocfs2_find_inode_args args = {
		.fi_blkno	 = blkno,
		.fi_flags	 = 0,
		.fi_ino		 = ino_from_blkno(sb, blkno),
		.fi_sysfile_type = 0,
	};

	return ilookup5(sb, blkno, ocfs2_find_actor, &args);
}
/*
 * Get (and, if needed, read from disk) the inode at the given block
 * number.  Returns a referenced inode on success or an ERR_PTR:
 * -EINVAL for blkno 0, -ENOMEM on cache allocation failure, -ESTALE
 * when the read left a bad inode.
 */
struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
			 int sysfile_type)
{
	struct inode *inode = NULL;
	struct super_block *sb = osb->sb;
	struct ocfs2_find_inode_args args;

	trace_ocfs2_iget_begin((unsigned long long)blkno, flags,
			       sysfile_type);

	/* Ok. By now we've either got the offsets passed to us by the
	 * caller, or we just pulled them off the bh. Lets do some
	 * sanity checks to make sure they're OK. */
	if (blkno == 0) {
		inode = ERR_PTR(-EINVAL);
		mlog_errno(PTR_ERR(inode));
		goto bail;
	}

	args.fi_blkno = blkno;
	args.fi_flags = flags;
	args.fi_ino = ino_from_blkno(sb, blkno);
	args.fi_sysfile_type = sysfile_type;

	inode = iget5_locked(sb, args.fi_ino, ocfs2_find_actor,
			     ocfs2_init_locked_inode, &args);
	/* inode was *not* in the inode cache. 2.6.x requires
	 * us to do our own read_inode call and unlock it
	 * afterwards. */
	if (inode == NULL) {
		inode = ERR_PTR(-ENOMEM);
		mlog_errno(PTR_ERR(inode));
		goto bail;
	}
	trace_ocfs2_iget5_locked(inode->i_state);
	/* I_NEW means iget5_locked allocated it; fill it from disk. */
	if (inode->i_state & I_NEW) {
		ocfs2_read_locked_inode(inode, &args);
		unlock_new_inode(inode);
	}
	if (is_bad_inode(inode)) {
		iput(inode);
		inode = ERR_PTR(-ESTALE);
		goto bail;
	}

bail:
	if (!IS_ERR(inode)) {
		trace_ocfs2_iget_end(inode,
			(unsigned long long)OCFS2_I(inode)->ip_blkno);
	}

	return inode;
}
/*
* here's how inodes get read from disk:
* iget5_locked -> find_actor -> OCFS2_FIND_ACTOR
* found? : return the in-memory inode
* not found? : get_new_inode -> OCFS2_INIT_LOCKED_INODE
*/
/*
 * iget5_locked() match callback: an inode matches when its block
 * number equals the one being searched for.  Returns 1 on match.
 */
static int ocfs2_find_actor(struct inode *inode, void *opaque)
{
	struct ocfs2_find_inode_args *args = opaque;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_bug_on_msg(!inode, "No inode in find actor!\n");
	trace_ocfs2_find_actor(inode, inode->i_ino, opaque, args->fi_blkno);

	return oi->ip_blkno == args->fi_blkno;
}
/*
 * initialize the new inode, but don't do anything that would cause
 * us to sleep.
 * return 0 on success, 1 on failure
 */
static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
{
	struct ocfs2_find_inode_args *args = opaque;
	static struct lock_class_key ocfs2_quota_ip_alloc_sem_key,
				     ocfs2_file_ip_alloc_sem_key;

	inode->i_ino = args->fi_ino;
	OCFS2_I(inode)->ip_blkno = args->fi_blkno;
	/* System files get a per-type lockdep class for i_mutex. */
	if (args->fi_sysfile_type != 0)
		lockdep_set_class(&inode->i_mutex,
			&ocfs2_sysfile_lock_key[args->fi_sysfile_type]);
	/* Quota inodes take ip_alloc_sem in a different order than
	 * regular files, so give them their own lockdep class. */
	if (args->fi_sysfile_type == USER_QUOTA_SYSTEM_INODE ||
	    args->fi_sysfile_type == GROUP_QUOTA_SYSTEM_INODE ||
	    args->fi_sysfile_type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
	    args->fi_sysfile_type == LOCAL_GROUP_QUOTA_SYSTEM_INODE)
		lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
				  &ocfs2_quota_ip_alloc_sem_key);
	else
		lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
				  &ocfs2_file_ip_alloc_sem_key);

	return 0;
}
/*
 * Fill in the VFS inode from an on-disk ocfs2 dinode.  @create_ino
 * asks us to derive i_ino from the block number and initialize the
 * meta/open cluster lock resources (used when the inode was created
 * locally rather than found via iget).
 */
void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
			  int create_ino)
{
	struct super_block *sb;
	struct ocfs2_super *osb;
	int use_plocks = 1;

	sb = inode->i_sb;
	osb = OCFS2_SB(sb);

	/* Fall back to the no-plocks file ops when the stack can't do
	 * cluster-aware posix locks or flocks are forced local. */
	if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) ||
	    ocfs2_mount_local(osb) || !ocfs2_stack_supports_plocks())
		use_plocks = 0;

	/*
	 * These have all been checked by ocfs2_read_inode_block() or set
	 * by ocfs2_mknod_locked(), so a failure is a code bug.
	 */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));  /* This means that read_inode
						cannot create a superblock
						inode today.  change if
						that is needed. */
	BUG_ON(!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)));
	BUG_ON(le32_to_cpu(fe->i_fs_generation) != osb->fs_generation);

	OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
	OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);

	inode->i_version = 1;
	inode->i_generation = le32_to_cpu(fe->i_generation);
	inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
	inode->i_mode = le16_to_cpu(fe->i_mode);
	i_uid_write(inode, le32_to_cpu(fe->i_uid));
	i_gid_write(inode, le32_to_cpu(fe->i_gid));

	/* Fast symlinks will have i_size but no allocated clusters. */
	if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
	} else {
		inode->i_blocks = ocfs2_inode_sector_count(inode);
		inode->i_mapping->a_ops = &ocfs2_aops;
	}
	inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
	inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
	inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
	inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
	inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
	inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);

	if (OCFS2_I(inode)->ip_blkno != le64_to_cpu(fe->i_blkno))
		mlog(ML_ERROR,
		     "ip_blkno %llu != i_blkno %llu!\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno,
		     (unsigned long long)le64_to_cpu(fe->i_blkno));

	set_nlink(inode, ocfs2_read_links_count(fe));

	trace_ocfs2_populate_inode(OCFS2_I(inode)->ip_blkno,
				   le32_to_cpu(fe->i_flags));
	/* System and quota files are exempt from quota accounting. */
	if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) {
		OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE;
		inode->i_flags |= S_NOQUOTA;
	}

	if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) {
		OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
	} else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) {
		OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
	} else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) {
		inode->i_flags |= S_NOQUOTA;
	} else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) {
		/* we can't actually hit this as read_inode can't
		 * handle superblocks today ;-) */
		BUG();
	}

	/* Wire up per-type file/inode operations. */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		if (use_plocks)
			inode->i_fop = &ocfs2_fops;
		else
			inode->i_fop = &ocfs2_fops_no_plocks;
		inode->i_op = &ocfs2_file_iops;
		i_size_write(inode, le64_to_cpu(fe->i_size));
		break;
	case S_IFDIR:
		inode->i_op = &ocfs2_dir_iops;
		if (use_plocks)
			inode->i_fop = &ocfs2_dops;
		else
			inode->i_fop = &ocfs2_dops_no_plocks;
		i_size_write(inode, le64_to_cpu(fe->i_size));
		OCFS2_I(inode)->ip_dir_lock_gen = 1;
		break;
	case S_IFLNK:
		inode->i_op = &ocfs2_symlink_inode_operations;
		i_size_write(inode, le64_to_cpu(fe->i_size));
		break;
	default:
		inode->i_op = &ocfs2_special_file_iops;
		init_special_inode(inode, inode->i_mode,
				   inode->i_rdev);
		break;
	}

	if (create_ino) {
		inode->i_ino = ino_from_blkno(inode->i_sb,
			       le64_to_cpu(fe->i_blkno));

		/*
		 * If we ever want to create system files from kernel,
		 * the generation argument to
		 * ocfs2_inode_lock_res_init() will have to change.
		 */
		BUG_ON(le32_to_cpu(fe->i_flags) & OCFS2_SYSTEM_FL);

		ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_inode_lockres,
					  OCFS2_LOCK_TYPE_META, 0, inode);

		ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres,
					  OCFS2_LOCK_TYPE_OPEN, 0, inode);
	}

	ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_rw_lockres,
				  OCFS2_LOCK_TYPE_RW, inode->i_generation,
				  inode);

	ocfs2_set_inode_flags(inode);

	OCFS2_I(inode)->ip_last_used_slot = 0;
	OCFS2_I(inode)->ip_last_used_group = 0;

	if (S_ISDIR(inode->i_mode))
		ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv,
				    OCFS2_RESV_FLAG_DIR);
}
/*
 * Read a freshly-allocated (I_NEW) inode's dinode from disk and
 * populate the VFS inode.  Takes the open and meta cluster locks when
 * allowed (see the big comment below) so cold-cache stats are fast.
 * Marks the inode bad and returns a negative errno on failure.
 */
static int ocfs2_read_locked_inode(struct inode *inode,
				   struct ocfs2_find_inode_args *args)
{
	struct super_block *sb;
	struct ocfs2_super *osb;
	struct ocfs2_dinode *fe;
	struct buffer_head *bh = NULL;
	int status, can_lock;
	u32 generation = 0;

	status = -EINVAL;
	if (inode == NULL || inode->i_sb == NULL) {
		mlog(ML_ERROR, "bad inode\n");
		return status;
	}
	sb = inode->i_sb;
	osb = OCFS2_SB(sb);

	if (!args) {
		mlog(ML_ERROR, "bad inode args\n");
		make_bad_inode(inode);
		return status;
	}

	/*
	 * To improve performance of cold-cache inode stats, we take
	 * the cluster lock here if possible.
	 *
	 * Generally, OCFS2 never trusts the contents of an inode
	 * unless it's holding a cluster lock, so taking it here isn't
	 * a correctness issue as much as it is a performance
	 * improvement.
	 *
	 * There are three times when taking the lock is not a good idea:
	 *
	 * 1) During startup, before we have initialized the DLM.
	 *
	 * 2) If we are reading certain system files which never get
	 *    cluster locks (local alloc, truncate log).
	 *
	 * 3) If the process doing the iget() is responsible for
	 *    orphan dir recovery. We're holding the orphan dir lock and
	 *    can get into a deadlock with another process on another
	 *    node in ->delete_inode().
	 *
	 * #1 and #2 can be simply solved by never taking the lock
	 * here for system files (which are the only type we read
	 * during mount). It's a heavier approach, but our main
	 * concern is user-accessible files anyway.
	 *
	 * #3 works itself out because we'll eventually take the
	 * cluster lock before trusting anything anyway.
	 */
	can_lock = !(args->fi_flags & OCFS2_FI_FLAG_SYSFILE)
		&& !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY)
		&& !ocfs2_mount_local(osb);

	trace_ocfs2_read_locked_inode(
		(unsigned long long)OCFS2_I(inode)->ip_blkno, can_lock);

	/*
	 * To maintain backwards compatibility with older versions of
	 * ocfs2-tools, we still store the generation value for system
	 * files. The only ones that actually matter to userspace are
	 * the journals, but it's easier and inexpensive to just flag
	 * all system files similarly.
	 */
	if (args->fi_flags & OCFS2_FI_FLAG_SYSFILE)
		generation = osb->fs_generation;

	ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_inode_lockres,
				  OCFS2_LOCK_TYPE_META,
				  generation, inode);

	ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres,
				  OCFS2_LOCK_TYPE_OPEN,
				  0, inode);

	if (can_lock) {
		status = ocfs2_open_lock(inode);
		if (status) {
			make_bad_inode(inode);
			mlog_errno(status);
			return status;
		}
		status = ocfs2_inode_lock(inode, NULL, 0);
		if (status) {
			make_bad_inode(inode);
			mlog_errno(status);
			return status;
		}
	}

	/* During orphan recovery, try the open lock but never block. */
	if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
		status = ocfs2_try_open_lock(inode, 0);
		if (status) {
			make_bad_inode(inode);
			return status;
		}
	}

	if (can_lock) {
		status = ocfs2_read_inode_block_full(inode, &bh,
						     OCFS2_BH_IGNORE_CACHE);
	} else {
		status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
		/*
		 * If buffer is in jbd, then its checksum may not have been
		 * computed as yet.
		 */
		if (!status && !buffer_jbd(bh))
			status = ocfs2_validate_inode_block(osb->sb, bh);
	}
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = -EINVAL;
	fe = (struct ocfs2_dinode *) bh->b_data;

	/*
	 * This is a code bug. Right now the caller needs to
	 * understand whether it is asking for a system file inode or
	 * not so the proper lock names can be built.
	 */
	mlog_bug_on_msg(!!(fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) !=
			!!(args->fi_flags & OCFS2_FI_FLAG_SYSFILE),
			"Inode %llu: system file state is ambigous\n",
			(unsigned long long)args->fi_blkno);

	if (S_ISCHR(le16_to_cpu(fe->i_mode)) ||
	    S_ISBLK(le16_to_cpu(fe->i_mode)))
		inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));

	ocfs2_populate_inode(inode, fe, 0);

	BUG_ON(args->fi_blkno != le64_to_cpu(fe->i_blkno));

	status = 0;

bail:
	if (can_lock)
		ocfs2_inode_unlock(inode, 0);

	if (status < 0)
		make_bad_inode(inode);

	if (args && bh)
		brelse(bh);

	return status;
}
/* Flush dirty buffers of the filesystem's backing block device. */
void ocfs2_sync_blockdev(struct super_block *sb)
{
	sync_blockdev(sb->s_bdev);
}
/*
 * Truncate an inode's data to zero as part of deletion: journal an
 * i_size=0 update, then free the allocated extents.  Inodes with no
 * clusters (inline data, fast symlinks) are skipped entirely.
 */
static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh)
{
	int status = 0;
	struct ocfs2_dinode *fe;
	handle_t *handle = NULL;

	fe = (struct ocfs2_dinode *) fe_bh->b_data;

	/*
	 * This check will also skip truncate of inodes with inline
	 * data and fast symlinks.
	 */
	if (fe->i_clusters) {
		if (ocfs2_should_order_data(inode))
			ocfs2_begin_ordered_truncate(inode, 0);

		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			handle = NULL;
			mlog_errno(status);
			goto out;
		}

		status = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
						 fe_bh,
						 OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			goto out;
		}

		/* Zero the size before freeing the extents below. */
		i_size_write(inode, 0);

		status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
		if (status < 0) {
			mlog_errno(status);
			goto out;
		}

		/* Commit the size update before the (separately
		 * journaled) extent teardown. */
		ocfs2_commit_trans(osb, handle);
		handle = NULL;

		status = ocfs2_commit_truncate(osb, inode, fe_bh);
		if (status < 0) {
			mlog_errno(status);
			goto out;
		}
	}

out:
	if (handle)
		ocfs2_commit_trans(osb, handle);
	return status;
}
/*
 * Free the on-disk inode.
 *
 * Under the owning slot's inode-allocator lock (i_mutex + EX cluster
 * lock) this:
 *   o removes the orphan dir entry, unless the inode is flagged
 *     OCFS2_INODE_SKIP_ORPHAN_DIR,
 *   o stamps i_dtime and clears the VALID and ORPHANED disk flags,
 *   o drops the quota reference and hands the dinode back to the
 *     allocator it was suballocated from (di->i_suballoc_slot).
 */
static int ocfs2_remove_inode(struct inode *inode,
			      struct buffer_head *di_bh,
			      struct inode *orphan_dir_inode,
			      struct buffer_head *orphan_dir_bh)
{
	int status;
	struct inode *inode_alloc_inode = NULL;
	struct buffer_head *inode_alloc_bh = NULL;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;

	/* The dinode must be released to the allocator of the slot it
	 * was originally suballocated from. */
	inode_alloc_inode =
		ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
					    le16_to_cpu(di->i_suballoc_slot));
	if (!inode_alloc_inode) {
		status = -EEXIST;
		mlog_errno(status);
		goto bail;
	}

	mutex_lock(&inode_alloc_inode->i_mutex);
	status = ocfs2_inode_lock(inode_alloc_inode, &inode_alloc_bh, 1);
	if (status < 0) {
		mutex_unlock(&inode_alloc_inode->i_mutex);
		mlog_errno(status);
		goto bail;
	}

	handle = ocfs2_start_trans(osb, OCFS2_DELETE_INODE_CREDITS +
				   ocfs2_quota_trans_credits(inode->i_sb));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto bail_unlock;
	}

	if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
		status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode,
					  orphan_dir_bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail_commit;
		}
	}

	/* set the inodes dtime */
	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail_commit;
	}

	di->i_dtime = cpu_to_le64(CURRENT_TIME.tv_sec);
	di->i_flags &= cpu_to_le32(~(OCFS2_VALID_FL | OCFS2_ORPHANED_FL));
	ocfs2_journal_dirty(handle, di_bh);

	/* The block no longer holds valid metadata - drop it from the
	 * uptodate cache before the dinode is freed. */
	ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh);
	dquot_free_inode(inode);

	status = ocfs2_free_dinode(handle, inode_alloc_inode,
				   inode_alloc_bh, di);
	if (status < 0)
		mlog_errno(status);

bail_commit:
	ocfs2_commit_trans(osb, handle);
bail_unlock:
	ocfs2_inode_unlock(inode_alloc_inode, 1);
	mutex_unlock(&inode_alloc_inode->i_mutex);
	brelse(inode_alloc_bh);
bail:
	/* iput(NULL) is a no-op, so this covers the lookup-failure path. */
	iput(inode_alloc_inode);
	return status;
}
/*
* Serialize with orphan dir recovery. If the process doing
* recovery on this orphan dir does an iget() with the dir
* i_mutex held, we'll deadlock here. Instead we detect this
* and exit early - recovery will wipe this inode for us.
*/
static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb,
					     int slot)
{
	int ret = 0;

	spin_lock(&osb->osb_lock);
	if (!ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs,
				     slot)) {
		/* Tell the orphan recovery process for this slot to
		 * wait for us to handle the wipe. */
		osb->osb_orphan_wipes[slot]++;
	} else {
		/* Recovery already owns this orphan dir; bail out and
		 * let it wipe the inode. */
		ret = -EDEADLK;
	}
	spin_unlock(&osb->osb_lock);

	trace_ocfs2_check_orphan_recovery_state(slot, ret);

	return ret;
}
/*
 * Drop the per-slot wipe reference taken in
 * ocfs2_check_orphan_recovery_state() and wake any orphan recovery
 * process waiting on osb_wipe_event.
 */
static void ocfs2_signal_wipe_completion(struct ocfs2_super *osb,
					 int slot)
{
	spin_lock(&osb->osb_lock);
	osb->osb_orphan_wipes[slot]--;
	spin_unlock(&osb->osb_lock);

	wake_up(&osb->osb_wipe_event);
}
/*
 * Perform the actual on-disk wipe of an inode whose deletion was
 * approved by ocfs2_query_inode_wipe(): truncate it, tear down any dir
 * index tree, extended attributes and refcount tree, then free the
 * dinode via ocfs2_remove_inode().
 *
 * Unless OCFS2_INODE_SKIP_ORPHAN_DIR is set, the orphan dir of the
 * slot recorded in di->i_orphaned_slot is locked (i_mutex + EX cluster
 * lock) across the whole operation to keep orphan recovery on other
 * nodes from wiping the same inode concurrently.
 */
static int ocfs2_wipe_inode(struct inode *inode,
			    struct buffer_head *di_bh)
{
	int status, orphaned_slot = -1;
	struct inode *orphan_dir_inode = NULL;
	struct buffer_head *orphan_dir_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;

	if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
		orphaned_slot = le16_to_cpu(di->i_orphaned_slot);

		status = ocfs2_check_orphan_recovery_state(osb, orphaned_slot);
		if (status)
			return status;

		orphan_dir_inode = ocfs2_get_system_file_inode(osb,
							       ORPHAN_DIR_SYSTEM_INODE,
							       orphaned_slot);
		if (!orphan_dir_inode) {
			status = -EEXIST;
			mlog_errno(status);
			goto bail;
		}

		/* Lock the orphan dir. The lock will be held for the entire
		 * delete_inode operation. We do this now to avoid races with
		 * recovery completion on other nodes. */
		mutex_lock(&orphan_dir_inode->i_mutex);
		status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
		if (status < 0) {
			mutex_unlock(&orphan_dir_inode->i_mutex);
			mlog_errno(status);
			goto bail;
		}
	}

	/* we do this while holding the orphan dir lock because we
	 * don't want recovery being run from another node to try an
	 * inode delete underneath us -- this will result in two nodes
	 * truncating the same file! */
	status = ocfs2_truncate_for_delete(osb, inode, di_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_dir;
	}

	/* Remove any dir index tree */
	if (S_ISDIR(inode->i_mode)) {
		status = ocfs2_dx_dir_truncate(inode, di_bh);
		if (status) {
			mlog_errno(status);
			goto bail_unlock_dir;
		}
	}

	/* Free extended attribute resources associated with this inode. */
	status = ocfs2_xattr_remove(inode, di_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_dir;
	}

	status = ocfs2_remove_refcount_tree(inode, di_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_dir;
	}

	status = ocfs2_remove_inode(inode, di_bh, orphan_dir_inode,
				    orphan_dir_bh);
	if (status < 0)
		mlog_errno(status);

bail_unlock_dir:
	/* No orphan dir was locked in the SKIP_ORPHAN_DIR case, so
	 * there is nothing to release. */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)
		return status;

	ocfs2_inode_unlock(orphan_dir_inode, 1);
	mutex_unlock(&orphan_dir_inode->i_mutex);
	brelse(orphan_dir_bh);
bail:
	iput(orphan_dir_inode);
	ocfs2_signal_wipe_completion(osb, orphaned_slot);

	return status;
}
/* There is a series of simple checks that should be done before a
* trylock is even considered. Encapsulate those in this function. */
/*
 * Returns 1 if it is safe to go on and attempt deleting this inode,
 * 0 otherwise.  Rejects the root inode, calls made from the
 * downconvert thread, system files, and inodes flagged
 * OCFS2_INODE_SKIP_DELETE.
 */
static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_inode_is_valid_to_delete(current, osb->dc_task,
					     (unsigned long long)oi->ip_blkno,
					     oi->ip_flags);

	/* We shouldn't be getting here for the root directory
	 * inode.. */
	if (inode == osb->root_inode) {
		mlog(ML_ERROR, "Skipping delete of root inode.\n");
		goto bail;
	}

	/*
	 * If we're coming from downconvert_thread we can't go into our own
	 * voting [hello, deadlock city!] so we cannot delete the inode. But
	 * since we dropped last inode ref when downconverting dentry lock,
	 * we cannot have the file open and thus the node doing unlink will
	 * take care of deleting the inode.
	 */
	if (current == osb->dc_task)
		goto bail;

	spin_lock(&oi->ip_lock);
	/* OCFS2 *never* deletes system files. This should technically
	 * never get here as system file inodes should always have a
	 * positive link count. */
	if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
		mlog(ML_ERROR, "Skipping delete of system file %llu\n",
		     (unsigned long long)oi->ip_blkno);
		goto bail_unlock;
	}

	/* If we have allowed wipe of this inode for another node, it
	 * will be marked here so we can safely skip it. Recovery will
	 * cleanup any inodes we might inadvertently skip here. */
	if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE)
		goto bail_unlock;

	ret = 1;
bail_unlock:
	spin_unlock(&oi->ip_lock);
bail:
	return ret;
}
/* Query the cluster to determine whether we should wipe an inode from
* disk or not.
*
* Requires the inode to have the cluster lock. */
/*
 * Decide whether this node may wipe the inode from disk.  On return,
 * *wipe is 1 only if the inode is unlinked, correctly orphaned, and we
 * won the exclusive open-lock trylock against all other nodes.
 *
 * The local 'reason' codes (1 = not valid to delete, 2 = reflink
 * target without ORPHANED_FL, 3 = another node still has it open) are
 * only fed to the trace point.
 */
static int ocfs2_query_inode_wipe(struct inode *inode,
				  struct buffer_head *di_bh,
				  int *wipe)
{
	int status = 0, reason = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di;

	*wipe = 0;

	trace_ocfs2_query_inode_wipe_begin((unsigned long long)oi->ip_blkno,
					   inode->i_nlink);

	/* While we were waiting for the cluster lock in
	 * ocfs2_delete_inode, another node might have asked to delete
	 * the inode. Recheck our flags to catch this. */
	if (!ocfs2_inode_is_valid_to_delete(inode)) {
		reason = 1;
		goto bail;
	}

	/* Now that we have an up to date inode, we can double check
	 * the link count. */
	if (inode->i_nlink)
		goto bail;

	/* Do some basic inode verification... */
	di = (struct ocfs2_dinode *) di_bh->b_data;
	if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL)) &&
	    !(oi->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
		/*
		 * Inodes in the orphan dir must have ORPHANED_FL.  The only
		 * inodes that come back out of the orphan dir are reflink
		 * targets. A reflink target may be moved out of the orphan
		 * dir between the time we scan the directory and the time we
		 * process it. This would lead to HAS_REFCOUNT_FL being set but
		 * ORPHANED_FL not.
		 */
		if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) {
			reason = 2;
			goto bail;
		}

		/* for lack of a better error? */
		status = -EEXIST;
		mlog(ML_ERROR,
		     "Inode %llu (on-disk %llu) not orphaned! "
		     "Disk flags 0x%x, inode flags 0x%x\n",
		     (unsigned long long)oi->ip_blkno,
		     (unsigned long long)le64_to_cpu(di->i_blkno),
		     le32_to_cpu(di->i_flags), oi->ip_flags);
		goto bail;
	}

	/* has someone already deleted us?! baaad... */
	if (di->i_dtime) {
		status = -EEXIST;
		mlog_errno(status);
		goto bail;
	}

	/*
	 * This is how ocfs2 determines whether an inode is still live
	 * within the cluster. Every node takes a shared read lock on
	 * the inode open lock in ocfs2_read_locked_inode(). When we
	 * get to ->delete_inode(), each node tries to convert it's
	 * lock to an exclusive. Trylocks are serialized by the inode
	 * meta data lock. If the upconvert succeeds, we know the inode
	 * is no longer live and can be deleted.
	 *
	 * Though we call this with the meta data lock held, the
	 * trylock keeps us from ABBA deadlock.
	 */
	status = ocfs2_try_open_lock(inode, 1);
	if (status == -EAGAIN) {
		/* Another node still holds the open lock - not an error,
		 * we just must not wipe. */
		status = 0;
		reason = 3;
		goto bail;
	}
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*wipe = 1;
	trace_ocfs2_query_inode_wipe_succ(le16_to_cpu(di->i_orphaned_slot));

bail:
	trace_ocfs2_query_inode_wipe_end(status, reason);
	return status;
}
/* Support function for ocfs2_delete_inode. Will help us keep the
* inode data in a consistent state for clear_inode. Always truncates
* pages, optionally sync's them first. */
/* Support function for ocfs2_delete_inode. Will help us keep the
 * inode data in a consistent state for clear_inode. Always truncates
 * pages, optionally sync's them first. */
static void ocfs2_cleanup_delete_inode(struct inode *inode,
				       int sync_data)
{
	trace_ocfs2_cleanup_delete_inode(
		(unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
	if (sync_data)
		filemap_write_and_wait(inode->i_mapping);
	truncate_inode_pages(&inode->i_data, 0);
}
/*
 * Evict-time deletion path.  Blocks signals, takes the NFS sync lock
 * (shared) and the inode cluster lock (EX), asks the cluster whether
 * this node may wipe the inode, and if so wipes it and marks it
 * OCFS2_INODE_DELETED for ocfs2_clear_inode().
 */
static void ocfs2_delete_inode(struct inode *inode)
{
	int wipe, status;
	sigset_t oldset;
	struct buffer_head *di_bh = NULL;

	trace_ocfs2_delete_inode(inode->i_ino,
				 (unsigned long long)OCFS2_I(inode)->ip_blkno,
				 is_bad_inode(inode));

	/* When we fail in read_inode() we mark inode as bad. The second test
	 * catches the case when inode allocation fails before allocating
	 * a block for inode. */
	if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno)
		goto bail;

	if (!ocfs2_inode_is_valid_to_delete(inode)) {
		/* It's probably not necessary to truncate_inode_pages
		 * here but we do it for safety anyway (it will most
		 * likely be a no-op anyway) */
		ocfs2_cleanup_delete_inode(inode, 0);
		goto bail;
	}

	dquot_initialize(inode);

	/* We want to block signals in delete_inode as the lock and
	 * messaging paths may return us -ERESTARTSYS. Which would
	 * cause us to exit early, resulting in inodes being orphaned
	 * forever. */
	ocfs2_block_signals(&oldset);

	/*
	 * Synchronize us against ocfs2_get_dentry. We take this in
	 * shared mode so that all nodes can still concurrently
	 * process deletes.
	 */
	status = ocfs2_nfs_sync_lock(OCFS2_SB(inode->i_sb), 0);
	if (status < 0) {
		mlog(ML_ERROR, "getting nfs sync lock(PR) failed %d\n", status);
		ocfs2_cleanup_delete_inode(inode, 0);
		goto bail_unblock;
	}

	/* Lock down the inode. This gives us an up to date view of
	 * it's metadata (for verification), and allows us to
	 * serialize delete_inode on multiple nodes.
	 *
	 * Even though we might be doing a truncate, we don't take the
	 * allocation lock here as it won't be needed - nobody will
	 * have the file open.
	 */
	status = ocfs2_inode_lock(inode, &di_bh, 1);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		ocfs2_cleanup_delete_inode(inode, 0);
		goto bail_unlock_nfs_sync;
	}

	/* Query the cluster. This will be the final decision made
	 * before we go ahead and wipe the inode. */
	status = ocfs2_query_inode_wipe(inode, di_bh, &wipe);
	if (!wipe || status < 0) {
		/* Error and remote inode busy both mean we won't be
		 * removing the inode, so they take almost the same
		 * path. */
		if (status < 0)
			mlog_errno(status);

		/* Someone in the cluster has disallowed a wipe of
		 * this inode, or it was never completely
		 * orphaned. Write out the pages and exit now. */
		ocfs2_cleanup_delete_inode(inode, 1);
		goto bail_unlock_inode;
	}

	ocfs2_cleanup_delete_inode(inode, 0);

	status = ocfs2_wipe_inode(inode, di_bh);
	if (status < 0) {
		/* -EDEADLK means orphan recovery owns this inode and
		 * will wipe it for us - not worth logging. */
		if (status != -EDEADLK)
			mlog_errno(status);
		goto bail_unlock_inode;
	}

	/*
	 * Mark the inode as successfully deleted.
	 *
	 * This is important for ocfs2_clear_inode() as it will check
	 * this flag and skip any checkpointing work
	 *
	 * ocfs2_stuff_meta_lvb() also uses this flag to invalidate
	 * the LVB for other nodes.
	 */
	OCFS2_I(inode)->ip_flags |= OCFS2_INODE_DELETED;

bail_unlock_inode:
	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);

bail_unlock_nfs_sync:
	ocfs2_nfs_sync_unlock(OCFS2_SB(inode->i_sb), 0);

bail_unblock:
	ocfs2_unblock_signals(&oldset);
bail:
	return;
}
/*
 * Final teardown of the in-memory inode: drop quota, release the open
 * lock, free the cluster lock resources and the metadata cache, and
 * assert (via mlog_bug_on_msg) that no locks, io markers, cached
 * blocks or open counts survive this point.
 */
static void ocfs2_clear_inode(struct inode *inode)
{
	int status;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	clear_inode(inode);
	trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
				inode->i_nlink);

	mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
			"Inode=%lu\n", inode->i_ino);

	dquot_drop(inode);

	/* To prevent remote deletes we hold open lock before, now it
	 * is time to unlock PR and EX open locks. */
	ocfs2_open_unlock(inode);

	/* Do these before all the other work so that we don't bounce
	 * the downconvert thread while waiting to destroy the locks. */
	ocfs2_mark_lockres_freeing(osb, &oi->ip_rw_lockres);
	ocfs2_mark_lockres_freeing(osb, &oi->ip_inode_lockres);
	ocfs2_mark_lockres_freeing(osb, &oi->ip_open_lockres);

	ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap,
			   &oi->ip_la_data_resv);
	ocfs2_resv_init_once(&oi->ip_la_data_resv);

	/* We very well may get a clear_inode before all an inodes
	 * metadata has hit disk. Of course, we can't drop any cluster
	 * locks until the journal has finished with it. The only
	 * exception here are successfully wiped inodes - their
	 * metadata can now be considered to be part of the system
	 * inodes from which it came. */
	if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED))
		ocfs2_checkpoint_inode(inode);

	mlog_bug_on_msg(!list_empty(&oi->ip_io_markers),
			"Clear inode of %llu, inode has io markers\n",
			(unsigned long long)oi->ip_blkno);

	ocfs2_extent_map_trunc(inode, 0);

	status = ocfs2_drop_inode_locks(inode);
	if (status < 0)
		mlog_errno(status);

	ocfs2_lock_res_free(&oi->ip_rw_lockres);
	ocfs2_lock_res_free(&oi->ip_inode_lockres);
	ocfs2_lock_res_free(&oi->ip_open_lockres);

	ocfs2_metadata_cache_exit(INODE_CACHE(inode));

	mlog_bug_on_msg(INODE_CACHE(inode)->ci_num_cached,
			"Clear inode of %llu, inode has %u cache items\n",
			(unsigned long long)oi->ip_blkno,
			INODE_CACHE(inode)->ci_num_cached);

	mlog_bug_on_msg(!(INODE_CACHE(inode)->ci_flags & OCFS2_CACHE_FL_INLINE),
			"Clear inode of %llu, inode has a bad flag\n",
			(unsigned long long)oi->ip_blkno);

	mlog_bug_on_msg(spin_is_locked(&oi->ip_lock),
			"Clear inode of %llu, inode is locked\n",
			(unsigned long long)oi->ip_blkno);

	/* trylock + immediate unlock asserts the mutex is free. */
	mlog_bug_on_msg(!mutex_trylock(&oi->ip_io_mutex),
			"Clear inode of %llu, io_mutex is locked\n",
			(unsigned long long)oi->ip_blkno);
	mutex_unlock(&oi->ip_io_mutex);

	/*
	 * down_trylock() returns 0, down_write_trylock() returns 1
	 * kernel 1, world 0
	 */
	mlog_bug_on_msg(!down_write_trylock(&oi->ip_alloc_sem),
			"Clear inode of %llu, alloc_sem is locked\n",
			(unsigned long long)oi->ip_blkno);
	up_write(&oi->ip_alloc_sem);

	mlog_bug_on_msg(oi->ip_open_count,
			"Clear inode of %llu has open count %d\n",
			(unsigned long long)oi->ip_blkno, oi->ip_open_count);

	/* Clear all other flags. */
	oi->ip_flags = 0;
	oi->ip_dir_start_lookup = 0;
	oi->ip_blkno = 0ULL;

	/*
	 * ip_jinode is used to track txns against this inode. We ensure that
	 * the journal is flushed before journal shutdown. Thus it is safe to
	 * have inodes get cleaned up after journal shutdown.
	 */
	jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal,
				       &oi->ip_jinode);
}
/*
 * ->evict_inode() entry point.  Inodes that are unlinked, or that may
 * have been orphaned by another node, go through the full delete path;
 * everything else just drops its page cache.  In all cases the
 * in-memory inode is torn down afterwards.
 */
void ocfs2_evict_inode(struct inode *inode)
{
	int may_wipe = !inode->i_nlink ||
		(OCFS2_I(inode)->ip_flags & OCFS2_INODE_MAYBE_ORPHANED);

	if (may_wipe)
		ocfs2_delete_inode(inode);
	else
		truncate_inode_pages(&inode->i_data, 0);

	ocfs2_clear_inode(inode);
}
/* Called under inode_lock, with no more references on the
* struct inode, so it's safe here to check the flags field
* and to manipulate i_nlink without any other locks. */
int ocfs2_drop_inode(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno,
			       inode->i_nlink, oi->ip_flags);

	/* Possibly-orphaned inodes must always be fully evicted so the
	 * delete path gets a chance to run. */
	if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)
		return 1;

	return generic_drop_inode(inode);
}
/*
* This is called from our getattr.
*/
/*
 * Revalidate the inode behind @dentry for getattr.  Returns -ENOENT
 * if there is no inode or it has been wiped (OCFS2_INODE_DELETED); a
 * successful take/drop of the shared cluster lock refreshes the
 * struct inode as a side effect.
 */
int ocfs2_inode_revalidate(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int status = 0;

	trace_ocfs2_inode_revalidate(inode,
		inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL,
		inode ? (unsigned long long)OCFS2_I(inode)->ip_flags : 0);

	if (!inode) {
		status = -ENOENT;
		goto bail;
	}

	spin_lock(&OCFS2_I(inode)->ip_lock);
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		status = -ENOENT;
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/* Let ocfs2_inode_lock do the work of updating our struct
	 * inode for us. */
	status = ocfs2_inode_lock(inode, NULL, 0);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail;
	}
	ocfs2_inode_unlock(inode, 0);
bail:
	return status;
}
/*
* Updates a disk inode from a
* struct inode.
* Only takes ip_lock.
*/
/*
 * Copy the in-memory inode fields into the on-disk dinode held in @bh
 * and mark the buffer dirty in the journal.  @handle must be an open
 * transaction with credit for this block.  Only ip_lock is taken, and
 * only around the fields it protects (i_clusters, attr, dyn_features).
 */
int ocfs2_mark_inode_dirty(handle_t *handle,
			   struct inode *inode,
			   struct buffer_head *bh)
{
	int status;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;

	trace_ocfs2_mark_inode_dirty((unsigned long long)OCFS2_I(inode)->ip_blkno);

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	spin_lock(&OCFS2_I(inode)->ip_lock);
	fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
	ocfs2_get_inode_flags(OCFS2_I(inode));
	fe->i_attr = cpu_to_le32(OCFS2_I(inode)->ip_attr);
	fe->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	fe->i_size = cpu_to_le64(i_size_read(inode));
	ocfs2_set_links_count(fe, inode->i_nlink);
	fe->i_uid = cpu_to_le32(i_uid_read(inode));
	fe->i_gid = cpu_to_le32(i_gid_read(inode));
	fe->i_mode = cpu_to_le16(inode->i_mode);
	fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);

	ocfs2_journal_dirty(handle, bh);
leave:
	return status;
}
/*
*
* Updates a struct inode from a disk inode.
* does no i/o, only takes ip_lock.
*/
void ocfs2_refresh_inode(struct inode *inode,
struct ocfs2_dinode *fe)
{
spin_lock(&OCFS2_I(inode)->ip_lock);
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
ocfs2_set_inode_flags(inode);
i_size_write(inode, le64_to_cpu(fe->i_size));
set_nlink(inode, ocfs2_read_links_count(fe));
i_uid_write(inode, le32_to_cpu(fe->i_uid));
i_gid_write(inode, le32_to_cpu(fe->i_gid));
inode->i_mode = le16_to_cpu(fe->i_mode);
if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0)
inode->i_blocks = 0;
else
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);
spin_unlock(&OCFS2_I(inode)->ip_lock);
}
/*
 * Buffer validation callback for dinode blocks.  Checks, in order:
 * metadata ECC, the dinode signature, that the recorded i_blkno
 * matches the block we actually read, OCFS2_VALID_FL, and the fs
 * generation.  Returns 0 if the block is a valid dinode.
 */
int ocfs2_validate_inode_block(struct super_block *sb,
			       struct buffer_head *bh)
{
	int rc;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	trace_ocfs2_validate_inode_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for dinode %llu\n",
		     (unsigned long long)bh->b_blocknr);
		goto bail;
	}

	/*
	 * Errors after here are fatal.
	 */
	rc = -EINVAL;

	if (!OCFS2_IS_VALID_DINODE(di)) {
		ocfs2_error(sb, "Invalid dinode #%llu: signature = %.*s\n",
			    (unsigned long long)bh->b_blocknr, 7,
			    di->i_signature);
		goto bail;
	}

	if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) {
		ocfs2_error(sb, "Invalid dinode #%llu: i_blkno is %llu\n",
			    (unsigned long long)bh->b_blocknr,
			    (unsigned long long)le64_to_cpu(di->i_blkno));
		goto bail;
	}

	if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
		ocfs2_error(sb,
			    "Invalid dinode #%llu: OCFS2_VALID_FL not set\n",
			    (unsigned long long)bh->b_blocknr);
		goto bail;
	}

	if (le32_to_cpu(di->i_fs_generation) !=
	    OCFS2_SB(sb)->fs_generation) {
		ocfs2_error(sb,
			    "Invalid dinode #%llu: fs_generation is %u\n",
			    (unsigned long long)bh->b_blocknr,
			    le32_to_cpu(di->i_fs_generation));
		goto bail;
	}

	rc = 0;

bail:
	return rc;
}
int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh,
int flags)
{
int rc;
struct buffer_head *tmp = *bh;
rc = ocfs2_read_blocks(INODE_CACHE(inode), OCFS2_I(inode)->ip_blkno,
1, &tmp, flags, ocfs2_validate_inode_block);
/* If ocfs2_read_blocks() got us a new bh, pass it up. */
if (!rc && !*bh)
*bh = tmp;
return rc;
}
/* Read the inode's dinode block with default (cached) read flags. */
int ocfs2_read_inode_block(struct inode *inode, struct buffer_head **bh)
{
	return ocfs2_read_inode_block_full(inode, bh, 0);
}
static u64 ocfs2_inode_cache_owner(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
return oi->ip_blkno;
}
static struct super_block *ocfs2_inode_cache_get_super(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
return oi->vfs_inode.i_sb;
}
static void ocfs2_inode_cache_lock(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
spin_lock(&oi->ip_lock);
}
static void ocfs2_inode_cache_unlock(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
spin_unlock(&oi->ip_lock);
}
static void ocfs2_inode_cache_io_lock(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
mutex_lock(&oi->ip_io_mutex);
}
static void ocfs2_inode_cache_io_unlock(struct ocfs2_caching_info *ci)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
mutex_unlock(&oi->ip_io_mutex);
}
/*
 * Metadata-cache callbacks for inodes: ownership/super_block lookup
 * plus the two lock pairs (ip_lock spinlock for cache state,
 * ip_io_mutex for buffer I/O).
 */
const struct ocfs2_caching_operations ocfs2_inode_caching_ops = {
	.co_owner		= ocfs2_inode_cache_owner,
	.co_get_super		= ocfs2_inode_cache_get_super,
	.co_cache_lock		= ocfs2_inode_cache_lock,
	.co_cache_unlock	= ocfs2_inode_cache_unlock,
	.co_io_lock		= ocfs2_inode_cache_io_lock,
	.co_io_unlock		= ocfs2_inode_cache_io_unlock,
};
| gpl-2.0 |
dduval/kernel-rhel3 | arch/parisc/kernel/ccio-dma.c | 18 | 47538 | /*
** ccio-dma.c:
** DMA management routines for first generation cache-coherent machines.
** Program U2/Uturn in "Virtual Mode" and use the I/O MMU.
**
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Ryan Bradetich
** (c) Copyright 2000 Hewlett-Packard Company
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** "Real Mode" operation refers to U2/Uturn chip operation.
** U2/Uturn were designed to perform coherency checks w/o using
** the I/O MMU - basically what x86 does.
**
** Philipp Rumpf has a "Real Mode" driver for PCX-W machines at:
** CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc
** cvs -z3 co linux/arch/parisc/kernel/dma-rm.c
**
** I've rewritten his code to work under TPG's tree. See ccio-rm-dma.c.
**
** Drawbacks of using Real Mode are:
** o outbound DMA is slower - U2 won't prefetch data (GSC+ XQL signal).
** o Inbound DMA less efficient - U2 can't use DMA_FAST attribute.
** o Ability to do scatter/gather in HW is lost.
** o Doesn't work under PCX-U/U+ machines since they didn't follow
** the coherency design originally worked out. Only PCX-W does.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#define PCI_DEBUG
#include <linux/pci.h>
#undef PCI_DEBUG
#include <asm/byteorder.h>
#include <asm/cache.h> /* for L1_CACHE_BYTES */
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/gsc.h> /* for gsc_writeN()... */
#include <asm/hardware.h> /* for register_module() */
/*
** Choose "ccio" since that's what HP-UX calls it.
** Make it easier for folks to migrate from one to the other :^)
*/
#define MODULE_NAME "ccio"
#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG
#include <linux/proc_fs.h>
#include <asm/runway.h> /* for proc_runway_root */
#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...) printk(x)
#else
#define DBG_INIT(x...)
#endif
#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...) printk(x)
#else
#define DBG_RUN(x...)
#endif
#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif
#define CCIO_INLINE /* inline */
#define WRITE_U32(value, addr) gsc_writel(value, (u32 *)(addr))
#define READ_U32(addr) gsc_readl((u32 *)(addr))
#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC 0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC 0x502
#define IOA_NORMAL_MODE 0x00020080 /* IO_CONTROL to turn on CCIO */
#define CMD_TLB_DIRECT_WRITE 35 /* IO_COMMAND for I/O TLB Writes */
#define CMD_TLB_PURGE 33 /* IO_COMMAND to Purge I/O TLB entry */
/*
 * Memory-mapped register layout of the U2/Uturn I/O Adapter.  The
 * "Offset" comments give the word offset within each register set.
 */
struct ioa_registers {
	/* Runway Supervisory Set */
	volatile int32_t unused1[12];
	volatile uint32_t io_command;		/* Offset 12 */
	volatile uint32_t io_status;		/* Offset 13 */
	volatile uint32_t io_control;		/* Offset 14 */
	volatile int32_t unused2[1];

	/* Runway Auxiliary Register Set */
	volatile uint32_t io_err_resp;		/* Offset 0 */
	volatile uint32_t io_err_info;		/* Offset 1 */
	volatile uint32_t io_err_req;		/* Offset 2 */
	volatile uint32_t io_err_resp_hi;	/* Offset 3 */
	volatile uint32_t io_tlb_entry_m;	/* Offset 4 */
	volatile uint32_t io_tlb_entry_l;	/* Offset 5 */
	volatile uint32_t unused3[1];
	volatile uint32_t io_pdir_base;		/* Offset 7 */
	volatile uint32_t io_io_low_hv;		/* Offset 8 */
	volatile uint32_t io_io_high_hv;	/* Offset 9 */
	volatile uint32_t unused4[1];
	volatile uint32_t io_chain_id_mask;	/* Offset 11 */
	volatile uint32_t unused5[2];
	volatile uint32_t io_io_low;		/* Offset 14 */
	volatile uint32_t io_io_high;		/* Offset 15 */
};
/*
 * Per-IOA software state: the I/O MMU register block, the I/O Pdir,
 * and the bitmap resource map that tracks which pdir entries are in
 * use (low bits represent low IOVAs).
 */
struct ioc {
	struct ioa_registers *ioc_hpa;	/* I/O MMU base address */
	u8  *res_map;			/* resource map, bit == pdir entry */
	u64 *pdir_base;			/* physical base address */
	u32 res_hint;			/* next available IOVP -
					   circular search */
	u32 res_size;			/* size of resource map in bytes */
	spinlock_t res_lock;

#ifdef CONFIG_PROC_FS
#define CCIO_SEARCH_SAMPLE 0x100
	unsigned long avg_search[CCIO_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
	unsigned long used_pages;
	unsigned long msingle_calls;
	unsigned long msingle_pages;
	unsigned long msg_calls;
	unsigned long msg_pages;
	unsigned long usingle_calls;
	unsigned long usingle_pages;
	unsigned long usg_calls;
	unsigned long usg_pages;

	unsigned short cujo20_bug;
#endif

	/* STUFF We don't need in performance path */
	u32 pdir_size;			/* in bytes, determined by IOV Space size */
	u32 chainid_shift;		/* specify bit location of chain_id */
	struct ioc *next;		/* Linked list of discovered iocs */
	const char *name;		/* device name from firmware */
	unsigned int hw_path;		/* the hardware path this ioc is associated with */
	struct pci_dev *fake_pci_dev;	/* the fake pci_dev for non-pci devs */
	struct resource mmio_region[2];	/* The "routed" MMIO regions */
};
/* Ratio of Host MEM to IOV Space size */
static unsigned long ccio_mem_ratio = 4;
static struct ioc *ioc_list;
static int ioc_count;
/**************************************************************
*
* I/O Pdir Resource Management
*
* Bits set in the resource map are in use.
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
* This was copied from sba_iommu.c. Don't try to unify
* the two resource managers unless a way to have different
* allocation policies is also adjusted. We'd like to avoid
* I/O TLB thrashing by having resource allocation policy
* match the I/O TLB replacement policy.
*
***************************************************************/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK
/* Convert from IOVP to IOVA and vice versa. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
/*
** Don't worry about the 150% average search length on a miss.
** If the search wraps around, and passes the res_hint, it will
** cause the kernel to panic anyhow.
*/
/*
 * Scan [res_ptr, res_end) for a word with all bits of *mask_ptr free;
 * on success claim the bits, record the bit index in res_idx, advance
 * the circular search hint and jump to the resource_found label in the
 * enclosing function (ccio_alloc_range).  Relies on res_ptr/res_end
 * declared by CCIO_FIND_FREE_MAPPING below.
 */
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask_ptr, size) \
       for(; res_ptr < res_end; ++res_ptr) { \
               if(0 == (*res_ptr & *mask_ptr)) { \
                       *res_ptr |= *mask_ptr; \
                       res_idx = (int)((unsigned long)res_ptr - (unsigned long)(ioc)->res_map); \
                       (ioc)->res_hint = res_idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

/*
 * Search from the hint to the end of the map, then wrap around to the
 * start.  Declares locals, so it must be invoked at the top of a block.
 *
 * Fix: the parameter used to be named 'ioa' while the body silently
 * captured the caller's local named 'ioc' (and mixed both names); it
 * only worked because every caller's variable happened to be called
 * 'ioc'.  Use the macro parameter consistently and parenthesized.
 */
#define CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, size) \
       u##size *res_ptr = (u##size *)&((ioc)->res_map[(ioc)->res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&(ioc)->res_map[(ioc)->res_size]; \
       u##size *mask_ptr = (u##size *)&mask; \
       CCIO_SEARCH_LOOP(ioc, res_idx, mask_ptr, size); \
       res_ptr = (u##size *)&(ioc)->res_map[0]; \
       CCIO_SEARCH_LOOP(ioc, res_idx, mask_ptr, size);
/*
** Find available bit in this ioa's resource map.
** Use a "circular" search:
** o Most IOVA's are "temporary" - avg search time should be small.
** o keep a history of what happened for debugging
** o KISS.
**
** Perf optimizations:
** o search for log2(size) bits at a time.
** o search for available resource bits using byte/word/whatever.
** o use different search for "large" (eg > 4 pages) or "very large"
** (eg > 16 pages) mappings.
*/
/**
* ccio_alloc_range - Allocate pages in the ioc's resource map.
* @ioc: The I/O Controller.
* @pages_needed: The requested number of pages to be mapped into the
* I/O Pdir...
*
* This function searches the resource map of the ioc to locate a range
* of available pages for the requested size.
*/
static int
ccio_alloc_range(struct ioc *ioc, unsigned long pages_needed)
{
	int res_idx;
	unsigned long mask;
#ifdef CONFIG_PROC_FS
	unsigned long cr_start = mfctl(16);	/* interval timer: measure search cost */
#endif

	ASSERT(pages_needed);
	ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(pages_needed <= BITS_PER_LONG);

	/* A run of pages_needed set bits, left-justified in the word. */
	mask = ~(~0UL >> pages_needed);

	/* Fix: this debug statement used to reference a nonexistent
	 * 'size' variable and printed unsigned longs with %d, so the
	 * build broke whenever DEBUG_CCIO_RES was defined. */
	DBG_RES("%s() pages_needed %lu mask 0x%08lx\n",
		__FUNCTION__, pages_needed, mask);

	/*
	** "seek and ye shall find"...praying never hurts either...
	** ggg sacrifices another 710 to the computer gods.
	*/

	if(pages_needed <= 8) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 32);
#ifdef __LP64__
	} else if(pages_needed <= 64) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 64);
#endif
	} else {
		panic(__FILE__ ": %s() Too many pages to map. pages_needed: %ld\n",
		      __FUNCTION__, pages_needed);
	}

	/* CCIO_FIND_FREE_MAPPING() jumps to resource_found on success;
	 * falling through here means the map is exhausted. */
	panic(__FILE__ ": %s() I/O MMU is out of mapping resources.\n",
	      __FUNCTION__);

resource_found:

	DBG_RES("%s() res_idx %d mask 0x%08lx res_hint: %d\n",
		__FUNCTION__, res_idx, mask, ioc->res_hint);

#ifdef CONFIG_PROC_FS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
	ioc->used_pages += pages_needed;
#endif

	/*
	** return the bit address.
	*/
	return res_idx << 3;
}
/*
** Release resource-map bits for a completed mapping: alias the map byte
** at res_idx and the precomputed bit mask as a u<size> word, assert all
** the bits were actually set, then clear them.  Expands inside
** ccio_free_range()'s if/else chain; <size> is a bit width (8..64).
*/
#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
        u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
        u##size *mask_ptr = (u##size *)&mask; \
        ASSERT((*res_ptr & *mask_ptr) == *mask_ptr); \
        *res_ptr &= ~(*mask_ptr);
/**
 * ccio_free_range - Free pages from the ioc's resource map.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @pages_mapped: The requested number of pages to be freed from the
 * I/O Pdir.
 *
 * This function frees the resouces allocated for the iova.
 */
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
	unsigned long mask;
	unsigned long iovp = CCIO_IOVP(iova);
	/* byte index into the resource map: 8 pages per map byte */
	unsigned int res_idx = PDIR_INDEX(iovp) >> 3;

	ASSERT(pages_mapped);
	ASSERT((pages_mapped * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(pages_mapped <= BITS_PER_LONG);

	/* left-justified mask with one bit per mapped page,
	** mirroring the mask built in ccio_alloc_range() */
	mask = ~(~0UL >> pages_mapped);

	DBG_RES("%s():  res_idx: %d pages_mapped %d mask 0x%08lx\n",
		__FUNCTION__, res_idx, pages_mapped, mask);

#ifdef CONFIG_PROC_FS
	ioc->used_pages -= pages_mapped;
#endif

	/* pick the map word width that covers the allocation,
	** same tiering as the allocation path */
	if(pages_mapped <= 8) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 32);
#ifdef __LP64__
	} else if(pages_mapped <= 64) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 64);
#endif
	} else {
		panic(__FILE__ ":%s() Too many pages to unmap.\n",
		      __FUNCTION__);
	}
}
/****************************************************************
**
** CCIO dma_ops support routines
**
*****************************************************************/
typedef unsigned long space_t;
#define KERNEL_SPACE 0
/*
** DMA "Page Type" and Hints
** o if SAFE_DMA isn't set, mapping is for FAST_DMA. SAFE_DMA should be
** set for subcacheline DMA transfers since we don't want to damage the
** other part of a cacheline.
** o SAFE_DMA must be set for "memory" allocated via pci_alloc_consistent().
** This bit tells U2 to do R/M/W for partial cachelines. "Streaming"
** data can avoid this if the mapping covers full cache lines.
** o STOP_MOST is needed for atomicity across cachelines.
** Apperently only "some EISA devices" need this.
** Using CONFIG_ISA is hack. Only the IOA with EISA under it needs
** to use this hint iff the EISA devices needs this feature.
** According to the U2 ERS, STOP_MOST enabled pages hurt performance.
** o PREFETCH should *not* be set for cases like Multiple PCI devices
** behind GSCtoPCI (dino) bus converter. Only one cacheline per GSC
** device can be fetched and multiply DMA streams will thrash the
** prefetch buffer and burn memory bandwidth. See 6.7.3 "Prefetch Rules
** and Invalidation of Prefetch Entries".
**
** FIXME: the default hints need to be per GSC device - not global.
**
** HP-UX dorks: linux device driver programming model is totally different
** than HP-UX's. HP-UX always sets HINT_PREFETCH since it's drivers
** do special things to work on non-coherent platforms...linux has to
** be much more careful with this.
*/
#define IOPDIR_VALID 0x01UL
#define HINT_SAFE_DMA 0x02UL /* used for pci_alloc_consistent() pages */
#ifdef CONFIG_ISA /* EISA support really */
#define HINT_STOP_MOST 0x04UL /* LSL support */
#else
#define HINT_STOP_MOST 0x00UL /* only needed for "some EISA devices" */
#endif
#define HINT_UDPATE_ENB 0x08UL /* not used/supported by U2 */
#define HINT_PREFETCH 0x10UL /* for outbound pages which are not SAFE */
/*
** Use direction (ie PCI_DMA_TODEVICE) to pick hint.
** ccio_alloc_consistent() depends on this to get SAFE_DMA
** when it passes in BIDIRECTIONAL flag.
**
** (Converted from the obsolete GNU `[index] value` designator
** extension to standard C99 `[index] = value` syntax.)
*/
static u32 hint_lookup[] = {
	[PCI_DMA_BIDIRECTIONAL]	= HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[PCI_DMA_TODEVICE]	= HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[PCI_DMA_FROMDEVICE]	= HINT_STOP_MOST | IOPDIR_VALID,
	[PCI_DMA_NONE]		= 0,            /* not valid */
};
/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir.
 * @pdir_ptr: A pointer into I/O Pdir.
 * @sid: The Space Identifier.
 * @vba: The virtual address.
 * @hints: The DMA Hint.
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1),
 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
 * entry consists of 8 bytes as shown below (MSB == bit 0):
 *
 *
 * WORD 0:
 * +------+----------------+-----------------------------------------------+
 * | Phys | Virtual Index  |               Phys                            |
 * | 0:3  |     0:11       |               4:19                            |
 * |4 bits|   12 bits      |              16 bits                          |
 * +------+----------------+-----------------------------------------------+
 * WORD 1:
 * +-----------------------+-----------------------------------------------+
 * |      Phys    |  Rsvd  | Prefetch |Update |Rsvd  |Lock  |Safe  |Valid  |
 * |     20:39    |        | Enable   |Enable |      |Enable|DMA   |       |
 * |    20 bits   | 5 bits | 1 bit    |1 bit  |2 bits|1 bit |1 bit |1 bit  |
 * +-----------------------+-----------------------------------------------+
 *
 * The virtual index field is filled with the results of the LCI
 * (Load Coherence Index) instruction.  The 8 bits used for the virtual
 * index are bits 12:19 of the value returned by LCI.
 */
void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, void * vba, unsigned long hints)
{
	register unsigned long pa = (volatile unsigned long) vba;
	register unsigned long ci; /* coherent index */

	/* We currently only support kernel addresses */
	ASSERT(sid == KERNEL_SPACE);

	/* load sr1 so the lci below translates vba in the given space */
	mtsp(sid,1);

	/*
	** WORD 1 - low order word
	** "hints" parm includes the VALID bit!
	** "dep" clobbers the physical address offset bits as well.
	*/
	pa = virt_to_phys(vba);
	asm volatile("depw  %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

	/*
	** WORD 0 - high order word
	*/

#ifdef __LP64__
	/*
	** get bits 12:15 of physical address
	** shift bits 16:31 of physical address
	** and deposit them
	*/
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd  %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;
#endif
	/*
	** get CPU coherency index bits
	** Grab virtual index [0:11]
	** Deposit virt_idx bits into I/O PDIR word
	*/
	asm volatile ("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw  %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;


	/* FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
	**        PCX-U/U+ do. (eg C200/C240)
	**        PCX-T'? Don't know. (eg C110 or similar K-class)
	**
	** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
	** Hopefully we can patch (NOP) these out at boot time somehow.
	**
	** "Since PCX-U employs an offset hash that is incompatible with
	** the real mode coherence index generation of U2, the PDIR entry
	** must be flushed to memory to retain coherence."
	*/
	/* flush the just-written entry out of the data cache so the
	** IOMMU (which reads pdir entries in real mode) sees it */
	asm volatile("fdc 0(%0)" : : "r" (pdir_ptr));
	asm volatile("sync");
}
/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Purge invalid I/O PDIR entries from the I/O TLB by issuing one
 * CMD_TLB_PURGE per TLB chain covering the range.  byte_cnt is biased
 * up by one chain so at least one purge is always issued.
 *
 * FIXME: Can we change the byte_cnt to pages_mapped?
 */
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
	u32 chain_size = 1 << ioc->chainid_shift;
	size_t remaining;

	iovp &= IOVP_MASK;	/* clear offset bits, just want pagenum */

	for (remaining = byte_cnt + chain_size;
	     remaining > chain_size;
	     remaining -= chain_size, iovp += chain_size) {
		WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_hpa->io_command);
	}
}
/**
 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Mark the I/O Pdir entries invalid and blow away the corresponding I/O
 * TLB entries.
 *
 * FIXME: at some threshhold it might be "cheaper" to just blow
 *        away the entire I/O TLB instead of individual entries.
 *
 * FIXME: Uturn has 256 TLB entries. We don't need to purge every
 *        PDIR entry - just once for each possible TLB entry.
 *        (We do need to maker I/O PDIR entries invalid regardless).
 *
 * FIXME: Can we change byte_cnt to pages_mapped?
 */
static CCIO_INLINE void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32)CCIO_IOVP(iova);
	size_t saved_byte_cnt;

	/* round up to nearest page size */
	saved_byte_cnt = byte_cnt = ROUNDUP(byte_cnt, IOVP_SIZE);

	while(byte_cnt > 0) {
		/* invalidate one page at a time */
		unsigned int idx = PDIR_INDEX(iovp);
		char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);

		ASSERT(idx < (ioc->pdir_size / sizeof(u64)));
		pdir_ptr[7] = 0;	/* clear only VALID bit */

		/*
		** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
		**   PCX-U/U+ do. (eg C200/C240)
		** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
		**
		** Hopefully someone figures out how to patch (NOP) the
		** FDC/SYNC out at boot time.
		*/
		/* BUGFIX: flush the *address* of the entry, not the value
		** of pdir_ptr[7] (which was just zeroed, so the old code
		** flushed address 0 and left the real entry dirty in the
		** cache).  Matches the fdc in ccio_io_pdir_entry(). */
		asm volatile("fdc 0(%0)" : : "r" (pdir_ptr));

		iovp     += IOVP_SIZE;
		byte_cnt -= IOVP_SIZE;
	}

	asm volatile("sync");
	ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}
/****************************************************************
**
** CCIO dma_ops
**
*****************************************************************/
/**
 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
 * @dev: The PCI device.
 * @mask: A bit mask describing the DMA address range of the device.
 *
 * This function implements the pci_dma_supported function.  Only
 * 32-bit masks (PCI/GSC devices) are accepted; a NULL device is a
 * driver bug and trips BUG().
 */
static int
ccio_dma_supported(struct pci_dev *dev, u64 mask)
{
	if (NULL == dev) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return 0;
	}

	/* only support 32-bit devices (ie PCI/GSC) */
	return (mask == 0xffffffffUL) ? 1 : 0;
}
/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The PCI device.
 * @addr: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_single function: it allocates a
 * range of I/O pdir entries under the ioc's resource lock, programs one
 * entry per IOVP page, and returns the bus-side DMA address (IOVA plus
 * the original intra-page offset).
 */
static dma_addr_t
ccio_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[direction];

	ASSERT(dev);
	ASSERT(dev->sysdata);
	ASSERT(HBA_DATA(dev->sysdata)->iommu);
	ioc = GET_IOC(dev);

	ASSERT(size > 0);

	/* save offset bits */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = ROUNDUP(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CONFIG_PROC_FS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, (size >> IOVP_SHIFT));
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	/* BUGFIX: format was "size: %0x%x" -- four conversions for three
	** arguments, so %x read garbage off the stack.  Meant "0x%x". */
	DBG_RUN("%s() 0x%p -> 0x%lx size: 0x%x\n",
		__FUNCTION__, addr, (long)iovp | offset, size);

	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
	if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	/* one I/O pdir entry per IOVP-sized page of the region */
	while(size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return CCIO_IOVA(iovp, offset);
}
/**
 * ccio_unmap_single - Unmap an address range from the IOMMU.
 * @dev: The PCI device.
 * @addr: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_unmap_single function: it strips the
 * intra-page offset from the IOVA, invalidates the corresponding pdir
 * entries and I/O TLB chains, and returns the pages to the resource map.
 */
static void
ccio_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
		  int direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;	/* intra-page offset bits */

	ASSERT(dev);
	ASSERT(dev->sysdata);
	ASSERT(HBA_DATA(dev->sysdata)->iommu);
	ioc = GET_IOC(dev);

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__FUNCTION__, (long)iova, size);

	iova ^= offset;        /* clear offset bits */
	size += offset;
	/* the map side rounded up too, so free whole IOVP pages */
	size = ROUNDUP(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CONFIG_PROC_FS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}
/**
 * ccio_alloc_consistent - Allocate a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @dma_handle: The DMA address handed back to the device (not the cpu).
 *
 * This function implements the pci_alloc_consistent function: grab
 * zeroed pages and map them bidirectionally (which makes the hint
 * table select SAFE_DMA).  Returns the cpu address, or NULL on
 * allocation failure (in which case *dma_handle is untouched).
 *
 * GRANT: only PCI devices are handled here; a non-PCI hierarchy would
 * need its own gsc_map_xxx() equivalents.
 */
static void *
ccio_alloc_consistent(struct pci_dev *dev, size_t size, dma_addr_t *dma_handle)
{
	void *cpu_addr;

	cpu_addr = (void *) __get_free_pages(GFP_ATOMIC, get_order(size));
	if (cpu_addr == NULL)
		return NULL;

	memset(cpu_addr, 0, size);
	*dma_handle = ccio_map_single(dev, cpu_addr, size,
				      PCI_DMA_BIDIRECTIONAL);
	return cpu_addr;
}
/**
 * ccio_free_consistent - Free a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
 * @dma_handle: The device address returned from the ccio_alloc_consistent.
 *
 * This function implements the pci_free_consistent function.
 */
static void
ccio_free_consistent(struct pci_dev *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	/* CONSISTENCY: the buffer was mapped PCI_DMA_BIDIRECTIONAL in
	** ccio_alloc_consistent(); say so here instead of a bare 0
	** (same value, clearer intent). */
	ccio_unmap_single(dev, dma_handle, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));
}
/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

/**
 * ccio_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.  It is the second pass of the map_sg
 * algorithm: ccio_coalesce_chunks() has already tagged each DMA stream
 * head with PIDE_FLAG|pdir-index in dma_address and stored chunk byte
 * counts in dma_len.  Returns the number of DMA streams programmed.
 */
static CCIO_INLINE int
ccio_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
	       unsigned long hint)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = 0;
	unsigned long dma_offset = 0;

	/* pre-decrement: bumped back up when the first stream head is seen */
	dma_sg--;
	while (nents-- > 0) {
		int cnt = sg_dma_len(startsg);
		sg_dma_len(startsg) = 0;

		DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg), cnt,
			   sg_virt_addr(startsg), startsg->length
		);

		/*
		** Look for the start of a new DMA stream
		*/
		if(sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			sg_dma_address(startsg) = 0;
			dma_sg++;
			sg_dma_address(dma_sg) = pide;
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sg_virt_addr(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			sg_dma_len(dma_sg) += cnt;
			cnt += dma_offset;
			dma_offset=0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, IOVP_SIZE);
#ifdef CONFIG_PROC_FS
			ioc->msg_pages += cnt >> IOVP_SHIFT;
#endif
			/* one pdir entry per IOVP page of the chunk */
			do {
				ccio_io_pdir_entry(pdirp, KERNEL_SPACE,
						   (void *)vaddr, hint);
				vaddr += IOVP_SIZE;
				cnt -= IOVP_SIZE;
				pdirp++;
			} while (cnt > 0);
		}
		startsg++;
	}
	return(n_mappings);
}
/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill seperate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/
static CCIO_INLINE int
ccio_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end += (unsigned long) sg_virt_addr(startsg);
		dma_offset = (unsigned long) sg_virt_addr(startsg) & ~IOVP_MASK;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while(--nents > 0) {
			unsigned long startsg_end;

			startsg++;
			startsg_end = (unsigned long) sg_virt_addr(startsg) +
				startsg->length;

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if(ROUNDUP(dma_len + dma_offset + startsg->length,
				   IOVP_SIZE) > DMA_CHUNK_SIZE)
				break;

			/*
			** Append the next transaction?
			*/
			if(vcontig_end == (unsigned long) sg_virt_addr(startsg)) {
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len     += startsg->length;
				continue;
			}

			/*
			** Not virtually contigous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo Successive chunks
			** must start on page boundaries and dove tail
			** with it's predecessor.
			*/
			sg_dma_len(vcontig_sg) = vcontig_len;

			vcontig_sg = startsg;
			vcontig_len = startsg->length;
			break;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(vcontig_sg) = vcontig_len;
		dma_len = ROUNDUP(dma_len + dma_offset, IOVP_SIZE);
		/* tag the stream head with its pdir index so the second
		** pass (ccio_fill_pdir) can find and program it */
		sg_dma_address(dma_sg) =
			PIDE_FLAG
			| (ccio_alloc_range(ioc, (dma_len >> IOVP_SHIFT)) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
/**
 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be mapped in the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_sg function using a two-pass
 * algorithm: coalesce + allocate, then fill the I/O Pdir.  Returns the
 * number of DMA streams the list was collapsed into.
 */
static int
ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
	    int direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[direction];

	ASSERT(dev);
	ASSERT(dev->sysdata);
	ASSERT(HBA_DATA(dev->sysdata)->iommu);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);

	/* Fast path single entry scatterlists. */
	if(nents == 1) {
		sg_dma_address(sglist)= ccio_map_single(dev,
							sg_virt_addr(sglist),
							sglist->length,
							direction);
		sg_dma_len(sglist)= sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CONFIG_PROC_FS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = ccio_coalesce_chunks(ioc, sglist, nents);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = ccio_fill_pdir(ioc, sglist, nents, hint);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* both passes must agree on the number of streams */
	ASSERT(coalesced == filled);

	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	return filled;
}
/**
 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be unmapped from the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_unmap_sg function by unmapping each
 * coalesced DMA stream with ccio_unmap_single().
 */
static void
ccio_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
	      int direction)
{
	struct ioc *ioc;

	ASSERT(dev);
	ASSERT(dev->sysdata);
	ASSERT(HBA_DATA(dev->sysdata)->iommu);
	ioc = GET_IOC(dev);

	/* BUGFIX: was sg_virt_address(), which doesn't exist -- every
	** other user in this file calls sg_virt_addr().  Broke the build
	** whenever DBG_RUN_SG was enabled. */
	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		   __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);

#ifdef CONFIG_PROC_FS
	ioc->usg_calls++;
#endif

	/* stop at the first zero-length (never-mapped) entry */
	while(sg_dma_len(sglist) && nents--) {

#ifdef CONFIG_PROC_FS
		/* NOTE(review): other stat counters shift by IOVP_SHIFT;
		** PAGE_SHIFT here miscounts if the two differ -- confirm. */
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_single(dev, sg_dma_address(sglist),
				  sg_dma_len(sglist), direction);
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
}
/*
** U2/Uturn implementation of the PCI DMA operations.  Initializers are
** positional, so the order must match struct pci_dma_ops exactly.
*/
static struct pci_dma_ops ccio_ops = {
	ccio_dma_supported,	/* implements pci_dma_supported() */
	ccio_alloc_consistent,	/* implements pci_alloc_consistent() */
	ccio_free_consistent,	/* implements pci_free_consistent() */
	ccio_map_single,	/* implements pci_map_single() */
	ccio_unmap_single,	/* implements pci_unmap_single() */
	ccio_map_sg,		/* implements pci_map_sg() */
	ccio_unmap_sg,		/* implements pci_unmap_sg() */
	NULL,                   /* dma_sync_single : NOP for U2/Uturn */
	NULL,                   /* dma_sync_sg : ditto */
};
#ifdef CONFIG_PROC_FS
/*
 * proc_append - copy one formatted chunk into a /proc read buffer.
 *
 * @src/@len:  the chunk to emit.
 * @dst:       cursor into the output buffer; advanced past copied bytes.
 * @offset:    bytes of output still to skip (the read's file position);
 *             decremented as chunks are consumed.
 * @max:       space remaining in the output buffer; decremented.
 *
 * Returns nonzero once the buffer is full so callers can stop early.
 */
static int proc_append(char *src, int len, char **dst, off_t *offset, int *max)
{
	int ncopy;

	/* chunk ends before the requested offset: skip it entirely */
	if (len < *offset) {
		*offset -= len;
		return 0;
	}

	/* partially skipped chunk: drop the leading bytes */
	if (*offset > 0) {
		src += *offset;
		len -= *offset;
		*offset = 0;
	}

	/* clip to the space left, copy, and advance the cursor */
	ncopy = (len > *max) ? *max : len;
	memcpy(*dst, src, ncopy);
	*dst += ncopy;
	*max -= ncopy;

	return (*max == 0);
}
/*
 * ccio_proc_info - /proc read handler: per-ioc usage statistics.
 *
 * Walks ioc_list and appends one stats report per ioc via proc_append(),
 * honouring the caller's offset/count window.  Returns the number of
 * bytes written into @buf.
 *
 * Fixes vs the original:
 *  - the inner `unsigned long max` shadowed the outer `int max` used in
 *    the return value; renamed to cycles_min/cycles_max (and the unmap
 *    stats to ucalls/upages) to remove the shadowing.
 *  - the avg-per-call divisions trapped (divide by zero) when a counter
 *    was still 0; now guarded.
 */
static int ccio_proc_info(char *buf, char **start, off_t offset, int count,
			  int *eof, void *data)
{
	int max = count;	/* bytes available on entry */
	char tmp[80]; /* width of an ANSI-standard terminal */
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		unsigned int total_pages = ioc->res_size << 3;
		unsigned long avg = 0, cycles_min, cycles_max;
		unsigned long ucalls, upages;
		int j, len;

		len = sprintf(tmp, "%s\n", ioc->name);
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "Cujo 2.0 bug : %s\n",
			      (ioc->cujo20_bug ? "yes" : "no"));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "IO PDIR size : %d bytes (%d entries)\n",
			      total_pages * 8, total_pages);
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "IO PDIR entries : %ld free %ld used (%d%%)\n",
			      total_pages - ioc->used_pages, ioc->used_pages,
			      (int)(ioc->used_pages * 100 / total_pages));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "Resource bitmap : %d bytes (%d pages)\n",
			      ioc->res_size, total_pages);
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		cycles_min = cycles_max = ioc->avg_search[0];
		for (j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
			avg += ioc->avg_search[j];
			if (ioc->avg_search[j] > cycles_max)
				cycles_max = ioc->avg_search[j];
			if (ioc->avg_search[j] < cycles_min)
				cycles_min = ioc->avg_search[j];
		}
		avg /= CCIO_SEARCH_SAMPLE;

		len = sprintf(tmp, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
			      cycles_min, avg, cycles_max);
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
			      ioc->msingle_calls, ioc->msingle_pages,
			      (int)(ioc->msingle_calls ?
				    (ioc->msingle_pages * 1000) / ioc->msingle_calls : 0));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		/* KLUGE - unmap_sg calls unmap_single for each mapped page */
		ucalls = ioc->usingle_calls - ioc->usg_calls;
		upages = ioc->usingle_pages - ioc->usg_pages;
		len = sprintf(tmp, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
			      ucalls, upages,
			      (int)(ucalls ? (upages * 1000) / ucalls : 0));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
			      ioc->msg_calls, ioc->msg_pages,
			      (int)(ioc->msg_calls ?
				    (ioc->msg_pages * 1000) / ioc->msg_calls : 0));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
			      ioc->usg_calls, ioc->usg_pages,
			      (int)(ioc->usg_calls ?
				    (ioc->usg_pages * 1000) / ioc->usg_calls : 0));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		ioc = ioc->next;
	}

	if (count == 0) {
		*eof = 1;
	}
	return (max - count);
}
/*
 * ccio_resource_map - /proc read handler: dump the resource bitmap.
 *
 * BUGFIX: the original appended with sprintf(buf, "%s %08x", buf, ...),
 * i.e. overlapping source and destination, which is undefined behavior
 * (and O(n^2) from re-scanning the whole string each iteration).  Use a
 * moving cursor instead; the emitted text is unchanged.
 */
static int ccio_resource_map(char *buf, char **start, off_t offset, int len,
			     int *eof, void *data)
{
	struct ioc *ioc = ioc_list;
	char *p = buf;

	*p = '\0';
	while (ioc != NULL) {
		u32 *res_ptr = (u32 *)ioc->res_map;
		int j;

		for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
			if ((j & 7) == 0)
				p += sprintf(p, "\n ");
			p += sprintf(p, " %08x", *res_ptr);
			res_ptr++;
		}
		p += sprintf(p, "\n\n");
		ioc = ioc->next;
		break; /* XXX - remove me */
	}

	return p - buf;
}
#endif
/**
 * ccio_find_ioc - Find the ioc in the ioc_list
 * @hw_path: The hardware path of the ioc.
 *
 * This function searches the ioc_list for an ioc that matches
 * the provide hardware path.  Returns NULL if no ioc matches.
 */
static struct ioc * ccio_find_ioc(int hw_path)
{
	struct ioc *ioc = ioc_list;
	int i;

	for (i = 0; i < ioc_count; i++, ioc = ioc->next) {
		if (ioc->hw_path == hw_path)
			return ioc;
	}

	return NULL;
}
/**
 * ccio_get_iommu - Find the iommu which controls this device
 * @dev: The parisc device.
 *
 * This function searches through the registerd IOMMU's and returns the
 * appropriate IOMMU for the device based upon the devices hardware path.
 * Returns NULL when the device has no IOA ancestor.
 */
void * ccio_get_iommu(const struct parisc_device *dev)
{
	const struct parisc_device *ioa = find_pa_parent_type(dev, HPHW_IOA);

	return ioa ? ccio_find_ioc(ioa->hw_path) : NULL;
}
#define CUJO_20_STEP       0x10000000	/* inc upper nibble */

/* Cujo 2.0 has a bug which will silently corrupt data being transferred
 * to/from certain pages.  To avoid this happening, we mark these pages
 * as `used', and ensure that nothing will try to allocate from them.
 */
void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp)
{
	unsigned int idx;
	/* NOTE(review): assumes the device has an IOA ancestor; a NULL
	** return from ccio_get_iommu() would be dereferenced below. */
	struct ioc *ioc = ccio_get_iommu(dev);
	u8 *res_ptr;

#ifdef CONFIG_PROC_FS
	ioc->cujo20_bug = 1;
#endif
	res_ptr = ioc->res_map;
	idx = PDIR_INDEX(iovp) >> 3;

	/* burn one resource-map byte (8 pages) at every CUJO_20_STEP
	** stride of IOVA space so the allocator never hands them out */
	while (idx < ioc->res_size) {
		res_ptr[idx] |= 0xff;
		idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
	}
}
#if 0
/* GRANT - is this needed for U2 or not? */
/*
** Get the size of the I/O TLB for this I/O MMU.
**
** If spa_shift is non-zero (ie probably U2),
** then calculate the I/O TLB size using spa_shift.
**
** Otherwise we are supposed to get the IODC entry point ENTRY TLB
** and execute it. However, both U2 and Uturn firmware supplies spa_shift.
** I think only Java (K/D/R-class too?) systems don't do this.
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
if (dev->spa_shift == 0) {
panic("%s() : Can't determine I/O TLB size.\n", __FUNCTION__);
}
return (1 << dev->spa_shift);
}
#else
/* Uturn supports 256 TLB entries */
#define CCIO_CHAINID_SHIFT 8
#define CCIO_CHAINID_MASK 0xff
#endif /* 0 */
/**
 * ccio_ioc_init - Initalize the I/O Controller
 * @ioc: The I/O Controller.
 *
 * Initalize the I/O Controller which includes setting up the
 * I/O Page Directory, the resource map, and initalizing the
 * U2/Uturn chip into virtual mode.
 */
static void
ccio_ioc_init(struct ioc *ioc)
{
	int i, iov_order;
	u32 iova_space_size;
	unsigned long physmem;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own. Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	*/
	/* limit IOVA space size to 1MB-1GB */
	physmem = num_physpages << PAGE_SHIFT;
	if(physmem < (ccio_mem_ratio * 1024 * 1024)) {
		iova_space_size = 1024 * 1024;
#ifdef __LP64__
	} else if(physmem > (ccio_mem_ratio * 512 * 1024 * 1024)) {
		iova_space_size = 512 * 1024 * 1024;
#endif
	} else {
		iova_space_size = (u32)(physmem / ccio_mem_ratio);
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	*/

	/* We could use larger page sizes in order to *decrease* the number
	** of mappings needed. (ie 8k pages means 1/2 the mappings).
	**
	** Note: Grant Grunder says "Using 8k I/O pages isn't trivial either
	** since the pages must also be physically contiguous - typically
	** this is the case under linux."
	*/
	iov_order = get_order(iova_space_size) >> (IOVP_SHIFT - PAGE_SHIFT);
	ASSERT(iov_order <= (30 - IOVP_SHIFT));   /* iova_space_size <= 1GB */
	ASSERT(iov_order >= (20 - IOVP_SHIFT));   /* iova_space_size >= 1MB */
	iova_space_size = 1 << (iov_order + IOVP_SHIFT);

	/* one 8-byte pdir entry per IOVP page of IOVA space */
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	ASSERT(ioc->pdir_size < 4 * 1024 * 1024);   /* max pdir size < 4MB */

	/* Verify it's a power of two */
	ASSERT((1 << get_order(ioc->pdir_size)) == (ioc->pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits) PDIR size 0x%0x",
		 __FUNCTION__, ioc->ioc_hpa, physmem>>20, iova_space_size>>20,
		 iov_order + PAGE_SHIFT, ioc->pdir_size);

	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
						 get_order(ioc->pdir_size));
	if(NULL == ioc->pdir_base) {
		panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);
	}
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	ASSERT((((unsigned long)ioc->pdir_base) & PAGE_MASK) == (unsigned long)ioc->pdir_base);
	DBG_INIT(" base %p", ioc->pdir_base);

	/* resource map size dictated by pdir_size: one bit per pdir entry */
	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);

	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
					      get_order(ioc->res_size));
	if(NULL == ioc->res_map) {
		panic(__FILE__ ":%s() could not allocate resource map\n", __FUNCTION__);
	}
	memset(ioc->res_map, 0, ioc->res_size);

	/* Initialize the res_hint to 16 */
	ioc->res_hint = 16;

	/* Initialize the spinlock */
	spin_lock_init(&ioc->res_lock);

	/*
	** Chainid is the upper most bits of an IOVP used to determine
	** which TLB entry an IOVP will use.
	*/
	ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
	DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);

	/*
	** Initialize IOA hardware
	*/
	WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
		  &ioc->ioc_hpa->io_chain_id_mask);

	WRITE_U32(virt_to_phys(ioc->pdir_base),
		  &ioc->ioc_hpa->io_pdir_base);

	/*
	** Go to "Virtual Mode"
	*/
	WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_hpa->io_control);

	/*
	** Initialize all I/O TLB entries to 0 (Valid bit off).
	*/
	WRITE_U32(0, &ioc->ioc_hpa->io_tlb_entry_m);
	WRITE_U32(0, &ioc->ioc_hpa->io_tlb_entry_l);

	/* direct-write every TLB chain to invalidate the whole I/O TLB */
	for(i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
		WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
			  &ioc->ioc_hpa->io_command);
	}
}
/*
 * Populate one struct resource from a pair of 32-bit IOA window
 * registers: the registers at @ioaddr and @ioaddr+4 hold the upper
 * 16 bits of the window's start and (exclusive) end addresses.  The
 * (signed) cast sign-extends the register value before widening to
 * unsigned long.  An empty window (end + 1 == start) is left
 * unregistered; a claim failure is logged but not fatal.
 */
static void
ccio_init_resource(struct resource *res, char *name, unsigned long ioaddr)
{
	int result;

	res->flags = IORESOURCE_MEM;
	res->start = (unsigned long)(signed) __raw_readl(ioaddr) << 16;
	res->end = (unsigned long)(signed) (__raw_readl(ioaddr + 4) << 16) - 1;
	if (res->end + 1 == res->start)
		return;
	res->name = name;
	result = request_resource(&iomem_resource, res);
	if (result < 0) {
		printk(KERN_ERR
		       "%s: failed to claim CCIO bus address space (%lx,%lx)\n",
		       __FILE__, res->start, res->end);
	}
}
/*
 * ccio_init_resources - register both MMIO windows of an ioc.
 *
 * The single kmalloc'd name string is shared by both resources and must
 * stay allocated for the resources' lifetime (resource->name keeps a
 * pointer to it, so it is intentionally never freed here).
 *
 * BUGFIX: the kmalloc() result was fed to sprintf() unchecked; on
 * allocation failure that dereferenced NULL.
 */
static void __init ccio_init_resources(struct ioc *ioc)
{
	struct resource *res = ioc->mmio_region;
	char *name = kmalloc(14, GFP_KERNEL);

	if (name == NULL) {
		printk(KERN_ERR MODULE_NAME ": cannot allocate resource name\n");
		return;
	}
	sprintf(name, "GSC Bus [%d/]", ioc->hw_path);
	ccio_init_resource(res, name, (unsigned long)&ioc->ioc_hpa->io_io_low);
	ccio_init_resource(res + 1, name,
			   (unsigned long)&ioc->ioc_hpa->io_io_low_hv);
}
/*
 * Hook for growing an ioc's MMIO window when an allocation fails.
 * Normally a no-op; with NASTY_HACK_FOR_K_CLASS it pokes io_io_high
 * directly and extends the first window to 0xf5ffffff.  All parameters
 * except @ioc are currently unused.
 */
static void expand_ioc_area(struct ioc *ioc, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
#ifdef NASTY_HACK_FOR_K_CLASS
	__raw_writel(0xfffff600, (unsigned long)&(ioc->ioc_hpa->io_io_high));
	ioc->mmio_region[0].end = 0xf5ffffff;
#endif
}
static struct resource *ccio_get_resource(struct ioc* ioc,
const struct parisc_device *dev)
{
if (!ioc) {
return &iomem_resource;
} else if ((ioc->mmio_region->start <= dev->hpa) &&
(dev->hpa < ioc->mmio_region->end)) {
return ioc->mmio_region;
} else if (((ioc->mmio_region + 1)->start <= dev->hpa) &&
(dev->hpa < (ioc->mmio_region + 1)->end)) {
return ioc->mmio_region + 1;
} else {
return NULL;
}
}
/*
 * Allocate a region for @dev from the resource tree that covers it.
 * If the first attempt fails, expand_ioc_area() is given one chance to
 * grow the window before a single retry.  Returns 0 on success or the
 * error from the second allocate_resource() call.
 */
int ccio_allocate_resource(const struct parisc_device *dev,
	struct resource *res, unsigned long size,
	unsigned long min, unsigned long max, unsigned long align,
	void (*alignf)(void *, struct resource *, unsigned long, unsigned long),
	void *alignf_data)
{
	struct ioc *ioc = ccio_get_iommu(dev);
	struct resource *parent = ccio_get_resource(ioc, dev);
	if (!parent)
		return -EBUSY;

	/* allocate_resource() returns 0 on success. */
	if (!allocate_resource(parent, res, size, min, max, align, alignf,
			alignf_data))
		return 0;

	expand_ioc_area(ioc, size, min, max, align);
	return allocate_resource(parent, res, size, min, max, align, alignf,
			alignf_data);
}
int ccio_request_resource(const struct parisc_device *dev,
struct resource *res)
{
struct ioc *ioc = ccio_get_iommu(dev);
struct resource *parent = ccio_get_resource(ioc, dev);
return request_resource(parent, res);
}
/**
 * ccio_probe - Determine if ccio should claim this device.
 * @dev: The device which has been found
 *
 * Determine if ccio should claim this chip (return 0) or not (return 1).
 * If so, initialize the chip and tell other partners in crime they
 * have work to do.
 */
static int ccio_probe(struct parisc_device *dev)
{
	int i;
	struct ioc *ioc, **ioc_p = &ioc_list;

	/* kzalloc() replaces the old kmalloc()+memset() pair. */
	ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		return 1;
	}

	ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";
	printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name, dev->hpa);

	/* Append the new ioc at the tail of the global list. */
	for (i = 0; i < ioc_count; i++)
		ioc_p = &(*ioc_p)->next;
	*ioc_p = ioc;

	ioc->hw_path = dev->hw_path;
	ioc->ioc_hpa = (struct ioa_registers *)dev->hpa;
	ccio_ioc_init(ioc);
	ccio_init_resources(ioc);
	hppa_dma_ops = &ccio_ops;

	if (ioc_count == 0) {
		/* XXX: Create separate entries for each ioc */
		create_proc_read_entry(MODULE_NAME, S_IRWXU, proc_runway_root,
				       ccio_proc_info, NULL);
		create_proc_read_entry(MODULE_NAME"-bitmap", S_IRWXU,
				       proc_runway_root, ccio_resource_map, NULL);
	}

	ioc_count++;
	return 0;
}
/*
 * Return (creating and caching on first use) a fake pci_dev whose
 * sysdata carries the IOMMU pointer for @dev's IOA parent.
 * Returns NULL when no IOA parent or IOC exists, or on allocation
 * failure.
 */
struct pci_dev * ccio_get_fake(const struct parisc_device *dev)
{
	struct ioc *ioc;

	dev = find_pa_parent_type(dev, HPHW_IOA);
	if (!dev)
		return NULL;

	ioc = ccio_find_ioc(dev->hw_path);
	if (!ioc)
		return NULL;

	/* Fast path: already built on a previous call. */
	if (ioc->fake_pci_dev)
		return ioc->fake_pci_dev;

	ioc->fake_pci_dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (ioc->fake_pci_dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		return NULL;
	}

	ioc->fake_pci_dev->sysdata = kmalloc(sizeof(struct pci_hba_data), GFP_KERNEL);
	if (ioc->fake_pci_dev->sysdata == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		/*
		 * The old code leaked the pci_dev here AND left the
		 * half-initialized pointer cached, so later callers got
		 * a device with a NULL sysdata.
		 */
		kfree(ioc->fake_pci_dev);
		ioc->fake_pci_dev = NULL;
		return NULL;
	}

	HBA_DATA(ioc->fake_pci_dev->sysdata)->iommu = ioc;
	return ioc->fake_pci_dev;
}
/* We *can't* support JAVA (T600). Venture there at your own risk. */
static struct parisc_device_id ccio_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
	{ 0, }
};

static struct parisc_driver ccio_driver = {
	/* C99 designated initializers replace the obsolete GNU "label:" form. */
	.name		= "U2/Uturn",
	.id_table	= ccio_tbl,
	.probe		= ccio_probe,
};
/**
 * ccio_init - ccio initialization procedure.
 *
 * Register this driver with the parisc bus core; probing of matching
 * IOAs happens via ccio_probe() as devices are discovered.
 */
void __init ccio_init(void)
{
	register_parisc_driver(&ccio_driver);
}
| gpl-2.0 |
makiftasova/JediOutcastLinux | code/ff/ff_MultiCompound.cpp | 18 | 4139 | #include "common_headers.h"
#ifdef _IMMERSION
////------------------
/// MultiCompound::Add
//----------------------
// Insert a single compound effect if it does not already exist.
// Only fails when parameter is NULL.
//
qboolean MultiCompound::Add( MultiEffect *effect )
{
	if ( !effect )
		return qfalse;

	mSet.insert( effect );
	return qtrue;
}
////------------------
/// MultiCompound::Add
//----------------------
// Merge set of compound effects with current set. NULL pointers are excluded.
// Returns false if set contains any NULL pointers.
//
qboolean MultiCompound::Add( Set &effect )
{
	qboolean result = qtrue;
	Set::iterator it;

	for ( it = effect.begin(); it != effect.end(); ++it )
	{
		result &= Add( *it );
	}
	return result;
}
////--------------------
/// MultiCompound::Start
//------------------------
// Analogous to CImmCompoundEffect::Start. Starts all contained compound effects.
// Returns false if any effect fails to start or the set is empty.
//
qboolean MultiCompound::Start()
{
	qboolean result = qtrue;
	Set::iterator it;

	for ( it = mSet.begin(); it != mSet.end(); ++it )
	{
		result &= (*it)->Start();
	}
	// An empty set always reports failure.
	return qboolean( result && mSet.size() != 0 );
}
// Returns qtrue only when every contained compound effect reports playing.
qboolean MultiCompound::IsPlaying()
{
	Set::iterator it;

	for ( it = mSet.begin(); it != mSet.end(); ++it )
	{
		if ( !(*it)->IsPlaying() )
			return qfalse;
	}
	return qtrue;
}
////----------------------------
/// MultiCompound::EnsurePlaying
//--------------------------------
// Restarts all contained compound effects if any of them is not playing.
// Returns false if the set is empty or any restart fails.
//
qboolean MultiCompound::EnsurePlaying()
{
	qboolean result = qtrue;

	if ( !IsPlaying() )
	{
		Set::iterator it;
		for ( it = mSet.begin(); it != mSet.end(); ++it )
		{
			result &= (*it)->Start();
		}
	}
	return qboolean( result && mSet.size() != 0 );
}
////-------------------
/// MultiCompound::Stop
//-----------------------
// Analogous to CImmCompoundEffect::Stop. Stops all contained compound effects.
// Returns false if any effect fails to stop or the set is empty.
//
qboolean MultiCompound::Stop()
{
	qboolean result = qtrue;
	Set::iterator it;

	for ( it = mSet.begin(); it != mSet.end(); ++it )
	{
		result &= qboolean( (*it)->Stop() );
	}
	return qboolean( result && mSet.size() != 0 );
}
////-----------------------------
/// MultiCompound::ChangeDuration
//---------------------------------
// Changes duration of all compounds.
// Returns false if any effect fails or the set is empty.
//
qboolean MultiCompound::ChangeDuration( DWORD Duration )
{
	qboolean result = qtrue;
	Set::iterator it;

	for ( it = mSet.begin(); it != mSet.end(); ++it )
	{
		result &= (*it)->ChangeDuration( Duration );
	}
	return qboolean( result && mSet.size() != 0 );
}
////-------------------------
/// MultiCompound::ChangeGain
//-----------------------------
// Changes gain of all compounds.
// Returns false if any effect fails or the set is empty.
//
qboolean MultiCompound::ChangeGain( DWORD Gain )
{
	qboolean result = qtrue;
	Set::iterator it;

	for ( it = mSet.begin(); it != mSet.end(); ++it )
	{
		result &= (*it)->ChangeGain( Gain );
	}
	return qboolean( result && mSet.size() != 0 );
}
////--------------------------
/// MultiCompound::operator ==
//------------------------------
// Returns qtrue if the sets are EXACTLY equal, including order. This is not good
// in general. (Fix me)
//
qboolean MultiCompound::operator == ( MultiCompound &compound )
{
	Set &other = compound.mSet;
	qboolean result = qfalse;

	if ( mSet.size() == other.size() )
	{
		/*
		 * The old code declared these iterators inside the for-init
		 * statement and then read itSet AFTER the loop, which is
		 * out of scope under ISO C++ (only old MSVC accepted it).
		 */
		Set::iterator itSet = mSet.begin();
		Set::iterator itOther = other.begin();

		// itOther needs no end() check: the sizes are equal.
		while ( itSet != mSet.end() && (*itSet) == (*itOther) )
		{
			itSet++;
			itOther++;
		}
		result = qboolean( itSet == mSet.end() );
	}
	return result;
}
#endif // _IMMERSION | gpl-2.0 |
NormandyCM11/android_kernel_nokia_normandy | drivers/leds/leds-tricolor.c | 18 | 9671 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <asm/mach-types.h>
#include <mach/pmic.h>
#include <mach/gpio-v1.h>
#include <mach/oem_rapi_client.h>
#include <mach/socinfo.h>
/* Set to 1 to log every blink request to the kernel log. */
#define DEBUG_TRICOLOR_LED 0

/* GPIO lines driving the flash-LED enable pin; board-variant specific. */
static int qrd5_led_flash_en1 = 13;
static int qrd7_led_flash_en = 96;

/* Index of each color within the led_data[] / leds[] arrays below. */
enum tri_color_led_color {
	LED_COLOR_RED,
	LED_COLOR_GREEN,
	LED_COLOR_BLUE,
	LED_COLOR_MAX
};

/* Commands sent to the modem via the OEM RAPI streaming call. */
enum tri_led_status{
	ALL_OFF,
	ALL_ON,
	BLUE_ON,
	BLUE_OFF,
	RED_ON,
	RED_OFF,
	GREEN_ON,
	GREEN_OFF,
	BLUE_BLINK,
	RED_BLINK,
	GREEN_BLINK,
	BLUE_BLINK_OFF,
	RED_BLINK_OFF,
	GREEN_BLINK_OFF,
	LED_MAX
};

/* Per-device state: RPC handle, lock, cached blink values, classdevs. */
struct tricolor_led_data {
	struct msm_rpc_client *rpc_client;
	spinlock_t led_lock;		/* guards led_data[] updates */
	int led_data[4];		/* last written blink value per LED */
	struct led_classdev leds[4];	/* blue, green, red, flashlight */
};
static void call_oem_rapi_client_streaming_function(struct msm_rpc_client *client,
char *input)
{
struct oem_rapi_client_streaming_func_arg client_arg = {
OEM_RAPI_CLIENT_EVENT_TRI_COLOR_LED_WORK,
NULL,
(void *)NULL,
sizeof(input),
input,
0,
0,
0
};
struct oem_rapi_client_streaming_func_ret client_ret = {
(uint32_t *)NULL,
(char *)NULL
};
int ret = oem_rapi_client_streaming_function(client, &client_arg, &client_ret);
if (ret)
printk(KERN_ERR
"oem_rapi_client_streaming_function() error=%d\n", ret);
}
static ssize_t led_blink_solid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret = 0;
enum tri_color_led_color color = LED_COLOR_MAX;
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct tricolor_led_data *tricolor_led = NULL;
if (!strcmp(led_cdev->name, "red")) {
color = LED_COLOR_RED;
} else if (!strcmp(led_cdev->name, "green")) {
color = LED_COLOR_GREEN;
} else {
color = LED_COLOR_BLUE;
}
tricolor_led = container_of(led_cdev, struct tricolor_led_data, leds[color]);
if(!tricolor_led)
printk(KERN_ERR "%s tricolor_led is NULL ",__func__);
ret = sprintf(buf, "%u\n", tricolor_led->led_data[color]);
return ret;
}
/*
 * sysfs "blink" store handler: parse an integer and tell the modem to
 * start (non-zero) or stop (zero) blinking this LED.
 * Returns @size on success, -EINVAL on unparseable input.
 */
static ssize_t led_blink_solid_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	int blink = 0;
	unsigned long flags;
	enum tri_led_status input = LED_MAX;
	enum tri_color_led_color color;
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct tricolor_led_data *tricolor_led;

	if (!strcmp(led_cdev->name, "red"))
		color = LED_COLOR_RED;
	else if (!strcmp(led_cdev->name, "green"))
		color = LED_COLOR_GREEN;
	else
		color = LED_COLOR_BLUE;

	/* container_of() cannot return NULL; the old check was dead code. */
	tricolor_led = container_of(led_cdev, struct tricolor_led_data,
				    leds[color]);

	/* The old code ignored the sscanf result and used a stale 0. */
	if (sscanf(buf, "%d", &blink) != 1)
		return -EINVAL;

#if DEBUG_TRICOLOR_LED
	printk("tricolor %s is %d\n",led_cdev->name, blink);
#endif

	spin_lock_irqsave(&tricolor_led->led_lock, flags);
	/* Collapse the duplicated on/off switches into one. */
	switch (color) {
	case LED_COLOR_RED:
		input = blink ? RED_BLINK : RED_BLINK_OFF;
		break;
	case LED_COLOR_GREEN:
		input = blink ? GREEN_BLINK : GREEN_BLINK_OFF;
		break;
	case LED_COLOR_BLUE:
		input = blink ? BLUE_BLINK : BLUE_BLINK_OFF;
		break;
	default:
		break;
	}
	tricolor_led->led_data[color] = blink;
	spin_unlock_irqrestore(&tricolor_led->led_lock, flags);

	/* The actual hardware toggle happens on the modem side via RPC. */
	call_oem_rapi_client_streaming_function(tricolor_led->rpc_client,
						(char *)&input);
	return size;
}
static DEVICE_ATTR(blink, 0644, led_blink_solid_show, led_blink_solid_store);
/*
 * led_classdev brightness_set hook for the three color LEDs: any
 * non-zero brightness switches the LED on, zero switches it off.
 * The command is delivered to the modem via OEM RAPI.
 */
static void led_brightness_set_tricolor(struct led_classdev *led_cdev,
					enum led_brightness brightness)
{
	struct tricolor_led_data *tricolor_led;
	enum tri_color_led_color color;
	enum tri_led_status input = LED_MAX;
	unsigned long flags;

	if (!strcmp(led_cdev->name, "red"))
		color = LED_COLOR_RED;
	else if (!strcmp(led_cdev->name, "green"))
		color = LED_COLOR_GREEN;
	else
		color = LED_COLOR_BLUE;

	/* container_of() cannot return NULL; the old check was dead code. */
	tricolor_led = container_of(led_cdev, struct tricolor_led_data,
				    leds[color]);

	spin_lock_irqsave(&tricolor_led->led_lock, flags);
	/* One switch instead of the duplicated on/off pair. */
	switch (color) {
	case LED_COLOR_RED:
		input = brightness ? RED_ON : RED_OFF;
		break;
	case LED_COLOR_GREEN:
		input = brightness ? GREEN_ON : GREEN_OFF;
		break;
	case LED_COLOR_BLUE:
		input = brightness ? BLUE_ON : BLUE_OFF;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&tricolor_led->led_lock, flags);

	call_oem_rapi_client_streaming_function(tricolor_led->rpc_client,
						(char *)&input);
}
/*
 * brightness_set hook for the flashlight LED: it is a plain GPIO whose
 * line number depends on the board variant.
 */
static void led_brightness_set_flash(struct led_classdev *led_cdev,
				     enum led_brightness brightness)
{
	int value = brightness ? 1 : 0;

	if (machine_is_msm8625_qrd5() || machine_is_msm7x27a_qrd5a())
		gpio_set_value(qrd5_led_flash_en1, value);
	else if (machine_is_msm8625_qrd7())
		gpio_set_value(qrd7_led_flash_en, value);
}
/*
 * Platform probe: allocate driver state, open the OEM RAPI client,
 * register four LED classdevs (red/green/blue/flashlight) and their
 * "blink" sysfs attributes.  All partially-acquired resources are
 * released in reverse order on failure.
 */
static int tricolor_led_probe(struct platform_device *pdev)
{
	int ret = 0;
	int i, j;
	struct tricolor_led_data *tricolor_led;

	printk(KERN_ERR "tricolor leds and flashlight: probe init \n");
	tricolor_led = kzalloc(sizeof(struct tricolor_led_data), GFP_KERNEL);
	if (tricolor_led == NULL) {
		printk(KERN_ERR "tricolor_led_probe: no memory for device\n");
		ret = -ENOMEM;
		goto err;
	}
	/* kzalloc() already zeroed the struct; the old memset was redundant. */
	spin_lock_init(&tricolor_led->led_lock);

	/* initialize tricolor_led->rpc_client */
	tricolor_led->rpc_client = oem_rapi_client_init();
	if (IS_ERR(tricolor_led->rpc_client)) {
		printk(KERN_ERR "[tricolor-led] cannot initialize rpc_client!\n");
		/* Propagate the encoded errno, not IS_ERR()'s boolean 1. */
		ret = PTR_ERR(tricolor_led->rpc_client);
		tricolor_led->rpc_client = NULL;
		goto err_init_rpc_client;
	}

	tricolor_led->leds[0].name = "red";
	tricolor_led->leds[0].brightness_set = led_brightness_set_tricolor;
	tricolor_led->leds[1].name = "green";
	tricolor_led->leds[1].brightness_set = led_brightness_set_tricolor;
	tricolor_led->leds[2].name = "blue";
	tricolor_led->leds[2].brightness_set = led_brightness_set_tricolor;
	tricolor_led->leds[3].name = "flashlight";
	tricolor_led->leds[3].brightness_set = led_brightness_set_flash;

	for (i = 0; i < 4; i++) { /* red, green, blue, flashlight */
		ret = led_classdev_register(&pdev->dev, &tricolor_led->leds[i]);
		if (ret) {
			printk(KERN_ERR
			       "tricolor_led: led_classdev_register failed\n");
			goto err_led_classdev_register_failed;
		}
	}

	for (i = 0; i < 4; i++) {
		ret = device_create_file(tricolor_led->leds[i].dev, &dev_attr_blink);
		if (ret) {
			printk(KERN_ERR
			       "tricolor_led: device_create_file failed\n");
			goto err_out_attr_blink;
		}
	}

	dev_set_drvdata(&pdev->dev, tricolor_led);
	return 0;

err_out_attr_blink:
	for (j = 0; j < i; j++)
		device_remove_file(tricolor_led->leds[j].dev, &dev_attr_blink);
	i = 4;	/* all four classdevs were registered; unregister them all */
err_led_classdev_register_failed:
	for (j = 0; j < i; j++)
		led_classdev_unregister(&tricolor_led->leds[j]);
err_init_rpc_client:
	/* rpc_client is NULL when its own init failed, so no double close. */
	if (tricolor_led->rpc_client) {
		oem_rapi_client_close();
		printk(KERN_ERR "tri-color-led: oem_rapi_client_close\n");
	}
	kfree(tricolor_led);
err:
	return ret;
}
/*
 * Platform remove: tear down the sysfs attributes and classdevs
 * registered in probe, then close the OEM RAPI client.
 */
static int __devexit tricolor_led_remove(struct platform_device *pdev)
{
	int i;
	struct tricolor_led_data *tricolor_led = platform_get_drvdata(pdev);

	printk(KERN_ERR "tricolor_led_remove: remove\n");

	for (i = 0; i < 4; i++) {
		device_remove_file(tricolor_led->leds[i].dev, &dev_attr_blink);
		led_classdev_unregister(&tricolor_led->leds[i]);
	}

	oem_rapi_client_close();
	tricolor_led->rpc_client = NULL;
	kfree(tricolor_led);
	return 0;
}
/* Platform driver glue; matched by name against the board's device. */
static struct platform_driver tricolor_led_driver = {
	.probe = tricolor_led_probe,
	.remove = __devexit_p(tricolor_led_remove),
	.suspend = NULL,	/* no power-management support */
	.resume = NULL,
	.driver = {
		.name = "tricolor leds and flashlight",
		.owner = THIS_MODULE,
	},
};
/* Module entry point: register the platform driver. */
static int __init tricolor_led_init(void)
{
	printk(KERN_ERR "tricolor_leds_backlight_init: module init\n");
	return platform_driver_register(&tricolor_led_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit tricolor_led_exit(void)
{
	printk(KERN_ERR "tricolor_leds_backlight_exit: module exit\n");
	platform_driver_unregister(&tricolor_led_driver);
}

MODULE_AUTHOR("rockie cheng");
MODULE_DESCRIPTION("tricolor leds and flashlight driver");
MODULE_LICENSE("GPL");
module_init(tricolor_led_init);
module_exit(tricolor_led_exit);
| gpl-2.0 |
sunny256/linux | arch/powerpc/platforms/powernv/opal-memory-errors.c | 18 | 4066 | /*
* OPAL asynchronus Memory error handling support in PowerNV.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright 2013 IBM Corporation
* Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputable.h>
/* Non-zero once the OPAL notifier has been registered (see init below). */
static int opal_mem_err_nb_init;
/* FIFO of queued memory-error messages, guarded by opal_mem_err_lock. */
static LIST_HEAD(opal_memory_err_list);
static DEFINE_SPINLOCK(opal_mem_err_lock);

/* List node wrapping a private copy of one opal_msg. */
struct OpalMsgNode {
	struct list_head list;
	struct opal_msg msg;
};
/*
 * Decode one OPAL memory-error event and hand every page of the
 * affected physical range to memory_failure().  Unknown event types
 * are silently ignored.
 */
static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
{
	uint64_t paddr, paddr_end;

	pr_debug("%s: Retrieved memory error event, type: 0x%x\n",
		 __func__, merr_evt->type);

	switch (merr_evt->type) {
	case OPAL_MEM_ERR_TYPE_RESILIENCE:
		paddr = be64_to_cpu(merr_evt->u.resilience.physical_address_start);
		paddr_end = be64_to_cpu(merr_evt->u.resilience.physical_address_end);
		break;
	case OPAL_MEM_ERR_TYPE_DYN_DALLOC:
		paddr = be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_start);
		paddr_end = be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_end);
		break;
	default:
		return;
	}

	while (paddr < paddr_end) {
		memory_failure(paddr >> PAGE_SHIFT, 0, 0);
		paddr += PAGE_SIZE;
	}
}
/*
 * Drain opal_memory_err_list, processing each queued message in turn.
 * The spinlock is dropped while an individual event is handled and
 * re-taken before the list is examined again, so the notifier can keep
 * queueing new messages concurrently.
 */
static void handle_memory_error(void)
{
	unsigned long flags;
	struct OpalMemoryErrorData *merr_evt;
	struct OpalMsgNode *msg_node;

	spin_lock_irqsave(&opal_mem_err_lock, flags);
	while (!list_empty(&opal_memory_err_list)) {
		msg_node = list_entry(opal_memory_err_list.next,
					   struct OpalMsgNode, list);
		list_del(&msg_node->list);
		spin_unlock_irqrestore(&opal_mem_err_lock, flags);

		/* The error payload lives in the message parameter area. */
		merr_evt = (struct OpalMemoryErrorData *)
					&msg_node->msg.params[0];
		handle_memory_error_event(merr_evt);
		kfree(msg_node);
		spin_lock_irqsave(&opal_mem_err_lock, flags);
	}
	spin_unlock_irqrestore(&opal_mem_err_lock, flags);
}
/* Workqueue entry point: drain the queued error list. */
static void mem_error_handler(struct work_struct *work)
{
	handle_memory_error();
}

static DECLARE_WORK(mem_error_work, mem_error_handler);
/*
 * opal_memory_err_event - notifier handler that queues up the opal message
 * to be processed later by the mem_error_work workqueue item.
 */
static int opal_memory_err_event(struct notifier_block *nb,
				 unsigned long msg_type, void *msg)
{
	unsigned long flags;
	struct OpalMsgNode *msg_node;

	if (msg_type != OPAL_MSG_MEM_ERR)
		return 0;

	/* GFP_ATOMIC: the notifier may be invoked in atomic context. */
	msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
	if (!msg_node) {
		/* Old literal concatenation printed "...event nothandled". */
		pr_err("MEMORY_ERROR: out of memory, Opal message event not "
		       "handled\n");
		return -ENOMEM;
	}
	memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));

	spin_lock_irqsave(&opal_mem_err_lock, flags);
	list_add(&msg_node->list, &opal_memory_err_list);
	spin_unlock_irqrestore(&opal_mem_err_lock, flags);

	schedule_work(&mem_error_work);
	return 0;
}
/* Notifier registered for OPAL_MSG_MEM_ERR messages. */
static struct notifier_block opal_mem_err_nb = {
	.notifier_call	= opal_memory_err_event,
	.next		= NULL,
	.priority	= 0,
};
/*
 * Register the OPAL memory-error notifier exactly once.
 * Returns 0 on success or the registration error.
 */
static int __init opal_mem_err_init(void)
{
	int ret;

	if (opal_mem_err_nb_init)
		return 0;

	ret = opal_message_notifier_register(OPAL_MSG_MEM_ERR,
					     &opal_mem_err_nb);
	if (ret) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, ret);
		return ret;
	}

	opal_mem_err_nb_init = 1;
	return 0;
}
machine_device_initcall(powernv, opal_mem_err_init);
| gpl-2.0 |
FrancescoCG/Crazy-Kernel1-TW-Kernel | fs/open.c | 18 | 27150 | /*
* linux/fs/open.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/namei.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
#include <linux/securebits.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/rcupdate.h>
#include <linux/audit.h>
#include <linux/falloc.h>
#include <linux/fs_struct.h>
#include <linux/ima.h>
#include <linux/dnotify.h>
#include "internal.h"
/*
 * Truncate (or extend) the file behind @dentry to @length bytes,
 * updating the inode times named by @time_attrs.  When acting on an
 * open file, @filp is forwarded to the filesystem via ATTR_FILE.
 * Returns 0 or a negative errno from notify_change().
 */
int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
	struct file *filp)
{
	int ret;
	struct iattr newattrs;

	/* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
	if (length < 0)
		return -EINVAL;

	newattrs.ia_size = length;
	newattrs.ia_valid = ATTR_SIZE | time_attrs;
	if (filp) {
		newattrs.ia_file = filp;
		newattrs.ia_valid |= ATTR_FILE;
	}

	/* Remove suid/sgid on truncate too */
	ret = should_remove_suid(dentry);
	if (ret)
		newattrs.ia_valid |= ret | ATTR_FORCE;

	/* notify_change() must be called with i_mutex held. */
	mutex_lock(&dentry->d_inode->i_mutex);
	ret = notify_change(dentry, &newattrs);
	mutex_unlock(&dentry->d_inode->i_mutex);
	return ret;
}
/*
 * Path-based truncate: resolve @pathname, verify the target is a
 * writable regular file (not append-only, no conflicting leases or
 * mandatory locks), then call do_truncate().  Resources are released
 * in reverse order via the goto chain.
 */
static long do_sys_truncate(const char __user *pathname, loff_t length)
{
	struct path path;
	struct inode *inode;
	int error;

	error = -EINVAL;
	if (length < 0)	/* sorry, but loff_t says... */
		goto out;

	error = user_path(pathname, &path);
	if (error)
		goto out;
	inode = path.dentry->d_inode;

	/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
	error = -EISDIR;
	if (S_ISDIR(inode->i_mode))
		goto dput_and_out;

	error = -EINVAL;
	if (!S_ISREG(inode->i_mode))
		goto dput_and_out;

	error = mnt_want_write(path.mnt);
	if (error)
		goto dput_and_out;

	error = inode_permission(inode, MAY_WRITE);
	if (error)
		goto mnt_drop_write_and_out;

	/* Append-only files may never be truncated. */
	error = -EPERM;
	if (IS_APPEND(inode))
		goto mnt_drop_write_and_out;

	error = get_write_access(inode);
	if (error)
		goto mnt_drop_write_and_out;

	/*
	 * Make sure that there are no leases. get_write_access() protects
	 * against the truncate racing with a lease-granting setlease().
	 */
	error = break_lease(inode, O_WRONLY);
	if (error)
		goto put_write_and_out;

	error = locks_verify_truncate(inode, NULL, length);
	if (!error)
		error = security_path_truncate(&path);
	if (!error)
		error = do_truncate(path.dentry, length, 0, NULL);

put_write_and_out:
	put_write_access(inode);
mnt_drop_write_and_out:
	mnt_drop_write(path.mnt);
dput_and_out:
	path_put(&path);
out:
	return error;
}
/* truncate(2): path-based entry point; length is a native "long" here. */
SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
{
	return do_sys_truncate(path, length);
}
/*
 * fd-based truncate.  @small enforces the 2^31-1 limit for the legacy
 * 32-bit ftruncate entry point, unless the file was opened with
 * O_LARGEFILE.  The fd's open mode replaces the path-permission checks
 * done by do_sys_truncate().
 */
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
	struct inode * inode;
	struct dentry *dentry;
	struct file * file;
	int error;

	error = -EINVAL;
	if (length < 0)
		goto out;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	/* explicitly opened as large or we are on 64-bit box */
	if (file->f_flags & O_LARGEFILE)
		small = 0;

	dentry = file->f_path.dentry;
	inode = dentry->d_inode;

	/* The fd must be a regular file opened for writing. */
	error = -EINVAL;
	if (!S_ISREG(inode->i_mode) || !(file->f_mode & FMODE_WRITE))
		goto out_putf;

	error = -EINVAL;
	/* Cannot ftruncate over 2^31 bytes without large file support */
	if (small && length > MAX_NON_LFS)
		goto out_putf;

	error = -EPERM;
	if (IS_APPEND(inode))
		goto out_putf;

	error = locks_verify_truncate(inode, file, length);
	if (!error)
		error = security_path_truncate(&file->f_path);
	if (!error)
		error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file);
out_putf:
	fput(file);
out:
	return error;
}
/* ftruncate(2): small == 1 keeps the legacy 2^31 cap (see above). */
SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
{
	long ret = do_sys_ftruncate(fd, length, 1);
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(2, ret, fd, length);
	return ret;
}
/* LFS versions of truncate are only needed on 32 bit machines */
#if BITS_PER_LONG == 32
/* truncate64(2): accepts a full 64-bit length on 32-bit kernels. */
SYSCALL_DEFINE(truncate64)(const char __user * path, loff_t length)
{
	return do_sys_truncate(path, length);
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/* Register-spreading wrapper for architectures that need it. */
asmlinkage long SyS_truncate64(long path, loff_t length)
{
	return SYSC_truncate64((const char __user *) path, length);
}
SYSCALL_ALIAS(sys_truncate64, SyS_truncate64);
#endif

/* ftruncate64(2): small == 0, so no 2^31 cap applies. */
SYSCALL_DEFINE(ftruncate64)(unsigned int fd, loff_t length)
{
	long ret = do_sys_ftruncate(fd, length, 0);
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(2, ret, fd, length);
	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_ftruncate64(long fd, loff_t length)
{
	return SYSC_ftruncate64((unsigned int) fd, length);
}
SYSCALL_ALIAS(sys_ftruncate64, SyS_ftruncate64);
#endif
#endif /* BITS_PER_LONG == 32 */
/*
 * Common implementation of fallocate(2): validate @mode/@offset/@len
 * and the file's state, then delegate to the filesystem's ->fallocate.
 * The order of checks is deliberate — it determines which errno a
 * caller sees when several conditions fail at once.
 */
int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	long ret;

	if (offset < 0 || len <= 0)
		return -EINVAL;

	/* Return error if mode is not supported */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	/* Punch hole must have keep size set */
	if ((mode & FALLOC_FL_PUNCH_HOLE) &&
	    !(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	/* It's not possible punch hole on append only file */
	if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
		return -EPERM;

	if (IS_IMMUTABLE(inode))
		return -EPERM;

	/*
	 * Revalidate the write permissions, in case security policy has
	 * changed since the files were opened.
	 */
	ret = security_file_permission(file, MAY_WRITE);
	if (ret)
		return ret;

	if (S_ISFIFO(inode->i_mode))
		return -ESPIPE;

	/*
	 * Let individual file system decide if it supports preallocation
	 * for directories or not.
	 */
	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
		return -ENODEV;

	/* Check for wrap through zero too */
	if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
		return -EFBIG;

	if (!file->f_op->fallocate)
		return -EOPNOTSUPP;

	return file->f_op->fallocate(file, mode, offset, len);
}
/* fallocate(2): resolve the fd and delegate to do_fallocate(). */
SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
{
	struct file *file;
	int error = -EBADF;

	file = fget(fd);
	if (file) {
		error = do_fallocate(file, mode, offset, len);
		fput(file);
	}
	return error;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/* Register-spreading wrapper for architectures that need it. */
asmlinkage long SyS_fallocate(long fd, long mode, loff_t offset, loff_t len)
{
	return SYSC_fallocate((int)fd, (int)mode, offset, len);
}
SYSCALL_ALIAS(sys_fallocate, SyS_fallocate);
#endif
/*
 * access() needs to use the real uid/gid, not the effective uid/gid.
 * We do this by temporarily clearing all FS-related capabilities and
 * switching the fsuid/fsgid around to the real ones.
 */
SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
{
	const struct cred *old_cred;
	struct cred *override_cred;
	struct path path;
	struct inode *inode;
	int res;

	if (mode & ~S_IRWXO)	/* where's F_OK, X_OK, W_OK, R_OK? */
		return -EINVAL;

	/* Build a temporary credential set with fsuid/fsgid = real ids. */
	override_cred = prepare_creds();
	if (!override_cred)
		return -ENOMEM;

	override_cred->fsuid = override_cred->uid;
	override_cred->fsgid = override_cred->gid;

	if (!issecure(SECURE_NO_SETUID_FIXUP)) {
		/* Clear the capabilities if we switch to a non-root user */
		if (override_cred->uid)
			cap_clear(override_cred->cap_effective);
		else
			override_cred->cap_effective =
				override_cred->cap_permitted;
	}

	old_cred = override_creds(override_cred);

	res = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
	if (res)
		goto out;

	inode = path.dentry->d_inode;

	if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) {
		/*
		 * MAY_EXEC on regular files is denied if the fs is mounted
		 * with the "noexec" flag.
		 */
		res = -EACCES;
		if (path.mnt->mnt_flags & MNT_NOEXEC)
			goto out_path_release;
	}

	res = inode_permission(inode, mode | MAY_ACCESS);
	/* SuS v2 requires we report a read only fs too */
	if (res || !(mode & S_IWOTH) || special_file(inode->i_mode))
		goto out_path_release;
	/*
	 * This is a rare case where using __mnt_is_readonly()
	 * is OK without a mnt_want/drop_write() pair.  Since
	 * no actual write to the fs is performed here, we do
	 * not need to telegraph to that to anyone.
	 *
	 * By doing this, we accept that this access is
	 * inherently racy and know that the fs may change
	 * state before we even see this result.
	 */
	if (__mnt_is_readonly(path.mnt))
		res = -EROFS;

out_path_release:
	path_put(&path);
out:
	/* Restore the caller's real credentials. */
	revert_creds(old_cred);
	put_cred(override_cred);
	return res;
}
/* access(2) == faccessat(AT_FDCWD, path, mode). */
SYSCALL_DEFINE2(access, const char __user *, filename, int, mode)
{
	return sys_faccessat(AT_FDCWD, filename, mode);
}
/*
 * chdir(2): the target must resolve to a directory the caller may
 * search; on success the task's cwd is switched to it.
 */
SYSCALL_DEFINE1(chdir, const char __user *, filename)
{
	struct path path;
	int error = user_path_dir(filename, &path);

	if (error)
		return error;

	error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
	if (!error)
		set_fs_pwd(current->fs, &path);

	path_put(&path);
	return error;
}
/*
 * fchdir(2): change cwd to an already-open directory fd.  Uses
 * fget_raw_light() — NOTE(review): presumably to also accept O_PATH
 * descriptors; confirm against fget_raw() semantics in this tree.
 */
SYSCALL_DEFINE1(fchdir, unsigned int, fd)
{
	struct file *file;
	struct inode *inode;
	int error, fput_needed;

	error = -EBADF;
	file = fget_raw_light(fd, &fput_needed);
	if (!file)
		goto out;

	inode = file->f_path.dentry->d_inode;

	/* The descriptor must refer to a directory... */
	error = -ENOTDIR;
	if (!S_ISDIR(inode->i_mode))
		goto out_putf;

	/* ...and the caller needs search permission on it. */
	error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
	if (!error)
		set_fs_pwd(current->fs, &file->f_path);
out_putf:
	fput_light(file, fput_needed);
out:
	return error;
}
/*
 * chroot(2): change the task's root directory.  Requires search
 * permission on the target directory, CAP_SYS_CHROOT, and LSM
 * approval, in that order.
 */
SYSCALL_DEFINE1(chroot, const char __user *, filename)
{
	struct path path;
	int error;

	error = user_path_dir(filename, &path);
	if (error)
		goto out;

	error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
	if (error)
		goto dput_and_out;

	error = -EPERM;
	if (!capable(CAP_SYS_CHROOT))
		goto dput_and_out;
	error = security_path_chroot(&path);
	if (error)
		goto dput_and_out;

	set_fs_root(current->fs, &path);
	error = 0;
dput_and_out:
	path_put(&path);
out:
	return error;
}
/*
 * Shared implementation of the chmod family.  Takes mount-write
 * access and i_mutex, consults the LSM, then applies the new
 * permission bits via notify_change().  Non-permission bits of the
 * inode mode are preserved.
 */
static int chmod_common(struct path *path, umode_t mode)
{
	struct inode *inode = path->dentry->d_inode;
	struct iattr newattrs;
	int error;

	error = mnt_want_write(path->mnt);
	if (error)
		return error;
	mutex_lock(&inode->i_mutex);
	error = security_path_chmod(path, mode);
	if (error)
		goto out_unlock;
	/* Only the S_IALLUGO bits come from the caller. */
	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
	error = notify_change(path->dentry, &newattrs);
out_unlock:
	mutex_unlock(&inode->i_mutex);
	mnt_drop_write(path->mnt);
	return error;
}
/*
 * fchmod(2): chmod by open file descriptor; the inode is audited
 * before the common path applies the mode change.
 */
SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
{
	int err = -EBADF;
	struct file *file = fget(fd);

	if (!file)
		return err;

	audit_inode(NULL, file->f_path.dentry);
	err = chmod_common(&file->f_path, mode);
	fput(file);
	return err;
}
/* fchmodat(2): path-based chmod relative to a directory fd. */
SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode)
{
	struct path path;
	int error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);

	if (error)
		return error;

	error = chmod_common(&path, mode);
	path_put(&path);
	return error;
}
/* chmod(2) == fchmodat(AT_FDCWD, path, mode). */
SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode)
{
	return sys_fchmodat(AT_FDCWD, filename, mode);
}
/*
 * Shared implementation of the chown family.  A @user or @group of
 * (uid_t)-1 / (gid_t)-1 means "leave unchanged".  For non-directories
 * the setuid/setgid/privilege bits are flagged for clearing
 * (ATTR_KILL_*) per standard chown semantics.  The caller must
 * already hold mount-write access.
 */
static int chown_common(struct path *path, uid_t user, gid_t group)
{
	struct inode *inode = path->dentry->d_inode;
	int error;
	struct iattr newattrs;

	newattrs.ia_valid =  ATTR_CTIME;
	if (user != (uid_t) -1) {
		newattrs.ia_valid |= ATTR_UID;
		newattrs.ia_uid = user;
	}
	if (group != (gid_t) -1) {
		newattrs.ia_valid |= ATTR_GID;
		newattrs.ia_gid = group;
	}
	if (!S_ISDIR(inode->i_mode))
		newattrs.ia_valid |=
			ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
	mutex_lock(&inode->i_mutex);
	error = security_path_chown(path, user, group);
	if (!error)
		error = notify_change(path->dentry, &newattrs);
	mutex_unlock(&inode->i_mutex);

	return error;
}
/* chown(2): path-based ownership change, following symlinks. */
SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
{
	struct path path;
	int error = user_path(filename, &path);

	if (error)
		return error;

	error = mnt_want_write(path.mnt);
	if (!error) {
		error = chown_common(&path, user, group);
		mnt_drop_write(path.mnt);
	}
	path_put(&path);
	return error;
}
/*
 * fchownat(2): change ownership of a path resolved relative to @dfd.
 * AT_SYMLINK_NOFOLLOW operates on a symlink itself; AT_EMPTY_PATH lets
 * an empty @filename mean "the object @dfd refers to".
 */
SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
		gid_t, group, int, flag)
{
	struct path path;
	int error = -EINVAL;
	int lookup_flags;

	/* reject any flag bits we do not understand */
	if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
		goto out;

	lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
	if (flag & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;
	error = mnt_want_write(path.mnt);
	if (error)
		goto out_release;
	error = chown_common(&path, user, group);
	mnt_drop_write(path.mnt);
out_release:
	path_put(&path);
out:
	return error;
}
/*
 * lchown(2): like chown(2), but operates on a symlink itself
 * (user_lpath() does not follow the final symlink).
 */
SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group)
{
	struct path path;
	int error = user_lpath(filename, &path);

	if (error)
		return error;

	error = mnt_want_write(path.mnt);
	if (!error) {
		error = chown_common(&path, user, group);
		mnt_drop_write(path.mnt);
	}
	path_put(&path);
	return error;
}
/*
 * fchown(2): change ownership of an already-open file.  Uses
 * mnt_want_write_file(), which also copes with files that were opened
 * for write before the mount went read-only.
 */
SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
{
	struct file * file;
	int error = -EBADF;
	struct dentry * dentry;

	file = fget(fd);
	if (!file)
		goto out;

	error = mnt_want_write_file(file);
	if (error)
		goto out_fput;
	dentry = file->f_path.dentry;
	audit_inode(NULL, dentry);
	error = chown_common(&file->f_path, user, group);
	mnt_drop_write_file(file);
out_fput:
	fput(file);
out:
	return error;
}
/*
 * You have to be very careful that these write
 * counts get cleaned up in error cases and
 * upon __fput(). This should probably never
 * be called outside of __dentry_open().
 */
static inline int __get_file_write_access(struct inode *inode,
					  struct vfsmount *mnt)
{
	int error;

	/* bump inode write count; fails e.g. with ETXTBSY on running binaries */
	error = get_write_access(inode);
	if (error)
		return error;
	/*
	 * Do not take mount writer counts on
	 * special files since no writes to
	 * the mount itself will occur.
	 */
	if (!special_file(inode->i_mode)) {
		/*
		 * Balanced in __fput()
		 */
		error = mnt_want_write(mnt);
		if (error)
			put_write_access(inode);	/* undo on failure */
	}
	return error;
}
/*
 * __dentry_open - final stage of opening a file.
 *
 * Consumes the caller's references on @dentry and @mnt: on success they
 * are owned by the returned file; on failure they are dropped here (via
 * dput/mntput) together with the preallocated @f (via put_filp).
 * @open, when non-NULL, overrides f_op->open.  O_PATH opens get an
 * empty file_operations and skip security/lease/open processing
 * entirely.  Returns @f or an ERR_PTR.
 */
static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
					struct file *f,
					int (*open)(struct inode *, struct file *),
					const struct cred *cred)
{
	static const struct file_operations empty_fops = {};
	struct inode *inode;
	int error;

	f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
				FMODE_PREAD | FMODE_PWRITE;

	/* O_PATH descriptors carry no read/write/seek capability at all */
	if (unlikely(f->f_flags & O_PATH))
		f->f_mode = FMODE_PATH;

	inode = dentry->d_inode;
	if (f->f_mode & FMODE_WRITE) {
		error = __get_file_write_access(inode, mnt);
		if (error)
			goto cleanup_file;
		if (!special_file(inode->i_mode))
			file_take_write(f);
	}

	f->f_mapping = inode->i_mapping;
	f->f_path.dentry = dentry;
	f->f_path.mnt = mnt;
	f->f_pos = 0;
	file_sb_list_add(f, inode->i_sb);

	if (unlikely(f->f_mode & FMODE_PATH)) {
		f->f_op = &empty_fops;
		return f;
	}

	f->f_op = fops_get(inode->i_fop);

	error = security_dentry_open(f, cred);
	if (error)
		goto cleanup_all;

	error = break_lease(inode, f->f_flags);
	if (error)
		goto cleanup_all;

	if (!open && f->f_op)
		open = f->f_op->open;
	if (open) {
		error = open(inode, f);
		if (error)
			goto cleanup_all;
	}
	/* track read-only opens so i_readcount balances in __fput() */
	if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(inode);

	f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);

	file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);

	/* NB: we're sure to have correct a_ops only after f_op->open */
	if (f->f_flags & O_DIRECT) {
		if (!f->f_mapping->a_ops ||
		    ((!f->f_mapping->a_ops->direct_IO) &&
		    (!f->f_mapping->a_ops->get_xip_mem))) {
			fput(f);
			f = ERR_PTR(-EINVAL);
		}
	}

	return f;

cleanup_all:
	fops_put(f->f_op);
	if (f->f_mode & FMODE_WRITE) {
		put_write_access(inode);
		if (!special_file(inode->i_mode)) {
			/*
			 * We don't consider this a real
			 * mnt_want/drop_write() pair
			 * because it all happened right
			 * here, so just reset the state.
			 */
			file_reset_write(f);
			mnt_drop_write(mnt);
		}
	}
	file_sb_list_del(f);
	f->f_path.dentry = NULL;
	f->f_path.mnt = NULL;
cleanup_file:
	put_filp(f);
	dput(dentry);
	mntput(mnt);
	return ERR_PTR(error);
}
/**
 * lookup_instantiate_filp - instantiates the open intent filp
 * @nd: pointer to nameidata
 * @dentry: pointer to dentry
 * @open: open callback
 *
 * Helper for filesystems that want to use lookup open intents and pass back
 * a fully instantiated struct file to the caller.
 * This function is meant to be called from within a filesystem's
 * lookup method.
 * Beware of calling it for non-regular files! Those ->open methods might block
 * (e.g. in fifo_open), leaving you with parent locked (and in case of fifo,
 * leading to a deadlock, as nobody can open that fifo anymore, because
 * another process to open fifo will block on locked parent when doing lookup).
 * Note that in case of error, nd->intent.open.file is destroyed, but the
 * path information remains valid.
 * If the open callback is set to NULL, then the standard f_op->open()
 * filesystem callback is substituted.
 */
struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
		int (*open)(struct inode *, struct file *))
{
	const struct cred *cred = current_cred();

	/* an earlier failure already replaced the intent file with an error */
	if (IS_ERR(nd->intent.open.file))
		goto out;
	if (IS_ERR(dentry))
		goto out_err;
	/* __dentry_open consumes the dget/mntget references taken here */
	nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
					     nd->intent.open.file,
					     open, cred);
out:
	return nd->intent.open.file;
out_err:
	release_open_intent(nd);
	nd->intent.open.file = ERR_CAST(dentry);
	goto out;
}
EXPORT_SYMBOL_GPL(lookup_instantiate_filp);
/**
 * nameidata_to_filp - convert a nameidata to an open filp.
 * @nd: pointer to nameidata
 *
 * Note that this function destroys the original nameidata
 */
struct file *nameidata_to_filp(struct nameidata *nd)
{
	const struct cred *cred = current_cred();
	struct file *filp;

	/* Pick up the filp from the open intent */
	filp = nd->intent.open.file;
	nd->intent.open.file = NULL;

	/* Has the filesystem initialised the file for us? */
	if (filp->f_path.dentry == NULL) {
		/* take our own path reference; __dentry_open consumes it */
		path_get(&nd->path);
		filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
				     NULL, cred);
	}
	return filp;
}
/*
 * dentry_open() will have done dput(dentry) and mntput(mnt) if it returns an
 * error.
 */
struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
			 const struct cred *cred)
{
	struct file *f;

	validate_creds(cred);

	/* We must always pass in a valid mount pointer. */
	BUG_ON(!mnt);

	f = get_empty_filp();
	if (f == NULL) {
		/* drop the caller's references ourselves, as documented */
		dput(dentry);
		mntput(mnt);
		return ERR_PTR(-ENFILE);
	}

	f->f_flags = flags;
	return __dentry_open(dentry, mnt, f, NULL, cred);
}
EXPORT_SYMBOL(dentry_open);
/*
 * Mark @fd free in @files' descriptor table and remember it as a
 * low-water mark so the next allocation can start searching there.
 * Caller must hold files->file_lock.
 */
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}
/* Release a descriptor number back to the current task's fd table. */
void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(put_unused_fd);
/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() do it, _really_ bad things
 * will follow.
 */
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);	/* slot must still be empty (see above) */
	/* rcu_assign_pointer publishes the file for lockless readers (fget) */
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(fd_install);
/*
 * build_open_flags - translate open(2) flags/mode into an open_flags
 * descriptor (@op) plus the LOOKUP_* flags for path resolution.
 * Returns the lookup flags to pass to do_filp_open().
 */
static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
{
	int lookup_flags = 0;
	int acc_mode;

	/* only O_CREAT consumes the mode argument; force a regular file */
	if (flags & O_CREAT)
		op->mode = (mode & S_IALLUGO) | S_IFREG;
	else
		op->mode = 0;

	/* Must never be set by userspace */
	flags &= ~FMODE_NONOTIFY;

	/*
	 * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
	 * check for O_DSYNC if they need any syncing at all we enforce it's
	 * always set instead of having to deal with possibly weird behaviour
	 * for malicious applications setting only __O_SYNC.
	 */
	if (flags & __O_SYNC)
		flags |= O_DSYNC;

	/*
	 * If we have O_PATH in the open flag. Then we
	 * cannot have anything other than the below set of flags
	 */
	if (flags & O_PATH) {
		flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH;
		acc_mode = 0;
	} else {
		acc_mode = MAY_OPEN | ACC_MODE(flags);
	}

	op->open_flag = flags;

	/* O_TRUNC implies we need access checks for write permissions */
	if (flags & O_TRUNC)
		acc_mode |= MAY_WRITE;

	/* Allow the LSM permission hook to distinguish append
	   access from general write access. */
	if (flags & O_APPEND)
		acc_mode |= MAY_APPEND;

	op->acc_mode = acc_mode;

	/* O_PATH opens carry no open intent at all */
	op->intent = flags & O_PATH ? 0 : LOOKUP_OPEN;

	if (flags & O_CREAT) {
		op->intent |= LOOKUP_CREATE;
		if (flags & O_EXCL)
			op->intent |= LOOKUP_EXCL;
	}

	if (flags & O_DIRECTORY)
		lookup_flags |= LOOKUP_DIRECTORY;
	if (!(flags & O_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	return lookup_flags;
}
/**
 * filp_open - open file and return file pointer
 *
 * @filename: path to open
 * @flags: open flags as per the open(2) second argument
 * @mode: mode for the new file if O_CREAT is set, else ignored
 *
 * This is the helper to open a file from kernelspace if you really
 * have to. But in general you should not do this, so please move
 * along, nothing to see here..
 */
struct file *filp_open(const char *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	return do_filp_open(AT_FDCWD, filename, &op, lookup);
}
EXPORT_SYMBOL(filp_open);
/*
 * file_open_root - open a file relative to an explicit root
 * dentry/mount pair instead of the task's cwd.  O_CREAT is rejected.
 * A NULL @filename with O_DIRECTORY opens the root itself, which must
 * then support lookup (i.e. be a real directory).
 */
struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
			    const char *filename, int flags)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, 0, &op);
	if (flags & O_CREAT)
		return ERR_PTR(-EINVAL);
	if (!filename && (flags & O_DIRECTORY))
		if (!dentry->d_inode->i_op->lookup)
			return ERR_PTR(-ENOTDIR);
	return do_file_open_root(dentry, mnt, filename, &op, lookup);
}
EXPORT_SYMBOL(file_open_root);
/*
 * do_sys_open - common implementation of open(2)/openat(2): copy the
 * pathname from userspace, reserve a descriptor, open the file and
 * publish it in the fd table.  Returns the new fd or a negative errno.
 */
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	char *tmp = getname(filename);
	int fd = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, &op, lookup);
			if (IS_ERR(f)) {
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f);
				/* publish only after the open fully succeeded */
				fd_install(fd, f);
			}
		}
		putname(tmp);
	}
	return fd;
}
/* open(2): do_sys_open() relative to the current working directory. */
SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
{
	long ret;

	/* 64-bit kernels force large-file support on every open */
	if (force_o_largefile())
		flags |= O_LARGEFILE;

	ret = do_sys_open(AT_FDCWD, filename, flags, mode);
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(3, ret, filename, flags, mode);
	return ret;
}
/* openat(2): like open(2) but resolves @filename relative to @dfd. */
SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
		umode_t, mode)
{
	long ret;

	/* 64-bit kernels force large-file support on every open */
	if (force_o_largefile())
		flags |= O_LARGEFILE;

	ret = do_sys_open(dfd, filename, flags, mode);
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(4, ret, dfd, filename, flags, mode);
	return ret;
}
#ifndef __alpha__

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
/* creat(2) is exactly open(2) with O_CREAT|O_WRONLY|O_TRUNC. */
SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode)
{
	return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode);
}

#endif
/*
 * "id" is the POSIX thread ID. We use the
 * files pointer for this..
 */
/*
 * filp_close - flush and release one reference to an open file.
 * O_PATH files skip dnotify/POSIX-lock teardown since they were never
 * opened for I/O.  Returns the ->flush() result (0 if there is none).
 */
int filp_close(struct file *filp, fl_owner_t id)
{
	int retval = 0;

	/* defensive: a zero refcount here indicates a caller bug */
	if (!file_count(filp)) {
		printk(KERN_ERR "VFS: Close: file count is 0\n");
		return 0;
	}

	if (filp->f_op && filp->f_op->flush)
		retval = filp->f_op->flush(filp, id);

	if (likely(!(filp->f_mode & FMODE_PATH))) {
		dnotify_flush(filp, id);
		locks_remove_posix(filp, id);
	}
	security_file_close(filp);
	fput(filp);
	return retval;
}
EXPORT_SYMBOL(filp_close);
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
SYSCALL_DEFINE1(close, unsigned int, fd)
{
	struct file * filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

#ifdef CONFIG_SEC_DEBUG_ZERO_FD_CLOSE
	/* vendor debug aid: catch mediaserver closing stdin */
	if (fd == 0 && strcmp(current->group_leader->comm,"mediaserver") == 0)
		panic("trying to close fd=0");
#endif

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	/* detach the file from the table before dropping the lock */
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
EXPORT_SYMBOL(sys_close);
/*
 * This routine simulates a hangup on the tty, to arrange that users
 * are given clean terminals at login time.
 */
SYSCALL_DEFINE0(vhangup)
{
	/* only privileged (CAP_SYS_TTY_CONFIG) callers may hang up the tty */
	if (!capable(CAP_SYS_TTY_CONFIG))
		return -EPERM;

	tty_vhangup_self();
	return 0;
}
/*
 * Called when an inode is about to be open.
 * We use this to disallow opening large files on 32bit systems if
 * the caller didn't specify O_LARGEFILE. On 64bit systems we force
 * on this flag in sys_open.
 */
int generic_file_open(struct inode * inode, struct file * filp)
{
	/* O_LARGEFILE opens are never size-restricted */
	if (filp->f_flags & O_LARGEFILE)
		return 0;
	/* without O_LARGEFILE, refuse files beyond the 32-bit limit */
	if (i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	return 0;
}
EXPORT_SYMBOL(generic_file_open);
/*
 * This is used by subsystems that don't want seekable
 * file descriptors. The function is not supposed to ever fail, the only
 * reason it returns an 'int' and not 'void' is so that it can be plugged
 * directly into file_operations structure.
 */
int nonseekable_open(struct inode *inode, struct file *filp)
{
	/* strip the seek/positional-I/O capabilities granted at open time */
	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
	return 0;
}
EXPORT_SYMBOL(nonseekable_open);
| gpl-2.0 |
circa-one/OpenJK-Alt | code/client/snd_mix.cpp | 18 | 9161 | /*
===========================================================================
Copyright (C) 1999 - 2005, Id Software, Inc.
Copyright (C) 2000 - 2013, Raven Software, Inc.
Copyright (C) 2001 - 2013, Activision, Inc.
Copyright (C) 2013 - 2015, OpenJK contributors
This file is part of the OpenJK source code.
OpenJK is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
===========================================================================
*/
// snd_mix.c -- portable code to mix sounds for snd_dma.c
#include "../server/exe_headers.h"
#include "snd_local.h"
portable_samplepair_t paintbuffer[PAINTBUFFER_SIZE];
int *snd_p, snd_linear_count, snd_vol;
short *snd_out;
/*
 * Convert snd_linear_count 8.8 fixed-point mono samples from snd_p[]
 * into clamped signed 16-bit samples in snd_out[], processing one
 * stereo pair (two samples) per pass.
 */
void S_WriteLinearBlastStereo16 (void)
{
	int idx = 0;

	while (idx < snd_linear_count)
	{
		int sampL = snd_p[idx] >> 8;
		int sampR = snd_p[idx + 1] >> 8;

		// clamp left sample to the signed 16-bit range
		if (sampL > 0x7fff)
			sampL = 0x7fff;
		else if (sampL < (short)0x8000)
			sampL = (short)0x8000;
		snd_out[idx] = sampL;

		// clamp right sample to the signed 16-bit range
		if (sampR > 0x7fff)
			sampR = 0x7fff;
		else if (sampR < (short)0x8000)
			sampR = (short)0x8000;
		snd_out[idx + 1] = sampR;

		idx += 2;
	}
}
/*
 * S_TransferStereo16 - fast path for 16-bit stereo DMA buffers: copy
 * the paint buffer into the circular DMA buffer between s_paintedtime
 * and endtime.  Sets up the snd_p/snd_out/snd_linear_count globals
 * consumed by S_WriteLinearBlastStereo16().
 */
void S_TransferStereo16 (unsigned long *pbuf, int endtime)
{
	int		lpos;
	int		ls_paintedtime;

	snd_p = (int *) paintbuffer;
	ls_paintedtime = s_paintedtime;

	while (ls_paintedtime < endtime)
	{
		// handle recirculating buffer issues
		lpos = ls_paintedtime & ((dma.samples>>1)-1);

		snd_out = (short *) pbuf + (lpos<<1);

		snd_linear_count = (dma.samples>>1) - lpos;
		if (ls_paintedtime + snd_linear_count > endtime)
			snd_linear_count = endtime - ls_paintedtime;

		// convert from stereo sample pairs to individual mono samples
		snd_linear_count <<= 1;

		// write a linear blast of samples
		S_WriteLinearBlastStereo16 ();

		snd_p += snd_linear_count;
		ls_paintedtime += (snd_linear_count>>1);
	}
}
/*
===================
S_TransferPaintBuffer

Copy the mixed paint buffer out to the DMA buffer, converting to the
device's sample format (16/8 bit, mono/stereo).  When s_testsound is
set, the paint buffer is first overwritten with a fixed sine wave.
===================
*/
void S_TransferPaintBuffer(int endtime)
{
	int 	out_idx;
	int 	count;
	int 	out_mask;
	int 	*p;
	int 	step;
	int		val;
	unsigned long *pbuf;

	pbuf = (unsigned long *)dma.buffer;

	if ( s_testsound->integer ) {
		int		i;
		int		count;

		// write a fixed sine wave
		count = (endtime - s_paintedtime);
		for (i=0 ; i<count ; i++)
			paintbuffer[i].left = paintbuffer[i].right = (int)(sin((s_paintedtime+i)*0.1)*20000*256);
	}

	if (dma.samplebits == 16 && dma.channels == 2)
	{	// optimized case
		S_TransferStereo16 (pbuf, endtime);
	}
	else
	{	// general case
		p = (int *) paintbuffer;
		count = (endtime - s_paintedtime) * dma.channels;
		out_mask = dma.samples - 1;
		out_idx = s_paintedtime * dma.channels & out_mask;
		// step 2 for mono (skip the right sample), 1 for stereo
		step = 3 - dma.channels;

		if (dma.samplebits == 16)
		{
			short *out = (short *) pbuf;
			while (count--)
			{
				val = *p >> 8;
				p+= step;
				// clamp to signed 16-bit range
				if (val > 0x7fff)
					val = 0x7fff;
				else if (val < (short)0x8000)
					val = (short)0x8000;
				out[out_idx] = (short)val;
				out_idx = (out_idx + 1) & out_mask;
			}
		}
		else if (dma.samplebits == 8)
		{
			unsigned char *out = (unsigned char *) pbuf;
			while (count--)
			{
				val = *p >> 8;
				p+= step;
				if (val > 0x7fff)
					val = 0x7fff;
				else if (val < (short)0x8000)
					val = (short)0x8000;
				// fold the clamped 16-bit value down to unsigned 8-bit
				out[out_idx] = (short)((val>>8) + 128);
				out_idx = (out_idx + 1) & out_mask;
			}
		}
	}
}
/*
===============================================================================
CHANNEL MIXING
===============================================================================
*/
/*
 * Mix 'count' uncompressed 16-bit mono samples from sfx, starting at
 * sampleOffset, into the paint buffer at bufferOffset, scaled by the
 * channel's left/right volumes (8.8 fixed point via snd_vol).
 */
static void S_PaintChannelFrom16( channel_t *ch, const sfx_t *sfx, int count, int sampleOffset, int bufferOffset )
{
	const int iLeftVol  = ch->leftvol  * snd_vol;
	const int iRightVol = ch->rightvol * snd_vol;
	portable_samplepair_t *pDest = &paintbuffer[ bufferOffset ];
	int i;

	for ( i = 0; i < count; i++ )
	{
		const int iSample = sfx->pSoundData[ sampleOffset + i ];

		pDest[i].left  += (iSample * iLeftVol ) >> 8;
		pDest[i].right += (iSample * iRightVol) >> 8;
	}
}
/*
 * S_PaintChannelFromMP3 - decode 'count' mono samples from an
 * MP3-compressed sfx into a temp buffer and mix them into the paint
 * buffer at bufferOffset, scaled by the channel volumes.  The first
 * loop consumes samples until the remaining count is a multiple of 4
 * so the main loop can run unrolled four-wide.
 */
void S_PaintChannelFromMP3( channel_t *ch, const sfx_t *sc, int count, int sampleOffset, int bufferOffset )
{
	int data;
	int leftvol, rightvol;
	signed short *sfx;
	int	i;
	portable_samplepair_t	*samp;
	static short tempMP3Buffer[PAINTBUFFER_SIZE];

	MP3Stream_GetSamples( ch, sampleOffset, count, tempMP3Buffer, qfalse );	// qfalse = not stereo

	leftvol = ch->leftvol*snd_vol;
	rightvol = ch->rightvol*snd_vol;
	sfx = tempMP3Buffer;

	samp = &paintbuffer[ bufferOffset ];

	// peel off samples one at a time until 'count' is 4-aligned
	while ( count & 3 ) {
		data = *sfx;
		samp->left += (data * leftvol)>>8;
		samp->right += (data * rightvol)>>8;
		sfx++;
		samp++;
		count--;
	}

	// main mixing loop, unrolled x4
	for ( i=0 ; i<count ; i += 4 ) {
		data = sfx[i];
		samp[i].left += (data * leftvol)>>8;
		samp[i].right += (data * rightvol)>>8;

		data = sfx[i+1];
		samp[i+1].left += (data * leftvol)>>8;
		samp[i+1].right += (data * rightvol)>>8;

		data = sfx[i+2];
		samp[i+2].left += (data * leftvol)>>8;
		samp[i+2].right += (data * rightvol)>>8;

		data = sfx[i+3];
		samp[i+3].left += (data * leftvol)>>8;
		samp[i+3].right += (data * rightvol)>>8;
	}
}
// subroutinised to save code dup (called twice) -ste
//
// Dispatch the mix to the painter matching the sound's compression
// method.  Unknown methods only trip an assert in debug builds.
void ChannelPaint(channel_t *ch, sfx_t *sc, int count, int sampleOffset, int bufferOffset)
{
	if (sc->eSoundCompressionMethod == ct_16)
	{
		S_PaintChannelFrom16(ch, sc, count, sampleOffset, bufferOffset);
	}
	else if (sc->eSoundCompressionMethod == ct_MP3)
	{
		S_PaintChannelFromMP3(ch, sc, count, sampleOffset, bufferOffset);
	}
	else
	{
		assert(0);	// debug aid, ignored in release. FIXME: Should we ERR_DROP here for badness-catch?
	}
}
/*
 * S_PaintChannels - mix all active channels (plus any streamed/raw
 * music) into the paint buffer and hand it to the DMA transfer code,
 * advancing s_paintedtime up to endtime.  Voice channels are mixed at
 * the separate s_volumeVoice level.
 */
void S_PaintChannels( int endtime ) {
	int 	i;
	int 	end;
	channel_t *ch;
	sfx_t	*sc;
	int		ltime, count;
	int		sampleOffset;
	int		normal_vol,voice_vol;

	snd_vol = normal_vol = s_volume->value*256.0f;
	voice_vol = (s_volumeVoice->value*256.0f);

//Com_Printf ("%i to %i\n", s_paintedtime, endtime);
	while ( s_paintedtime < endtime ) {
		// if paintbuffer is smaller than DMA buffer
		// we may need to fill it multiple times
		end = endtime;
		if ( endtime - s_paintedtime > PAINTBUFFER_SIZE ) {
			end = s_paintedtime + PAINTBUFFER_SIZE;
		}

		// clear the paint buffer to either music or zeros
		if ( s_rawend < s_paintedtime ) {
			if ( s_rawend ) {
				//Com_DPrintf ("background sound underrun\n");
			}
			memset(paintbuffer, 0, (end - s_paintedtime) * sizeof(portable_samplepair_t));
		} else {
			// copy from the streaming sound source
			int		s;
			int		stop;

			stop = (end < s_rawend) ? end : s_rawend;

			for ( i = s_paintedtime ; i < stop ; i++ ) {
				s = i&(MAX_RAW_SAMPLES-1);
				paintbuffer[i-s_paintedtime] = s_rawsamples[s];
			}
//		if (i != end)
//			Com_Printf ("partial stream\n");
//		else
//			Com_Printf ("full stream\n");
			// zero-fill whatever the stream did not cover
			for ( ; i < end ; i++ ) {
				paintbuffer[i-s_paintedtime].left =
				paintbuffer[i-s_paintedtime].right = 0;
			}
		}

		// paint in the channels.
		ch = s_channels;
		for ( i = 0; i < MAX_CHANNELS ; i++, ch++ ) {
			// NOTE(review): leftvol/rightvol appear to be integral, so
			// the 0.25 threshold effectively skips only zero-volume
			// channels -- confirm against channel_t's declaration.
			if ( !ch->thesfx || (ch->leftvol<0.25 && ch->rightvol<0.25 )) {
				continue;
			}

			// voice channels mix at their own, separate volume level
			if ( ch->entchannel == CHAN_VOICE || ch->entchannel == CHAN_VOICE_ATTEN || ch->entchannel == CHAN_VOICE_GLOBAL )
				snd_vol = voice_vol;
			else
				snd_vol = normal_vol;
			ltime = s_paintedtime;
			sc = ch->thesfx;

			// we might have to make 2 passes if it is
			// a looping sound effect and the end of
			// the sameple is hit...
			//
			do
			{
				if (ch->loopSound) {
					sampleOffset = ltime % sc->iSoundLengthInSamples;
				} else {
					sampleOffset = ltime - ch->startSample;
				}

				count = end - ltime;
				if ( sampleOffset + count > sc->iSoundLengthInSamples ) {
					count = sc->iSoundLengthInSamples - sampleOffset;
				}

				if ( count > 0 ) {
					ChannelPaint(ch, sc, count, sampleOffset, ltime - s_paintedtime);
					ltime += count;
				}
			} while ( ltime < end && ch->loopSound );
		}

/* temprem
		// paint in the looped channels.
		ch = loop_channels;
		for ( i = 0; i < numLoopChannels ; i++, ch++ ) {
			if ( !ch->thesfx || (!ch->leftvol && !ch->rightvol )) {
				continue;
			}

			{
				ltime = s_paintedtime;
				sc = ch->thesfx;

				if (sc->soundData==NULL || sc->soundLength==0) {
					continue;
				}
				// we might have to make two passes if it
				// is a looping sound effect and the end of
				// the sample is hit
				do {
					sampleOffset = (ltime % sc->soundLength);

					count = end - ltime;
					if ( sampleOffset + count > sc->soundLength ) {
						count = sc->soundLength - sampleOffset;
					}

					if ( count > 0 )
					{
						ChannelPaint(ch, sc, count, sampleOffset, ltime - s_paintedtime);
						ltime += count;
					}

				} while ( ltime < end);
			}
		}
*/
		// transfer out according to DMA format
		S_TransferPaintBuffer( end );
		s_paintedtime = end;
	}
}
| gpl-2.0 |
alexander-barabash/KVM-replay | fs/f2fs/inode.c | 18 | 8246 | /*
* fs/f2fs/inode.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
/*
 * Mirror the f2fs on-disk FS_*_FL inode flags into the corresponding
 * in-core S_* bits of inode->i_flags, atomically via set_mask_bits().
 */
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int fs_flags = F2FS_I(inode)->i_flags;
	unsigned int vfs_flags = 0;

	vfs_flags |= (fs_flags & FS_SYNC_FL)      ? S_SYNC      : 0;
	vfs_flags |= (fs_flags & FS_APPEND_FL)    ? S_APPEND    : 0;
	vfs_flags |= (fs_flags & FS_IMMUTABLE_FL) ? S_IMMUTABLE : 0;
	vfs_flags |= (fs_flags & FS_NOATIME_FL)   ? S_NOATIME   : 0;
	vfs_flags |= (fs_flags & FS_DIRSYNC_FL)   ? S_DIRSYNC   : 0;

	set_mask_bits(&inode->i_flags,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC,
			vfs_flags);
}
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
if (ri->i_addr[0])
inode->i_rdev =
old_decode_dev(le32_to_cpu(ri->i_addr[0]));
else
inode->i_rdev =
new_decode_dev(le32_to_cpu(ri->i_addr[1]));
}
}
/*
 * Store i_rdev for device nodes in the unused i_addr slots of the raw
 * inode.  A device number that fits the old 16-bit encoding goes into
 * i_addr[0]; otherwise i_addr[0] is zeroed as a marker and the new
 * 32-bit encoding goes into i_addr[1] (with i_addr[2] also cleared).
 * The layout matches what __get_inode_rdev() reads back.
 */
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[1] = 0;
		} else {
			ri->i_addr[0] = 0;
			ri->i_addr[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[2] = 0;
		}
	}
}
/*
 * do_read_inode - fill a freshly allocated VFS inode from its on-disk
 * f2fs node page.  Returns 0 on success, -EINVAL for an out-of-range
 * inode number, or the error from fetching the node page.
 */
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;

	/* Check if ino is within scope */
	if (check_nid_range(sbi, inode->i_ino)) {
		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
			 (unsigned long) inode->i_ino);
		return -EINVAL;
	}

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	/* generic VFS fields, converted from little-endian disk format */
	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = le64_to_cpu(ri->i_blocks);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	/* f2fs-private state: hash depth, xattr node, flags, parent ino */
	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	get_extent_info(&fi->ext, ri->i_ext);
	get_inline_info(fi, ri);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	f2fs_put_page(node_page, 1);
	return 0;
}
/*
 * f2fs_iget - look up (or create and read in) the in-core inode for
 * @ino.  The internal node/meta inodes only get address-space ops; all
 * other inodes are fully initialized from disk and wired to the
 * operations matching their file type.  Returns the inode or ERR_PTR.
 */
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* cache hit: already initialized, nothing more to do */
	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	/* internal inodes have no on-disk f2fs_inode to read */
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &f2fs_symlink_inode_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		/* unknown file type on disk: treat as corruption */
		ret = -EIO;
		goto bad_inode;
	}
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}
/*
 * update_inode - copy the in-core inode state back into its on-disk
 * node page and mark the page dirty.  Waits for any in-flight
 * writeback of the page first, and clears FI_DIRTY_INODE once the raw
 * inode is up to date.
 */
void update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;

	f2fs_wait_on_page_writeback(node_page, NODE);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(inode->i_blocks);
	set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
	set_raw_inline(F2FS_I(inode), ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	/* device numbers live in the inline i_addr slots */
	__set_inode_rdev(inode, ri);
	set_cold_node(inode, node_page);
	set_page_dirty(node_page);

	clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}
/*
 * update_inode_page - fetch the inode's node page and sync the in-core
 * inode into it.  Transient -ENOMEM from get_node_page() is retried
 * after a reschedule; -ENOENT is silently ignored; any other failure
 * stops checkpointing to avoid committing inconsistent state.
 */
void update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *node_page;

	for (;;) {
		node_page = get_node_page(sbi, inode->i_ino);
		if (!IS_ERR(node_page))
			break;
		if (PTR_ERR(node_page) == -ENOMEM) {
			/* transient memory pressure: yield and retry */
			cond_resched();
			continue;
		}
		if (PTR_ERR(node_page) != -ENOENT)
			f2fs_stop_checkpoint(sbi);
		return;
	}
	update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}
/*
 * f2fs_write_inode - ->write_inode callback: push a dirty inode's
 * metadata into its node page.  Internal node/meta inodes and clean
 * inodes are skipped.  When called with a writeback context, also
 * rebalances free segments afterwards.
 */
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
		return 0;

	/*
	 * We need to lock here to prevent from producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_lock_op(sbi);
	update_inode_page(inode);
	f2fs_unlock_op(sbi);

	if (wbc)
		f2fs_balance_fs(sbi);

	return 0;
}
/*
 * Called at the last iput() if i_nlink is zero
 */
/*
 * f2fs_evict_inode - final teardown of an in-core inode.  Unlinked
 * inodes additionally get truncated and their node page removed under
 * f2fs_lock_op(); the internal node/meta inodes skip straight to
 * clear_inode().
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto no_delete;

	f2fs_bug_on(get_dirty_dents(inode));
	remove_dirty_dir_inode(inode);

	/* still linked (or known-bad): keep the on-disk inode */
	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	stat_dec_inline_inode(inode);
	f2fs_unlock_op(sbi);

	sb_end_intwrite(inode->i_sb);
no_delete:
	clear_inode(inode);
	/* drop any cached node page belonging to this inode */
	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
}
| gpl-2.0 |
Jackeagle/android_kernel_lge_d838 | drivers/usb/gadget/composite.c | 18 | 55549 | /*
* composite.c - infrastructure for Composite USB Gadgets
*
* Copyright (C) 2006-2008 David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/utsname.h>
#include <linux/usb/composite.h>
#include <asm/unaligned.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#ifdef CONFIG_USB_G_LGE_ANDROID
#include <linux/usb/cdc.h>
#endif
#ifdef CONFIG_LGE_PM
#include <mach/board_lge.h>
#endif
#if defined(CONFIG_USB_DWC3_MSM_VZW_SUPPORT)
int lge_usb_config_finish;
extern void send_drv_state_uevent(int usb_drv_state);
#endif
/*
* The code in this file is utility code, used to build a gadget driver
* from one or more "function" drivers, one or more "configuration"
* objects, and a "usb_composite_driver" by gluing them together along
* with the relevant device-wide data.
*/
/* big enough to hold our biggest descriptor */
#define USB_BUFSIZ 4096

#ifdef CONFIG_SMB349_VZW_FAST_CHG
/* VZW fast-charge support: flags exported to the charger driver tracking
 * whether the host has started enumerating us (connected) and whether a
 * SET_CONFIGURATION has been received (configured); see composite_setup().
 */
bool usb_connected_flag;
EXPORT_SYMBOL(usb_connected_flag);
bool usb_configured_flag;
EXPORT_SYMBOL(usb_configured_flag);
extern void set_vzw_usb_charging_state(int kind_of_state);
#endif

/* The composite driver currently registered, and its gadget bind hook. */
static struct usb_composite_driver *composite;
static int (*composite_gadget_bind)(struct usb_composite_dev *cdev);

/* Some systems will need runtime overrides for the product identifiers
 * published in the device descriptor, either numbers or strings or both.
 * String parameters are in UTF-8 (superset of ASCII's 7 bit characters).
 */
static ushort idVendor;
module_param(idVendor, ushort, 0);
MODULE_PARM_DESC(idVendor, "USB Vendor ID");

static ushort idProduct;
module_param(idProduct, ushort, 0);
MODULE_PARM_DESC(idProduct, "USB Product ID");

static ushort bcdDevice;
module_param(bcdDevice, ushort, 0);
MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");

static char *iManufacturer;
module_param(iManufacturer, charp, 0);
MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");

static char *iProduct;
module_param(iProduct, charp, 0);
MODULE_PARM_DESC(iProduct, "USB Product string");

static char *iSerialNumber;
module_param(iSerialNumber, charp, 0);
MODULE_PARM_DESC(iSerialNumber, "SerialNumber string");

/* Fallback manufacturer string; used by get_string() when neither the
 * module parameter nor the composite driver supplies an override.
 */
static char composite_manufacturer[50];

#ifdef CONFIG_USB_G_LGE_MULTIPLE_CONFIGURATION
/* Last configuration number successfully selected by set_config(). */
static int nSetConfig = 0;
#endif //
/*-------------------------------------------------------------------------*/
/**
* next_ep_desc() - advance to the next EP descriptor
* @t: currect pointer within descriptor array
*
* Return: next EP descriptor or NULL
*
* Iterate over @t until either EP descriptor found or
* NULL (that indicates end of list) encountered
*/
static struct usb_descriptor_header**
next_ep_desc(struct usb_descriptor_header **t)
{
	/* Scan the NULL-terminated descriptor list for the next EP entry. */
	while (*t) {
		if ((*t)->bDescriptorType == USB_DT_ENDPOINT)
			return t;
		t++;
	}
	return NULL;
}
/*
* for_each_ep_desc()- iterate over endpoint descriptors in the
* descriptors list
* @start: pointer within descriptor array.
* @ep_desc: endpoint descriptor to use as the loop cursor
*/
#define for_each_ep_desc(start, ep_desc) \
for (ep_desc = next_ep_desc(start); \
ep_desc; ep_desc = next_ep_desc(ep_desc+1))
/**
* config_ep_by_speed() - configures the given endpoint
* according to gadget speed.
* @g: pointer to the gadget
* @f: usb function
* @_ep: the endpoint to configure
*
* Return: error code, 0 on success
*
* This function chooses the right descriptors for a given
* endpoint according to gadget speed and saves it in the
* endpoint desc field. If the endpoint already has a descriptor
* assigned to it - overwrites it with currently corresponding
* descriptor. The endpoint maxpacket field is updated according
* to the chosen descriptor.
* Note: the supplied function should hold all the descriptors
* for supported speeds
*/
int config_ep_by_speed(struct usb_gadget *g,
			struct usb_function *f,
			struct usb_ep *_ep)
{
	struct usb_composite_dev *cdev;
	struct usb_endpoint_descriptor *chosen_desc = NULL;
	struct usb_descriptor_header **speed_desc = NULL;

	struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
	int want_comp_desc = 0;

	struct usb_descriptor_header **d_spd; /* cursor for speed desc */

	/* Validate arguments before touching them.  The previous code
	 * called get_gadget_data(g) -- which dereferences g -- *before*
	 * this NULL check, making the test of g ineffective.
	 */
	if (!g || !f || !_ep)
		return -EIO;

	cdev = get_gadget_data(g);

	/* select desired speed */
	switch (g->speed) {
	case USB_SPEED_SUPER:
		if (gadget_is_superspeed(g)) {
			speed_desc = f->ss_descriptors;
			want_comp_desc = 1;
			break;
		}
		/* fall through: SS not supported, try HS descriptors */
	case USB_SPEED_HIGH:
		if (gadget_is_dualspeed(g)) {
			speed_desc = f->hs_descriptors;
			break;
		}
		/* fall through: use full-speed descriptors */
	default:
		speed_desc = f->descriptors;
	}
	/* find the EP descriptor matching this endpoint's address */
	for_each_ep_desc(speed_desc, d_spd) {
		chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
		if (chosen_desc->bEndpointAddress == _ep->address)
			goto ep_found;
	}
	return -EIO;

ep_found:
	/* commit results */
	_ep->maxpacket = usb_endpoint_maxp(chosen_desc);
	_ep->desc = chosen_desc;
	_ep->comp_desc = NULL;
	_ep->maxburst = 0;
	_ep->mult = 0;
	if (!want_comp_desc)
		return 0;

	/*
	 * Companion descriptor should follow EP descriptor
	 * USB 3.0 spec, #9.6.7
	 */
	comp_desc = (struct usb_ss_ep_comp_descriptor *)*(++d_spd);
	if (!comp_desc ||
	    (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP))
		return -EIO;
	_ep->comp_desc = comp_desc;
	if (g->speed == USB_SPEED_SUPER) {
		switch (usb_endpoint_type(_ep->desc)) {
		case USB_ENDPOINT_XFER_ISOC:
			/* mult: bits 1:0 of bmAttributes */
			_ep->mult = comp_desc->bmAttributes & 0x3;
			/* fall through: ISOC endpoints use bMaxBurst too */
		case USB_ENDPOINT_XFER_BULK:
		case USB_ENDPOINT_XFER_INT:
			_ep->maxburst = comp_desc->bMaxBurst + 1;
			break;
		default:
			if (comp_desc->bMaxBurst != 0)
				ERROR(cdev, "ep0 bMaxBurst must be 0\n");
			_ep->maxburst = 1;
			break;
		}
	}
	return 0;
}
/**
* usb_add_function() - add a function to a configuration
* @config: the configuration
* @function: the function being added
* Context: single threaded during gadget setup
*
* After initialization, each configuration must have one or more
* functions added to it. Adding a function involves calling its @bind()
* method to allocate resources such as interface and string identifiers
* and endpoints.
*
* This function returns the value of the function's bind(), which is
* zero for success else a negative errno value.
*/
int usb_add_function(struct usb_configuration *config,
		struct usb_function *function)
{
	int ret = -EINVAL;

	DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
			function->name, function,
			config->label, config);

	/* Both callbacks are mandatory for a usable function. */
	if (!function->set_alt || !function->disable)
		goto done;

	function->config = config;
	list_add_tail(&function->list, &config->functions);

	/* REVISIT *require* function->bind? */
	ret = 0;
	if (function->bind) {
		ret = function->bind(config, function);
		if (ret < 0) {
			/* undo the registration on bind failure */
			list_del(&function->list);
			function->config = NULL;
		}
	}

	/* We allow configurations that don't work at both speeds.
	 * If we run into a lowspeed Linux system, treat it the same
	 * as full speed ... it's the function drivers that will need
	 * to avoid bulk and ISO transfers.
	 */
	if (function->descriptors && !config->fullspeed)
		config->fullspeed = true;
	if (function->hs_descriptors && !config->highspeed)
		config->highspeed = true;
	if (function->ss_descriptors && !config->superspeed)
		config->superspeed = true;

done:
	if (ret)
		DBG(config->cdev, "adding '%s'/%p --> %d\n",
				function->name, function, ret);
	return ret;
}
/**
* usb_function_deactivate - prevent function and gadget enumeration
* @function: the function that isn't yet ready to respond
*
* Blocks response of the gadget driver to host enumeration by
* preventing the data line pullup from being activated. This is
* normally called during @bind() processing to change from the
* initial "ready to respond" state, or when a required resource
* becomes available.
*
* For example, drivers that serve as a passthrough to a userspace
* daemon can block enumeration unless that daemon (such as an OBEX,
* MTP, or print server) is ready to handle host requests.
*
* Not all systems support software control of their USB peripheral
* data pullups.
*
* Returns zero on success, else negative errno.
*/
int usb_function_deactivate(struct usb_function *function)
{
	struct usb_composite_dev *cdev = function->config->cdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cdev->lock, flags);

	/* Drop the data pullup only on the very first deactivation. */
	if (cdev->deactivations == 0)
		ret = usb_gadget_disconnect(cdev->gadget);
	/* Count the deactivation only if the disconnect succeeded. */
	if (ret == 0)
		cdev->deactivations++;

	spin_unlock_irqrestore(&cdev->lock, flags);

	return ret;
}
/**
* usb_function_activate - allow function and gadget enumeration
* @function: function on which usb_function_activate() was called
*
* Reverses effect of usb_function_deactivate(). If no more functions
* are delaying their activation, the gadget driver will respond to
* host enumeration procedures.
*
* Returns zero on success, else negative errno.
*/
int usb_function_activate(struct usb_function *function)
{
	struct usb_composite_dev *cdev = function->config->cdev;
	unsigned long flags;
	int status = 0;

	/* Use the irqsave variant for consistency with
	 * usb_function_deactivate(): cdev->lock is taken with interrupts
	 * disabled elsewhere in this file, so a plain spin_lock() here
	 * risks deadlock if an interrupt handler contends for the lock
	 * while we hold it.
	 */
	spin_lock_irqsave(&cdev->lock, flags);

	if (WARN_ON(cdev->deactivations == 0))
		status = -EINVAL;
	else {
		cdev->deactivations--;
		/* Re-enable the pullup once the last blocker is gone. */
		if (cdev->deactivations == 0)
			status = usb_gadget_connect(cdev->gadget);
	}

	spin_unlock_irqrestore(&cdev->lock, flags);

	return status;
}
/**
* usb_interface_id() - allocate an unused interface ID
* @config: configuration associated with the interface
* @function: function handling the interface
* Context: single threaded during gadget setup
*
* usb_interface_id() is called from usb_function.bind() callbacks to
* allocate new interface IDs. The function driver will then store that
* ID in interface, association, CDC union, and other descriptors. It
* will also handle any control requests targeted at that interface,
* particularly changing its altsetting via set_alt(). There may
* also be class-specific or vendor-specific requests to handle.
*
* All interface identifier should be allocated using this routine, to
* ensure that for example different functions don't wrongly assign
* different meanings to the same identifier. Note that since interface
* identifiers are configuration-specific, functions used in more than
* one configuration (or more than once in a given configuration) need
* multiple versions of the relevant descriptors.
*
* Returns the interface ID which was allocated; or -ENODEV if no
* more interface IDs can be allocated.
*/
int usb_interface_id(struct usb_configuration *config,
		struct usb_function *function)
{
	unsigned id = config->next_interface_id;

	/* No interface slots left in this configuration? */
	if (id >= MAX_CONFIG_INTERFACES)
		return -ENODEV;

	/* Claim the slot for @function and advance the allocator. */
	config->interface[id] = function;
	config->next_interface_id = id + 1;
	return id;
}
/* Build a complete configuration descriptor into @buf: config header,
 * any config-level descriptors (e.g. OTG), then each function's
 * descriptor set for @speed.  Returns the total length written, or a
 * negative errno from usb_descriptor_fillbuf().
 */
static int config_buf(struct usb_configuration *config,
		enum usb_device_speed speed, void *buf, u8 type)
{
	struct usb_config_descriptor *c = buf;
	void *next = buf + USB_DT_CONFIG_SIZE;
	int len = USB_BUFSIZ - USB_DT_CONFIG_SIZE;
	struct usb_function *f;
	int status;

	/* write the config descriptor */
	c = buf;
	c->bLength = USB_DT_CONFIG_SIZE;
	c->bDescriptorType = type;
	/* wTotalLength is written later */
	c->bNumInterfaces = config->next_interface_id;
	c->bConfigurationValue = config->bConfigurationValue;
	c->iConfiguration = config->iConfiguration;
	c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes;
	/* bMaxPower is expressed in vbus_draw_units set by composite_setup() */
	c->bMaxPower = config->bMaxPower ? :
		(CONFIG_USB_GADGET_VBUS_DRAW / config->cdev->vbus_draw_units);

	/* There may be e.g. OTG descriptors */
	if (config->descriptors) {
		status = usb_descriptor_fillbuf(next, len,
				config->descriptors);
		if (status < 0)
			return status;
		len -= status;
		next += status;
	}

	/* add each function's descriptors */
	list_for_each_entry(f, &config->functions, list) {
		struct usb_descriptor_header **descriptors;

		/* pick the descriptor set for @speed; functions lacking
		 * descriptors for that speed are simply skipped */
		switch (speed) {
		case USB_SPEED_SUPER:
			descriptors = f->ss_descriptors;
			break;
		case USB_SPEED_HIGH:
			descriptors = f->hs_descriptors;
			break;
		default:
			descriptors = f->descriptors;
		}

		if (!descriptors)
			continue;
		status = usb_descriptor_fillbuf(next, len,
			(const struct usb_descriptor_header **) descriptors);
		if (status < 0)
			return status;
		len -= status;
		next += status;
	}

	/* now that the total size is known, patch it into the header */
	len = next - buf;
	c->wTotalLength = cpu_to_le16(len);
	return len;
}
/* Answer a GET_DESCRIPTOR(CONFIG/OTHER_SPEED_CONFIG) request: @w_value
 * carries the descriptor type in its high byte and the config *index*
 * (not bConfigurationValue) in its low byte.  Writes the descriptor via
 * config_buf() into cdev->req->buf; returns length or -EINVAL.
 */
static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
{
	struct usb_gadget *gadget = cdev->gadget;
	struct usb_configuration *c;
	u8 type = w_value >> 8;
	enum usb_device_speed speed = USB_SPEED_UNKNOWN;

	if (gadget->speed == USB_SPEED_SUPER)
		speed = gadget->speed;
	else if (gadget_is_dualspeed(gadget)) {
		int hs = 0;

		if (gadget->speed == USB_SPEED_HIGH)
			hs = 1;
		/* OTHER_SPEED_CONFIG describes the speed we are NOT at */
		if (type == USB_DT_OTHER_SPEED_CONFIG)
			hs = !hs;
		if (hs)
			speed = USB_SPEED_HIGH;
	}

	/* This is a lookup by config *INDEX* */
	w_value &= 0xff;
	list_for_each_entry(c, &cdev->configs, list) {
		/* ignore configs that won't work at this speed */
		switch (speed) {
		case USB_SPEED_SUPER:
			if (!c->superspeed)
				continue;
			break;
		case USB_SPEED_HIGH:
			if (!c->highspeed)
				continue;
			break;
		default:
			if (!c->fullspeed)
				continue;
		}
		/* index counts only configs usable at this speed */
		if (w_value == 0)
			return config_buf(c, speed, cdev->req->buf, type);
		w_value--;
	}
	return -EINVAL;
}
/* Count the configurations usable at the gadget's current speed (or,
 * for a DEVICE_QUALIFIER request, at the *other* HS/FS speed).
 */
static int count_configs(struct usb_composite_dev *cdev, unsigned type)
{
	struct usb_gadget *gadget = cdev->gadget;
	struct usb_configuration *c;
	unsigned count = 0;
	int hs = 0;
	int ss = 0;

	if (gadget_is_dualspeed(gadget)) {
		hs = (gadget->speed == USB_SPEED_HIGH);
		ss = (gadget->speed == USB_SPEED_SUPER);
		/* a DEVICE_QUALIFIER asks about the other HS/FS speed */
		if (type == USB_DT_DEVICE_QUALIFIER)
			hs = !hs;
	}

	list_for_each_entry(c, &cdev->configs, list) {
		/* skip configs that won't work at this speed */
		if (ss && !c->superspeed)
			continue;
		if (!ss && hs && !c->highspeed)
			continue;
		if (!ss && !hs && !c->fullspeed)
			continue;
		count++;
	}
	return count;
}
/**
* bos_desc() - prepares the BOS descriptor.
* @cdev: pointer to usb_composite device to generate the bos
* descriptor for
*
* This function generates the BOS (Binary Device Object)
* descriptor and its device capabilities descriptors. The BOS
* descriptor should be supported by a SuperSpeed device.
*/
static int bos_desc(struct usb_composite_dev *cdev)
{
	struct usb_ext_cap_descriptor *usb_ext;
	struct usb_ss_cap_descriptor *ss_cap;
	struct usb_dcd_config_params dcd_config_params;
	struct usb_bos_descriptor *bos = cdev->req->buf;

	/* BOS header; wTotalLength grows as capability descs are appended */
	bos->bLength = USB_DT_BOS_SIZE;
	bos->bDescriptorType = USB_DT_BOS;

	bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE);
	bos->bNumDeviceCaps = 0;

	/*
	 * A SuperSpeed device shall include the USB2.0 extension descriptor
	 * and shall support LPM when operating in USB2.0 HS mode, as well as
	 * a HS device when operating in USB2.1 HS mode.
	 */
	usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
	bos->bNumDeviceCaps++;
	le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
	usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
	usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
	usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
	usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);

	if (gadget_is_superspeed(cdev->gadget)) {
		/*
		 * The Superspeed USB Capability descriptor shall be
		 * implemented by all SuperSpeed devices.
		 */
		ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
		bos->bNumDeviceCaps++;
		le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
		ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
		ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
		ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
		ss_cap->bmAttributes = 0; /* LTM is not supported yet */
		ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
				USB_FULL_SPEED_OPERATION |
				USB_HIGH_SPEED_OPERATION |
				USB_5GBPS_OPERATION);
		ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;

		/* Get Controller configuration */
		if (cdev->gadget->ops->get_config_params)
			cdev->gadget->ops->get_config_params
				(&dcd_config_params);
		else {
			/* controller gave no values: use spec defaults */
			dcd_config_params.bU1devExitLat =
				USB_DEFAULT_U1_DEV_EXIT_LAT;
			dcd_config_params.bU2DevExitLat =
				cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
		}
		ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
		ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
	}

	return le16_to_cpu(bos->wTotalLength);
}
/* Fill cdev->req->buf with the DEVICE_QUALIFIER descriptor. */
static void device_qual(struct usb_composite_dev *cdev)
{
	struct usb_qualifier_descriptor *q = cdev->req->buf;

	q->bLength = sizeof(*q);
	q->bDescriptorType = USB_DT_DEVICE_QUALIFIER;
	q->bRESERVED = 0;

	/* POLICY: same bcdUSB and device type info at both speeds */
	q->bcdUSB = cdev->desc.bcdUSB;
	q->bDeviceClass = cdev->desc.bDeviceClass;
	q->bDeviceSubClass = cdev->desc.bDeviceSubClass;
	q->bDeviceProtocol = cdev->desc.bDeviceProtocol;

	/* ASSUME same EP0 fifo size at both speeds */
	q->bMaxPacketSize0 = cdev->gadget->ep0->maxpacket;

	q->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER);
}
/*-------------------------------------------------------------------------*/
/* Disable every function of the active configuration, clear each
 * function's endpoint-usage bitmap, and mark the device unconfigured.
 */
static void reset_config(struct usb_composite_dev *cdev)
{
	struct usb_function *f;

	DBG(cdev, "reset config\n");

	list_for_each_entry(f, &cdev->config->functions, list) {
		if (f->disable)
			f->disable(f);
		/* forget which endpoints this function had in use */
		bitmap_zero(f->endpoints, 32);
	}
	cdev->config = NULL;
#if defined(CONFIG_USB_DWC3_MSM_VZW_SUPPORT)
	/* tell the VZW support code the device is no longer configured */
	lge_usb_config_finish = 0;
#endif
}
/* Handle SET_CONFIGURATION: select configuration @number (0 means
 * "unconfigured"), put every interface into altsetting zero, and set
 * the VBUS power budget.  Called with cdev->lock held (see
 * composite_setup()).  Returns 0, USB_GADGET_DELAYED_STATUS, or a
 * negative errno.
 */
static int set_config(struct usb_composite_dev *cdev,
		const struct usb_ctrlrequest *ctrl, unsigned number)
{
	struct usb_gadget *gadget = cdev->gadget;
	struct usb_configuration *c = NULL;
	int result = -EINVAL;
	/* default budget until the chosen config says otherwise */
	unsigned power = gadget_is_otg(gadget) ? 8 : 100;
	int tmp;

	if (number) {
		list_for_each_entry(c, &cdev->configs, list) {
			if (c->bConfigurationValue == number) {
				/*
				 * We disable the FDs of the previous
				 * configuration only if the new configuration
				 * is a valid one
				 */
				if (cdev->config)
					reset_config(cdev);
				result = 0;
				break;
			}
		}
		if (result < 0)
			goto done;
	} else { /* Zero configuration value - need to reset the config */
		if (cdev->config)
			reset_config(cdev);
		result = 0;
	}

	INFO(cdev, "%s config #%d: %s\n",
	     usb_speed_string(gadget->speed),
	     number, c ? c->label : "unconfigured");

	if (!c)
		goto done;

	cdev->config = c;
#if defined(CONFIG_USB_DWC3_MSM_VZW_SUPPORT)
	if (result == 0)
		lge_usb_config_finish = 1;
#endif

	/* Initialize all interfaces by setting them to altsetting zero. */
	for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
		struct usb_function *f = c->interface[tmp];
		struct usb_descriptor_header **descriptors;

		if (!f)
			break;

		/*
		 * Record which endpoints are used by the function. This is used
		 * to dispatch control requests targeted at that endpoint to the
		 * function's setup callback instead of the current
		 * configuration's setup callback.
		 */
		switch (gadget->speed) {
		case USB_SPEED_SUPER:
			descriptors = f->ss_descriptors;
			break;
		case USB_SPEED_HIGH:
			descriptors = f->hs_descriptors;
			break;
		default:
			descriptors = f->descriptors;
		}

		for (; *descriptors; ++descriptors) {
			struct usb_endpoint_descriptor *ep;
			int addr;

			if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
				continue;

			ep = (struct usb_endpoint_descriptor *)*descriptors;
			/* addr: bits 0-3 = EP number, bit 4 = IN direction */
			addr = ((ep->bEndpointAddress & 0x80) >> 3)
			     | (ep->bEndpointAddress & 0x0f);
			set_bit(addr, f->endpoints);
		}

		result = f->set_alt(f, tmp, 0);
		if (result < 0) {
			DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
					tmp, f->name, f, result);
			reset_config(cdev);
			goto done;
		}

		if (result == USB_GADGET_DELAYED_STATUS) {
			DBG(cdev,
			 "%s: interface %d (%s) requested delayed status\n",
					__func__, tmp, f->name);
			cdev->delayed_status++;
			DBG(cdev, "delayed_status count %d\n",
					cdev->delayed_status);
		}
	}

	/* when we return, be sure our power usage is valid */
	power = c->bMaxPower ? (cdev->vbus_draw_units * c->bMaxPower) :
			CONFIG_USB_GADGET_VBUS_DRAW;
done:
#ifdef CONFIG_LGE_PM
	/* LGE factory cables override the configured current budget */
	if (lge_pm_get_cable_type() == CABLE_56K ||
	    lge_pm_get_cable_type() == CABLE_130K ||
	    lge_pm_get_cable_type() == CABLE_910K)
		usb_gadget_vbus_draw(gadget, lge_pm_get_usb_current());
	else
		usb_gadget_vbus_draw(gadget, power);
#else /* google original */
	usb_gadget_vbus_draw(gadget, power);
#endif

	if (result >= 0 && cdev->delayed_status)
		result = USB_GADGET_DELAYED_STATUS;
#ifdef CONFIG_USB_G_LGE_MULTIPLE_CONFIGURATION
	/* remember the configuration the host selected */
	if( !result )
		nSetConfig = number;
	INFO(cdev, "%s : Set Config Number is %d\n", __func__, nSetConfig );
#endif //
	return result;
}
/**
* usb_add_config() - add a configuration to a device.
* @cdev: wraps the USB gadget
* @config: the configuration, with bConfigurationValue assigned
* @bind: the configuration's bind function
* Context: single threaded during gadget setup
*
* One of the main tasks of a composite @bind() routine is to
* add each of the configurations it supports, using this routine.
*
* This function returns the value of the configuration's @bind(), which
* is zero for success else a negative errno value. Binding configurations
* assigns global resources including string IDs, and per-configuration
* resources such as interface IDs and endpoints.
*/
int usb_add_config(struct usb_composite_dev *cdev,
		struct usb_configuration *config,
		int (*bind)(struct usb_configuration *))
{
	int status = -EINVAL;
	struct usb_configuration *c;

	DBG(cdev, "adding config #%u '%s'/%p\n",
			config->bConfigurationValue,
			config->label, config);

	/* value 0 is reserved (means "unconfigured"); bind is mandatory */
	if (!config->bConfigurationValue || !bind)
		goto done;

	/* Prevent duplicate configuration identifiers */
	list_for_each_entry(c, &cdev->configs, list) {
		if (c->bConfigurationValue == config->bConfigurationValue) {
			status = -EBUSY;
			goto done;
		}
	}

	config->cdev = cdev;
	list_add_tail(&config->list, &cdev->configs);

	INIT_LIST_HEAD(&config->functions);
	config->next_interface_id = 0;
	memset(config->interface, 0, sizeof(config->interface));

	status = bind(config);
	if (status < 0) {
		/* roll back the registration on bind failure */
		list_del(&config->list);
		config->cdev = NULL;
	} else {
		unsigned i;

		DBG(cdev, "cfg %d/%p speeds:%s%s%s\n",
			config->bConfigurationValue, config,
			config->superspeed ? " super" : "",
			config->highspeed ? " high" : "",
			config->fullspeed
				? (gadget_is_dualspeed(cdev->gadget)
					? " full"
					: " full/low")
				: "");

		for (i = 0; i < MAX_CONFIG_INTERFACES; i++) {
			struct usb_function *f = config->interface[i];

			if (!f)
				continue;
			DBG(cdev, " interface %d = %s/%p\n",
				i, f->name, f);
		}
	}

	/* set_alt(), or next bind(), sets up
	 * ep->driver_data as needed.
	 */
	usb_ep_autoconfig_reset(cdev->gadget);

done:
	if (status)
		DBG(cdev, "added config '%s'/%u --> %d\n", config->label,
				config->bConfigurationValue, status);
	return status;
}
/* Detach and unbind every function of @config, then let the
 * configuration itself release its state.  Always returns 0.
 */
static int unbind_config(struct usb_composite_dev *cdev,
		struct usb_configuration *config)
{
	struct usb_function *f;

	while (!list_empty(&config->functions)) {
		f = list_first_entry(&config->functions,
				struct usb_function, list);
		list_del(&f->list);
		if (f->unbind) {
			DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
			f->unbind(config, f);	/* may free memory for "f" */
		}
	}

	if (config->unbind) {
		DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
		config->unbind(config);		/* may free memory for "c" */
	}

	return 0;
}
/**
* usb_remove_config() - remove a configuration from a device.
* @cdev: wraps the USB gadget
* @config: the configuration
*
* Drivers must call usb_gadget_disconnect before calling this function
* to disconnect the device from the host and make sure the host will not
* try to enumerate the device while we are changing the config list.
*/
int usb_remove_config(struct usb_composite_dev *cdev,
		struct usb_configuration *config)
{
	unsigned long flags;

	spin_lock_irqsave(&cdev->lock, flags);

	/* a config without a cdev back-pointer was never added */
	if (WARN_ON(!config->cdev)) {
		spin_unlock_irqrestore(&cdev->lock, flags);
		return 0;
	}

	/* if the host is currently using this config, tear it down first */
	if (cdev->config == config)
		reset_config(cdev);

	list_del(&config->list);

	spin_unlock_irqrestore(&cdev->lock, flags);

	return unbind_config(cdev, config);
}
/*-------------------------------------------------------------------------*/
/* We support strings in multiple languages ... string descriptor zero
* says which languages are supported. The typical case will be that
* only one language (probably English) is used, with I18N handled on
* the host side.
*/
/* Merge the language code of every table in the NULL-terminated list
 * @sp into @buf, skipping codes already present.  @buf is a
 * zero-terminated array holding at most 126 used entries.
 */
static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf)
{
	u16 lang;
	__le16 *slot;

	for (; *sp; sp++) {
		int seen = 0;

		lang = cpu_to_le16((*sp)->language);

		/* scan the used portion of buf for this code */
		for (slot = buf; *slot && slot < &buf[126]; slot++) {
			if (*slot == lang) {
				seen = 1;
				break;
			}
		}

		/* not found: append at the first free/terminating slot */
		if (!seen)
			*slot = lang;
	}
}
/* Search the NULL-terminated list of string tables for one matching
 * @language and return the descriptor for @id from the first table
 * that yields one; -EINVAL if none does.
 */
static int lookup_string(
	struct usb_gadget_strings **sp,
	void *buf,
	u16 language,
	int id
)
{
	int len;

	for (; *sp; sp++) {
		struct usb_gadget_strings *table = *sp;

		if (table->language != language)
			continue;
		len = usb_gadget_get_string(table, id, buf);
		if (len > 0)
			return len;
	}
	return -EINVAL;
}
/* Return the string descriptor for (@language, @id) in @buf.  id 0
 * yields the language-ID table; otherwise the override strings (module
 * parameters / composite driver fields) are checked first, then the
 * string tables of the driver, each configuration, and each function.
 * Returns descriptor length or -EINVAL.
 */
static int get_string(struct usb_composite_dev *cdev,
		void *buf, u16 language, int id)
{
	struct usb_configuration *c;
	struct usb_function *f;
	int len;
	const char *str;

	/* Yes, not only is USB's I18N support probably more than most
	 * folk will ever care about ... also, it's all supported here.
	 * (Except for UTF8 support for Unicode's "Astral Planes".)
	 */

	/* 0 == report all available language codes */
	if (id == 0) {
		struct usb_string_descriptor *s = buf;
		struct usb_gadget_strings **sp;

		memset(s, 0, 256);
		s->bDescriptorType = USB_DT_STRING;

		sp = composite->strings;
		if (sp)
			collect_langs(sp, s->wData);

		list_for_each_entry(c, &cdev->configs, list) {
			sp = c->strings;
			if (sp)
				collect_langs(sp, s->wData);

			list_for_each_entry(f, &c->functions, list) {
				sp = f->strings;
				if (sp)
					collect_langs(sp, s->wData);
			}
		}

		/* count collected language codes (126 entries max) */
		for (len = 0; len <= 126 && s->wData[len]; len++)
			continue;
		if (!len)
			return -EINVAL;

		s->bLength = 2 * (len + 1);
		return s->bLength;
	}

	/* Otherwise, look up and return a specified string. First
	 * check if the string has not been overridden.
	 */
	if (cdev->manufacturer_override == id)
		str = iManufacturer ?: composite->iManufacturer ?:
			composite_manufacturer;
	else if (cdev->product_override == id)
		str = iProduct ?: composite->iProduct;
	else if (cdev->serial_override == id)
		str = iSerialNumber;
	else
		str = NULL;
	if (str) {
		/* wrap the override C-string in a one-entry table */
		struct usb_gadget_strings strings = {
			.language = language,
			.strings = &(struct usb_string) { 0xff, str }
		};
		return usb_gadget_get_string(&strings, 0xff, buf);
	}

	/* String IDs are device-scoped, so we look up each string
	 * table we're told about. These lookups are infrequent;
	 * simpler-is-better here.
	 */
	if (composite->strings) {
		len = lookup_string(composite->strings, buf, language, id);
		if (len > 0)
			return len;
	}
	list_for_each_entry(c, &cdev->configs, list) {
		if (c->strings) {
			len = lookup_string(c->strings, buf, language, id);
			if (len > 0)
				return len;
		}
		list_for_each_entry(f, &c->functions, list) {
			if (!f->strings)
				continue;
			len = lookup_string(f->strings, buf, language, id);
			if (len > 0)
				return len;
		}
	}
	return -EINVAL;
}
/**
* usb_string_id() - allocate an unused string ID
* @cdev: the device whose string descriptor IDs are being allocated
* Context: single threaded during gadget setup
*
* @usb_string_id() is called from bind() callbacks to allocate
* string IDs. Drivers for functions, configurations, or gadgets will
* then store that ID in the appropriate descriptors and string table.
*
* All string identifier should be allocated using this,
* @usb_string_ids_tab() or @usb_string_ids_n() routine, to ensure
* that for example different functions don't wrongly assign different
* meanings to the same identifier.
*/
int usb_string_id(struct usb_composite_dev *cdev)
{
	/* string id 0 is reserved by USB spec for the list of supported
	 * languages; stop at 254 (255 reserved as well? -- mina86)
	 */
	if (cdev->next_string_id >= 254)
		return -ENODEV;
	return ++cdev->next_string_id;
}
/**
* usb_string_ids() - allocate unused string IDs in batch
* @cdev: the device whose string descriptor IDs are being allocated
* @str: an array of usb_string objects to assign numbers to
* Context: single threaded during gadget setup
*
* @usb_string_ids() is called from bind() callbacks to allocate
* string IDs. Drivers for functions, configurations, or gadgets will
* then copy IDs from the string table to the appropriate descriptors
* and string table for other languages.
*
* All string identifier should be allocated using this,
* @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
* example different functions don't wrongly assign different meanings
* to the same identifier.
*/
int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
{
	int next = cdev->next_string_id;

	/* Hand out consecutive IDs, committing the allocator position
	 * only after every entry in the table got one.
	 */
	while (str->s) {
		if (unlikely(next >= 254))
			return -ENODEV;
		str->id = ++next;
		str++;
	}
	cdev->next_string_id = next;
	return 0;
}
/**
* usb_string_ids_n() - allocate unused string IDs in batch
* @c: the device whose string descriptor IDs are being allocated
* @n: number of string IDs to allocate
* Context: single threaded during gadget setup
*
* Returns the first requested ID. This ID and next @n-1 IDs are now
* valid IDs. At least provided that @n is non-zero because if it
* is, returns last requested ID which is now very useful information.
*
* @usb_string_ids_n() is called from bind() callbacks to allocate
* string IDs. Drivers for functions, configurations, or gadgets will
* then store that ID in the appropriate descriptors and string table.
*
* All string identifier should be allocated using this,
* @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
* example different functions don't wrongly assign different meanings
* to the same identifier.
*/
int usb_string_ids_n(struct usb_composite_dev *c, unsigned n)
{
	unsigned first = c->next_string_id;

	/* refuse requests that would exceed the 254 usable string IDs */
	if (unlikely(n > 254 || first + n > 254))
		return -ENODEV;

	c->next_string_id = first + n;
	return first + 1;
}
/*-------------------------------------------------------------------------*/
/* ep0 request completion handler: nothing to do on success; log
 * failed or short transfers for debugging.
 */
static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status || req->actual != req->length)
		DBG((struct usb_composite_dev *) ep->driver_data,
				"setup complete --> %d, %d/%d\n",
				req->status, req->actual, req->length);
}
/*
* The setup() callback implements all the ep0 functionality that's
* not handled lower down, in hardware or the hardware driver(like
* device and endpoint feature flags, and their status). It's all
* housekeeping for the gadget function we're implementing. Most of
* the work is in config and function specific setup.
*/
static int
composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_request *req = cdev->req;
int value = -EOPNOTSUPP;
int status = 0;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u8 intf = w_index & 0xFF;
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
struct usb_function *f = NULL;
u8 endp;
struct usb_configuration *c;
if (w_length > USB_BUFSIZ)
return value;
/* partial re-init of the response message; the function or the
* gadget might need to intercept e.g. a control-OUT completion
* when we delegate to it.
*/
req->zero = 0;
req->complete = composite_setup_complete;
req->length = 0;
gadget->ep0->driver_data = cdev;
switch (ctrl->bRequest) {
/* we handle all standard USB descriptors */
case USB_REQ_GET_DESCRIPTOR:
#ifdef CONFIG_SMB349_VZW_FAST_CHG
if (!usb_connected_flag) {
usb_connected_flag = true;
pr_info("%s: usb_connected_flag set to TURE!! \n", __func__);
}
#endif
if (ctrl->bRequestType != USB_DIR_IN)
goto unknown;
switch (w_value >> 8) {
case USB_DT_DEVICE:
cdev->desc.bNumConfigurations =
count_configs(cdev, USB_DT_DEVICE);
cdev->desc.bMaxPacketSize0 =
cdev->gadget->ep0->maxpacket;
cdev->vbus_draw_units = 2;
if (gadget_is_superspeed(gadget)) {
if (gadget->speed >= USB_SPEED_SUPER) {
cdev->desc.bcdUSB = cpu_to_le16(0x0300);
cdev->desc.bMaxPacketSize0 = 9;
cdev->vbus_draw_units = 8;
DBG(cdev, "Config SS device in SS\n");
} else {
cdev->desc.bcdUSB = cpu_to_le16(0x0210);
DBG(cdev, "Config SS device in HS\n");
}
} else if (gadget->l1_supported) {
cdev->desc.bcdUSB = cpu_to_le16(0x0210);
DBG(cdev, "Config HS device with LPM(L1)\n");
}
value = min(w_length, (u16) sizeof cdev->desc);
memcpy(req->buf, &cdev->desc, value);
break;
case USB_DT_DEVICE_QUALIFIER:
if (!gadget_is_dualspeed(gadget) ||
gadget->speed >= USB_SPEED_SUPER)
break;
device_qual(cdev);
value = min_t(int, w_length,
sizeof(struct usb_qualifier_descriptor));
break;
case USB_DT_OTHER_SPEED_CONFIG:
if (!gadget_is_dualspeed(gadget) ||
gadget->speed >= USB_SPEED_SUPER)
break;
/* FALLTHROUGH */
case USB_DT_CONFIG:
value = config_desc(cdev, w_value);
if (value >= 0)
value = min(w_length, (u16) value);
break;
case USB_DT_OTG:
if (!gadget_is_otg(gadget))
break;
c = list_first_entry(&cdev->configs,
struct usb_configuration, list);
if (c && c->descriptors)
value = usb_find_descriptor_fillbuf(req->buf,
USB_BUFSIZ, c->descriptors,
USB_DT_OTG);
break;
case USB_DT_STRING:
value = get_string(cdev, req->buf,
w_index, w_value & 0xff);
if (value >= 0)
value = min(w_length, (u16) value);
break;
case USB_DT_BOS:
if (gadget_is_superspeed(gadget) ||
gadget->l1_supported) {
value = bos_desc(cdev);
value = min(w_length, (u16) value);
}
break;
}
break;
/* any number of configs can work */
case USB_REQ_SET_CONFIGURATION:
if (ctrl->bRequestType != 0)
goto unknown;
if (gadget_is_otg(gadget)) {
if (gadget->a_hnp_support)
DBG(cdev, "HNP available\n");
else if (gadget->a_alt_hnp_support)
DBG(cdev, "HNP on another port\n");
else
VDBG(cdev, "HNP inactive\n");
}
spin_lock(&cdev->lock);
value = set_config(cdev, ctrl, w_value);
spin_unlock(&cdev->lock);
#ifdef CONFIG_SMB349_VZW_FAST_CHG
usb_configured_flag = true;
pr_info("%s: usb_configured_flag set to TRUE!!\n", __func__);
set_vzw_usb_charging_state(2);
#endif
break;
case USB_REQ_GET_CONFIGURATION:
if (ctrl->bRequestType != USB_DIR_IN)
goto unknown;
if (cdev->config)
*(u8 *)req->buf = cdev->config->bConfigurationValue;
else
*(u8 *)req->buf = 0;
value = min(w_length, (u16) 1);
break;
/* function drivers must handle get/set altsetting; if there's
* no get() method, we know only altsetting zero works.
*/
case USB_REQ_SET_INTERFACE:
if (ctrl->bRequestType != USB_RECIP_INTERFACE)
goto unknown;
if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
if (!f)
break;
if (w_value && !f->set_alt)
break;
/*
* We put interfaces in default settings (alt 0)
* upon set config#1. Call set_alt for non-zero
* alternate setting.
*/
if (!w_value && cdev->config) {
value = 0;
break;
}
value = f->set_alt(f, w_index, w_value);
if (value == USB_GADGET_DELAYED_STATUS) {
DBG(cdev,
"%s: interface %d (%s) requested delayed status\n",
__func__, intf, f->name);
cdev->delayed_status++;
DBG(cdev, "delayed_status count %d\n",
cdev->delayed_status);
}
break;
case USB_REQ_GET_INTERFACE:
if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
goto unknown;
if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
if (!f)
break;
/* lots of interfaces only need altsetting zero... */
value = f->get_alt ? f->get_alt(f, w_index) : 0;
if (value < 0)
break;
*((u8 *)req->buf) = value;
value = min(w_length, (u16) 1);
break;
/*
* USB 3.0 additions:
* Function driver should handle get_status request. If such cb
* wasn't supplied we respond with default value = 0
* Note: function driver should supply such cb only for the first
* interface of the function
*/
case USB_REQ_GET_STATUS:
if (!gadget_is_superspeed(gadget))
goto unknown;
if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
goto unknown;
value = 2; /* This is the length of the get_status reply */
put_unaligned_le16(0, req->buf);
if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
if (!f)
break;
status = f->get_status ? f->get_status(f) : 0;
if (status < 0)
break;
put_unaligned_le16(status & 0x0000ffff, req->buf);
break;
/*
* Function drivers should handle SetFeature/ClearFeature
* (FUNCTION_SUSPEND) request. function_suspend cb should be supplied
* only for the first interface of the function
*/
case USB_REQ_CLEAR_FEATURE:
case USB_REQ_SET_FEATURE:
if (!gadget_is_superspeed(gadget))
goto unknown;
if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
goto unknown;
switch (w_value) {
case USB_INTRF_FUNC_SUSPEND:
if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
if (!f)
break;
value = 0;
if (f->func_suspend)
value = f->func_suspend(f, w_index >> 8);
if (value < 0) {
ERROR(cdev,
"func_suspend() returned error %d\n",
value);
value = 0;
}
break;
}
break;
default:
unknown:
VDBG(cdev,
"non-core control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
/* functions always handle their interfaces and endpoints...
* punt other recipients (other, WUSB, ...) to the current
* configuration code.
*
* REVISIT it could make sense to let the composite device
* take such requests too, if that's ever needed: to work
* in config 0, etc.
*/
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
break;
case USB_RECIP_ENDPOINT:
endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
list_for_each_entry(f, &cdev->config->functions, list) {
if (test_bit(endp, f->endpoints))
break;
}
if (&f->list == &cdev->config->functions)
f = NULL;
break;
}
if (f && f->setup)
value = f->setup(f, ctrl);
else {
struct usb_configuration *c;
c = cdev->config;
if (c && c->setup)
value = c->setup(c, ctrl);
}
if (value == USB_GADGET_DELAYED_STATUS) {
DBG(cdev,
"%s: interface %d (%s) requested delayed status\n",
__func__, intf, f->name);
cdev->delayed_status++;
DBG(cdev, "delayed_status count %d\n",
cdev->delayed_status);
}
goto done;
}
/* respond with data transfer before status phase? */
if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) {
req->length = value;
req->zero = value < w_length;
value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
if (value < 0) {
DBG(cdev, "ep_queue --> %d\n", value);
req->status = 0;
composite_setup_complete(gadget->ep0, req);
}
} else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) {
WARN(cdev,
"%s: Delayed status not supported for w_length != 0",
__func__);
}
done:
/* device either stalls (value < 0) or reports success */
return value;
}
/*
 * Gadget disconnect handler: tear down the active configuration and
 * notify the composite driver, all under cdev->lock so it cannot race
 * with setup/config changes.
 */
static void composite_disconnect(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	unsigned long flags;

	/* REVISIT: should we have config and device level
	 * disconnect callbacks?
	 */
	spin_lock_irqsave(&cdev->lock, flags);
	if (cdev->config)
		reset_config(cdev);
	if (composite->disconnect)
		composite->disconnect(cdev);
	/* a pending delayed-status count can never complete after
	 * disconnect, so clear it to keep ep0 bookkeeping sane */
	if (cdev->delayed_status != 0) {
		INFO(cdev, "delayed status mismatch..resetting\n");
		cdev->delayed_status = 0;
	}
	spin_unlock_irqrestore(&cdev->lock, flags);
}
/*-------------------------------------------------------------------------*/
/*
 * sysfs "suspended" attribute (read-only): reports cdev->suspended,
 * which is set in composite_suspend() and cleared in composite_resume().
 */
static ssize_t composite_show_suspended(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
	struct usb_composite_dev *cdev = get_gadget_data(gadget);

	return sprintf(buf, "%d\n", cdev->suspended);
}

static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);
/*
 * Undo composite_bind(): unbind and free every configuration, call the
 * composite driver's unbind hook, release the preallocated ep0 request,
 * remove the sysfs attribute and free the device state.  Also called on
 * the composite_bind() failure path, so every step must tolerate
 * partially-initialized state (e.g. cdev->req may be NULL).
 */
static void
composite_unbind(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);

	/* composite_disconnect() must already have been called
	 * by the underlying peripheral controller driver!
	 * so there's no i/o concurrency that could affect the
	 * state protected by cdev->lock.
	 */
	WARN_ON(cdev->config);

	while (!list_empty(&cdev->configs)) {
		struct usb_configuration *c;

		c = list_first_entry(&cdev->configs,
				struct usb_configuration, list);
		list_del(&c->list);
		unbind_config(cdev, c);
	}
	if (composite->unbind)
		composite->unbind(cdev);

	if (cdev->req) {
		kfree(cdev->req->buf);
		usb_ep_free_request(gadget->ep0, cdev->req);
	}
	device_remove_file(&gadget->dev, &dev_attr_suspended);
	kfree(cdev);
	set_gadget_data(gadget, NULL);
	/* allow a subsequent usb_composite_probe() */
	composite = NULL;
}
/*
 * Ensure the given string-descriptor ID slot is populated: if it is
 * still zero, allocate a fresh string ID and store it there.  On
 * allocation failure the slot is left at zero and a warning is logged.
 * Returns the (possibly unchanged) slot value.
 */
static u8 override_id(struct usb_composite_dev *cdev, u8 *desc)
{
	if (*desc == 0) {
		int id = usb_string_id(cdev);

		if (unlikely(id < 0))
			WARNING(cdev, "failed to override string ID\n");
		else
			*desc = id;
	}

	return *desc;
}
#if defined CONFIG_DEBUG_FS && defined CONFIG_USB_G_LGE_ANDROID
static char debug_buffer[PAGE_SIZE];
/*
 * debugfs read handler: dump the device descriptor plus, for each
 * registered configuration, every function's interface/IAD/CDC/endpoint
 * descriptors (HS descriptors when the gadget runs at high speed, FS
 * otherwise) into debug_buffer, then copy the result to userspace.
 *
 * NOTE(review): `i` accumulates snprintf() return values (the would-be
 * lengths), so with many functions it could exceed PAGE_SIZE; then
 * PAGE_SIZE - i underflows as a size_t and the next snprintf() gets a
 * huge size bound -- confirm the descriptor set always fits one page.
 */
static ssize_t debug_desc_read(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct usb_composite_dev *cdev = file->private_data;
	struct usb_device_descriptor *desc;
	struct usb_configuration *config;
	struct usb_function *f;
	enum usb_device_speed speed = USB_SPEED_UNKNOWN;
	char *buf = debug_buffer;
	unsigned long flags;
	int cfg_count = 0, i = 0;

	if (!cdev)
		return 0;

	desc = &cdev->desc;
	speed = cdev->gadget->speed;

	/* hold the lock so configs/functions cannot change mid-dump */
	spin_lock_irqsave(&cdev->lock, flags);
	i += snprintf(buf + i, PAGE_SIZE - i,
			"Device Descriptor Infomation:\n");
	i += snprintf(buf + i, PAGE_SIZE - i,
			"\tVendor: %08x, Product: %08x\n",
			desc->idVendor, desc->idProduct);
	i += snprintf(buf + i, PAGE_SIZE - i,
			"\tClass: %08x, SubClass: %08x, Protocol: %08x\n",
			desc->bDeviceClass, desc->bDeviceSubClass, desc->bDeviceProtocol);
	i += snprintf(buf + i, PAGE_SIZE - i,
			"USB speed is %s\n", (speed == USB_SPEED_HIGH ? "HIGH" : "FULL"));

	if (list_empty(&cdev->configs)) {
		i += snprintf(buf + i, PAGE_SIZE - i,
				"USB is not configured. It may be disabled....\n");
		goto empty_list;
	}

	list_for_each_entry(config, &cdev->configs, list) {
		i += snprintf(buf + i, PAGE_SIZE - i,
				"USB Configuration #%d:\n", cfg_count++);
		list_for_each_entry(f, &config->functions, list) {
			struct usb_descriptor_header **descriptors;
			struct usb_descriptor_header *descriptor;

			/* pick the descriptor set matching the current speed */
			if (speed == USB_SPEED_HIGH)
				descriptors = f->hs_descriptors;
			else
				descriptors = f->descriptors;

			if (!descriptors || descriptors[0] == NULL)
				continue;

			i += snprintf(buf + i, PAGE_SIZE - i,
					"\tFunction descriptor: %s\n", f->name);
			while ((descriptor = *descriptors++) != NULL) {
				struct usb_interface_descriptor *intf;
				struct usb_interface_assoc_descriptor *iad;
				struct usb_cdc_header_desc *cdc_header;
				struct usb_cdc_union_desc *union_desc;
				struct usb_cdc_call_mgmt_descriptor *call_mgmt;
				struct usb_endpoint_descriptor *ep;

				/* bDescriptorType sits at the same offset in
				 * every descriptor, so this cast is only used
				 * to read the type before re-casting below */
				intf = (struct usb_interface_descriptor *)descriptor;

				switch (intf->bDescriptorType) {
				case USB_DT_INTERFACE:
					i += snprintf(buf + i, PAGE_SIZE - i,
							"\t\tInterface descriptor\n");
					i += snprintf(buf + i, PAGE_SIZE - i,
							"\t\tbNumEndpoints: %d\n"
							"\t\tbInterfaceNumber: %d\n"
							"\t\tbInterfaceClass: %d\n"
							"\t\tbInterfaceSubClass: %d\n"
							"\t\tbInterfaceProtocol: %d\n\n",
							intf->bNumEndpoints,
							intf->bInterfaceNumber,
							intf->bInterfaceClass,
							intf->bInterfaceSubClass,
							intf->bInterfaceProtocol);
					break;
				case USB_DT_INTERFACE_ASSOCIATION:
					iad = (struct usb_interface_assoc_descriptor *)
						descriptor;
					i += snprintf(buf + i, PAGE_SIZE - i,
							"\t\tIAD Interface descriptor\n");
					i += snprintf(buf + i, PAGE_SIZE - i,
							"\t\tbFirstInterface: %d\n"
							"\t\tbInterfaceCount: %d\n"
							"\t\tbFunctionClass: %d\n"
							"\t\tbFunctionSubClass: %d\n"
							"\t\tbFunctionProtocol: %d\n\n",
							iad->bFirstInterface,
							iad->bInterfaceCount,
							iad->bFunctionClass,
							iad->bFunctionSubClass,
							iad->bFunctionProtocol);
					break;
				case USB_DT_CS_INTERFACE:
					cdc_header = (struct usb_cdc_header_desc *)
						descriptor;
					if (cdc_header->bDescriptorSubType ==
							USB_CDC_CALL_MANAGEMENT_TYPE) {
						call_mgmt = (struct usb_cdc_call_mgmt_descriptor *)
							descriptor;
						i += snprintf(buf + i, PAGE_SIZE - i,
								"\t\tCDC CALL MGMT Interface descriptor\n");
						i += snprintf(buf + i, PAGE_SIZE - i,
								"\t\tbDataInterface: %d\n\n",
								call_mgmt->bDataInterface);
					} else if (cdc_header->bDescriptorSubType ==
							USB_CDC_UNION_TYPE) {
						union_desc = (struct usb_cdc_union_desc *)
							descriptor;
						i += snprintf(buf + i, PAGE_SIZE - i,
								"\t\tCDC UNION Interface descriptor\n");
						i += snprintf(buf + i, PAGE_SIZE - i,
								"\t\tbMasterInterface0: %d\n"
								"\t\tbSlaveInterface0: %d\n\n",
								union_desc->bMasterInterface0,
								union_desc->bSlaveInterface0);
					}
					break;
				case USB_DT_ENDPOINT:
					ep = (struct usb_endpoint_descriptor *)
						descriptor;
					i += snprintf(buf + i, PAGE_SIZE - i,
							"\t\t\tEndpoint descriptor\n");
					i += snprintf(buf + i, PAGE_SIZE - i,
							"\t\t\tbEndpointAddress: 0x%x(%s)\n"
							"\t\t\tbmAttributes: %s\n"
							"\t\t\twMaxPacketSize: %d\n"
							"\t\t\tbInterval: %d\n\n",
							ep->bEndpointAddress,
							(ep->bEndpointAddress & USB_DIR_IN ? "IN" : "OUT"),
							(ep->bmAttributes == USB_ENDPOINT_XFER_INT ? "INT" : "BULK"),
							ep->wMaxPacketSize,
							ep->bInterval);
					break;
				default:
					/* do nothing */
					break;
				}
			}
		}
	}
empty_list:
	spin_unlock_irqrestore(&cdev->lock, flags);

	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
}
/* Stash the cdev pointer (passed as i_private at file creation) so
 * debug_desc_read() can reach it via file->private_data. */
static int debug_desc_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

const struct file_operations debug_desc_ops = {
	.open = debug_desc_open,
	.read = debug_desc_read,
};
static void composite_debugfs_init(struct usb_composite_dev *cdev)
{
struct dentry *dent;
dent = debugfs_create_dir("usb_composite", 0);
if (IS_ERR(dent))
return;
debugfs_create_file("desc", 0444, dent, cdev, &debug_desc_ops);
}
#endif /* */
/*
 * Bind the composite framework to a UDC: allocate the per-device state,
 * preallocate the ep0 control request/buffer, run the composite driver's
 * bind callback (which registers configurations), apply module-parameter
 * overrides to the device descriptor, and create the "suspended" sysfs
 * attribute.  Any failure funnels through composite_unbind() for cleanup.
 * Returns 0 on success or a negative errno.
 */
static int composite_bind(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev;
	int status = -ENOMEM;

	cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
	if (!cdev)
		return status;

	spin_lock_init(&cdev->lock);
	cdev->gadget = gadget;
	set_gadget_data(gadget, cdev);
	INIT_LIST_HEAD(&cdev->configs);

	/* preallocate control response and buffer */
	cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!cdev->req)
		goto fail;
	cdev->req->buf = kmalloc(USB_BUFSIZ, GFP_KERNEL);
	if (!cdev->req->buf)
		goto fail;
	cdev->req->complete = composite_setup_complete;
	gadget->ep0->driver_data = cdev;

	cdev->bufsiz = USB_BUFSIZ;
	cdev->driver = composite;

	/*
	 * As per USB compliance update, a device that is actively drawing
	 * more than 100mA from USB must report itself as bus-powered in
	 * the GetStatus(DEVICE) call.
	 */
	if (CONFIG_USB_GADGET_VBUS_DRAW <= USB_SELF_POWER_VBUS_MAX_DRAW)
		usb_gadget_set_selfpowered(gadget);

	/* interface and string IDs start at zero via kzalloc.
	 * we force endpoints to start unassigned; few controller
	 * drivers will zero ep->driver_data.
	 */
	usb_ep_autoconfig_reset(cdev->gadget);

	/* composite gadget needs to assign strings for whole device (like
	 * serial number), register function drivers, potentially update
	 * power state and consumption, etc
	 */
	status = composite_gadget_bind(cdev);
	if (status < 0)
		goto fail;

	/* start from the driver's template descriptor, then override */
	cdev->desc = *composite->dev;

	/* standardized runtime overrides for device ID data */
	if (idVendor)
		cdev->desc.idVendor = cpu_to_le16(idVendor);
	if (idProduct)
		cdev->desc.idProduct = cpu_to_le16(idProduct);
	if (bcdDevice)
		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);

	/* string overrides */
	if (iManufacturer || !cdev->desc.iManufacturer) {
		/* default manufacturer string: "<sysname> <release> with <udc>" */
		if (!iManufacturer && !composite->iManufacturer &&
		    !*composite_manufacturer)
			snprintf(composite_manufacturer,
				 sizeof composite_manufacturer,
				 "%s %s with %s",
				 init_utsname()->sysname,
				 init_utsname()->release,
				 gadget->name);

		cdev->manufacturer_override =
			override_id(cdev, &cdev->desc.iManufacturer);
	}

	if (iProduct || (!cdev->desc.iProduct && composite->iProduct))
		cdev->product_override =
			override_id(cdev, &cdev->desc.iProduct);

	if (iSerialNumber)
		cdev->serial_override =
			override_id(cdev, &cdev->desc.iSerialNumber);

	/* has userspace failed to provide a serial number? */
	if (composite->needs_serial && !cdev->desc.iSerialNumber)
		WARNING(cdev, "userspace failed to provide iSerialNumber\n");

	/* finish up */
	status = device_create_file(&gadget->dev, &dev_attr_suspended);
	if (status)
		goto fail;

#if defined CONFIG_DEBUG_FS && defined CONFIG_USB_G_LGE_ANDROID
	/* optional LGE debugfs descriptor dump; failure is non-fatal */
	composite_debugfs_init(cdev);
#endif

	INFO(cdev, "%s ready\n", composite->name);
	return 0;

fail:
	composite_unbind(gadget);
	return status;
}
/*-------------------------------------------------------------------------*/
/*
 * Bus suspend handler: notify every function of the active config, then
 * the composite driver, mark the device suspended, and (unless LGE power
 * management owns vbus accounting) drop the reported bus draw to 2 mA
 * per the USB suspend current budget.
 */
static void
composite_suspend(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	struct usb_function *f;

	/* REVISIT: should we have config level
	 * suspend/resume callbacks?
	 */
	DBG(cdev, "suspend\n");
	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			if (f->suspend)
				f->suspend(f);
		}
	}
	if (composite->suspend)
		composite->suspend(cdev);

	cdev->suspended = 1;
#ifndef CONFIG_LGE_PM
	usb_gadget_vbus_draw(gadget, 2);
#endif
}
/*
 * Bus resume handler: mirror of composite_suspend() -- composite driver
 * first, then each function of the active config -- and restore the vbus
 * current draw.  bMaxPower is scaled by cdev->vbus_draw_units (set to 8
 * for SuperSpeed in composite_setup(); otherwise whatever the field
 * defaults to); with bMaxPower == 0 the Kconfig default draw is used.
 */
static void
composite_resume(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	struct usb_function *f;
	u8 maxpower;

	/* REVISIT: should we have config level
	 * suspend/resume callbacks?
	 */
	DBG(cdev, "resume\n");
	if (composite->resume)
		composite->resume(cdev);
	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			if (f->resume)
				f->resume(f);
		}

		maxpower = cdev->config->bMaxPower;

		usb_gadget_vbus_draw(gadget, maxpower ?
			(cdev->vbus_draw_units * maxpower) :
			CONFIG_USB_GADGET_VBUS_DRAW);
	}

	cdev->suspended = 0;
}
/*-------------------------------------------------------------------------*/
/* UDC-facing driver template.  .bind is passed separately to
 * usb_gadget_probe_driver(); .function, .driver.name and .max_speed are
 * filled in from the composite driver in usb_composite_probe(). */
static struct usb_gadget_driver composite_driver = {
	.unbind		= composite_unbind,

	.setup		= composite_setup,
	.disconnect	= composite_disconnect,

	.suspend	= composite_suspend,
	.resume		= composite_resume,

	.driver	= {
		.owner		= THIS_MODULE,
	},
};
/**
 * usb_composite_probe() - register a composite driver
 * @driver: the driver to register
 * @bind: the callback used to allocate resources that are shared across the
 *	whole device, such as string IDs, and add its configurations using
 *	@usb_add_config(). This may fail by returning a negative errno
 *	value; it should return zero on successful initialization.
 * Context: single threaded during gadget setup
 *
 * This function is used to register drivers using the composite driver
 * framework.  The return value is zero, or a negative errno value.
 * Those values normally come from the driver's @bind method, which does
 * all the work of setting up the driver to match the hardware.
 *
 * On successful return, the gadget is ready to respond to requests from
 * the host, unless one of its components invokes usb_gadget_disconnect()
 * while it was binding.  That would usually be done in order to wait for
 * some userspace participation.
 *
 * Only one composite driver can be registered at a time: the driver and
 * its bind callback are stored in the file-scope `composite` and
 * `composite_gadget_bind` variables consumed by composite_bind().
 */
int usb_composite_probe(struct usb_composite_driver *driver,
			       int (*bind)(struct usb_composite_dev *cdev))
{
	int retval;

	if (!driver || !driver->dev || !bind)
		return -EINVAL;

	/* fall back to sensible defaults for optional fields */
	if (!driver->name)
		driver->name = "composite";
	if (!driver->iProduct)
		driver->iProduct = driver->name;
	composite_driver.function =  (char *) driver->name;
	composite_driver.driver.name = driver->name;
	composite_driver.max_speed = driver->max_speed;
	composite = driver;
	composite_gadget_bind = bind;

	retval = usb_gadget_probe_driver(&composite_driver, composite_bind);
	if (retval)
		composite = NULL;

	return retval;
}
/**
 * usb_composite_unregister() - unregister a composite driver
 * @driver: the driver to unregister
 *
 * This function is used to unregister drivers using the composite
 * driver framework.  A no-op unless @driver is the currently
 * registered composite driver.
 */
void usb_composite_unregister(struct usb_composite_driver *driver)
{
	if (composite != driver)
		return;
	usb_gadget_unregister_driver(&composite_driver);
}
/**
 * usb_composite_setup_continue() - Continue with the control transfer
 * @cdev: the composite device who's control transfer was kept waiting
 *
 * This function must be called by the USB function driver to continue
 * with the control transfer's data/status stage in case it had requested to
 * delay the data/status stages. A USB function's setup handler (e.g. set_alt())
 * can request the composite framework to delay the setup request's data/status
 * stages by returning USB_GADGET_DELAYED_STATUS.
 *
 * Multiple functions may each have requested a delay; the zero-length
 * status packet is only queued once the last outstanding delay
 * (cdev->delayed_status) is released.
 */
void usb_composite_setup_continue(struct usb_composite_dev *cdev)
{
	int			value;
	struct usb_request	*req = cdev->req;
	unsigned long		flags;

	DBG(cdev, "%s\n", __func__);
	spin_lock_irqsave(&cdev->lock, flags);

	if (cdev->delayed_status == 0) {
		WARN(cdev, "%s: Unexpected call\n", __func__);

	} else if (--cdev->delayed_status == 0) {
		DBG(cdev, "%s: Completing delayed status\n", __func__);
		req->length = 0;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0) {
			DBG(cdev, "ep_queue --> %d\n", value);
			req->status = 0;
			composite_setup_complete(cdev->gadget->ep0, req);
		}
	}

	spin_unlock_irqrestore(&cdev->lock, flags);
}
| gpl-2.0 |
olegsvs/android_kernel_ark_benefit_m7_mm | drivers/misc/mediatek/thermal/common/coolers/mtk_cooler_fps.c | 18 | 19933 | #include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kobject.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <linux/err.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/mtk_gpu_utility.h>
#include "mt-plat/mtk_thermal_monitor.h"
/* fps update from display */
#include "disp_session.h"
/* switch device to sent the (fps limit)uevent */
#include <linux/switch.h>
#include "mach/mt_thermal.h"
#include <linux/uidgid.h>
/* 1: turn on adaptive fps cooler; 0: turn off */
#define ADAPTIVE_FPS_COOLER (1)
#define mtk_cooler_fps_dprintk_always(fmt, args...) \
pr_debug("thermal/cooler/fps" fmt, ##args)
#define mtk_cooler_fps_dprintk(fmt, args...) \
do { \
if (1 == cl_fps_klog_on) \
pr_debug("thermal/cooler/fps" fmt, ##args); \
} while (0)
#define MAX_NUM_INSTANCE_MTK_COOLER_FPS 4
#define MTK_CL_FPS_GET_CURR_STATE(curr_state, state) \
{ curr_state = (((unsigned long) (state))&0xFFFF); }
#define MTK_CL_FPS_SET_CURR_STATE(curr_state, state) \
do { \
if (0 == curr_state) \
state &= ~0x1; \
else \
state |= 0x1; \
} while (0)
static kuid_t uid = KUIDT_INIT(0);
static kgid_t gid = KGIDT_INIT(1000);
static int cl_fps_klog_on;
static struct thermal_cooling_device *cl_fps_dev[MAX_NUM_INSTANCE_MTK_COOLER_FPS] = { 0 };
static unsigned int cl_fps_param[MAX_NUM_INSTANCE_MTK_COOLER_FPS] = { 0 };
static unsigned long cl_fps_state[MAX_NUM_INSTANCE_MTK_COOLER_FPS] = { 0 };
static unsigned int cl_fps_cur_limit;
static unsigned int tm_input_fps;
static struct switch_dev fps_switch_data;
#if ADAPTIVE_FPS_COOLER
/* TODO: TBD */
#define MAX_FPS_LIMIT 60
#define MIN_FPS_LIMIT 10
#define MAX_FPS_LEVELS (MAX_FPS_LIMIT - MIN_FPS_LIMIT)
#define DEFAULT_FPS_LEVEL 10
static int fps_level[MAX_FPS_LEVELS];
static int nr_fps_levels = MAX_FPS_LEVELS;
static int curr_fps_level;
#define MAX_FPS_SMA_LEN 10
static int fps_history[MAX_FPS_SMA_LEN] = {0};
static int fps_history_idx;
static int fps_sma_len = MAX_FPS_SMA_LEN;
#define MAX_TPCB_SMA_LEN 10
static int tpcb_history[MAX_TPCB_SMA_LEN] = {0};
static int tpcb_history_idx;
static int tpcb_sma_len = MAX_TPCB_SMA_LEN;
#define MAX_GPU_LOADING_SMA_LEN 10
static int gpu_loading_history[MAX_GPU_LOADING_SMA_LEN] = {0};
static int gpu_loading_history_idx;
static int gpu_loading_sma_len = MAX_GPU_LOADING_SMA_LEN;
static struct thermal_cooling_device *cl_adp_fps_dev;
static unsigned int cl_adp_fps_state;
static int cl_adp_fps_limit = MAX_FPS_LIMIT;
#define GPU_LOADING_THRESHOLD 80
/* in percentage */
static int gpu_loading_threshold = GPU_LOADING_THRESHOLD;
/* in percentage */
static int fps_error_threshold = 10;
/* in round */
static int fps_stable_period = 10;
/* FPS is active when over stable tpcb or always */
static int fps_limit_always_on;
static int leave_fps_limit_duration = 1;
/* minimum fps that we regard as still in game playing */
static int in_game_low_fps = 5;
/* FIXME: need someone set/clear this */
static int in_game_whitelist = 1;
#endif
/*
 * Refresh tm_input_fps from the primary display session.
 *
 * NOTE(review): the actual query (disp_mgr_get_session_info) is commented
 * out, so `info` stays zeroed and tm_input_fps is effectively always set
 * to 0 here -- confirm whether the display hookup is intentionally
 * disabled on this platform.
 */
static int fps_update(void)
{
	disp_session_info info;

	memset(&info, 0, sizeof(info));
	info.session_id = MAKE_DISP_SESSION(DISP_SESSION_PRIMARY, 0);
	/* disp_mgr_get_session_info(&info); */ /* fix it */

	/* mtk_cooler_fps_dprintk("display update fps is: %d.%d\n", info.updateFPS/100, info.updateFPS%100); */
	/* mtk_cooler_fps_dprintk("is display fps stable: %d\n", info.is_updateFPS_stable); */

#if 0
	if (info.is_updateFPS_stable)
		tm_input_fps = info.updateFPS;
	else
		tm_input_fps = 0;
#else
	tm_input_fps = info.updateFPS;
#endif
	return 0;
}
/*
 * Recompute the effective fps limit: the minimum over all *active* fixed
 * cooler instances and (when enabled) the adaptive cooler's limit, then
 * publish it through the switch device only when it actually changes.
 *
 * NOTE(review): hitting an active instance whose param is 0 jumps to
 * err_unreg and aborts the whole recomputation, leaving the previous
 * limit in place -- confirm this is the intended guard rather than a
 * per-instance skip.
 */
static void mtk_cl_fps_set_fps_limit(void)
{
	int i = 0;
	int min_limit = 60;
	unsigned int min_param = 60;

	/* find the lowest limit among the throttling fixed instances */
	for (; i < MAX_NUM_INSTANCE_MTK_COOLER_FPS; i++) {
		unsigned long curr_state;

		MTK_CL_FPS_GET_CURR_STATE(curr_state, cl_fps_state[i]);
		if (1 == curr_state) {
			int limit = 0;

			limit = cl_fps_param[i];
			/* a cooler with 0 fps is not allowed */
			if (limit == 0)
				goto err_unreg;

			if (limit <= min_limit) {
				min_limit = limit;
				min_param = cl_fps_param[i];
			}
		}
	}
#if ADAPTIVE_FPS_COOLER
	/* the adaptive governor may demand an even lower cap */
	if (cl_adp_fps_limit < min_param)
		min_param = cl_adp_fps_limit;
#endif
	/* notify userspace via the switch device only on change */
	if (min_param != cl_fps_cur_limit) {
		cl_fps_cur_limit = min_param;
		switch_set_state(&fps_switch_data, cl_fps_cur_limit);
		mtk_cooler_fps_dprintk_always("[%s] fps limit: %d\n", __func__, cl_fps_cur_limit);
	}

err_unreg:
	return;
}
/* Thermal framework callback: each fixed fps cooler is binary, so the
 * maximum state is always 1 (on). */
static int mtk_cl_fps_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
{
	*state = 1;
	mtk_cooler_fps_dprintk("[%s] %s %lu\n", __func__, cdev->type, *state);
	return 0;
}
/* Thermal framework callback: report this instance's throttle state.
 * devdata points at the per-instance state word (see register_ltf);
 * the macro extracts its low 16 bits. */
static int mtk_cl_fps_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
{
	MTK_CL_FPS_GET_CURR_STATE(*state, *((unsigned long *) cdev->devdata));
	mtk_cooler_fps_dprintk("[%s] %s %lu\n", __func__, cdev->type, *state);
	return 0;
}
/* Thermal framework callback: set/clear bit 0 of this instance's state
 * word, then recompute and publish the combined fps limit. */
static int mtk_cl_fps_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
{
	mtk_cooler_fps_dprintk("[%s] %s %lu\n", __func__, cdev->type, state);
	MTK_CL_FPS_SET_CURR_STATE(state, *((unsigned long *) cdev->devdata));
	mtk_cl_fps_set_fps_limit();
	return 0;
}
#if ADAPTIVE_FPS_COOLER
/* Adaptive cooler: binary on/off, so max state is always 1. */
static int adp_fps_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
{
	*state = 1;
	mtk_cooler_fps_dprintk("[%s] %s %lu\n", __func__, cdev->type, *state);
	return 0;
}
/* Adaptive cooler: report the cached state last set via
 * adp_fps_set_cur_state(). */
static int adp_fps_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
{
	*state = cl_adp_fps_state;
	mtk_cooler_fps_dprintk("[%s] %s %lu\n", __func__, cdev->type, *state);
	return 0;
}
/* for gpu_loading, tpcb, fps */
/* Simple moving average over a history buffer: the arithmetic mean of
 * the first @sma_len entries of @vals, truncated by integer division. */
static int get_sma_val(int vals[], int sma_len)
{
	int sum = 0;
	int idx;

	for (idx = 0; idx < sma_len; idx++)
		sum += vals[idx];

	return sum / sma_len;
}
/* Push @val into the ring buffer @vals at the slot *@idx, then advance
 * the write index, wrapping around at @sma_len. */
static void set_sma_val(int vals[], int sma_len, int *idx, int val)
{
	int slot = *idx;

	vals[slot] = val;
	*idx = (slot + 1) % sma_len;
}
/* increase by one level */
static int increase_fps_limit(void)
{
if (curr_fps_level > 0)
curr_fps_level--;
return fps_level[curr_fps_level];
}
/* decrease by one level: move toward the last valid table entry
 * (nr_fps_levels - 1), i.e. a lower fps cap.  Returns the new cap. */
static int decrease_fps_limit(void)
{
	if (curr_fps_level < (nr_fps_levels - 1))
		curr_fps_level++;

	return fps_level[curr_fps_level];
}
/* Jump straight back to level 0 (the highest cap, effectively
 * "no throttling") and return it. */
static int unlimit_fps_limit(void)
{
	curr_fps_level = 0;

	return fps_level[curr_fps_level];
}
/*
 * This function is actually a governor: every fps_stable_period calls it
 * decides whether to raise or lower the adaptive fps cap based on the
 * moving averages of pcb temperature, gpu loading and measured fps.
 *  - over the tpcb target (or always-on mode): if the GPU is nearly
 *    saturated yet fps misses the cap by more than fps_error_threshold
 *    percent, step the cap down (only when in a whitelisted game);
 *  - below the tpcb target with falling temperature and light GPU load,
 *    step the cap back up.
 * Returns the cap to apply.  Uses static state (last_change_tpcb,
 * period), so it is not reentrant.
 */
static int adp_calc_fps_limit(void)
{
	static int last_change_tpcb;
	static int period;
	int sma_tpcb, tpcb_change, sma_fps, gpu_loading;
	int fps_limit = fps_level[curr_fps_level];

	mtk_cooler_fps_dprintk("[%s] enter. period=%d, fps_stable_period=%d, fps_limit=%d\n",
				__func__, period, fps_stable_period, fps_limit);

	/* only re-evaluate once per stable period; otherwise keep the cap */
	if (period < fps_stable_period) {
		period++;
		return fps_limit;
	}
	period = 0;

	gpu_loading = get_sma_val(gpu_loading_history, gpu_loading_sma_len);
	sma_tpcb = get_sma_val(tpcb_history, tpcb_sma_len);
	tpcb_change = sma_tpcb - last_change_tpcb;

	mtk_cooler_fps_dprintk("[%s] enter. gpu_loading=%d, sma_tpcb=%d, tpcb_change=%d\n",
				__func__, gpu_loading, sma_tpcb, tpcb_change);

	if (fps_limit_always_on || sma_tpcb >= mtk_thermal_get_tpcb_target()) {
		sma_fps = get_sma_val(fps_history, fps_sma_len);

		/* If gpu already utilizes almost full capacity but cannot reach the limit,
		 * then we consider to decrease fps limit to avoid unstable fps */
		if (gpu_loading >= gpu_loading_threshold &&
		    fps_limit - sma_fps >= fps_limit * fps_error_threshold / 100) {
			/* we do not limit FPS if not in game */
			if (in_game_whitelist)
				fps_limit = decrease_fps_limit();
#if 0
			else {
				/* FIXME: give hint to somebody, so that user
				 * can decides whether he/she wants fps to be
				 * limited or not */
				/* send_hint_to_user(); */
			}
#endif
		}
	}

	/* tpcb is falling and gpu loading is low, too */
	if (sma_tpcb < mtk_thermal_get_tpcb_target() && tpcb_change < 0
	    && gpu_loading < gpu_loading_threshold) {
		fps_limit = increase_fps_limit();
	}

	if (tpcb_change)
		last_change_tpcb = sma_tpcb;

	/* mtk_cooler_fps_dprintk("[%s] enter. sma_fps=%d, fps_limit=%d\n",
				__func__, sma_fps, fps_limit); */

	return fps_limit;
}
/*
 * Heuristic "still in game" check: returns true while a whitelisted game
 * scene is believed active.  Once the instantaneous fps stays at or below
 * in_game_low_fps for leave_fps_limit_duration consecutive calls, the
 * scene is considered over and false is returned (and the counter reset).
 * Uses a static duration counter, so it is not reentrant.
 */
static bool in_consistent_scene(void)
{
	static int duration;
	int fps = tm_input_fps;

	if (!in_game_whitelist)
		return false;

	if (fps <= in_game_low_fps)
		duration++;
	else	/* TODO: TBD: should we reset duration or decrease */
		duration = 0;

	if (duration >= leave_fps_limit_duration) {
		duration = 0;
		return false;
	} else
		return true;
}
/*
 * Adaptive cooler set_cur_state: also serves as the governor's periodic
 * tick.  On every call it samples fps, temperature and gpu loading into
 * their moving-average histories, runs the governor, and publishes the
 * resulting limit.  Only states 0 and 1 are accepted.
 *
 * NOTE(review): tpcb_history is fed from MTK_THERMAL_SENSOR_AP although
 * the name suggests PCB temperature -- confirm the intended sensor.
 */
static int adp_fps_set_cur_state(struct thermal_cooling_device *cdev,
				unsigned long state)
{
	int gpu_loading;

	if ((state != 0) && (state != 1)) {
		mtk_cooler_fps_dprintk("[%s] invalid input (0: no thro; 1: adp fps thro on)\n", __func__);
		return 0;
	}

	mtk_cooler_fps_dprintk("[%s] %s %lu\n", __func__, cdev->type, state);
	cl_adp_fps_state = state;

	/* check the fps update from display */
	fps_update();
	set_sma_val(fps_history, fps_sma_len, &fps_history_idx, tm_input_fps);
	set_sma_val(tpcb_history, tpcb_sma_len, &tpcb_history_idx,
			mtk_thermal_get_temp(MTK_THERMAL_SENSOR_AP));
	if (!mtk_get_gpu_loading(&gpu_loading))
		gpu_loading = 0;
	set_sma_val(gpu_loading_history, gpu_loading_sma_len, &gpu_loading_history_idx,
			gpu_loading);

	/* 1. update the parameter of "cl_adp_fps_limit" */
	/* do we already leave game? */
	if (!in_consistent_scene())
		unlimit_fps_limit();

	cl_adp_fps_limit = adp_calc_fps_limit();

	/* 2. set the the limit */
	mtk_cl_fps_set_fps_limit();
	return 0;
}
/* cooling-device callbacks for the adaptive fps cooler */
static struct thermal_cooling_device_ops mtk_cl_adp_fps_ops = {
	.get_max_state = adp_fps_get_max_state,
	.get_cur_state = adp_fps_get_cur_state,
	.set_cur_state = adp_fps_set_cur_state,
};
/* Rebuild the default fps level table: MAX_FPS_LIMIT down to
 * MIN_FPS_LIMIT in steps of DEFAULT_FPS_LEVEL (i.e. 60, 50, ..., 10)
 * and record how many entries were generated in nr_fps_levels. */
static void reset_fps_level(void)
{
	int i, fps;

	for (i = 0, fps = MAX_FPS_LIMIT;
	     fps >= MIN_FPS_LIMIT && i < MAX_FPS_LEVELS;
	     i++, fps -= DEFAULT_FPS_LEVEL)
		fps_level[i] = fps;

	nr_fps_levels = i;
}
/* proc read: print the level count followed by each level value,
 * space separated, on one line -- the mirror of clfps_level_write(). */
static int clfps_level_read(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "%d ", nr_fps_levels);
	for (i = 0; i < nr_fps_levels; i++)
		seq_printf(m, "%d ", fps_level[i]);
	seq_puts(m, "\n");

	return 0;
}
/*
 * proc write: "<nr_levels> <level0> <level1> ...", each level within
 * [MIN_FPS_LIMIT, MAX_FPS_LIMIT].  On any parse error the table is
 * restored to its defaults via reset_fps_level().
 *
 * Fixes vs. the previous version:
 *  - kstrtoint() returns 0 on success, so the success/failure tests were
 *    inverted; worse, when conversion failed an *uninitialized* value was
 *    compared against the bounds (undefined behavior).
 *  - each number is now tokenized with strsep() before conversion;
 *    kstrtoint() rejects trailing non-numeric data, so converting the
 *    whole remaining buffer could never succeed for multi-value input.
 *  - allocation failure now reports -ENOMEM instead of -EFAULT, and the
 *    parsed value uses a plain int to match kstrtoint()'s prototype.
 */
static ssize_t clfps_level_write(struct file *file, const char __user *buffer,
			size_t count, loff_t *data)
{
	char *buf, *ori_buf, *tok;
	int _tmp;
	int i, ret = -EINVAL;

	buf = kmalloc(count + 1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* buf is advanced by strsep() below; keep the original for kfree() */
	ori_buf = buf;

	if (copy_from_user(buf, buffer, count)) {
		ret = -EFAULT;
		goto exit;
	}
	buf[count] = '\0';

	/* first token: the number of levels */
	tok = strsep(&buf, " ");
	if (tok == NULL || kstrtoint(tok, 10, &_tmp) != 0 ||
	    _tmp <= 0 || _tmp > MAX_FPS_LEVELS)
		goto exit;
	nr_fps_levels = _tmp;

	/* remaining tokens: the level values, range-checked */
	for (i = 0; i < nr_fps_levels; i++) {
		tok = strsep(&buf, " \n");
		if (tok == NULL || kstrtoint(tok, 10, &_tmp) != 0 ||
		    _tmp > MAX_FPS_LIMIT || _tmp < MIN_FPS_LIMIT)
			goto exit;
		fps_level[i] = _tmp;
	}
	ret = count;
exit:
	kfree(ori_buf);
	if (ret < 0)
		reset_fps_level();
	return ret;
}
/* seq_file boilerplate for the fps-level proc entry */
static int clfps_level_open(struct inode *inode, struct file *file)
{
	return single_open(file, clfps_level_read, NULL);
}

static const struct file_operations clfps_level_fops = {
	.owner = THIS_MODULE,
	.open = clfps_level_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = clfps_level_write,
	.release = single_release,
};
/* proc read: report the supported fps range "<min> <max>" */
static int clfps_adp_read(struct seq_file *m, void *v)
{
	seq_printf(m, "%d %d\n", MIN_FPS_LIMIT, MAX_FPS_LIMIT);
	return 0;
}
/*
 * proc write for the adaptive cooler tuning entry.  Expects four
 * space-separated integers; they are parsed but (per the remaining TODO)
 * not yet validated or applied.
 *
 * Fixes vs. the previous version:
 *  - sscanf() with four conversions returns at most 4, so the old
 *    "== 5" comparison could never succeed and every write was
 *    rejected with -EINVAL; compare against 4 instead.
 *  - allocation failure now reports -ENOMEM instead of -EFAULT.
 */
static ssize_t clfps_adp_write(struct file *file, const char __user *buffer,
			size_t count, loff_t *data)
{
	char *buf;
	int _k_tt, _k_sum_tt, _min, _max;
	int ret = -EINVAL;

	buf = kmalloc(count + 1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (copy_from_user(buf, buffer, count)) {
		ret = -EFAULT;
		goto exit;
	}
	buf[count] = '\0';

	if (sscanf(buf, "%d %d %d %d", &_k_tt, &_k_sum_tt, &_min, &_max) == 4) {
		/* TODO: check the values are valid */
		ret = count;
	}
exit:
	kfree(buf);
	return ret;
}
/* seq_file boilerplate for the adaptive-cooler proc entry */
static int clfps_adp_open(struct inode *inode, struct file *file)
{
	return single_open(file, clfps_adp_read, NULL);
}

static const struct file_operations clfps_adp_fops = {
	.owner = THIS_MODULE,
	.open = clfps_adp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = clfps_adp_write,
	.release = single_release,
};
#endif
/* cooling-device callbacks shared by all fixed fps cooler instances */
static struct thermal_cooling_device_ops mtk_cl_fps_ops = {
	.get_max_state = mtk_cl_fps_get_max_state,
	.get_cur_state = mtk_cl_fps_get_cur_state,
	.set_cur_state = mtk_cl_fps_set_cur_state,
};
/*
 * Register the fixed-limit cooler instances ("mtk-cl-fps00".."03") and,
 * when ADAPTIVE_FPS_COOLER is enabled, the adaptive cooler
 * ("mtk-cl-adp-fps") with the thermal framework.  Each fixed instance
 * gets a pointer to its own state word as devdata.  Always returns 0;
 * registration failures are not checked here (the unregister path
 * tolerates NULL entries).
 */
static int mtk_cooler_fps_register_ltf(void)
{
	int i;

	mtk_cooler_fps_dprintk("register ltf\n");

	for (i = MAX_NUM_INSTANCE_MTK_COOLER_FPS; i-- > 0; ) {
		char temp[20] = { 0 };

		/* name fits: "mtk-cl-fps" + 2 digits + NUL = 13 < 20 */
		sprintf(temp, "mtk-cl-fps%02d", i);
		/* put fps state to cooler devdata */
		cl_fps_dev[i] = mtk_thermal_cooling_device_register(temp, (void *) &cl_fps_state[i],
								&mtk_cl_fps_ops);
	}

#if ADAPTIVE_FPS_COOLER
	cl_adp_fps_dev = mtk_thermal_cooling_device_register("mtk-cl-adp-fps", NULL,
							&mtk_cl_adp_fps_ops);
#endif
	return 0;
}
/* Undo mtk_cooler_fps_register_ltf(): unregister every cooler that was
 * actually created and reset its cached state. */
static void mtk_cooler_fps_unregister_ltf(void)
{
	int i;

	mtk_cooler_fps_dprintk("unregister ltf\n");

	for (i = MAX_NUM_INSTANCE_MTK_COOLER_FPS; i-- > 0; ) {
		if (cl_fps_dev[i]) {
			mtk_thermal_cooling_device_unregister(cl_fps_dev[i]);
			cl_fps_dev[i] = NULL;
			cl_fps_state[i] = 0;
		}
	}

#if ADAPTIVE_FPS_COOLER
	if (cl_adp_fps_dev) {
		mtk_thermal_cooling_device_unregister(cl_adp_fps_dev);
		cl_adp_fps_dev = NULL;
		cl_adp_fps_state = 0;
	}
#endif
}
/* proc read: dump klog flag, the current combined limit, and each
 * instance's parameter (printed both in decimal and hex -- `active`
 * and cl_fps_param[i] are the same value) plus its throttle state. */
static int mtk_cl_fps_proc_read(struct seq_file *m, void *v)
{
	/**
	 * The format to print out:
	 *  kernel_log <0 or 1>
	 *  <mtk-cl-fps<ID>> <limited fps> <param> <state>
	 *  ..
	 */
	{
		int i = 0;

		seq_printf(m, "klog %d\n", cl_fps_klog_on);
		seq_printf(m, "curr_limit %d\n", cl_fps_cur_limit);
		for (; i < MAX_NUM_INSTANCE_MTK_COOLER_FPS; i++) {
			unsigned int active;
			unsigned long curr_state;

			active = cl_fps_param[i];
			MTK_CL_FPS_GET_CURR_STATE(curr_state, cl_fps_state[i]);
			seq_printf(m, "mtk-cl-fps%02d %u 0x%x, state %lu\n", i, active, cl_fps_param[i], curr_state);
		}
	}
	return 0;
}
/*
 * Write handler for /proc/.../clfps.
 * Accepts "<klog_on> <fps0> <fps1> <fps2> <fps3>"; klog_on must be 0 or 1,
 * each fps limit must be 0 (disabled) or within 10..60.
 * Returns @count on success, -EINVAL on a malformed line, 0 on copy failure.
 */
static ssize_t mtk_cl_fps_proc_write(struct file *filp, const char __user *buffer, size_t count, loff_t *data)
{
	int len = 0;
	char desc[128];
	/*
	 * -1 is a sentinel no valid field can carry: it is neither 0 nor in
	 * the accepted 10..60 range, so fields sscanf() does not fill leave
	 * their cl_fps_param[] slot untouched.  Without these initializers
	 * the partial-input path read uninitialized stack memory (UB).
	 */
	int klog_on = -1, fps0 = -1, fps1 = -1, fps2 = -1, fps3 = -1;

	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
	if (copy_from_user(desc, buffer, len))
		return 0;
	desc[len] = '\0';

	/**
	 * sscanf format <klog_on> <mtk-cl-fps00> <mtk-cl-fps01> <mtk-cl-fps02> ...
	 * <klog_on> can only be 0 or 1
	 */
	if (NULL == data) {
		mtk_cooler_fps_dprintk("[%s] null data\n", __func__);
		return -EINVAL;
	}

	/* WARNING: Modify here if MAX_NUM_INSTANCE_MTK_COOLER_FPS is changed to other than 4 */
#if (4 == MAX_NUM_INSTANCE_MTK_COOLER_FPS)
	if (1 <= sscanf(desc, "%d %d %d %d %d",
			&klog_on, &fps0, &fps1, &fps2, &fps3)) {
		if (klog_on == 0 || klog_on == 1)
			cl_fps_klog_on = klog_on;

		if (fps0 == 0)
			cl_fps_param[0] = 0;
		else if (fps0 >= 10 && fps0 <= 60)
			cl_fps_param[0] = fps0;

		if (fps1 == 0)
			cl_fps_param[1] = 0;
		else if (fps1 >= 10 && fps1 <= 60)
			cl_fps_param[1] = fps1;

		if (fps2 == 0)
			cl_fps_param[2] = 0;
		else if (fps2 >= 10 && fps2 <= 60)
			cl_fps_param[2] = fps2;

		if (fps3 == 0)
			cl_fps_param[3] = 0;
		else if (fps3 >= 10 && fps3 <= 60)
			cl_fps_param[3] = fps3;

		return count;
	}
#else
#error "Change correspondent part when changing MAX_NUM_INSTANCE_MTK_COOLER_FPS!"
#endif
	mtk_cooler_fps_dprintk("[%s] bad arg\n", __func__);
	return -EINVAL;
}
/* seq_file open hook for /proc/.../clfps. */
static int mtk_cl_fps_proc_open(struct inode *inod, struct file *filp)
{
	int rc;

	rc = single_open(filp, mtk_cl_fps_proc_read, NULL);
	return rc;
}
/* proc interface for the fps coolers: seq_file reads, custom write parser. */
static const struct file_operations cl_fps_fops = {
	.owner = THIS_MODULE,
	.open = mtk_cl_fps_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = mtk_cl_fps_proc_write,
	.release = single_release,
};
/*
 * Write handler for /proc/fps_tm/fps_count: parse a decimal frame count
 * from userspace into tm_input_fps.
 */
static ssize_t fps_tm_count_write(struct file *filp, const char __user *buf, size_t len, loff_t *data)
{
	char kbuf[32] = { 0 };
	size_t n = (len < sizeof(kbuf) - 1) ? len : sizeof(kbuf) - 1;

	if (copy_from_user(kbuf, buf, n))
		return -EFAULT;

	if (kstrtoint(kbuf, 10, &tm_input_fps) != 0) {
		mtk_cooler_fps_dprintk("[%s] invalid input\n", __func__);
		return -EINVAL;
	}

	mtk_cooler_fps_dprintk("[%s] = %d\n", __func__, tm_input_fps);
	return n;
}
/* seq_file show callback: report the frame count last written by userspace. */
static int fps_tm_count_read(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", tm_input_fps);
	mtk_cooler_fps_dprintk("[%s] %d\n", __func__, tm_input_fps);
	return 0;
}
/* seq_file open hook; forwards any PDE data attached to the proc entry. */
static int fps_tm_count_open(struct inode *inode, struct file *file)
{
	void *pdata = PDE_DATA(inode);

	return single_open(file, fps_tm_count_read, pdata);
}
/* proc interface for /proc/fps_tm/fps_count (thermal manager frame count). */
static const struct file_operations tm_fps_fops = {
	.owner = THIS_MODULE,
	.open = fps_tm_count_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = fps_tm_count_write,
	.release = single_release,
};
/* ===== debug only===
#define debugfs_entry(name) \
do { \
dentry_f = debugfs_create_u32(#name, S_IWUSR | S_IRUGO, _d, &name); \
if (IS_ERR_OR_NULL(dentry_f)) { \
pr_warn("Unable to create debugfsfile: " #name "\n"); \
return; \
} \
} while (0)
static void create_debugfs_entries(void)
{
struct dentry *dentry_f;
struct dentry *_d;
_d = debugfs_create_dir("clfps", NULL);
if (IS_ERR_OR_NULL(_d)) {
pr_info("unable to create debugfs directory\n");
return;
}
debugfs_entry(fps_error_threshold);
debugfs_entry(fps_stable_period);
debugfs_entry(curr_fps_level);
debugfs_entry(in_game_whitelist);
debugfs_entry(fps_limit_always_on);
debugfs_entry(leave_fps_limit_duration);
debugfs_entry(in_game_low_fps);
debugfs_entry(gpu_loading_threshold);
}
#undef debugfs_entry
===== debug only === */
/*
 * Module init: register the fps cooling devices, the "fps" switch device
 * used to emit fps-limit uevents, and the proc interface
 * (/proc/fps_tm/fps_count and /proc/driver/thermal/clfps*).
 * Returns 0 on success, or the register_ltf error code.
 */
static int __init mtk_cooler_fps_init(void)
{
	int ret = 0;
	int err = 0;
	int i;

	/* Reset per-instance bookkeeping before any registration. */
	for (i = MAX_NUM_INSTANCE_MTK_COOLER_FPS; i-- > 0; ) {
		cl_fps_dev[i] = NULL;
		cl_fps_state[i] = 0;
	}

	mtk_cooler_fps_dprintk("init\n");

	err = mtk_cooler_fps_register_ltf();
	if (err)
		goto err_unreg;

	/* switch device to sent the (fps limit)uevent */
	fps_switch_data.name = "fps";
	fps_switch_data.index = 0;
	fps_switch_data.state = 60; /* original 60 frames */
	ret = switch_dev_register(&fps_switch_data);
	if (ret)
		mtk_cooler_fps_dprintk_always("[%s] switch_dev_register failed, returned:%d!\n",
					      __func__, ret);

	/* create a proc file */
	{
		struct proc_dir_entry *entry = NULL;
		struct proc_dir_entry *dir_entry = NULL;
		struct proc_dir_entry *fps_tm_proc_dir = NULL;

		fps_tm_proc_dir = proc_mkdir("fps_tm", NULL);
		if (!fps_tm_proc_dir)
			mtk_cooler_fps_dprintk_always("[%s]: mkdir /proc/fps_tm failed\n", __func__);
		else
			/*
			 * 0666 instead of S_IRWXUGO: execute bits are
			 * meaningless on a proc file and get flagged as
			 * overly permissive; read/write for all is kept.
			 */
			entry = proc_create("fps_count", S_IRUGO | S_IWUGO, fps_tm_proc_dir, &tm_fps_fops);

		dir_entry = mtk_thermal_get_proc_drv_therm_dir_entry();
		if (!dir_entry)
			mtk_cooler_fps_dprintk_always("[%s]: mkdir /proc/driver/thermal failed\n", __func__);
		else {
			entry = proc_create("clfps", S_IRUGO | S_IWUSR | S_IWGRP, dir_entry, &cl_fps_fops);
			if (entry)
				proc_set_user(entry, uid, gid);
		}
#if ADAPTIVE_FPS_COOLER
		reset_fps_level();

		if (dir_entry) {
			entry = proc_create("clfps_adp", S_IRUGO | S_IWUSR | S_IWGRP,
					    dir_entry, &clfps_adp_fops);
			if (entry)
				proc_set_user(entry, uid, gid);
			entry = proc_create("clfps_level", S_IRUGO | S_IWUSR | S_IWGRP,
					    dir_entry, &clfps_level_fops);
			if (entry)
				proc_set_user(entry, uid, gid);
		}
		/* ===== debug only===
		create_debugfs_entries();
		===== debug only === */
#endif
		return 0;
	}

err_unreg:
	mtk_cooler_fps_unregister_ltf();
	return err;
}
/*
 * Module exit: remove the proc entries and unregister the cooling devices.
 */
static void __exit mtk_cooler_fps_exit(void)
{
	struct proc_dir_entry *dir_entry;

	mtk_cooler_fps_dprintk("exit\n");

	/*
	 * "clfps" (and the adaptive entries) were created under
	 * /proc/driver/thermal, so they must be removed from that directory;
	 * passing a NULL parent looks in /proc root and never finds them,
	 * leaking the entries after the module is gone.
	 */
	dir_entry = mtk_thermal_get_proc_drv_therm_dir_entry();
	if (dir_entry) {
		remove_proc_entry("clfps", dir_entry);
#if ADAPTIVE_FPS_COOLER
		remove_proc_entry("clfps_adp", dir_entry);
		remove_proc_entry("clfps_level", dir_entry);
#endif
	}

	mtk_cooler_fps_unregister_ltf();
}
/* Standard module entry/exit registration. */
module_init(mtk_cooler_fps_init);
module_exit(mtk_cooler_fps_exit);
| gpl-2.0 |
ntddk/pemu | roms/u-boot/board/keymile/common/common.c | 18 | 9329 | /*
* (C) Copyright 2008
* Heiko Schocher, DENX Software Engineering, hs@denx.de.
*
* (C) Copyright 2011
* Holger Brunck, Keymile GmbH Hannover, holger.brunck@keymile.com
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <ioports.h>
#include <command.h>
#include <malloc.h>
#include <hush.h>
#include <net.h>
#include <netdev.h>
#include <asm/io.h>
#include <linux/ctype.h>
#if defined(CONFIG_POST)
#include "post.h"
#endif
#include "common.h"
#include <i2c.h>
DECLARE_GLOBAL_DATA_PTR;
/*
* Set Keymile specific environment variables
* Currently only some memory layout variables are calculated here
* ... ------------------------------------------------
* ... |@rootfsaddr |@pnvramaddr |@varaddr |@reserved |@END_OF_RAM
* ... |<------------------- pram ------------------->|
* ... ------------------------------------------------
* @END_OF_RAM: denotes the RAM size
* @pnvramaddr: Startadress of pseudo non volatile RAM in hex
* @pram : preserved ram size in k
* @varaddr : startadress for /var mounted into RAM
*/
/*
 * Compute the Keymile memory-layout environment variables (see the layout
 * diagram above) from the detected RAM size and the configured reserved
 * region sizes, and export them via setenv().  Always returns 0.
 */
int set_km_env(void)
{
	char buf[32];
	unsigned int pnvramaddr;
	unsigned int pram;
	unsigned int varaddr;
	unsigned int kernelmem;
	unsigned long rootfssize = 0;
	char *p;

	/* pseudo-NVRAM sits directly below the reserved pram/phram regions */
	pnvramaddr = gd->ram_size - CONFIG_KM_RESERVED_PRAM - CONFIG_KM_PHRAM
			- CONFIG_KM_PNVRAM;
	sprintf(buf, "0x%x", pnvramaddr);
	setenv("pnvramaddr", buf);

	/* rootfssize (RAM image) is optional; treated as 0 when unset */
	p = getenv("rootfssize");
	if (p != NULL)
		strict_strtoul(p, 16, &rootfssize);
	pram = (rootfssize + CONFIG_KM_RESERVED_PRAM + CONFIG_KM_PHRAM +
		CONFIG_KM_PNVRAM) / 0x400;
	sprintf(buf, "0x%x", pram);
	setenv("pram", buf);

	varaddr = gd->ram_size - CONFIG_KM_RESERVED_PRAM - CONFIG_KM_PHRAM;
	sprintf(buf, "0x%x", varaddr);
	setenv("varaddr", buf);

	/* whatever is not preserved RAM is available to the kernel */
	kernelmem = gd->ram_size - 0x400 * pram;
	sprintf(buf, "0x%x", kernelmem);
	setenv("kernelmem", buf);

	return 0;
}
#if defined(CONFIG_SYS_I2C_INIT_BOARD)
/*
 * Bit-bang an I2C START condition: with SCL high, drive SDA high->low,
 * then pull SCL low.  The step order and half-period delays are part of
 * the bus protocol and must not be reordered.
 */
static void i2c_write_start_seq(void)
{
	set_sda(1);
	udelay(DELAY_HALF_PERIOD);
	set_scl(1);
	udelay(DELAY_HALF_PERIOD);
	set_sda(0);
	udelay(DELAY_HALF_PERIOD);
	set_scl(0);
	udelay(DELAY_HALF_PERIOD);
}
/*
* I2C is a synchronous protocol and resets of the processor in the middle
* of an access can block the I2C Bus until a powerdown of the full unit is
* done. This function toggles the SCL until the SCL and SCA line are
* released, but max. 16 times, after this a I2C start-sequence is sent.
* This I2C Deblocking mechanism was developed by Keymile in association
* with Anatech and Atmel in 1998.
*/
/*
 * Deblock a stuck I2C bus (see the comment above): when SDA is held low,
 * pulse SCL up to 16 times until both lines are released, then send five
 * START sequences and finish with a STOP-like release of the bus.
 * Returns 0 when the bus was freed (or was never stuck), -1 otherwise.
 */
int i2c_make_abort(void)
{
	int ret = 0;
	int i;

	if (!get_sda()) {
		ret = -1;
		for (i = 0; i < 16; i++) {
			int scl_state;
			int sda_state;

			set_scl(0);
			udelay(DELAY_ABORT_SEQ);
			set_scl(1);
			udelay(DELAY_ABORT_SEQ);
			scl_state = get_scl();
			sda_state = get_sda();
			if (scl_state && sda_state) {
				ret = 0;
				break;
			}
		}
	}

	if (ret == 0)
		for (i = 0; i < 5; i++)
			i2c_write_start_seq();

	/* respect stop setup time */
	udelay(DELAY_ABORT_SEQ);
	set_scl(1);
	udelay(DELAY_ABORT_SEQ);
	set_sda(1);
	get_sda();

	return ret;
}
/**
 * i2c_init_board - reset the I2C bus. If the board was power-cycled in the
 * middle of a transfer the bus may be left hanging; for details see
 * doc/I2C_Edge_Conditions.
 */
void i2c_init_board(void)
{
	/* Run the abort/deblock sequence; the result is intentionally ignored. */
	(void)i2c_make_abort();
}
#endif
#if defined(CONFIG_KM_COMMON_ETH_INIT)
/* Initialize the CPU ethernet only when the port is actually present. */
int board_eth_init(bd_t *bis)
{
	if (!ethernet_present())
		return -1;
	return cpu_eth_init(bis);
}
#endif
/*
* do_setboardid command
* read out the board id and the hw key from the intventory EEPROM and set
* this values as environment variables.
*/
/*
 * km_setboardid: copy IVM_BoardId and IVM_HWKey from the hush local
 * variables into the "boardid"/"hwkey" environment variables.
 * Returns 0 on success, 1 when either IVM variable is missing.
 */
static int do_setboardid(cmd_tbl_t *cmdtp, int flag, int argc,
			 char *const argv[])
{
	char *p;

	p = get_local_var("IVM_BoardId");
	if (p == NULL) {
		printf("can't get the IVM_Boardid\n");
		return 1;
	}
	/*
	 * Pass the string straight through: the old intermediate 32-byte
	 * buffer was filled with an unbounded sprintf("%s") and could
	 * overflow on a long IVM value.
	 */
	setenv("boardid", p);
	printf("set boardid=%s\n", p);

	p = get_local_var("IVM_HWKey");
	if (p == NULL) {
		printf("can't get the IVM_HWKey\n");
		return 1;
	}
	setenv("hwkey", p);
	printf("set hwkey=%s\n", p);

	printf("Execute manually saveenv for persistent storage.\n");
	return 0;
}

U_BOOT_CMD(km_setboardid, 1, 0, do_setboardid, "setboardid", "read out bid and "
		 "hwkey from IVM and set in environment");
/*
* command km_checkbidhwk
* if "boardid" and "hwkey" are not already set in the environment, do:
* if a "boardIdListHex" exists in the environment:
* - read ivm data for boardid and hwkey
* - compare each entry of the boardIdListHex with the
* IVM data:
* if match:
* set environment variables boardid, boardId,
* hwkey, hwKey to the found values
* both (boardid and boardId) are set because
* they might be used differently in the
* application and in the init scripts (?)
* return 0 in case of match, 1 if not match or error
*/
/*
 * km_checkbidhwk implementation; see the block comment above for the
 * matching rules.  Returns 0 when the IVM values match the environment
 * (or a boardIdListHex entry), non-zero otherwise.
 */
static int do_checkboardidhwk(cmd_tbl_t *cmdtp, int flag, int argc,
			      char *const argv[])
{
	unsigned long ivmbid = 0, ivmhwkey = 0;
	unsigned long envbid = 0, envhwkey = 0;
	char *p;
	int verbose = argc > 1 && *argv[1] == 'v';
	int rc = 0;

	/*
	 * first read out the real inventory values, these values are
	 * already stored in the local hush variables
	 */
	p = get_local_var("IVM_BoardId");
	if (p == NULL) {
		printf("can't get the IVM_Boardid\n");
		return 1;
	}
	rc = strict_strtoul(p, 16, &ivmbid);

	p = get_local_var("IVM_HWKey");
	if (p == NULL) {
		printf("can't get the IVM_HWKey\n");
		return 1;
	}
	rc = strict_strtoul(p, 16, &ivmhwkey);

	if (!ivmbid || !ivmhwkey) {
		printf("Error: IVM_BoardId and/or IVM_HWKey not set!\n");
		return rc;
	}

	/*
	 * now try to read values from environment if available
	 * NOTE(review): rc only reflects the most recent strict_strtoul call
	 * here, so an earlier parse failure can be masked — existing
	 * behavior, kept as-is.
	 */
	p = getenv("boardid");
	if (p != NULL)
		rc = strict_strtoul(p, 16, &envbid);
	p = getenv("hwkey");
	if (p != NULL)
		rc = strict_strtoul(p, 16, &envhwkey);
	if (rc != 0) {
		/* '\n' added so the message does not run into the next line */
		printf("strict_strtoul returns error: %d\n", rc);
		return rc;
	}

	if (!envbid || !envhwkey) {
		/*
		 * BoardId/HWkey not available in the environment, so try the
		 * environment variable for BoardId/HWkey list
		 */
		char *bidhwklist = getenv("boardIdListHex");

		if (bidhwklist) {
			int found = 0;
			char *rest = bidhwklist;
			char *endp;

			if (verbose) {
				printf("IVM_BoardId: %ld, IVM_HWKey=%ld\n",
					ivmbid, ivmhwkey);
				printf("boardIdHwKeyList: %s\n",
					bidhwklist);
			}
			while (!found) {
				/* loop over each bid/hwkey pair in the list */
				unsigned long bid = 0;
				unsigned long hwkey = 0;

				while (*rest && !isxdigit(*rest))
					rest++;
				/*
				 * use simple_strtoul because we need &end and
				 * we know we got non numeric char at the end
				 */
				bid = simple_strtoul(rest, &endp, 16);
				/* BoardId and HWkey are separated with a "_" */
				if (*endp == '_') {
					rest = endp + 1;
					/*
					 * use simple_strtoul because we need
					 * &end
					 */
					hwkey = simple_strtoul(rest, &endp, 16);
					rest = endp;
					while (*rest && !isxdigit(*rest))
						rest++;
				}
				if ((!bid) || (!hwkey)) {
					/* end of list */
					break;
				}
				if (verbose) {
					printf("trying bid=0x%lX, hwkey=%ld\n",
						bid, hwkey);
				}
				/*
				 * Compare the values of the found entry in the
				 * list with the valid values which are stored
				 * in the inventory eeprom. If they are equal
				 * set the values in environment variables.
				 */
				if ((bid == ivmbid) && (hwkey == ivmhwkey)) {
					char buf[10];

					found = 1;
					envbid = bid;
					envhwkey = hwkey;
					sprintf(buf, "%lx", bid);
					setenv("boardid", buf);
					sprintf(buf, "%lx", hwkey);
					setenv("hwkey", buf);
				}
			} /* end while( ! found ) */
		}
	}

	/* compare now the values */
	if ((ivmbid == envbid) && (ivmhwkey == envhwkey)) {
		printf("boardid=0x%3lX, hwkey=%ld\n", envbid, envhwkey);
		rc = 0; /* match */
	} else {
		printf("Error: env boardid=0x%3lX, hwkey=%ld\n", envbid,
			envhwkey);
		printf("       IVM bId=0x%3lX, hwKey=%ld\n", ivmbid, ivmhwkey);
		rc = 1; /* don't match */
	}
	return rc;
}

U_BOOT_CMD(km_checkbidhwk, 2, 0, do_checkboardidhwk,
	   "check boardid and hwkey",
	   "[v]\n - check environment parameter "\
	   "\"boardIdListHex\" against stored boardid and hwkey "\
	   "from the IVM\n v: verbose output"
);
/*
* command km_checktestboot
* if the testpin of the board is asserted, return 1
* * else return 0
*/
/*
 * km_checktestboot: decide whether to boot the test application.
 * Returns 0 (shell "true") when the test pin is asserted AND test_bank is
 * set; returns 1 otherwise.
 */
static int do_checktestboot(cmd_tbl_t *cmdtp, int flag, int argc,
			    char *const argv[])
{
	int testpin = 0;
	char *bank = NULL;
	int verbose = (argc > 1) && (*argv[1] == 'v');
	int testboot;

#if defined(CONFIG_POST)
	testpin = post_hotkeys_pressed();
	bank = getenv("test_bank");
#endif
	/* when test_bank is not set, act as if testpin is not asserted */
	testboot = (testpin != 0) && (bank != NULL);

	if (verbose) {
		printf("testpin   = %d\n", testpin);
		printf("test_bank = %s\n", bank ? bank : "not set");
		printf("boot test app : %s\n", testboot ? "yes" : "no");
	}

	/* return 0 means: testboot, therefore we need the inversion */
	return !testboot;
}

U_BOOT_CMD(km_checktestboot, 2, 0, do_checktestboot,
	   "check if testpin is asserted",
	   "[v]\n v - verbose output"
);
| gpl-2.0 |
erdincay/clamav-devel | libclamav/c++/llvm/lib/CodeGen/PostRASchedulerList.cpp | 18 | 24118 | //===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;
// Pass-wide counters reported with -stats.
STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                       cl::desc("Enable scheduling after register allocation"),
                       cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

// Out-of-line virtual destructor anchors AntiDepBreaker's vtable here.
AntiDepBreaker::~AntiDepBreaker() { }
namespace {
  // MachineFunction pass wrapper that drives post-register-allocation list
  // scheduling; the actual scheduling is done by SchedulePostRATDList below.
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    const TargetInstrInfo *TII;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;
  // The scheduler proper: builds a dependence DAG per region and emits a
  // top-down list schedule, optionally breaking anti-dependencies first.
  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands becomes available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceding bottom-up),
    /// or ~0u if the register is not live.
    std::vector<unsigned> KillIndices;

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AntiDepBreaker *ADB,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AntiDepBreak(ADB), AA(aa),
        KillIndices(TRI->getNumRegs()) {}

    // Note: HazardRec and AntiDepBreak are owned (and deleted) by the caller.
    ~SchedulePostRATDList() {
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
} // end anonymous namespace
// Drive post-RA scheduling over every basic block of the function,
// splitting each block into schedulable regions at scheduling boundaries.
// Returns true: the function is always considered modified once we run.
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();
  TII = Fn.getTarget().getInstrInfo();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    // The command-line flag, when given, overrides the target hook.
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ?
      TargetSubtarget::ANTIDEP_ALL :
      (EnableAntiDepBreaking == "critical")
        ? TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const TargetMachine &TM = Fn.getTarget();
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  ScheduleHazardRecognizer *HR =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins);
  // Pick the anti-dependence breaker matching the chosen mode (may be NULL).
  AntiDepBreaker *ADB =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    // Regions are discovered walking the block bottom-up.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        // The boundary instruction itself is observed, not scheduled.
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  delete HR;
  delete ADB;

  return true;
}
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Let the base class set up its per-block state first.
  ScheduleDAGInstrs::StartBlock(BB);

  // Fresh block: clear hazard state and, when enabled, anti-dep tracking.
  HazardRec->Reset();
  if (AntiDepBreak)
    AntiDepBreak->StartBlock(BB);
}
/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  // Anti-dep breaking must run on the freshly built graph; if it renames
  // any registers the dependence edges are stale and the graph is rebuilt.
  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (!AntiDepBreak)
    return;
  AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}
/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak)
    AntiDepBreak->FinishBlock();

  // Base-class cleanup runs last, mirroring StartBlock.
  ScheduleDAGInstrs::FinishBlock();
}
/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Mark every register (and so every subregister) as not live.
  KillIndices.assign(TRI->getNumRegs(), ~0u);

  const unsigned BlockEnd = BB->size();
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // Return block: the function's live-out registers are live at the end.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BlockEnd;
      // Repeat, for all subregs.
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
        KillIndices[*SR] = BlockEnd;
    }
  } else {
    // Otherwise the union of all successors' live-ins is live at the end.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BlockEnd;
        // Repeat, for all subregs.
        for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
          KillIndices[*SR] = BlockEnd;
      }
    }
  }
}
// Toggle a register operand's kill flag to match the liveness recorded in
// KillIndices, adding imp-defs for still-live subregisters when the
// super-register itself is dead.  Always returns false (the operand is
// never deleted here).
bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Not yet marked as a kill: mark it and we are done.
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // The full register is still live past this point, so it is not a kill.
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // The full register is dead, but a subregister may still be live.  Clear
  // the kill, add an imp-def for each live subreg, and restore the kill
  // flag only when no subreg turned out to be live.
  MO.setIsKill(false);
  bool AnySubRegLive = false;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *SR = TRI->getSubRegisters(SuperReg); *SR; ++SR) {
    if (KillIndices[*SR] == ~0u)
      continue;
    AnySubRegLive = true;
    MI->addOperand(MachineOperand::CreateReg(*SR,
                                             true  /*IsDef*/,
                                             true  /*IsImp*/,
                                             false /*IsKill*/,
                                             false /*IsDead*/));
  }

  if (!AnySubRegLive)
    MO.setIsKill(true);
  return false;
}
/// FixupKills - Fix the register kill flags, they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  // Seed KillIndices with the registers live out of this block.
  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness.  Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}
//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *Succ = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (Succ->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    Succ->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --Succ->NumPredsLeft;

  // The successor can start no earlier than this predecessor's start plus
  // the edge latency (a max over all predecessors).
  Succ->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // Once every predecessor has been scheduled the node becomes pending;
  // the special ExitSU sentinel is never queued.
  if (Succ->NumPredsLeft == 0 && Succ != &ExitSU)
    PendingQueue.push_back(Succ);
}
/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
       SI != SE; ++SI)
    ReleaseSucc(SU, &*SI);
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  // Record the node in the final instruction order and pin its depth to
  // the cycle it was actually issued in.
  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  // Propagate availability to successors, then notify the priority queue.
  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers. Repeatedly picks the highest-priority hazard-free node,
/// advancing cycles (or emitting noops) when nothing can issue.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back.  Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue.  If
    // so, add them to the available queue. Swap-with-back keeps removal O(1).
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            dbgs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    // Pop nodes until one with no structural hazard is found; the rest are
    // remembered in NotReady and requeued below.
    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right.  This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
/// createPostRAScheduler - Public factory for the post-register-allocation
/// scheduling pass at the requested optimization level.
FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  PostRAScheduler *Pass = new PostRAScheduler(OptLevel);
  return Pass;
}
| gpl-2.0 |
NorthWard/shooter-player | src/subpic/ISubPic.cpp | 18 | 34133 | /*
* Copyright (C) 2003-2006 Gabest
* http://www.gabest.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU Make; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
* http://www.gnu.org/copyleft/gpl.html
*
*/
#include "stdafx.h"
#include "ISubPic.h"
#include "..\DSUtil\DSUtil.h"
#include <afxtempl.h>
#include "..\apps\mplayerc\mplayerc.h"
#include "..\svplib\svplib.h"
//#define LOGSUBRECT
#undef SVP_LogMsg5
#define SVP_LogMsg5 __noop
//
// ISubPicImpl
//
// ISubPicImpl: base implementation of a single rendered subtitle picture.
// All timing members are REFERENCE_TIME (100 ns units); geometry starts empty.
ISubPicImpl::ISubPicImpl()
	: CUnknown(NAME("ISubPicImpl"), NULL)
	, m_rtStart(0), m_rtStop(0)
	, m_rtSegmentStart(0), m_rtSegmentStop(0)
	, m_rcDirty(0, 0, 0, 0), m_maxsize(0, 0), m_size(0, 0), m_vidrect(0, 0, 0, 0)
	, m_VirtualTextureSize(0, 0), m_VirtualTextureTopLeft (0, 0)
{
}

// Standard CUnknown QI chain: expose ISubPic, delegate everything else.
STDMETHODIMP ISubPicImpl::NonDelegatingQueryInterface(REFIID riid, void** ppv)
{
	return
		QI(ISubPic)
		__super::NonDelegatingQueryInterface(riid, ppv);
}
// ISubPic
// ISubPic

// Display start/stop of this subpic (100 ns units).
STDMETHODIMP_(REFERENCE_TIME) ISubPicImpl::GetStart()
{
	return(m_rtStart);
}

STDMETHODIMP_(REFERENCE_TIME) ISubPicImpl::GetStop()
{
	return(m_rtStop);
}

// Segment start/stop: the time span of the whole subtitle event this subpic
// belongs to. A zero segment value falls back to the plain start/stop.
STDMETHODIMP_(REFERENCE_TIME) ISubPicImpl::GetSegmentStart()
{
	if (m_rtSegmentStart)
		return(m_rtSegmentStart);
	return(m_rtStart);
}

STDMETHODIMP_(REFERENCE_TIME) ISubPicImpl::GetSegmentStop()
{
	if (m_rtSegmentStop)
		return(m_rtSegmentStop);
	return(m_rtStop);
}

STDMETHODIMP_(void) ISubPicImpl::SetSegmentStart(REFERENCE_TIME rtStart)
{
	m_rtSegmentStart = rtStart;
}

STDMETHODIMP_(void) ISubPicImpl::SetSegmentStop(REFERENCE_TIME rtStop)
{
	m_rtSegmentStop = rtStop;
}

STDMETHODIMP_(void) ISubPicImpl::SetStart(REFERENCE_TIME rtStart)
{
	m_rtStart = rtStart;
}

STDMETHODIMP_(void) ISubPicImpl::SetStop(REFERENCE_TIME rtStop)
{
	m_rtStop = rtStop;
}
// Copy this subpic's timing and geometry into another subpic instance.
// Fix: also propagate the segment start/stop. Previously the copy lost its
// segment window, so a dynamic subpic cloned from the static one matched
// lookups only over its rendered sub-interval instead of the full segment.
STDMETHODIMP ISubPicImpl::CopyTo(ISubPic* pSubPic)
{
	if(!pSubPic)
		return E_POINTER;

	pSubPic->SetStart(m_rtStart);
	pSubPic->SetStop(m_rtStop);
	pSubPic->SetSegmentStart(m_rtSegmentStart);
	pSubPic->SetSegmentStop(m_rtSegmentStop);
	pSubPic->SetDirtyRect(m_rcDirty);
	pSubPic->SetSize(m_size, m_vidrect);
	pSubPic->SetVirtualTextureSize(m_VirtualTextureSize, m_VirtualTextureTopLeft);

	return S_OK;
}
// Return the rectangle touched by the last render; E_POINTER on NULL output.
STDMETHODIMP ISubPicImpl::GetDirtyRect(RECT* pDirtyRect)
{
	if(!pDirtyRect)
		return E_POINTER;
	*pDirtyRect = m_rcDirty;
	return S_OK;
}
// Compute the source rect (in texture space) and destination rect (scaled to
// the target size *pSize) for blitting this subpic. The dest is additionally
// shrunk when the target is taller than 4:5 of its width (letterbox-style
// squeeze); fails with E_INVALIDARG until SetSize() has been called.
STDMETHODIMP ISubPicImpl::GetSourceAndDest(SIZE* pSize, RECT* pRcSource, RECT* pRcDest)
{
	CheckPointer (pRcSource, E_POINTER);
	CheckPointer (pRcDest,	 E_POINTER);
	if(m_size.cx > 0 && m_size.cy > 0)
	{
		CRect rcTemp = m_rcDirty;
		// FIXME
		// Deflate by one pixel, presumably to avoid sampling the border
		// texels of the dirty region -- TODO confirm intent.
		rcTemp.DeflateRect(1, 1);
		*pRcSource = rcTemp;
		// Scale dirty rect from virtual-texture coordinates to target size.
		CRect RcDest (rcTemp.left * pSize->cx / m_VirtualTextureSize.cx,
			rcTemp.top * pSize->cy / m_VirtualTextureSize.cy,
			rcTemp.right * pSize->cx / m_VirtualTextureSize.cx,
			rcTemp.bottom * pSize->cy / m_VirtualTextureSize.cy);
		LONG cy = min ( pSize->cx * 4/5 , pSize->cy );
		if(cy < pSize->cy ){
			// Target is taller than 4:5; deflate the dest proportionally.
			cy = pSize->cy - cy;
			RcDest.DeflateRect( RcDest.Width() * cy / pSize->cy / 2 , RcDest.Height() * cy / pSize->cy / 2);
		}
		*pRcDest = RcDest;
#ifdef LOGSUBRECT
		CString szLog ;
		szLog.Format(_T(" resize w %d %d h %d %d") , pSize->cx , m_VirtualTextureSize.cx ,pSize->cy , m_VirtualTextureSize.cy);
		SVP_LogMsg(szLog);
#endif
		return S_OK;
	}
	else
		return E_INVALIDARG;
}
// Store the dirty rectangle for this subpic; E_POINTER on NULL input.
STDMETHODIMP ISubPicImpl::SetDirtyRect(RECT* pDirtyRect)
{
	if(!pDirtyRect)
		return E_POINTER;
	m_rcDirty = *pDirtyRect;
	return S_OK;
}

// Report the maximum texture size this subpic may use.
STDMETHODIMP ISubPicImpl::GetMaxSize(SIZE* pMaxSize)
{
	if(!pMaxSize)
		return E_POINTER;
	*pMaxSize = m_maxsize;
	return S_OK;
}
// Set the requested subpic size and video rect, clamping the size to the
// maximum texture size while preserving aspect ratio. If the size had to be
// clamped, the video rect is rescaled by the same ratios.
// Fix: the rect rescaling had its axes swapped (top/bottom were scaled by the
// horizontal ratio and left/right by the vertical ratio); horizontal edges
// now use cx ratios and vertical edges cy ratios.
STDMETHODIMP ISubPicImpl::SetSize(SIZE size, RECT vidrect)
{
	m_size = size;
	m_vidrect = vidrect;

	// Clamp width, then height, keeping proportions.
	if(m_size.cx > m_maxsize.cx)
	{
		m_size.cy = MulDiv(m_size.cy, m_maxsize.cx, m_size.cx);
		m_size.cx = m_maxsize.cx;
	}

	if(m_size.cy > m_maxsize.cy)
	{
		m_size.cx = MulDiv(m_size.cx, m_maxsize.cy, m_size.cy);
		m_size.cy = m_maxsize.cy;
	}

	if(m_size.cx != size.cx || m_size.cy != size.cy)
	{
		m_vidrect.left = MulDiv(m_vidrect.left, m_size.cx, size.cx);
		m_vidrect.right = MulDiv(m_vidrect.right, m_size.cx, size.cx);
		m_vidrect.top = MulDiv(m_vidrect.top, m_size.cy, size.cy);
		m_vidrect.bottom = MulDiv(m_vidrect.bottom, m_size.cy, size.cy);
	}
	m_VirtualTextureSize = m_size;

	return S_OK;
}
// Record the virtual texture dimensions and the offset of this subpic's
// texture within the virtual texture (used when the renderer splits a large
// subtitle surface across textures).
STDMETHODIMP ISubPicImpl::SetVirtualTextureSize (const SIZE pSize, const POINT pTopLeft)
{
	m_VirtualTextureSize.SetSize (pSize.cx, pSize.cy);
	m_VirtualTextureTopLeft.SetPoint (pTopLeft.x, pTopLeft.y);
	return S_OK;
}
//
// ISubPicAllocatorImpl
//
// ISubPicAllocatorImpl: base allocator for subpic surfaces. Tracks the
// current target size/video rect and allocation flags (write-only dynamic
// surfaces, power-of-two texture restriction).
ISubPicAllocatorImpl::ISubPicAllocatorImpl(SIZE cursize, bool fDynamicWriteOnly, bool fPow2Textures)
	: CUnknown(NAME("ISubPicAllocatorImpl"), NULL)
	, m_cursize(cursize)
	, m_fDynamicWriteOnly(fDynamicWriteOnly)
	, m_fPow2Textures(fPow2Textures)
{
	// By default the video rect covers the whole current size.
	m_curvidrect = CRect(CPoint(0,0), m_cursize);
}

STDMETHODIMP ISubPicAllocatorImpl::NonDelegatingQueryInterface(REFIID riid, void** ppv)
{
	return
		QI(ISubPicAllocator)
		__super::NonDelegatingQueryInterface(riid, ppv);
}

// ISubPicAllocator

// Serialize access to the allocator (paired with Unlock below).
STDMETHODIMP ISubPicAllocatorImpl::Lock(){
	m_pLock.Lock();
	return S_OK;
}
STDMETHODIMP ISubPicAllocatorImpl::Unlock(){
	m_pLock.Unlock();
	return S_OK ;
}

// Update the target size used for subsequent allocations.
STDMETHODIMP ISubPicAllocatorImpl::SetCurSize(SIZE cursize)
{
	m_cursize = cursize;
	return S_OK;
}

// Update the video rect used for subsequent allocations.
STDMETHODIMP ISubPicAllocatorImpl::SetCurVidRect(RECT curvidrect)
{
	m_curvidrect = curvidrect;
	return S_OK;
}
// Return (lazily allocating) the single shared "static" subpic used as a
// render scratch surface; resizes it to the current size/vidrect each call.
// The caller receives an AddRef'd pointer.
STDMETHODIMP ISubPicAllocatorImpl::GetStatic(ISubPic** ppSubPic)
{
	if(!ppSubPic)
		return E_POINTER;

	if(!m_pStatic)
	{
		// Alloc() is implemented by the concrete allocator subclass.
		if(!Alloc(true, &m_pStatic) || !m_pStatic)
			return E_OUTOFMEMORY;
	}

	m_pStatic->SetSize(m_cursize, m_curvidrect);

	(*ppSubPic = m_pStatic)->AddRef();

	return S_OK;
}
// Allocate a fresh dynamic subpic sized to the current size/vidrect.
STDMETHODIMP ISubPicAllocatorImpl::AllocDynamic(ISubPic** ppSubPic)
{
	if(!ppSubPic)
		return E_POINTER;

	if(!Alloc(false, ppSubPic) || !*ppSubPic)
		return E_OUTOFMEMORY;

	(*ppSubPic)->SetSize(m_cursize, m_curvidrect);

	return S_OK;
}

// True when dynamic surfaces can only be written (not read back).
STDMETHODIMP_(bool) ISubPicAllocatorImpl::IsDynamicWriteOnly()
{
	return(m_fDynamicWriteOnly);
}

// Drop the cached static surface when the rendering device changes; the
// pDev argument is unused here (subclasses may need it).
STDMETHODIMP ISubPicAllocatorImpl::ChangeDevice(IUnknown* pDev)
{
	m_pStatic = NULL;
	return S_OK;
}
//
// ISubPicProviderImpl
//
// ISubPicProviderImpl: base class for subtitle sources. Holds a borrowed
// critical section (owned by the caller) used to serialize rendering.
ISubPicProviderImpl::ISubPicProviderImpl(CCritSec* pLock)
	: CUnknown(NAME("ISubPicProviderImpl"), NULL)
	, m_pLock(pLock)
{
}

ISubPicProviderImpl::~ISubPicProviderImpl()
{
}

STDMETHODIMP ISubPicProviderImpl::NonDelegatingQueryInterface(REFIID riid, void** ppv)
{
	return
		QI(ISubPicProvider)
		__super::NonDelegatingQueryInterface(riid, ppv);
}

// ISubPicProvider

// Lock/Unlock fail with E_FAIL when no critical section was supplied.
STDMETHODIMP ISubPicProviderImpl::Lock()
{
	return m_pLock ? m_pLock->Lock(), S_OK : E_FAIL;
}

STDMETHODIMP ISubPicProviderImpl::Unlock()
{
	return m_pLock ? m_pLock->Unlock(), S_OK : E_FAIL;
}
//
// ISubPicQueueImpl
//
// ISubPicQueueImpl: base of the subpic queue. Requires a non-NULL allocator;
// reports failure through *phr (COM-style two-phase construction).
ISubPicQueueImpl::ISubPicQueueImpl(ISubPicAllocator* pAllocator, HRESULT* phr)
	: CUnknown(NAME("ISubPicQueueImpl"), NULL)
	, m_pAllocator(pAllocator)
	, m_rtNow(0)
	, m_rtNowLast(0)
	, m_fps(25.0)   // default frame rate until SetFPS() is called
{
	if(phr) *phr = S_OK;

	if(!m_pAllocator)
	{
		if(phr) *phr = E_FAIL;
		return;
	}
}

ISubPicQueueImpl::~ISubPicQueueImpl()
{
}

STDMETHODIMP ISubPicQueueImpl::NonDelegatingQueryInterface(REFIID riid, void** ppv)
{
	return
		QI(ISubPicQueue)
		__super::NonDelegatingQueryInterface(riid, ppv);
}
// ISubPicQueue
// ISubPicQueue

// Swap in a new subtitle provider and invalidate any queued subpics.
// Note the equality check is commented out, so the queue is invalidated even
// when the same provider is set again.
STDMETHODIMP ISubPicQueueImpl::SetSubPicProvider(ISubPicProvider* pSubPicProvider)
{
	CAutoLock cAutoLock(&m_csSubPicProvider);

//	if(m_pSubPicProvider != pSubPicProvider)
	{
		m_pSubPicProvider = pSubPicProvider;

		Invalidate();
	}

	return S_OK;
}
// Return an AddRef'd pointer to the current provider, or E_FAIL when none is
// set. Fix: the output pointer is now initialized before the conditional
// AddRef; previously, when m_pSubPicProvider was NULL, the final return read
// *pSubPicProvider uninitialized (undefined behavior and a possible bogus
// S_OK with a garbage pointer).
STDMETHODIMP ISubPicQueueImpl::GetSubPicProvider(ISubPicProvider** pSubPicProvider)
{
	if(!pSubPicProvider)
		return E_POINTER;

	*pSubPicProvider = NULL;

	CAutoLock cAutoLock(&m_csSubPicProvider);

	if(m_pSubPicProvider)
		(*pSubPicProvider = m_pSubPicProvider)->AddRef();

	return *pSubPicProvider ? S_OK : E_FAIL;
}
// Record the current video frame rate (used to size render intervals).
STDMETHODIMP ISubPicQueueImpl::SetFPS(double fps)
{
	m_fps = fps;

	return S_OK;
}

// Record the current playback time (100 ns units).
STDMETHODIMP ISubPicQueueImpl::SetTime(REFERENCE_TIME rtNow)
{
	m_rtNow = rtNow;

	return S_OK;
}
// private
// private

// Render the provider's content for [rtStart, rtStop) into pSubPic.
// For animated subtitles the segment start time is rendered; otherwise the
// midpoint of the interval is used. Returns the provider's Render() result,
// or E_FAIL on any precondition failure.
HRESULT ISubPicQueueImpl::RenderTo(ISubPic* pSubPic, REFERENCE_TIME rtStart, REFERENCE_TIME rtStop, double fps, BOOL bIsAnimated)
{
	HRESULT hr = E_FAIL;

	if(!pSubPic)
		return hr;

	CComPtr<ISubPicProvider> pSubPicProvider;
	if(FAILED(GetSubPicProvider(&pSubPicProvider)) || !pSubPicProvider)
		return hr;

	if(FAILED(pSubPicProvider->Lock()))
		return hr;

	SubPicDesc spd;
	// Clear to opaque black, then lock the surface for CPU rendering.
	if(SUCCEEDED(pSubPic->ClearDirtyRect(0xFF000000))
	&& SUCCEEDED(pSubPic->Lock(spd)))
	{
		CRect r(0,0,0,0);
		hr = pSubPicProvider->Render(spd, bIsAnimated ? rtStart : ((rtStart+rtStop)/2), fps, r);

		pSubPic->SetStart(rtStart);
		pSubPic->SetStop(rtStop);

		pSubPic->Unlock(r);
	}

	pSubPicProvider->Unlock();

	return hr;
}
//
// CSubPicQueue
//
//
// CSubPicQueue
//
// CSubPicQueue: threaded queue that pre-renders up to nMaxSubPic subpics
// ahead of playback on a CAMThread worker.
CSubPicQueue::CSubPicQueue(int nMaxSubPic, ISubPicAllocator* pAllocator, HRESULT* phr, BOOL bDisableAnim)
	: ISubPicQueueImpl(pAllocator, phr)
	, m_nMaxSubPic(nMaxSubPic)
	, m_bDisableAnim(bDisableAnim)
	,m_rtQueueMin(0)
	,m_rtQueueMax(0)
{
	if(phr && FAILED(*phr))
		return;

	if(m_nMaxSubPic < 1)
		{if(phr) *phr = E_INVALIDARG; return;}

	m_fBreakBuffering = false;
	// Auto-reset, initially unsignaled events used to wake the worker thread.
	for(ptrdiff_t i = 0; i < EVENT_COUNT; i++)
		m_ThreadEvents[i] = CreateEvent(NULL, FALSE, FALSE, NULL);
	CAMThread::Create();
}

CSubPicQueue::~CSubPicQueue()
{
	// Signal the worker to exit, join it, then release the event handles.
	m_fBreakBuffering = true;
	SetEvent(m_ThreadEvents[EVENT_EXIT]);
	CAMThread::Close();
	for(ptrdiff_t i = 0; i < EVENT_COUNT; i++)
		CloseHandle(m_ThreadEvents[i]);
}
// ISubPicQueue
// ISubPicQueue

// Update the frame rate and nudge the worker thread to re-buffer.
STDMETHODIMP CSubPicQueue::SetFPS(double fps)
{
	HRESULT hr = __super::SetFPS(fps);
	if(FAILED(hr)) return hr;

	SetEvent(m_ThreadEvents[EVENT_TIME]);

	return S_OK;
}

// Update the playback time and nudge the worker thread to re-buffer.
STDMETHODIMP CSubPicQueue::SetTime(REFERENCE_TIME rtNow)
{
	HRESULT hr = __super::SetTime(rtNow);
	if(FAILED(hr)) return hr;

	SetEvent(m_ThreadEvents[EVENT_TIME]);

	return S_OK;
}
// Request removal of queued subpics ending after rtInvalidate. The actual
// pruning happens on the worker thread (see ThreadProc), triggered by the
// m_fBreakBuffering flag plus the EVENT_TIME wake-up.
STDMETHODIMP CSubPicQueue::Invalidate(REFERENCE_TIME rtInvalidate)
{
	{
//		CAutoLock cQueueLock(&m_csQueueLock);
//		RemoveAll();

		m_rtInvalidate = rtInvalidate;
		m_fBreakBuffering = true;
#if DSubPicTraceLevel > 0
		TRACE(_T("Invalidate: %f\n"), double(rtInvalidate) / 10000000.0);
#endif

		SetEvent(m_ThreadEvents[EVENT_TIME]);
	}

	return S_OK;
}
#define DSubPicTraceLevel 0
#define TRACE __noop
//SVP_LogMsg6
// Find the queued subpic to display at rtNow: among all subpics whose
// [start, segment-stop) window contains rtNow, pick the one minimizing
// (rtNow - stop). Returns true and sets ppSubPic on a hit.
STDMETHODIMP_(bool) CSubPicQueue::LookupSubPic(REFERENCE_TIME rtNow, CComPtr<ISubPic> &ppSubPic)
{
	CAutoLock cQueueLock(&m_csQueueLock);

	REFERENCE_TIME rtBestStop = 0x7fffffffffffffffi64;   // best (smallest) rtNow - stop so far
	POSITION pos = m_Queue.GetHeadPosition();
#if DSubPicTraceLevel > 2
	TRACE("Find: %d", m_Queue.GetCount());
#endif
	while(pos)
	{
		CComPtr<ISubPic> pSubPic = m_Queue.GetNext(pos);
		REFERENCE_TIME rtStart = pSubPic->GetStart();
		REFERENCE_TIME rtStop = pSubPic->GetStop();
		REFERENCE_TIME rtSegmentStop = pSubPic->GetSegmentStop();

		if(rtNow >= rtStart && rtNow < rtSegmentStop)
		{
			REFERENCE_TIME Diff = rtNow - rtStop;
			if (Diff < rtBestStop)
			{
				rtBestStop = Diff;
				TRACE(" %f->%f", double(Diff) / 10000000.0, double(rtStop) / 10000000.0);
				ppSubPic = pSubPic;
			}
#if DSubPicTraceLevel > 2
			else
				TRACE(" !%f->%f", double(Diff) / 10000000.0, double(rtStop) / 10000000.0);
#endif
		}
#if DSubPicTraceLevel > 2
		else
			TRACE(" !!%f->%f", double(rtStart) / 10000000.0, double(rtSegmentStop) / 10000000.0);
#endif

	}
#if DSubPicTraceLevel > 2
	TRACE("\n");
#endif
	if (!ppSubPic)
	{
#if DSubPicTraceLevel > 1
		TRACE("NO Display: %f\n", double(rtNow) / 10000000.0);
#endif
	}
	else
	{
#if DSubPicTraceLevel > 0
		REFERENCE_TIME rtStart = (ppSubPic)->GetStart();
		REFERENCE_TIME rtSegmentStop = (ppSubPic)->GetSegmentStop();
		CRect r;
		(ppSubPic)->GetDirtyRect(&r);
		TRACE("Display: %f->%f   %f    %dx%d\n", double(rtStart) / 10000000.0, double(rtSegmentStop) / 10000000.0, double(rtNow) / 10000000.0, r.Width(), r.Height());
#endif
	}

	return(!!ppSubPic);
}
// Report queue statistics: count, current time, and the min/max time span of
// the queued subpics (normalized to 0 when the sentinels are still in place).
STDMETHODIMP CSubPicQueue::GetStats(int& nSubPics, REFERENCE_TIME& rtNow, REFERENCE_TIME& rtStart, REFERENCE_TIME& rtStop)
{
	CAutoLock cQueueLock(&m_csQueueLock);

	nSubPics = m_Queue.GetCount();
	rtNow = m_rtNow;
	rtStart = m_rtQueueMin;
	if (rtStart == 0x7fffffffffffffffi64)   // sentinel: no subpic seen yet
		rtStart = 0;
	rtStop = m_rtQueueMax;
	if (rtStop == 0xffffffffffffffffi64)    // sentinel (-1): no subpic seen yet
		rtStop = 0;

	return S_OK;
}

// Report the start/stop of the nSubPic-th queued subpic; E_INVALIDARG when
// the index is out of range.
STDMETHODIMP CSubPicQueue::GetStats(int nSubPic, REFERENCE_TIME& rtStart, REFERENCE_TIME& rtStop)
{
	CAutoLock cQueueLock(&m_csQueueLock);

	rtStart = rtStop = -1;

	if(nSubPic >= 0 && nSubPic < (int)m_Queue.GetCount())
	{
		if(POSITION pos = m_Queue.FindIndex(nSubPic))
		{
			rtStart = m_Queue.GetAt(pos)->GetStart();
			rtStop = m_Queue.GetAt(pos)->GetStop();
		}
	}
	else
	{
		return E_INVALIDARG;
	}

	return S_OK;
}
// private
// private

// Prune the queue against the current playback time and return the time from
// which the worker should continue buffering. A backwards seek flushes the
// whole queue; otherwise expired subpics are removed (except the one best
// matching "now", which is kept so something stays on screen) and the
// queue's min/max span is recomputed.
REFERENCE_TIME CSubPicQueue::UpdateQueue()
{
	CAutoLock cQueueLock(&m_csQueueLock);

	REFERENCE_TIME rtNow = m_rtNow;
	REFERENCE_TIME rtNowCompare = rtNow;

	if (rtNow < m_rtNowLast)
	{
		// Time moved backwards (seek): everything queued is stale.
		m_Queue.RemoveAll();
		m_rtNowLast = rtNow;
	}
	else
	{
		m_rtNowLast = rtNow;

		// Sentinels; note 0xffffffffffffffffi64 is -1 as a signed 64-bit value.
		m_rtQueueMin = 0x7fffffffffffffffi64;
		m_rtQueueMax = 0xffffffffffffffffi64;

		// Pass 1: find the subpic currently matching rtNow (same criterion as
		// LookupSubPic) so pass 2 can spare it from removal.
		REFERENCE_TIME rtBestStop = 0x7fffffffffffffffi64;
		POSITION SavePos = 0;
		{
			POSITION Iter = m_Queue.GetHeadPosition();
			while(Iter)
			{
				POSITION ThisPos = Iter;
				ISubPic *pSubPic = m_Queue.GetNext(Iter);
				REFERENCE_TIME rtStart = pSubPic->GetStart();
				REFERENCE_TIME rtStop = pSubPic->GetStop();
				REFERENCE_TIME rtSegmentStop = pSubPic->GetSegmentStop();
				if(rtNow >= rtStart && rtNow < rtSegmentStop)
				{
					REFERENCE_TIME Diff = rtNow - rtStop;
					if (Diff < rtBestStop)
					{
						rtBestStop = Diff;
						SavePos = ThisPos;
					}
				}
			}
		}
#if DSubPicTraceLevel > 3
		if (SavePos)
		{
			ISubPic *pSubPic = GetAt(SavePos);
			REFERENCE_TIME rtStart = pSubPic->GetStart();
			REFERENCE_TIME rtStop = pSubPic->GetStop();
			TRACE("Save: %f->%f\n", double(rtStart) / 10000000.0, double(rtStop) / 10000000.0);
		}
#endif
		// Pass 2: drop expired subpics (keeping SavePos), extend rtNow past
		// the newest queued stop, and track the queue's time span.
		{
			POSITION Iter = m_Queue.GetHeadPosition();
			while(Iter)
			{
				POSITION ThisPos = Iter;
				ISubPic *pSubPic = m_Queue.GetNext(Iter);
				REFERENCE_TIME rtStart = pSubPic->GetStart();
				REFERENCE_TIME rtStop = pSubPic->GetStop();

				if (rtStop <= rtNowCompare && ThisPos != SavePos)
				{
#if DSubPicTraceLevel > 0
					TRACE("Remove: %f  %f->%f\n",double(rtNowCompare) / 10000000.0, double(rtStart) / 10000000.0, double(rtStop) / 10000000.0);
#endif
					m_Queue.RemoveAt(ThisPos);
					continue;
				}
				if (rtStop > rtNow)
					rtNow = rtStop;
				m_rtQueueMin = min(m_rtQueueMin, rtStart);
				m_rtQueueMax = max(m_rtQueueMax, rtStop);
			}
		}
	}

	return(rtNow);
}
// Number of subpics currently queued (takes the queue lock).
int CSubPicQueue::GetQueueCount()
{
	CAutoLock lock(&m_csQueueLock);
	int nCount = m_Queue.GetCount();
	return nCount;
}

// Append a freshly rendered subpic to the queue tail (takes the queue lock).
void CSubPicQueue::AppendQueue(ISubPic* pSubPic)
{
	CAutoLock lock(&m_csQueueLock);
	m_Queue.AddTail(pSubPic);
}
// overrides
// overrides

// Worker thread: pre-renders subpics ahead of playback until the queue holds
// m_nMaxSubPic entries or it has buffered one minute ahead. Woken by
// EVENT_TIME (time/fps change or invalidation); exits on any other event.
DWORD CSubPicQueue::ThreadProc()
{
	BOOL bDisableAnim = m_bDisableAnim;
	SetThreadPriority(m_hThread, bDisableAnim ? THREAD_PRIORITY_LOWEST : THREAD_PRIORITY_ABOVE_NORMAL/*THREAD_PRIORITY_BELOW_NORMAL*/);

	bool bAgain = true;   // when true, poll instead of blocking so we keep buffering
	while(1)
	{
		DWORD Ret = WaitForMultipleObjects(EVENT_COUNT, m_ThreadEvents, FALSE, bAgain ? 0 : INFINITE);
		bAgain = false;
		if (Ret == WAIT_TIMEOUT)
			;
		else if ((Ret - WAIT_OBJECT_0) != EVENT_TIME)
			break;   // EVENT_EXIT (or unexpected wait result): shut down
		double fps = m_fps;
		REFERENCE_TIME rtTimePerFrame = max(10000000.0/fps*1.5, 1000000); //1.5 to reduce flick
		REFERENCE_TIME rtNow = UpdateQueue();

		int nMaxSubPic = m_nMaxSubPic;

		CComPtr<ISubPicProvider> pSubPicProvider;
		if(SUCCEEDED(GetSubPicProvider(&pSubPicProvider)) && pSubPicProvider
		&& SUCCEEDED(pSubPicProvider->Lock()) && SUCCEEDED(m_pAllocator->Lock()))
		{
			// Walk upcoming subtitle segments starting at the buffering time.
			for(POSITION pos = pSubPicProvider->GetStartPosition(rtNow, fps);
				pos && !m_fBreakBuffering && GetQueueCount() < (size_t)nMaxSubPic;
				pos = pSubPicProvider->GetNext(pos))
			{
				REFERENCE_TIME rtStart = pSubPicProvider->GetStart(pos, fps);
				REFERENCE_TIME rtStop = pSubPicProvider->GetStop(pos, fps);

				if(m_rtNow >= rtStart)
				{
//					m_fBufferUnderrun = true;
					if(m_rtNow >= rtStop) continue;   // already past this segment
				}

				if(rtStart >= m_rtNow + 60*10000000i64) // we are already one minute ahead, this should be enough
					break;

				if(rtNow < rtStop)
				{
					REFERENCE_TIME rtCurrent = max(rtNow, rtStart);
					bool bIsAnimated = pSubPicProvider->IsAnimated(pos) && !bDisableAnim;
					// Animated segments are rendered frame-by-frame; static
					// ones as a single subpic spanning the whole segment.
					while (rtCurrent < rtStop)
					{
						SIZE	MaxTextureSize, VirtualSize;
						POINT	VirtualTopLeft;
						HRESULT	hr2;
						if (SUCCEEDED (hr2 = pSubPicProvider->GetTextureSize(pos, MaxTextureSize, VirtualSize, VirtualTopLeft)))
							m_pAllocator->SetMaxTextureSize(MaxTextureSize);

						CComPtr<ISubPic> pStatic;
						if(FAILED(m_pAllocator->GetStatic(&pStatic)))
							break;

						HRESULT hr;
						if (bIsAnimated)
						{
							//if (rtCurrent < m_rtNow + rtTimePerFrame)
							//	rtCurrent = min(m_rtNow + rtTimePerFrame, rtStop-1);

							REFERENCE_TIME rtEndThis = min(rtCurrent + rtTimePerFrame, rtStop);
							hr = RenderTo(pStatic, rtCurrent, rtEndThis, fps, bIsAnimated);
							pStatic->SetSegmentStart(rtStart);
							pStatic->SetSegmentStop(rtStop);
#if DSubPicTraceLevel > 0
							CRect r;
							pStatic->GetDirtyRect(&r);
							TRACE("Render: %f %f    %f->%f      %f->%f     %dx%d\n",m_fps ,(double)rtTimePerFrame/ 10000000.0 , double(rtCurrent) / 10000000.0, double(rtEndThis) / 10000000.0, double(rtStart) / 10000000.0, double(rtStop) / 10000000.0, r.Width(), r.Height());
#endif
							rtCurrent = rtEndThis;

						}
						else
						{
							hr = RenderTo(pStatic, rtStart, rtStop, fps, bIsAnimated);
							rtCurrent = rtStop;
						}
#if DSubPicTraceLevel > 0
						if (m_rtNow > rtCurrent)
						{
							TRACE("BEHIND\n");
						}
#endif

						if(FAILED(hr))
							break;

						if(S_OK != hr) // subpic was probably empty
							continue;

						// Copy the static scratch surface into a dynamic
						// subpic that can live in the queue.
						CComPtr<ISubPic> pDynamic;
						if(FAILED(m_pAllocator->AllocDynamic(&pDynamic))
						|| FAILED(pStatic->CopyTo(pDynamic)))
							break;

						if (SUCCEEDED (hr2))
							pDynamic->SetVirtualTextureSize (VirtualSize, VirtualTopLeft);

						AppendQueue(pDynamic);
						bAgain = true;

						if (GetQueueCount() >= (size_t)nMaxSubPic)
							break;
					}
				}
			}

			pSubPicProvider->Unlock();
			m_pAllocator->Unlock();
		}

		// Invalidation requested while buffering: drop subpics that end after
		// the invalidation point, then resume.
		if(m_fBreakBuffering)
		{
			bAgain = true;
			CAutoLock cQueueLock(&m_csQueueLock);

			REFERENCE_TIME rtInvalidate = m_rtInvalidate;

			POSITION Iter = m_Queue.GetHeadPosition();
			while(Iter)
			{
				POSITION ThisPos = Iter;
				ISubPic *pSubPic = m_Queue.GetNext(Iter);

				REFERENCE_TIME rtStart = pSubPic->GetStart();
				REFERENCE_TIME rtStop = pSubPic->GetStop();

				if (rtStop > rtInvalidate)
				{
#if DSubPicTraceLevel >= 0
					TRACE(("Removed subtitle because of invalidation: %f->%f\n"), double(rtStart) / 10000000.0, double(rtStop) / 10000000.0);
#endif
					m_Queue.RemoveAt(ThisPos);
					continue;
				}
			}

/*
			while(GetCount() && GetTail()->GetStop() > rtInvalidate)
			{
				if(GetTail()->GetStart() < rtInvalidate) GetTail()->SetStop(rtInvalidate);
				else
				{
					RemoveTail();
				}
			}
*/

			m_fBreakBuffering = false;
		}
	}

	return(0);
}
//
// CSubPicQueueNoThread
//
// CSubPicQueueNoThread: synchronous single-subpic "queue" used when
// background buffering is not wanted; renders on demand in LookupSubPic.
CSubPicQueueNoThread::CSubPicQueueNoThread(ISubPicAllocator* pAllocator, HRESULT* phr)
	: ISubPicQueueImpl(pAllocator, phr)
{
}

CSubPicQueueNoThread::~CSubPicQueueNoThread()
{
}

// ISubPicQueue

// Drop the cached subpic so the next lookup re-renders (rtInvalidate unused).
STDMETHODIMP CSubPicQueueNoThread::Invalidate(REFERENCE_TIME rtInvalidate)
{
	CAutoLock cQueueLock(&m_csLock);

	m_pSubPic = NULL;

	return S_OK;
}
// Return the subpic for rtNow, re-rendering the single cached subpic
// synchronously when the cached one does not cover the requested time.
// Animated subtitles are pinned to a one-tick interval so each call redraws.
STDMETHODIMP_(bool) CSubPicQueueNoThread::LookupSubPic(REFERENCE_TIME rtNow, CComPtr<ISubPic> &ppSubPic)
{
	CComPtr<ISubPic> pSubPic;

	{
		CAutoLock cAutoLock(&m_csLock);

		if(!m_pSubPic)
		{
			if(FAILED(m_pAllocator->AllocDynamic(&m_pSubPic)))
				return(false);
		}

		pSubPic = m_pSubPic;
	}

	if(pSubPic->GetStart() <= rtNow && rtNow < pSubPic->GetStop())
	{
		// Cached subpic still valid for this time.
		ppSubPic = pSubPic;
	}
	else
	{
		CComPtr<ISubPicProvider> pSubPicProvider;
		if(SUCCEEDED(GetSubPicProvider(&pSubPicProvider)) && pSubPicProvider
		&& SUCCEEDED(pSubPicProvider->Lock()))
		{
			double fps = m_fps;

			if(POSITION pos = pSubPicProvider->GetStartPosition(rtNow, fps))
			{
				REFERENCE_TIME rtStart = pSubPicProvider->GetStart(pos, fps);
				REFERENCE_TIME rtStop = pSubPicProvider->GetStop(pos, fps);

				if(pSubPicProvider->IsAnimated(pos))
				{
					// Force per-call re-render for animated content.
					rtStart = rtNow;
					rtStop = rtNow+1;
				}

				if(rtStart <= rtNow && rtNow < rtStop)
				{
					SIZE	MaxTextureSize, VirtualSize;
					POINT	VirtualTopLeft;
					HRESULT	hr2;
					if (SUCCEEDED (hr2 = pSubPicProvider->GetTextureSize(pos, MaxTextureSize, VirtualSize, VirtualTopLeft)))
						m_pAllocator->SetMaxTextureSize(MaxTextureSize);

					if(m_pAllocator->IsDynamicWriteOnly())
					{
						// Dynamic surface can't be read: render into the
						// static scratch surface, then copy.
						CComPtr<ISubPic> pStatic;
						if(SUCCEEDED(m_pAllocator->GetStatic(&pStatic))
						&& SUCCEEDED(RenderTo(pStatic, rtStart, rtStop, fps, false))
						&& SUCCEEDED(pStatic->CopyTo(pSubPic)))
							ppSubPic = pSubPic;
					}
					else
					{
						if(SUCCEEDED(RenderTo(m_pSubPic, rtStart, rtStop, fps, false)))
							ppSubPic = pSubPic;
					}

					if (SUCCEEDED(hr2))
						pSubPic->SetVirtualTextureSize (VirtualSize, VirtualTopLeft);
				}
			}

			pSubPicProvider->Unlock();

			if(ppSubPic)
			{
				CAutoLock cAutoLock(&m_csLock);

				m_pSubPic = ppSubPic;
			}
		}
	}

	return(!!ppSubPic);
}
// Stats for the single cached subpic: 0 or 1 entries.
STDMETHODIMP CSubPicQueueNoThread::GetStats(int& nSubPics, REFERENCE_TIME& rtNow, REFERENCE_TIME& rtStart, REFERENCE_TIME& rtStop)
{
	CAutoLock cAutoLock(&m_csLock);

	nSubPics = 0;
	rtNow = m_rtNow;
	rtStart = rtStop = 0;

	if(m_pSubPic)
	{
		nSubPics = 1;
		rtStart = m_pSubPic->GetStart();
		rtStop = m_pSubPic->GetStop();
	}

	return S_OK;
}

// Only index 0 is valid (there is at most one subpic).
STDMETHODIMP CSubPicQueueNoThread::GetStats(int nSubPic, REFERENCE_TIME& rtStart, REFERENCE_TIME& rtStop)
{
	CAutoLock cAutoLock(&m_csLock);

	if(!m_pSubPic || nSubPic != 0)
		return E_INVALIDARG;

	rtStart = m_pSubPic->GetStart();
	rtStop = m_pSubPic->GetStop();

	return S_OK;
}
//
// ISubPicAllocatorPresenterImpl
//
// ISubPicAllocatorPresenterImpl: base presenter managing two subtitle queues
// (primary + secondary) with independent delays. Fails construction with
// E_INVALIDARG when hWnd is not a valid window.
ISubPicAllocatorPresenterImpl::ISubPicAllocatorPresenterImpl(HWND hWnd, HRESULT& hr)
	: CUnknown(NAME("ISubPicAllocatorPresenterImpl"), NULL)
	, m_hWnd(hWnd)
	, m_NativeVideoSize(0, 0), m_AspectRatio(0, 0)
	, m_VideoRect(0, 0, 0, 0), m_WindowRect(0, 0, 0, 0)
	, m_fps(25.0)
	, m_lSubtitleDelay(0), m_lSubtitleDelay2(0)
	, m_pSubPicQueue(NULL) , m_pSubPicQueue2(NULL)
{
	if(!IsWindow(m_hWnd)) {hr = E_INVALIDARG; return;}
	GetWindowRect(m_hWnd, &m_WindowRect);
	SetVideoAngle(Vector(), false);   // identity rotation, no repaint yet
	hr = S_OK;
}

ISubPicAllocatorPresenterImpl::~ISubPicAllocatorPresenterImpl()
{
}
// QI chain exposing the presenter interfaces (v1, render, v2).
STDMETHODIMP ISubPicAllocatorPresenterImpl::NonDelegatingQueryInterface(REFIID riid, void** ppv)
{
	//SVP_LogMsg5(L"ISubPic %s" , CStringFromGUID(riid));
	return
		QI(ISubPicAllocatorPresenter)
		QI(ISubPicAllocatorPresenterRender)
		QI(ISubPicAllocatorPresenter2)
		__super::NonDelegatingQueryInterface(riid, ppv);
}
// Blend the current subpic(s) from both queues onto pTarget at their
// respective delayed times, letting the dual-subtitle helper reposition the
// two destination rects so they do not overlap.
void ISubPicAllocatorPresenterImpl::AlphaBltSubPic(CSize size, SubPicDesc* pTarget)
{
	// Reserve space at the bottom for the configured subtitle offset.
	size.cy -= AfxGetMyApp()->GetBottomSubOffset();
	CComPtr<ISubPic> pSubPic;
	CComPtr<ISubPic> pSubPic2;
	BOOL bltSub1 = false, bltSub2 = false;
	CRect rcSource1, rcSource2, rcDest1, rcDest2;
#ifdef LOGSUBRECT
	CString szD1, szD2;
#endif
	if(m_pSubPicQueue->LookupSubPic(m_rtNow, pSubPic))
	{
		if (SUCCEEDED (pSubPic->GetSourceAndDest(&size, rcSource1, rcDest1))){
			//pSubPic->AlphaBlt(rcSource, rcDest, pTarget);
			bltSub1 = true;
		}
#ifdef LOGSUBRECT
		UINT iTotalLenSec = (UINT)( (INT64) m_rtNow  / 10000000 );
		szD1.Format(_T(" sub1 size %d %d , source %d %d %d %d , dest %d %d %d %d , time %d:%02d") ,
			size.cx, size.cy, rcSource1.top, rcSource1.right, rcSource1.bottom, rcSource1.left,
			rcDest1.top, rcDest1.right, rcDest1.bottom, rcDest1.left,(int)iTotalLenSec/60, iTotalLenSec % 60 );
#endif
	}
	if(m_pSubPicQueue2->LookupSubPic(m_rtNow2, pSubPic2))
	{
		if (SUCCEEDED (pSubPic2->GetSourceAndDest(&size, rcSource2, rcDest2))){
			bltSub2 = true;
		}
#ifdef LOGSUBRECT
		UINT iTotalLenSec = (UINT)( (INT64) m_rtNow2  / 10000000 );
		szD2.Format(_T(" sub2 size %d %d , source %d %d %d %d , dest %d %d %d %d , time %d:%02d") ,
			size.cx, size.cy, rcSource2.top, rcSource2.right, rcSource2.bottom, rcSource2.left,
			rcDest2.top, rcDest2.right, rcDest2.bottom, rcDest2.left,(int)iTotalLenSec/60, iTotalLenSec % 60 );
#endif
	}
	// Adjust rcDest1/rcDest2 in place for dual-subtitle layout.
	m_sublib2.CalcDualSubPosisiton(bltSub1 , bltSub2 , rcDest1 , rcDest2 , size , !!pSubPic, !!pSubPic2) ;
	if(bltSub1)
		pSubPic->AlphaBlt(rcSource1, rcDest1, pTarget);
	if(bltSub2){
		pSubPic2->AlphaBlt(rcSource2, rcDest2, pTarget);
	}
#ifdef LOGSUBRECT
	if(bltSub1 || bltSub2){
		SVP_LogMsg(szD1 + szD2);
	}
#endif
}
// ISubPicAllocatorPresenter
// ISubPicAllocatorPresenter

// Native video size, with the width optionally corrected for the stream's
// display aspect ratio (only applied when a valid AR is known).
STDMETHODIMP_(SIZE) ISubPicAllocatorPresenterImpl::GetVideoSize(bool fCorrectAR)
{
	CSize VideoSize(m_NativeVideoSize);

	if(fCorrectAR)
	{
		if(m_AspectRatio.cx > 0 && m_AspectRatio.cy > 0)
			VideoSize.cx = VideoSize.cy*m_AspectRatio.cx/m_AspectRatio.cy;
	}

	return VideoSize;
}
// Update the window and video rectangles. On a size change, the allocators
// are resized and both queues invalidated; on any position/rect change the
// frame is repainted.
STDMETHODIMP_(void) ISubPicAllocatorPresenterImpl::SetPosition(RECT w, RECT v)
{
	bool fWindowPosChanged = !!(m_WindowRect != w);
	bool fWindowSizeChanged = !!(m_WindowRect.Size() != CRect(w).Size());

	m_WindowRect = w;

	bool fVideoRectChanged = !!(m_VideoRect != v);

	m_VideoRect = v;

	if(fWindowSizeChanged || fVideoRectChanged)
	{
		if(m_pAllocator)
		{
			m_pAllocator->SetCurSize(m_WindowRect.Size());
			m_pAllocator->SetCurVidRect(m_VideoRect);
		}
		if(m_pAllocator2)
		{
			m_pAllocator2->SetCurSize(m_WindowRect.Size());
			m_pAllocator2->SetCurVidRect(m_VideoRect);
		}

		if(m_pSubPicQueue)
		{
			m_pSubPicQueue->Invalidate();
		}
		if(m_pSubPicQueue2)
		{
			m_pSubPicQueue2->Invalidate();
		}
	}
	//CString szLog;
	//szLog.Format(_T("WVSize %d %d %d %d "), m_WindowRect.Width(), m_WindowRect.Height(), m_VideoRect.Width(), m_VideoRect.Height());
	//SVP_LogMsg(szLog);

	if(fWindowPosChanged || fVideoRectChanged)
		Paint(fWindowSizeChanged || fVideoRectChanged);

	// Reset the forced dual-subtitle positions after a layout change.
	m_sublib2.ResSetForcePos();
}
// Forward queue statistics from the primary subtitle queue (no-op when the
// queue does not exist; output parameters are then left untouched).
STDMETHODIMP ISubPicAllocatorPresenterImpl::GetSubStats(int& nSubPics, REFERENCE_TIME& rtNow, REFERENCE_TIME& rtStart, REFERENCE_TIME& rtStop)
{
	if(m_pSubPicQueue)
	{
		//SVP_LogMsg5(L" SetTime1 %f " ,(double) rtNow- m_SubtitleDelay);
		m_pSubPicQueue->GetStats(nSubPics, rtNow, rtStart ,rtStop);
	}
	return S_OK;
}

// Propagate the playback clock to both queues, applying each queue's own
// subtitle delay (stored in 100 ns units).
STDMETHODIMP_(void) ISubPicAllocatorPresenterImpl::SetTime(REFERENCE_TIME rtNow)
{
/*
	if(m_rtNow <= rtNow && rtNow <= m_rtNow + 1000000)
		return;
*/

	m_rtNow = rtNow - m_lSubtitleDelay;
	m_rtNow2 = rtNow - m_lSubtitleDelay2;

	if(m_pSubPicQueue)
	{
		m_pSubPicQueue->SetTime(m_rtNow);
	}
	if(m_pSubPicQueue2)
	{
		m_pSubPicQueue2->SetTime(m_rtNow2);
	}
}
// Subtitle delays are exposed to callers in milliseconds but stored in
// 100 ns REFERENCE_TIME units (1 ms == 10000 ticks).

STDMETHODIMP_(void) ISubPicAllocatorPresenterImpl::SetSubtitleDelay(int delay_ms)
{
	m_lSubtitleDelay = 10000*delay_ms;
}

STDMETHODIMP_(int) ISubPicAllocatorPresenterImpl::GetSubtitleDelay()
{
	int delay_ms = m_lSubtitleDelay/10000;
	return delay_ms;
}

STDMETHODIMP_(void) ISubPicAllocatorPresenterImpl::SetSubtitleDelay2(int delay_ms)
{
	m_lSubtitleDelay2 = 10000*delay_ms;
}

STDMETHODIMP_(int) ISubPicAllocatorPresenterImpl::GetSubtitleDelay2()
{
	int delay_ms = m_lSubtitleDelay2/10000;
	return delay_ms;
}

// Current video frame rate as reported to the presenter.
STDMETHODIMP_(double) ISubPicAllocatorPresenterImpl::GetFPS()
{
	return m_fps;
}
// Install the primary subtitle provider and forward it to queue 1.
STDMETHODIMP_(void) ISubPicAllocatorPresenterImpl::SetSubPicProvider(ISubPicProvider* pSubPicProvider)
{
	m_SubPicProvider = pSubPicProvider;
	m_sublib2.ResSetForcePos();
	if(m_pSubPicQueue)
		m_pSubPicQueue->SetSubPicProvider(pSubPicProvider);
}

// Install the secondary subtitle provider and forward it to queue 2.
STDMETHODIMP_(void) ISubPicAllocatorPresenterImpl::SetSubPicProvider2(ISubPicProvider* pSubPicProvider)
{
	m_SubPicProvider2 = pSubPicProvider;
	m_sublib2.ResSetForcePos();
	if(m_pSubPicQueue2)
		m_pSubPicQueue2->SetSubPicProvider(pSubPicProvider);
}

// Invalidate both queues from rtInvalidate onward.
STDMETHODIMP_(void) ISubPicAllocatorPresenterImpl::Invalidate(REFERENCE_TIME rtInvalidate)
{
	m_sublib2.ResSetForcePos();
	if(m_pSubPicQueue)
		m_pSubPicQueue->Invalidate(rtInvalidate);
	if(m_pSubPicQueue2)
		m_pSubPicQueue2->Invalidate(rtInvalidate);
}
#include <math.h>
// Transform the four corners of rect r by the current video-angle transform
// (m_xform), applying a simple perspective divide around the rect's center.
// v[] receives the transformed corners in TL, TR, BL, BR order.
void ISubPicAllocatorPresenterImpl::Transform(CRect r, Vector v[4])
{
	v[0] = Vector(r.left, r.top, 0);
	v[1] = Vector(r.right, r.top, 0);
	v[2] = Vector(r.left, r.bottom, 0);
	v[3] = Vector(r.right, r.bottom, 0);

	Vector center(r.CenterPoint().x, r.CenterPoint().y, 0);
	// l: 1.5x the rect diagonal, used to normalize depth for the divide.
	int l = (int)(Vector(r.Size().cx, r.Size().cy, 0).Length()*1.5f)+1;

	for(int i = 0; i < 4; i++)
	{
		// Rotate about the center, then project.
		v[i] = m_xform << (v[i] - center);
		v[i].z = v[i].z / l + 0.5f;
		v[i].x /= v[i].z*2;
		v[i].y /= v[i].z*2;
		//CString szLog;
		//szLog.Format(_T("%f %f %f %f %f %f"), l + 0.5f , v[i].z*2 , v[i].z*2 , v[i].x , v[i].y , v[i].z);
		//SVP_LogMsg(szLog);
		v[i] += center;
	}
}
// Set the video rotation angles (as a Vector of Euler angles) used by
// Transform(); optionally triggers an immediate repaint.
STDMETHODIMP ISubPicAllocatorPresenterImpl::SetVideoAngle(Vector v, bool fRepaint)
{
	m_xform = XForm(Ray(Vector(0, 0, 0), v), Vector(1, 1, 1), false);
	if(fRepaint) Paint(true);
	return S_OK;
}
| gpl-2.0 |
W4TCH0UT/zz_lettuce | net/ipv4/inet_hashtables.c | 1554 | 15971 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Generic INET transport hashtables
*
* Authors: Lotsa people, from code originally in tcp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
/*
* Allocate and initialize a new local port bind bucket.
* The bindhash mutex for snum's hash chain must be held here.
*/
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
		struct net *net,
		struct inet_bind_hashbucket *head,
		const unsigned short snum)
{
	/* GFP_ATOMIC: we are called with the bind-hash chain lock held. */
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
	if (tb != NULL) {
		write_pnet(&tb->ib_net, hold_net(net));
		tb->port = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		tb->num_owners = 0;
		INIT_HLIST_HEAD(&tb->owners);
		/* Make the new bucket visible on this hash chain. */
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;	/* NULL on allocation failure */
}
/*
* Caller must hold hashbucket lock for this tb with local BH disabled
*/
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	/* Only tear the bucket down once no socket owns the port;
	 * otherwise it stays on the chain for the remaining owners. */
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		release_net(ib_net(tb));
		kmem_cache_free(cachep, tb);
	}
}
/*
 * Bind sk to local port snum: record the port number in the socket,
 * link sk onto tb's owner list and remember tb in icsk_bind_hash.
 * Callers in this file hold the bind-hash bucket lock.
 */
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		const unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	atomic_inc(&hashinfo->bsockets);
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	tb->num_owners++;
	inet_csk(sk)->icsk_bind_hash = tb;
}
/*
* Get rid of any references to a local port held by the given sock.
*/
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;
	atomic_dec(&hashinfo->bsockets);
	spin_lock(&head->lock);
	/* Unlink sk from its bind bucket and clear the port binding. */
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	tb->num_owners--;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	/* Frees tb if sk was its last owner. */
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}
/* Public wrapper: release sk's local port with bottom halves disabled,
 * as required by the bucket-lock usage in __inet_put_port(). */
void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);
/*
 * Give an accepted child socket a bind bucket for its local port.
 * Normally the child inherits the listener's bucket; with tproxy-style
 * redirection the ports can differ, in which case we look up or create
 * a bucket for the child's own port (see inline comment).
 * Returns 0 on success or -ENOMEM if a new bucket cannot be allocated.
 */
int __inet_inherit_port(struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;
	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
/*
 * Score a listening socket against (net, hnum, daddr, dif).
 * -1 means "does not match". A matching socket starts at 2 for an
 * AF_INET socket (1 otherwise) and gains 4 for each exact match on
 * the bound local address and the bound device.
 */
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif)
{
	struct inet_sock *inet = inet_sk(sk);
	int score;

	/* Guard clauses: wrong namespace, wrong port, or v6-only. */
	if (!net_eq(sock_net(sk), net) || inet->inet_num != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}
	if (sk->sk_bound_dev_if) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}
	return score;
}
/*
* Don't inline this cruft. Here are some nice properties to exploit here. The
* BSD API does not allow a listening sock to specify the remote port nor the
* remote address for the connection. So always assume those are both
* wildcarded during the search since they can never be otherwise.
*/
struct sock *__inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		const __be32 saddr, __be16 sport,
		const __be32 daddr, const unsigned short hnum,
		const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore, matches = 0, reuseport = 0;
	u32 phash = 0;
	rcu_read_lock();
begin:
	result = NULL;
	hiscore = 0;
	/* Lockless walk of the listening chain under RCU. */
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			result = sk;
			hiscore = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				matches = 1;
			}
		} else if (score == hiscore && reuseport) {
			/* SO_REUSEPORT: pick one of the equally scored
			 * sockets pseudo-randomly from the flow hash. */
			matches++;
			if (((u64)phash * matches) >> 32 == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
		/* Take a reference; recheck the score in case the socket
		 * was recycled while we were not holding a reference. */
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
				  dif) < hiscore)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
/*
 * Lockless (RCU) lookup of an established or TIME_WAIT socket by its
 * 4-tuple. Returns the socket with an elevated refcount, or NULL.
 */
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			/* Refcount may hit zero if the socket is being
			 * freed; fall through to the TIME_WAIT chain. */
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto begintw;
			/* Re-validate after taking the reference. */
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_put(sk);
				goto begin;
			}
			goto out;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
begintw:
	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_nulls_for_each_rcu(sk, node, &head->twchain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_TW_MATCH(sk, net, acookie,
					 saddr, daddr, ports,
					 dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
				sk = NULL;
				goto out;
			}
			if (unlikely(!INET_TW_MATCH(sk, net, acookie,
						    saddr, daddr, ports,
						    dif))) {
				inet_twsk_put(inet_twsk(sk));
				goto begintw;
			}
			goto out;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begintw;
	sk = NULL;
out:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
/* called with local bh disabled */
/* called with local bh disabled */
/*
 * Verify that binding sk to local port lport yields a unique 4-tuple.
 * On success the socket is inserted into the established hash under
 * the bucket lock and 0 is returned; a colliding TIME_WAIT socket may
 * be recycled (twsk_unique) and is either handed back via *twp or
 * descheduled here. Returns -EADDRNOTAVAIL on a real collision.
 */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	/* Note the swap: our local address is the remote's destination. */
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw;
	int twrefcnt = 0;
	spin_lock(lock);
	/* Check TIME-WAIT sockets first. */
	sk_nulls_for_each(sk2, node, &head->twchain) {
		if (sk2->sk_hash != hash)
			continue;
		if (likely(INET_TW_MATCH(sk2, net, acookie,
					 saddr, daddr, ports, dif))) {
			tw = inet_twsk(sk2);
			if (twsk_unique(sk, sk2, twp))
				goto unique;
			else
				goto not_unique;
		}
	}
	tw = NULL;
	/* And established part... */
	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk2, net, acookie,
				      saddr, daddr, ports, dif)))
			goto not_unique;
	}
unique:
	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity. */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		/* Recycled a TIME_WAIT slot: unhash it now. */
		twrefcnt = inet_twsk_unhash(tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	if (twrefcnt)
		inet_twsk_put(tw);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	if (twp) {
		/* Caller takes ownership of the timewait socket. */
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);
		inet_twsk_put(tw);
	}
	return 0;
not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}
/* Per-destination offset into the ephemeral port range, derived from
 * the connection's address/port triple so different destinations probe
 * the port space in different orders. */
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}
/*
 * Insert a non-listening socket into the established hash table.
 * If tw is a TIME_WAIT socket occupying the same slot, unhash it under
 * the same bucket lock and return the number of references the caller
 * must drop on it.
 */
int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	struct inet_ehash_bucket *head;
	int twrefcnt = 0;
	WARN_ON(!sk_unhashed(sk));
	sk->sk_hash = inet_sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	spin_lock(lock);
	__sk_nulls_add_node_rcu(sk, list);
	if (tw) {
		/* sk replaces tw, so they must hash to the same bucket. */
		WARN_ON(sk->sk_hash != tw->tw_hash);
		twrefcnt = inet_twsk_unhash(tw);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
/* Hash sk into the listening table (TCP_LISTEN) or, for any other
 * state, into the established table via __inet_hash_nolisten(). */
static void __inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;
	if (sk->sk_state != TCP_LISTEN) {
		__inet_hash_nolisten(sk, NULL);
		return;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
	spin_lock(&ilb->lock);
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	spin_unlock(&ilb->lock);
}
/* Public entry: hash sk unless it is closed, with BHs disabled around
 * the bucket-lock work in __inet_hash(). */
void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);
/* Remove sk from whichever hash table it is in (listening or
 * established), adjusting the per-protocol inuse counter only if the
 * socket was actually on a chain. */
void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;
	if (sk_unhashed(sk))
		return;
	/* Pick the lock matching the table the socket lives in. */
	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	spin_lock_bh(lock);
	done =__sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);
/*
 * Bind sk to a local port for an outgoing connection and hash it.
 * If the socket already has a port (snum != 0) only a uniqueness check
 * may be needed; otherwise search the ephemeral range, starting at an
 * offset derived from port_offset, for a port whose 4-tuple is unique
 * (possibly recycling a TIME_WAIT socket). check_established and hash
 * are supplied by the caller (IPv4 vs IPv6 variants).
 */
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
		int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->inet_num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);
	int twrefcnt = 1;
	if (!snum) {
		int i, remaining, low, high, port;
		/* hint makes successive searches start where the last
		 * one left off, spreading port usage. */
		static u32 hint;
		u32 offset = hint + port_offset;
		struct inet_timewait_sock *tw = NULL;
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		local_bh_disable();
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			if (inet_is_reserved_local_port(port))
				continue;
			head = &hinfo->bhash[inet_bhashfn(net, port,
					hinfo->bhash_size)];
			spin_lock(&head->lock);
			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, &head->chain) {
				if (net_eq(ib_net(tb), net) &&
				    tb->port == port) {
					/* Port in use by a bind(); only
					 * usable if marked connect-only
					 * (fastreuse == -1). */
					if (tb->fastreuse >= 0 ||
					    tb->fastreuseport >= 0)
						goto next_port;
					WARN_ON(hlist_empty(&tb->owners));
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}
			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			/* -1 marks the bucket as connect-only. */
			tb->fastreuse = -1;
			tb->fastreuseport = -1;
			goto ok;
		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();
		return -EADDRNOTAVAIL;
ok:
		hint += i;
		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->inet_sport = htons(port);
			twrefcnt += hash(sk, tw);
		}
		if (tw)
			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
		spin_unlock(&head->lock);
		if (tw) {
			/* Finish recycling the displaced TIME_WAIT sock. */
			inet_twsk_deschedule(tw, death_row);
			while (twrefcnt) {
				twrefcnt--;
				inet_twsk_put(tw);
			}
		}
		ret = 0;
		goto out;
	}
	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	/* Sole owner of the bucket: no collision is possible. */
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		hash(sk, NULL);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
/*
* Bind a port for a connect operation and hash it.
*/
/* IPv4 instantiation of __inet_hash_connect(): plugs in the v4
 * uniqueness check and established-hash insertion helpers. */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
			__inet_check_established, __inet_hash_nolisten);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);
/* Initialise an inet_hashinfo: zero the bound-socket counter and set
 * up each listening-hash chain with its per-chain lock and a distinct
 * nulls end-marker (used by lookups to detect chain moves). */
void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;
	atomic_set(&h->bsockets, 0);
	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
| gpl-2.0 |
Alberto96/android_kernel_samsung_tassve | arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 1810 | 5810 | /*
* mpc7448_hpc2.c
*
* Board setup routines for the Freescale mpc7448hpc2(taiga) platform
*
* Author: Jacob Pan
* jacob.pan@freescale.com
* Author: Xianghua Xiao
* x.xiao@freescale.com
* Maintainer: Roy Zang <tie-fei.zang@freescale.com>
* Add Flat Device Tree support fot mpc7448hpc2 board
*
* Copyright 2004-2006 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/tsi108.h>
#include <asm/pci-bridge.h>
#include <asm/reg.h>
#include <mm/mmu_decl.h>
#include <asm/tsi108_pci.h>
#include <asm/tsi108_irq.h>
#include <asm/mpic.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif
#define MPC7448HPC2_PCI_CFG_PHYS 0xfb000000
/*
 * PCI exclusion hook: hide bus 0, slot 0 (the tsi108 host bridge
 * itself) from PCI config-space accesses; everything else is visible.
 */
int mpc7448_hpc2_exclude_device(struct pci_controller *hose,
				u_char bus, u_char devfn)
{
	return (bus == 0 && PCI_SLOT(devfn) == 0) ?
		PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
/* Board setup: map the tsi108 CSR space, bring up the PCI host bridge
 * from the device tree (CONFIG_PCI) and print banner messages. */
static void __init mpc7448_hpc2_setup_arch(void)
{
	struct device_node *np;
	if (ppc_md.progress)
		ppc_md.progress("mpc7448_hpc2_setup_arch():set_bridge", 0);
	tsi108_csr_vir_base = get_vir_csrbase();
	/* setup PCI host bridge */
#ifdef CONFIG_PCI
	for_each_compatible_node(np, "pci", "tsi108-pci")
		tsi108_setup_pci(np, MPC7448HPC2_PCI_CFG_PHYS, 0);
	ppc_md.pci_exclude_device = mpc7448_hpc2_exclude_device;
	if (ppc_md.progress)
		ppc_md.progress("tsi108: resources set", 0x100);
#endif
	printk(KERN_INFO "MPC7448HPC2 (TAIGA) Platform\n");
	printk(KERN_INFO
		"Jointly ported by Freescale and Tundra Semiconductor\n");
	printk(KERN_INFO
		"Enabling L2 cache then enabling the HID0 prefetch engine.\n");
}
/*
* Interrupt setup and service. Interrupts on the mpc7448_hpc2 come
* from the four external INT pins, PCI interrupts are routed via
* PCI interrupt control registers, it generates internal IRQ23
*
* Interrupt routing on the Taiga Board:
* TSI108:PB_INT[0] -> CPU0:INT#
* TSI108:PB_INT[1] -> CPU0:MCP#
* TSI108:PB_INT[2] -> N/C
* TSI108:PB_INT[3] -> N/C
*/
static void __init mpc7448_hpc2_init_IRQ(void)
{
	struct mpic *mpic;
	phys_addr_t mpic_paddr = 0;
	struct device_node *tsi_pic;
#ifdef CONFIG_PCI
	unsigned int cascade_pci_irq;
	struct device_node *tsi_pci;
	struct device_node *cascade_node = NULL;
#endif
	/* Find the tsi108 open-pic node and translate its "reg" to a
	 * physical address for the MPIC registers. */
	tsi_pic = of_find_node_by_type(NULL, "open-pic");
	if (tsi_pic) {
		unsigned int size;
		const void *prop = of_get_property(tsi_pic, "reg", &size);
		mpic_paddr = of_translate_address(tsi_pic, prop);
	}
	if (mpic_paddr == 0) {
		printk("%s: No tsi108 PIC found !\n", __func__);
		return;
	}
	DBG("%s: tsi108 pic phys_addr = 0x%x\n", __func__,
	    (u32) mpic_paddr);
	mpic = mpic_alloc(tsi_pic, mpic_paddr,
			MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
			MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
			24,
			NR_IRQS-4, /* num_sources used */
			"Tsi108_PIC");
	BUG_ON(mpic == NULL);
	mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
	mpic_init(mpic);
#ifdef CONFIG_PCI
	/* Chain the tsi108 PCI interrupt router behind the MPIC so PCI
	 * INTx lines are demultiplexed by tsi108_irq_cascade(). */
	tsi_pci = of_find_node_by_type(NULL, "pci");
	if (tsi_pci == NULL) {
		printk("%s: No tsi108 pci node found !\n", __func__);
		return;
	}
	cascade_node = of_find_node_by_type(NULL, "pic-router");
	if (cascade_node == NULL) {
		printk("%s: No tsi108 pci cascade node found !\n", __func__);
		return;
	}
	cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
	DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__,
	    (u32) cascade_pci_irq);
	tsi108_pci_int_init(cascade_node);
	set_irq_data(cascade_pci_irq, mpic);
	set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
#endif
	/* Configure MPIC outputs to CPU0 */
	tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
	of_node_put(tsi_pic);
}
/* /proc/cpuinfo hook: report the board vendor. */
void mpc7448_hpc2_show_cpuinfo(struct seq_file *m)
{
	seq_printf(m, "vendor\t\t: Freescale Semiconductor\n");
}
/* Restart hook: point exception vectors at firmware and spin until
 * the (externally triggered) reset takes effect. */
void mpc7448_hpc2_restart(char *cmd)
{
	local_irq_disable();
	/* Set exception prefix high - to the firmware */
	_nmask_and_or_msr(0, MSR_IP);
	for (;;) ;	/* Spin until reset happens */
}
/* Power-off hook: the board has no software power control, so just
 * disable interrupts and spin forever. */
void mpc7448_hpc2_power_off(void)
{
	local_irq_disable();
	for (;;) ;	/* No way to shut power off with software */
}
/* Halt hook: identical to power-off on this board. */
void mpc7448_hpc2_halt(void)
{
	mpc7448_hpc2_power_off();
}
/*
* Called very early, device-tree isn't unflattened
*/
/*
 * Called very early, device-tree isn't unflattened.
 * Claim this machine if the flat tree root is compatible "mpc74xx".
 */
static int __init mpc7448_hpc2_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
	if (!of_flat_dt_is_compatible(root, "mpc74xx"))
		return 0;
	return 1;
}
/* Machine-check handler: if the faulting instruction has a fixup
 * entry (e.g. a PCI config access that aborted), clear the tsi108
 * error state and resume at the fixup address. Returns 1 if handled. */
static int mpc7448_machine_check_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;
	/* Are we prepared to handle this fault */
	if ((entry = search_exception_tables(regs->nip)) != NULL) {
		tsi108_clear_pci_cfg_error();
		regs->msr |= MSR_RI;
		regs->nip = entry->fixup;
		return 1;
	}
	return 0;
}
/* Machine description: wires the hooks above into the ppc_md machine
 * vector for boards matched by mpc7448_hpc2_probe(). */
define_machine(mpc7448_hpc2){
	.name 			= "MPC7448 HPC2",
	.probe 			= mpc7448_hpc2_probe,
	.setup_arch 		= mpc7448_hpc2_setup_arch,
	.init_IRQ 		= mpc7448_hpc2_init_IRQ,
	.show_cpuinfo		= mpc7448_hpc2_show_cpuinfo,
	.get_irq		= mpic_get_irq,
	.restart		= mpc7448_hpc2_restart,
	.calibrate_decr		= generic_calibrate_decr,
	.machine_check_exception= mpc7448_machine_check_exception,
	.progress		= udbg_progress,
};
| gpl-2.0 |
djmax81/android_kernel_samsung_exynos5433_LL | drivers/xen/xenbus/xenbus_probe.c | 2066 | 18874 | /******************************************************************************
* Talks to Xen Store to figure out what devices we have.
*
* Copyright (C) 2005 Rusty Russell, IBM Corporation
* Copyright (C) 2005 Mike Wray, Hewlett-Packard
* Copyright (C) 2005, 2006 XenSource Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#define DPRINTK(fmt, args...) \
pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
__func__, __LINE__, ##args)
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include "xenbus_comms.h"
#include "xenbus_probe.h"
int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);
struct xenstore_domain_interface *xen_store_interface;
EXPORT_SYMBOL_GPL(xen_store_interface);
enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);
static unsigned long xen_store_mfn;
static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
/* If something in array of ids matches this device, return it. */
/*
 * Scan a driver's id table (terminated by an entry whose devicetype is
 * the empty string) for an entry whose devicetype matches the device's.
 * Returns the matching entry, or NULL if none matches.
 */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
{
	while (*arr->devicetype != '\0') {
		if (strcmp(arr->devicetype, dev->devicetype) == 0)
			return arr;
		arr++;
	}
	return NULL;
}
/* Bus match callback: a driver matches a device iff its id table
 * contains the device's devicetype. Drivers without a table match
 * nothing. */
int xenbus_match(struct device *_dev, struct device_driver *_drv)
{
	struct xenbus_driver *drv = to_xenbus_driver(_drv);
	if (!drv->ids)
		return 0;
	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
}
EXPORT_SYMBOL_GPL(xenbus_match);
/* Release the cached other-end path string (kfree(NULL) is safe). */
static void free_otherend_details(struct xenbus_device *dev)
{
	kfree(dev->otherend);
	dev->otherend = NULL;
}
/* Tear down the watch on the other end's state node, if one is set. */
static void free_otherend_watch(struct xenbus_device *dev)
{
	if (dev->otherend_watch.node) {
		unregister_xenbus_watch(&dev->otherend_watch);
		kfree(dev->otherend_watch.node);
		dev->otherend_watch.node = NULL;
	}
}
/* Drop any stale other-end watch/details, then re-read the other end's
 * details via the driver's read_otherend_details callback. */
static int talk_to_otherend(struct xenbus_device *dev)
{
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
	free_otherend_watch(dev);
	free_otherend_details(dev);
	return drv->read_otherend_details(dev);
}
/* Register a watch on "<otherend>/state" that fires the bus's
 * otherend_changed handler. */
static int watch_otherend(struct xenbus_device *dev)
{
	struct xen_bus_type *bus =
		container_of(dev->dev.bus, struct xen_bus_type, bus);
	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
				    bus->otherend_changed,
				    "%s/%s", dev->otherend, "state");
}
/*
 * Read the other end's domain id (id_node) and xenstore path
 * (path_node) from this device's node into xendev->otherend_id and
 * xendev->otherend (allocated string). Fails the device fatally and
 * returns an error if the values are missing or the path does not
 * exist in xenstore.
 */
int xenbus_read_otherend_details(struct xenbus_device *xendev,
				 char *id_node, char *path_node)
{
	int err = xenbus_gather(XBT_NIL, xendev->nodename,
				id_node, "%i", &xendev->otherend_id,
				path_node, NULL, &xendev->otherend,
				NULL);
	if (err) {
		xenbus_dev_fatal(xendev, err,
				 "reading other end details from %s",
				 xendev->nodename);
		return err;
	}
	if (strlen(xendev->otherend) == 0 ||
	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
		xenbus_dev_fatal(xendev, -ENOENT,
				 "unable to read other end from %s.  "
				 "missing or inaccessible.",
				 xendev->nodename);
		free_otherend_details(xendev);
		return -ENOENT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
/*
 * Watch callback fired when the other end's state node changes:
 * validates the watch path against the cached otherend, reads the new
 * state and dispatches to the driver's otherend_changed hook (with
 * special-casing during system shutdown).
 */
void xenbus_otherend_changed(struct xenbus_watch *watch,
			     const char **vec, unsigned int len,
			     int ignore_on_shutdown)
{
	struct xenbus_device *dev =
		container_of(watch, struct xenbus_device, otherend_watch);
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
	enum xenbus_state state;
	/* Protect us against watches firing on old details when the otherend
	   details change, say immediately after a resume. */
	if (!dev->otherend ||
	    strncmp(dev->otherend, vec[XS_WATCH_PATH],
		    strlen(dev->otherend))) {
		dev_dbg(&dev->dev, "Ignoring watch at %s\n",
			vec[XS_WATCH_PATH]);
		return;
	}
	state = xenbus_read_driver_state(dev->otherend);
	dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
		state, xenbus_strstate(state), dev->otherend_watch.node,
		vec[XS_WATCH_PATH]);
	/*
	 * Ignore xenbus transitions during shutdown. This prevents us doing
	 * work that can fail e.g., when the rootfs is gone.
	 */
	if (system_state > SYSTEM_RUNNING) {
		if (ignore_on_shutdown && (state == XenbusStateClosing))
			xenbus_frontend_closed(dev);
		return;
	}
	if (drv->otherend_changed)
		drv->otherend_changed(dev, state);
}
EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
/*
 * Bus probe callback: bind a xenbus driver to a device. Reads the
 * other end's details, calls the driver's probe, then installs the
 * state watch. On probe failure the device is reported and switched
 * to Closed.
 */
int xenbus_dev_probe(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
	const struct xenbus_device_id *id;
	int err;
	DPRINTK("%s", dev->nodename);
	if (!drv->probe) {
		err = -ENODEV;
		goto fail;
	}
	id = match_device(drv->ids, dev);
	if (!id) {
		err = -ENODEV;
		goto fail;
	}
	err = talk_to_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
			 dev->nodename);
		return err;
	}
	err = drv->probe(dev, id);
	if (err)
		goto fail;
	/* Watch the other end's state only after a successful probe. */
	err = watch_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
		       dev->nodename);
		return err;
	}
	return 0;
fail:
	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
	xenbus_switch_state(dev, XenbusStateClosed);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_dev_probe);
/* Bus remove callback: drop the state watch, run the driver's remove
 * hook, free cached other-end details and mark the device Closed. */
int xenbus_dev_remove(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
	DPRINTK("%s", dev->nodename);
	free_otherend_watch(dev);
	if (drv->remove)
		drv->remove(dev);
	free_otherend_details(dev);
	xenbus_switch_state(dev, XenbusStateClosed);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_remove);
/* Bus shutdown callback: for a Connected device, request Closing and
 * wait (up to 5s) for the driver to signal completion via dev->down. */
void xenbus_dev_shutdown(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	unsigned long timeout = 5*HZ;
	DPRINTK("%s", dev->nodename);
	/* Hold a reference for the duration of the handshake. */
	get_device(&dev->dev);
	if (dev->state != XenbusStateConnected) {
		printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__,
		       dev->nodename, xenbus_strstate(dev->state));
		goto out;
	}
	xenbus_switch_state(dev, XenbusStateClosing);
	timeout = wait_for_completion_timeout(&dev->down, timeout);
	if (!timeout)
		printk(KERN_INFO "%s: %s timeout closing device\n",
		       __func__, dev->nodename);
 out:
	put_device(&dev->dev);
}
EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
/* Attach a xenbus driver to the given bus type and register it with
 * the driver core. */
int xenbus_register_driver_common(struct xenbus_driver *drv,
				  struct xen_bus_type *bus)
{
	drv->driver.bus = &bus->bus;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
/* Unregister a previously registered xenbus driver. */
void xenbus_unregister_driver(struct xenbus_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
/* Search context passed through bus_for_each_dev(): nodename is the
 * key; dev receives the found device (with a reference held). */
struct xb_find_info {
	struct xenbus_device *dev;
	const char *nodename;
};
/* bus_for_each_dev() callback: stop (return 1) at the device whose
 * nodename equals info->nodename, taking a reference on it. */
static int cmp_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;
	if (!strcmp(xendev->nodename, info->nodename)) {
		info->dev = xendev;
		get_device(dev);
		return 1;
	}
	return 0;
}
/* Find the device on `bus` with the given xenstore nodename.
 * Returns the device with a reference held, or NULL. */
static struct xenbus_device *xenbus_device_find(const char *nodename,
						struct bus_type *bus)
{
	struct xb_find_info info = { .dev = NULL, .nodename = nodename };
	bus_for_each_dev(bus, NULL, &info, cmp_dev);
	return info.dev;
}
/* bus_for_each_dev() callback: stop (return 1) at the first device
 * whose nodename is info->nodename or lies underneath it, taking a
 * reference on it. */
static int cleanup_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;
	int len = strlen(info->nodename);
	DPRINTK("%s", info->nodename);
	/* Match the info->nodename path, or any subdirectory of that path. */
	if (strncmp(xendev->nodename, info->nodename, len))
		return 0;
	/* If the node name is longer, ensure it really is a subdirectory. */
	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
		return 0;
	info->dev = xendev;
	get_device(dev);
	return 1;
}
/* Unregister every device at or below `path` on `bus`, one per
 * iteration until no more match. */
static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
{
	struct xb_find_info info = { .nodename = path };
	do {
		info.dev = NULL;
		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
		if (info.dev) {
			device_unregister(&info.dev->dev);
			/* Drop the reference taken by cleanup_dev(). */
			put_device(&info.dev->dev);
		}
	} while (info.dev);
}
/* Device-core release hook: frees the xenbus_device container (which
 * also holds the nodename/devicetype strings, see xenbus_probe_node). */
static void xenbus_dev_release(struct device *dev)
{
	if (dev)
		kfree(to_xenbus_device(dev));
}
/* sysfs: expose the device's xenstore node name. */
static ssize_t nodename_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
/* sysfs: expose the device's type string (e.g. "vbd", "vif"). */
static ssize_t devtype_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
/* sysfs: modalias "<busname>:<devicetype>" for module autoloading. */
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s:%s\n", dev->bus->name,
		       to_xenbus_device(dev)->devicetype);
}
/* Default sysfs attributes installed for every xenbus device. */
struct device_attribute xenbus_dev_attrs[] = {
	__ATTR_RO(nodename),
	__ATTR_RO(devtype),
	__ATTR_RO(modalias),
	__ATTR_NULL
};
EXPORT_SYMBOL_GPL(xenbus_dev_attrs);
/*
 * Create and register a xenbus_device for xenstore node `nodename` of
 * the given `type` on `bus`. Devices whose state is past
 * Initialising are ignored (they are going away). The device struct
 * and its two name strings are allocated in one block and freed
 * together by xenbus_dev_release().
 */
int xenbus_probe_node(struct xen_bus_type *bus,
		      const char *type,
		      const char *nodename)
{
	char devname[XEN_BUS_ID_SIZE];
	int err;
	struct xenbus_device *xendev;
	size_t stringlen;
	char *tmpstring;
	enum xenbus_state state = xenbus_read_driver_state(nodename);
	if (state != XenbusStateInitialising) {
		/* Device is not new, so ignore it.  This can happen if a
		   device is going away after switching to Closed.  */
		return 0;
	}
	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
	if (!xendev)
		return -ENOMEM;
	xendev->state = XenbusStateInitialising;
	/* Copy the strings into the extra space. */
	tmpstring = (char *)(xendev + 1);
	strcpy(tmpstring, nodename);
	xendev->nodename = tmpstring;
	tmpstring += strlen(tmpstring) + 1;
	strcpy(tmpstring, type);
	xendev->devicetype = tmpstring;
	init_completion(&xendev->down);
	xendev->dev.bus = &bus->bus;
	xendev->dev.release = xenbus_dev_release;
	err = bus->get_bus_id(devname, xendev->nodename);
	if (err)
		goto fail;
	dev_set_name(&xendev->dev, devname);
	/* Register with generic device framework. */
	err = device_register(&xendev->dev);
	if (err)
		goto fail;
	return 0;
fail:
	kfree(xendev);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_node);
/* Enumerate the xenstore directory <root>/<type> and probe a device
 * for each entry; stops at the first probe error. */
static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
{
	int err = 0;
	char **dir;
	unsigned int dir_n = 0;
	int i;
	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	for (i = 0; i < dir_n; i++) {
		err = bus->probe(bus, type, dir[i]);
		if (err)
			break;
	}
	/* xenbus_directory() returns one kmalloc'd block. */
	kfree(dir);
	return err;
}
/* Enumerate all device types under the bus root and probe every
 * device of each type; stops at the first error. */
int xenbus_probe_devices(struct xen_bus_type *bus)
{
	int err = 0;
	char **dir;
	unsigned int i, dir_n;
	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	for (i = 0; i < dir_n; i++) {
		err = xenbus_probe_device_type(bus, dir[i]);
		if (err)
			break;
	}
	kfree(dir);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_devices);
/* Count occurrences of character c in the NUL-terminated string str. */
static unsigned int char_count(const char *str, char c)
{
	unsigned int count = 0;
	const char *p;

	for (p = str; *p; p++)
		count += (*p == c);
	return count;
}
/*
 * Return the index of the (len+1)-th occurrence of @c in @str.
 * If fewer occurrences exist, return the string length when exactly
 * @len separators were found, otherwise -ERANGE.
 */
static int strsep_len(const char *str, char c, unsigned int len)
{
	unsigned int idx = 0;

	while (str[idx]) {
		if (str[idx] == c) {
			if (!len)
				return idx;
			len--;
		}
		idx++;
	}
	return len ? -ERANGE : idx;
}
/*
 * Xenstore watch callback: a node under the bus root changed.
 * Decide whether a device appeared (probe it) or disappeared
 * (clean up its struct devices).
 */
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
{
	int exists, rootlen;
	struct xenbus_device *dev;
	char type[XEN_BUS_ID_SIZE];
	const char *p, *root;

	/* Paths with fewer than two '/' cannot name a device instance. */
	if (char_count(node, '/') < 2)
		return;

	exists = xenbus_exists(XBT_NIL, node, "");
	if (!exists) {
		/* Node is gone: remove any devices rooted at it. */
		xenbus_cleanup_devices(node, &bus->bus);
		return;
	}

	/* backend/<type>/... or device/<type>/... */
	p = strchr(node, '/') + 1;
	snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
	type[XEN_BUS_ID_SIZE-1] = '\0';

	/* Truncate the path to bus->levels components: the device root. */
	rootlen = strsep_len(node, '/', bus->levels);
	if (rootlen < 0)
		return;
	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
	if (!root)
		return;

	dev = xenbus_device_find(root, &bus->bus);
	if (!dev)
		/* Not registered yet: create the xenbus device. */
		xenbus_probe_node(bus, type, root);
	else
		/* Already known: drop the ref taken by xenbus_device_find(). */
		put_device(&dev->dev);

	kfree(root);
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);
/*
 * PM suspend hook: forward to the bound xenbus driver's suspend
 * callback, if any.  A failing callback is only logged; this function
 * always reports success so the suspend proceeds.
 */
int xenbus_dev_suspend(struct device *dev)
{
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);
	struct xenbus_driver *drv;
	int err = 0;

	DPRINTK("%s", xdev->nodename);

	if (!dev->driver)
		return 0;

	drv = to_xenbus_driver(dev->driver);
	if (drv->suspend)
		err = drv->suspend(xdev);
	if (err)
		printk(KERN_WARNING
		       "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
/*
 * PM resume hook.  Re-establishes the connection to the other end
 * (talk_to_otherend), resets the device state, lets the driver resume,
 * and finally re-registers the otherend watch.  Ordering matters: the
 * watch is only set up after the driver has resumed.
 */
int xenbus_dev_resume(struct device *dev)
{
	int err;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);

	DPRINTK("%s", xdev->nodename);

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);

	/* Reconnect to the other end before touching driver state. */
	err = talk_to_otherend(xdev);
	if (err) {
		printk(KERN_WARNING
		       "xenbus: resume (talk_to_otherend) %s failed: %i\n",
		       dev_name(dev), err);
		return err;
	}

	/* Restart the state machine from scratch. */
	xdev->state = XenbusStateInitialising;

	if (drv->resume) {
		err = drv->resume(xdev);
		if (err) {
			printk(KERN_WARNING
			       "xenbus: resume %s failed: %i\n",
			       dev_name(dev), err);
			return err;
		}
	}

	/* Re-arm the watch on the other end's state node. */
	err = watch_otherend(xdev);
	if (err) {
		printk(KERN_WARNING
		       "xenbus_probe: resume (watch_otherend) %s failed: "
		       "%d.\n", dev_name(dev), err);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_resume);
/*
 * PM cancel-hibernation hook: nothing to undo for xenbus devices,
 * report success.
 */
int xenbus_dev_cancel(struct device *dev)
{
	DPRINTK("cancel");
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready;
/*
 * Register a callback for "xenstore is up".  If xenstored is already
 * ready the callback runs immediately (and is NOT added to the chain);
 * otherwise it is queued on xenstore_chain for xenbus_probe() to fire.
 */
int register_xenstore_notifier(struct notifier_block *nb)
{
	if (xenstored_ready > 0)
		return nb->notifier_call(nb, 0, NULL);

	blocking_notifier_chain_register(&xenstore_chain, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(register_xenstore_notifier);
/*
 * Remove @nb from the xenstore notifier chain.  Note that if the
 * callback already ran directly (register path when xenstored_ready),
 * it was never on the chain; unregistering is then a harmless no-op.
 */
void unregister_xenstore_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&xenstore_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
/*
 * Mark xenstore as ready, then run every notifier queued before it
 * came up.  The flag must be set before the chain runs so callbacks
 * (and later registrations) observe the ready state.
 */
void xenbus_probe(struct work_struct *unused)
{
	xenstored_ready = 1;

	/* Notify others that xenstore is up */
	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
EXPORT_SYMBOL_GPL(xenbus_probe);
/*
 * Initcall: on plain PV guests xenstore is available immediately, so
 * probe now.  Dom0 and HVM guests defer until their xenstore setup
 * completes elsewhere.
 */
static int __init xenbus_probe_initcall(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_initial_domain() && !xen_hvm_domain())
		xenbus_probe(NULL);

	return 0;
}
device_initcall(xenbus_probe_initcall);
/* Set up event channel for xenstored which is run as a local process
 * (this is normally used only in dom0)
 *
 * Allocates the shared xenstore page and an unbound local event
 * channel.  Returns 0 on success, negative errno on failure.
 */
static int __init xenstored_local_init(void)
{
	int err = 0;
	unsigned long page = 0;
	struct evtchn_alloc_unbound alloc_unbound;

	/* Allocate Xenstore page */
	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		/*
		 * Fix: previously fell through with err == 0, reporting
		 * success even though the allocation failed.
		 */
		err = -ENOMEM;
		goto out_err;
	}

	xen_store_mfn = xen_start_info->store_mfn =
		pfn_to_mfn(virt_to_phys((void *)page) >>
			   PAGE_SHIFT);

	/* Next allocate a local port which xenstored can bind to */
	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = DOMID_SELF;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err == -ENOSYS)
		goto out_err;

	BUG_ON(err);
	xen_store_evtchn = xen_start_info->store_evtchn =
		alloc_unbound.port;

	return 0;

out_err:
	if (page != 0)
		free_page(page);
	return err;
}
/*
 * Work out how this kernel reaches xenstore (local xenstored, PV
 * start_info, or HVM parameters), map the shared interface page, and
 * initialise the xenbus communication layer.
 */
static int __init xenbus_init(void)
{
	int err = 0;
	uint64_t v = 0;
	xen_store_domain_type = XS_UNKNOWN;

	if (!xen_domain())
		return -ENODEV;

	xenbus_ring_ops_init();

	/* Classification: later checks refine the earlier ones. */
	if (xen_pv_domain())
		xen_store_domain_type = XS_PV;
	if (xen_hvm_domain())
		xen_store_domain_type = XS_HVM;
	if (xen_hvm_domain() && xen_initial_domain())
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && !xen_start_info->store_evtchn)
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && xen_start_info->store_evtchn)
		xenstored_ready = 1;

	switch (xen_store_domain_type) {
	case XS_LOCAL:
		/* xenstored runs in this domain: allocate page + evtchn. */
		err = xenstored_local_init();
		if (err)
			goto out_error;
		xen_store_interface = mfn_to_virt(xen_store_mfn);
		break;
	case XS_PV:
		/* PV guest: page and event channel come from start_info. */
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_mfn = xen_start_info->store_mfn;
		xen_store_interface = mfn_to_virt(xen_store_mfn);
		break;
	case XS_HVM:
		/* HVM guest: both are fetched via HVM parameters. */
		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;
		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		xen_store_mfn = (unsigned long)v;
		xen_store_interface =
			xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
		break;
	default:
		pr_warn("Xenstore state unknown\n");
		break;
	}

	/* Initialize the interface to xenstore. */
	err = xs_init();
	if (err) {
		printk(KERN_WARNING
		       "XENBUS: Error initializing xenstore comms: %i\n", err);
		goto out_error;
	}

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_mkdir("xen", NULL);
#endif

out_error:
	return err;
}
postcore_initcall(xenbus_init);
MODULE_LICENSE("GPL");
| gpl-2.0 |
hroark13/android_kernel_zte_draconis | drivers/gpio/gpio-msm-v3.c | 2834 | 6519 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <mach/msm_iomap.h>
#include <mach/gpiomux.h>
#include "gpio-msm-common.h"
/* Bits of interest in the GPIO_IN_OUT register.
*/
enum {
GPIO_IN_BIT = 0,
GPIO_OUT_BIT = 1
};
/* Bits of interest in the GPIO_INTR_STATUS register.
*/
enum {
INTR_STATUS_BIT = 0,
};
/* Bits of interest in the GPIO_CFG register.
*/
enum {
GPIO_OE_BIT = 9,
};
/* Bits of interest in the GPIO_INTR_CFG register.
*/
enum {
INTR_ENABLE_BIT = 0,
INTR_POL_CTL_BIT = 1,
INTR_DECT_CTL_BIT = 2,
INTR_RAW_STATUS_EN_BIT = 4,
INTR_TARGET_PROC_BIT = 5,
INTR_DIR_CONN_EN_BIT = 8,
};
/*
* There is no 'DC_POLARITY_LO' because the GIC is incapable
* of asserting on falling edge or level-low conditions. Even though
* the registers allow for low-polarity inputs, the case can never arise.
*/
enum {
DC_GPIO_SEL_BIT = 0,
DC_POLARITY_BIT = 8,
};
/*
* When a GPIO triggers, two separate decisions are made, controlled
* by two separate flags.
*
* - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS
* register for that GPIO will be updated to reflect the triggering of that
* gpio. If this bit is 0, this register will not be updated.
* - Second, INTR_ENABLE controls whether an interrupt is triggered.
*
* If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt
* can be triggered but the status register will not reflect it.
*/
#define INTR_RAW_STATUS_EN BIT(INTR_RAW_STATUS_EN_BIT)
#define INTR_ENABLE BIT(INTR_ENABLE_BIT)
#define INTR_POL_CTL_HI BIT(INTR_POL_CTL_BIT)
#define INTR_DIR_CONN_EN BIT(INTR_DIR_CONN_EN_BIT)
#define DC_POLARITY_HI BIT(DC_POLARITY_BIT)
#define INTR_TARGET_PROC_APPS (4 << INTR_TARGET_PROC_BIT)
#define INTR_TARGET_PROC_NONE (7 << INTR_TARGET_PROC_BIT)
#define INTR_DECT_CTL_LEVEL (0 << INTR_DECT_CTL_BIT)
#define INTR_DECT_CTL_POS_EDGE (1 << INTR_DECT_CTL_BIT)
#define INTR_DECT_CTL_NEG_EDGE (2 << INTR_DECT_CTL_BIT)
#define INTR_DECT_CTL_DUAL_EDGE (3 << INTR_DECT_CTL_BIT)
#define INTR_DECT_CTL_MASK (3 << INTR_DECT_CTL_BIT)
#define GPIO_CONFIG(gpio) (MSM_TLMM_BASE + 0x1000 + (0x10 * (gpio)))
#define GPIO_IN_OUT(gpio) (MSM_TLMM_BASE + 0x1004 + (0x10 * (gpio)))
#define GPIO_INTR_CFG(gpio) (MSM_TLMM_BASE + 0x1008 + (0x10 * (gpio)))
#define GPIO_INTR_STATUS(gpio) (MSM_TLMM_BASE + 0x100c + (0x10 * (gpio)))
#define GPIO_DIR_CONN_INTR(intr) (MSM_TLMM_BASE + 0x2800 + (0x04 * (intr)))
/* Read-modify-write: OR bit mask @n into register @reg. */
static inline void set_gpio_bits(unsigned n, void __iomem *reg)
{
	unsigned cur = __raw_readl(reg);

	__raw_writel(cur | n, reg);
}
/* Read-modify-write: clear bit mask @n in register @reg. */
static inline void clr_gpio_bits(unsigned n, void __iomem *reg)
{
	unsigned cur = __raw_readl(reg);

	__raw_writel(cur & ~n, reg);
}
/* Sample the input-level bit of @gpio's IN_OUT register. */
unsigned __msm_gpio_get_inout(unsigned gpio)
{
	unsigned reg = __raw_readl(GPIO_IN_OUT(gpio));

	return reg & BIT(GPIO_IN_BIT);
}
/* Drive @gpio's output bit high when @val is non-zero, low otherwise. */
void __msm_gpio_set_inout(unsigned gpio, unsigned val)
{
	unsigned out = val ? BIT(GPIO_OUT_BIT) : 0;

	__raw_writel(out, GPIO_IN_OUT(gpio));
}
/*
 * Configure @gpio as input (@input non-zero) or output.  For outputs
 * the level @val is driven before output-enable is set, so the pin
 * never glitches to a stale value.
 */
void __msm_gpio_set_config_direction(unsigned gpio, int input, int val)
{
	if (!input) {
		__msm_gpio_set_inout(gpio, val);
		set_gpio_bits(BIT(GPIO_OE_BIT), GPIO_CONFIG(gpio));
		return;
	}

	clr_gpio_bits(BIT(GPIO_OE_BIT), GPIO_CONFIG(gpio));
}
/*
 * Set the interrupt polarity control bit for @gpio: non-zero @val
 * clears INTR_POL_CTL_HI, zero sets it.
 */
void __msm_gpio_set_polarity(unsigned gpio, unsigned val)
{
	if (!val)
		set_gpio_bits(INTR_POL_CTL_HI, GPIO_INTR_CFG(gpio));
	else
		clr_gpio_bits(INTR_POL_CTL_HI, GPIO_INTR_CFG(gpio));
}
/* Return the latched interrupt status bit for @gpio (non-zero if set). */
unsigned __msm_gpio_get_intr_status(unsigned gpio)
{
	unsigned status = __raw_readl(GPIO_INTR_STATUS(gpio));

	return status & BIT(INTR_STATUS_BIT);
}
/* Clear the latched interrupt status for @gpio by writing zero. */
void __msm_gpio_set_intr_status(unsigned gpio)
{
	__raw_writel(0, GPIO_INTR_STATUS(gpio));
}
/* Return the raw interrupt configuration register for @gpio. */
unsigned __msm_gpio_get_intr_config(unsigned gpio)
{
	return __raw_readl(GPIO_INTR_CFG(gpio));
}
/*
 * Enable or disable the summary interrupt for @gpio.  When enabling,
 * the direct-connect enable bit is cleared in the same write.
 */
void __msm_gpio_set_intr_cfg_enable(unsigned gpio, unsigned val)
{
	unsigned cfg = __raw_readl(GPIO_INTR_CFG(gpio));

	if (!val) {
		cfg &= ~INTR_ENABLE;
	} else {
		cfg &= ~INTR_DIR_CONN_EN;
		cfg |= INTR_ENABLE;
	}
	__raw_writel(cfg, GPIO_INTR_CFG(gpio));
}
/* Non-zero when the interrupt-enable bit is set for @gpio. */
unsigned __msm_gpio_get_intr_cfg_enable(unsigned gpio)
{
	return __msm_gpio_get_intr_config(gpio) & INTR_ENABLE;
}
/*
 * Program the interrupt detection mode (level/edge, polarity) for
 * @gpio.  Written in two stages: a first write with the detect-control
 * bits zeroed, then the final configuration.
 */
void __msm_gpio_set_intr_cfg_type(unsigned gpio, unsigned type)
{
	unsigned cfg;

	/* RAW_STATUS_EN is left on for all gpio irqs. Due to the
	 * internal circuitry of TLMM, toggling the RAW_STATUS
	 * could cause the INTR_STATUS to be set for EDGE interrupts.
	 */
	cfg  = INTR_RAW_STATUS_EN | INTR_TARGET_PROC_APPS;
	__raw_writel(cfg, GPIO_INTR_CFG(gpio));
	cfg &= ~INTR_DECT_CTL_MASK;
	/* Select the detect-control encoding for the requested trigger. */
	if (type == IRQ_TYPE_EDGE_RISING)
		cfg |= INTR_DECT_CTL_POS_EDGE;
	else if (type == IRQ_TYPE_EDGE_FALLING)
		cfg |= INTR_DECT_CTL_NEG_EDGE;
	else if (type == IRQ_TYPE_EDGE_BOTH)
		cfg |= INTR_DECT_CTL_DUAL_EDGE;
	else
		cfg |= INTR_DECT_CTL_LEVEL;

	/* Only level-low clears the polarity bit; everything else sets it. */
	if (type & IRQ_TYPE_LEVEL_LOW)
		cfg &= ~INTR_POL_CTL_HI;
	else
		cfg |= INTR_POL_CTL_HI;
	__raw_writel(cfg, GPIO_INTR_CFG(gpio));

	/* Sometimes it might take a little while to update
	 * the interrupt status after the RAW_STATUS is enabled
	 * We clear the interrupt status before enabling the
	 * interrupt in the unmask call-back.
	 */
	udelay(5);
}
/*
 * Decode a packed gpiomux @config word and write the resulting
 * direction/drive-strength/function/pull value to GPIO_CONFIG.
 */
void __gpio_tlmm_config(unsigned config)
{
	unsigned gpio = GPIO_PIN(config);
	unsigned val = 0;

	val |= (GPIO_DIR(config) << 9) & (0x1 << 9);
	val |= (GPIO_DRVSTR(config) << 6) & (0x7 << 6);
	val |= (GPIO_FUNC(config) << 2) & (0xf << 2);
	val |= GPIO_PULL(config) & 0x3;
	__raw_writel(val, GPIO_CONFIG(gpio));
}
/*
 * Route @gpio to direct-connect interrupt line @irq (bypassing the
 * summary interrupt), optionally selecting inverted input polarity.
 */
void __msm_gpio_install_direct_irq(unsigned gpio, unsigned irq,
					unsigned int input_polarity)
{
	unsigned cfg;

	/* NOTE(review): sets the output-enable bit here; presumably
	 * required by the TLMM for direct-connect routing — confirm
	 * against hardware documentation. */
	set_gpio_bits(BIT(GPIO_OE_BIT), GPIO_CONFIG(gpio));
	cfg = __raw_readl(GPIO_INTR_CFG(gpio));
	/* Disable summary routing and raw status; enable direct-connect. */
	cfg &= ~(INTR_TARGET_PROC_NONE | INTR_RAW_STATUS_EN | INTR_ENABLE);
	cfg |= INTR_TARGET_PROC_APPS | INTR_DIR_CONN_EN;
	__raw_writel(cfg, GPIO_INTR_CFG(gpio));

	/* Select this gpio (and polarity) in the direct-connect mux. */
	cfg = gpio;
	if (input_polarity)
		cfg |= DC_POLARITY_HI;
	__raw_writel(cfg, GPIO_DIR_CONN_INTR(irq));
}
| gpl-2.0 |
ghsr/android_kernel_samsung_i9152 | sound/soc/au1x/psc-ac97.c | 3090 | 12214 | /*
* Au12x0/Au1550 PSC ALSA ASoC audio support.
*
* (c) 2007-2009 MSC Vertriebsges.m.b.H.,
* Manuel Lauss <manuel.lauss@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Au1xxx-PSC AC97 glue.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include "psc.h"
/* how often to retry failed codec register reads/writes */
#define AC97_RW_RETRIES 5
#define AC97_DIR \
(SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE)
#define AC97_RATES \
SNDRV_PCM_RATE_8000_48000
#define AC97_FMTS \
(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3BE)
#define AC97PCR_START(stype) \
((stype) == PCM_TX ? PSC_AC97PCR_TS : PSC_AC97PCR_RS)
#define AC97PCR_STOP(stype) \
((stype) == PCM_TX ? PSC_AC97PCR_TP : PSC_AC97PCR_RP)
#define AC97PCR_CLRFIFO(stype) \
((stype) == PCM_TX ? PSC_AC97PCR_TC : PSC_AC97PCR_RC)
#define AC97STAT_BUSY(stype) \
((stype) == PCM_TX ? PSC_AC97STAT_TB : PSC_AC97STAT_RB)
/* instance data. There can be only one, MacLeod!!!! */
static struct au1xpsc_audio_data *au1xpsc_ac97_workdata;
#if 0
/* this could theoretically work, but ac97->bus->card->private_data can be NULL
* when snd_ac97_mixer() is called; I don't know if the rest further down the
* chain are always valid either.
*/
static inline struct au1xpsc_audio_data *ac97_to_pscdata(struct snd_ac97 *x)
{
struct snd_soc_card *c = x->bus->card->private_data;
return snd_soc_dai_get_drvdata(c->rtd->cpu_dai);
}
#else
#define ac97_to_pscdata(x) au1xpsc_ac97_workdata
#endif
/* AC97 controller reads codec register */
static unsigned short au1xpsc_ac97_read(struct snd_ac97 *ac97,
					unsigned short reg)
{
	struct au1xpsc_audio_data *pscdata = ac97_to_pscdata(ac97);
	unsigned short retry, tmo;
	unsigned long data;

	/* Clear any stale command-done event before starting. */
	au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
	au_sync();

	retry = AC97_RW_RETRIES;
	do {
		mutex_lock(&pscdata->lock);

		/* Issue the read command for @reg. */
		au_writel(PSC_AC97CDC_RD | PSC_AC97CDC_INDX(reg),
			  AC97_CDC(pscdata));
		au_sync();

		/* Poll (up to 20 * 21us) for the command-done event. */
		tmo = 20;
		do {
			udelay(21);
			if (au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD)
				break;
		} while (--tmo);

		data = au_readl(AC97_CDC(pscdata));

		/* Acknowledge the event for the next iteration. */
		au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
		au_sync();

		mutex_unlock(&pscdata->lock);

		/* The reply echoes the register index in bits 22:16.
		 * NOTE(review): setting tmo non-zero makes the loop
		 * condition (--retry && !tmo) FALSE, so a mismatch exits
		 * rather than retrying — the comment below and this
		 * condition appear inconsistent; verify intended behavior.
		 */
		if (reg != ((data >> 16) & 0x7f))
			tmo = 1;	/* wrong register, try again */

	} while (--retry && !tmo);

	/* 0xffff tells the AC97 core "no valid data". */
	return retry ? data & 0xffff : 0xffff;
}
/* AC97 controller writes to codec register */
static void au1xpsc_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
			       unsigned short val)
{
	struct au1xpsc_audio_data *pscdata = ac97_to_pscdata(ac97);
	unsigned int tmo, retry;

	/* Clear any stale command-done event before starting. */
	au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
	au_sync();

	retry = AC97_RW_RETRIES;
	do {
		mutex_lock(&pscdata->lock);

		/* Issue the write command: register index + 16-bit value. */
		au_writel(PSC_AC97CDC_INDX(reg) | (val & 0xffff),
			  AC97_CDC(pscdata));
		au_sync();

		/* Poll (up to 20 * 21us) for command completion. */
		tmo = 20;
		do {
			udelay(21);
			if (au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD)
				break;
		} while (--tmo);

		/* Acknowledge the event; retry only on timeout (tmo == 0). */
		au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
		au_sync();

		mutex_unlock(&pscdata->lock);
	} while (--retry && !tmo);
}
/* AC97 controller asserts a warm reset */
static void au1xpsc_ac97_warm_reset(struct snd_ac97 *ac97)
{
	struct au1xpsc_audio_data *pscdata = ac97_to_pscdata(ac97);

	/* Assert SYNC for ~10ms, then release it. */
	au_writel(PSC_AC97RST_SNC, AC97_RST(pscdata));
	au_sync();
	msleep(10);
	au_writel(0, AC97_RST(pscdata));
	au_sync();
}
/*
 * Cold-reset the AC97 link: disable the PSC, pulse the reset line for
 * 500ms, re-enable the PSC, then wait for both the PSC and the AC97
 * function to report ready.
 */
static void au1xpsc_ac97_cold_reset(struct snd_ac97 *ac97)
{
	struct au1xpsc_audio_data *pscdata = ac97_to_pscdata(ac97);
	int i;

	/* disable PSC during cold reset */
	au_writel(0, AC97_CFG(au1xpsc_ac97_workdata));
	au_sync();
	au_writel(PSC_CTRL_DISABLE, PSC_CTRL(pscdata));
	au_sync();

	/* issue cold reset */
	au_writel(PSC_AC97RST_RST, AC97_RST(pscdata));
	au_sync();
	msleep(500);
	au_writel(0, AC97_RST(pscdata));
	au_sync();

	/* enable PSC */
	au_writel(PSC_CTRL_ENABLE, PSC_CTRL(pscdata));
	au_sync();

	/* wait for PSC to indicate it's ready (up to ~1000ms) */
	i = 1000;
	while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_SR)) && (--i))
		msleep(1);

	if (i == 0) {
		printk(KERN_ERR "au1xpsc-ac97: PSC not ready!\n");
		return;
	}

	/* enable the ac97 function */
	au_writel(pscdata->cfg | PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
	au_sync();

	/* wait for AC97 core to become ready (up to ~1000ms) */
	i = 1000;
	while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)) && (--i))
		msleep(1);

	if (i == 0)
		printk(KERN_ERR "au1xpsc-ac97: AC97 ctrl not ready\n");
}
/* AC97 controller operations */
/* Exported bus ops table consumed by the ASoC AC97 core. */
struct snd_ac97_bus_ops soc_ac97_ops = {
	.read		= au1xpsc_ac97_read,
	.write		= au1xpsc_ac97_write,
	.reset		= au1xpsc_ac97_cold_reset,
	.warm_reset	= au1xpsc_ac97_warm_reset,
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);
/*
 * Configure sample length and slot enables for a playback or capture
 * stream.  If the other direction is already running, only verify that
 * the requested parameters match the active configuration.
 */
static int au1xpsc_ac97_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params,
				  struct snd_soc_dai *dai)
{
	struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai);
	unsigned long r, ro, stat;
	int chans, t, stype = SUBSTREAM_TYPE(substream);

	/* NOTE(review): chans is computed but never used below. */
	chans = params_channels(params);

	r = ro = au_readl(AC97_CFG(pscdata));
	stat = au_readl(AC97_STAT(pscdata));

	/* already active? */
	if (stat & (PSC_AC97STAT_TB | PSC_AC97STAT_RB)) {
		/* reject parameters not currently set up */
		if ((PSC_AC97CFG_GET_LEN(r) != params->msbits) ||
		    (pscdata->rate != params_rate(params)))
			return -EINVAL;
	} else {
		/* set sample bitdepth: REG[24:21]=(BITS-2)/2 */
		r &= ~PSC_AC97CFG_LEN_MASK;
		r |= PSC_AC97CFG_SET_LEN(params->msbits);

		/* channels: enable slots for front L/R channel */
		if (stype == PCM_TX) {
			r &= ~PSC_AC97CFG_TXSLOT_MASK;
			r |= PSC_AC97CFG_TXSLOT_ENA(3);
			r |= PSC_AC97CFG_TXSLOT_ENA(4);
		} else {
			r &= ~PSC_AC97CFG_RXSLOT_MASK;
			r |= PSC_AC97CFG_RXSLOT_ENA(3);
			r |= PSC_AC97CFG_RXSLOT_ENA(4);
		}

		/* do we need to poke the hardware? */
		if (!(r ^ ro))
			goto out;

		/* ac97 engine is about to be disabled */
		mutex_lock(&pscdata->lock);

		/* disable AC97 device controller first... */
		au_writel(r & ~PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
		au_sync();

		/* ...wait for it (poll up to ~100ms)... */
		t = 100;
		while ((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR) && --t)
			msleep(1);

		if (!t)
			printk(KERN_ERR "PSC-AC97: can't disable!\n");

		/* ...write config... */
		au_writel(r, AC97_CFG(pscdata));
		au_sync();

		/* ...enable the AC97 controller again... */
		au_writel(r | PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
		au_sync();

		/* ...and wait for ready bit */
		t = 100;
		while ((!(au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)) && --t)
			msleep(1);

		if (!t)
			printk(KERN_ERR "PSC-AC97: can't enable!\n");

		mutex_unlock(&pscdata->lock);

		/* cache the active config for the "already active" check */
		pscdata->cfg = r;
		pscdata->rate = params_rate(params);
	}
out:
	return 0;
}
/* Start or stop the PCM engine for the substream's direction. */
static int au1xpsc_ac97_trigger(struct snd_pcm_substream *substream,
				int cmd, struct snd_soc_dai *dai)
{
	struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai);
	int ret, stype = SUBSTREAM_TYPE(substream);

	ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		/* flush the FIFO, then start the engine */
		au_writel(AC97PCR_CLRFIFO(stype), AC97_PCR(pscdata));
		au_sync();
		au_writel(AC97PCR_START(stype), AC97_PCR(pscdata));
		au_sync();
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		au_writel(AC97PCR_STOP(stype), AC97_PCR(pscdata));
		au_sync();

		/* busy-wait until the engine reports idle... */
		while (au_readl(AC97_STAT(pscdata)) & AC97STAT_BUSY(stype))
			asm volatile ("nop");

		/* ...then clear the FIFO */
		au_writel(AC97PCR_CLRFIFO(stype), AC97_PCR(pscdata));
		au_sync();
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/* DAI probe: only succeed once the platform device has been set up. */
static int au1xpsc_ac97_probe(struct snd_soc_dai *dai)
{
	if (au1xpsc_ac97_workdata)
		return 0;
	return -ENODEV;
}
/* DAI callbacks: only trigger and hw_params are needed for AC97. */
static struct snd_soc_dai_ops au1xpsc_ac97_dai_ops = {
	.trigger	= au1xpsc_ac97_trigger,
	.hw_params	= au1xpsc_ac97_hw_params,
};

/* Template DAI; drvprobe() copies it and fills in the instance name. */
static const struct snd_soc_dai_driver au1xpsc_ac97_dai_template = {
	.ac97_control		= 1,
	.probe			= au1xpsc_ac97_probe,
	.playback = {
		.rates		= AC97_RATES,
		.formats	= AC97_FMTS,
		.channels_min	= 2,
		.channels_max	= 2,
	},
	.capture = {
		.rates		= AC97_RATES,
		.formats	= AC97_FMTS,
		.channels_min	= 2,
		.channels_max	= 2,
	},
	.ops = &au1xpsc_ac97_dai_ops,
};
/*
 * Platform probe: map the PSC registers, switch the PSC into AC97
 * mode, register the DAI and the PCM DMA device.  Uses goto-based
 * cleanup on failure.
 */
static int __devinit au1xpsc_ac97_drvprobe(struct platform_device *pdev)
{
	int ret;
	struct resource *r;
	unsigned long sel;
	struct au1xpsc_audio_data *wd;

	wd = kzalloc(sizeof(struct au1xpsc_audio_data), GFP_KERNEL);
	if (!wd)
		return -ENOMEM;

	mutex_init(&wd->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		ret = -ENODEV;
		goto out0;
	}

	ret = -EBUSY;
	if (!request_mem_region(r->start, resource_size(r), pdev->name))
		goto out0;

	wd->mmio = ioremap(r->start, resource_size(r));
	if (!wd->mmio)
		goto out1;

	/* configuration: max dma trigger threshold, enable ac97 */
	wd->cfg = PSC_AC97CFG_RT_FIFO8 | PSC_AC97CFG_TT_FIFO8 |
		  PSC_AC97CFG_DE_ENABLE;

	/* preserve PSC clock source set up by platform */
	sel = au_readl(PSC_SEL(wd)) & PSC_SEL_CLK_MASK;
	/* disable the PSC while switching it into AC97 mode */
	au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	au_sync();
	au_writel(0, PSC_SEL(wd));
	au_sync();
	au_writel(PSC_SEL_PS_AC97MODE | sel, PSC_SEL(wd));
	au_sync();

	/* name the DAI like this device instance ("au1xpsc-ac97.PSCINDEX") */
	memcpy(&wd->dai_drv, &au1xpsc_ac97_dai_template,
	       sizeof(struct snd_soc_dai_driver));
	wd->dai_drv.name = dev_name(&pdev->dev);

	platform_set_drvdata(pdev, wd);

	ret = snd_soc_register_dai(&pdev->dev, &wd->dai_drv);
	if (ret)
		goto out1;

	wd->dmapd = au1xpsc_pcm_add(pdev);
	if (wd->dmapd) {
		/* publish the singleton only on full success */
		au1xpsc_ac97_workdata = wd;
		return 0;
	}

	snd_soc_unregister_dai(&pdev->dev);
out1:
	release_mem_region(r->start, resource_size(r));
out0:
	kfree(wd);
	return ret;
}
/*
 * Platform remove: tear down in reverse order of probe — PCM device,
 * DAI, then the PSC hardware — and release the mapped region.
 */
static int __devexit au1xpsc_ac97_drvremove(struct platform_device *pdev)
{
	struct au1xpsc_audio_data *wd = platform_get_drvdata(pdev);
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (wd->dmapd)
		au1xpsc_pcm_destroy(wd->dmapd);

	snd_soc_unregister_dai(&pdev->dev);

	/* disable PSC completely */
	au_writel(0, AC97_CFG(wd));
	au_sync();
	au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	au_sync();

	iounmap(wd->mmio);
	release_mem_region(r->start, resource_size(r));
	kfree(wd);

	/* clear the singleton so the DAI probe fails until re-probed */
	au1xpsc_ac97_workdata = NULL;	/* MDEV */

	return 0;
}
#ifdef CONFIG_PM
/* Suspend: remember the PSC clock selection, then shut the PSC down. */
static int au1xpsc_ac97_drvsuspend(struct device *dev)
{
	struct au1xpsc_audio_data *wd = dev_get_drvdata(dev);

	/* save interesting registers and disable PSC */
	wd->pm[0] = au_readl(PSC_SEL(wd));

	au_writel(0, AC97_CFG(wd));
	au_sync();
	au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	au_sync();

	return 0;
}

/* Resume: restore the clock selection; full reinit happens via cold reset. */
static int au1xpsc_ac97_drvresume(struct device *dev)
{
	struct au1xpsc_audio_data *wd = dev_get_drvdata(dev);

	/* restore PSC clock config */
	au_writel(wd->pm[0] | PSC_SEL_PS_AC97MODE, PSC_SEL(wd));
	au_sync();

	/* after this point the ac97 core will cold-reset the codec.
	 * During cold-reset the PSC is reinitialized and the last
	 * configuration set up in hw_params() is restored.
	 */
	return 0;
}

static struct dev_pm_ops au1xpscac97_pmops = {
	.suspend	= au1xpsc_ac97_drvsuspend,
	.resume		= au1xpsc_ac97_drvresume,
};

#define AU1XPSCAC97_PMOPS &au1xpscac97_pmops

#else

#define AU1XPSCAC97_PMOPS NULL

#endif
/* Platform driver glue; .pm is NULL when CONFIG_PM is disabled. */
static struct platform_driver au1xpsc_ac97_driver = {
	.driver	= {
		.name	= "au1xpsc_ac97",
		.owner	= THIS_MODULE,
		.pm	= AU1XPSCAC97_PMOPS,
	},
	.probe		= au1xpsc_ac97_drvprobe,
	.remove		= __devexit_p(au1xpsc_ac97_drvremove),
};
/* Module init: reset the singleton workdata, register the driver. */
static int __init au1xpsc_ac97_load(void)
{
	au1xpsc_ac97_workdata = NULL;
	return platform_driver_register(&au1xpsc_ac97_driver);
}

/* Module exit: unregister the platform driver. */
static void __exit au1xpsc_ac97_unload(void)
{
	platform_driver_unregister(&au1xpsc_ac97_driver);
}
module_init(au1xpsc_ac97_load);
module_exit(au1xpsc_ac97_unload);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Au12x0/Au1550 PSC AC97 ALSA ASoC audio driver");
MODULE_AUTHOR("Manuel Lauss");
| gpl-2.0 |
friedrich420/HTC-ONE-M7-AEL-Kernel-5.0.2 | mm/filemap_xip.c | 4370 | 11420 | /*
* linux/mm/filemap_xip.c
*
* Copyright (C) 2005 IBM Corporation
* Author: Carsten Otte <cotte@de.ibm.com>
*
* derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
*
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
/*
* We do use our own empty page to avoid interference with other users
* of ZERO_PAGE(), such as /dev/zero
*/
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;
/* called under xip_sparse_mutex */
/* Lazily allocate the single zeroed page shared by all sparse mappings. */
static struct page *xip_sparse_page(void)
{
	struct page *page = __xip_sparse_page;

	if (!page) {
		page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
		if (page)
			__xip_sparse_page = page;
	}
	return page;
}
/*
* This is a file read routine for execute in place files, and uses
* the mapping->a_ops->get_xip_mem() function for the actual low-level
* stuff.
*
* Note the struct file* is not used at all. It may be NULL.
*/
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	/* NOTE(review): error is size_t (unsigned) yet stores -EFAULT
	 * below; the value survives the conversion on return, but a
	 * signed type would be cleaner. */
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;	/* page containing pos */
	offset = pos & ~PAGE_CACHE_MASK;	/* byte offset within page */

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;	/* past EOF */
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;	/* nothing left in last page */
			}
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		if (!zero)
			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
		else
			/* sparse hole reads back as zeroes */
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		/* advance, carrying complete pages into index */
		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}
/*
 * read() entry point for XIP files: validate the user buffer, then
 * delegate to do_xip_mapping_read().
 */
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp, buf,
				   len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
/*
* __xip_unmap is invoked from xip_unmap and
* xip_write
*
* This function walks all vmas of the address_space and unmaps the
* __xip_sparse_page when found at pgoff.
*/
static void
__xip_unmap (struct address_space * mapping,
		     unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;

	/*
	 * Optimistic pass: walk unlocked first; if the seqcount shows a
	 * concurrent change we redo the walk under xip_sparse_mutex.
	 */
	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush_notify(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			/* the shared zero page must never be dirtied */
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			/* drop the mapping's reference on the zero page */
			page_cache_release(page);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		/* raced with a writer: repeat the walk under the mutex */
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}
/*
* xip_fault() is invoked via the vma operations vector for a
* mapped memory region to read in file data during a page fault.
*
* This function is derived from filemap_fault, but used for execute in place
*/
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	/* Fast path: the block is already allocated. */
	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

/* NOTE: the fast path above jumps INTO this branch at "found". */
found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/*
		 * err == -EBUSY is fine, we've raced against another thread
		 * that faulted-in the same page
		 */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		/* Read-only/private fault on a hole: map the shared zero
		 * page, guarded by the seqcount so writers notice us. */
		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			/* the hole got filled meanwhile: restart the fault */
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}
/* VMA operations for XIP mappings: faults resolve directly to media. */
static const struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};
/*
 * mmap() entry point for XIP files: install the XIP vm_ops and mark
 * the vma as mixed-map (PFNs without struct pages may be inserted).
 */
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
/*
 * Core write loop for XIP files: copy user data page-by-page straight
 * into the backing memory, allocating blocks for holes on the way.
 * Returns bytes written, or the first error when nothing was written.
 */
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space * mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode 	*inode = mapping->host;
	long		status = 0;
	size_t		bytes;
	ssize_t		written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* we allocate a new page unmap it */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		/* partial copy means the user buffer faulted */
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}
/**
 * xip_file_write - write(2) entry point for execute-in-place files
 * @filp: file to write to
 * @buf:  user buffer
 * @len:  number of bytes requested
 * @ppos: file position, updated on success
 *
 * Performs the generic pre-write work (access_ok, freeze check,
 * generic_write_checks, suid/caps stripping, mtime update) under
 * i_mutex and delegates the actual copy to __xip_file_write().
 */
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret=-EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	/* May shrink count / move pos (e.g. O_APPEND, rlimit). */
	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write (filp, buf, count, pos, ppos);

out_backing:
	current->backing_dev_info = NULL;
out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses get_xip_mem
 * to get the page instead of the page cache
 */
/**
 * xip_truncate_page - zero the tail of the page containing @from
 * @mapping: address space of the XIP file
 * @from:    new end-of-file offset
 *
 * Zeroes from @from up to the next block boundary so stale data past
 * the new EOF cannot leak.  Returns 0 on success (including when @from
 * is block-aligned or the page is a hole), negative errno otherwise.
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;	/* bytes to zero up to block end */

	/* Lookup only (create=0): truncation must not allocate blocks. */
	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
					  &xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
| gpl-2.0 |
Loller79/Solid_Kernel-GPROJ | arch/um/drivers/ssl.c | 4626 | 5341 | /*
* Copyright (C) 2000, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#include "linux/fs.h"
#include "linux/tty.h"
#include "linux/tty_driver.h"
#include "linux/major.h"
#include "linux/mm.h"
#include "linux/init.h"
#include "linux/console.h"
#include "asm/termbits.h"
#include "asm/irq.h"
#include "ssl.h"
#include "chan.h"
#include "init.h"
#include "irq_user.h"
#include "mconsole_kern.h"
/* Driver version number, printed by ssl_init() at boot. */
static const int ssl_version = 1;

/* Number of emulated serial lines provided by this driver. */
#define NR_PORTS 64
/* Announce callback for the channel layer: report which host device
 * was attached to serial line @dev. */
static void ssl_announce(char *dev_name, int dev)
{
	printk(KERN_INFO "Serial line %d assigned device '%s'\n", dev,
	       dev_name);
}
/* Almost const, except that xterm_title may be changed in an initcall
 * (ssl_init() substitutes a umid-qualified title when available). */
static struct chan_opts opts = {
	.announce 	= ssl_announce,
	.xterm_title	= "Serial Line #%d",
	.raw		= 1,	/* raw mode: no tty processing on the host side */
};
/* mconsole callbacks, defined after the data they operate on. */
static int ssl_config(char *str, char **error_out);
static int ssl_get_config(char *dev, char *str, int size, char **error_out);
static int ssl_remove(int n, char **error_out);

/* Const, except for .mc.list */
/* tty-driver description handed to register_lines(); provides the
 * "ttyS" device name, IRQ numbers and mconsole hooks for this driver. */
static struct line_driver driver = {
	.name 			= "UML serial line",
	.device_name 		= "ttyS",
	.major 			= TTY_MAJOR,
	.minor_start 		= 64,
	.type 		 	= TTY_DRIVER_TYPE_SERIAL,
	.subtype 	 	= 0,
	.read_irq 		= SSL_IRQ,
	.read_irq_name 		= "ssl",
	.write_irq 		= SSL_WRITE_IRQ,
	.write_irq_name 	= "ssl-write",
	.mc  = {
		.list		= LIST_HEAD_INIT(driver.mc.list),
		.name  		= "ssl",
		.config 	= ssl_config,
		.get_config 	= ssl_get_config,
		.id		= line_id,
		.remove 	= ssl_remove,
	},
};
/* The array is initialized by line_init, at initcall time. The
 * elements are locked individually as needed.
 */
/* Per-line channel description strings from the "ssl=" command line
 * (filled by ssl_chan_setup); NULL entries fall back to def_conf. */
static char *conf[NR_PORTS];
static char *def_conf = CONFIG_SSL_CHAN;
static struct line serial_lines[NR_PORTS];
/* mconsole "config" hook: delegate to the generic line layer. */
static int ssl_config(char *str, char **error_out)
{
	return line_config(serial_lines, ARRAY_SIZE(serial_lines), str, &opts,
			   error_out);
}
/* mconsole "get_config" hook: format line @dev's configuration into @str. */
static int ssl_get_config(char *dev, char *str, int size, char **error_out)
{
	return line_get_config(dev, serial_lines, ARRAY_SIZE(serial_lines), str,
			       size, error_out);
}
/* mconsole "remove" hook: detach the channel from line @n. */
static int ssl_remove(int n, char **error_out)
{
	return line_remove(serial_lines, ARRAY_SIZE(serial_lines), n,
			   error_out);
}
/* tty ->open: hand the line to the generic layer, logging any failure. */
static int ssl_open(struct tty_struct *tty, struct file *filp)
{
	int ret;

	ret = line_open(serial_lines, tty);
	if (ret)
		printk(KERN_ERR "Failed to open serial line %d, err = %d\n",
		       tty->index, ret);

	return ret;
}
/* Unimplemented tty operations, compiled out; kept as reminders of what a
 * complete implementation would provide.  A matching #if 0 in ssl_ops
 * references them. */
#if 0
static void ssl_flush_buffer(struct tty_struct *tty)
{
	return;
}

static void ssl_stop(struct tty_struct *tty)
{
	printk(KERN_ERR "Someone should implement ssl_stop\n");
}

static void ssl_start(struct tty_struct *tty)
{
	printk(KERN_ERR "Someone should implement ssl_start\n");
}

void ssl_hangup(struct tty_struct *tty)
{
}
#endif
/* tty operations: everything except open is handled by the generic
 * line_* helpers shared with the other UML console drivers. */
static const struct tty_operations ssl_ops = {
	.open 	 		= ssl_open,
	.close 	 		= line_close,
	.write 	 		= line_write,
	.put_char 		= line_put_char,
	.write_room		= line_write_room,
	.chars_in_buffer 	= line_chars_in_buffer,
	.flush_buffer 		= line_flush_buffer,
	.flush_chars 		= line_flush_chars,
	.set_termios 		= line_set_termios,
	.ioctl 	 		= line_ioctl,
	.throttle 		= line_throttle,
	.unthrottle 		= line_unthrottle,
#if 0
	.stop 	 		= ssl_stop,
	.start 	 		= ssl_start,
	.hangup 	 	= ssl_hangup,
#endif
};
/* Changed by ssl_init and referenced by ssl_exit, which are both serialized
 * by being an initcall and exitcall, respectively.
 */
static int ssl_init_done = 0;

/* Console ->write: push the message straight out of the line's output
 * channel.  The line lock is taken irqsave since console output can
 * happen in any context. */
static void ssl_console_write(struct console *c, const char *string,
			      unsigned len)
{
	struct line *line = &serial_lines[c->index];
	unsigned long flags;

	spin_lock_irqsave(&line->lock, flags);
	console_write_chan(line->chan_out, string, len);
	spin_unlock_irqrestore(&line->lock, flags);
}
/* Console ->device: map console @c to its tty driver and index, so
 * /dev/console can be redirected to the right ttyS line. */
static struct tty_driver *ssl_console_device(struct console *c, int *index)
{
	*index = c->index;
	return driver.driver;
}
/* Console ->setup: open the host channel for the chosen line.
 * @options (e.g. baud settings) is ignored - the host side decides. */
static int ssl_console_setup(struct console *co, char *options)
{
	struct line *line = &serial_lines[co->index];

	return console_open_chan(line, co);
}
/* No locking for register_console call - relies on single-threaded initcalls */
/* Console bound to the ttyS lines; CON_ANYTIME allows output before the
 * tty layer is fully up, CON_PRINTBUFFER replays the early log buffer. */
static struct console ssl_cons = {
	.name		= "ttyS",
	.write		= ssl_console_write,
	.device		= ssl_console_device,
	.setup		= ssl_console_setup,
	.flags		= CON_PRINTBUFFER|CON_ANYTIME,
	.index		= -1,	/* -1: first registered line becomes console */
};
/*
 * Register the "ttyS" tty driver, attach every line to its configured
 * (or default) host channel, and register the console.  Runs as a late
 * initcall.
 */
static int ssl_init(void)
{
	char *new_title;
	int err;
	int i;

	printk(KERN_INFO "Initializing software serial port version %d\n",
	       ssl_version);

	err = register_lines(&driver, &ssl_ops, serial_lines,
			     ARRAY_SIZE(serial_lines));
	if (err)
		return err;

	/* add_xterm_umid() presumably embeds the UML instance id in the
	 * xterm title; NULL means "keep the plain title".  TODO confirm. */
	new_title = add_xterm_umid(opts.xterm_title);
	if (new_title != NULL)
		opts.xterm_title = new_title;

	/* Wire each line to its "ssl=" configuration, or the default. */
	for (i = 0; i < NR_PORTS; i++) {
		char *error;
		char *s = conf[i];
		if (!s)
			s = def_conf;
		if (setup_one_line(serial_lines, i, s, &opts, &error))
			printk(KERN_ERR "setup_one_line failed for "
			       "device %d : %s\n", i, error);
	}

	ssl_init_done = 1;
	register_console(&ssl_cons);
	return 0;
}
late_initcall(ssl_init);
/* Exitcall: close every host channel, but only if ssl_init() completed. */
static void ssl_exit(void)
{
	if (!ssl_init_done)
		return;
	close_lines(serial_lines, ARRAY_SIZE(serial_lines));
}
__uml_exitcall(ssl_exit);
/* Parse an "ssl=<n>,<channel>" kernel command line option into conf[]
 * (or def_conf for the catch-all form).  Returns 1: option consumed. */
static int ssl_chan_setup(char *str)
{
	line_setup(conf, NR_PORTS, &def_conf, str, "serial line");
	return 1;
}

__setup("ssl", ssl_chan_setup);
__channel_help(ssl_chan_setup, "ssl");
| gpl-2.0 |
Metallice/GTab2-Kernel-TW | lib/raid6/mktables.c | 4626 | 2971 | /* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2 or (at your
* option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
/*
* mktables.c
*
* Make RAID-6 tables. This is a host user space program to be run at
* compile time.
*/
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <stdlib.h>
#include <time.h>
/*
 * Multiply two elements of GF(2^8) with the RAID-6 generator polynomial
 * x^8 + x^4 + x^3 + x^2 + 1 (reduction constant 0x1d).
 *
 * Classic shift-and-add ("Russian peasant") multiplication: for each set
 * bit of b, xor the current multiple of a into the product, then double
 * a in the field.
 */
static uint8_t gfmul(uint8_t a, uint8_t b)
{
	uint8_t product = 0;

	for (; b != 0; b >>= 1) {
		if (b & 1)
			product ^= a;
		/* a *= x, reducing modulo the field polynomial on overflow */
		a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
	}
	return product;
}
/*
 * Raise a GF(2^8) element to an integer power by square-and-multiply.
 * The exponent is first reduced modulo 255, the order of the field's
 * multiplicative group, and normalized into [0, 255).
 */
static uint8_t gfpow(uint8_t a, int b)
{
	uint8_t result = 1;

	b %= 255;
	if (b < 0)
		b += 255;

	for (; b != 0; b >>= 1) {
		if (b & 1)
			result = gfmul(result, a);
		a = gfmul(a, a);
	}
	return result;
}
/*
 * Emit lib/raid6/tables.c on stdout: the GF(2^8) multiplication table,
 * power-of-2 (exponent) table, inverse table, and exponent-xor-inverse
 * table used by the RAID-6 recovery code.  Each table is followed by an
 * EXPORT_SYMBOL guarded by __KERNEL__ so the same file serves the
 * kernel build and user-space test harness.
 *
 * NOTE(review): argc/argv are unused; this generator takes no options.
 */
int main(int argc, char *argv[])
{
	int i, j, k;
	uint8_t v;
	uint8_t exptbl[256], invtbl[256];

	printf("#include <linux/raid/pq.h>\n");

	/* Compute multiplication table */
	printf("\nconst u8  __attribute__((aligned(256)))\n"
		"raid6_gfmul[256][256] =\n"
		"{\n");
	for (i = 0; i < 256; i++) {
		printf("\t{\n");
		for (j = 0; j < 256; j += 8) {
			printf("\t\t");
			for (k = 0; k < 8; k++)
				printf("0x%02x,%c", gfmul(i, j + k),
				       (k == 7) ? '\n' : ' ');
		}
		printf("\t},\n");
	}
	printf("};\n");
	printf("#ifdef __KERNEL__\n");
	printf("EXPORT_SYMBOL(raid6_gfmul);\n");
	printf("#endif\n");

	/* Compute power-of-2 table (exponent) */
	v = 1;
	printf("\nconst u8 __attribute__((aligned(256)))\n"
	       "raid6_gfexp[256] =\n" "{\n");
	for (i = 0; i < 256; i += 8) {
		printf("\t");
		for (j = 0; j < 8; j++) {
			exptbl[i + j] = v;	/* remembered for the gfexi table below */
			printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
			v = gfmul(v, 2);
			if (v == 1)
				v = 0;	/* For entry 255, not a real entry */
		}
	}
	printf("};\n");
	printf("#ifdef __KERNEL__\n");
	printf("EXPORT_SYMBOL(raid6_gfexp);\n");
	printf("#endif\n");

	/* Compute inverse table x^-1 == x^254 */
	printf("\nconst u8 __attribute__((aligned(256)))\n"
	       "raid6_gfinv[256] =\n" "{\n");
	for (i = 0; i < 256; i += 8) {
		printf("\t");
		for (j = 0; j < 8; j++) {
			invtbl[i + j] = v = gfpow(i + j, 254);
			printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
		}
	}
	printf("};\n");
	printf("#ifdef __KERNEL__\n");
	printf("EXPORT_SYMBOL(raid6_gfinv);\n");
	printf("#endif\n");

	/* Compute inv(2^x + 1) (exponent-xor-inverse) table */
	printf("\nconst u8 __attribute__((aligned(256)))\n"
	       "raid6_gfexi[256] =\n" "{\n");
	for (i = 0; i < 256; i += 8) {
		printf("\t");
		for (j = 0; j < 8; j++)
			printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1],
			       (j == 7) ? '\n' : ' ');
	}
	printf("};\n");
	printf("#ifdef __KERNEL__\n");
	printf("EXPORT_SYMBOL(raid6_gfexi);\n");
	printf("#endif\n");

	return 0;
}
| gpl-2.0 |
Stuxnet-Kernel/kernel_mako | drivers/mtd/maps/solutionengine.c | 5138 | 3079 | /*
* Flash and EPROM on Hitachi Solution Engine and similar boards.
*
* (C) 2001 Red Hat, Inc.
*
* GPL'd
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/errno.h>
/* MTD devices created by init_soleng_maps(); eprom_mtd stays NULL when
 * ROM probing fails (it is optional, flash is not). */
static struct mtd_info *flash_mtd;
static struct mtd_info *eprom_mtd;

/* 4MB windows with a bankwidth of 4; .phys/.virt are filled in at probe
 * time since flash and EPROM may be swapped between the two locations. */
struct map_info soleng_eprom_map = {
	.name = "Solution Engine EPROM",
	.size = 0x400000,
	.bankwidth = 4,
};

struct map_info soleng_flash_map = {
	.name = "Solution Engine FLASH",
	.size = 0x400000,
	.bankwidth = 4,
};

/* Partition parsers tried, in order, before the static partition table. */
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
#ifdef CONFIG_MTD_SUPERH_RESERVE
/* Static fallback partitioning: a read-only boot area of the configured
 * size, with the remainder available as a writable filesystem. */
static struct mtd_partition superh_se_partitions[] = {
	/* Reserved for boot code, read-only */
	{
		.name = "flash_boot",
		.offset = 0x00000000,
		.size = CONFIG_MTD_SUPERH_RESERVE,
		.mask_flags = MTD_WRITEABLE,
	},
	/* All else is writable (e.g. JFFS) */
	{
		.name = "Flash FS",
		.offset = MTDPART_OFS_NXTBLK,
		.size = MTDPART_SIZ_FULL,
	}
};
#define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions)
#else
/* Without a reserved boot area there is no static table at all. */
#define superh_se_partitions NULL
#define NUM_PARTITIONS 0
#endif /* CONFIG_MTD_SUPERH_RESERVE */
/*
 * Probe for CFI flash at physical 0 and, failing that, at 0x01000000
 * (the flash and EPROM windows can be wired either way round); the EPROM
 * takes whichever window the flash does not.  Registers the flash with
 * parsed (or static fallback) partitions and the EPROM, if present, as a
 * plain ROM device.
 *
 * Returns 0 on success, -ENXIO when no flash is found at either address.
 */
static int __init init_soleng_maps(void)
{
	/* First probe at offset 0 */
	soleng_flash_map.phys = 0;
	soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
	soleng_eprom_map.phys = 0x01000000;
	soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0x01000000);
	simple_map_init(&soleng_eprom_map);
	simple_map_init(&soleng_flash_map);

	printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
	flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
	if (!flash_mtd) {
		/* Not there. Try swapping */
		printk(KERN_NOTICE "Probing for flash chips at 0x01000000:\n");
		soleng_flash_map.phys = 0x01000000;
		/* Cast to __iomem as in the first probe above; the bare
		 * P2SEGADDR/P1SEGADDR values trip sparse address-space
		 * checks otherwise. */
		soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0x01000000);
		soleng_eprom_map.phys = 0;
		soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0);
		flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
		if (!flash_mtd) {
			/* Eep. */
			printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
			return -ENXIO;
		}
	}
	printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n",
	       soleng_flash_map.phys & 0x1fffffff,
	       soleng_eprom_map.phys & 0x1fffffff);
	flash_mtd->owner = THIS_MODULE;

	/* EPROM is optional: register it only if the ROM map probes. */
	eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
	if (eprom_mtd) {
		eprom_mtd->owner = THIS_MODULE;
		mtd_device_register(eprom_mtd, NULL, 0);
	}

	mtd_device_parse_register(flash_mtd, probes, NULL,
				  superh_se_partitions, NUM_PARTITIONS);

	return 0;
}
/* Module teardown: unregister and destroy whatever init_soleng_maps()
 * created.  flash_mtd is always valid here, since module exit only runs
 * after a successful init; eprom_mtd may be NULL (it is optional). */
static void __exit cleanup_soleng_maps(void)
{
	if (eprom_mtd) {
		mtd_device_unregister(eprom_mtd);
		map_destroy(eprom_mtd);
	}

	mtd_device_unregister(flash_mtd);
	map_destroy(flash_mtd);
}

module_init(init_soleng_maps);
module_exit(cleanup_soleng_maps);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("MTD map driver for Hitachi SolutionEngine (and similar) boards");
| gpl-2.0 |
sub-b/android_kernel_samsung_matissewifi-old | arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c | 9490 | 70621 | /*
* SH7723 Pinmux
*
* Copyright (C) 2008 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <cpu/sh7723.h>
/*
 * Flat enum of every pinmux state on the SH7723.  The *_BEGIN/*_END
 * sentinels partition it into the ranges the SH pinmux core uses to
 * classify a value: per-pin data (level) bits, input selects, pulled-up
 * input selects, output selects, function selects (including the PS*
 * function-select field values), and function "marks".
 *
 * NOTE(review): several ports are sparse by design in the listings below
 * (e.g. PTJ has no bits 6 and 4, PTQ only bits 3..0, PTG has no inputs) -
 * presumably matching the pins the silicon actually exposes; verify
 * against the SH7723 hardware manual before "fixing" any apparent gap.
 */
enum {
	PINMUX_RESERVED = 0,

	/* Pin data (level) registers, ports A..Z */
	PINMUX_DATA_BEGIN,
	PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
	PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
	PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
	PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
	PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
	PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
	PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
	PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
	PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
	PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
	PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
	PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
	PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
	PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
	PTJ7_DATA, PTJ5_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
	PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
	PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
	PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
	PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
	PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
	PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
	PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
	PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
	PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
	PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
	PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
	PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
	PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
	PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
	PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
	PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
	PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
	PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
	PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
	PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
	PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
	PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
	PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
	PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
	PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
	PINMUX_DATA_END,

	/* Input direction selectors (only pins that can be inputs) */
	PINMUX_INPUT_BEGIN,
	PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
	PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
	PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
	PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
	PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
	PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
	PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
	PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
	PTE5_IN, PTE4_IN, PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
	PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
	PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
	PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
	PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
	PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
	PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
	PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
	PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
	PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
	PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
	PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
	PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
	PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
	PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
	PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
	PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
	PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
	PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
	PTT5_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
	PTU5_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
	PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
	PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
	PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
	PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
	PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
	PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
	PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
	PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
	PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
	PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
	PINMUX_INPUT_END,

	/* Inputs with an internal pull-up available */
	PINMUX_INPUT_PULLUP_BEGIN,
	PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
	PTB2_IN_PU, PTB1_IN_PU,
	PTR2_IN_PU,
	PINMUX_INPUT_PULLUP_END,

	/* Output direction selectors */
	PINMUX_OUTPUT_BEGIN,
	PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
	PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
	PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
	PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
	PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
	PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
	PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
	PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
	PTE5_OUT, PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
	PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
	PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
	PTG5_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
	PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
	PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
	PTJ7_OUT, PTJ5_OUT, PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
	PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
	PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
	PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
	PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
	PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
	PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
	PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
	PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
	PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
	PTR1_OUT, PTR0_OUT,
	PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
	PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
	PTT5_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
	PTU5_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
	PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
	PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
	PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
	PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
	PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
	PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
	PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
	PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
	PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
	PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
	PINMUX_OUTPUT_END,

	/* Alternate-function selectors per pin, plus the PS* field values */
	PINMUX_FUNCTION_BEGIN,
	PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
	PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
	PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
	PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
	PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
	PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
	PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
	PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
	PTE5_FN, PTE4_FN, PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
	PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
	PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
	PTG5_FN, PTG4_FN, PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
	PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
	PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
	PTJ7_FN, PTJ5_FN, PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
	PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
	PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
	PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
	PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
	PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
	PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
	PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
	PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
	PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
	PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
	PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
	PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
	PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
	PTT5_FN, PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
	PTU5_FN, PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
	PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
	PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
	PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
	PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
	PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
	PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
	PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
	PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
	PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
	PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,

	/* Values of the shared PS (peripheral select) register fields */
	PSA15_PSA14_FN1, PSA15_PSA14_FN2,
	PSA13_PSA12_FN1, PSA13_PSA12_FN2,
	PSA11_PSA10_FN1, PSA11_PSA10_FN2,
	PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3,
	PSA3_PSA2_FN1, PSA3_PSA2_FN2,
	PSB15_PSB14_FN1, PSB15_PSB14_FN2,
	PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS,
	PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3,
	PSB7_PSB6_FN1, PSB7_PSB6_FN2,
	PSB5_PSB4_FN1, PSB5_PSB4_FN2,
	PSB3_PSB2_FN1, PSB3_PSB2_FN2,
	PSC15_PSC14_FN1, PSC15_PSC14_FN2,
	PSC13_PSC12_FN1, PSC13_PSC12_FN2,
	PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3,
	PSC9_PSC8_FN1, PSC9_PSC8_FN2,
	PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3,
	PSD15_PSD14_FN1, PSD15_PSD14_FN2,
	PSD13_PSD12_FN1, PSD13_PSD12_FN2,
	PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3,
	PSD9_PSD8_FN1, PSD9_PSD8_FN2,
	PSD7_PSD6_FN1, PSD7_PSD6_FN2,
	PSD5_PSD4_FN1, PSD5_PSD4_FN2,
	PSD3_PSD2_FN1, PSD3_PSD2_FN2,
	PSD1_PSD0_FN1, PSD1_PSD0_FN2,
	PINMUX_FUNCTION_END,

	/* Named peripheral functions ("marks"), grouped by peripheral:
	 * SCIF, VIO, LCDC, IRQ, AUD, SDHI, SIU, IrDA, DV, keypad, MSIOF,
	 * TSIF, NAND/FLCTL, DMA, ADC, status, TPU, external bus and IDE. */
	PINMUX_MARK_BEGIN,
	SCIF0_PTT_TXD_MARK, SCIF0_PTT_RXD_MARK,
	SCIF0_PTT_SCK_MARK, SCIF0_PTU_TXD_MARK,
	SCIF0_PTU_RXD_MARK, SCIF0_PTU_SCK_MARK,
	SCIF1_PTS_TXD_MARK, SCIF1_PTS_RXD_MARK,
	SCIF1_PTS_SCK_MARK, SCIF1_PTV_TXD_MARK,
	SCIF1_PTV_RXD_MARK, SCIF1_PTV_SCK_MARK,
	SCIF2_PTT_TXD_MARK, SCIF2_PTT_RXD_MARK,
	SCIF2_PTT_SCK_MARK, SCIF2_PTU_TXD_MARK,
	SCIF2_PTU_RXD_MARK, SCIF2_PTU_SCK_MARK,
	SCIF3_PTS_TXD_MARK, SCIF3_PTS_RXD_MARK,
	SCIF3_PTS_SCK_MARK, SCIF3_PTS_RTS_MARK,
	SCIF3_PTS_CTS_MARK, SCIF3_PTV_TXD_MARK,
	SCIF3_PTV_RXD_MARK, SCIF3_PTV_SCK_MARK,
	SCIF3_PTV_RTS_MARK, SCIF3_PTV_CTS_MARK,
	SCIF4_PTE_TXD_MARK, SCIF4_PTE_RXD_MARK,
	SCIF4_PTE_SCK_MARK, SCIF4_PTN_TXD_MARK,
	SCIF4_PTN_RXD_MARK, SCIF4_PTN_SCK_MARK,
	SCIF5_PTE_TXD_MARK, SCIF5_PTE_RXD_MARK,
	SCIF5_PTE_SCK_MARK, SCIF5_PTN_TXD_MARK,
	SCIF5_PTN_RXD_MARK, SCIF5_PTN_SCK_MARK,
	VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK,
	VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK,
	VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK,
	VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK,
	VIO_FLD_MARK, VIO_CKO_MARK,
	VIO_VD1_MARK, VIO_HD1_MARK, VIO_CLK1_MARK,
	VIO_HD2_MARK, VIO_VD2_MARK, VIO_CLK2_MARK,
	LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK,
	LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK,
	LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK,
	LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK,
	LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK,
	LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK,
	LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK,
	LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK,
	LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK,
	LCDLCLK_PTR_MARK, LCDLCLK_PTW_MARK,
	IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK,
	IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK,
	AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
	AUDCK_MARK, AUDSYNC_MARK,
	SDHI0CD_PTD_MARK, SDHI0WP_PTD_MARK,
	SDHI0D3_PTD_MARK, SDHI0D2_PTD_MARK,
	SDHI0D1_PTD_MARK, SDHI0D0_PTD_MARK,
	SDHI0CMD_PTD_MARK, SDHI0CLK_PTD_MARK,
	SDHI0CD_PTS_MARK, SDHI0WP_PTS_MARK,
	SDHI0D3_PTS_MARK, SDHI0D2_PTS_MARK,
	SDHI0D1_PTS_MARK, SDHI0D0_PTS_MARK,
	SDHI0CMD_PTS_MARK, SDHI0CLK_PTS_MARK,
	SDHI1CD_MARK, SDHI1WP_MARK, SDHI1D3_MARK, SDHI1D2_MARK,
	SDHI1D1_MARK, SDHI1D0_MARK, SDHI1CMD_MARK, SDHI1CLK_MARK,
	SIUAFCK_MARK, SIUAILR_MARK, SIUAIBT_MARK, SIUAISLD_MARK,
	SIUAOLR_MARK, SIUAOBT_MARK, SIUAOSLD_MARK, SIUAMCK_MARK,
	SIUAISPD_MARK, SIUAOSPD_MARK,
	SIUBFCK_MARK, SIUBILR_MARK, SIUBIBT_MARK, SIUBISLD_MARK,
	SIUBOLR_MARK, SIUBOBT_MARK, SIUBOSLD_MARK, SIUBMCK_MARK,
	IRDA_IN_MARK, IRDA_OUT_MARK,
	DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK,
	DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK,
	DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK,
	DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK,
	DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK,
	KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK,
	KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
	KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK,
	MSIOF0_PTF_TXD_MARK, MSIOF0_PTF_RXD_MARK, MSIOF0_PTF_MCK_MARK,
	MSIOF0_PTF_TSYNC_MARK, MSIOF0_PTF_TSCK_MARK, MSIOF0_PTF_RSYNC_MARK,
	MSIOF0_PTF_RSCK_MARK, MSIOF0_PTF_SS1_MARK, MSIOF0_PTF_SS2_MARK,
	MSIOF0_PTT_TXD_MARK, MSIOF0_PTT_RXD_MARK, MSIOF0_PTX_MCK_MARK,
	MSIOF0_PTT_TSYNC_MARK, MSIOF0_PTT_TSCK_MARK, MSIOF0_PTT_RSYNC_MARK,
	MSIOF0_PTT_RSCK_MARK, MSIOF0_PTT_SS1_MARK, MSIOF0_PTT_SS2_MARK,
	MSIOF1_TXD_MARK, MSIOF1_RXD_MARK, MSIOF1_MCK_MARK,
	MSIOF1_TSYNC_MARK, MSIOF1_TSCK_MARK, MSIOF1_RSYNC_MARK,
	MSIOF1_RSCK_MARK, MSIOF1_SS1_MARK, MSIOF1_SS2_MARK,
	TS0_SDAT_MARK, TS0_SCK_MARK, TS0_SDEN_MARK, TS0_SPSYNC_MARK,
	FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
	NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK,
	FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK,
	DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK,
	AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK,
	STATUS0_MARK, PDSTATUS_MARK,
	TPUTO3_MARK, TPUTO2_MARK, TPUTO1_MARK, TPUTO0_MARK,
	D31_MARK, D30_MARK, D29_MARK, D28_MARK,
	D27_MARK, D26_MARK, D25_MARK, D24_MARK,
	D23_MARK, D22_MARK, D21_MARK, D20_MARK,
	D19_MARK, D18_MARK, D17_MARK, D16_MARK,
	IOIS16_MARK, WAIT_MARK, BS_MARK,
	A25_MARK, A24_MARK, A23_MARK, A22_MARK,
	CS6B_CE1B_MARK, CS6A_CE2B_MARK,
	CS5B_CE1A_MARK, CS5A_CE2A_MARK,
	WE3_ICIOWR_MARK, WE2_ICIORD_MARK,
	IDED15_MARK, IDED14_MARK, IDED13_MARK, IDED12_MARK,
	IDED11_MARK, IDED10_MARK, IDED9_MARK, IDED8_MARK,
	IDED7_MARK, IDED6_MARK, IDED5_MARK, IDED4_MARK,
	IDED3_MARK, IDED2_MARK, IDED1_MARK, IDED0_MARK,
	DIRECTION_MARK, EXBUF_ENB_MARK, IDERST_MARK, IODACK_MARK,
	IODREQ_MARK, IDEIORDY_MARK, IDEINT_MARK, IDEIOWR_MARK,
	IDEIORD_MARK, IDECS1_MARK, IDECS0_MARK, IDEA2_MARK,
	IDEA1_MARK, IDEA0_MARK,
	PINMUX_MARK_END,
};
/*
 * Pin function selection data.
 *
 * Each PINMUX_DATA() entry maps a pin-data enum value (for GPIO modes) or a
 * function mark (for peripheral modes) to the mode-select register settings
 * that activate it.  Entries are grouped per port (PTA..PTZ): the GPIO
 * in/out(/pull-up) modes first, then the peripheral-function assignments.
 *
 * Fixes relative to the previous table (verify against the SH7722 hardware
 * manual, PFC chapter):
 *  - PTK1 FN2 is MSIOF1_TXD, not a duplicate of MSIOF1_RXD (RXD is PTK4;
 *    MSIOF1_TXD_MARK was declared in the enum but never assigned).
 *  - PTX5/PTX4 are DACK0/DREQ0, not duplicates of DACK1/DREQ1 (those are
 *    PTX7/PTX6; DACK0_MARK/DREQ0_MARK were declared but never assigned).
 */
static pinmux_enum_t pinmux_data[] = {
	/* PTA GPIO */
	PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
	PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
	PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
	PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
	PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
	PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
	PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
	PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),

	/* PTB GPIO */
	PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
	PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
	PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
	PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
	PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
	PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
	PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
	PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),

	/* PTC GPIO */
	PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
	PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
	PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
	PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
	PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
	PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
	PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
	PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),

	/* PTD GPIO */
	PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
	PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
	PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
	PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
	PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
	PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
	PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
	PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),

	/* PTE GPIO */
	PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
	PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
	PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
	PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
	PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
	PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),

	/* PTF GPIO */
	PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
	PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
	PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
	PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
	PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
	PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
	PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
	PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),

	/* PTG GPIO: output-only port */
	PINMUX_DATA(PTG5_DATA, PTG5_OUT),
	PINMUX_DATA(PTG4_DATA, PTG4_OUT),
	PINMUX_DATA(PTG3_DATA, PTG3_OUT),
	PINMUX_DATA(PTG2_DATA, PTG2_OUT),
	PINMUX_DATA(PTG1_DATA, PTG1_OUT),
	PINMUX_DATA(PTG0_DATA, PTG0_OUT),

	/* PTH GPIO */
	PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
	PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
	PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
	PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
	PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
	PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
	PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
	PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),

	/* PTJ GPIO: sparse port, PTJ7/PTJ5 are output-only */
	PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
	PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
	PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
	PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
	PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
	PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),

	/* PTK GPIO */
	PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
	PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
	PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
	PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
	PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
	PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
	PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
	PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),

	/* PTL GPIO */
	PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT),
	PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
	PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
	PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
	PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
	PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
	PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
	PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),

	/* PTM GPIO */
	PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT),
	PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
	PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
	PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
	PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
	PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
	PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
	PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),

	/* PTN GPIO */
	PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT),
	PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
	PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
	PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
	PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
	PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
	PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
	PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),

	/* PTQ GPIO: input-only port */
	PINMUX_DATA(PTQ3_DATA, PTQ3_IN),
	PINMUX_DATA(PTQ2_DATA, PTQ2_IN),
	PINMUX_DATA(PTQ1_DATA, PTQ1_IN),
	PINMUX_DATA(PTQ0_DATA, PTQ0_IN),

	/* PTR GPIO: PTR3 input-only, PTR2 input(/pull-up)-only */
	PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
	PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
	PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
	PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
	PINMUX_DATA(PTR3_DATA, PTR3_IN),
	PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
	PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
	PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),

	/* PTS GPIO */
	PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
	PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
	PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
	PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
	PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
	PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
	PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
	PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),

	/* PTT GPIO */
	PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
	PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
	PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
	PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
	PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
	PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),

	/* PTU GPIO */
	PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
	PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
	PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
	PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
	PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
	PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),

	/* PTV GPIO */
	PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
	PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
	PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
	PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
	PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
	PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
	PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
	PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),

	/* PTW GPIO */
	PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
	PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
	PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
	PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
	PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
	PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
	PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
	PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),

	/* PTX GPIO */
	PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
	PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
	PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
	PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
	PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
	PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
	PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
	PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),

	/* PTY GPIO */
	PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
	PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
	PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
	PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
	PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
	PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
	PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
	PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),

	/* PTZ GPIO */
	PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
	PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
	PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
	PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
	PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
	PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
	PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
	PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),

	/* PTA FN */
	PINMUX_DATA(D23_MARK, PSA15_PSA14_FN1, PTA7_FN),
	PINMUX_DATA(KEYOUT2_MARK, PSA15_PSA14_FN2, PTA7_FN),
	PINMUX_DATA(D22_MARK, PSA15_PSA14_FN1, PTA6_FN),
	PINMUX_DATA(KEYOUT1_MARK, PSA15_PSA14_FN2, PTA6_FN),
	PINMUX_DATA(D21_MARK, PSA15_PSA14_FN1, PTA5_FN),
	PINMUX_DATA(KEYOUT0_MARK, PSA15_PSA14_FN2, PTA5_FN),
	PINMUX_DATA(D20_MARK, PSA15_PSA14_FN1, PTA4_FN),
	PINMUX_DATA(KEYIN4_MARK, PSA15_PSA14_FN2, PTA4_FN),
	PINMUX_DATA(D19_MARK, PSA15_PSA14_FN1, PTA3_FN),
	PINMUX_DATA(KEYIN3_MARK, PSA15_PSA14_FN2, PTA3_FN),
	PINMUX_DATA(D18_MARK, PSA15_PSA14_FN1, PTA2_FN),
	PINMUX_DATA(KEYIN2_MARK, PSA15_PSA14_FN2, PTA2_FN),
	PINMUX_DATA(D17_MARK, PSA15_PSA14_FN1, PTA1_FN),
	PINMUX_DATA(KEYIN1_MARK, PSA15_PSA14_FN2, PTA1_FN),
	PINMUX_DATA(D16_MARK, PSA15_PSA14_FN1, PTA0_FN),
	PINMUX_DATA(KEYIN0_MARK, PSA15_PSA14_FN2, PTA0_FN),

	/* PTB FN */
	PINMUX_DATA(D31_MARK, PTB7_FN),
	PINMUX_DATA(D30_MARK, PTB6_FN),
	PINMUX_DATA(D29_MARK, PTB5_FN),
	PINMUX_DATA(D28_MARK, PTB4_FN),
	PINMUX_DATA(D27_MARK, PTB3_FN),
	PINMUX_DATA(D26_MARK, PSA15_PSA14_FN1, PTB2_FN),
	PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_PSA14_FN2, PTB2_FN),
	PINMUX_DATA(D25_MARK, PSA15_PSA14_FN1, PTB1_FN),
	PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_PSA14_FN2, PTB1_FN),
	PINMUX_DATA(D24_MARK, PSA15_PSA14_FN1, PTB0_FN),
	PINMUX_DATA(KEYOUT3_MARK, PSA15_PSA14_FN2, PTB0_FN),

	/* PTC FN */
	PINMUX_DATA(IDED15_MARK, PSA11_PSA10_FN1, PTC7_FN),
	PINMUX_DATA(SDHI1CD_MARK, PSA11_PSA10_FN2, PTC7_FN),
	PINMUX_DATA(IDED14_MARK, PSA11_PSA10_FN1, PTC6_FN),
	PINMUX_DATA(SDHI1WP_MARK, PSA11_PSA10_FN2, PTC6_FN),
	PINMUX_DATA(IDED13_MARK, PSA11_PSA10_FN1, PTC5_FN),
	PINMUX_DATA(SDHI1D3_MARK, PSA11_PSA10_FN2, PTC5_FN),
	PINMUX_DATA(IDED12_MARK, PSA11_PSA10_FN1, PTC4_FN),
	PINMUX_DATA(SDHI1D2_MARK, PSA11_PSA10_FN2, PTC4_FN),
	PINMUX_DATA(IDED11_MARK, PSA11_PSA10_FN1, PTC3_FN),
	PINMUX_DATA(SDHI1D1_MARK, PSA11_PSA10_FN2, PTC3_FN),
	PINMUX_DATA(IDED10_MARK, PSA11_PSA10_FN1, PTC2_FN),
	PINMUX_DATA(SDHI1D0_MARK, PSA11_PSA10_FN2, PTC2_FN),
	PINMUX_DATA(IDED9_MARK, PSA11_PSA10_FN1, PTC1_FN),
	PINMUX_DATA(SDHI1CMD_MARK, PSA11_PSA10_FN2, PTC1_FN),
	PINMUX_DATA(IDED8_MARK, PSA11_PSA10_FN1, PTC0_FN),
	PINMUX_DATA(SDHI1CLK_MARK, PSA11_PSA10_FN2, PTC0_FN),

	/* PTD FN */
	PINMUX_DATA(IDED7_MARK, PSA11_PSA10_FN1, PTD7_FN),
	PINMUX_DATA(SDHI0CD_PTD_MARK, PSA11_PSA10_FN2, PTD7_FN),
	PINMUX_DATA(IDED6_MARK, PSA11_PSA10_FN1, PTD6_FN),
	PINMUX_DATA(SDHI0WP_PTD_MARK, PSA11_PSA10_FN2, PTD6_FN),
	PINMUX_DATA(IDED5_MARK, PSA11_PSA10_FN1, PTD5_FN),
	PINMUX_DATA(SDHI0D3_PTD_MARK, PSA11_PSA10_FN2, PTD5_FN),
	PINMUX_DATA(IDED4_MARK, PSA11_PSA10_FN1, PTD4_FN),
	PINMUX_DATA(SDHI0D2_PTD_MARK, PSA11_PSA10_FN2, PTD4_FN),
	PINMUX_DATA(IDED3_MARK, PSA11_PSA10_FN1, PTD3_FN),
	PINMUX_DATA(SDHI0D1_PTD_MARK, PSA11_PSA10_FN2, PTD3_FN),
	PINMUX_DATA(IDED2_MARK, PSA11_PSA10_FN1, PTD2_FN),
	PINMUX_DATA(SDHI0D0_PTD_MARK, PSA11_PSA10_FN2, PTD2_FN),
	PINMUX_DATA(IDED1_MARK, PSA11_PSA10_FN1, PTD1_FN),
	PINMUX_DATA(SDHI0CMD_PTD_MARK, PSA11_PSA10_FN2, PTD1_FN),
	PINMUX_DATA(IDED0_MARK, PSA11_PSA10_FN1, PTD0_FN),
	PINMUX_DATA(SDHI0CLK_PTD_MARK, PSA11_PSA10_FN2, PTD0_FN),

	/* PTE FN */
	PINMUX_DATA(DIRECTION_MARK, PSA11_PSA10_FN1, PTE5_FN),
	PINMUX_DATA(SCIF5_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE5_FN),
	PINMUX_DATA(EXBUF_ENB_MARK, PSA11_PSA10_FN1, PTE4_FN),
	PINMUX_DATA(SCIF5_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE4_FN),
	PINMUX_DATA(IDERST_MARK, PSA11_PSA10_FN1, PTE3_FN),
	PINMUX_DATA(SCIF5_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE3_FN),
	PINMUX_DATA(IODACK_MARK, PSA11_PSA10_FN1, PTE2_FN),
	PINMUX_DATA(SCIF4_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE2_FN),
	PINMUX_DATA(IODREQ_MARK, PSA11_PSA10_FN1, PTE1_FN),
	PINMUX_DATA(SCIF4_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE1_FN),
	PINMUX_DATA(IDEIORDY_MARK, PSA11_PSA10_FN1, PTE0_FN),
	PINMUX_DATA(SCIF4_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE0_FN),

	/* PTF FN */
	PINMUX_DATA(IDEINT_MARK, PTF7_FN),
	PINMUX_DATA(IDEIOWR_MARK, PSA5_PSA4_FN1, PTF6_FN),
	PINMUX_DATA(MSIOF0_PTF_SS2_MARK, PSA5_PSA4_FN2, PTF6_FN),
	PINMUX_DATA(MSIOF0_PTF_RSYNC_MARK, PSA5_PSA4_FN3, PTF6_FN),
	PINMUX_DATA(IDEIORD_MARK, PSA5_PSA4_FN1, PTF5_FN),
	PINMUX_DATA(MSIOF0_PTF_SS1_MARK, PSA5_PSA4_FN2, PTF5_FN),
	PINMUX_DATA(MSIOF0_PTF_RSCK_MARK, PSA5_PSA4_FN3, PTF5_FN),
	PINMUX_DATA(IDECS1_MARK, PSA11_PSA10_FN1, PTF4_FN),
	PINMUX_DATA(MSIOF0_PTF_TSYNC_MARK, PSA11_PSA10_FN2, PTF4_FN),
	PINMUX_DATA(IDECS0_MARK, PSA11_PSA10_FN1, PTF3_FN),
	PINMUX_DATA(MSIOF0_PTF_TSCK_MARK, PSA11_PSA10_FN2, PTF3_FN),
	PINMUX_DATA(IDEA2_MARK, PSA11_PSA10_FN1, PTF2_FN),
	PINMUX_DATA(MSIOF0_PTF_RXD_MARK, PSA11_PSA10_FN2, PTF2_FN),
	PINMUX_DATA(IDEA1_MARK, PSA11_PSA10_FN1, PTF1_FN),
	PINMUX_DATA(MSIOF0_PTF_TXD_MARK, PSA11_PSA10_FN2, PTF1_FN),
	PINMUX_DATA(IDEA0_MARK, PSA11_PSA10_FN1, PTF0_FN),
	PINMUX_DATA(MSIOF0_PTF_MCK_MARK, PSA11_PSA10_FN2, PTF0_FN),

	/* PTG FN */
	PINMUX_DATA(AUDCK_MARK, PTG5_FN),
	PINMUX_DATA(AUDSYNC_MARK, PTG4_FN),
	PINMUX_DATA(AUDATA3_MARK, PSA3_PSA2_FN1, PTG3_FN),
	PINMUX_DATA(TPUTO3_MARK, PSA3_PSA2_FN2, PTG3_FN),
	PINMUX_DATA(AUDATA2_MARK, PSA3_PSA2_FN1, PTG2_FN),
	PINMUX_DATA(TPUTO2_MARK, PSA3_PSA2_FN2, PTG2_FN),
	PINMUX_DATA(AUDATA1_MARK, PSA3_PSA2_FN1, PTG1_FN),
	PINMUX_DATA(TPUTO1_MARK, PSA3_PSA2_FN2, PTG1_FN),
	PINMUX_DATA(AUDATA0_MARK, PSA3_PSA2_FN1, PTG0_FN),
	PINMUX_DATA(TPUTO0_MARK, PSA3_PSA2_FN2, PTG0_FN),

	/* PTH FN (was mislabelled "PTG FN": entries below are all PTHx) */
	PINMUX_DATA(LCDVCPWC_MARK, PTH7_FN),
	PINMUX_DATA(LCDRD_MARK, PSB15_PSB14_FN1, PTH6_FN),
	PINMUX_DATA(DV_CLKI_MARK, PSB15_PSB14_FN2, PTH6_FN),
	PINMUX_DATA(LCDVSYN_MARK, PSB15_PSB14_FN1, PTH5_FN),
	PINMUX_DATA(DV_CLK_MARK, PSB15_PSB14_FN2, PTH5_FN),
	PINMUX_DATA(LCDDISP_MARK, PSB13_PSB12_LCDC_RGB, PTH4_FN),
	PINMUX_DATA(LCDRS_MARK, PSB13_PSB12_LCDC_SYS, PTH4_FN),
	PINMUX_DATA(LCDHSYN_MARK, PSB13_PSB12_LCDC_RGB, PTH3_FN),
	PINMUX_DATA(LCDCS_MARK, PSB13_PSB12_LCDC_SYS, PTH3_FN),
	PINMUX_DATA(LCDDON_MARK, PTH2_FN),
	PINMUX_DATA(LCDDCK_MARK, PSB13_PSB12_LCDC_RGB, PTH1_FN),
	PINMUX_DATA(LCDWR_MARK, PSB13_PSB12_LCDC_SYS, PTH1_FN),
	PINMUX_DATA(LCDVEPWC_MARK, PTH0_FN),

	/* PTJ FN */
	PINMUX_DATA(STATUS0_MARK, PTJ7_FN),
	PINMUX_DATA(PDSTATUS_MARK, PTJ5_FN),
	PINMUX_DATA(A25_MARK, PTJ3_FN),
	PINMUX_DATA(A24_MARK, PTJ2_FN),
	PINMUX_DATA(A23_MARK, PTJ1_FN),
	PINMUX_DATA(A22_MARK, PTJ0_FN),

	/* PTK FN */
	PINMUX_DATA(SIUAFCK_MARK, PTK7_FN),
	PINMUX_DATA(SIUAILR_MARK, PSB9_PSB8_FN1, PTK6_FN),
	PINMUX_DATA(MSIOF1_SS2_MARK, PSB9_PSB8_FN2, PTK6_FN),
	PINMUX_DATA(MSIOF1_RSYNC_MARK, PSB9_PSB8_FN3, PTK6_FN),
	PINMUX_DATA(SIUAIBT_MARK, PSB9_PSB8_FN1, PTK5_FN),
	PINMUX_DATA(MSIOF1_SS1_MARK, PSB9_PSB8_FN2, PTK5_FN),
	PINMUX_DATA(MSIOF1_RSCK_MARK, PSB9_PSB8_FN3, PTK5_FN),
	PINMUX_DATA(SIUAISLD_MARK, PSB7_PSB6_FN1, PTK4_FN),
	PINMUX_DATA(MSIOF1_RXD_MARK, PSB7_PSB6_FN2, PTK4_FN),
	PINMUX_DATA(SIUAOLR_MARK, PSB7_PSB6_FN1, PTK3_FN),
	PINMUX_DATA(MSIOF1_TSYNC_MARK, PSB7_PSB6_FN2, PTK3_FN),
	PINMUX_DATA(SIUAOBT_MARK, PSB7_PSB6_FN1, PTK2_FN),
	PINMUX_DATA(MSIOF1_TSCK_MARK, PSB7_PSB6_FN2, PTK2_FN),
	PINMUX_DATA(SIUAOSLD_MARK, PSB7_PSB6_FN1, PTK1_FN),
	/*
	 * Fixed: PTK1 FN2 is MSIOF1 transmit data (pairs with SIU A output
	 * data above); MSIOF1_RXD is PTK4.  The old table assigned
	 * MSIOF1_RXD_MARK here a second time and left MSIOF1_TXD_MARK unused.
	 */
	PINMUX_DATA(MSIOF1_TXD_MARK, PSB7_PSB6_FN2, PTK1_FN),
	PINMUX_DATA(SIUAMCK_MARK, PSB7_PSB6_FN1, PTK0_FN),
	PINMUX_DATA(MSIOF1_MCK_MARK, PSB7_PSB6_FN2, PTK0_FN),

	/* PTL FN */
	PINMUX_DATA(LCDD15_MARK, PSB5_PSB4_FN1, PTL7_FN),
	PINMUX_DATA(DV_D15_MARK, PSB5_PSB4_FN2, PTL7_FN),
	PINMUX_DATA(LCDD14_MARK, PSB5_PSB4_FN1, PTL6_FN),
	PINMUX_DATA(DV_D14_MARK, PSB5_PSB4_FN2, PTL6_FN),
	PINMUX_DATA(LCDD13_MARK, PSB5_PSB4_FN1, PTL5_FN),
	PINMUX_DATA(DV_D13_MARK, PSB5_PSB4_FN2, PTL5_FN),
	PINMUX_DATA(LCDD12_MARK, PSB5_PSB4_FN1, PTL4_FN),
	PINMUX_DATA(DV_D12_MARK, PSB5_PSB4_FN2, PTL4_FN),
	PINMUX_DATA(LCDD11_MARK, PSB5_PSB4_FN1, PTL3_FN),
	PINMUX_DATA(DV_D11_MARK, PSB5_PSB4_FN2, PTL3_FN),
	PINMUX_DATA(LCDD10_MARK, PSB5_PSB4_FN1, PTL2_FN),
	PINMUX_DATA(DV_D10_MARK, PSB5_PSB4_FN2, PTL2_FN),
	PINMUX_DATA(LCDD9_MARK, PSB5_PSB4_FN1, PTL1_FN),
	PINMUX_DATA(DV_D9_MARK, PSB5_PSB4_FN2, PTL1_FN),
	PINMUX_DATA(LCDD8_MARK, PSB5_PSB4_FN1, PTL0_FN),
	PINMUX_DATA(DV_D8_MARK, PSB5_PSB4_FN2, PTL0_FN),

	/* PTM FN */
	PINMUX_DATA(LCDD7_MARK, PSB5_PSB4_FN1, PTM7_FN),
	PINMUX_DATA(DV_D7_MARK, PSB5_PSB4_FN2, PTM7_FN),
	PINMUX_DATA(LCDD6_MARK, PSB5_PSB4_FN1, PTM6_FN),
	PINMUX_DATA(DV_D6_MARK, PSB5_PSB4_FN2, PTM6_FN),
	PINMUX_DATA(LCDD5_MARK, PSB5_PSB4_FN1, PTM5_FN),
	PINMUX_DATA(DV_D5_MARK, PSB5_PSB4_FN2, PTM5_FN),
	PINMUX_DATA(LCDD4_MARK, PSB5_PSB4_FN1, PTM4_FN),
	PINMUX_DATA(DV_D4_MARK, PSB5_PSB4_FN2, PTM4_FN),
	PINMUX_DATA(LCDD3_MARK, PSB5_PSB4_FN1, PTM3_FN),
	PINMUX_DATA(DV_D3_MARK, PSB5_PSB4_FN2, PTM3_FN),
	PINMUX_DATA(LCDD2_MARK, PSB5_PSB4_FN1, PTM2_FN),
	PINMUX_DATA(DV_D2_MARK, PSB5_PSB4_FN2, PTM2_FN),
	PINMUX_DATA(LCDD1_MARK, PSB5_PSB4_FN1, PTM1_FN),
	PINMUX_DATA(DV_D1_MARK, PSB5_PSB4_FN2, PTM1_FN),
	PINMUX_DATA(LCDD0_MARK, PSB5_PSB4_FN1, PTM0_FN),
	PINMUX_DATA(DV_D0_MARK, PSB5_PSB4_FN2, PTM0_FN),

	/* PTN FN */
	PINMUX_DATA(LCDD23_MARK, PSB3_PSB2_FN1, PTN7_FN),
	PINMUX_DATA(SCIF5_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN7_FN),
	PINMUX_DATA(LCDD22_MARK, PSB3_PSB2_FN1, PTN6_FN),
	PINMUX_DATA(SCIF5_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN6_FN),
	PINMUX_DATA(LCDD21_MARK, PSB3_PSB2_FN1, PTN5_FN),
	PINMUX_DATA(SCIF5_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN5_FN),
	PINMUX_DATA(LCDD20_MARK, PSB3_PSB2_FN1, PTN4_FN),
	PINMUX_DATA(SCIF4_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN4_FN),
	PINMUX_DATA(LCDD19_MARK, PSB3_PSB2_FN1, PTN3_FN),
	PINMUX_DATA(SCIF4_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN3_FN),
	PINMUX_DATA(LCDD18_MARK, PSB3_PSB2_FN1, PTN2_FN),
	PINMUX_DATA(SCIF4_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN2_FN),
	PINMUX_DATA(LCDD17_MARK, PSB5_PSB4_FN1, PTN1_FN),
	PINMUX_DATA(DV_VSYNC_MARK, PSB5_PSB4_FN2, PTN1_FN),
	PINMUX_DATA(LCDD16_MARK, PSB5_PSB4_FN1, PTN0_FN),
	PINMUX_DATA(DV_HSYNC_MARK, PSB5_PSB4_FN2, PTN0_FN),

	/* PTQ FN */
	PINMUX_DATA(AN3_MARK, PTQ3_FN),
	PINMUX_DATA(AN2_MARK, PTQ2_FN),
	PINMUX_DATA(AN1_MARK, PTQ1_FN),
	PINMUX_DATA(AN0_MARK, PTQ0_FN),

	/* PTR FN */
	PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN),
	PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN),
	PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN),
	PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN),
	PINMUX_DATA(IOIS16_MARK, PSA13_PSA12_FN1, PTR3_FN),
	PINMUX_DATA(LCDLCLK_PTR_MARK, PSA13_PSA12_FN2, PTR3_FN),
	PINMUX_DATA(WAIT_MARK, PTR2_FN),
	PINMUX_DATA(WE3_ICIOWR_MARK, PTR1_FN),
	PINMUX_DATA(WE2_ICIORD_MARK, PTR0_FN),

	/* PTS FN */
	PINMUX_DATA(SCIF1_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS7_FN),
	PINMUX_DATA(SDHI0CD_PTS_MARK, PSC15_PSC14_FN2, PTS7_FN),
	PINMUX_DATA(SCIF1_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS6_FN),
	PINMUX_DATA(SDHI0WP_PTS_MARK, PSC15_PSC14_FN2, PTS6_FN),
	PINMUX_DATA(SCIF1_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS5_FN),
	PINMUX_DATA(SDHI0D3_PTS_MARK, PSC15_PSC14_FN2, PTS5_FN),
	PINMUX_DATA(SCIF3_PTS_CTS_MARK, PSC15_PSC14_FN1, PTS4_FN),
	PINMUX_DATA(SDHI0D2_PTS_MARK, PSC15_PSC14_FN2, PTS4_FN),
	PINMUX_DATA(SCIF3_PTS_RTS_MARK, PSC15_PSC14_FN1, PTS3_FN),
	PINMUX_DATA(SDHI0D1_PTS_MARK, PSC15_PSC14_FN2, PTS3_FN),
	PINMUX_DATA(SCIF3_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS2_FN),
	PINMUX_DATA(SDHI0D0_PTS_MARK, PSC15_PSC14_FN2, PTS2_FN),
	PINMUX_DATA(SCIF3_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS1_FN),
	PINMUX_DATA(SDHI0CMD_PTS_MARK, PSC15_PSC14_FN2, PTS1_FN),
	PINMUX_DATA(SCIF3_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS0_FN),
	PINMUX_DATA(SDHI0CLK_PTS_MARK, PSC15_PSC14_FN2, PTS0_FN),

	/* PTT FN */
	PINMUX_DATA(SCIF0_PTT_SCK_MARK, PSC13_PSC12_FN1, PTT5_FN),
	PINMUX_DATA(MSIOF0_PTT_TSCK_MARK, PSC13_PSC12_FN2, PTT5_FN),
	PINMUX_DATA(SCIF0_PTT_RXD_MARK, PSC13_PSC12_FN1, PTT4_FN),
	PINMUX_DATA(MSIOF0_PTT_RXD_MARK, PSC13_PSC12_FN2, PTT4_FN),
	PINMUX_DATA(SCIF0_PTT_TXD_MARK, PSC13_PSC12_FN1, PTT3_FN),
	PINMUX_DATA(MSIOF0_PTT_TXD_MARK, PSC13_PSC12_FN2, PTT3_FN),
	PINMUX_DATA(SCIF2_PTT_SCK_MARK, PSC11_PSC10_FN1, PTT2_FN),
	PINMUX_DATA(MSIOF0_PTT_TSYNC_MARK, PSC11_PSC10_FN2, PTT2_FN),
	PINMUX_DATA(SCIF2_PTT_RXD_MARK, PSC11_PSC10_FN1, PTT1_FN),
	PINMUX_DATA(MSIOF0_PTT_SS1_MARK, PSC11_PSC10_FN2, PTT1_FN),
	PINMUX_DATA(MSIOF0_PTT_RSCK_MARK, PSC11_PSC10_FN3, PTT1_FN),
	PINMUX_DATA(SCIF2_PTT_TXD_MARK, PSC11_PSC10_FN1, PTT0_FN),
	PINMUX_DATA(MSIOF0_PTT_SS2_MARK, PSC11_PSC10_FN2, PTT0_FN),
	PINMUX_DATA(MSIOF0_PTT_RSYNC_MARK, PSC11_PSC10_FN3, PTT0_FN),

	/* PTU FN */
	PINMUX_DATA(FCDE_MARK, PSC9_PSC8_FN1, PTU5_FN),
	PINMUX_DATA(SCIF0_PTU_SCK_MARK, PSC9_PSC8_FN2, PTU5_FN),
	PINMUX_DATA(FSC_MARK, PSC9_PSC8_FN1, PTU4_FN),
	PINMUX_DATA(SCIF0_PTU_RXD_MARK, PSC9_PSC8_FN2, PTU4_FN),
	PINMUX_DATA(FWE_MARK, PSC9_PSC8_FN1, PTU3_FN),
	PINMUX_DATA(SCIF0_PTU_TXD_MARK, PSC9_PSC8_FN2, PTU3_FN),
	PINMUX_DATA(FOE_MARK, PSC7_PSC6_FN1, PTU2_FN),
	PINMUX_DATA(SCIF2_PTU_SCK_MARK, PSC7_PSC6_FN2, PTU2_FN),
	PINMUX_DATA(VIO_VD2_MARK, PSC7_PSC6_FN3, PTU2_FN),
	PINMUX_DATA(FRB_MARK, PSC7_PSC6_FN1, PTU1_FN),
	PINMUX_DATA(SCIF2_PTU_RXD_MARK, PSC7_PSC6_FN2, PTU1_FN),
	PINMUX_DATA(VIO_CLK2_MARK, PSC7_PSC6_FN3, PTU1_FN),
	PINMUX_DATA(FCE_MARK, PSC7_PSC6_FN1, PTU0_FN),
	PINMUX_DATA(SCIF2_PTU_TXD_MARK, PSC7_PSC6_FN2, PTU0_FN),
	PINMUX_DATA(VIO_HD2_MARK, PSC7_PSC6_FN3, PTU0_FN),

	/* PTV FN */
	PINMUX_DATA(NAF7_MARK, PSC7_PSC6_FN1, PTV7_FN),
	PINMUX_DATA(SCIF1_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV7_FN),
	PINMUX_DATA(VIO_D15_MARK, PSC7_PSC6_FN3, PTV7_FN),
	PINMUX_DATA(NAF6_MARK, PSC7_PSC6_FN1, PTV6_FN),
	PINMUX_DATA(SCIF1_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV6_FN),
	PINMUX_DATA(VIO_D14_MARK, PSC7_PSC6_FN3, PTV6_FN),
	PINMUX_DATA(NAF5_MARK, PSC7_PSC6_FN1, PTV5_FN),
	PINMUX_DATA(SCIF1_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV5_FN),
	PINMUX_DATA(VIO_D13_MARK, PSC7_PSC6_FN3, PTV5_FN),
	PINMUX_DATA(NAF4_MARK, PSC7_PSC6_FN1, PTV4_FN),
	PINMUX_DATA(SCIF3_PTV_CTS_MARK, PSC7_PSC6_FN2, PTV4_FN),
	PINMUX_DATA(VIO_D12_MARK, PSC7_PSC6_FN3, PTV4_FN),
	PINMUX_DATA(NAF3_MARK, PSC7_PSC6_FN1, PTV3_FN),
	PINMUX_DATA(SCIF3_PTV_RTS_MARK, PSC7_PSC6_FN2, PTV3_FN),
	PINMUX_DATA(VIO_D11_MARK, PSC7_PSC6_FN3, PTV3_FN),
	PINMUX_DATA(NAF2_MARK, PSC7_PSC6_FN1, PTV2_FN),
	PINMUX_DATA(SCIF3_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV2_FN),
	PINMUX_DATA(VIO_D10_MARK, PSC7_PSC6_FN3, PTV2_FN),
	PINMUX_DATA(NAF1_MARK, PSC7_PSC6_FN1, PTV1_FN),
	PINMUX_DATA(SCIF3_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV1_FN),
	PINMUX_DATA(VIO_D9_MARK, PSC7_PSC6_FN3, PTV1_FN),
	PINMUX_DATA(NAF0_MARK, PSC7_PSC6_FN1, PTV0_FN),
	PINMUX_DATA(SCIF3_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV0_FN),
	PINMUX_DATA(VIO_D8_MARK, PSC7_PSC6_FN3, PTV0_FN),

	/* PTW FN */
	PINMUX_DATA(IRQ7_MARK, PTW7_FN),
	PINMUX_DATA(IRQ6_MARK, PTW6_FN),
	PINMUX_DATA(IRQ5_MARK, PTW5_FN),
	PINMUX_DATA(IRQ4_MARK, PSD15_PSD14_FN1, PTW4_FN),
	PINMUX_DATA(LCDLCLK_PTW_MARK, PSD15_PSD14_FN2, PTW4_FN),
	PINMUX_DATA(IRQ3_MARK, PSD13_PSD12_FN1, PTW3_FN),
	PINMUX_DATA(ADTRG_MARK, PSD13_PSD12_FN2, PTW3_FN),
	PINMUX_DATA(IRQ2_MARK, PSD11_PSD10_FN1, PTW2_FN),
	PINMUX_DATA(BS_MARK, PSD11_PSD10_FN2, PTW2_FN),
	PINMUX_DATA(VIO_CKO_MARK, PSD11_PSD10_FN3, PTW2_FN),
	PINMUX_DATA(IRQ1_MARK, PSD9_PSD8_FN1, PTW1_FN),
	PINMUX_DATA(SIUAISPD_MARK, PSD9_PSD8_FN2, PTW1_FN),
	PINMUX_DATA(IRQ0_MARK, PSD7_PSD6_FN1, PTW0_FN),
	PINMUX_DATA(SIUAOSPD_MARK, PSD7_PSD6_FN2, PTW0_FN),

	/* PTX FN */
	PINMUX_DATA(DACK1_MARK, PTX7_FN),
	PINMUX_DATA(DREQ1_MARK, PSD3_PSD2_FN1, PTX6_FN),
	PINMUX_DATA(MSIOF0_PTX_MCK_MARK, PSD3_PSD2_FN2, PTX6_FN),
	/*
	 * Fixed: PTX5/PTX4 carry DMAC channel 0 (DACK0/DREQ0); channel 1
	 * is PTX7/PTX6 above.  The old table repeated DACK1/DREQ1 here and
	 * left DACK0_MARK/DREQ0_MARK from the enum unused.
	 */
	PINMUX_DATA(DACK0_MARK, PTX5_FN),
	PINMUX_DATA(IRDA_OUT_MARK, PSD5_PSD4_FN2, PTX5_FN),
	PINMUX_DATA(DREQ0_MARK, PTX4_FN),
	PINMUX_DATA(IRDA_IN_MARK, PSD5_PSD4_FN2, PTX4_FN),
	PINMUX_DATA(TS0_SDAT_MARK, PTX3_FN),
	PINMUX_DATA(TS0_SCK_MARK, PTX2_FN),
	PINMUX_DATA(TS0_SDEN_MARK, PTX1_FN),
	PINMUX_DATA(TS0_SPSYNC_MARK, PTX0_FN),

	/* PTY FN */
	PINMUX_DATA(VIO_D7_MARK, PTY7_FN),
	PINMUX_DATA(VIO_D6_MARK, PTY6_FN),
	PINMUX_DATA(VIO_D5_MARK, PTY5_FN),
	PINMUX_DATA(VIO_D4_MARK, PTY4_FN),
	PINMUX_DATA(VIO_D3_MARK, PTY3_FN),
	PINMUX_DATA(VIO_D2_MARK, PTY2_FN),
	PINMUX_DATA(VIO_D1_MARK, PTY1_FN),
	PINMUX_DATA(VIO_D0_MARK, PTY0_FN),

	/* PTZ FN */
	PINMUX_DATA(SIUBOBT_MARK, PTZ7_FN),
	PINMUX_DATA(SIUBOLR_MARK, PTZ6_FN),
	PINMUX_DATA(SIUBOSLD_MARK, PTZ5_FN),
	PINMUX_DATA(SIUBMCK_MARK, PTZ4_FN),
	PINMUX_DATA(VIO_FLD_MARK, PSD1_PSD0_FN1, PTZ3_FN),
	PINMUX_DATA(SIUBFCK_MARK, PSD1_PSD0_FN2, PTZ3_FN),
	PINMUX_DATA(VIO_HD1_MARK, PSD1_PSD0_FN1, PTZ2_FN),
	PINMUX_DATA(SIUBILR_MARK, PSD1_PSD0_FN2, PTZ2_FN),
	PINMUX_DATA(VIO_VD1_MARK, PSD1_PSD0_FN1, PTZ1_FN),
	PINMUX_DATA(SIUBIBT_MARK, PSD1_PSD0_FN2, PTZ1_FN),
	PINMUX_DATA(VIO_CLK1_MARK, PSD1_PSD0_FN1, PTZ0_FN),
	PINMUX_DATA(SIUBISLD_MARK, PSD1_PSD0_FN2, PTZ0_FN),
};
static struct pinmux_gpio pinmux_gpios[] = {
/* PTA */
PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
/* PTB */
PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
/* PTC */
PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
/* PTD */
PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
/* PTE */
PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
/* PTF */
PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
/* PTG */
PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
/* PTH */
PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
/* PTJ */
PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
/* PTK */
PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
/* PTL */
PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
/* PTM */
PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
/* PTN */
PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
/* PTQ */
PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
/* PTR */
PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
/* PTS */
PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
/* PTT */
PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
/* PTU */
PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
/* PTV */
PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
/* PTW */
PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
/* PTX */
PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
/* PTY */
PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
/* PTZ */
PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
/* SCIF0 */
PINMUX_GPIO(GPIO_FN_SCIF0_PTT_TXD, SCIF0_PTT_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF0_PTT_RXD, SCIF0_PTT_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF0_PTT_SCK, SCIF0_PTT_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF0_PTU_TXD, SCIF0_PTU_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF0_PTU_RXD, SCIF0_PTU_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF0_PTU_SCK, SCIF0_PTU_SCK_MARK),
/* SCIF1 */
PINMUX_GPIO(GPIO_FN_SCIF1_PTS_TXD, SCIF1_PTS_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF1_PTS_RXD, SCIF1_PTS_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF1_PTS_SCK, SCIF1_PTS_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF1_PTV_TXD, SCIF1_PTV_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF1_PTV_RXD, SCIF1_PTV_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF1_PTV_SCK, SCIF1_PTV_SCK_MARK),
/* SCIF2 */
PINMUX_GPIO(GPIO_FN_SCIF2_PTT_TXD, SCIF2_PTT_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_PTT_RXD, SCIF2_PTT_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_PTT_SCK, SCIF2_PTT_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_PTU_TXD, SCIF2_PTU_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_PTU_RXD, SCIF2_PTU_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_PTU_SCK, SCIF2_PTU_SCK_MARK),
/* SCIF3 */
PINMUX_GPIO(GPIO_FN_SCIF3_PTS_TXD, SCIF3_PTS_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RXD, SCIF3_PTS_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_PTS_SCK, SCIF3_PTS_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RTS, SCIF3_PTS_RTS_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_PTS_CTS, SCIF3_PTS_CTS_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_PTV_TXD, SCIF3_PTV_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RXD, SCIF3_PTV_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_PTV_SCK, SCIF3_PTV_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RTS, SCIF3_PTV_RTS_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_PTV_CTS, SCIF3_PTV_CTS_MARK),
/* SCIF4 */
PINMUX_GPIO(GPIO_FN_SCIF4_PTE_TXD, SCIF4_PTE_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF4_PTE_RXD, SCIF4_PTE_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF4_PTE_SCK, SCIF4_PTE_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF4_PTN_TXD, SCIF4_PTN_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF4_PTN_RXD, SCIF4_PTN_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF4_PTN_SCK, SCIF4_PTN_SCK_MARK),
/* SCIF5 */
PINMUX_GPIO(GPIO_FN_SCIF5_PTE_TXD, SCIF5_PTE_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF5_PTE_RXD, SCIF5_PTE_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF5_PTE_SCK, SCIF5_PTE_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF5_PTN_TXD, SCIF5_PTN_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF5_PTN_RXD, SCIF5_PTN_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF5_PTN_SCK, SCIF5_PTN_SCK_MARK),
/* CEU */
PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
PINMUX_GPIO(GPIO_FN_VIO_CLK1, VIO_CLK1_MARK),
PINMUX_GPIO(GPIO_FN_VIO_VD1, VIO_VD1_MARK),
PINMUX_GPIO(GPIO_FN_VIO_HD1, VIO_HD1_MARK),
PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
/* LCDC */
PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
PINMUX_GPIO(GPIO_FN_LCDLCLK_PTR, LCDLCLK_PTR_MARK),
PINMUX_GPIO(GPIO_FN_LCDLCLK_PTW, LCDLCLK_PTW_MARK),
/* Main LCD */
PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
/* Main LCD - RGB Mode */
PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
/* Main LCD - SYS Mode */
PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
/* IRQ */
PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
/* AUD */
PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
/* SDHI0 (PTD) */
PINMUX_GPIO(GPIO_FN_SDHI0CD_PTD, SDHI0CD_PTD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0WP_PTD, SDHI0WP_PTD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D3_PTD, SDHI0D3_PTD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D2_PTD, SDHI0D2_PTD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D1_PTD, SDHI0D1_PTD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D0_PTD, SDHI0D0_PTD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTD, SDHI0CMD_PTD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTD, SDHI0CLK_PTD_MARK),
/* SDHI0 (PTS) */
PINMUX_GPIO(GPIO_FN_SDHI0CD_PTS, SDHI0CD_PTS_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0WP_PTS, SDHI0WP_PTS_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D3_PTS, SDHI0D3_PTS_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D2_PTS, SDHI0D2_PTS_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D1_PTS, SDHI0D1_PTS_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D0_PTS, SDHI0D0_PTS_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTS, SDHI0CMD_PTS_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTS, SDHI0CLK_PTS_MARK),
/* SDHI1 */
PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
/* SIUA */
PINMUX_GPIO(GPIO_FN_SIUAFCK, SIUAFCK_MARK),
PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
PINMUX_GPIO(GPIO_FN_SIUAMCK, SIUAMCK_MARK),
PINMUX_GPIO(GPIO_FN_SIUAISPD, SIUAISPD_MARK),
PINMUX_GPIO(GPIO_FN_SIUOSPD, SIUAOSPD_MARK),
/* SIUB */
PINMUX_GPIO(GPIO_FN_SIUBFCK, SIUBFCK_MARK),
PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
PINMUX_GPIO(GPIO_FN_SIUBMCK, SIUBMCK_MARK),
/* IRDA */
PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
/* VOU */
PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
/* KEYSC */
PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
/* MSIOF0 (PTF) */
PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TXD, MSIOF0_PTF_TXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RXD, MSIOF0_PTF_RXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_MCK, MSIOF0_PTF_MCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSYNC, MSIOF0_PTF_TSYNC_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSCK, MSIOF0_PTF_TSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSYNC, MSIOF0_PTF_RSYNC_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSCK, MSIOF0_PTF_RSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS1, MSIOF0_PTF_SS1_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS2, MSIOF0_PTF_SS2_MARK),
/* MSIOF0 (PTT+PTX) */
PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TXD, MSIOF0_PTT_TXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RXD, MSIOF0_PTT_RXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTX_MCK, MSIOF0_PTX_MCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSYNC, MSIOF0_PTT_TSYNC_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSCK, MSIOF0_PTT_TSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSYNC, MSIOF0_PTT_RSYNC_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSCK, MSIOF0_PTT_RSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS1, MSIOF0_PTT_SS1_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS2, MSIOF0_PTT_SS2_MARK),
/* MSIOF1 */
PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
/* TSIF */
PINMUX_GPIO(GPIO_FN_TS0_SDAT, TS0_SDAT_MARK),
PINMUX_GPIO(GPIO_FN_TS0_SCK, TS0_SCK_MARK),
PINMUX_GPIO(GPIO_FN_TS0_SDEN, TS0_SDEN_MARK),
PINMUX_GPIO(GPIO_FN_TS0_SPSYNC, TS0_SPSYNC_MARK),
/* FLCTL */
PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
/* DMAC */
PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
/* ADC */
PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
/* CPG */
PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
/* TPU */
PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
/* BSC */
PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
/* ATAPI */
PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
};
/*
 * Pin function control registers (P?CR) and function select registers
 * (PSEL?).
 *
 * Each PINMUX_CFG_REG("name", addr, 16, 2) entry describes a 16-bit
 * register made up of 2-bit fields, one field per pin.  The four enum
 * IDs listed per pin are the states selected by the four possible
 * field values, in order:
 *   { function, output, input with pull-up, input }
 * A 0 in a slot marks a setting that is reserved/unsupported for that
 * pin.  The list is terminated by the empty {} entry.
 */
static struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
PTA7_FN, PTA7_OUT, 0, PTA7_IN,
PTA6_FN, PTA6_OUT, 0, PTA6_IN,
PTA5_FN, PTA5_OUT, 0, PTA5_IN,
PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
},
{ PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
PTB7_FN, PTB7_OUT, 0, PTB7_IN,
PTB6_FN, PTB6_OUT, 0, PTB6_IN,
PTB5_FN, PTB5_OUT, 0, PTB5_IN,
PTB4_FN, PTB4_OUT, 0, PTB4_IN,
PTB3_FN, PTB3_OUT, 0, PTB3_IN,
PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
PTB0_FN, PTB0_OUT, 0, PTB0_IN }
},
{ PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
PTC7_FN, PTC7_OUT, 0, PTC7_IN,
PTC6_FN, PTC6_OUT, 0, PTC6_IN,
PTC5_FN, PTC5_OUT, 0, PTC5_IN,
PTC4_FN, PTC4_OUT, 0, PTC4_IN,
PTC3_FN, PTC3_OUT, 0, PTC3_IN,
PTC2_FN, PTC2_OUT, 0, PTC2_IN,
PTC1_FN, PTC1_OUT, 0, PTC1_IN,
PTC0_FN, PTC0_OUT, 0, PTC0_IN }
},
{ PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
PTD7_FN, PTD7_OUT, 0, PTD7_IN,
PTD6_FN, PTD6_OUT, 0, PTD6_IN,
PTD5_FN, PTD5_OUT, 0, PTD5_IN,
PTD4_FN, PTD4_OUT, 0, PTD4_IN,
PTD3_FN, PTD3_OUT, 0, PTD3_IN,
PTD2_FN, PTD2_OUT, 0, PTD2_IN,
PTD1_FN, PTD1_OUT, 0, PTD1_IN,
PTD0_FN, PTD0_OUT, 0, PTD0_IN }
},
/* Port E only implements pins 5..0; the top two fields are reserved. */
{ PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
PTE5_FN, PTE5_OUT, 0, PTE5_IN,
PTE4_FN, PTE4_OUT, 0, PTE4_IN,
PTE3_FN, PTE3_OUT, 0, PTE3_IN,
PTE2_FN, PTE2_OUT, 0, PTE2_IN,
PTE1_FN, PTE1_OUT, 0, PTE1_IN,
PTE0_FN, PTE0_OUT, 0, PTE0_IN }
},
{ PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
PTF7_FN, PTF7_OUT, 0, PTF7_IN,
PTF6_FN, PTF6_OUT, 0, PTF6_IN,
PTF5_FN, PTF5_OUT, 0, PTF5_IN,
PTF4_FN, PTF4_OUT, 0, PTF4_IN,
PTF3_FN, PTF3_OUT, 0, PTF3_IN,
PTF2_FN, PTF2_OUT, 0, PTF2_IN,
PTF1_FN, PTF1_OUT, 0, PTF1_IN,
PTF0_FN, PTF0_OUT, 0, PTF0_IN }
},
/* Port G pins are function/output only -- no input mode. */
{ PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
PTG5_FN, PTG5_OUT, 0, 0,
PTG4_FN, PTG4_OUT, 0, 0,
PTG3_FN, PTG3_OUT, 0, 0,
PTG2_FN, PTG2_OUT, 0, 0,
PTG1_FN, PTG1_OUT, 0, 0,
PTG0_FN, PTG0_OUT, 0, 0 }
},
{ PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
PTH7_FN, PTH7_OUT, 0, PTH7_IN,
PTH6_FN, PTH6_OUT, 0, PTH6_IN,
PTH5_FN, PTH5_OUT, 0, PTH5_IN,
PTH4_FN, PTH4_OUT, 0, PTH4_IN,
PTH3_FN, PTH3_OUT, 0, PTH3_IN,
PTH2_FN, PTH2_OUT, 0, PTH2_IN,
PTH1_FN, PTH1_OUT, 0, PTH1_IN,
PTH0_FN, PTH0_OUT, 0, PTH0_IN }
},
{ PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
PTJ7_FN, PTJ7_OUT, 0, 0,
0, 0, 0, 0,
PTJ5_FN, PTJ5_OUT, 0, 0,
0, 0, 0, 0,
PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
},
{ PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
PTK7_FN, PTK7_OUT, 0, PTK7_IN,
PTK6_FN, PTK6_OUT, 0, PTK6_IN,
PTK5_FN, PTK5_OUT, 0, PTK5_IN,
PTK4_FN, PTK4_OUT, 0, PTK4_IN,
PTK3_FN, PTK3_OUT, 0, PTK3_IN,
PTK2_FN, PTK2_OUT, 0, PTK2_IN,
PTK1_FN, PTK1_OUT, 0, PTK1_IN,
PTK0_FN, PTK0_OUT, 0, PTK0_IN }
},
{ PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
PTL7_FN, PTL7_OUT, 0, PTL7_IN,
PTL6_FN, PTL6_OUT, 0, PTL6_IN,
PTL5_FN, PTL5_OUT, 0, PTL5_IN,
PTL4_FN, PTL4_OUT, 0, PTL4_IN,
PTL3_FN, PTL3_OUT, 0, PTL3_IN,
PTL2_FN, PTL2_OUT, 0, PTL2_IN,
PTL1_FN, PTL1_OUT, 0, PTL1_IN,
PTL0_FN, PTL0_OUT, 0, PTL0_IN }
},
{ PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
PTM7_FN, PTM7_OUT, 0, PTM7_IN,
PTM6_FN, PTM6_OUT, 0, PTM6_IN,
PTM5_FN, PTM5_OUT, 0, PTM5_IN,
PTM4_FN, PTM4_OUT, 0, PTM4_IN,
PTM3_FN, PTM3_OUT, 0, PTM3_IN,
PTM2_FN, PTM2_OUT, 0, PTM2_IN,
PTM1_FN, PTM1_OUT, 0, PTM1_IN,
PTM0_FN, PTM0_OUT, 0, PTM0_IN }
},
{ PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
PTN7_FN, PTN7_OUT, 0, PTN7_IN,
PTN6_FN, PTN6_OUT, 0, PTN6_IN,
PTN5_FN, PTN5_OUT, 0, PTN5_IN,
PTN4_FN, PTN4_OUT, 0, PTN4_IN,
PTN3_FN, PTN3_OUT, 0, PTN3_IN,
PTN2_FN, PTN2_OUT, 0, PTN2_IN,
PTN1_FN, PTN1_OUT, 0, PTN1_IN,
PTN0_FN, PTN0_OUT, 0, PTN0_IN }
},
/* Port Q pins are function/input only -- no output mode. */
{ PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PTQ3_FN, 0, 0, PTQ3_IN,
PTQ2_FN, 0, 0, PTQ2_IN,
PTQ1_FN, 0, 0, PTQ1_IN,
PTQ0_FN, 0, 0, PTQ0_IN }
},
{ PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
PTR7_FN, PTR7_OUT, 0, PTR7_IN,
PTR6_FN, PTR6_OUT, 0, PTR6_IN,
PTR5_FN, PTR5_OUT, 0, PTR5_IN,
PTR4_FN, PTR4_OUT, 0, PTR4_IN,
PTR3_FN, 0, 0, PTR3_IN,
PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
PTR1_FN, PTR1_OUT, 0, PTR1_IN,
PTR0_FN, PTR0_OUT, 0, PTR0_IN }
},
{ PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
PTS7_FN, PTS7_OUT, 0, PTS7_IN,
PTS6_FN, PTS6_OUT, 0, PTS6_IN,
PTS5_FN, PTS5_OUT, 0, PTS5_IN,
PTS4_FN, PTS4_OUT, 0, PTS4_IN,
PTS3_FN, PTS3_OUT, 0, PTS3_IN,
PTS2_FN, PTS2_OUT, 0, PTS2_IN,
PTS1_FN, PTS1_OUT, 0, PTS1_IN,
PTS0_FN, PTS0_OUT, 0, PTS0_IN }
},
/* Note the address gap: PTCR onward live at 0xa4050140+. */
{ PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
PTT5_FN, PTT5_OUT, 0, PTT5_IN,
PTT4_FN, PTT4_OUT, 0, PTT4_IN,
PTT3_FN, PTT3_OUT, 0, PTT3_IN,
PTT2_FN, PTT2_OUT, 0, PTT2_IN,
PTT1_FN, PTT1_OUT, 0, PTT1_IN,
PTT0_FN, PTT0_OUT, 0, PTT0_IN }
},
{ PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
PTU5_FN, PTU5_OUT, 0, PTU5_IN,
PTU4_FN, PTU4_OUT, 0, PTU4_IN,
PTU3_FN, PTU3_OUT, 0, PTU3_IN,
PTU2_FN, PTU2_OUT, 0, PTU2_IN,
PTU1_FN, PTU1_OUT, 0, PTU1_IN,
PTU0_FN, PTU0_OUT, 0, PTU0_IN }
},
{ PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
PTV7_FN, PTV7_OUT, 0, PTV7_IN,
PTV6_FN, PTV6_OUT, 0, PTV6_IN,
PTV5_FN, PTV5_OUT, 0, PTV5_IN,
PTV4_FN, PTV4_OUT, 0, PTV4_IN,
PTV3_FN, PTV3_OUT, 0, PTV3_IN,
PTV2_FN, PTV2_OUT, 0, PTV2_IN,
PTV1_FN, PTV1_OUT, 0, PTV1_IN,
PTV0_FN, PTV0_OUT, 0, PTV0_IN }
},
{ PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
PTW7_FN, PTW7_OUT, 0, PTW7_IN,
PTW6_FN, PTW6_OUT, 0, PTW6_IN,
PTW5_FN, PTW5_OUT, 0, PTW5_IN,
PTW4_FN, PTW4_OUT, 0, PTW4_IN,
PTW3_FN, PTW3_OUT, 0, PTW3_IN,
PTW2_FN, PTW2_OUT, 0, PTW2_IN,
PTW1_FN, PTW1_OUT, 0, PTW1_IN,
PTW0_FN, PTW0_OUT, 0, PTW0_IN }
},
{ PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
PTX7_FN, PTX7_OUT, 0, PTX7_IN,
PTX6_FN, PTX6_OUT, 0, PTX6_IN,
PTX5_FN, PTX5_OUT, 0, PTX5_IN,
PTX4_FN, PTX4_OUT, 0, PTX4_IN,
PTX3_FN, PTX3_OUT, 0, PTX3_IN,
PTX2_FN, PTX2_OUT, 0, PTX2_IN,
PTX1_FN, PTX1_OUT, 0, PTX1_IN,
PTX0_FN, PTX0_OUT, 0, PTX0_IN }
},
{ PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
PTY7_FN, PTY7_OUT, 0, PTY7_IN,
PTY6_FN, PTY6_OUT, 0, PTY6_IN,
PTY5_FN, PTY5_OUT, 0, PTY5_IN,
PTY4_FN, PTY4_OUT, 0, PTY4_IN,
PTY3_FN, PTY3_OUT, 0, PTY3_IN,
PTY2_FN, PTY2_OUT, 0, PTY2_IN,
PTY1_FN, PTY1_OUT, 0, PTY1_IN,
PTY0_FN, PTY0_OUT, 0, PTY0_IN }
},
{ PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
PTZ7_FN, PTZ7_OUT, 0, PTZ7_IN,
PTZ6_FN, PTZ6_OUT, 0, PTZ6_IN,
PTZ5_FN, PTZ5_OUT, 0, PTZ5_IN,
PTZ4_FN, PTZ4_OUT, 0, PTZ4_IN,
PTZ3_FN, PTZ3_OUT, 0, PTZ3_IN,
PTZ2_FN, PTZ2_OUT, 0, PTZ2_IN,
PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN,
PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN }
},
/*
 * PSELA..PSELD choose between alternate functions (FN1/FN2/FN3) for
 * pin pairs rather than pin direction.
 */
{ PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 2) {
PSA15_PSA14_FN1, PSA15_PSA14_FN2, 0, 0,
PSA13_PSA12_FN1, PSA13_PSA12_FN2, 0, 0,
PSA11_PSA10_FN1, PSA11_PSA10_FN2, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3, 0,
PSA3_PSA2_FN1, PSA3_PSA2_FN2, 0, 0,
0, 0, 0, 0 }
},
{ PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 2) {
PSB15_PSB14_FN1, PSB15_PSB14_FN2, 0, 0,
PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS, 0, 0,
0, 0, 0, 0,
PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3, 0,
PSB7_PSB6_FN1, PSB7_PSB6_FN2, 0, 0,
PSB5_PSB4_FN1, PSB5_PSB4_FN2, 0, 0,
PSB3_PSB2_FN1, PSB3_PSB2_FN2, 0, 0,
0, 0, 0, 0 }
},
{ PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 2) {
PSC15_PSC14_FN1, PSC15_PSC14_FN2, 0, 0,
PSC13_PSC12_FN1, PSC13_PSC12_FN2, 0, 0,
PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3, 0,
PSC9_PSC8_FN1, PSC9_PSC8_FN2, 0, 0,
PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0 }
},
{ PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 2) {
PSD15_PSD14_FN1, PSD15_PSD14_FN2, 0, 0,
PSD13_PSD12_FN1, PSD13_PSD12_FN2, 0, 0,
PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3, 0,
PSD9_PSD8_FN1, PSD9_PSD8_FN2, 0, 0,
PSD7_PSD6_FN1, PSD7_PSD6_FN2, 0, 0,
PSD5_PSD4_FN1, PSD5_PSD4_FN2, 0, 0,
PSD3_PSD2_FN1, PSD3_PSD2_FN2, 0, 0,
PSD1_PSD0_FN1, PSD1_PSD0_FN2, 0, 0 }
},
{}
};
/*
 * Pin data registers (P?DR).
 *
 * Each PINMUX_DATA_REG("name", addr, 8) entry describes an 8-bit data
 * register; the eight enum IDs list the pin data values from PT?7 down
 * to PT?0.  A 0 marks a bit with no associated pin.  The list is
 * terminated by the empty { } entry.
 */
static struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
},
{ PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
},
{ PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
},
{ PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
},
{ PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
0, 0, PTE5_DATA, PTE4_DATA,
PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
},
{ PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
},
{ PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
0, 0, PTG5_DATA, PTG4_DATA,
PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
},
{ PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
},
{ PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
PTJ7_DATA, 0, PTJ5_DATA, 0,
PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
},
{ PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
},
{ PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
},
{ PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
},
{ PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
},
{ PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
0, 0, 0, 0,
PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
},
{ PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
},
{ PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
},
/* Note the address gap: PTDR onward live at 0xa4050160+. */
{ PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
0, 0, PTT5_DATA, PTT4_DATA,
PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
},
{ PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
0, 0, PTU5_DATA, PTU4_DATA,
PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
},
{ PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
},
{ PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
},
{ PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
},
{ PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
},
{ PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
},
{ },
};
/*
 * Top-level pinmux descriptor for the SH7723, registered with the
 * pinmux core by plat_pinmux_setup().  Ties together the GPIO list,
 * the per-mode enum ID ranges, and the config/data register tables
 * defined above.
 */
static struct pinmux_info sh7723_pinmux_info = {
.name = "sh7723_pfc",
.reserved_id = PINMUX_RESERVED,
/* Enum ID ranges for each pin state class. */
.data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
.mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
/* GPIO number range covered by this controller. */
.first_gpio = GPIO_PTA7,
.last_gpio = GPIO_FN_IDEA0,
.gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
.gpio_data = pinmux_data,
.gpio_data_size = ARRAY_SIZE(pinmux_data),
};
/*
 * Register the SH7723 pin function controller description with the
 * pinmux core.  Runs once at arch initcall time; returns the pinmux
 * core's status code.
 */
static int __init plat_pinmux_setup(void)
{
	int ret;

	ret = register_pinmux(&sh7723_pinmux_info);
	return ret;
}
arch_initcall(plat_pinmux_setup);
| gpl-2.0 |
ParanoidAndroid/android_kernel_grouper | fs/ntfs/upcase.c | 14866 | 3933 | /*
* upcase.c - Generate the full NTFS Unicode upcase table in little endian.
* Part of the Linux-NTFS project.
*
* Copyright (c) 2001 Richard Russon <ntfs@flatcap.org>
* Copyright (c) 2001-2006 Anton Altaparmakov
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS source
* in the file COPYING); if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "malloc.h"
#include "ntfs.h"
/**
 * generate_default_upcase - build the default NTFS Unicode upcase table
 *
 * Allocates and fills a table of @default_upcase_len little endian
 * Unicode characters where entry i holds the uppercase equivalent of
 * code point i (or i itself if there is no uppercase form).
 *
 * Returns the allocated table, or NULL on allocation failure.  The
 * caller owns the returned buffer.
 */
ntfschar *generate_default_upcase(void)
{
/*
 * Contiguous runs [Start, End) where uppercasing adds a constant
 * offset "Add" to the code point (e.g. ASCII 'a'..'z' -> -32).
 */
static const int uc_run_table[][3] = { /* Start, End, Add */
{0x0061, 0x007B, -32}, {0x0451, 0x045D, -80}, {0x1F70, 0x1F72, 74},
{0x00E0, 0x00F7, -32}, {0x045E, 0x0460, -80}, {0x1F72, 0x1F76, 86},
{0x00F8, 0x00FF, -32}, {0x0561, 0x0587, -48}, {0x1F76, 0x1F78, 100},
{0x0256, 0x0258, -205}, {0x1F00, 0x1F08, 8}, {0x1F78, 0x1F7A, 128},
{0x028A, 0x028C, -217}, {0x1F10, 0x1F16, 8}, {0x1F7A, 0x1F7C, 112},
{0x03AC, 0x03AD, -38}, {0x1F20, 0x1F28, 8}, {0x1F7C, 0x1F7E, 126},
{0x03AD, 0x03B0, -37}, {0x1F30, 0x1F38, 8}, {0x1FB0, 0x1FB2, 8},
{0x03B1, 0x03C2, -32}, {0x1F40, 0x1F46, 8}, {0x1FD0, 0x1FD2, 8},
{0x03C2, 0x03C3, -31}, {0x1F51, 0x1F52, 8}, {0x1FE0, 0x1FE2, 8},
{0x03C3, 0x03CC, -32}, {0x1F53, 0x1F54, 8}, {0x1FE5, 0x1FE6, 7},
{0x03CC, 0x03CD, -64}, {0x1F55, 0x1F56, 8}, {0x2170, 0x2180, -16},
{0x03CD, 0x03CF, -63}, {0x1F57, 0x1F58, 8}, {0x24D0, 0x24EA, -26},
{0x0430, 0x0450, -32}, {0x1F60, 0x1F68, 8}, {0xFF41, 0xFF5B, -32},
{0}
};
/*
 * Ranges [Start, End) of alternating upper/lower pairs: for each even
 * offset the following odd code point uppercases by subtracting 1.
 */
static const int uc_dup_table[][2] = { /* Start, End */
{0x0100, 0x012F}, {0x01A0, 0x01A6}, {0x03E2, 0x03EF}, {0x04CB, 0x04CC},
{0x0132, 0x0137}, {0x01B3, 0x01B7}, {0x0460, 0x0481}, {0x04D0, 0x04EB},
{0x0139, 0x0149}, {0x01CD, 0x01DD}, {0x0490, 0x04BF}, {0x04EE, 0x04F5},
{0x014A, 0x0178}, {0x01DE, 0x01EF}, {0x04BF, 0x04BF}, {0x04F8, 0x04F9},
{0x0179, 0x017E}, {0x01F4, 0x01F5}, {0x04C1, 0x04C4}, {0x1E00, 0x1E95},
{0x018B, 0x018B}, {0x01FA, 0x0218}, {0x04C7, 0x04C8}, {0x1EA0, 0x1EF9},
{0}
};
/*
 * Individual code points whose uppercase form follows no pattern:
 * table[i][Offset] maps to table[i][Value].
 */
static const int uc_word_table[][2] = { /* Offset, Value */
{0x00FF, 0x0178}, {0x01AD, 0x01AC}, {0x01F3, 0x01F1}, {0x0269, 0x0196},
{0x0183, 0x0182}, {0x01B0, 0x01AF}, {0x0253, 0x0181}, {0x026F, 0x019C},
{0x0185, 0x0184}, {0x01B9, 0x01B8}, {0x0254, 0x0186}, {0x0272, 0x019D},
{0x0188, 0x0187}, {0x01BD, 0x01BC}, {0x0259, 0x018F}, {0x0275, 0x019F},
{0x018C, 0x018B}, {0x01C6, 0x01C4}, {0x025B, 0x0190}, {0x0283, 0x01A9},
{0x0192, 0x0191}, {0x01C9, 0x01C7}, {0x0260, 0x0193}, {0x0288, 0x01AE},
{0x0199, 0x0198}, {0x01CC, 0x01CA}, {0x0263, 0x0194}, {0x0292, 0x01B7},
{0x01A8, 0x01A7}, {0x01DD, 0x018E}, {0x0268, 0x0197},
{0}
};
int i, r;
ntfschar *uc;
uc = ntfs_malloc_nofs(default_upcase_len * sizeof(ntfschar));
if (!uc)
return uc;
memset(uc, 0, default_upcase_len * sizeof(ntfschar));
/* Generate the little endian Unicode upcase table used by ntfs. */
/* Start from the identity mapping: each code point maps to itself. */
for (i = 0; i < default_upcase_len; i++)
uc[i] = cpu_to_le16(i);
/* Apply the constant-offset runs. */
for (r = 0; uc_run_table[r][0]; r++)
for (i = uc_run_table[r][0]; i < uc_run_table[r][1]; i++)
le16_add_cpu(&uc[i], uc_run_table[r][2]);
/* Apply the upper/lower pair ranges (odd -> preceding even). */
for (r = 0; uc_dup_table[r][0]; r++)
for (i = uc_dup_table[r][0]; i < uc_dup_table[r][1]; i += 2)
le16_add_cpu(&uc[i + 1], -1);
/* Finally, patch in the irregular single-character mappings. */
for (r = 0; uc_word_table[r][0]; r++)
uc[uc_word_table[r][0]] = cpu_to_le16(uc_word_table[r][1]);
return uc;
}
| gpl-2.0 |
mdeejay/android_kernel_monarudo | sound/soc/msm/msm-pcm-lpa.c | 19 | 19809 | /* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/control.h>
#include <sound/q6adm.h>
#include <asm/dma.h>
#include <linux/dma-mapping.h>
#include <linux/android_pmem.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>
#include <sound/timer.h>
#include "msm-pcm-q6.h"
#include "msm-pcm-routing.h"
/* Set to 1 to enable Q6 audio effect debug logging. */
#define Q6_EFFECT_DEBUG 0
//htc audio ++
/*
 * Redirect this file's pr_info/pr_err to the HTC audio-specific log
 * helpers so LPA messages are tagged consistently with the rest of
 * the audio stack.
 */
#undef pr_info
#undef pr_err
#define pr_info(fmt, ...) pr_aud_info(fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...) pr_aud_err(fmt, ##__VA_ARGS__)
//htc audio --
/* Wait queues/locks shared by the LPA PCM callbacks (write/eos waits). */
static struct audio_locks the_locks;
/* Driver-global state for the single low-power-audio session. */
struct snd_msm {
struct msm_audio *prtd;	/* runtime data of the active substream */
unsigned volume;	/* cached stream volume */
};
static struct snd_msm lpa_audio;
/*
 * ALSA hardware capabilities advertised for the LPA playback PCM:
 * mmap-able interleaved S16_LE, mono/stereo, 8-48 kHz, with large
 * (128-256 KiB) periods suited to low-power DSP rendering.
 */
static struct snd_pcm_hardware msm_pcm_hardware = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT,
.rate_min = 8000,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = 1024 * 1024,
/* TODO: Check on the lowest period size we can support */
.period_bytes_min = 128 * 1024,
.period_bytes_max = 256 * 1024,
.periods_min = 4,
.periods_max = 8,
.fifo_size = 0,
};
/* Conventional and unconventional sample rate supported */
static unsigned int supported_sample_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
};
/*
 * Rate constraint handed to the ALSA core so hw_params can only pick
 * one of the discrete rates listed above (needed because of the
 * non-standard KNOT rates).
 */
static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
.count = ARRAY_SIZE(supported_sample_rates),
.list = supported_sample_rates,
.mask = 0,
};
/*
 * ASM session event callback for the LPA playback stream.
 *
 * Invoked by the Q6 audio client when the DSP consumes a buffer
 * (WRITE_DONE), acknowledges EOS, or responds to a command (RUN/FLUSH).
 * Advances the software irq position, wakes sleepers on the shared wait
 * queues, and queues the next period to the DSP with an async write.
 *
 * Fix: the three address-of arguments had been corrupted from "&param"
 * into the literal "¶m" (HTML-entity mangling of "&para"), which does
 * not compile; the address-of expressions are restored.
 */
static void event_handler(uint32_t opcode,
		uint32_t token, uint32_t *payload, void *priv)
{
	struct msm_audio *prtd = priv;
	struct snd_pcm_substream *substream = prtd->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct audio_aio_write_param param;
	struct audio_buffer *buf = NULL;
	unsigned long flag = 0;
	int i = 0;

	pr_debug("%s\n", __func__);
	spin_lock_irqsave(&the_locks.event_lock, flag);
	switch (opcode) {
	case ASM_DATA_EVENT_WRITE_DONE: {
		uint32_t *ptrmem = (uint32_t *)&param;

		pr_debug("ASM_DATA_EVENT_WRITE_DONE\n");
		/* NOTE(review): param is still uninitialized here, so this
		 * debug print shows stack garbage — harmless but misleading. */
		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
		/* One period was consumed by the DSP. */
		prtd->pcm_irq_pos += prtd->pcm_count;
		if (prtd->pcm_irq_pos >= prtd->pcm_size)
			prtd->pcm_irq_pos = 0;
		if (atomic_read(&prtd->start))
			snd_pcm_period_elapsed(substream);
		else
			if (substream->timer_running)
				snd_timer_interrupt(substream->timer, 1);
		atomic_inc(&prtd->out_count);
		wake_up(&the_locks.write_wait);
		if (!atomic_read(&prtd->start)) {
			/* Stream stopped: remember a write is owed and bail. */
			atomic_set(&prtd->pending_buffer, 1);
			break;
		} else
			atomic_set(&prtd->pending_buffer, 0);
		buf = prtd->audio_client->port[IN].buf;
		/* Underrun: no fresh app data, so feed silence to the DSP. */
		if (runtime->status->hw_ptr >= runtime->control->appl_ptr) {
			memset((void *)buf[0].data +
				(prtd->out_head * prtd->pcm_count),
				0, prtd->pcm_count);
		}
		pr_debug("%s:writing %d bytes of buffer to dsp 2\n",
				__func__, prtd->pcm_count);
		param.paddr = (unsigned long)buf[0].phys
				+ (prtd->out_head * prtd->pcm_count);
		param.len = prtd->pcm_count;
		param.msw_ts = 0;
		param.lsw_ts = 0;
		param.flags = NO_TIMESTAMP;
		param.uid = (unsigned long)buf[0].phys
				+ (prtd->out_head * prtd->pcm_count);
		for (i = 0; i < sizeof(struct audio_aio_write_param)/4;
				i++, ++ptrmem)
			pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem);
		if (q6asm_async_write(prtd->audio_client,
					&param) < 0)
			pr_err("%s:q6asm_async_write failed\n",
				__func__);
		else
			prtd->out_head =
				(prtd->out_head + 1) & (runtime->periods - 1);
		atomic_set(&prtd->pending_buffer, 0);
		break;
	}
	case ASM_DATA_CMDRSP_EOS:
		pr_debug("ASM_DATA_CMDRSP_EOS\n");
		prtd->cmd_ack = 1;
		wake_up(&the_locks.eos_wait);
		break;
	case APR_BASIC_RSP_RESULT: {
		switch (payload[0]) {
		case ASM_SESSION_CMD_RUN: {
			/* RUN acked: push the first pending period, if any. */
			if (!atomic_read(&prtd->pending_buffer))
				break;
			if (runtime->status->hw_ptr >=
				runtime->control->appl_ptr)
				break;
			pr_debug("%s:writing %d bytes"
				" of buffer to dsp\n",
				__func__, prtd->pcm_count);
			buf = prtd->audio_client->port[IN].buf;
			param.paddr = (unsigned long)buf[prtd->out_head].phys;
			param.len = prtd->pcm_count;
			param.msw_ts = 0;
			param.lsw_ts = 0;
			param.flags = NO_TIMESTAMP;
			param.uid = (unsigned long)buf[prtd->out_head].phys;
			if (q6asm_async_write(prtd->audio_client,
						&param) < 0)
				pr_err("%s:q6asm_async_write failed\n",
					__func__);
			else
				prtd->out_head =
					(prtd->out_head + 1)
					& (runtime->periods - 1);
			atomic_set(&prtd->pending_buffer, 0);
		}
		break;
		case ASM_STREAM_CMD_FLUSH:
			pr_debug("ASM_STREAM_CMD_FLUSH\n");
			prtd->cmd_ack = 1;
			wake_up(&the_locks.eos_wait);
			break;
		default:
			break;
		}
		break;
	}
	default:
		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
		break;
	}
	spin_unlock_irqrestore(&the_locks.event_lock, flag);
}
/*
 * prepare callback: snapshot the stream geometry into the private data
 * and, on the first prepare after open, send the PCM media-format block
 * to the DSP session.
 */
static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;
	int rc;

	pr_debug("%s\n", __func__);
	/* Cache geometry used by event_handler() and the pointer callback. */
	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
	prtd->pcm_irq_pos = 0;
	prtd->samp_rate = runtime->rate;
	prtd->channel_mode = runtime->channels;
	prtd->out_head = 0;

	/* Configure the DSP session only once per open. */
	if (prtd->enabled)
		return 0;

	rc = q6asm_media_format_block_pcm(prtd->audio_client, runtime->rate,
			runtime->channels);
	if (rc < 0)
		pr_debug("%s: CMD Format block failed\n", __func__);

	atomic_set(&prtd->out_count, runtime->periods);
	prtd->enabled = 1;
	prtd->cmd_ack = 0;
	return 0;
}
/*
 * trigger callback: start/stop/pause the DSP session.
 *
 * START resets the irq position and then falls through to the RESUME /
 * PAUSE_RELEASE handling (issue RUN to the DSP, mark the stream started).
 *
 * Fix: the STOP case contained a dead conditional
 * "if (stream != PLAYBACK) break;" immediately followed by an
 * unconditional "break;" — both paths were identical, so the check is
 * removed. Behavior is unchanged.
 */
static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	int ret = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;

	pr_debug("%s\n", __func__);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		prtd->pcm_irq_pos = 0;
		/* fallthrough */
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		pr_debug("SNDRV_PCM_TRIGGER_START\n");
		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
		atomic_set(&prtd->start, 1);
		atomic_set(&prtd->stop, 0);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
		atomic_set(&prtd->start, 0);
		atomic_set(&prtd->stop, 1);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
		q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
		atomic_set(&prtd->start, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int msm_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
struct msm_audio *prtd;
struct asm_softpause_params softpause = {
.enable = SOFT_PAUSE_ENABLE,
.period = SOFT_PAUSE_PERIOD,
.step = SOFT_PAUSE_STEP,
.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
};
struct asm_softvolume_params softvol = {
.period = SOFT_VOLUME_PERIOD,
.step = SOFT_VOLUME_STEP,
.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
};
int ret = 0;
pr_debug("%s\n", __func__);
prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL);
if (prtd == NULL) {
pr_err("Failed to allocate memory for msm_audio\n");
return -ENOMEM;
}
runtime->hw = msm_pcm_hardware;
prtd->substream = substream;
prtd->audio_client = q6asm_audio_client_alloc(
(app_cb)event_handler, prtd);
if (!prtd->audio_client) {
pr_debug("%s: Could not allocate memory\n", __func__);
kfree(prtd);
return -ENOMEM;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM);
if (ret < 0) {
pr_err("%s: pcm out open failed\n", __func__);
q6asm_audio_client_free(prtd->audio_client);
kfree(prtd);
return -ENOMEM;
}
ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE);
if (ret < 0) {
pr_err("%s: Set IO mode failed\n", __func__);
q6asm_audio_client_free(prtd->audio_client);
kfree(prtd);
return -ENOMEM;
}
}
/* Capture path */
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
return -EPERM;
pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
prtd->session_id = prtd->audio_client->session;
msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
prtd->session_id, substream->stream);
ret = snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&constraints_sample_rates);
if (ret < 0)
pr_debug("snd_pcm_hw_constraint_list failed\n");
/* Ensure that buffer size is a multiple of period size */
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
pr_debug("snd_pcm_hw_constraint_integer failed\n");
prtd->dsp_cnt = 0;
atomic_set(&prtd->pending_buffer, 1);
atomic_set(&prtd->stop, 1);
runtime->private_data = prtd;
lpa_audio.prtd = prtd;
lpa_set_volume(lpa_audio.volume);
ret = q6asm_set_softpause(lpa_audio.prtd->audio_client, &softpause);
if (ret < 0)
pr_err("%s: Send SoftPause Param failed ret=%d\n",
__func__, ret);
ret = q6asm_set_softvolume(lpa_audio.prtd->audio_client, &softvol);
if (ret < 0)
pr_err("%s: Send SoftVolume Param failed ret=%d\n",
__func__, ret);
return 0;
}
int lpa_set_volume(unsigned volume)
{
int rc = 0;
if (lpa_audio.prtd && lpa_audio.prtd->audio_client) {
rc = q6asm_set_volume(lpa_audio.prtd->audio_client, volume);
if (rc < 0) {
pr_err("%s: Send Volume command failed"
" rc=%d\n", __func__, rc);
}
}
lpa_audio.volume = volume;
return rc;
}
/*
 * Tear down a playback session: drain via EOS when the backend is still
 * routed, then close the DSP stream, free the contiguous buffers,
 * deregister from routing, and release the audio client and private data.
 * The teardown order matters (EOS before CMD_CLOSE before buffer free).
 */
static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
struct msm_audio *prtd = runtime->private_data;
int dir = 0;
int rc = 0;
/*
If routing is still enabled, we need to issue EOS to
the DSP
To issue EOS to dsp, we need to be run state otherwise
EOS is not honored.
*/
if (msm_routing_check_backend_enabled(soc_prtd->dai_link->be_id) &&
(!atomic_read(&prtd->stop))) {
rc = q6asm_run(prtd->audio_client, 0, 0, 0);
atomic_set(&prtd->pending_buffer, 0);
prtd->cmd_ack = 0;
q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
pr_debug("%s\n", __func__);
/* cmd_ack is set by event_handler() on ASM_DATA_CMDRSP_EOS. */
rc = wait_event_timeout(the_locks.eos_wait,
prtd->cmd_ack, 5 * HZ);
if (!rc)
pr_err("EOS cmd timeout\n");
prtd->pcm_irq_pos = 0;
}
dir = IN;
atomic_set(&prtd->pending_buffer, 0);
/* Drop the global session pointer before freeing anything so
 * lpa_set_volume() cannot touch a dying client. */
lpa_audio.prtd = NULL;
q6asm_cmd(prtd->audio_client, CMD_CLOSE);
q6asm_audio_client_buf_free_contiguous(dir,
prtd->audio_client);
atomic_set(&prtd->stop, 1);
pr_debug("%s\n", __func__);
msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
SNDRV_PCM_STREAM_PLAYBACK);
pr_debug("%s\n", __func__);
q6asm_audio_client_free(prtd->audio_client);
kfree(prtd);
return 0;
}
/* close callback: only playback streams need any teardown work. */
static int msm_pcm_close(struct snd_pcm_substream *substream)
{
	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
		return 0;
	return msm_pcm_playback_close(substream);
}
/* prepare callback: dispatch to the playback path; capture is a no-op. */
static int msm_pcm_prepare(struct snd_pcm_substream *substream)
{
	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
		return 0;
	return msm_pcm_playback_prepare(substream);
}
/*
 * pointer callback: report the hardware position in frames, derived from
 * the byte position advanced by event_handler() on each WRITE_DONE.
 */
static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct msm_audio *audio = rt->private_data;

	pr_debug("%s: pcm_irq_pos = %d\n", __func__, audio->pcm_irq_pos);
	return bytes_to_frames(rt, audio->pcm_irq_pos);
}
/*
 * mmap callback: map the DSP-shared DMA buffer into user space as
 * non-cached memory.
 *
 * Fix: mmap_flag was set before the DMA buffer was validated, so a
 * failed mmap still left the stream marked as memory-mapped. The flag is
 * now set only after validation succeeds.
 */
static int msm_pcm_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;

	pr_debug("%s\n", __func__);
	if (!runtime->dma_addr || !runtime->dma_bytes) {
		pr_err("Physical address or size of buf is NULL");
		return -EINVAL;
	}
	prtd->mmap_flag = 1;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			runtime->dma_addr >> PAGE_SHIFT,
			runtime->dma_bytes,
			vma->vm_page_prot);
}
/*
 * hw_params callback: allocate one contiguous DSP buffer region sized
 * period_bytes_min * periods_max and expose it to ALSA as the substream's
 * DMA buffer. Only playback is supported; the buffers are freed in
 * msm_pcm_playback_close().
 */
static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_audio *prtd = runtime->private_data;
struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
struct audio_buffer *buf;
int dir, ret;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = IN;
else
return -EPERM;
ret = q6asm_audio_client_buf_alloc_contiguous(dir,
prtd->audio_client,
runtime->hw.period_bytes_min,
runtime->hw.periods_max);
if (ret < 0) {
pr_err("Audio Start: Buffer Allocation failed \
rc = %d\n", ret);
return -ENOMEM;
}
buf = prtd->audio_client->port[dir].buf;
if (buf == NULL || buf[0].data == NULL)
return -ENOMEM;
pr_debug("%s:buf = %p\n", __func__, buf);
/* Wire the client-owned region into ALSA's dma_buffer descriptor. */
dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
dma_buf->dev.dev = substream->pcm->card->dev;
dma_buf->private_data = NULL;
dma_buf->area = buf[0].data;
dma_buf->addr = buf[0].phys;
dma_buf->bytes = runtime->hw.buffer_bytes_max;
if (!dma_buf->area)
return -ENOMEM;
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
return 0;
}
static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
int rc = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_audio *prtd = runtime->private_data;
uint64_t timestamp;
uint64_t temp;
switch (cmd) {
case SNDRV_COMPRESS_TSTAMP: {
struct snd_compr_tstamp tstamp;
pr_debug("SNDRV_COMPRESS_TSTAMP\n");
memset(&tstamp, 0x0, sizeof(struct snd_compr_tstamp));
timestamp = q6asm_get_session_time(prtd->audio_client);
if (timestamp < 0) {
pr_err("%s: Get Session Time return value =%lld\n",
__func__, timestamp);
return -EAGAIN;
}
temp = (timestamp * 2 * runtime->channels);
temp = temp * (runtime->rate/1000);
temp = div_u64(temp, 1000);
tstamp.sampling_rate = runtime->rate;
tstamp.timestamp = timestamp;
pr_debug("%s: bytes_consumed:"
"timestamp = %lld,\n",__func__,
tstamp.timestamp);
if (copy_to_user((void *) arg, &tstamp,
sizeof(struct snd_compr_tstamp)))
return -EFAULT;
return 0;
}
case SNDRV_PCM_IOCTL1_RESET:
prtd->cmd_ack = 0;
rc = q6asm_cmd(prtd->audio_client, CMD_FLUSH);
if (rc < 0)
pr_err("%s: flush cmd failed rc=%d\n", __func__, rc);
rc = wait_event_timeout(the_locks.eos_wait,
prtd->cmd_ack, 5 * HZ);
if (!rc)
pr_err("Flush cmd timeout\n");
prtd->pcm_irq_pos = 0;
break;
case SNDRV_PCM_IOCTL1_ENABLE_EFFECT:
{
struct param {
uint32_t effect_type; /* 0 for POPP, 1 for COPP */
uint32_t module_id;
uint32_t param_id;
uint32_t payload_size;
} q6_param;
void *payload;
pr_info("%s: SNDRV_PCM_IOCTL1_ENABLE_EFFECT\n", __func__);
if (copy_from_user(&q6_param, (void *) arg,
sizeof(q6_param))) {
pr_err("%s: copy param from user failed\n",
__func__);
return -EFAULT;
}
if (q6_param.payload_size <= 0 ||
(q6_param.effect_type != 0 &&
q6_param.effect_type != 1)) {
pr_err("%s: unsupported param: %d, 0x%x, 0x%x, %d\n",
__func__, q6_param.effect_type,
q6_param.module_id, q6_param.param_id,
q6_param.payload_size);
return -EINVAL;
}
payload = kzalloc(q6_param.payload_size, GFP_KERNEL);
if (!payload) {
pr_err("%s: failed to allocate memory\n",
__func__);
return -ENOMEM;
}
if (copy_from_user(payload, (void *) (arg + sizeof(q6_param)),
q6_param.payload_size)) {
pr_err("%s: copy payload from user failed\n",
__func__);
kfree(payload);
return -EFAULT;
}
if (q6_param.effect_type == 0) { /* POPP */
if (!prtd->audio_client) {
pr_debug("%s: audio_client not found\n",
__func__);
kfree(payload);
return -EACCES;
}
rc = q6asm_enable_effect(prtd->audio_client,
q6_param.module_id,
q6_param.param_id,
q6_param.payload_size,
payload);
pr_info("%s: call q6asm_enable_effect, rc %d\n",
__func__, rc);
} else { /* COPP */
int port_id = msm_pcm_routing_get_port(substream);
int index = afe_get_port_index(port_id);
pr_info("%s: use copp topology, port id %d, index %d\n",
__func__, port_id, index);
if (port_id < 0) {
pr_err("%s: invalid port_id %d\n",
__func__, port_id);
} else {
rc = q6adm_enable_effect(index,
q6_param.module_id,
q6_param.param_id,
q6_param.payload_size,
payload);
pr_info("%s: call q6adm_enable_effect, rc %d\n",
__func__, rc);
}
}
#if Q6_EFFECT_DEBUG
{
int *ptr;
int i;
ptr = (int *)payload;
for (i = 0; i < (q6_param.payload_size / 4); i++)
pr_aud_info("0x%08x", *(ptr + i));
}
#endif
kfree(payload);
return rc;
}
default:
break;
}
return snd_pcm_lib_ioctl(substream, cmd, arg);
}
/* ALSA PCM operations for the LPA platform driver. */
static struct snd_pcm_ops msm_pcm_ops = {
.open = msm_pcm_open,
.hw_params = msm_pcm_hw_params,
.close = msm_pcm_close,
.ioctl = msm_pcm_ioctl,
.prepare = msm_pcm_prepare,
.trigger = msm_pcm_trigger,
.pointer = msm_pcm_pointer,
.mmap = msm_pcm_mmap,
};
/*
 * pcm_new callback: ensure the card device has a coherent DMA mask so
 * the DSP-shared buffers can be mapped. No per-PCM allocation happens
 * here; buffers are allocated in hw_params.
 */
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
int ret = 0;
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
return ret;
}
/* ASoC platform driver descriptor registered in msm_pcm_probe(). */
static struct snd_soc_platform_driver msm_soc_platform = {
.ops = &msm_pcm_ops,
.pcm_new = msm_asoc_pcm_new,
};
/* platform probe: register this device as an ASoC platform. */
static __devinit int msm_pcm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	dev_info(dev, "%s: dev name %s\n",
		__func__, dev_name(dev));
	return snd_soc_register_platform(dev, &msm_soc_platform);
}
/* platform remove: undo the ASoC platform registration. */
static int msm_pcm_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}
/* Platform driver bound to the "msm-pcm-lpa" device node. */
static struct platform_driver msm_pcm_driver = {
.driver = {
.name = "msm-pcm-lpa",
.owner = THIS_MODULE,
},
.probe = msm_pcm_probe,
.remove = __devexit_p(msm_pcm_remove),
};
/*
 * Module init: initialize the shared lock and wait queues used by the
 * PCM callbacks before any stream can open, then register the driver.
 */
static int __init msm_soc_platform_init(void)
{
spin_lock_init(&the_locks.event_lock);
init_waitqueue_head(&the_locks.enable_wait);
init_waitqueue_head(&the_locks.eos_wait);
init_waitqueue_head(&the_locks.write_wait);
init_waitqueue_head(&the_locks.read_wait);
return platform_driver_register(&msm_pcm_driver);
}
module_init(msm_soc_platform_init);
/* Module exit: unregister the platform driver. */
static void __exit msm_soc_platform_exit(void)
{
platform_driver_unregister(&msm_pcm_driver);
}
module_exit(msm_soc_platform_exit);
MODULE_DESCRIPTION("PCM module platform driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
HRTKernel/Hacker_Kernel_SM-G92X_MM | drivers/motor/max77843_haptic.c | 19 | 15749 | /*
* haptic motor driver for max77843 - max77673_haptic.c
*
* Copyright (C) 2011 ByungChang Cha <bc.cha@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timed_output.h>
#include <linux/hrtimer.h>
#include <linux/pwm.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/max77843.h>
#include <linux/mfd/max77843-private.h>
#include <plat/devs.h>
#include <linux/sec_sysfs.h>
/* Timeout value (ms) used by factory test mode; such requests skip the
 * auto-off hrtimer in haptic_enable(). */
#define TEST_MODE_TIME 10000
/* Intensity scale: sysfs "intensity" accepts 0..MAX_INTENSITY. */
#define MAX_INTENSITY 10000
/* Bit layout of MAX77843_PMIC_REG_MCONFIG. */
#define MOTOR_LRA (1<<7)
#define MOTOR_EN (1<<6)
#define EXT_PWM (0<<5)
#define DIVIDER_128 (1<<1)
/* MAINCTRL1 register fields. */
#define MAX77843_REG_MAINCTRL1_MRDBTMER_MASK (0x7)
#define MAX77843_REG_MAINCTRL1_MREN (1 << 3)
#define MAX77843_REG_MAINCTRL1_BIASEN (1 << 7)
/* sysfs "motor" class device created in probe for the SEC nodes. */
static struct device *motor_dev;
/* Per-device driver state; one instance per probed haptic device. */
struct max77843_haptic_data {
struct max77843_dev *max77843;
struct i2c_client *i2c;
struct max77843_haptic_platform_data *pdata;
struct pwm_device *pwm;
struct regulator *regulator;
struct timed_output_dev tout_dev;
struct hrtimer timer;
struct workqueue_struct *workqueue;
struct work_struct work;
spinlock_t lock;
bool running;
bool resumed;
u32 intensity;
u32 timeout;
int duty;
};
/* Global handle for the sysfs store/show helpers and PM callbacks;
 * points at the same object as the platform drvdata. */
struct max77843_haptic_data *g_hap_data;
/*
 * Program the PMIC haptic registers over I2C: set/clear the low-side
 * bias (BIASEN), enable or disable the motor driver (MOTOR_EN), and keep
 * the device in LRA mode (MOTOR_LRA).
 *
 * Fix: the last error message referred to "MOTOR_LPA", which is not a
 * register bit; it reports the MOTOR_LRA update and now says so.
 */
static void max77843_haptic_i2c(struct max77843_haptic_data *hap_data, bool en)
{
	int ret;
	u8 lscnfg_val = 0x00;

	pr_info("[VIB] %s %d\n", __func__, en);
	if (en) {
		lscnfg_val = MAX77843_REG_MAINCTRL1_BIASEN;
	}
	ret = max77843_update_reg(hap_data->i2c, MAX77843_PMIC_REG_MAINCTRL1,
			lscnfg_val, MAX77843_REG_MAINCTRL1_BIASEN);
	if (ret)
		pr_err("[VIB] i2c REG_BIASEN update error %d\n", ret);
	if (en) {
		ret = max77843_update_reg(hap_data->i2c,
			MAX77843_PMIC_REG_MCONFIG, 0xff, MOTOR_EN);
	} else {
		ret = max77843_update_reg(hap_data->i2c,
			MAX77843_PMIC_REG_MCONFIG, 0x0, MOTOR_EN);
	}
	if (ret)
		pr_err("[VIB] i2c MOTOR_EN update error %d\n", ret);
	/* LRA mode stays selected regardless of the enable state. */
	ret = max77843_update_reg(hap_data->i2c, MAX77843_PMIC_REG_MCONFIG,
			0xff, MOTOR_LRA);
	if (ret)
		pr_err("[VIB] i2c MOTOR_LRA update error %d\n", ret);
}
static int haptic_get_time(struct timed_output_dev *tout_dev)
{
struct max77843_haptic_data *hap_data
= container_of(tout_dev, struct max77843_haptic_data, tout_dev);
struct hrtimer *timer = &hap_data->timer;
if (hrtimer_active(timer)) {
ktime_t remain = hrtimer_get_remaining(timer);
struct timeval t = ktime_to_timeval(remain);
return t.tv_sec * 1000 + t.tv_usec / 1000;
}
return 0;
}
/*
 * timed_output enable callback: vibrate for `value` milliseconds.
 * Cancels any in-flight work/timer first, queues the worker that drives
 * the PWM/regulator, and arms the auto-off hrtimer unless the request is
 * the factory TEST_MODE_TIME (which runs until explicitly stopped).
 */
static void haptic_enable(struct timed_output_dev *tout_dev, int value)
{
struct max77843_haptic_data *hap_data
= container_of(tout_dev, struct max77843_haptic_data, tout_dev);
struct hrtimer *timer = &hap_data->timer;
unsigned long flags;
cancel_work_sync(&hap_data->work);
hrtimer_cancel(timer);
hap_data->timeout = value;
/* haptic_work() turns the motor on when timeout > 0, off otherwise. */
queue_work(hap_data->workqueue, &hap_data->work);
spin_lock_irqsave(&hap_data->lock, flags);
if (value > 0 && value != TEST_MODE_TIME) {
pr_debug("[VIB] %s value %d\n", __func__, value);
value = min(value, (int)hap_data->pdata->max_timeout);
hrtimer_start(timer, ns_to_ktime((u64)value * NSEC_PER_MSEC),
HRTIMER_MODE_REL);
}
spin_unlock_irqrestore(&hap_data->lock, flags);
}
/* Auto-off timer: schedule the worker to switch the motor off. */
static enum hrtimer_restart haptic_timer_func(struct hrtimer *timer)
{
	struct max77843_haptic_data *hap
		= container_of(timer, struct max77843_haptic_data, timer);

	hap->timeout = 0;
	queue_work(hap->workqueue, &hap->work);
	return HRTIMER_NORESTART;
}
/*
 * Enable or disable the PWM timer clock used for the vibration waveform.
 *
 * Fixes:
 *  - of_clk_get_by_name() returns an ERR_PTR on failure, never NULL, so
 *    the old "if (!vibetonz_clk)" check could not catch errors; it now
 *    uses IS_ERR().
 *  - The clock was dereferenced (clk_get_rate() in the pr_info) before
 *    the IS_ERR() check, and the error label called clk_put() on an
 *    ERR_PTR. Both paths now validate before any use.
 */
static int vibetonz_clk_on(struct device *dev, bool en)
{
	struct clk *vibetonz_clk;
#if defined(CONFIG_OF)
	struct device_node *np;

	np = of_find_node_by_name(NULL,"pwm");
	if (np == NULL) {
		pr_err("[VIB] %s : pwm error to get dt node\n", __func__);
		return -EINVAL;
	}
	vibetonz_clk = of_clk_get_by_name(np, "gate_timers");
	if (IS_ERR(vibetonz_clk)) {
		pr_info("[VIB] %s fail to get the vibetonz_clk\n", __func__);
		return -EINVAL;
	}
#else
	vibetonz_clk = clk_get(dev, "timers");
	if (IS_ERR(vibetonz_clk)) {
		pr_err("[VIB] failed to get clock for the motor\n");
		return -EINVAL;
	}
#endif
	pr_info("[VIB] DEV NAME %s %lu\n",
		dev_name(dev), clk_get_rate(vibetonz_clk));
	if (en)
		clk_enable(vibetonz_clk);
	else
		clk_disable(vibetonz_clk);
	clk_put(vibetonz_clk);
	return 0;
}
/*
 * Worker that actually drives the motor. Turn-on path: PMIC enable via
 * I2C, PWM config+enable, then the board enable hook or the regulator.
 * Turn-off path is the reverse. `running` guards against redundant
 * transitions when the worker fires twice in the same state.
 */
static void haptic_work(struct work_struct *work)
{
struct max77843_haptic_data *hap_data
= container_of(work, struct max77843_haptic_data, work);
pr_info("[VIB] %s\n", __func__);
if (hap_data->timeout > 0 && hap_data->intensity) {
if (hap_data->running)
return;
max77843_haptic_i2c(hap_data, true);
pwm_config(hap_data->pwm, hap_data->duty,
hap_data->pdata->period);
pwm_enable(hap_data->pwm);
/* Boards either provide a motor_en() hook or a regulator. */
if (hap_data->pdata->motor_en)
hap_data->pdata->motor_en(true);
else {
int ret;
ret = regulator_enable(hap_data->regulator);
pr_info("regulator_enable returns %d\n", ret);
}
hap_data->running = true;
} else {
if (!hap_data->running)
return;
if (hap_data->pdata->motor_en)
hap_data->pdata->motor_en(false);
else
regulator_disable(hap_data->regulator);
pwm_disable(hap_data->pwm);
max77843_haptic_i2c(hap_data, false);
hap_data->running = false;
}
return;
}
#if defined(CONFIG_OF)
/*
 * Parse the "haptic" child node of the MFD parent into a freshly
 * allocated platform-data struct. Returns NULL (after freeing pdata) if
 * the node or any required property is missing. The caller owns the
 * returned allocation.
 * NOTE(review): the node reference from of_find_node_by_name() is never
 * released with of_node_put() — confirm whether this tree requires it.
 */
static struct max77843_haptic_platform_data *of_max77843_haptic_dt(struct device *dev)
{
struct device_node *np_root = dev->parent->of_node;
struct device_node *np_haptic;
struct max77843_haptic_platform_data *pdata;
u32 temp;
const char *temp_str;
int ret;
pdata = kzalloc(sizeof(struct max77843_haptic_platform_data), GFP_KERNEL);
if (!pdata) {
pr_err("%s: failed to allocate driver data\n", __func__);
return NULL;
}
printk("%s : start dt parsing\n", __func__);
np_haptic = of_find_node_by_name(np_root, "haptic");
if (np_haptic == NULL) {
pr_err("[VIB] %s : error to get dt node\n", __func__);
goto err_parsing_dt;
}
ret = of_property_read_u32(np_haptic, "haptic,max_timeout", &temp);
if (IS_ERR_VALUE(ret)) {
pr_err("[VIB] %s : error to get dt node max_timeout\n", __func__);
goto err_parsing_dt;
} else
pdata->max_timeout = (u16)temp;
ret = of_property_read_u32(np_haptic, "haptic,duty", &temp);
if (IS_ERR_VALUE(ret)) {
pr_err("[VIB] %s : error to get dt node duty\n", __func__);
goto err_parsing_dt;
} else
pdata->duty = (u16)temp;
ret = of_property_read_u32(np_haptic, "haptic,period", &temp);
if (IS_ERR_VALUE(ret)) {
pr_err("[VIB] %s : error to get dt node period\n", __func__);
goto err_parsing_dt;
} else
pdata->period = (u16)temp;
ret = of_property_read_u32(np_haptic, "haptic,pwm_id", &temp);
if (IS_ERR_VALUE(ret)) {
pr_err("[VIB] %s : error to get dt node pwm_id\n", __func__);
goto err_parsing_dt;
} else
pdata->pwm_id = (u16)temp;
ret = of_property_read_string(np_haptic, "haptic,regulator_name", &temp_str);
if (IS_ERR_VALUE(ret)) {
pr_err("[VIB] %s : error to get dt node regulator_name\n", __func__);
goto err_parsing_dt;
} else
pdata->regulator_name = (char *)temp_str;
/* debugging */
printk("%s : max_timeout = %d\n", __func__, pdata->max_timeout);
printk("%s : duty = %d\n", __func__, pdata->duty);
printk("%s : period = %d\n", __func__, pdata->period);
printk("%s : pwm_id = %d\n", __func__, pdata->pwm_id);
printk("%s : regulator_name = %s\n", __func__, pdata->regulator_name);
/* DT-based boards use the regulator path, not board callbacks. */
pdata->init_hw = NULL;
pdata->motor_en = NULL;
return pdata;
err_parsing_dt:
kfree(pdata);
return NULL;
}
#endif
/*
 * sysfs "set_duty" store: parse a u16 PWM duty value and update pdata.
 *
 * Fix: the input was copied into a 10-byte stack buffer with memcpy of
 * the full (up to PAGE_SIZE) sysfs write length — a stack buffer
 * overflow. The copy is now clamped to sizeof(buff) - 1.
 */
static ssize_t store_duty(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count)
{
	char buff[10] = {0,};
	size_t cnt;
	int ret;
	u16 duty;

	cnt = count;
	if (cnt && buf[cnt - 1] == '\n')
		cnt--;
	if (cnt >= sizeof(buff))
		cnt = sizeof(buff) - 1;
	memcpy(buff, buf, cnt);
	buff[cnt] = '\0';
	ret = kstrtou16(buff, 0, &duty);
	if (ret != 0) {
		dev_err(dev, "[VIB] fail to get duty.\n");
		return count;
	}
	g_hap_data->pdata->duty = duty;
	return count;
}
/*
 * sysfs "set_period" store: parse a u16 PWM period value and update
 * pdata.
 *
 * Fix: same stack buffer overflow as store_duty — the memcpy into the
 * 10-byte buffer was unbounded; the copy is now clamped to
 * sizeof(buff) - 1.
 */
static ssize_t store_period(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count)
{
	char buff[10] = {0,};
	size_t cnt;
	int ret;
	u16 period;

	cnt = count;
	if (cnt && buf[cnt - 1] == '\n')
		cnt--;
	if (cnt >= sizeof(buff))
		cnt = sizeof(buff) - 1;
	memcpy(buff, buf, cnt);
	buff[cnt] = '\0';
	ret = kstrtou16(buff, 0, &period);
	if (ret != 0) {
		dev_err(dev, "[VIB] fail to get period.\n");
		return count;
	}
	g_hap_data->pdata->period = period;
	return count;
}
/*
 * sysfs "show_duty_period" read: report the current PWM duty and period.
 *
 * Fix: the format string ran the label into the value ("period%u");
 * it now prints "period: %u" to match the duty field. This is a
 * debug/factory node, so the format change should be safe.
 */
static ssize_t show_duty_period(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "duty: %u, period: %u\n",
			g_hap_data->pdata->duty,
			g_hap_data->pdata->period);
}
/* below nodes is SAMSUNG specific nodes */
/* Write-only tuning knobs plus a read-only dump, exposed on the "motor"
 * class device created in probe. */
static DEVICE_ATTR(set_duty, 0220, NULL, store_duty);
static DEVICE_ATTR(set_period, 0220, NULL, store_period);
static DEVICE_ATTR(show_duty_period, 0440, show_duty_period, NULL);
static struct attribute *sec_motor_attributes[] = {
&dev_attr_set_duty.attr,
&dev_attr_set_period.attr,
&dev_attr_show_duty_period.attr,
NULL,
};
static struct attribute_group sec_motor_attr_group = {
.attrs = sec_motor_attributes,
};
/*
 * sysfs "intensity" store: accept 0..MAX_INTENSITY and derive the PWM
 * duty between period/2 (silent) and pdata->duty (full strength).
 *
 * Fixes: the kstrtoint() return value was ignored, so unparseable input
 * silently became intensity 0; it now returns -EINVAL. The error message
 * typo "out of rage" is corrected to "out of range".
 */
static ssize_t intensity_store(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count)
{
	struct timed_output_dev *tdev = dev_get_drvdata(dev);
	struct max77843_haptic_data *drvdata
		= container_of(tdev, struct max77843_haptic_data, tout_dev);
	int duty = drvdata->pdata->period >> 1;
	int intensity = 0, ret = 0;

	ret = kstrtoint(buf, 0, &intensity);
	if (ret)
		return -EINVAL;
	if (intensity < 0 || MAX_INTENSITY < intensity) {
		pr_err("out of range\n");
		return -EINVAL;
	}
	if (MAX_INTENSITY == intensity)
		duty = drvdata->pdata->duty;
	else if (0 != intensity) {
		/* NOTE(review): intensity/100 truncates, so the duty only
		 * changes in steps of 100 — confirm this is intended. */
		long long tmp = drvdata->pdata->duty >> 1;

		tmp *= (intensity / 100);
		duty += (int)(tmp / 100);
	}
	drvdata->intensity = intensity;
	drvdata->duty = duty;
	pwm_config(drvdata->pwm, duty, drvdata->pdata->period);
	return count;
}
/*
 * sysfs "intensity" show: report the stored intensity.
 * NOTE(review): intensity is stored as 0..MAX_INTENSITY (10000) but is
 * printed multiplied by 100, yielding values up to 1000000 — confirm
 * what scale userspace expects before changing this.
 */
static ssize_t intensity_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
struct timed_output_dev *tdev = dev_get_drvdata(dev);
struct max77843_haptic_data *drvdata
= container_of(tdev, struct max77843_haptic_data, tout_dev);
return sprintf(buf, "intensity: %u\n",
(drvdata->intensity * 100));
}
static DEVICE_ATTR(intensity, 0660, intensity_show, intensity_store);
/*
 * Probe: obtain platform data (from the MFD parent or, on CONFIG_OF,
 * from the "haptic" DT node), allocate driver state, set up workqueue,
 * PWM, clock, regulator, hrtimer, the timed_output device and the
 * Samsung sysfs nodes. Unwinds in reverse order on failure.
 * NOTE(review): if sysfs_create_file() fails after
 * timed_output_dev_register() succeeded, the error path does not
 * unregister the timed_output device — confirm and fix separately.
 */
static int max77843_haptic_probe(struct platform_device *pdev)
{
int error = 0;
struct max77843_dev *max77843 = dev_get_drvdata(pdev->dev.parent);
struct max77843_platform_data *max77843_pdata
= dev_get_platdata(max77843->dev);
struct max77843_haptic_platform_data *pdata
= max77843_pdata->haptic_data;
struct max77843_haptic_data *hap_data;
pr_info("[VIB] ++ %s\n", __func__);
#if defined(CONFIG_OF)
if (pdata == NULL) {
pdata = of_max77843_haptic_dt(&pdev->dev);
if (!pdata) {
pr_err("[VIB] max77843-haptic : %s not found haptic dt!\n",
__func__);
return -1;
}
}
#else
if (pdata == NULL) {
pr_err("%s: no pdata\n", __func__);
return -ENODEV;
}
#endif /* CONFIG_OF */
hap_data = kzalloc(sizeof(struct max77843_haptic_data), GFP_KERNEL);
if (!hap_data) {
kfree(pdata);
return -ENOMEM;
}
platform_set_drvdata(pdev, hap_data);
g_hap_data = hap_data;
hap_data->max77843 = max77843;
hap_data->i2c = max77843->i2c;
hap_data->pdata = pdata;
hap_data->workqueue = create_singlethread_workqueue("hap_work");
if (NULL == hap_data->workqueue) {
error = -EFAULT;
pr_err("[VIB] Failed to create workqueue, err num: %d\n", error);
goto err_work_queue;
}
INIT_WORK(&(hap_data->work), haptic_work);
spin_lock_init(&(hap_data->lock));
hap_data->pwm = pwm_request(hap_data->pdata->pwm_id, "vibrator");
if (IS_ERR(hap_data->pwm)) {
error = -EFAULT;
pr_err("[VIB] Failed to request pwm, err num: %d\n", error);
goto err_pwm_request;
}
/* Default to 50% duty until "intensity" is written. */
pwm_config(hap_data->pwm, pdata->period / 2, pdata->period);
vibetonz_clk_on(&pdev->dev, true);
/* Boards supply either an init_hw() hook or a regulator name. */
if (pdata->init_hw)
pdata->init_hw();
else
hap_data->regulator
= regulator_get(NULL, pdata->regulator_name);
if (IS_ERR(hap_data->regulator)) {
error = -EFAULT;
pr_err("[VIB] Failed to get vmoter regulator, err num: %d\n", error);
goto err_regulator_get;
}
/* hrtimer init */
hrtimer_init(&hap_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hap_data->timer.function = haptic_timer_func;
/* timed_output_dev init*/
hap_data->tout_dev.name = "vibrator";
hap_data->tout_dev.get_time = haptic_get_time;
hap_data->tout_dev.enable = haptic_enable;
hap_data->resumed = false;
motor_dev = sec_device_create(hap_data, "motor");
if (IS_ERR(motor_dev)) {
error = -ENODEV;
pr_err("[VIB] Failed to create device\
for samsung specific motor, err num: %d\n", error);
goto exit_sec_devices;
}
error = sysfs_create_group(&motor_dev->kobj, &sec_motor_attr_group);
if (error) {
error = -ENODEV;
pr_err("[VIB] Failed to create sysfs group\
for samsung specific motor, err num: %d\n", error);
goto exit_sysfs;
}
#ifdef CONFIG_ANDROID_TIMED_OUTPUT
error = timed_output_dev_register(&hap_data->tout_dev);
if (error < 0) {
error = -EFAULT;
pr_err("[VIB] Failed to register timed_output : %d\n", error);
goto err_timed_output_register;
}
#endif
error = sysfs_create_file(&hap_data->tout_dev.dev->kobj,
&dev_attr_intensity.attr);
if (error < 0) {
pr_err("Failed to register sysfs : %d\n", error);
goto err_timed_output_register;
}
pr_debug("[VIB] -- %s\n", __func__);
return error;
err_timed_output_register:
sysfs_remove_group(&motor_dev->kobj, &sec_motor_attr_group);
exit_sysfs:
sec_device_destroy(motor_dev->devt);
exit_sec_devices:
regulator_put(hap_data->regulator);
err_regulator_get:
pwm_free(hap_data->pwm);
err_pwm_request:
destroy_workqueue(hap_data->workqueue);
err_work_queue:
kfree(hap_data);
kfree(pdata);
g_hap_data = NULL;
return error;
}
/*
 * Remove: unwind everything probe set up — timed_output device, sysfs
 * group, sec device, regulator, PWM and workqueue — then force the motor
 * off over I2C and free the driver state.
 */
static int __devexit max77843_haptic_remove(struct platform_device *pdev)
{
struct max77843_haptic_data *data = platform_get_drvdata(pdev);
#ifdef CONFIG_ANDROID_TIMED_OUTPUT
timed_output_dev_unregister(&data->tout_dev);
#endif
sysfs_remove_group(&motor_dev->kobj, &sec_motor_attr_group);
sec_device_destroy(motor_dev->devt);
regulator_put(data->regulator);
pwm_free(data->pwm);
destroy_workqueue(data->workqueue);
max77843_haptic_i2c(data, false);
kfree(data);
g_hap_data = NULL;
return 0;
}
/*
 * Suspend: cancel any pending vibration work and the auto-off timer,
 * then force the motor off over I2C. (The platform drvdata is the same
 * object as g_hap_data, set in probe.)
 */
static int max77843_haptic_suspend(struct platform_device *pdev,
			pm_message_t state)
{
	struct max77843_haptic_data *hap = platform_get_drvdata(pdev);

	pr_info("[VIB] %s\n", __func__);
	cancel_work_sync(&hap->work);
	hrtimer_cancel(&hap->timer);
	max77843_haptic_i2c(hap, false);
	return 0;
}
/* Resume: just note that we came back from suspend. */
static int max77843_haptic_resume(struct platform_device *pdev)
{
	pr_info("[VIB] %s\n", __func__);
	g_hap_data->resumed = true;
	return 0;
}
/* Platform driver bound to the MFD cell named "max77843-haptic".
 * NOTE(review): .remove points at a __devexit function without the
 * __devexit_p() wrapper — on configurations where __devexit discards the
 * section this would not link; confirm how this tree defines __devexit. */
static struct platform_driver max77843_haptic_driver = {
.probe = max77843_haptic_probe,
.remove = max77843_haptic_remove,
.suspend = max77843_haptic_suspend,
.resume = max77843_haptic_resume,
.driver = {
.name = "max77843-haptic",
.owner = THIS_MODULE,
},
};
/* Module init: register the platform driver. */
static int __init max77843_haptic_init(void)
{
pr_debug("[VIB] %s\n", __func__);
return platform_driver_register(&max77843_haptic_driver);
}
module_init(max77843_haptic_init);
/* Module exit: unregister the platform driver. */
static void __exit max77843_haptic_exit(void)
{
platform_driver_unregister(&max77843_haptic_driver);
}
module_exit(max77843_haptic_exit);
MODULE_AUTHOR("ByungChang Cha <bc.cha@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("max77843 haptic driver");
| gpl-2.0 |
gouwa/linux-rk3288 | drivers/net/wireless/rockchip_wlan/rtl8189es/hal/OUTSRC/rtl8188e/HalHWImg8188E_BB.c | 19 | 27437 | /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#include "Mp_Precomp.h"
#include "../phydm_precomp.h"
#if (RTL8188E_SUPPORT == 1)
/*
 * CheckPositive - decide whether a "positive" parameter-table condition
 * entry applies to the current board/driver combination.
 *
 * driver1/driver2 are packed the same way the tables pack Condition1/
 * Condition2:
 *   driver1: [27:24] cut version, [23:16] platform, [15:12] package type,
 *            [11:8] interface, [4:0] board-type bits (GLNA/GPA/ALNA/APA/BT)
 *   driver2: per-RF-path type codes, one byte each (GLNA/GPA/ALNA/APA).
 *
 * Package type [15:12] and cut version [27:24] are compared by value when
 * the table field is non-zero; the remaining fields are compared as bit
 * masks.  Returns TRUE when every populated field matches this driver.
 */
static BOOLEAN
CheckPositive(
	IN  PDM_ODM_T     pDM_Odm,
	IN  const u4Byte  Condition1,
	IN  const u4Byte  Condition2
	)
{
	u1Byte    _BoardType = ((pDM_Odm->BoardType & BIT4) >> 4) << 0 | /* _GLNA */
				((pDM_Odm->BoardType & BIT3) >> 3) << 1 | /* _GPA  */
				((pDM_Odm->BoardType & BIT7) >> 7) << 2 | /* _ALNA */
				((pDM_Odm->BoardType & BIT6) >> 6) << 3 | /* _APA  */
				((pDM_Odm->BoardType & BIT2) >> 2) << 4;  /* _BT   */
	u4Byte    cond1 = Condition1, cond2 = Condition2;
	u4Byte    driver1 = pDM_Odm->CutVersion       << 24 |
				pDM_Odm->SupportPlatform  << 16 |
				pDM_Odm->PackageType      << 12 |
				pDM_Odm->SupportInterface << 8  |
				_BoardType;
	u4Byte    driver2 = pDM_Odm->TypeGLNA <<  0 |
				pDM_Odm->TypeGPA  <<  8 |
				pDM_Odm->TypeALNA << 16 |
				pDM_Odm->TypeAPA  << 24;

	/* NOTE(review): the trace tags previously said "[8812A]" -- a copy-paste
	 * leftover; this file configures the 8188E, so the tags were corrected
	 * to keep debug logs attributable to the right IC. */
	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
	("===> [8188E] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n", cond1, cond2));
	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
	("===> [8188E] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n", driver1, driver2));
	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
	("	(Platform, Interface) = (0x%X, 0x%X)\n", pDM_Odm->SupportPlatform, pDM_Odm->SupportInterface));
	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
	("	(Board, Package) = (0x%X, 0x%X)\n", pDM_Odm->BoardType, pDM_Odm->PackageType));

	/*============== Value Defined Check ===============*/
	/* QFN type [15:12] and cut version [27:24] need a value comparison. */
	if (((cond1 & 0x0000F000) != 0) && ((cond1 & 0x0000F000) != (driver1 & 0x0000F000)))
		return FALSE;
	if (((cond1 & 0x0F000000) != 0) && ((cond1 & 0x0F000000) != (driver1 & 0x0F000000)))
		return FALSE;

	/*=============== Bit Defined Check ================*/
	/* We don't care about [31:28] and [23:20]. */
	cond1   &= 0x000F0FFF;
	driver1 &= 0x000F0FFF;

	if ((cond1 & driver1) == cond1)
	{
		u4Byte bitMask = 0;

		if ((cond1 & 0x0F) == 0) /* BoardType is DONTCARE */
			return TRUE;

		if ((cond1 & BIT0) != 0) /* GLNA */
			bitMask |= 0x000000FF;
		if ((cond1 & BIT1) != 0) /* GPA */
			bitMask |= 0x0000FF00;
		if ((cond1 & BIT2) != 0) /* ALNA */
			bitMask |= 0x00FF0000;
		if ((cond1 & BIT3) != 0) /* APA */
			bitMask |= 0xFF000000;

		if ((cond2 & bitMask) == (driver2 & bitMask)) /* BoardType of each RF path matched */
			return TRUE;
		else
			return FALSE;
	}
	else
	{
		return FALSE;
	}
}
/*
 * CheckNegative - placeholder for "negative" condition entries.
 *
 * Negative conditions are not implemented for this IC, so the check
 * unconditionally succeeds.  The parameters exist only to keep the
 * signature symmetric with CheckPositive().
 */
static BOOLEAN
CheckNegative(
	IN  PDM_ODM_T     pDM_Odm,
	IN  const u4Byte  Condition1,
	IN  const u4Byte  Condition2
	)
{
	return TRUE;
}
/******************************************************************************
* AGC_TAB.TXT
******************************************************************************/
u4Byte Array_MP_8188E_AGC_TAB[] = {
0x88000001,0x00000000,0x40000000,0x00000000,
0xC78, 0xF6000001,
0xC78, 0xF5010001,
0xC78, 0xF4020001,
0xC78, 0xF3030001,
0xC78, 0xF2040001,
0x90000001,0x00000000,0x40000000,0x00000000,
0xC78, 0xF7000001,
0xC78, 0xF6010001,
0xC78, 0xF5020001,
0xC78, 0xF4030001,
0xC78, 0xF3040001,
0xA0000000,0x00000000,
0xC78, 0xFB000001,
0xC78, 0xFB010001,
0xC78, 0xFB020001,
0xC78, 0xFB030001,
0xC78, 0xFB040001,
0xB0000000,0x00000000,
0x88000001,0x00000000,0x40000000,0x00000000,
0xC78, 0xF1050001,
0xC78, 0xF0060001,
0xC78, 0xEF070001,
0xC78, 0xEE080001,
0xC78, 0xED090001,
0xC78, 0xEC0A0001,
0xC78, 0xEB0B0001,
0xC78, 0xEA0C0001,
0xC78, 0xE90D0001,
0xC78, 0xE80E0001,
0xC78, 0xE70F0001,
0xC78, 0xE6100001,
0xC78, 0xE5110001,
0xC78, 0xE4120001,
0xC78, 0xE3130001,
0xC78, 0xE2140001,
0xC78, 0xC5150001,
0xC78, 0xC4160001,
0xC78, 0xC3170001,
0xC78, 0xC2180001,
0xC78, 0x88190001,
0xC78, 0x871A0001,
0xC78, 0x861B0001,
0xC78, 0x851C0001,
0xC78, 0x841D0001,
0xC78, 0x831E0001,
0xC78, 0x821F0001,
0xC78, 0x81200001,
0xC78, 0x80210001,
0x90000001,0x00000000,0x40000000,0x00000000,
0xC78, 0xF2050001,
0xC78, 0xF1060001,
0xC78, 0xF0070001,
0xC78, 0xEF080001,
0xC78, 0xEE090001,
0xC78, 0xED0A0001,
0xC78, 0xEC0B0001,
0xC78, 0xEB0C0001,
0xC78, 0xEA0D0001,
0xC78, 0xE90E0001,
0xC78, 0xE80F0001,
0xC78, 0xE7100001,
0xC78, 0xE6110001,
0xC78, 0xE5120001,
0xC78, 0xE4130001,
0xC78, 0xE3140001,
0xC78, 0xE2150001,
0xC78, 0xE1160001,
0xC78, 0x89170001,
0xC78, 0x88180001,
0xC78, 0x87190001,
0xC78, 0x861A0001,
0xC78, 0x851B0001,
0xC78, 0x841C0001,
0xC78, 0x831D0001,
0xC78, 0x821E0001,
0xC78, 0x811F0001,
0xC78, 0x6B200001,
0xC78, 0x6A210001,
0x98000000,0x00000000,0x40000000,0x00000000,
0xC78, 0xFA050001,
0xC78, 0xF9060001,
0xC78, 0xF8070001,
0xC78, 0xF7080001,
0xC78, 0xF6090001,
0xC78, 0xF50A0001,
0xC78, 0xF40B0001,
0xC78, 0xF30C0001,
0xC78, 0xF20D0001,
0xC78, 0xF10E0001,
0xC78, 0xF00F0001,
0xC78, 0xEF100001,
0xC78, 0xEE110001,
0xC78, 0xED120001,
0xC78, 0xEC130001,
0xC78, 0xEB140001,
0xC78, 0xEA150001,
0xC78, 0xE9160001,
0xC78, 0xE8170001,
0xC78, 0xE7180001,
0xC78, 0xE6190001,
0xC78, 0xE51A0001,
0xC78, 0xE41B0001,
0xC78, 0xC71C0001,
0xC78, 0xC61D0001,
0xC78, 0xC51E0001,
0xC78, 0xC41F0001,
0xC78, 0xC3200001,
0xC78, 0xC2210001,
0xA0000000,0x00000000,
0xC78, 0xFB050001,
0xC78, 0xFA060001,
0xC78, 0xF9070001,
0xC78, 0xF8080001,
0xC78, 0xF7090001,
0xC78, 0xF60A0001,
0xC78, 0xF50B0001,
0xC78, 0xF40C0001,
0xC78, 0xF30D0001,
0xC78, 0xF20E0001,
0xC78, 0xF10F0001,
0xC78, 0xF0100001,
0xC78, 0xEF110001,
0xC78, 0xEE120001,
0xC78, 0xED130001,
0xC78, 0xEC140001,
0xC78, 0xEB150001,
0xC78, 0xEA160001,
0xC78, 0xE9170001,
0xC78, 0xE8180001,
0xC78, 0xE7190001,
0xC78, 0xE61A0001,
0xC78, 0xE51B0001,
0xC78, 0xE41C0001,
0xC78, 0xE31D0001,
0xC78, 0xE21E0001,
0xC78, 0xE11F0001,
0xC78, 0x8A200001,
0xC78, 0x89210001,
0xB0000000,0x00000000,
0x88000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x66220001,
0xC78, 0x65230001,
0xC78, 0x64240001,
0xC78, 0x63250001,
0xC78, 0x62260001,
0xC78, 0x61270001,
0xC78, 0x60280001,
0x90000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x69220001,
0xC78, 0x68230001,
0xC78, 0x67240001,
0xC78, 0x66250001,
0xC78, 0x65260001,
0xC78, 0x64270001,
0xC78, 0x63280001,
0xA0000000,0x00000000,
0xC78, 0x88220001,
0xC78, 0x87230001,
0xC78, 0x86240001,
0xC78, 0x85250001,
0xC78, 0x84260001,
0xC78, 0x83270001,
0xC78, 0x82280001,
0xB0000000,0x00000000,
0x88000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x4A290001,
0xC78, 0x492A0001,
0xC78, 0x482B0001,
0xC78, 0x472C0001,
0x90000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x62290001,
0xC78, 0x612A0001,
0xC78, 0x462B0001,
0xC78, 0x452C0001,
0x98000000,0x00000000,0x40000000,0x00000000,
0xC78, 0x81290001,
0xC78, 0x242A0001,
0xC78, 0x232B0001,
0xC78, 0x222C0001,
0xA0000000,0x00000000,
0xC78, 0x6B290001,
0xC78, 0x6A2A0001,
0xC78, 0x692B0001,
0xC78, 0x682C0001,
0xB0000000,0x00000000,
0x88000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x462D0001,
0xC78, 0x452E0001,
0xC78, 0x442F0001,
0xC78, 0x43300001,
0xC78, 0x42310001,
0xC78, 0x41320001,
0xC78, 0x40330001,
0x90000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x442D0001,
0xC78, 0x432E0001,
0xC78, 0x422F0001,
0xC78, 0x41300001,
0xC78, 0x40310001,
0xC78, 0x40320001,
0xC78, 0x40330001,
0xA0000000,0x00000000,
0xC78, 0x672D0001,
0xC78, 0x662E0001,
0xC78, 0x652F0001,
0xC78, 0x64300001,
0xC78, 0x63310001,
0xC78, 0x62320001,
0xC78, 0x61330001,
0xB0000000,0x00000000,
0x88000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x40340001,
0xC78, 0x40350001,
0xC78, 0x40360001,
0xC78, 0x40370001,
0xC78, 0x40380001,
0xC78, 0x40390001,
0xC78, 0x403A0001,
0xC78, 0x403B0001,
0xC78, 0x403C0001,
0xC78, 0x403D0001,
0xC78, 0x403E0001,
0x90000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x40340001,
0xC78, 0x40350001,
0xC78, 0x40360001,
0xC78, 0x40370001,
0xC78, 0x40380001,
0xC78, 0x40390001,
0xC78, 0x403A0001,
0xC78, 0x403B0001,
0xC78, 0x403C0001,
0xC78, 0x403D0001,
0xC78, 0x403E0001,
0x98000000,0x00000000,0x40000000,0x00000000,
0xC78, 0x60340001,
0xC78, 0x4A350001,
0xC78, 0x49360001,
0xC78, 0x48370001,
0xC78, 0x47380001,
0xC78, 0x46390001,
0xC78, 0x453A0001,
0xC78, 0x443B0001,
0xC78, 0x433C0001,
0xC78, 0x423D0001,
0xC78, 0x413E0001,
0xA0000000,0x00000000,
0xC78, 0x46340001,
0xC78, 0x45350001,
0xC78, 0x44360001,
0xC78, 0x43370001,
0xC78, 0x42380001,
0xC78, 0x41390001,
0xC78, 0x403A0001,
0xC78, 0x403B0001,
0xC78, 0x403C0001,
0xC78, 0x403D0001,
0xC78, 0x403E0001,
0xB0000000,0x00000000,
0xC78, 0x403F0001,
0x88000001,0x00000000,0x40000000,0x00000000,
0xC78, 0xFB400001,
0xC78, 0xFA410001,
0xC78, 0xF9420001,
0xC78, 0xF8430001,
0xC78, 0xF7440001,
0xC78, 0xF6450001,
0xC78, 0xF5460001,
0xC78, 0xF4470001,
0xC78, 0xF3480001,
0xC78, 0xF2490001,
0xC78, 0xF14A0001,
0xC78, 0xF04B0001,
0xC78, 0xEF4C0001,
0xC78, 0xEE4D0001,
0xC78, 0xED4E0001,
0xC78, 0xEC4F0001,
0xC78, 0xEB500001,
0xC78, 0xEA510001,
0xC78, 0xE9520001,
0xC78, 0xE8530001,
0xC78, 0xE7540001,
0xC78, 0xE6550001,
0xC78, 0xE5560001,
0xC78, 0xC6570001,
0xC78, 0xC5580001,
0xC78, 0xC4590001,
0xC78, 0xC35A0001,
0xC78, 0xC25B0001,
0xC78, 0xC15C0001,
0xC78, 0xC05D0001,
0xC78, 0xA35E0001,
0xC78, 0xA25F0001,
0xC78, 0xA1600001,
0xC78, 0x88610001,
0xC78, 0x87620001,
0xC78, 0x86630001,
0xC78, 0x85640001,
0xC78, 0x84650001,
0xC78, 0x83660001,
0xC78, 0x82670001,
0x90000001,0x00000000,0x40000000,0x00000000,
0xC78, 0xFB400001,
0xC78, 0xFA410001,
0xC78, 0xF9420001,
0xC78, 0xF8430001,
0xC78, 0xF7440001,
0xC78, 0xF6450001,
0xC78, 0xF5460001,
0xC78, 0xF4470001,
0xC78, 0xF3480001,
0xC78, 0xF2490001,
0xC78, 0xF14A0001,
0xC78, 0xF04B0001,
0xC78, 0xEF4C0001,
0xC78, 0xEE4D0001,
0xC78, 0xED4E0001,
0xC78, 0xEC4F0001,
0xC78, 0xEB500001,
0xC78, 0xEA510001,
0xC78, 0xE9520001,
0xC78, 0xE8530001,
0xC78, 0xE7540001,
0xC78, 0xE6550001,
0xC78, 0xE5560001,
0xC78, 0xE4570001,
0xC78, 0xE3580001,
0xC78, 0xE2590001,
0xC78, 0xC35A0001,
0xC78, 0xC25B0001,
0xC78, 0xC15C0001,
0xC78, 0x8B5D0001,
0xC78, 0x8A5E0001,
0xC78, 0x895F0001,
0xC78, 0x88600001,
0xC78, 0x87610001,
0xC78, 0x86620001,
0xC78, 0x85630001,
0xC78, 0x84640001,
0xC78, 0x67650001,
0xC78, 0x66660001,
0xC78, 0x65670001,
0x98000000,0x00000000,0x40000000,0x00000000,
0xC78, 0xFB400001,
0xC78, 0xFB410001,
0xC78, 0xFB420001,
0xC78, 0xFB430001,
0xC78, 0xFB440001,
0xC78, 0xFB450001,
0xC78, 0xFB460001,
0xC78, 0xFB470001,
0xC78, 0xFA480001,
0xC78, 0xF9490001,
0xC78, 0xF84A0001,
0xC78, 0xF74B0001,
0xC78, 0xF64C0001,
0xC78, 0xF54D0001,
0xC78, 0xF44E0001,
0xC78, 0xF34F0001,
0xC78, 0xF2500001,
0xC78, 0xF1510001,
0xC78, 0xF0520001,
0xC78, 0xEF530001,
0xC78, 0xEE540001,
0xC78, 0xED550001,
0xC78, 0xEC560001,
0xC78, 0xEB570001,
0xC78, 0xEA580001,
0xC78, 0xE9590001,
0xC78, 0xE85A0001,
0xC78, 0xE75B0001,
0xC78, 0xE65C0001,
0xC78, 0xE55D0001,
0xC78, 0xC65E0001,
0xC78, 0xC55F0001,
0xC78, 0xC4600001,
0xC78, 0xC3610001,
0xC78, 0xC2620001,
0xC78, 0xC1630001,
0xC78, 0xC0640001,
0xC78, 0xA3650001,
0xC78, 0xA2660001,
0xC78, 0xA1670001,
0xA0000000,0x00000000,
0xC78, 0xFB400001,
0xC78, 0xFB410001,
0xC78, 0xFB420001,
0xC78, 0xFB430001,
0xC78, 0xFB440001,
0xC78, 0xFB450001,
0xC78, 0xFB460001,
0xC78, 0xFB470001,
0xC78, 0xFB480001,
0xC78, 0xFA490001,
0xC78, 0xF94A0001,
0xC78, 0xF84B0001,
0xC78, 0xF74C0001,
0xC78, 0xF64D0001,
0xC78, 0xF54E0001,
0xC78, 0xF44F0001,
0xC78, 0xF3500001,
0xC78, 0xF2510001,
0xC78, 0xF1520001,
0xC78, 0xF0530001,
0xC78, 0xEF540001,
0xC78, 0xEE550001,
0xC78, 0xED560001,
0xC78, 0xEC570001,
0xC78, 0xEB580001,
0xC78, 0xEA590001,
0xC78, 0xE95A0001,
0xC78, 0xE85B0001,
0xC78, 0xE75C0001,
0xC78, 0xE65D0001,
0xC78, 0xE55E0001,
0xC78, 0xE45F0001,
0xC78, 0xE3600001,
0xC78, 0xE2610001,
0xC78, 0xC3620001,
0xC78, 0xC2630001,
0xC78, 0xC1640001,
0xC78, 0x8B650001,
0xC78, 0x8A660001,
0xC78, 0x89670001,
0xB0000000,0x00000000,
0x88000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x66680001,
0xC78, 0x65690001,
0xC78, 0x646A0001,
0xC78, 0x636B0001,
0xC78, 0x626C0001,
0x90000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x64680001,
0xC78, 0x63690001,
0xC78, 0x626A0001,
0xC78, 0x616B0001,
0xC78, 0x606C0001,
0xA0000000,0x00000000,
0xC78, 0x88680001,
0xC78, 0x87690001,
0xC78, 0x866A0001,
0xC78, 0x856B0001,
0xC78, 0x846C0001,
0xB0000000,0x00000000,
0x88000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x616D0001,
0xC78, 0x486E0001,
0xC78, 0x476F0001,
0xC78, 0x46700001,
0xC78, 0x45710001,
0xC78, 0x44720001,
0xC78, 0x43730001,
0xC78, 0x42740001,
0xC78, 0x41750001,
0xC78, 0x40760001,
0xC78, 0x40770001,
0xC78, 0x40780001,
0xC78, 0x40790001,
0xC78, 0x407A0001,
0xC78, 0x407B0001,
0xC78, 0x407C0001,
0x90000001,0x00000000,0x40000000,0x00000000,
0xC78, 0x466D0001,
0xC78, 0x456E0001,
0xC78, 0x446F0001,
0xC78, 0x43700001,
0xC78, 0x42710001,
0xC78, 0x41720001,
0xC78, 0x40730001,
0xC78, 0x40740001,
0xC78, 0x40750001,
0xC78, 0x40760001,
0xC78, 0x40770001,
0xC78, 0x40780001,
0xC78, 0x40790001,
0xC78, 0x407A0001,
0xC78, 0x407B0001,
0xC78, 0x407C0001,
0x98000000,0x00000000,0x40000000,0x00000000,
0xC78, 0x836D0001,
0xC78, 0x826E0001,
0xC78, 0x666F0001,
0xC78, 0x65700001,
0xC78, 0x64710001,
0xC78, 0x63720001,
0xC78, 0x62730001,
0xC78, 0x61740001,
0xC78, 0x48750001,
0xC78, 0x47760001,
0xC78, 0x46770001,
0xC78, 0x45780001,
0xC78, 0x44790001,
0xC78, 0x437A0001,
0xC78, 0x427B0001,
0xC78, 0x417C0001,
0xA0000000,0x00000000,
0xC78, 0x676D0001,
0xC78, 0x666E0001,
0xC78, 0x656F0001,
0xC78, 0x64700001,
0xC78, 0x63710001,
0xC78, 0x62720001,
0xC78, 0x61730001,
0xC78, 0x60740001,
0xC78, 0x46750001,
0xC78, 0x45760001,
0xC78, 0x44770001,
0xC78, 0x43780001,
0xC78, 0x42790001,
0xC78, 0x417A0001,
0xC78, 0x407B0001,
0xC78, 0x407C0001,
0xB0000000,0x00000000,
0xC78, 0x407D0001,
0xC78, 0x407E0001,
0xC78, 0x407F0001,
0xC50, 0x69553422,
0xC50, 0x69553420,
};
/*
 * Walk Array_MP_8188E_AGC_TAB and write every (register, value) pair that
 * survives the embedded IF / ELSE IF / ELSE / ENDIF condition directives.
 *
 * Table encoding per 2-word entry:
 *   v1 with BIT31 set  -> positive condition word; bits [29:28] select
 *                         IF/ELSE-IF (evaluated via CheckPositive),
 *                         COND_ELSE or COND_ENDIF.
 *   v1 with only BIT30 -> negative condition word (ignored here).
 *   otherwise          -> v1 is the register offset, v2 the 32-bit value.
 */
void
ODM_ReadAndConfig_MP_8188E_AGC_TAB(
 	IN   PDM_ODM_T  pDM_Odm
 	)
{
	pu4Byte  table     = Array_MP_8188E_AGC_TAB;
	u4Byte   table_len = sizeof(Array_MP_8188E_AGC_TAB)/sizeof(u4Byte);
	u4Byte   idx;
	BOOLEAN  matched = TRUE;   /* current condition branch applies        */
	BOOLEAN  skipped = FALSE;  /* an earlier branch of this chain matched */

	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("===> ODM_ReadAndConfig_MP_8188E_AGC_TAB\n"));

	for (idx = 0; idx + 1 < table_len; idx += 2)
	{
		u4Byte  v1 = table[idx];
		u4Byte  v2 = table[idx + 1];
		u1Byte  directive;

		if ((v1 & (BIT31 | BIT30)) == 0)
		{
			/* Plain register/value pair: apply it when the current
			 * condition branch is active. */
			if (matched)
				odm_ConfigBB_AGC_8188E(pDM_Odm, v1, bMaskDWord, v2);
			continue;
		}

		if ((v1 & BIT31) == 0)
			continue; /* negative condition: intentionally a no-op */

		/* Positive condition: decode the directive in bits [29:28]. */
		directive = (u1Byte)((v1 & (BIT29 | BIT28)) >> 28);
		if (directive == COND_ENDIF)
		{
			matched = TRUE;
			skipped = FALSE;
		}
		else if (directive == COND_ELSE)
		{
			matched = skipped ? FALSE : TRUE;
		}
		else /* IF or ELSE IF */
		{
			if (skipped)
				matched = FALSE;
			else if (CheckPositive(pDM_Odm, v1, v2))
			{
				matched = TRUE;
				skipped = TRUE;
			}
			else
			{
				matched = FALSE;
				skipped = FALSE;
			}
		}
	}
}
/* Version stamp of the AGC_TAB.TXT release this table was generated from. */
u4Byte
ODM_GetVersion_MP_8188E_AGC_TAB(void)
{
	   return 53;
}
/******************************************************************************
* PHY_REG.TXT
******************************************************************************/
u4Byte Array_MP_8188E_PHY_REG[] = {
0x800, 0x80040000,
0x804, 0x00000003,
0x808, 0x0000FC00,
0x80C, 0x0000000A,
0x810, 0x10001331,
0x814, 0x020C3D10,
0x818, 0x02200385,
0x81C, 0x00000000,
0x820, 0x01000100,
0x824, 0x00390204,
0x828, 0x00000000,
0x82C, 0x00000000,
0x830, 0x00000000,
0x834, 0x00000000,
0x838, 0x00000000,
0x83C, 0x00000000,
0x840, 0x00010000,
0x844, 0x00000000,
0x848, 0x00000000,
0x84C, 0x00000000,
0x850, 0x00000000,
0x854, 0x00000000,
0x858, 0x569A11A9,
0x85C, 0x01000014,
0x860, 0x66F60110,
0x864, 0x061F0649,
0x868, 0x00000000,
0x86C, 0x27272700,
0x88000003,0x00000000,0x40000000,0x00000000,
0x870, 0x07000300,
0x98000001,0x00000000,0x40000000,0x00000000,
0x870, 0x07000300,
0x90000003,0x00000000,0x40000000,0x00000000,
0x870, 0x07000300,
0x90000001,0x00000000,0x40000000,0x00000000,
0x870, 0x07000300,
0xA0000000,0x00000000,
0x870, 0x07000760,
0xB0000000,0x00000000,
0x874, 0x25004000,
0x878, 0x00000808,
0x87C, 0x00000000,
0x880, 0xB0000C1C,
0x884, 0x00000001,
0x888, 0x00000000,
0x88C, 0xCCC000C0,
0x890, 0x00000800,
0x894, 0xFFFFFFFE,
0x898, 0x40302010,
0x89C, 0x00706050,
0x900, 0x00000000,
0x904, 0x00000023,
0x908, 0x00000000,
0x90C, 0x81121111,
0x910, 0x00000002,
0x914, 0x00000201,
0xA00, 0x00D047C8,
0xA04, 0x80FF800C,
0xA08, 0x8C838300,
0xA0C, 0x2E7F120F,
0xA10, 0x9500BB78,
0xA14, 0x1114D028,
0xA18, 0x00881117,
0xA1C, 0x89140F00,
0x88000003,0x00000000,0x40000000,0x00000000,
0xA20, 0x13130000,
0xA24, 0x060A0D10,
0xA28, 0x00000103,
0x90000003,0x00000000,0x40000000,0x00000000,
0xA20, 0x13130000,
0xA24, 0x060A0D10,
0xA28, 0x00000103,
0xA0000000,0x00000000,
0xA20, 0x1A1B0000,
0xA24, 0x090E1317,
0xA28, 0x00000204,
0xB0000000,0x00000000,
0xA2C, 0x00D30000,
0xA70, 0x101FBF00,
0xA74, 0x00000007,
0xA78, 0x00000900,
0xA7C, 0x225B0606,
0xA80, 0x218075B1,
0x88000003,0x00000000,0x40000000,0x00000000,
0xB2C, 0x00000000,
0x98000001,0x00000000,0x40000000,0x00000000,
0xB2C, 0x00000000,
0x90000003,0x00000000,0x40000000,0x00000000,
0xB2C, 0x00000000,
0x90000001,0x00000000,0x40000000,0x00000000,
0xB2C, 0x00000000,
0xA0000000,0x00000000,
0xB2C, 0x80000000,
0xB0000000,0x00000000,
0xC00, 0x48071D40,
0xC04, 0x03A05611,
0xC08, 0x000000E4,
0xC0C, 0x6C6C6C6C,
0xC10, 0x08800000,
0xC14, 0x40000100,
0xC18, 0x08800000,
0xC1C, 0x40000100,
0xC20, 0x00000000,
0xC24, 0x00000000,
0xC28, 0x00000000,
0xC2C, 0x00000000,
0xC30, 0x69E9AC47,
0xC34, 0x469652AF,
0xC38, 0x49795994,
0xC3C, 0x0A97971C,
0xC40, 0x1F7C403F,
0xC44, 0x000100B7,
0xC48, 0xEC020107,
0xC4C, 0x007F037F,
0xC50, 0x69553420,
0xC54, 0x43BC0094,
0x88000003,0x00000000,0x40000000,0x00000000,
0xC58, 0x00013159,
0x98000001,0x00000000,0x40000000,0x00000000,
0xC58, 0x00013159,
0x98000400,0x00000000,0x40000000,0x00000000,
0xC58, 0x00013159,
0x98000000,0x00000000,0x40000000,0x00000000,
0xC58, 0x00013159,
0xA0000000,0x00000000,
0xC58, 0x00013169,
0xB0000000,0x00000000,
0xC5C, 0x00250492,
0xC60, 0x00000000,
0xC64, 0x7112848B,
0xC68, 0x47C00BFF,
0xC6C, 0x00000036,
0xC70, 0x2C7F000D,
0x88000003,0x00000000,0x40000000,0x00000000,
0xC74, 0x028610DB,
0x98000001,0x00000000,0x40000000,0x00000000,
0xC74, 0x028610DB,
0x98000400,0x00000000,0x40000000,0x00000000,
0xC74, 0x028610DB,
0x98000000,0x00000000,0x40000000,0x00000000,
0xC74, 0x028610DB,
0xA0000000,0x00000000,
0xC74, 0x020610DB,
0xB0000000,0x00000000,
0xC78, 0x0000001F,
0xC7C, 0x00B91612,
0x88000003,0x00000000,0x40000000,0x00000000,
0xC80, 0x2D4000B5,
0x90000003,0x00000000,0x40000000,0x00000000,
0xC80, 0x2D4000B5,
0xA0000000,0x00000000,
0xC80, 0x390000E4,
0xB0000000,0x00000000,
0xC84, 0x21F60000,
0xC88, 0x40000100,
0xC8C, 0x20200000,
0xC90, 0x00091521,
0xC94, 0x00000000,
0xC98, 0x00121820,
0xC9C, 0x00007F7F,
0xCA0, 0x00000000,
0xCA4, 0x000300A0,
0x88000003,0x00000000,0x40000000,0x00000000,
0xCA8, 0xFFFF0000,
0x98000001,0x00000000,0x40000000,0x00000000,
0xCA8, 0xFFFF0000,
0x98000400,0x00000000,0x40000000,0x00000000,
0xCA8, 0xFFFF0000,
0x98000000,0x00000000,0x40000000,0x00000000,
0xCA8, 0xFFFF0000,
0xA0000000,0x00000000,
0xCA8, 0x00000000,
0xB0000000,0x00000000,
0xCAC, 0x00000000,
0xCB0, 0x00000000,
0xCB4, 0x00000000,
0xCB8, 0x00000000,
0xCBC, 0x28000000,
0xCC0, 0x00000000,
0xCC4, 0x00000000,
0xCC8, 0x00000000,
0xCCC, 0x00000000,
0xCD0, 0x00000000,
0xCD4, 0x00000000,
0xCD8, 0x64B22427,
0xCDC, 0x00766932,
0xCE0, 0x00222222,
0xCE4, 0x00000000,
0xCE8, 0x37644302,
0xCEC, 0x2F97D40C,
0xD00, 0x00000740,
0xD04, 0x00020401,
0xD08, 0x0000907F,
0xD0C, 0x20010201,
0xD10, 0xA0633333,
0xD14, 0x3333BC43,
0xD18, 0x7A8F5B6F,
0xD2C, 0xCC979975,
0xD30, 0x00000000,
0xD34, 0x80608000,
0xD38, 0x00000000,
0xD3C, 0x00127353,
0xD40, 0x00000000,
0xD44, 0x00000000,
0xD48, 0x00000000,
0xD4C, 0x00000000,
0xD50, 0x6437140A,
0xD54, 0x00000000,
0xD58, 0x00000282,
0xD5C, 0x30032064,
0xD60, 0x4653DE68,
0xD64, 0x04518A3C,
0xD68, 0x00002101,
0xD6C, 0x2A201C16,
0xD70, 0x1812362E,
0xD74, 0x322C2220,
0xD78, 0x000E3C24,
0xE00, 0x2D2D2D2D,
0xE04, 0x2D2D2D2D,
0xE08, 0x0390272D,
0xE10, 0x2D2D2D2D,
0xE14, 0x2D2D2D2D,
0xE18, 0x2D2D2D2D,
0xE1C, 0x2D2D2D2D,
0xE28, 0x00000000,
0xE30, 0x1000DC1F,
0xE34, 0x10008C1F,
0xE38, 0x02140102,
0xE3C, 0x681604C2,
0xE40, 0x01007C00,
0xE44, 0x01004800,
0xE48, 0xFB000000,
0xE4C, 0x000028D1,
0xE50, 0x1000DC1F,
0xE54, 0x10008C1F,
0xE58, 0x02140102,
0xE5C, 0x28160D05,
0xE60, 0x00000008,
0xE68, 0x001B25A4,
0xE6C, 0x00C00014,
0xE70, 0x00C00014,
0xE74, 0x01000014,
0xE78, 0x01000014,
0xE7C, 0x01000014,
0xE80, 0x01000014,
0xE84, 0x00C00014,
0xE88, 0x01000014,
0xE8C, 0x00C00014,
0xED0, 0x00C00014,
0xED4, 0x00C00014,
0xED8, 0x00C00014,
0xEDC, 0x00000014,
0xEE0, 0x00000014,
0x88000003,0x00000000,0x40000000,0x00000000,
0xEE8, 0x32555448,
0x98000001,0x00000000,0x40000000,0x00000000,
0xEE8, 0x32555448,
0x90000003,0x00000000,0x40000000,0x00000000,
0xEE8, 0x32555448,
0x90000001,0x00000000,0x40000000,0x00000000,
0xEE8, 0x32555448,
0xA0000000,0x00000000,
0xEE8, 0x21555448,
0xB0000000,0x00000000,
0xEEC, 0x01C00014,
0xF14, 0x00000003,
0xF4C, 0x00000000,
0xF00, 0x00000300,
};
/*
 * Walk Array_MP_8188E_PHY_REG and write every (register, value) pair that
 * survives the embedded IF / ELSE IF / ELSE / ENDIF condition directives.
 * Same table encoding as the AGC table; see
 * ODM_ReadAndConfig_MP_8188E_AGC_TAB.
 */
void
ODM_ReadAndConfig_MP_8188E_PHY_REG(
 	IN   PDM_ODM_T  pDM_Odm
 	)
{
	pu4Byte  table     = Array_MP_8188E_PHY_REG;
	u4Byte   table_len = sizeof(Array_MP_8188E_PHY_REG)/sizeof(u4Byte);
	u4Byte   idx;
	BOOLEAN  matched = TRUE;   /* current condition branch applies        */
	BOOLEAN  skipped = FALSE;  /* an earlier branch of this chain matched */

	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("===> ODM_ReadAndConfig_MP_8188E_PHY_REG\n"));

	for (idx = 0; idx + 1 < table_len; idx += 2)
	{
		u4Byte  v1 = table[idx];
		u4Byte  v2 = table[idx + 1];
		u1Byte  directive;

		if ((v1 & (BIT31 | BIT30)) == 0)
		{
			/* Plain register/value pair: apply it when the current
			 * condition branch is active. */
			if (matched)
				odm_ConfigBB_PHY_8188E(pDM_Odm, v1, bMaskDWord, v2);
			continue;
		}

		if ((v1 & BIT31) == 0)
			continue; /* negative condition: intentionally a no-op */

		/* Positive condition: decode the directive in bits [29:28]. */
		directive = (u1Byte)((v1 & (BIT29 | BIT28)) >> 28);
		if (directive == COND_ENDIF)
		{
			matched = TRUE;
			skipped = FALSE;
		}
		else if (directive == COND_ELSE)
		{
			matched = skipped ? FALSE : TRUE;
		}
		else /* IF or ELSE IF */
		{
			if (skipped)
				matched = FALSE;
			else if (CheckPositive(pDM_Odm, v1, v2))
			{
				matched = TRUE;
				skipped = TRUE;
			}
			else
			{
				matched = FALSE;
				skipped = FALSE;
			}
		}
	}
}
/* Version stamp of the PHY_REG.TXT release this table was generated from. */
u4Byte
ODM_GetVersion_MP_8188E_PHY_REG(void)
{
	   return 53;
}
/******************************************************************************
* PHY_REG_PG.TXT
******************************************************************************/
/*
 * PHY_REG_PG.TXT data: six words per entry, consumed positionally by
 * odm_ConfigBB_PHY_REG_PG_8188E(pDM_Odm, v1..v6).
 * NOTE(review): the last three columns are register offset, bit mask and
 * value; the meaning of the first three (all zero here, presumably
 * band/path/tx-num selectors) should be confirmed against the PHY
 * parameter documentation.
 */
u4Byte Array_MP_8188E_PHY_REG_PG[] = {
	0, 0, 0, 0x00000e08, 0x0000ff00, 0x00004000,
	0, 0, 0, 0x0000086c, 0xffffff00, 0x34363800,
	0, 0, 0, 0x00000e00, 0xffffffff, 0x42444646,
	0, 0, 0, 0x00000e04, 0xffffffff, 0x30343840,
	0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244,
	0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
};
/*
 * Apply the PHY_REG_PG power-by-rate table: tag the ODM context with the
 * table version/value-type, then feed every six-word entry to
 * odm_ConfigBB_PHY_REG_PG_8188E.
 */
void
ODM_ReadAndConfig_MP_8188E_PHY_REG_PG(
 	IN   PDM_ODM_T  pDM_Odm
 	)
{
	pu4Byte  entries   = Array_MP_8188E_PHY_REG_PG;
	u4Byte   entry_len = sizeof(Array_MP_8188E_PHY_REG_PG)/sizeof(u4Byte);
	u4Byte   idx = 0;

	ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("===> ODM_ReadAndConfig_MP_8188E_PHY_REG_PG\n"));

	pDM_Odm->PhyRegPgVersion = 1;
	pDM_Odm->PhyRegPgValueType = PHY_REG_PG_EXACT_VALUE;

	/* Consume the table six words at a time. */
	while (idx + 5 < entry_len)
	{
		odm_ConfigBB_PHY_REG_PG_8188E(pDM_Odm,
			entries[idx], entries[idx + 1], entries[idx + 2],
			entries[idx + 3], entries[idx + 4], entries[idx + 5]);
		idx += 6;
	}
}
#endif // end of HWIMG_SUPPORT
| gpl-2.0 |
reynaldo-samsung/gst-plugins-base | gst/videoscale/vs_lanczos.c | 19 | 51348 | /*
* Image Scaling Functions
* Copyright (c) 2011 David A. Schleef <ds@schleef.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
*
* Modified Lanczos scaling algorithm
* ==================================
*
* This algorithm was developed by the author. The primary goals of
* the algorithm are high-quality video downscaling for medium scale
* factors (in the range of 1.3x to 5.0x) using methods that can be
* converted to SIMD code. Concerns with existing algorithms were
* mainly related to either over-soft filtering (Lanczos) or aliasing
* (bilinear or any other method with inadequate sampling).
*
* The problems with bilinear scaling are apparent when downscaling
* more than a factor of 2. For example, when downscaling by a factor
* of 3, only two-thirds of the input pixels contribute to the output
* pixels. This is only considering scaling in one direction; after
* scaling both vertically and horizontally in a 2-D image, fewer than
* half of the input pixels contribute to the output, so it should not
* be surprising that the output is suboptimal.
*
* The problems with Lanczos scaling are more subtle. From a theoretical
* perspective, Lanczos is an optimal algorithm for resampling equally-
* spaced values. This theoretical perspective is based on analysis
* done in frequency space, thus, Lanczos works very well for audio
* resampling, since the ear hears primarily in frequency space. The
* human visual system is sensitive primarily in the spatial domain,
* therefore any resampling algorithm should take this into account.
* This difference is immediately clear in the size of resampling
* window or envelope that is chosen for resampling: for audio, an
* envelope of a=64 is typical, in image scaling, the envelope is
* usually a=2 or a=3.
*
* One result of the HVS being sensitive in the spatial domain (and
* also probably due to oversampling capabilities of the retina and
* visual cortex) is that it is less sensitive to the exact magnitude
* of high-frequency visual signals than to the appropriate amount of
* energy in the nearby frequency band. A Lanczos kernel with a=2
* or a=3 strongly decreases the amount of energy in the high frequency
* bands. The energy in this area can be increased by increasing a,
* which brings in energy from different areas of the image (bad for
* reasons mentioned above), or by oversampling the input data. We
* have chosen two methods for doing the latter. Firstly, there is
* a sharpness parameter, which increases the cutoff frequency of the
* filter, aliasing higher frequency noise into the passband. And
* secondly, there is the sharpen parameter, which increases the
* contribution of high-frequency (but in-band) components.
*
* An alternate explanation of the usefulness of a sharpening filter
* is that many natural images have a roughly 1/f spectrum. In order
* for a downsampled image to look more "natural" when high frequencies
* are removed, the frequencies in the pass band near the cutoff
* frequency are amplified, causing the spectrum to be more roughly
* 1/f. I said "roughly", not "literally".
*
* This alternate explanation is useful for understanding the author's
* secondary motivation for developing this algorithm, namely, as a
* method of video compression. Several recent techniques (such as
* HTTP Live Streaming and SVC) use image scaling as a method to get
* increased compression out of nominally non-scalable codecs such as
* H.264. For optimal quality, it is thusly important to consider
* the scaler and encoder as a combined unit. Tuning of the sharpness
* and sharpen parameters was performed using the Toro encoder tuner,
* where scaled and encoded video was compared to unscaled and encoded
* video. This tuning suggested values that were very close to the
* values chosen by manual inspection of scaled images and video.
*
* The optimal values of sharpen and sharpness were slightly different
* depending whether the comparison was still images or video. Video
* comparisons were more sensitive to aliasing, since the aliasing
* artifacts tended to move or "crawl" around the video. The default
* values are for video; image scaling may prefer higher values.
*
* A number of related techniques were rejected for various reasons.
* An early technique of selecting the sharpness factor locally based
* on edge detection (in order to use a higher sharpness values without
* the corresponding aliasing on edges) worked very well for still
* images, but caused too much "crawling" on textures in video. Also,
* this method is slow, as it does not parallelize well.
*
* Non-separable techniques were rejected because the fastest would
* have been at least 4x slower.
*
* It is infrequently appreciated that image scaling should ideally be
* done in linear light space. Converting to linear light space has
* a similar effect to a sharpening filter. This approach was not
* taken because the added benefit is minor compared to the additional
* computational cost. Morever, the benefit is decreased by increasing
* the strength of the sharpening filter.
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string.h>
#include "vs_scanline.h"
#include "vs_image.h"
#include "gstvideoscaleorc.h"
#include <gst/gst.h>
#include <math.h>
#define NEED_CLAMP(x,a,b) ((x) < (a) || (x) > (b))
#define ROUND_UP_2(x) (((x)+1)&~1)
#define ROUND_UP_4(x) (((x)+3)&~3)
#define ROUND_UP_8(x) (((x)+7)&~7)
#define SRC_LINE(i) (scale->src->pixels + scale->src->stride * (i))
#define TMP_LINE_S16(i) ((gint16 *)scale->tmpdata + (i)*(scale->dest->width))
#define TMP_LINE_S32(i) ((gint32 *)scale->tmpdata + (i)*(scale->dest->width))
#define TMP_LINE_FLOAT(i) ((float *)scale->tmpdata + (i)*(scale->dest->width))
#define TMP_LINE_DOUBLE(i) ((double *)scale->tmpdata + (i)*(scale->dest->width))
#define TMP_LINE_S16_AYUV(i) ((gint16 *)scale->tmpdata + (i)*4*(scale->dest->width))
#define TMP_LINE_S32_AYUV(i) ((gint32 *)scale->tmpdata + (i)*4*(scale->dest->width))
#define TMP_LINE_FLOAT_AYUV(i) ((float *)scale->tmpdata + (i)*4*(scale->dest->width))
#define TMP_LINE_DOUBLE_AYUV(i) ((double *)scale->tmpdata + (i)*4*(scale->dest->width))
#define PTR_OFFSET(a,b) ((void *)((char *)(a) + (b)))
typedef void (*HorizResampleFunc) (void *dest, const gint32 * offsets,
const void *taps, const void *src, int n_taps, int shift, int n);
typedef struct _Scale1D Scale1D;

/*
 * Precomputed description of a one-dimensional resample: for each output
 * sample, a first source index (offsets[]) and n_taps filter coefficients
 * (taps, element type chosen by the caller -- double in
 * scale1d_calculate_taps, later converted for the int16/int32/float paths).
 */
struct _Scale1D
{
  int n;                        /* number of output samples -- presumably;
                                 * confirm against callers */
  double offset;                /* sampling offset of the first output:
                                 * scale/2 - 0.5 (see calculate_taps) */
  double scale;                 /* src_size / dest_size ratio */

  double fx;                    /* filter cutoff factor (sharpness-scaled) */
  double ex;                    /* envelope scale factor: fx / a */
  int dx;                       /* half-width of the tap window: ceil(a/fx) */

  int n_taps;                   /* taps per output sample (>= 2*dx) */
  gint32 *offsets;              /* per-output first source index; one per
                                 * destination element */
  void *taps;                   /* n_taps coefficients per output sample */
};
typedef struct _Scale Scale;

/*
 * Full 2-D scaling context: source/destination images, tuning parameters,
 * a scratch buffer for intermediate horizontal passes, and the per-axis
 * Scale1D tap tables.
 */
struct _Scale
{
  const VSImage *dest;          /* output image (not owned) */
  const VSImage *src;           /* input image (not owned) */

  double sharpness;             /* cutoff-frequency multiplier */
  gboolean dither;              /* add dither when quantizing output */

  void *tmpdata;                /* scratch lines (see TMP_LINE_* macros) */

  HorizResampleFunc horiz_resample_func;  /* per-format horizontal pass */

  Scale1D x_scale1d;            /* horizontal tap table */
  Scale1D y_scale1d;            /* vertical tap table */
};
static void
vs_image_scale_lanczos_Y_int16 (const VSImage * dest, const VSImage * src,
uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
double sharpen);
static void vs_image_scale_lanczos_Y_int32 (const VSImage * dest,
const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither,
double a, double sharpen);
static void vs_image_scale_lanczos_Y_float (const VSImage * dest,
const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither,
double a, double sharpen);
static void vs_image_scale_lanczos_Y_double (const VSImage * dest,
const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither,
double a, double sharpen);
static void
vs_image_scale_lanczos_AYUV_int16 (const VSImage * dest, const VSImage * src,
uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
double sharpen);
static void vs_image_scale_lanczos_AYUV_int32 (const VSImage * dest,
const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither,
double a, double sharpen);
static void vs_image_scale_lanczos_AYUV_float (const VSImage * dest,
const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither,
double a, double sharpen);
static void vs_image_scale_lanczos_AYUV_double (const VSImage * dest,
const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither,
double a, double sharpen);
static void vs_image_scale_lanczos_AYUV64_double (const VSImage * dest,
const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither,
double a, double sharpen);
/* Normalized sinc function: sin(pi*x) / (pi*x), defined as 1 at x == 0. */
static double
sinc (double x)
{
  double px;

  if (x == 0)
    return 1;
  px = G_PI * x;
  return sin (px) / px;
}
/* Window (envelope) function: sinc on the open interval (-1, 1),
 * zero everywhere outside it. */
static double
envelope (double x)
{
  if (x <= -1)
    return 0;
  if (x >= 1)
    return 0;
  return sinc (x);
}
static int
scale1d_get_n_taps (int src_size, int dest_size, double a, double sharpness)
{
double scale;
double fx;
int dx;
scale = src_size / (double) dest_size;
if (scale > 1.0) {
fx = (1.0 / scale) * sharpness;
} else {
fx = (1.0) * sharpness;
}
dx = ceil (a / fx);
return 2 * dx;
}
/* Releases the offset and tap tables of a Scale1D.  g_free accepts
 * NULL, so a zero-initialized Scale1D is safe to pass. */
static void
scale1d_cleanup (Scale1D * scale)
{
  g_free (scale->offsets);
  g_free (scale->taps);
}
/*
 * Calculates a set of taps for each destination element in double
 * format.  Each set of taps sums to 1.0.
 *
 * For every destination sample the Lanczos (windowed-sinc) kernel is
 * evaluated over an n_taps-wide source window, normalized, and -- when
 * the window would read outside the source -- folded back in-bounds.
 */
static void
scale1d_calculate_taps (Scale1D * scale, int src_size, int dest_size,
    int n_taps, double a, double sharpness, double sharpen)
{
  int j;
  double *tap_array;
  gint32 *offsets;
  double scale_offset;
  double scale_increment;
  int dx;
  double fx;
  double ex;

  scale->scale = src_size / (double) dest_size;
  /* source-space center of destination sample 0 */
  scale->offset = scale->scale / 2 - 0.5;
  /* lower the cutoff when downscaling so the kernel covers more input */
  if (scale->scale > 1.0) {
    scale->fx = (1.0 / scale->scale) * sharpness;
  } else {
    scale->fx = (1.0) * sharpness;
  }
  scale->ex = scale->fx / a;
  scale->dx = ceil (a / scale->fx);
  /* caller must request a window at least as wide as the kernel support */
  g_assert (n_taps >= 2 * scale->dx);
  scale->n_taps = n_taps;
  scale->taps = g_malloc (sizeof (double) * scale->n_taps * dest_size);
  scale->offsets = g_malloc (sizeof (gint32) * dest_size);
  tap_array = scale->taps;
  offsets = scale->offsets;
  scale_offset = scale->offset;
  scale_increment = scale->scale;
  dx = scale->dx;
  fx = scale->fx;
  ex = scale->ex;
  for (j = 0; j < dest_size; j++) {
    double x;                   /* source position of this output sample */
    int xi;                     /* first source index in the window */
    int l;
    double weight;              /* running sum of taps, for normalization */
    double *taps;

    x = scale_offset + scale_increment * j;
    x = CLAMP (x, 0, src_size);
    xi = ceil (x) - dx;
    offsets[j] = xi;
    weight = 0;
    taps = tap_array + j * n_taps;
    for (l = 0; l < n_taps; l++) {
      int xl = xi + l;
      /* windowed sinc, minus an optional sharpening term */
      taps[l] = sinc ((x - xl) * fx) * envelope ((x - xl) * ex);
      taps[l] -= sharpen * envelope ((x - xl) * ex);
      weight += taps[l];
    }
    /* kernel support must be fully inside the chosen window */
    g_assert (envelope ((x - (xi - 1)) * ex) == 0);
    g_assert (envelope ((x - (xi + n_taps)) * ex) == 0);
    /* normalize so each tap set sums to 1.0 */
    for (l = 0; l < n_taps; l++) {
      taps[l] /= weight;
    }
    if (xi < 0) {
      /* window starts before the image: fold the out-of-range leading
       * taps onto the first in-range tap, then shift the window right */
      int shift = -xi;
      for (l = 0; l < shift; l++) {
        taps[shift] += taps[l];
      }
      for (l = 0; l < n_taps - shift; l++) {
        taps[l] = taps[shift + l];
      }
      for (; l < n_taps; l++) {
        taps[l] = 0;
      }
      offsets[j] += shift;
    }
    if (xi > src_size - n_taps) {
      /* window runs past the image: fold trailing taps onto the last
       * in-range tap, then shift the window left */
      int shift = xi - (src_size - n_taps);
      for (l = 0; l < shift; l++) {
        taps[n_taps - shift - 1] += taps[n_taps - shift + l];
      }
      for (l = 0; l < n_taps - shift; l++) {
        taps[n_taps - 1 - l] = taps[n_taps - 1 - shift - l];
      }
      for (l = 0; l < shift; l++) {
        taps[l] = 0;
      }
      offsets[j] -= shift;
    }
  }
}
/*
 * Calculates a set of taps for each destination element in float
 * format.  Each set of taps sums to 1.0.  Internally computes the taps
 * in double precision, then narrows the whole table to float.
 */
static void
scale1d_calculate_taps_float (Scale1D * scale, int src_size, int dest_size,
    int n_taps, double a, double sharpness, double sharpen)
{
  double *src_taps;
  float *dst_taps;
  int count;
  int i;

  scale1d_calculate_taps (scale, src_size, dest_size, n_taps, a, sharpness,
      sharpen);
  src_taps = scale->taps;
  count = scale->n_taps * dest_size;
  dst_taps = g_malloc (sizeof (float) * count);
  for (i = 0; i < count; i++)
    dst_taps[i] = src_taps[i];
  g_free (src_taps);
  scale->taps = dst_taps;
}
/*
 * Calculates a set of taps for each destination element in gint32
 * format.  Each set of taps sums to (very nearly) (1<<shift).  A
 * typical value for shift is 10 to 15, so that applying the taps to
 * uint8 values and summing will fit in a (signed) int32.
 */
static void
scale1d_calculate_taps_int32 (Scale1D * scale, int src_size, int dest_size,
    int n_taps, double a, double sharpness, double sharpen, int shift)
{
  double *src_taps;
  gint32 *dst_taps;
  double multiplier;
  int i;

  scale1d_calculate_taps (scale, src_size, dest_size, n_taps, a, sharpness,
      sharpen);
  src_taps = scale->taps;
  dst_taps = g_malloc (sizeof (gint32) * scale->n_taps * dest_size);
  multiplier = (1 << shift);
  /* round each tap to the nearest fixed-point value */
  for (i = 0; i < dest_size * n_taps; i++)
    dst_taps[i] = floor (0.5 + src_taps[i] * multiplier);
  g_free (src_taps);
  scale->taps = dst_taps;
}
/*
 * Calculates a set of taps for each destination element in gint16
 * format.  Each set of taps sums to (1<<shift).  A typical value
 * for shift is 7, so that applying the taps to uint8 values and
 * summing will fit in a (signed) int16.
 */
static void
scale1d_calculate_taps_int16 (Scale1D * scale, int src_size, int dest_size,
    int n_taps, double a, double sharpness, double sharpen, int shift)
{
  double *taps_d;
  gint16 *taps_i;
  int i;
  int j;
  double multiplier;

  /* compute reference taps in double precision first */
  scale1d_calculate_taps (scale, src_size, dest_size, n_taps, a, sharpness,
      sharpen);
  taps_d = scale->taps;
  taps_i = g_malloc (sizeof (gint16) * scale->n_taps * dest_size);
  multiplier = (1 << shift);
  /* Various methods for converting floating point taps to integer.
   * The dB values are the SSIM value between scaling an image via
   * the floating point pathway vs. the integer pathway using the
   * given code to generate the taps.  Only one image was tested,
   * scaling from 1920x1080 to 640x360.  Several variations of the
   * methods were also tested, with nothing appearing useful. */
#if 0
  /* Standard round to integer.  This causes bad DC errors. */
  /* 44.588 dB */
  for (j = 0; j < dest_size; j++) {
    for (i = 0; i < n_taps; i++) {
      taps_i[j * n_taps + i] =
          floor (0.5 + taps_d[j * n_taps + i] * multiplier);
    }
  }
#endif
#if 0
  /* Dithering via error propagation.  Works pretty well, but
   * really we want to propagate errors across rows, which would
   * mean having several sets of tap arrays.  Possible, but more work,
   * and it may not even be better. */
  /* 57.0961 dB */
  {
    double err = 0;
    for (j = 0; j < dest_size; j++) {
      for (i = 0; i < n_taps; i++) {
        err += taps_d[j * n_taps + i] * multiplier;
        taps_i[j * n_taps + i] = floor (err);
        err -= floor (err);
      }
    }
  }
#endif
#if 1
  /* Round to integer, but with an adjustable bias that we use to
   * eliminate the DC error.  This search method is a bit crude, and
   * could perhaps be improved somewhat. */
  /* 60.4851 dB */
  for (j = 0; j < dest_size; j++) {
    int k;
    /* raise the rounding bias in 0.01 steps until the tap set sums
     * to at least 1<<shift */
    for (k = 0; k < 100; k++) {
      int sum = 0;
      double offset;
      offset = k * 0.01;
      for (i = 0; i < n_taps; i++) {
        taps_i[j * n_taps + i] =
            floor (offset + taps_d[j * n_taps + i] * multiplier);
        sum += taps_i[j * n_taps + i];
      }
      if (sum >= (1 << shift))
        break;
    }
  }
#endif
#if 0
  /* Round to integer, but adjust the multiplier.  The search method is
   * wrong a lot, but was sufficient enough to calculate dB error. */
  /* 58.6517 dB */
  for (j = 0; j < dest_size; j++) {
    int k;
    int sum = 0;
    for (k = 0; k < 200; k++) {
      sum = 0;
      multiplier = (1 << shift) - 1.0 + k * 0.01;
      for (i = 0; i < n_taps; i++) {
        taps_i[j * n_taps + i] =
            floor (0.5 + taps_d[j * n_taps + i] * multiplier);
        sum += taps_i[j * n_taps + i];
      }
      if (sum >= (1 << shift))
        break;
    }
    if (sum != (1 << shift)) {
      GST_ERROR ("%g %d", multiplier, sum);
    }
  }
#endif
#if 0
  /* Round to integer, but subtract the error from the largest tap */
  /* 58.3677 dB */
  for (j = 0; j < dest_size; j++) {
    int err = -multiplier;
    for (i = 0; i < n_taps; i++) {
      taps_i[j * n_taps + i] =
          floor (0.5 + taps_d[j * n_taps + i] * multiplier);
      err += taps_i[j * n_taps + i];
    }
    if (taps_i[j * n_taps + (n_taps / 2 - 1)] >
        taps_i[j * n_taps + (n_taps / 2)]) {
      taps_i[j * n_taps + (n_taps / 2 - 1)] -= err;
    } else {
      taps_i[j * n_taps + (n_taps / 2)] -= err;
    }
  }
#endif
  g_free (taps_d);
  scale->taps = taps_i;
}
/* Scales a single-component (Y) image with the Lanczos method,
 * dispatching to the arithmetic pathway selected by submethod:
 * 1 = int32, 2 = float, 3 = double, anything else = int16. */
void
vs_image_scale_lanczos_Y (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, int submethod,
    double a, double sharpen)
{
  if (submethod == 1) {
    vs_image_scale_lanczos_Y_int32 (dest, src, tmpbuf, sharpness, dither, a,
        sharpen);
  } else if (submethod == 2) {
    vs_image_scale_lanczos_Y_float (dest, src, tmpbuf, sharpness, dither, a,
        sharpen);
  } else if (submethod == 3) {
    vs_image_scale_lanczos_Y_double (dest, src, tmpbuf, sharpness, dither, a,
        sharpen);
  } else {
    /* submethod 0 and any unrecognized value */
    vs_image_scale_lanczos_Y_int16 (dest, src, tmpbuf, sharpness, dither, a,
        sharpen);
  }
}
/* Scales a 4-component AYUV image with the Lanczos method, dispatching
 * to the arithmetic pathway selected by submethod:
 * 1 = int32, 2 = float, 3 = double, anything else = int16. */
void
vs_image_scale_lanczos_AYUV (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, int submethod,
    double a, double sharpen)
{
  if (submethod == 1) {
    vs_image_scale_lanczos_AYUV_int32 (dest, src, tmpbuf, sharpness, dither,
        a, sharpen);
  } else if (submethod == 2) {
    vs_image_scale_lanczos_AYUV_float (dest, src, tmpbuf, sharpness, dither,
        a, sharpen);
  } else if (submethod == 3) {
    vs_image_scale_lanczos_AYUV_double (dest, src, tmpbuf, sharpness, dither,
        a, sharpen);
  } else {
    /* submethod 0 and any unrecognized value */
    vs_image_scale_lanczos_AYUV_int16 (dest, src, tmpbuf, sharpness, dither,
        a, sharpen);
  }
}
/* Scales a 16-bit-per-component AYUV64 image with the Lanczos method.
 * Only the double-precision pathway exists for this format, so the
 * submethod argument is ignored. */
void
vs_image_scale_lanczos_AYUV64 (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, int submethod,
    double a, double sharpen)
{
  vs_image_scale_lanczos_AYUV64_double (dest, src, tmpbuf, sharpness, dither,
      a, sharpen);
}
/* Expands to a single-component horizontal resampler using floating-point
 * accumulation: for each of n outputs, a dot product of _n_taps source
 * samples (starting at offsets[i]) with the i-th tap row.  The shift
 * parameter is accepted for signature compatibility but unused. */
#define RESAMPLE_HORIZ_FLOAT(function, dest_type, tap_type, src_type, _n_taps) \
static void \
function (dest_type *dest, const gint32 *offsets, \
    const tap_type *taps, const src_type *src, int n_taps, int shift, int n) \
{ \
  int i; \
  int k; \
  dest_type sum; \
  const src_type *srcline; \
  const tap_type *tapsline; \
  for (i = 0; i < n; i++) { \
    srcline = src + offsets[i]; \
    tapsline = taps + i * _n_taps; \
    sum = 0; \
    for (k = 0; k < _n_taps; k++) { \
      sum += srcline[k] * tapsline[k]; \
    } \
    dest[i] = sum; \
  } \
}
/* Expands to a single-component fixed-point horizontal resampler.  The
 * accumulated dot product is rounded (half-up via the precomputed offset)
 * and right-shifted by _shift.  When _n_taps/_shift are literals the
 * compiler can fully unroll and fold the loop. */
#define RESAMPLE_HORIZ(function, dest_type, tap_type, src_type, _n_taps, _shift) \
static void \
function (dest_type *dest, const gint32 *offsets, \
    const tap_type *taps, const src_type *src, int n_taps, int shift, int n) \
{ \
  int i; \
  int k; \
  dest_type sum; \
  const src_type *srcline; \
  const tap_type *tapsline; \
  int offset; \
  if (_shift > 0) offset = (1<<_shift)>>1; \
  else offset = 0; \
  for (i = 0; i < n; i++) { \
    srcline = src + offsets[i]; \
    tapsline = taps + i * _n_taps; \
    sum = 0; \
    for (k = 0; k < _n_taps; k++) { \
      sum += srcline[k] * tapsline[k]; \
    } \
    dest[i] = (sum + offset) >> _shift; \
  } \
}
/* Expands to a 4-component (AYUV) floating-point horizontal resampler:
 * like RESAMPLE_HORIZ_FLOAT but each source/dest element is a group of
 * four interleaved components sharing one tap weight. */
#define RESAMPLE_HORIZ_AYUV_FLOAT(function, dest_type, tap_type, src_type, _n_taps) \
static void \
function (dest_type *dest, const gint32 *offsets, \
    const tap_type *taps, const src_type *src, int n_taps, int shift, int n) \
{ \
  int i; \
  int k; \
  dest_type sum1; \
  dest_type sum2; \
  dest_type sum3; \
  dest_type sum4; \
  const src_type *srcline; \
  const tap_type *tapsline; \
  for (i = 0; i < n; i++) { \
    srcline = src + 4*offsets[i]; \
    tapsline = taps + i * _n_taps; \
    sum1 = 0; \
    sum2 = 0; \
    sum3 = 0; \
    sum4 = 0; \
    for (k = 0; k < _n_taps; k++) { \
      sum1 += srcline[k*4+0] * tapsline[k]; \
      sum2 += srcline[k*4+1] * tapsline[k]; \
      sum3 += srcline[k*4+2] * tapsline[k]; \
      sum4 += srcline[k*4+3] * tapsline[k]; \
    } \
    dest[i*4+0] = sum1; \
    dest[i*4+1] = sum2; \
    dest[i*4+2] = sum3; \
    dest[i*4+3] = sum4; \
  } \
}
/* Expands to a 4-component (AYUV) fixed-point horizontal resampler:
 * like RESAMPLE_HORIZ but each element is four interleaved components
 * sharing one tap weight; sums are rounded and shifted by _shift. */
#define RESAMPLE_HORIZ_AYUV(function, dest_type, tap_type, src_type, _n_taps, _shift) \
static void \
function (dest_type *dest, const gint32 *offsets, \
    const tap_type *taps, const src_type *src, int n_taps, int shift, int n) \
{ \
  int i; \
  int k; \
  dest_type sum1; \
  dest_type sum2; \
  dest_type sum3; \
  dest_type sum4; \
  const src_type *srcline; \
  const tap_type *tapsline; \
  int offset; \
  if (_shift > 0) offset = (1<<_shift)>>1; \
  else offset = 0; \
  for (i = 0; i < n; i++) { \
    srcline = src + 4*offsets[i]; \
    tapsline = taps + i * _n_taps; \
    sum1 = 0; \
    sum2 = 0; \
    sum3 = 0; \
    sum4 = 0; \
    for (k = 0; k < _n_taps; k++) { \
      sum1 += srcline[k*4+0] * tapsline[k]; \
      sum2 += srcline[k*4+1] * tapsline[k]; \
      sum3 += srcline[k*4+2] * tapsline[k]; \
      sum4 += srcline[k*4+3] * tapsline[k]; \
    } \
    dest[i*4+0] = (sum1 + offset) >> _shift; \
    dest[i*4+1] = (sum2 + offset) >> _shift; \
    dest[i*4+2] = (sum3 + offset) >> _shift; \
    dest[i*4+3] = (sum4 + offset) >> _shift; \
  } \
}
/* *INDENT-OFF* */
/* Generic horizontal resamplers: tap count and shift taken from the
 * runtime arguments, used when no specialized variant matches. */
RESAMPLE_HORIZ_FLOAT (resample_horiz_double_u8_generic, double, double,
    guint8, n_taps)
RESAMPLE_HORIZ_FLOAT (resample_horiz_float_u8_generic, float, float,
    guint8, n_taps)
RESAMPLE_HORIZ_AYUV_FLOAT (resample_horiz_double_ayuv_generic, double, double,
    guint8, n_taps)
RESAMPLE_HORIZ_AYUV_FLOAT (resample_horiz_float_ayuv_generic, float, float,
    guint8, n_taps)
RESAMPLE_HORIZ_AYUV_FLOAT (resample_horiz_double_ayuv_generic_s16, double, double,
    guint16, n_taps)
RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_generic, gint32, gint32,
    guint8, n_taps, shift)
RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_generic, gint16, gint16,
    guint8, n_taps, shift)
RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_generic, gint32, gint32,
    guint8, n_taps, shift)
RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_generic, gint16, gint16,
    guint8, n_taps, shift)
/* Candidates for orcification */
/* Specializations with literal tap counts (4/8/12/16) and shift 0 so the
 * compiler can unroll the inner loops. */
RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_taps16_shift0, gint32, gint32,
    guint8, 16, 0)
RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_taps12_shift0, gint32, gint32,
    guint8, 12, 0)
RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_taps8_shift0, gint32, gint32,
    guint8, 8, 0)
RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_taps4_shift0, gint32, gint32,
    guint8, 4, 0)
RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_taps16_shift0, gint16, gint16,
    guint8, 16, 0)
RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_taps12_shift0, gint16, gint16,
    guint8, 12, 0)
RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_taps8_shift0, gint16, gint16,
    guint8, 8, 0)
RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_taps4_shift0, gint16, gint16,
    guint8, 4, 0)
RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_taps16_shift0, gint32, gint32,
    guint8, 16, 0)
RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_taps12_shift0, gint32, gint32,
    guint8, 12, 0)
RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_taps8_shift0, gint32, gint32,
    guint8, 8, 0)
RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_taps4_shift0, gint32, gint32,
    guint8, 4, 0)
RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_taps16_shift0, gint16, gint16,
    guint8, 16, 0)
RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_taps12_shift0, gint16, gint16,
    guint8, 12, 0)
RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_taps8_shift0, gint16, gint16,
    guint8, 8, 0)
RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_taps4_shift0, gint16, gint16,
    guint8, 4, 0)
/* *INDENT-ON* */
/* Expands to a vertical resampler writing guint8: for each column i it
 * dot-products n_taps rows (stride bytes apart, via PTR_OFFSET) with the
 * tap vector, rounds, shifts by _shift, and clamps to 0..255. */
#define RESAMPLE_VERT(function, tap_type, src_type, _n_taps, _shift) \
static void \
function (guint8 *dest, \
    const tap_type *taps, const src_type *src, int stride, int n_taps, \
    int shift, int n) \
{ \
  int i; \
  int l; \
  gint32 sum_y; \
  gint32 offset = (1<<_shift) >> 1; \
  for (i = 0; i < n; i++) { \
    sum_y = 0; \
    for (l = 0; l < n_taps; l++) { \
      const src_type *line = PTR_OFFSET(src, stride * l); \
      sum_y += line[i] * taps[l]; \
    } \
    dest[i] = CLAMP ((sum_y + offset) >> _shift, 0, 255); \
  } \
}
/* Like RESAMPLE_VERT, but with horizontal error-feedback dithering: the
 * sub-LSB remainder of each output (masked to the low _shift bits) is
 * carried into the next column instead of being rounded away. */
#define RESAMPLE_VERT_DITHER(function, tap_type, src_type, _n_taps, _shift) \
static void \
function (guint8 *dest, \
    const tap_type *taps, const src_type *src, int stride, int n_taps, \
    int shift, int n) \
{ \
  int i; \
  int l; \
  gint32 sum_y; \
  gint32 err_y = 0; \
  gint32 mask = (1<<_shift) - 1; \
  for (i = 0; i < n; i++) { \
    sum_y = 0; \
    for (l = 0; l < n_taps; l++) { \
      const src_type *line = PTR_OFFSET(src, stride * l); \
      sum_y += line[i] * taps[l]; \
    } \
    err_y += sum_y; \
    dest[i] = CLAMP (err_y >> _shift, 0, 255); \
    err_y &= mask; \
  } \
}
/* *INDENT-OFF* */
/* Fixed-point vertical resamplers.  The macros expand to complete
 * function definitions, so the invocations take no trailing semicolon
 * (a stray `;` at file scope is not valid ISO C). */
RESAMPLE_VERT (resample_vert_int32_generic, gint32, gint32, n_taps, shift)
RESAMPLE_VERT_DITHER (resample_vert_dither_int32_generic, gint32, gint32,
    n_taps, shift)
RESAMPLE_VERT (resample_vert_int16_generic, gint16, gint16, n_taps, shift)
RESAMPLE_VERT_DITHER (resample_vert_dither_int16_generic, gint16, gint16,
    n_taps, shift)
/* *INDENT-ON* */
/* Expands to a floating-point vertical resampler: accumulates in
 * src_type, rounds half-up with floor(0.5 + x), and clamps to the
 * given maximum (255 for guint8 dests, 65535 for guint16).  The
 * _n_taps/_shift macro parameters are unused here; shift is accepted
 * only for signature compatibility. */
#define RESAMPLE_VERT_FLOAT(function, dest_type, clamp, tap_type, src_type, _n_taps, _shift) \
static void \
function (dest_type *dest, \
    const tap_type *taps, const src_type *src, int stride, int n_taps, \
    int shift, int n) \
{ \
  int i; \
  int l; \
  src_type sum_y; \
  for (i = 0; i < n; i++) { \
    sum_y = 0; \
    for (l = 0; l < n_taps; l++) { \
      const src_type *line = PTR_OFFSET(src, stride * l); \
      sum_y += line[i] * taps[l]; \
    } \
    dest[i] = CLAMP (floor(0.5 + sum_y), 0, clamp); \
  } \
}
/* Like RESAMPLE_VERT_FLOAT, but with error-feedback dithering: the
 * fractional remainder of each output is carried into the next column
 * instead of being rounded away. */
#define RESAMPLE_VERT_FLOAT_DITHER(function, dest_type, clamp, tap_type, src_type, _n_taps, _shift) \
static void \
function (dest_type *dest, \
    const tap_type *taps, const src_type *src, int stride, int n_taps, \
    int shift, int n) \
{ \
  int i; \
  int l; \
  src_type sum_y; \
  src_type err_y = 0; \
  for (i = 0; i < n; i++) { \
    sum_y = 0; \
    for (l = 0; l < n_taps; l++) { \
      const src_type *line = PTR_OFFSET(src, stride * l); \
      sum_y += line[i] * taps[l]; \
    } \
    err_y += sum_y; \
    dest[i] = CLAMP (floor (err_y), 0, clamp); \
    err_y -= floor (err_y); \
  } \
}
/* *INDENT-OFF* */
/* Floating-point vertical resamplers for 8-bit and 16-bit outputs. */
RESAMPLE_VERT_FLOAT (resample_vert_double_generic, guint8, 255, double, double, n_taps,
    shift)
RESAMPLE_VERT_FLOAT_DITHER (resample_vert_dither_double_generic, guint8, 255, double, double,
    n_taps, shift)
RESAMPLE_VERT_FLOAT (resample_vert_double_generic_u16, guint16, 65535, double, double, n_taps,
    shift)
RESAMPLE_VERT_FLOAT_DITHER (resample_vert_dither_double_generic_u16, guint16, 65535, double, double,
    n_taps, shift)
RESAMPLE_VERT_FLOAT (resample_vert_float_generic, guint8, 255, float, float, n_taps, shift)
RESAMPLE_VERT_FLOAT_DITHER (resample_vert_dither_float_generic, guint8, 255, float, float,
    n_taps, shift)
/* *INDENT-ON* */
/* Fixed-point budget for the int16 pathway: horizontal taps are scaled
 * by 1<<S16_SHIFT1, vertical taps by 1<<S16_SHIFT2; the horizontal pass
 * keeps full precision (MIDSHIFT 0) and the combined scaling is removed
 * after the vertical pass (POSTSHIFT). */
#define S16_SHIFT1 7
#define S16_SHIFT2 7
#define S16_MIDSHIFT 0
#define S16_POSTSHIFT (S16_SHIFT1+S16_SHIFT2-S16_MIDSHIFT)
/* Core of the int16 Y scaler.  For each destination row, horizontally
 * resample any source rows not yet processed into the temporary buffer,
 * then vertically resample the buffered rows into the destination.
 * TMP_LINE_S16 and SRC_LINE are macros defined elsewhere in this file;
 * presumably TMP_LINE_S16(i) addresses line i of scale->tmpdata and
 * SRC_LINE(i) line i of the source image -- confirm against their
 * definitions. */
static void
vs_scale_lanczos_Y_int16 (Scale * scale)
{
  int j;
  int yi;                       /* first source row needed for dest row j */
  int tmp_yi;                   /* next source row to horizontally resample */

  tmp_yi = 0;
  for (j = 0; j < scale->dest->height; j++) {
    guint8 *destline;
    gint16 *taps;

    destline = scale->dest->pixels + scale->dest->stride * j;
    yi = scale->y_scale1d.offsets[j];
    /* advance the horizontal pass up to the last row this dest row needs;
     * tmp_yi persists across iterations so each source row is done once */
    while (tmp_yi < yi + scale->y_scale1d.n_taps) {
      scale->horiz_resample_func (TMP_LINE_S16 (tmp_yi),
          scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi),
          scale->x_scale1d.n_taps, S16_MIDSHIFT, scale->dest->width);
      tmp_yi++;
    }
    taps = (gint16 *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps;
    if (scale->dither) {
      resample_vert_dither_int16_generic (destline,
          taps, TMP_LINE_S16 (scale->y_scale1d.offsets[j]),
          sizeof (gint16) * scale->dest->width, scale->y_scale1d.n_taps,
          S16_POSTSHIFT, scale->dest->width);
    } else {
      resample_vert_int16_generic (destline,
          taps, TMP_LINE_S16 (scale->y_scale1d.offsets[j]),
          sizeof (gint16) * scale->dest->width, scale->y_scale1d.n_taps,
          S16_POSTSHIFT, scale->dest->width);
    }
  }
}
/* Scales a Y image using int16 fixed-point arithmetic.  tmpbuf is
 * unused; an intermediate buffer is allocated internally instead. */
void
vs_image_scale_lanczos_Y_int16 (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
    double sharpen)
{
  Scale scale = { 0 };
  int taps;

  scale.dest = dest;
  scale.src = src;
  scale.dither = dither;

  /* horizontal taps, rounded up so a specialized resampler can be used */
  taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness);
  taps = ROUND_UP_4 (taps);
  scale1d_calculate_taps_int16 (&scale.x_scale1d,
      src->width, dest->width, taps, a, sharpness, sharpen, S16_SHIFT1);

  taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness);
  scale1d_calculate_taps_int16 (&scale.y_scale1d,
      src->height, dest->height, taps, a, sharpness, sharpen, S16_SHIFT2);

  /* pick an unrolled horizontal resampler when the tap count allows */
  if (scale.x_scale1d.n_taps == 4)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_u8_taps4_shift0;
  else if (scale.x_scale1d.n_taps == 8)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_u8_taps8_shift0;
  else if (scale.x_scale1d.n_taps == 12)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_u8_taps12_shift0;
  else if (scale.x_scale1d.n_taps == 16)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_u8_taps16_shift0;
  else
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_u8_generic;

  scale.tmpdata =
      g_malloc (sizeof (gint16) * dest->width * src->height);

  vs_scale_lanczos_Y_int16 (&scale);

  scale1d_cleanup (&scale.x_scale1d);
  scale1d_cleanup (&scale.y_scale1d);
  g_free (scale.tmpdata);
}
/* Fixed-point budget for the int32 pathway, analogous to the S16_*
 * constants but with 11 bits per pass. */
#define S32_SHIFT1 11
#define S32_SHIFT2 11
#define S32_MIDSHIFT 0
#define S32_POSTSHIFT (S32_SHIFT1+S32_SHIFT2-S32_MIDSHIFT)
/* Core of the int32 Y scaler; same structure as vs_scale_lanczos_Y_int16
 * but with gint32 intermediates.  TMP_LINE_S32 and SRC_LINE are macros
 * defined elsewhere in this file; presumably they address line i of
 * scale->tmpdata and the source image -- confirm against their
 * definitions. */
static void
vs_scale_lanczos_Y_int32 (Scale * scale)
{
  int j;
  int yi;                       /* first source row needed for dest row j */
  int tmp_yi;                   /* next source row to horizontally resample */

  tmp_yi = 0;
  for (j = 0; j < scale->dest->height; j++) {
    guint8 *destline;
    gint32 *taps;

    destline = scale->dest->pixels + scale->dest->stride * j;
    yi = scale->y_scale1d.offsets[j];
    /* horizontally resample rows not yet in the temporary buffer */
    while (tmp_yi < yi + scale->y_scale1d.n_taps) {
      scale->horiz_resample_func (TMP_LINE_S32 (tmp_yi),
          scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi),
          scale->x_scale1d.n_taps, S32_MIDSHIFT, scale->dest->width);
      tmp_yi++;
    }
    taps = (gint32 *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps;
    if (scale->dither) {
      resample_vert_dither_int32_generic (destline,
          taps, TMP_LINE_S32 (scale->y_scale1d.offsets[j]),
          sizeof (gint32) * scale->dest->width,
          scale->y_scale1d.n_taps, S32_POSTSHIFT, scale->dest->width);
    } else {
      resample_vert_int32_generic (destline,
          taps, TMP_LINE_S32 (scale->y_scale1d.offsets[j]),
          sizeof (gint32) * scale->dest->width,
          scale->y_scale1d.n_taps, S32_POSTSHIFT, scale->dest->width);
    }
  }
}
/* Scales a Y image using int32 fixed-point arithmetic.  tmpbuf is
 * unused; an intermediate buffer is allocated internally instead. */
void
vs_image_scale_lanczos_Y_int32 (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
    double sharpen)
{
  Scale scale = { 0 };
  int taps;

  scale.dest = dest;
  scale.src = src;
  scale.dither = dither;

  /* horizontal taps, rounded up so a specialized resampler can be used */
  taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness);
  taps = ROUND_UP_4 (taps);
  scale1d_calculate_taps_int32 (&scale.x_scale1d,
      src->width, dest->width, taps, a, sharpness, sharpen, S32_SHIFT1);

  taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness);
  scale1d_calculate_taps_int32 (&scale.y_scale1d,
      src->height, dest->height, taps, a, sharpness, sharpen, S32_SHIFT2);

  /* pick an unrolled horizontal resampler when the tap count allows */
  if (scale.x_scale1d.n_taps == 4)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_u8_taps4_shift0;
  else if (scale.x_scale1d.n_taps == 8)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_u8_taps8_shift0;
  else if (scale.x_scale1d.n_taps == 12)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_u8_taps12_shift0;
  else if (scale.x_scale1d.n_taps == 16)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_u8_taps16_shift0;
  else
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_u8_generic;

  scale.tmpdata =
      g_malloc (sizeof (int32_t) * dest->width * src->height);

  vs_scale_lanczos_Y_int32 (&scale);

  scale1d_cleanup (&scale.x_scale1d);
  scale1d_cleanup (&scale.y_scale1d);
  g_free (scale.tmpdata);
}
/* Core of the double-precision Y scaler; same structure as the int16
 * variant but with double intermediates and no fixed-point shifts.
 * TMP_LINE_DOUBLE and SRC_LINE are macros defined elsewhere in this
 * file; presumably they address line i of scale->tmpdata and the source
 * image -- confirm against their definitions. */
static void
vs_scale_lanczos_Y_double (Scale * scale)
{
  int j;
  int yi;                       /* first source row needed for dest row j */
  int tmp_yi;                   /* next source row to horizontally resample */

  tmp_yi = 0;
  for (j = 0; j < scale->dest->height; j++) {
    guint8 *destline;
    double *taps;

    destline = scale->dest->pixels + scale->dest->stride * j;
    yi = scale->y_scale1d.offsets[j];
    /* horizontally resample rows not yet in the temporary buffer */
    while (tmp_yi < yi + scale->y_scale1d.n_taps) {
      scale->horiz_resample_func (TMP_LINE_DOUBLE (tmp_yi),
          scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi),
          scale->x_scale1d.n_taps, 0, scale->dest->width);
      tmp_yi++;
    }
    taps = (double *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps;
    if (scale->dither) {
      resample_vert_dither_double_generic (destline,
          taps, TMP_LINE_DOUBLE (scale->y_scale1d.offsets[j]),
          sizeof (double) * scale->dest->width,
          scale->y_scale1d.n_taps, 0, scale->dest->width);
    } else {
      resample_vert_double_generic (destline,
          taps, TMP_LINE_DOUBLE (scale->y_scale1d.offsets[j]),
          sizeof (double) * scale->dest->width,
          scale->y_scale1d.n_taps, 0, scale->dest->width);
    }
  }
}
/* Scales a Y image using double-precision arithmetic.  tmpbuf is
 * unused; an intermediate buffer is allocated internally instead. */
void
vs_image_scale_lanczos_Y_double (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
    double sharpen)
{
  Scale scale = { 0 };
  int taps;

  scale.dest = dest;
  scale.src = src;
  scale.dither = dither;
  scale.horiz_resample_func =
      (HorizResampleFunc) resample_horiz_double_u8_generic;

  taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness);
  scale1d_calculate_taps (&scale.x_scale1d,
      src->width, dest->width, taps, a, sharpness, sharpen);
  taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness);
  scale1d_calculate_taps (&scale.y_scale1d,
      src->height, dest->height, taps, a, sharpness, sharpen);

  scale.tmpdata =
      g_malloc (sizeof (double) * dest->width * src->height);

  vs_scale_lanczos_Y_double (&scale);

  scale1d_cleanup (&scale.x_scale1d);
  scale1d_cleanup (&scale.y_scale1d);
  g_free (scale.tmpdata);
}
/* Core of the single-precision Y scaler; same structure as the double
 * variant.  TMP_LINE_FLOAT and SRC_LINE are macros defined elsewhere in
 * this file; presumably they address line i of scale->tmpdata and the
 * source image -- confirm against their definitions. */
static void
vs_scale_lanczos_Y_float (Scale * scale)
{
  int j;
  int yi;                       /* first source row needed for dest row j */
  int tmp_yi;                   /* next source row to horizontally resample */

  tmp_yi = 0;
  for (j = 0; j < scale->dest->height; j++) {
    guint8 *destline;
    float *taps;

    destline = scale->dest->pixels + scale->dest->stride * j;
    yi = scale->y_scale1d.offsets[j];
    /* horizontally resample rows not yet in the temporary buffer */
    while (tmp_yi < yi + scale->y_scale1d.n_taps) {
      scale->horiz_resample_func (TMP_LINE_FLOAT (tmp_yi),
          scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi),
          scale->x_scale1d.n_taps, 0, scale->dest->width);
      tmp_yi++;
    }
    taps = (float *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps;
    if (scale->dither) {
      resample_vert_dither_float_generic (destline,
          taps, TMP_LINE_FLOAT (scale->y_scale1d.offsets[j]),
          sizeof (float) * scale->dest->width,
          scale->y_scale1d.n_taps, 0, scale->dest->width);
    } else {
      resample_vert_float_generic (destline,
          taps, TMP_LINE_FLOAT (scale->y_scale1d.offsets[j]),
          sizeof (float) * scale->dest->width,
          scale->y_scale1d.n_taps, 0, scale->dest->width);
    }
  }
}
/* Scales a Y image using single-precision arithmetic.  tmpbuf is
 * unused; an intermediate buffer is allocated internally instead. */
void
vs_image_scale_lanczos_Y_float (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
    double sharpen)
{
  Scale scale = { 0 };
  int taps;

  scale.dest = dest;
  scale.src = src;
  scale.dither = dither;
  scale.horiz_resample_func =
      (HorizResampleFunc) resample_horiz_float_u8_generic;

  taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness);
  scale1d_calculate_taps_float (&scale.x_scale1d,
      src->width, dest->width, taps, a, sharpness, sharpen);
  taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness);
  scale1d_calculate_taps_float (&scale.y_scale1d,
      src->height, dest->height, taps, a, sharpness, sharpen);

  scale.tmpdata =
      g_malloc (sizeof (float) * dest->width * src->height);

  vs_scale_lanczos_Y_float (&scale);

  scale1d_cleanup (&scale.x_scale1d);
  scale1d_cleanup (&scale.y_scale1d);
  g_free (scale.tmpdata);
}
/* Core of the int16 AYUV scaler; like vs_scale_lanczos_Y_int16 but each
 * pixel is 4 interleaved components, so the vertical pass operates on
 * width*4 values.  TMP_LINE_S16_AYUV and SRC_LINE are macros defined
 * elsewhere in this file; presumably they address line i of
 * scale->tmpdata and the source image -- confirm against their
 * definitions. */
static void
vs_scale_lanczos_AYUV_int16 (Scale * scale)
{
  int j;
  int yi;                       /* first source row needed for dest row j */
  int tmp_yi;                   /* next source row to horizontally resample */

  tmp_yi = 0;
  for (j = 0; j < scale->dest->height; j++) {
    guint8 *destline;
    gint16 *taps;

    destline = scale->dest->pixels + scale->dest->stride * j;
    yi = scale->y_scale1d.offsets[j];
    /* horizontally resample rows not yet in the temporary buffer */
    while (tmp_yi < yi + scale->y_scale1d.n_taps) {
      scale->horiz_resample_func (TMP_LINE_S16_AYUV (tmp_yi),
          scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi),
          scale->x_scale1d.n_taps, S16_MIDSHIFT, scale->dest->width);
      tmp_yi++;
    }
    taps = (gint16 *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps;
    if (scale->dither) {
      resample_vert_dither_int16_generic (destline,
          taps, TMP_LINE_S16_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (gint16) * 4 * scale->dest->width,
          scale->y_scale1d.n_taps, S16_POSTSHIFT, scale->dest->width * 4);
    } else {
      resample_vert_int16_generic (destline,
          taps, TMP_LINE_S16_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (gint16) * 4 * scale->dest->width,
          scale->y_scale1d.n_taps, S16_POSTSHIFT, scale->dest->width * 4);
    }
  }
}
/* Scales an AYUV image using int16 fixed-point arithmetic.  tmpbuf is
 * unused; an intermediate buffer is allocated internally instead. */
void
vs_image_scale_lanczos_AYUV_int16 (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
    double sharpen)
{
  Scale scale = { 0 };
  int taps;

  scale.dest = dest;
  scale.src = src;
  scale.dither = dither;

  /* horizontal taps, rounded up so a specialized resampler can be used */
  taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness);
  taps = ROUND_UP_4 (taps);
  scale1d_calculate_taps_int16 (&scale.x_scale1d,
      src->width, dest->width, taps, a, sharpness, sharpen, S16_SHIFT1);

  taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness);
  scale1d_calculate_taps_int16 (&scale.y_scale1d,
      src->height, dest->height, taps, a, sharpness, sharpen, S16_SHIFT2);

  /* pick an unrolled horizontal resampler when the tap count allows */
  if (scale.x_scale1d.n_taps == 4)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_ayuv_taps4_shift0;
  else if (scale.x_scale1d.n_taps == 8)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_ayuv_taps8_shift0;
  else if (scale.x_scale1d.n_taps == 12)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_ayuv_taps12_shift0;
  else if (scale.x_scale1d.n_taps == 16)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_ayuv_taps16_shift0;
  else
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int16_int16_ayuv_generic;

  scale.tmpdata =
      g_malloc (sizeof (gint16) * dest->width * src->height * 4);

  vs_scale_lanczos_AYUV_int16 (&scale);

  scale1d_cleanup (&scale.x_scale1d);
  scale1d_cleanup (&scale.y_scale1d);
  g_free (scale.tmpdata);
}
/* Core of the int32 AYUV scaler; 4 interleaved components per pixel, so
 * the vertical pass operates on width*4 values.  TMP_LINE_S32_AYUV and
 * SRC_LINE are macros defined elsewhere in this file; presumably they
 * address line i of scale->tmpdata and the source image -- confirm
 * against their definitions. */
static void
vs_scale_lanczos_AYUV_int32 (Scale * scale)
{
  int j;
  int yi;                       /* first source row needed for dest row j */
  int tmp_yi;                   /* next source row to horizontally resample */

  tmp_yi = 0;
  for (j = 0; j < scale->dest->height; j++) {
    guint8 *destline;
    gint32 *taps;

    destline = scale->dest->pixels + scale->dest->stride * j;
    yi = scale->y_scale1d.offsets[j];
    /* horizontally resample rows not yet in the temporary buffer */
    while (tmp_yi < yi + scale->y_scale1d.n_taps) {
      scale->horiz_resample_func (TMP_LINE_S32_AYUV (tmp_yi),
          scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi),
          scale->x_scale1d.n_taps, S32_MIDSHIFT, scale->dest->width);
      tmp_yi++;
    }
    taps = (gint32 *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps;
    if (scale->dither) {
      resample_vert_dither_int32_generic (destline,
          taps, TMP_LINE_S32_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (gint32) * 4 * scale->dest->width, scale->y_scale1d.n_taps,
          S32_POSTSHIFT, scale->dest->width * 4);
    } else {
      resample_vert_int32_generic (destline,
          taps, TMP_LINE_S32_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (gint32) * 4 * scale->dest->width, scale->y_scale1d.n_taps,
          S32_POSTSHIFT, scale->dest->width * 4);
    }
  }
}
/* Scales an AYUV image using int32 fixed-point arithmetic.  tmpbuf is
 * unused; an intermediate buffer is allocated internally instead. */
void
vs_image_scale_lanczos_AYUV_int32 (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
    double sharpen)
{
  Scale scale = { 0 };
  int taps;

  scale.dest = dest;
  scale.src = src;
  scale.dither = dither;

  /* horizontal taps, rounded up so a specialized resampler can be used */
  taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness);
  taps = ROUND_UP_4 (taps);
  scale1d_calculate_taps_int32 (&scale.x_scale1d,
      src->width, dest->width, taps, a, sharpness, sharpen, S32_SHIFT1);

  taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness);
  scale1d_calculate_taps_int32 (&scale.y_scale1d,
      src->height, dest->height, taps, a, sharpness, sharpen, S32_SHIFT2);

  /* pick an unrolled horizontal resampler when the tap count allows */
  if (scale.x_scale1d.n_taps == 4)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_ayuv_taps4_shift0;
  else if (scale.x_scale1d.n_taps == 8)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_ayuv_taps8_shift0;
  else if (scale.x_scale1d.n_taps == 12)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_ayuv_taps12_shift0;
  else if (scale.x_scale1d.n_taps == 16)
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_ayuv_taps16_shift0;
  else
    scale.horiz_resample_func =
        (HorizResampleFunc) resample_horiz_int32_int32_ayuv_generic;

  scale.tmpdata =
      g_malloc (sizeof (int32_t) * dest->width * src->height * 4);

  vs_scale_lanczos_AYUV_int32 (&scale);

  scale1d_cleanup (&scale.x_scale1d);
  scale1d_cleanup (&scale.y_scale1d);
  g_free (scale.tmpdata);
}
/* Core of the double-precision AYUV scaler; 4 interleaved components
 * per pixel.  TMP_LINE_DOUBLE_AYUV and SRC_LINE are macros defined
 * elsewhere in this file; presumably they address line i of
 * scale->tmpdata and the source image -- confirm against their
 * definitions. */
static void
vs_scale_lanczos_AYUV_double (Scale * scale)
{
  int j;
  int yi;                       /* first source row needed for dest row j */
  int tmp_yi;                   /* next source row to horizontally resample */

  tmp_yi = 0;
  for (j = 0; j < scale->dest->height; j++) {
    guint8 *destline;
    double *taps;

    destline = scale->dest->pixels + scale->dest->stride * j;
    yi = scale->y_scale1d.offsets[j];
    /* horizontally resample rows not yet in the temporary buffer */
    while (tmp_yi < yi + scale->y_scale1d.n_taps) {
      scale->horiz_resample_func (TMP_LINE_DOUBLE_AYUV (tmp_yi),
          scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi),
          scale->x_scale1d.n_taps, 0, scale->dest->width);
      tmp_yi++;
    }
    taps = (double *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps;
    if (scale->dither) {
      resample_vert_dither_double_generic (destline,
          taps, TMP_LINE_DOUBLE_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (double) * 4 * scale->dest->width,
          scale->y_scale1d.n_taps, 0, scale->dest->width * 4);
    } else {
      resample_vert_double_generic (destline,
          taps, TMP_LINE_DOUBLE_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (double) * 4 * scale->dest->width,
          scale->y_scale1d.n_taps, 0, scale->dest->width * 4);
    }
  }
}
/* Scales an AYUV image using double-precision arithmetic.  tmpbuf is
 * unused; an intermediate buffer is allocated internally instead. */
void
vs_image_scale_lanczos_AYUV_double (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
    double sharpen)
{
  Scale scale = { 0 };
  int taps;

  scale.dest = dest;
  scale.src = src;
  scale.dither = dither;
  scale.horiz_resample_func =
      (HorizResampleFunc) resample_horiz_double_ayuv_generic;

  taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness);
  scale1d_calculate_taps (&scale.x_scale1d,
      src->width, dest->width, taps, a, sharpness, sharpen);
  taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness);
  scale1d_calculate_taps (&scale.y_scale1d,
      src->height, dest->height, taps, a, sharpness, sharpen);

  scale.tmpdata =
      g_malloc (sizeof (double) * dest->width * src->height * 4);

  vs_scale_lanczos_AYUV_double (&scale);

  scale1d_cleanup (&scale.x_scale1d);
  scale1d_cleanup (&scale.y_scale1d);
  g_free (scale.tmpdata);
}
/* Two-pass Lanczos scaler core for AYUV, single-precision (float) taps.
 * Same structure as the double variant: per destination row, horizontally
 * resample any newly needed source rows into the temp buffer, then do the
 * vertical combine with that row's taps.  tmp_yi only advances, so each
 * source row is resampled once. */
static void
vs_scale_lanczos_AYUV_float (Scale * scale)
{
  int j;
  int yi;                       /* first source row needed by dest row j */
  int tmp_yi;                   /* next source row not yet resampled */
  tmp_yi = 0;
  for (j = 0; j < scale->dest->height; j++) {
    guint8 *destline;
    float *taps;
    destline = scale->dest->pixels + scale->dest->stride * j;
    yi = scale->y_scale1d.offsets[j];
    /* Horizontal pass for rows entering the vertical filter window. */
    while (tmp_yi < yi + scale->y_scale1d.n_taps) {
      scale->horiz_resample_func (TMP_LINE_FLOAT_AYUV (tmp_yi),
          scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi),
          scale->x_scale1d.n_taps, 0, scale->dest->width);
      tmp_yi++;
    }
    /* Vertical pass; 4 float components per pixel. */
    taps = (float *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps;
    if (scale->dither) {
      resample_vert_dither_float_generic (destline,
          taps, TMP_LINE_FLOAT_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (float) * 4 * scale->dest->width, scale->y_scale1d.n_taps, 0,
          scale->dest->width * 4);
    } else {
      resample_vert_float_generic (destline,
          taps, TMP_LINE_FLOAT_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (float) * 4 * scale->dest->width, scale->y_scale1d.n_taps, 0,
          scale->dest->width * 4);
    }
  }
}
/* Entry point: Lanczos-scale an AYUV image with single-precision filtering.
 * Mirrors the double variant but uses float tap tables and a float temp
 * buffer.  (tmpbuf is unused; scratch memory is allocated internally.) */
void
vs_image_scale_lanczos_AYUV_float (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
    double sharpen)
{
  Scale ctx = { 0 };
  int taps;

  ctx.dest = dest;
  ctx.src = src;
  ctx.dither = dither;
  ctx.horiz_resample_func =
      (HorizResampleFunc) resample_horiz_float_ayuv_generic;

  /* Per-axis filter tap tables (float precision). */
  taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness);
  scale1d_calculate_taps_float (&ctx.x_scale1d,
      src->width, dest->width, taps, a, sharpness, sharpen);
  taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness);
  scale1d_calculate_taps_float (&ctx.y_scale1d,
      src->height, dest->height, taps, a, sharpness, sharpen);

  /* One horizontally-resampled line (4 floats per pixel) per source row. */
  ctx.tmpdata =
      g_malloc (sizeof (float) * dest->width * src->height * 4);

  vs_scale_lanczos_AYUV_float (&ctx);

  g_free (ctx.tmpdata);
  scale1d_cleanup (&ctx.x_scale1d);
  scale1d_cleanup (&ctx.y_scale1d);
}
/* Two-pass Lanczos scaler core for AYUV64 (16-bit components), using
 * double-precision taps.  Same shape as the 8-bit double variant, but the
 * destination line is treated as guint16 and the vertical resamplers are
 * the *_u16 flavors that write 16-bit samples. */
static void
vs_scale_lanczos_AYUV64_double (Scale * scale)
{
  int j;
  int yi;                       /* first source row needed by dest row j */
  int tmp_yi;                   /* next source row not yet resampled */
  tmp_yi = 0;
  for (j = 0; j < scale->dest->height; j++) {
    guint16 *destline;
    double *taps;
    destline = (guint16 *) (scale->dest->pixels + scale->dest->stride * j);
    yi = scale->y_scale1d.offsets[j];
    /* Horizontal pass for rows entering the vertical filter window. */
    while (tmp_yi < yi + scale->y_scale1d.n_taps) {
      scale->horiz_resample_func (TMP_LINE_DOUBLE_AYUV (tmp_yi),
          scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi),
          scale->x_scale1d.n_taps, 0, scale->dest->width);
      tmp_yi++;
    }
    /* Vertical pass producing 16-bit output samples. */
    taps = (double *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps;
    if (scale->dither) {
      resample_vert_dither_double_generic_u16 (destline,
          taps, TMP_LINE_DOUBLE_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (double) * 4 * scale->dest->width,
          scale->y_scale1d.n_taps, 0, scale->dest->width * 4);
    } else {
      resample_vert_double_generic_u16 (destline,
          taps, TMP_LINE_DOUBLE_AYUV (scale->y_scale1d.offsets[j]),
          sizeof (double) * 4 * scale->dest->width,
          scale->y_scale1d.n_taps, 0, scale->dest->width * 4);
    }
  }
}
/* Entry point: Lanczos-scale a 16-bit AYUV64 image with double-precision
 * filtering.  Identical setup to the 8-bit double path except for the
 * s16-aware horizontal resampler and the AYUV64 scaler core.
 * (tmpbuf is unused; scratch memory is allocated internally.) */
void
vs_image_scale_lanczos_AYUV64_double (const VSImage * dest, const VSImage * src,
    uint8_t * tmpbuf, double sharpness, gboolean dither, double a,
    double sharpen)
{
  Scale ctx = { 0 };
  int taps;

  ctx.dest = dest;
  ctx.src = src;
  ctx.dither = dither;
  ctx.horiz_resample_func =
      (HorizResampleFunc) resample_horiz_double_ayuv_generic_s16;

  /* Per-axis filter tap tables. */
  taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness);
  scale1d_calculate_taps (&ctx.x_scale1d,
      src->width, dest->width, taps, a, sharpness, sharpen);
  taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness);
  scale1d_calculate_taps (&ctx.y_scale1d,
      src->height, dest->height, taps, a, sharpness, sharpen);

  /* One horizontally-resampled line (4 doubles per pixel) per source row. */
  ctx.tmpdata =
      g_malloc (sizeof (double) * dest->width * src->height * 4);

  vs_scale_lanczos_AYUV64_double (&ctx);

  g_free (ctx.tmpdata);
  scale1d_cleanup (&ctx.x_scale1d);
  scale1d_cleanup (&ctx.y_scale1d);
}
| gpl-2.0 |
stevezilla/u-boot-amherst | arch/arm/cpu/armv7/omap3/sys_info.c | 19 | 8730 | /*
* (C) Copyright 2008
* Texas Instruments, <www.ti.com>
*
* Author :
* Manikandan Pillai <mani.pillai@ti.com>
*
* Derived from Beagle Board and 3430 SDP code by
* Richard Woodruff <r-woodruff2@ti.com>
* Syed Mohammed Khasim <khasim@ti.com>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <asm/io.h>
#include <asm/arch/mem.h> /* get mem tables */
#include <asm/arch/sys_proto.h>
#include <i2c.h>
#include <linux/compiler.h>
extern omap3_sysinfo sysinfo;
static struct ctrl *ctrl_base = (struct ctrl *)OMAP34XX_CTRL_BASE;
#ifdef CONFIG_DISPLAY_CPUINFO
static char *rev_s[CPU_3XX_MAX_REV] = {
"1.0",
"2.0",
"2.1",
"3.0",
"3.1",
"UNKNOWN",
"UNKNOWN",
"3.1.2"};
/* this is the revision table for 37xx CPUs */
static char *rev_s_37xx[CPU_37XX_MAX_REV] = {
"1.0",
"1.1",
"1.2"};
#endif /* CONFIG_DISPLAY_CPUINFO */
/*****************************************************************
 * get_dieid(u32 *id) - read die ID
 *
 * Fills id[0..3] with the 128-bit unique die ID from the control
 * module ID registers.  Note the word order is reversed: die_id_0
 * lands in id[3] and die_id_3 in id[0], so id[0] prints first as
 * the most significant word.
 *****************************************************************/
void get_dieid(u32 *id)
{
	struct ctrl_id *id_base = (struct ctrl_id *)OMAP34XX_ID_L4_IO_BASE;

	id[3] = readl(&id_base->die_id_0);
	id[2] = readl(&id_base->die_id_1);
	id[1] = readl(&id_base->die_id_2);
	id[0] = readl(&id_base->die_id_3);
}
/*****************************************************************
 * dieid_num_r(void) - print the die ID, caching it in the
 * "dieid#" environment variable on first use.
 *
 * If "dieid#" is already set, that value is printed as-is;
 * otherwise the hardware ID registers are read, formatted as
 * 32 hex digits, stored in the environment, and printed.
 *****************************************************************/
void dieid_num_r(void)
{
	char *uid_s, die_id[34];
	u32 id[4];

	memset(die_id, 0, sizeof(die_id));

	uid_s = getenv("dieid#");
	if (uid_s == NULL) {
		get_dieid(id);
		/*
		 * 32 hex digits + NUL fit in die_id[34]; use snprintf
		 * instead of sprintf so the write stays bounded even if
		 * the format ever changes.
		 */
		snprintf(die_id, sizeof(die_id), "%08x%08x%08x%08x",
			 id[0], id[1], id[2], id[3]);
		setenv("dieid#", die_id);
		uid_s = die_id;
	}

	printf("Die ID #%s\n", uid_s);
}
/******************************************
 * get_cpu_type(void) - extract cpu info
 *
 * Returns the raw value of the control module's OMAP status
 * register; callers decode it against the OMAP35xx/AM35xx part
 * number constants (see print_cpuinfo).
 ******************************************/
u32 get_cpu_type(void)
{
	return readl(&ctrl_base->ctrl_omap_stat);
}
/******************************************
 * get_cpu_id(void) - extract cpu id
 * returns 0 for ES1.0, cpuid otherwise
 ******************************************/
u32 get_cpu_id(void)
{
	struct ctrl_id *id_base;
	u32 cpuid = 0;

	/*
	 * On ES1.0 the IDCODE register is not exposed on L4
	 * so using CPU ID to differentiate between ES1.0 and > ES1.0.
	 */
	/* Read the ARM Main ID register (CP15 c0). */
	__asm__ __volatile__("mrc p15, 0, %0, c0, c0, 0":"=r"(cpuid));
	if ((cpuid & 0xf) == 0x0) {
		/* Revision nibble 0 => ES1.0, which has no IDCODE. */
		return 0;
	} else {
		/* Decode the IDs on > ES1.0 */
		id_base = (struct ctrl_id *) OMAP34XX_ID_L4_IO_BASE;

		cpuid = readl(&id_base->idcode);
	}

	return cpuid;
}
/******************************************
 * get_cpu_family(void) - extract cpu info
 *
 * Maps the hawkeye field of the IDCODE register to one of the
 * CPU_OMAP34XX / CPU_AM35XX / CPU_OMAP36XX family constants.
 * Unknown parts (and ES1.0, which exposes no IDCODE) are reported
 * as CPU_OMAP34XX.
 ******************************************/
u32 get_cpu_family(void)
{
	u32 cpuid = get_cpu_id();
	u16 hawkeye;

	/* ES1.0: no IDCODE register, can only be an OMAP34xx. */
	if (cpuid == 0)
		return CPU_OMAP34XX;

	hawkeye = (cpuid >> HAWKEYE_SHIFT) & 0xffff;
	switch (hawkeye) {
	case HAWKEYE_AM35XX:
		return CPU_AM35XX;
	case HAWKEYE_OMAP36XX:
		return CPU_OMAP36XX;
	case HAWKEYE_OMAP34XX:
	default:
		return CPU_OMAP34XX;
	}
}
/******************************************
 * get_cpu_rev(void) - extract version info
 *
 * Returns the silicon revision field of IDCODE, or CPU_3XX_ES10
 * when the IDCODE register is unavailable (ES1.0 parts).
 ******************************************/
u32 get_cpu_rev(void)
{
	u32 cpuid = get_cpu_id();

	return cpuid ? ((cpuid >> CPU_3XX_ID_SHIFT) & 0xf) : CPU_3XX_ES10;
}
/*****************************************************************
 * get_sku_id(void) - read sku_id to get info on max clock rate
 *
 * Returns the clock-speed bits of the SKU ID register (masked by
 * SKUID_CLK_MASK); compared against SKUID_CLK_720MHZ by callers.
 *****************************************************************/
u32 get_sku_id(void)
{
	struct ctrl_id *id_base = (struct ctrl_id *)OMAP34XX_ID_L4_IO_BASE;

	return readl(&id_base->sku_id) & SKUID_CLK_MASK;
}
/***************************************************************************
 * get_gpmc0_base() - Return current address hardware will be
 * fetching from. The below effectively gives what is correct, its a bit
 * mis-leading compared to the TRM. For the most general case the mask
 * needs to be also taken into account this does work in practice.
 * - for u-boot we currently map:
 * -- 0 to nothing,
 * -- 4 to flash
 * -- 8 to enent
 * -- c to wifi
 ****************************************************************************/
u32 get_gpmc0_base(void)
{
	u32 b;

	b = readl(&gpmc_cfg->cs[0].config7);
	b &= 0x1F;		/* keep BASEADDR bits.  NOTE(review): 0x1F masks
				 * bits [4:0], not [5:0] as the old comment said;
				 * the TRM defines BASEADDR as a 6-bit field --
				 * confirm whether 0x3F is needed for the general
				 * case.  Works for the mappings listed above. */
	b = b << 24;		/* scale to a CS base address, e.g. 0x0b000000 */

	return b;
}
/*******************************************************************
 * get_gpmc0_width() - See if bus is in x8 or x16 (mainly for nand)
 *
 * Hard-wired to 16-bit for this platform configuration.
 *******************************************************************/
u32 get_gpmc0_width(void)
{
	return WIDTH_16BIT;
}
/*************************************************************************
 * get_board_rev() - setup to pass kernel board revision information
 * returns:(bit[0-3] sub version, higher bit[7-4] is higher version)
 *
 * Weak default: boards without their own implementation report 0x20,
 * i.e. major version 2, sub version 0.
 *************************************************************************/
u32 __weak get_board_rev(void)
{
	return 0x20;
}
/********************************************************
 * get_base(); get upper addr of current execution
 *
 * Reads the program counter and returns its top nibble
 * (bits [31:28]), which identifies the memory region the
 * code is currently executing from.
 *******************************************************/
u32 get_base(void)
{
	u32 val;

	__asm__ __volatile__("mov %0, pc \n":"=r"(val)::"memory");
	val &= 0xF0000000;
	val >>= 28;
	return val;
}
/********************************************************
 * is_running_in_flash() - tell if currently running in
 * FLASH.
 *
 * Address-map nibbles below 4 are CS-mapped flash regions.
 *******************************************************/
u32 is_running_in_flash(void)
{
	return (get_base() < 4) ? 1 : 0;
}
/********************************************************
 * is_running_in_sram() - tell if currently running in
 * SRAM.
 *
 * Address-map nibble 4 is the on-chip SRAM region.
 *******************************************************/
u32 is_running_in_sram(void)
{
	return (get_base() == 4) ? 1 : 0;
}
/********************************************************
 * is_running_in_sdram() - tell if currently running in
 * SDRAM.
 *
 * Address-map nibbles above 4 are external SDRAM.
 *******************************************************/
u32 is_running_in_sdram(void)
{
	return (get_base() > 4) ? 1 : 0;
}
/***************************************************************
 * get_boot_type() - Is this an XIP type device or a stream one
 * bits 4-0 specify type. Bit 5 says mem/perif
 *
 * Returns the sys_boot pin field of the control status register.
 ***************************************************************/
u32 get_boot_type(void)
{
	return (readl(&ctrl_base->status) & SYSBOOT_MASK);
}
/*************************************************************
 * get_device_type(): tell if GP/HS/EMU/TST
 *
 * Extracts the device-type field (DEVICE_MASK, starting at
 * bit 8) from the control status register; compared against
 * GP_DEVICE / HS_DEVICE / EMU_DEVICE / TST_DEVICE by callers.
 *************************************************************/
u32 get_device_type(void)
{
	return ((readl(&ctrl_base->status) & (DEVICE_MASK)) >> 8);
}
#ifdef CONFIG_DISPLAY_CPUINFO
/**
 * Print CPU information
 *
 * Decodes family / part number / security type / max clock from the
 * ID and status registers and prints a one-line summary, e.g.
 * "OMAP3530-GP ES3.1, CPU-OPP2, L3-165MHz, Max CPU Clock 600 MHz".
 *
 * Return: 0 always.
 */
int print_cpuinfo (void)
{
	char *cpu_family_s, *cpu_s, *sec_s, *max_clk;

	switch (get_cpu_family()) {
	case CPU_OMAP34XX:
		cpu_family_s = "OMAP";
		switch (get_cpu_type()) {
		case OMAP3503:
			cpu_s = "3503";
			break;
		case OMAP3515:
			cpu_s = "3515";
			break;
		case OMAP3525:
			cpu_s = "3525";
			break;
		case OMAP3530:
			cpu_s = "3530";
			break;
		default:
			cpu_s = "35XX";
			break;
		}
		/* 720 MHz parts are identified by ES >= 3.1 plus SKU bit. */
		if ((get_cpu_rev() >= CPU_3XX_ES31) &&
		    (get_sku_id() == SKUID_CLK_720MHZ))
			max_clk = "720 MHz";
		else
			max_clk = "600 MHz";

		break;
	case CPU_AM35XX:
		cpu_family_s = "AM";
		switch (get_cpu_type()) {
		case AM3505:
			cpu_s = "3505";
			break;
		case AM3517:
			cpu_s = "3517";
			break;
		default:
			cpu_s = "35XX";
			break;
		}
		max_clk = "600 Mhz";
		break;
	case CPU_OMAP36XX:
		cpu_family_s = "OMAP";
		switch (get_cpu_type()) {
		case OMAP3730:
			cpu_s = "3630/3730";
			break;
		default:
			cpu_s = "36XX/37XX";
			break;
		}
		max_clk = "1 Ghz";
		break;
	default:
		cpu_family_s = "OMAP";
		cpu_s = "35XX";
		max_clk = "600 Mhz";
	}

	switch (get_device_type()) {
	case TST_DEVICE:
		sec_s = "TST";
		break;
	case EMU_DEVICE:
		sec_s = "EMU";
		break;
	case HS_DEVICE:
		sec_s = "HS";
		break;
	case GP_DEVICE:
		sec_s = "GP";
		break;
	default:
		sec_s = "?";
	}

	/*
	 * NOTE(review): rev_s_37xx has CPU_37XX_MAX_REV entries and rev_s
	 * has CPU_3XX_MAX_REV; this assumes get_cpu_rev() never exceeds
	 * those bounds on the matching family -- confirm for newer silicon
	 * revisions.
	 */
	if (CPU_OMAP36XX == get_cpu_family())
		printf("%s%s-%s ES%s, CPU-OPP2, L3-200MHz, Max CPU Clock %s\n",
			cpu_family_s, cpu_s, sec_s,
			rev_s_37xx[get_cpu_rev()], max_clk);
	else
		printf("%s%s-%s ES%s, CPU-OPP2, L3-165MHz, Max CPU Clock %s\n",
			cpu_family_s, cpu_s, sec_s,
			rev_s[get_cpu_rev()], max_clk);

	return 0;
}
#endif /* CONFIG_DISPLAY_CPUINFO */
| gpl-2.0 |
TSCLKS/linux | drivers/i2c/busses/i2c-cadence.c | 275 | 26422 | /*
* I2C bus driver for the Cadence I2C controller.
*
* Copyright (C) 2009 - 2014 Xilinx, Inc.
*
* This program is free software; you can redistribute it
* and/or modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation;
* either version 2 of the License, or (at your option) any
* later version.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
/* Register offsets for the I2C device. */
#define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */
#define CDNS_I2C_SR_OFFSET 0x04 /* Status Register, RO */
#define CDNS_I2C_ADDR_OFFSET 0x08 /* I2C Address Register, RW */
#define CDNS_I2C_DATA_OFFSET 0x0C /* I2C Data Register, RW */
#define CDNS_I2C_ISR_OFFSET 0x10 /* IRQ Status Register, RW */
#define CDNS_I2C_XFER_SIZE_OFFSET 0x14 /* Transfer Size Register, RW */
#define CDNS_I2C_TIME_OUT_OFFSET 0x1C /* Time Out Register, RW */
#define CDNS_I2C_IER_OFFSET 0x24 /* IRQ Enable Register, WO */
#define CDNS_I2C_IDR_OFFSET 0x28 /* IRQ Disable Register, WO */
/* Control Register Bit mask definitions */
#define CDNS_I2C_CR_HOLD BIT(4) /* Hold Bus bit */
#define CDNS_I2C_CR_ACK_EN BIT(3)
#define CDNS_I2C_CR_NEA BIT(2)
#define CDNS_I2C_CR_MS BIT(1)
/* Read or Write Master transfer 0 = Transmitter, 1 = Receiver */
#define CDNS_I2C_CR_RW BIT(0)
/* 1 = Auto init FIFO to zeroes */
#define CDNS_I2C_CR_CLR_FIFO BIT(6)
#define CDNS_I2C_CR_DIVA_SHIFT 14
#define CDNS_I2C_CR_DIVA_MASK (3 << CDNS_I2C_CR_DIVA_SHIFT)
#define CDNS_I2C_CR_DIVB_SHIFT 8
#define CDNS_I2C_CR_DIVB_MASK (0x3f << CDNS_I2C_CR_DIVB_SHIFT)
/* Status Register Bit mask definitions */
#define CDNS_I2C_SR_BA BIT(8)
#define CDNS_I2C_SR_RXDV BIT(5)
/*
* I2C Address Register Bit mask definitions
* Normal addressing mode uses [6:0] bits. Extended addressing mode uses [9:0]
* bits. A write access to this register always initiates a transfer if the I2C
* is in master mode.
*/
#define CDNS_I2C_ADDR_MASK 0x000003FF /* I2C Address Mask */
/*
* I2C Interrupt Registers Bit mask definitions
* All the four interrupt registers (Status/Mask/Enable/Disable) have the same
* bit definitions.
*/
#define CDNS_I2C_IXR_ARB_LOST BIT(9)
#define CDNS_I2C_IXR_RX_UNF BIT(7)
#define CDNS_I2C_IXR_TX_OVF BIT(6)
#define CDNS_I2C_IXR_RX_OVF BIT(5)
#define CDNS_I2C_IXR_SLV_RDY BIT(4)
#define CDNS_I2C_IXR_TO BIT(3)
#define CDNS_I2C_IXR_NACK BIT(2)
#define CDNS_I2C_IXR_DATA BIT(1)
#define CDNS_I2C_IXR_COMP BIT(0)
#define CDNS_I2C_IXR_ALL_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \
CDNS_I2C_IXR_RX_UNF | \
CDNS_I2C_IXR_TX_OVF | \
CDNS_I2C_IXR_RX_OVF | \
CDNS_I2C_IXR_SLV_RDY | \
CDNS_I2C_IXR_TO | \
CDNS_I2C_IXR_NACK | \
CDNS_I2C_IXR_DATA | \
CDNS_I2C_IXR_COMP)
#define CDNS_I2C_IXR_ERR_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \
CDNS_I2C_IXR_RX_UNF | \
CDNS_I2C_IXR_TX_OVF | \
CDNS_I2C_IXR_RX_OVF | \
CDNS_I2C_IXR_NACK)
#define CDNS_I2C_ENABLED_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \
CDNS_I2C_IXR_RX_UNF | \
CDNS_I2C_IXR_TX_OVF | \
CDNS_I2C_IXR_RX_OVF | \
CDNS_I2C_IXR_NACK | \
CDNS_I2C_IXR_DATA | \
CDNS_I2C_IXR_COMP)
#define CDNS_I2C_TIMEOUT msecs_to_jiffies(1000)
#define CDNS_I2C_FIFO_DEPTH 16
/* FIFO depth at which the DATA interrupt occurs */
#define CDNS_I2C_DATA_INTR_DEPTH (CDNS_I2C_FIFO_DEPTH - 2)
#define CDNS_I2C_MAX_TRANSFER_SIZE 255
/* Transfer size in multiples of data interrupt depth */
#define CDNS_I2C_TRANSFER_SIZE (CDNS_I2C_MAX_TRANSFER_SIZE - 3)
#define DRIVER_NAME "cdns-i2c"
#define CDNS_I2C_SPEED_MAX 400000
#define CDNS_I2C_SPEED_DEFAULT 100000
#define CDNS_I2C_DIVA_MAX 4
#define CDNS_I2C_DIVB_MAX 64
#define CDNS_I2C_TIMEOUT_MAX 0xFF
#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
/**
* struct cdns_i2c - I2C device private data structure
* @membase: Base address of the I2C device
* @adap: I2C adapter instance
* @p_msg: Message pointer
* @err_status: Error status in Interrupt Status Register
* @xfer_done: Transfer complete status
* @p_send_buf: Pointer to transmit buffer
* @p_recv_buf: Pointer to receive buffer
* @suspended: Flag holding the device's PM status
* @send_count: Number of bytes still expected to send
* @recv_count: Number of bytes still expected to receive
* @irq: IRQ number
* @input_clk: Input clock to I2C controller
* @i2c_clk: Maximum I2C clock speed
* @bus_hold_flag: Flag used in repeated start for clearing HOLD bit
* @clk: Pointer to struct clk
* @clk_rate_change_nb: Notifier block for clock rate changes
*/
struct cdns_i2c {
void __iomem *membase;
struct i2c_adapter adap;
struct i2c_msg *p_msg;
int err_status;
struct completion xfer_done;
unsigned char *p_send_buf;
unsigned char *p_recv_buf;
u8 suspended;
unsigned int send_count;
unsigned int recv_count;
int irq;
unsigned long input_clk;
unsigned int i2c_clk;
unsigned int bus_hold_flag;
struct clk *clk;
struct notifier_block clk_rate_change_nb;
};
#define to_cdns_i2c(_nb) container_of(_nb, struct cdns_i2c, \
clk_rate_change_nb)
/**
 * cdns_i2c_clear_bus_hold() - Clear bus hold bit
 * @id: Pointer to driver data struct
 *
 * Drops the HOLD bit in the control register if it is currently set,
 * allowing the controller to issue a STOP condition.
 */
static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id)
{
	u32 ctrl = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);

	if (ctrl & CDNS_I2C_CR_HOLD) {
		ctrl &= ~CDNS_I2C_CR_HOLD;
		cdns_i2c_writereg(ctrl, CDNS_I2C_CR_OFFSET);
	}
}
/**
 * cdns_i2c_isr - Interrupt handler for the I2C device
 * @irq: irq number for the I2C device
 * @ptr: void pointer to cdns_i2c structure
 *
 * This function handles the data interrupt, transfer complete interrupt and
 * the error interrupts of the I2C device.
 *
 * Return: IRQ_HANDLED if any interrupt condition was serviced,
 *         IRQ_NONE otherwise
 */
static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
{
	unsigned int isr_status, avail_bytes;
	unsigned int bytes_to_recv, bytes_to_send;
	struct cdns_i2c *id = ptr;
	/* Signal completion only after everything is updated */
	int done_flag = 0;
	irqreturn_t status = IRQ_NONE;

	isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);

	/* Handling nack and arbitration lost interrupt */
	if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
		done_flag = 1;
		status = IRQ_HANDLED;
	}

	/* Handling Data interrupt: fires when the RX FIFO has reached the
	 * programmed threshold during a large receive. */
	if ((isr_status & CDNS_I2C_IXR_DATA) &&
			(id->recv_count >= CDNS_I2C_DATA_INTR_DEPTH)) {
		/* Always read data interrupt threshold bytes */
		bytes_to_recv = CDNS_I2C_DATA_INTR_DEPTH;
		id->recv_count -= CDNS_I2C_DATA_INTR_DEPTH;
		avail_bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);

		/*
		 * if the tranfer size register value is zero, then
		 * check for the remaining bytes and update the
		 * transfer size register.
		 */
		if (!avail_bytes) {
			if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
				cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
						CDNS_I2C_XFER_SIZE_OFFSET);
			else
				cdns_i2c_writereg(id->recv_count,
						CDNS_I2C_XFER_SIZE_OFFSET);
		}

		/* Process the data received */
		while (bytes_to_recv--)
			*(id->p_recv_buf)++ =
				cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);

		/* Release HOLD once the remainder fits in the FIFO. */
		if (!id->bus_hold_flag &&
				(id->recv_count <= CDNS_I2C_FIFO_DEPTH))
			cdns_i2c_clear_bus_hold(id);

		status = IRQ_HANDLED;
	}

	/* Handling Transfer Complete interrupt */
	if (isr_status & CDNS_I2C_IXR_COMP) {
		if (!id->p_recv_buf) {
			/*
			 * If the device is sending data If there is further
			 * data to be sent. Calculate the available space
			 * in FIFO and fill the FIFO with that many bytes.
			 */
			if (id->send_count) {
				avail_bytes = CDNS_I2C_FIFO_DEPTH -
				    cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
				if (id->send_count > avail_bytes)
					bytes_to_send = avail_bytes;
				else
					bytes_to_send = id->send_count;

				while (bytes_to_send--) {
					cdns_i2c_writereg(
						(*(id->p_send_buf)++),
						 CDNS_I2C_DATA_OFFSET);
					id->send_count--;
				}
			} else {
				/*
				 * Signal the completion of transaction and
				 * clear the hold bus bit if there are no
				 * further messages to be processed.
				 */
				done_flag = 1;
			}
			if (!id->send_count && !id->bus_hold_flag)
				cdns_i2c_clear_bus_hold(id);
		} else {
			if (!id->bus_hold_flag)
				cdns_i2c_clear_bus_hold(id);
			/*
			 * If the device is receiving data, then signal
			 * the completion of transaction and read the data
			 * present in the FIFO. Signal the completion of
			 * transaction.
			 */
			while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
					CDNS_I2C_SR_RXDV) {
				*(id->p_recv_buf)++ =
					cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
				id->recv_count--;
			}
			done_flag = 1;
		}

		status = IRQ_HANDLED;
	}

	/* Update the status for errors */
	id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
	if (id->err_status)
		status = IRQ_HANDLED;

	/* W1C: acknowledge everything we observed in this pass. */
	cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);

	/* Completion must be signalled last, after err_status and the
	 * buffers/counters are fully updated, since the waiter reads them. */
	if (done_flag)
		complete(&id->xfer_done);

	return status;
}
/**
 * cdns_i2c_mrecv - Prepare and start a master receive operation
 * @id: pointer to the i2c device structure
 *
 * Programs master-receive mode, sets up the transfer size register and
 * HOLD bit, then triggers the transfer by writing the slave address.
 * The remainder of the receive is driven from the ISR.
 */
static void cdns_i2c_mrecv(struct cdns_i2c *id)
{
	unsigned int ctrl_reg;
	unsigned int isr_status;

	id->p_recv_buf = id->p_msg->buf;
	id->recv_count = id->p_msg->len;

	/* Put the controller in master receive mode and clear the FIFO */
	ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
	ctrl_reg |= CDNS_I2C_CR_RW | CDNS_I2C_CR_CLR_FIFO;

	/* SMBus block read: length is unknown until the first byte arrives,
	 * so budget for the maximum block plus the length byte. */
	if (id->p_msg->flags & I2C_M_RECV_LEN)
		id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;

	/*
	 * Check for the message size against FIFO depth and set the
	 * 'hold bus' bit if it is greater than FIFO depth.
	 */
	if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
		ctrl_reg |= CDNS_I2C_CR_HOLD;

	cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);

	/* Clear the interrupts in interrupt status register (W1C). */
	isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
	cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);

	/*
	 * The no. of bytes to receive is checked against the limit of
	 * max transfer size. Set transfer size register with no of bytes
	 * receive if it is less than transfer size and transfer size if
	 * it is more. Enable the interrupts.
	 */
	if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
		cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
				  CDNS_I2C_XFER_SIZE_OFFSET);
	else
		cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET);

	/* Clear the bus hold flag if bytes to receive is less than FIFO size */
	if (!id->bus_hold_flag &&
		((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
		(id->recv_count <= CDNS_I2C_FIFO_DEPTH))
			cdns_i2c_clear_bus_hold(id);
	/* Set the slave address in address register - triggers operation */
	cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
						CDNS_I2C_ADDR_OFFSET);
	cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
}
/**
 * cdns_i2c_msend - Prepare and start a master send operation
 * @id: pointer to the i2c device
 *
 * Programs master-transmit mode, preloads the TX FIFO with as much of the
 * message as fits, then triggers the transfer by writing the slave
 * address.  Remaining bytes are fed from the ISR on COMP interrupts.
 */
static void cdns_i2c_msend(struct cdns_i2c *id)
{
	unsigned int avail_bytes;
	unsigned int bytes_to_send;
	unsigned int ctrl_reg;
	unsigned int isr_status;

	id->p_recv_buf = NULL;		/* tells the ISR this is a send */
	id->p_send_buf = id->p_msg->buf;
	id->send_count = id->p_msg->len;

	/* Set the controller in Master transmit mode and clear the FIFO. */
	ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
	ctrl_reg &= ~CDNS_I2C_CR_RW;
	ctrl_reg |= CDNS_I2C_CR_CLR_FIFO;

	/*
	 * Check for the message size against FIFO depth and set the
	 * 'hold bus' bit if it is greater than FIFO depth.
	 */
	if (id->send_count > CDNS_I2C_FIFO_DEPTH)
		ctrl_reg |= CDNS_I2C_CR_HOLD;
	cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);

	/* Clear the interrupts in interrupt status register (W1C). */
	isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
	cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);

	/*
	 * Calculate the space available in FIFO. Check the message length
	 * against the space available, and fill the FIFO accordingly.
	 * Enable the interrupts.
	 */
	avail_bytes = CDNS_I2C_FIFO_DEPTH -
				cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
	if (id->send_count > avail_bytes)
		bytes_to_send = avail_bytes;
	else
		bytes_to_send = id->send_count;

	while (bytes_to_send--) {
		cdns_i2c_writereg((*(id->p_send_buf)++), CDNS_I2C_DATA_OFFSET);
		id->send_count--;
	}

	/*
	 * Clear the bus hold flag if there is no more data
	 * and if it is the last message.
	 */
	if (!id->bus_hold_flag && !id->send_count)
		cdns_i2c_clear_bus_hold(id);
	/* Set the slave address in address register - triggers operation. */
	cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
						CDNS_I2C_ADDR_OFFSET);
	cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
}
/**
 * cdns_i2c_master_reset - Reset the interface
 * @adap: pointer to the i2c adapter driver instance
 *
 * This function cleans up the FIFOs, clears the hold bit and status
 * registers, and disables the interrupts.  Used to recover the
 * controller after a timeout or error.
 */
static void cdns_i2c_master_reset(struct i2c_adapter *adap)
{
	struct cdns_i2c *id = adap->algo_data;
	u32 regval;

	/* Disable the interrupts */
	cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET);
	/* Clear the hold bit and fifos */
	regval = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
	regval &= ~CDNS_I2C_CR_HOLD;
	regval |= CDNS_I2C_CR_CLR_FIFO;
	cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET);
	/* Update the transfer count register to zero */
	cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET);
	/* Clear the interrupt status register (write-1-to-clear) */
	regval = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
	cdns_i2c_writereg(regval, CDNS_I2C_ISR_OFFSET);
	/* Clear the status register */
	regval = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET);
	cdns_i2c_writereg(regval, CDNS_I2C_SR_OFFSET);
}
/**
 * cdns_i2c_process_msg - Process a single i2c_msg on the bus
 * @id: pointer to the driver data structure
 * @msg: the message to transfer
 * @adap: i2c adapter owning the controller
 *
 * Selects normal/extended addressing for this message, kicks off the
 * send or receive, and waits (with timeout) for the ISR to signal
 * completion.
 *
 * Return: 0 on success, -ETIMEDOUT on completion timeout, -EAGAIN on
 * arbitration loss.  Other error bits are left in id->err_status for
 * the caller to inspect.
 */
static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
		struct i2c_adapter *adap)
{
	int ret;
	u32 reg;

	id->p_msg = msg;
	id->err_status = 0;
	reinit_completion(&id->xfer_done);

	/* Check for the TEN Bit mode on each msg */
	reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
	if (msg->flags & I2C_M_TEN) {
		/* 10-bit address: clear NEA (normal extended addressing) */
		if (reg & CDNS_I2C_CR_NEA)
			cdns_i2c_writereg(reg & ~CDNS_I2C_CR_NEA,
					CDNS_I2C_CR_OFFSET);
	} else {
		if (!(reg & CDNS_I2C_CR_NEA))
			cdns_i2c_writereg(reg | CDNS_I2C_CR_NEA,
					CDNS_I2C_CR_OFFSET);
	}

	/* Check for the R/W flag on each msg */
	if (msg->flags & I2C_M_RD)
		cdns_i2c_mrecv(id);
	else
		cdns_i2c_msend(id);

	/* Wait for the signal of completion */
	ret = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
	if (!ret) {
		cdns_i2c_master_reset(adap);
		dev_err(id->adap.dev.parent,
				"timeout waiting on completion\n");
		return -ETIMEDOUT;
	}

	cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK,
			  CDNS_I2C_IDR_OFFSET);

	/* If it is bus arbitration error, try again */
	if (id->err_status & CDNS_I2C_IXR_ARB_LOST)
		return -EAGAIN;

	return 0;
}
/**
 * cdns_i2c_master_xfer - The main i2c transfer function
 * @adap: pointer to the i2c adapter driver instance
 * @msgs: pointer to the i2c message structure
 * @num: the number of messages to transfer
 *
 * Initiates the send/recv activity based on the transfer message received.
 *
 * Return: number of msgs processed on success, negative error otherwise
 */
static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
				int num)
{
	int ret, count;
	u32 reg;
	struct cdns_i2c *id = adap->algo_data;

	/* Check if the bus is free */
	if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA)
		return -EAGAIN;

	/*
	 * Set the flag to one when multiple messages are to be
	 * processed with a repeated start.  HOLD keeps the bus claimed
	 * between the messages.
	 */
	if (num > 1) {
		id->bus_hold_flag = 1;
		reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
		reg |= CDNS_I2C_CR_HOLD;
		cdns_i2c_writereg(reg, CDNS_I2C_CR_OFFSET);
	} else {
		id->bus_hold_flag = 0;
	}

	/* Process the msg one by one */
	for (count = 0; count < num; count++, msgs++) {
		/* Last message: allow HOLD to drop so a STOP is issued. */
		if (count == (num - 1))
			id->bus_hold_flag = 0;

		ret = cdns_i2c_process_msg(id, msgs, adap);
		if (ret)
			return ret;

		/* Report the other error interrupts to application */
		if (id->err_status) {
			cdns_i2c_master_reset(adap);

			if (id->err_status & CDNS_I2C_IXR_NACK)
				return -ENXIO;

			return -EIO;
		}
	}

	return num;
}
/**
 * cdns_i2c_func - Returns the supported features of the I2C driver
 * @adap: pointer to the i2c adapter structure
 *
 * Return: 32 bit value, each bit corresponding to a feature.  Plain I2C
 * and 10-bit addressing are supported, plus SMBus emulation except
 * SMBus Quick, and SMBus block data.
 */
static u32 cdns_i2c_func(struct i2c_adapter *adap)
{
	u32 caps = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;

	caps |= I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK;
	caps |= I2C_FUNC_SMBUS_BLOCK_DATA;

	return caps;
}
/* Algorithm callbacks registered with the I2C core. */
static const struct i2c_algorithm cdns_i2c_algo = {
	.master_xfer	= cdns_i2c_master_xfer,
	.functionality	= cdns_i2c_func,
};
/**
 * cdns_i2c_calc_divs - Calculate clock dividers
 * @f: I2C clock frequency
 * @input_clk: Input clock frequency
 * @a: First divider (return value)
 * @b: Second divider (return value)
 *
 * f is used as input and output variable. As input it is used as target I2C
 * frequency. On function exit f holds the actually resulting I2C frequency.
 * The hardware formula is Fscl = input_clk / (22 * (div_a+1) * (div_b+1));
 * the search picks the divider pair whose actual frequency is closest to,
 * but never above, the requested one.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int cdns_i2c_calc_divs(unsigned long *f, unsigned long input_clk,
		unsigned int *a, unsigned int *b)
{
	unsigned long fscl = *f, best_fscl = *f, actual_fscl, temp;
	unsigned int div_a, div_b, calc_div_a = 0, calc_div_b = 0;
	unsigned int last_error, current_error;

	/* calculate (divisor_a+1) x (divisor_b+1) */
	temp = input_clk / (22 * fscl);

	/*
	 * If the calculated value is zero or exceeds the divider range,
	 * the fscl input is out of range. Return error.
	 */
	if (!temp || (temp > (CDNS_I2C_DIVA_MAX * CDNS_I2C_DIVB_MAX)))
		return -EINVAL;

	last_error = -1;	/* unsigned: wraps to UINT_MAX sentinel */
	for (div_a = 0; div_a < CDNS_I2C_DIVA_MAX; div_a++) {
		div_b = DIV_ROUND_UP(input_clk, 22 * fscl * (div_a + 1));

		if ((div_b < 1) || (div_b > CDNS_I2C_DIVB_MAX))
			continue;
		div_b--;

		actual_fscl = input_clk / (22 * (div_a + 1) * (div_b + 1));

		/* Never overshoot the requested bus frequency. */
		if (actual_fscl > fscl)
			continue;

		/*
		 * actual_fscl <= fscl is guaranteed by the check above, so
		 * the error is simply the deficit (the old conditional's
		 * overshoot branch was unreachable).
		 */
		current_error = fscl - actual_fscl;

		if (last_error > current_error) {
			calc_div_a = div_a;
			calc_div_b = div_b;
			best_fscl = actual_fscl;
			last_error = current_error;
		}
	}

	*a = calc_div_a;
	*b = calc_div_b;
	*f = best_fscl;

	return 0;
}
/**
 * cdns_i2c_setclk - This function sets the serial clock rate for the I2C device
 * @clk_in: I2C clock input frequency in Hz
 * @id: Pointer to the I2C device structure
 *
 * The device must be idle rather than busy transferring data before setting
 * these device options.
 * The data rate is set by values in the control register.
 * The formula for determining the correct register values is
 * Fscl = Fpclk/(22 x (divisor_a+1) x (divisor_b+1))
 * See the hardware data sheet for a full explanation of setting the serial
 * clock rate. The clock can not be faster than the input clock divided by 22.
 * The two most common clock rates are 100KHz and 400KHz.
 *
 * Return: 0 on success, negative error otherwise
 */
static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
{
	unsigned long fscl = id->i2c_clk;
	unsigned int div_a, div_b, ctrl_reg;
	int err;

	err = cdns_i2c_calc_divs(&fscl, clk_in, &div_a, &div_b);
	if (err)
		return err;

	/* Program the new divider pair into the control register. */
	ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
	ctrl_reg &= ~(CDNS_I2C_CR_DIVA_MASK | CDNS_I2C_CR_DIVB_MASK);
	ctrl_reg |= (div_a << CDNS_I2C_CR_DIVA_SHIFT) |
		    (div_b << CDNS_I2C_CR_DIVB_SHIFT);
	cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);

	return 0;
}
/**
 * cdns_i2c_clk_notifier_cb - Clock rate change callback
 * @nb: Pointer to notifier block
 * @event: Notification reason
 * @data: Pointer to notification data object
 *
 * This function is called when the cdns_i2c input clock frequency changes.
 * The callback checks whether a valid bus frequency can be generated after the
 * change. If so, the change is acknowledged, otherwise the change is aborted.
 * New dividers are written to the HW in the pre- or post change notification
 * depending on the scaling direction.
 *
 * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
 *		to acknowledge the change, NOTIFY_DONE if the notification is
 *		considered irrelevant.
 */
static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
		event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct cdns_i2c *id = to_cdns_i2c(nb);

	/* While suspended the clock is off; nothing to reprogram. */
	if (id->suspended)
		return NOTIFY_OK;

	switch (event) {
	case PRE_RATE_CHANGE:
	{
		unsigned long input_clk = ndata->new_rate;
		/* Desired bus frequency stays fixed; only dividers change. */
		unsigned long fscl = id->i2c_clk;
		unsigned int div_a, div_b;
		int ret;

		ret = cdns_i2c_calc_divs(&fscl, input_clk, &div_a, &div_b);
		if (ret) {
			dev_warn(id->adap.dev.parent,
					"clock rate change rejected\n");
			return NOTIFY_STOP;
		}

		/* scale up: reprogram before the input clock speeds up so
		 * the bus never exceeds its target frequency. */
		if (ndata->new_rate > ndata->old_rate)
			cdns_i2c_setclk(ndata->new_rate, id);

		return NOTIFY_OK;
	}
	case POST_RATE_CHANGE:
		id->input_clk = ndata->new_rate;
		/* scale down: safe to reprogram after the clock slowed. */
		if (ndata->new_rate < ndata->old_rate)
			cdns_i2c_setclk(ndata->new_rate, id);
		return NOTIFY_OK;
	case ABORT_RATE_CHANGE:
		/* scale up: undo the divider change made in PRE_RATE_CHANGE */
		if (ndata->new_rate > ndata->old_rate)
			cdns_i2c_setclk(ndata->old_rate, id);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
/**
 * cdns_i2c_suspend - Suspend method for the driver
 * @_dev: Address of the platform_device structure
 *
 * Gates the controller input clock and marks the device as suspended so
 * the clock notifier callback leaves the hardware alone.
 *
 * Return: 0 always
 */
static int __maybe_unused cdns_i2c_suspend(struct device *_dev)
{
	struct platform_device *pdev;
	struct cdns_i2c *id;

	pdev = container_of(_dev, struct platform_device, dev);
	id = platform_get_drvdata(pdev);

	clk_disable(id->clk);
	id->suspended = 1;

	return 0;
}
/**
 * cdns_i2c_resume - Resume from suspend
 * @_dev: Address of the platform_device structure
 *
 * Re-enables the controller input clock and clears the suspended flag.
 *
 * Return: 0 on success and error value on error
 */
static int __maybe_unused cdns_i2c_resume(struct device *_dev)
{
	struct platform_device *pdev = container_of(_dev,
			struct platform_device, dev);
	struct cdns_i2c *id = platform_get_drvdata(pdev);
	int err;

	err = clk_enable(id->clk);
	if (err) {
		dev_err(_dev, "Cannot enable clock.\n");
		return err;
	}

	id->suspended = 0;
	return 0;
}
/* System sleep PM hooks bound into a dev_pm_ops table for the driver. */
static SIMPLE_DEV_PM_OPS(cdns_i2c_dev_pm_ops, cdns_i2c_suspend,
cdns_i2c_resume);
/**
 * cdns_i2c_probe - Platform registration call
 * @pdev: Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the i2c
 * device. User can modify the address mode to 10 bit address mode using the
 * ioctl call with option I2C_TENBIT.
 *
 * Return: 0 on success, negative error otherwise
 */
static int cdns_i2c_probe(struct platform_device *pdev)
{
	struct resource *r_mem;
	struct cdns_i2c *id;
	int ret;

	id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	platform_set_drvdata(pdev, id);

	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	id->membase = devm_ioremap_resource(&pdev->dev, r_mem);
	if (IS_ERR(id->membase))
		return PTR_ERR(id->membase);

	id->irq = platform_get_irq(pdev, 0);
	/*
	 * Fail early on a missing interrupt instead of handing a negative
	 * number to devm_request_irq() further down.
	 */
	if (id->irq < 0)
		return id->irq;

	id->adap.dev.of_node = pdev->dev.of_node;
	id->adap.algo = &cdns_i2c_algo;
	id->adap.timeout = CDNS_I2C_TIMEOUT;
	id->adap.retries = 3;	/* Default retry value. */
	id->adap.algo_data = id;
	id->adap.dev.parent = &pdev->dev;
	init_completion(&id->xfer_done);
	snprintf(id->adap.name, sizeof(id->adap.name),
		 "Cadence I2C at %08lx", (unsigned long)r_mem->start);

	id->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(id->clk)) {
		dev_err(&pdev->dev, "input clock not found.\n");
		return PTR_ERR(id->clk);
	}
	ret = clk_prepare_enable(id->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		return ret;
	}

	/* Track input clock rate changes so SCL can be kept valid. */
	id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb;
	if (clk_notifier_register(id->clk, &id->clk_rate_change_nb))
		dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
	id->input_clk = clk_get_rate(id->clk);

	/* Fall back to the default bus speed on a missing or bogus DT value. */
	ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
				   &id->i2c_clk);
	if (ret || (id->i2c_clk > CDNS_I2C_SPEED_MAX))
		id->i2c_clk = CDNS_I2C_SPEED_DEFAULT;

	cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS,
			  CDNS_I2C_CR_OFFSET);
	ret = cdns_i2c_setclk(id->input_clk, id);
	if (ret) {
		dev_err(&pdev->dev, "invalid SCL clock: %u Hz\n", id->i2c_clk);
		ret = -EINVAL;
		goto err_clk_notifier;
	}

	ret = devm_request_irq(&pdev->dev, id->irq, cdns_i2c_isr, 0,
			       DRIVER_NAME, id);
	if (ret) {
		dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
		goto err_clk_notifier;
	}

	ret = i2c_add_adapter(&id->adap);
	if (ret < 0) {
		dev_err(&pdev->dev, "reg adap failed: %d\n", ret);
		goto err_clk_notifier;
	}

	/*
	 * Cadence I2C controller has a bug wherein it generates
	 * invalid read transaction after HW timeout in master receiver mode.
	 * HW timeout is not used by this driver and the interrupt is disabled.
	 * But the feature itself cannot be disabled. Hence maximum value
	 * is written to this register to reduce the chances of error.
	 */
	cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);

	dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
		 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);

	return 0;

err_clk_notifier:
	/*
	 * Mirror cdns_i2c_remove(): the clock notifier registered above was
	 * previously leaked on these error paths.
	 */
	clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
	clk_disable_unprepare(id->clk);
	return ret;
}
/**
 * cdns_i2c_remove - Unregister the device after releasing the resources
 * @pdev: Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 *
 * Return: 0 always
 */
static int cdns_i2c_remove(struct platform_device *pdev)
{
struct cdns_i2c *id = platform_get_drvdata(pdev);
/* Tear down in reverse order of probe: adapter, clk notifier, clock. */
i2c_del_adapter(&id->adap);
clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
clk_disable_unprepare(id->clk);
return 0;
}
/* Device-tree "compatible" strings handled by this driver. */
static const struct of_device_id cdns_i2c_of_match[] = {
{ .compatible = "cdns,i2c-r1p10", },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, cdns_i2c_of_match);
/* Platform driver glue: probe/remove plus PM and OF match data. */
static struct platform_driver cdns_i2c_drv = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = cdns_i2c_of_match,
.pm = &cdns_i2c_dev_pm_ops,
},
.probe = cdns_i2c_probe,
.remove = cdns_i2c_remove,
};
module_platform_driver(cdns_i2c_drv);
MODULE_AUTHOR("Xilinx Inc.");
MODULE_DESCRIPTION("Cadence I2C bus driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
fenten/701kernel | drivers/net/wireless/orinoco/scan.c | 531 | 5409 | /* Helpers for managing scan queues
*
* See copyright notice in main.c
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include "hermes.h"
#include "orinoco.h"
#include "main.h"
#include "scan.h"
/*
 * Convert a raw hermes signal level to dBm: clamp to the
 * [MIN_SIGNAL_LEVEL, MAX_SIGNAL_LEVEL] window, then subtract the 0 dBm
 * offset. SIGNAL_TO_MBM scales to the mBm (1/100 dBm) units that
 * cfg80211_inform_bss() expects.
 */
#define ZERO_DBM_OFFSET 0x95
#define MAX_SIGNAL_LEVEL 0x8A
#define MIN_SIGNAL_LEVEL 0x2F
#define SIGNAL_TO_DBM(x) \
(clamp_t(s32, (x), MIN_SIGNAL_LEVEL, MAX_SIGNAL_LEVEL) \
- ZERO_DBM_OFFSET)
#define SIGNAL_TO_MBM(x) (SIGNAL_TO_DBM(x) * 100)
/*
 * Build a supported-rates IE from the Symbol firmware's little-endian
 * rate list. Layout written into @buf: EID, count, then up to five rate
 * bytes. Returns the total IE length in bytes.
 */
static int symbol_build_supp_rates(u8 *buf, const __le16 *rates)
{
	int count = 0;

	buf[0] = WLAN_EID_SUPP_RATES;
	while (count < 5) {
		u8 rate = le16_to_cpu(rates[count]);

		/* The firmware rate list is NULL terminated */
		if (rate == 0x0)
			break;
		buf[count + 2] = rate;
		count++;
	}
	buf[1] = count;

	return count + 2;
}
/*
 * Build a supported-rates IE (and, when needed, an extended-supported-rates
 * IE) from the Prism firmware rate list. Layout: buf[0]=EID, buf[1]=count,
 * rates at buf[2..9]; a 9th/10th rate spills into a second IE at buf[10..].
 * Returns the total number of bytes written to @buf.
 * NOTE(review): reading rates[i] with i == 8 below assumes the source array
 * holds at least 10 entries - confirm against the hermes scan structs.
 */
static int prism_build_supp_rates(u8 *buf, const u8 *rates)
{
int i;
buf[0] = WLAN_EID_SUPP_RATES;
for (i = 0; i < 8; i++) {
/* NULL terminated */
if (rates[i] == 0x0)
break;
buf[i + 2] = rates[i];
}
buf[1] = i;
/* We might still have another 2 rates, which need to go in
* extended supported rates */
if (i == 8 && rates[i] > 0) {
buf[10] = WLAN_EID_EXT_SUPP_RATES;
for (; i < 10; i++) {
/* NULL terminated */
if (rates[i] == 0x0)
break;
buf[i + 2] = rates[i];
}
/* Second IE's length: only the rates beyond the first eight. */
buf[11] = i - 8;
}
/* i + 2 = one IE header + rates; i + 4 accounts for both headers. */
return (i < 8) ? i + 2 : i + 4;
}
/*
 * Translate one hermes scan atom into a cfg80211 BSS entry.
 * The firmware does not hand back raw IEs, so SSID and rate IEs are
 * reconstructed into ie_buf before calling cfg80211_inform_bss().
 */
static void orinoco_add_hostscan_result(struct orinoco_private *priv,
const union hermes_scan_info *bss)
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
u8 *ie;
/* Room for SSID IE (2 + 32) plus rate IEs built below. */
u8 ie_buf[46];
u64 timestamp;
s32 signal;
u16 capability;
u16 beacon_interval;
int ie_len;
int freq;
int len;
len = le16_to_cpu(bss->a.essid_len);
/* Reconstruct SSID and bitrate IEs to pass up */
ie_buf[0] = WLAN_EID_SSID;
ie_buf[1] = len;
memcpy(&ie_buf[2], bss->a.essid, len);
ie = ie_buf + len + 2;
ie_len = ie_buf[1] + 2;
/* Rate list layout differs per firmware flavour. */
switch (priv->firmware_type) {
case FIRMWARE_TYPE_SYMBOL:
ie_len += symbol_build_supp_rates(ie, bss->s.rates);
break;
case FIRMWARE_TYPE_INTERSIL:
ie_len += prism_build_supp_rates(ie, bss->p.rates);
break;
case FIRMWARE_TYPE_AGERE:
default:
break;
}
freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
channel = ieee80211_get_channel(wiphy, freq);
/* The firmware scan atom carries no TSF; report 0. */
timestamp = 0;
capability = le16_to_cpu(bss->a.capabilities);
beacon_interval = le16_to_cpu(bss->a.beacon_interv);
signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level));
cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
capability, beacon_interval, ie_buf, ie_len,
signal, GFP_KERNEL);
}
/*
 * Report an Agere extended-scan result to cfg80211.
 * @len is the total frame length; the raw IE blob trails the fixed
 * header, so the channel is recovered from the DS params IE.
 */
void orinoco_add_extscan_result(struct orinoco_private *priv,
struct agere_ext_scan_info *bss,
size_t len)
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
u8 *ie;
u64 timestamp;
s32 signal;
u16 capability;
u16 beacon_interval;
size_t ie_len;
int chan, freq;
ie_len = len - sizeof(*bss);
/* Pull the channel number out of the DS params IE, if present. */
ie = orinoco_get_ie(bss->data, ie_len, WLAN_EID_DS_PARAMS);
chan = ie ? ie[2] : 0;
freq = ieee80211_dsss_chan_to_freq(chan);
channel = ieee80211_get_channel(wiphy, freq);
timestamp = le64_to_cpu(bss->timestamp);
capability = le16_to_cpu(bss->capabilities);
beacon_interval = le16_to_cpu(bss->beacon_interval);
/* Hand the whole trailing IE blob to cfg80211 unmodified. */
ie = bss->data;
signal = SIGNAL_TO_MBM(bss->level);
cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
capability, beacon_interval, ie, ie_len,
signal, GFP_KERNEL);
}
/*
 * Parse a buffer of firmware scan results into individual atoms and feed
 * each one to orinoco_add_hostscan_result(). Atom size and leading offset
 * depend on the firmware flavour; on any parse failure the pending
 * cfg80211 scan request is completed as aborted.
 */
void orinoco_add_hostscan_results(struct orinoco_private *priv,
unsigned char *buf,
size_t len)
{
int offset; /* In the scan data */
size_t atom_len;
bool abort = false;
switch (priv->firmware_type) {
case FIRMWARE_TYPE_AGERE:
atom_len = sizeof(struct agere_scan_apinfo);
offset = 0;
break;
case FIRMWARE_TYPE_SYMBOL:
/* Lack of documentation necessitates this hack.
* Different firmwares have 68 or 76 byte long atoms.
* We try modulo first. If the length divides by both,
* we check what would be the channel in the second
* frame for a 68-byte atom. 76-byte atoms have 0 there.
* Valid channel cannot be 0. */
if (len % 76)
atom_len = 68;
else if (len % 68)
atom_len = 76;
else if (len >= 1292 && buf[68] == 0)
atom_len = 76;
else
atom_len = 68;
offset = 0;
break;
case FIRMWARE_TYPE_INTERSIL:
offset = 4;
if (priv->has_hostscan) {
/* Hostscan firmware prefixes the data with the atom size. */
atom_len = le16_to_cpup((__le16 *)buf);
/* Sanity check for atom_len */
if (atom_len < sizeof(struct prism2_scan_apinfo)) {
printk(KERN_ERR "%s: Invalid atom_len in scan "
"data: %zu\n", priv->ndev->name,
atom_len);
abort = true;
goto scan_abort;
}
} else
atom_len = offsetof(struct prism2_scan_apinfo, atim);
break;
default:
abort = true;
goto scan_abort;
}
/* Check that we got an whole number of atoms */
if ((len - offset) % atom_len) {
printk(KERN_ERR "%s: Unexpected scan data length %zu, "
"atom_len %zu, offset %d\n", priv->ndev->name, len,
atom_len, offset);
abort = true;
goto scan_abort;
}
/* Process the entries one by one */
for (; offset + atom_len <= len; offset += atom_len) {
union hermes_scan_info *atom;
atom = (union hermes_scan_info *) (buf + offset);
orinoco_add_hostscan_result(priv, atom);
}
scan_abort:
/* Complete any pending cfg80211 scan, flagging success/abort. */
if (priv->scan_request) {
cfg80211_scan_done(priv->scan_request, abort);
priv->scan_request = NULL;
}
}
| gpl-2.0 |
sudosurootdev/linux | drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 787 | 18380 | /**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>
/*
 * vmw_fifo_have_3d - Check whether the device can accept 3D commands.
 *
 * Probes, in order: the SVGA 3D capability bit, the guest-backed-object
 * devcap register, the extended fifo's 3D hardware version word and
 * finally screen object support.
 */
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_3D))
return false;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
uint32_t result;
if (!dev_priv->has_mob)
return false;
/* DEV_CAP index write + read must be atomic w.r.t. other users. */
spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
spin_unlock(&dev_priv->cap_lock);
return (result != 0);
}
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
/* The HW version word must lie inside the fifo register area. */
fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
hwversion = ioread32(fifo_mem +
((fifo->capabilities &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));
if (hwversion == 0)
return false;
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
return false;
/* Non-Screen Object path does not support surfaces */
if (!dev_priv->sou_priv)
return false;
return true;
}
/*
 * vmw_fifo_have_pitchlock - Check whether the fifo advertises the
 * pitchlock capability. Requires the extended fifo to be present.
 */
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	return (ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES) &
		SVGA_FIFO_CAP_PITCHLOCK) != 0;
}
/*
 * vmw_fifo_init - Bring up the command fifo.
 *
 * Allocates the static bounce buffer, saves the register state that
 * vmw_fifo_release() restores, programs the fifo MIN/MAX/NEXT_CMD/STOP
 * pointers and emits an initial fence.
 */
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t dummy;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
return -ENOMEM;
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
/*
* Allow mapping the first page read-only to user-space.
*/
DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
/* Save device state so vmw_fifo_release() can restore it. */
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
min <<= 2;
if (min < PAGE_SIZE)
min = PAGE_SIZE;
iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
/* MIN/MAX must be visible to the device before the queue pointers. */
wmb();
iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_marker_queue_init(&fifo->marker_queue);
/* Prime the fifo with an initial fence; its seqno is discarded. */
return vmw_fifo_send_fence(dev_priv, &dummy);
}
/*
 * vmw_fifo_ping_host - Kick the host to start processing fifo commands.
 *
 * Only writes SVGA_REG_SYNC when the fifo was idle (BUSY == 0), marking
 * it busy first, so a processing host is not pinged redundantly.
 */
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
static DEFINE_SPINLOCK(ping_lock);
unsigned long irq_flags;
/*
* The ping_lock is needed because we don't have an atomic
* test-and-set of the SVGA_FIFO_BUSY register.
*/
spin_lock_irqsave(&ping_lock, irq_flags);
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
spin_unlock_irqrestore(&ping_lock, irq_flags);
}
/*
 * vmw_fifo_release - Tear down the fifo.
 *
 * Busy-waits for the device to drain, restores the register state saved
 * in vmw_fifo_init() and frees the bounce buffers.
 */
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
/* Force a sync, then spin until the device reports idle. */
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
;
dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
/* Restore the state captured by vmw_fifo_init(). */
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
vmw_write(dev_priv, SVGA_REG_ENABLE,
dev_priv->enable_state);
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
fifo->static_buffer = NULL;
}
if (likely(fifo->dynamic_buffer != NULL)) {
vfree(fifo->dynamic_buffer);
fifo->dynamic_buffer = NULL;
}
}
/*
 * vmw_fifo_is_full - Check whether @bytes do NOT fit in the fifo now.
 *
 * Free space in the circular fifo is the segment from next_cmd to max
 * plus the wrapped segment from min to stop.
 */
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
return ((max - next_cmd) + (stop - min) <= bytes);
}
/*
 * vmw_fifo_wait_noirq - Poll for fifo space on devices without IRQ support.
 *
 * Sleeps one tick per iteration until space frees up, the timeout expires
 * (-EBUSY) or, if @interruptible, a signal arrives (-ERESTARTSYS).
 */
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
int ret = 0;
unsigned long end_jiffies = jiffies + timeout;
DEFINE_WAIT(__wait);
DRM_INFO("Fifo wait noirq.\n");
for (;;) {
prepare_to_wait(&dev_priv->fifo_queue, &__wait,
(interruptible) ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (!vmw_fifo_is_full(dev_priv, bytes))
break;
if (time_after_eq(jiffies, end_jiffies)) {
ret = -EBUSY;
DRM_ERROR("SVGA device lockup.\n");
break;
}
schedule_timeout(1);
if (interruptible && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
finish_wait(&dev_priv->fifo_queue, &__wait);
/* Let any other pollers re-check the fifo state. */
wake_up_all(&dev_priv->fifo_queue);
DRM_INFO("Fifo noirq exit.\n");
return ret;
}
/*
 * vmw_fifo_wait - Wait until @bytes fit in the fifo.
 *
 * Fast path returns immediately when space is available. Otherwise the
 * host is pinged and, when the device supports IRQ masking, the
 * FIFO_PROGRESS interrupt is enabled for the duration of the wait;
 * devices without it fall back to vmw_fifo_wait_noirq() polling.
 */
static int vmw_fifo_wait(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
long ret = 1L;
unsigned long irq_flags;
if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
return 0;
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
/* First waiter unmasks the FIFO_PROGRESS interrupt. */
spin_lock(&dev_priv->waiter_lock);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
spin_unlock(&dev_priv->waiter_lock);
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
else
ret = wait_event_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
/* 0 == timed out; > 0 == condition met before timeout. */
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
/* Last waiter out masks the interrupt again. */
spin_lock(&dev_priv->waiter_lock);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
spin_unlock(&dev_priv->waiter_lock);
return ret;
}
/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on three conditions:
 * If it timeouts waiting for fifo space, if @bytes is larger than the
 * available fifo space, or if a bounce buffer allocation fails.
 *
 * On success the fifo_mutex is held until the matching vmw_fifo_commit();
 * on failure it is released before returning.
 *
 * Returns:
 * Pointer to the fifo, or null on error (possible hardware hang).
 */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	/* A request as large as the whole fifo can never be satisfied. */
	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			/* Contiguous room up to max (or exactly to max with
			 * slack at the bottom)? */
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;
			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;
		} else {
			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;
				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				/*
				 * Bail through the error path on allocation
				 * failure. Returning NULL directly here
				 * would leave fifo_mutex held and
				 * reserved_size nonzero, deadlocking the
				 * next reserve attempt.
				 */
				if (unlikely(fifo_state->dynamic_buffer == NULL))
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}
/*
 * vmw_fifo_res_copy - Copy a committed bounce buffer into the fifo using
 * the RESERVED register. The copy is split in two when it wraps at @max,
 * with the remainder continuing at @min.
 */
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
uint32_t chunk_size = max - next_cmd;
uint32_t rest;
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
if (bytes < chunk_size)
chunk_size = bytes;
/* Publish the reservation before the data lands in the fifo. */
iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
mb();
memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
rest = bytes - chunk_size;
if (rest)
memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
rest);
}
/*
 * vmw_fifo_slow_copy - Bounce-buffer copy for fifos without
 * SVGA_FIFO_CAP_RESERVE: dwords are written one at a time, advancing
 * NEXT_CMD after every write (with wrap at @max back to @min).
 */
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
while (bytes > 0) {
iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
next_cmd += sizeof(uint32_t);
if (unlikely(next_cmd == max))
next_cmd = min;
/* Order the data write before the NEXT_CMD update. */
mb();
iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
mb();
bytes -= sizeof(uint32_t);
}
}
/*
 * vmw_fifo_commit - Commit @bytes previously obtained from
 * vmw_fifo_reserve(): flush any bounce buffer into the fifo, advance
 * NEXT_CMD (with wrap), clear the RESERVED register, ping the host and
 * drop the fifo_mutex taken by the reserve.
 */
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
/* Commands must be dword sized and within the reservation. */
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
fifo_state->reserved_size = 0;
if (fifo_state->using_bounce_buffer) {
if (reserveable)
vmw_fifo_res_copy(fifo_state, fifo_mem,
next_cmd, max, min, bytes);
else
vmw_fifo_slow_copy(fifo_state, fifo_mem,
next_cmd, max, min, bytes);
if (fifo_state->dynamic_buffer) {
vfree(fifo_state->dynamic_buffer);
fifo_state->dynamic_buffer = NULL;
}
}
down_write(&fifo_state->rwsem);
/* vmw_fifo_slow_copy() already advanced NEXT_CMD dword by dword. */
if (fifo_state->using_bounce_buffer || reserveable) {
next_cmd += bytes;
if (next_cmd >= max)
next_cmd -= max - min;
mb();
iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
}
if (reserveable)
iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
mb();
up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
}
/*
 * vmw_fifo_send_fence - Emit a fence command and return its sequence
 * number through @seqno. If fifo space cannot be reserved, the current
 * marker seqno is returned and a fallback wait is issued. When the fifo
 * lacks SVGA_FIFO_CAP_FENCE the command is dropped and the irq code
 * emulates fencing.
 */
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
void *fm;
int ret = 0;
uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
false, 3*HZ);
goto out_err;
}
/* Seqno 0 is reserved; skip it on wrap-around. */
do {
*seqno = atomic_add_return(1, &dev_priv->marker_seq);
} while (*seqno == 0);
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
/*
* Don't request hardware to send a fence. The
* waiting code in vmwgfx_irq.c will emulate this.
*/
vmw_fifo_commit(dev_priv, 0);
return 0;
}
*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
cmd_fence = (struct svga_fifo_cmd_fence *)
((unsigned long)fm + sizeof(__le32));
iowrite32(*seqno, &cmd_fence->fence);
vmw_fifo_commit(dev_priv, bytes);
(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
vmw_update_seqno(dev_priv, fifo_state);
out_err:
return ret;
}
/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/*
* A query wait without a preceding query end will
* actually finish all queries for this cid
* without writing to the query result structure.
*/
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
} *cmd;
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Out of fifo space for dummy query.\n");
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
/* Point the result at the dummy bo: framebuffer GMR if in VRAM,
* otherwise the bo's own GMR id. */
if (bo->mem.mem_type == TTM_PL_VRAM) {
cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
cmd->body.guestResult.offset = bo->offset;
} else {
cmd->body.guestResult.gmrId = bo->mem.start;
cmd->body.guestResult.offset = 0;
}
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
}
/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/*
* A query wait without a preceding query end will
* actually finish all queries for this cid
* without writing to the query result structure.
*/
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;
} *cmd;
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Out of fifo space for dummy query.\n");
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
/* The dummy query bo is expected to live in MOB memory here. */
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
cmd->body.mobid = bo->mem.start;
cmd->body.offset = 0;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
}
/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A Query results structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object. And that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/* Dispatch on guest-backed-object support. */
if (dev_priv->has_mob)
return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}
| gpl-2.0 |
BrokenDev/kernel_samsung_msm8660-common | net/netfilter/nf_conntrack_expect.c | 787 | 17540 | /* Expectation handling for nf_conntrack. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
/* Number of buckets in the expectation hash table. */
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
/* Upper bound on the number of expectations in the table. */
unsigned int nf_ct_expect_max __read_mostly;
/* Slab cache for struct nf_conntrack_expect objects. */
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
/* Expectations created from userspace (NF_CT_EXPECT_USERSPACE). */
static HLIST_HEAD(nf_ct_userspace_expect_list);
/* nf_conntrack_expect helper functions */

/*
 * Unlink @exp from the global expectation hash and from its master
 * conntrack's list, fire an IPEXP_DESTROY event and drop the list
 * reference. The timeout timer must already have been stopped; callers
 * in this file invoke this under nf_conntrack_lock.
 */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
u32 pid, int report)
{
struct nf_conn_help *master_help = nfct_help(exp->master);
struct net *net = nf_ct_exp_net(exp);
NF_CT_ASSERT(!timer_pending(&exp->timeout));
hlist_del_rcu(&exp->hnode);
net->ct.expect_count--;
hlist_del(&exp->lnode);
/* Userspace expectations are not accounted per helper class. */
if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
master_help->expecting[exp->class]--;
nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
nf_ct_expect_put(exp);
NF_CT_STAT_INC(net, expect_delete);
}
/*
 * Timer callback: unlink an expired expectation under nf_conntrack_lock
 * and drop the reference the timer held on it.
 */
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
struct nf_conntrack_expect *exp = (void *)ul_expect;
spin_lock_bh(&nf_conntrack_lock);
nf_ct_unlink_expect(exp);
spin_unlock_bh(&nf_conntrack_lock);
nf_ct_expect_put(exp);
}
/*
 * Hash the destination part of @tuple into an expectation table bucket.
 * The multiply-shift at the end maps the 32-bit jhash value onto
 * [0, nf_ct_expect_hsize) without a modulo.
 */
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
/* Lazily seed the shared hash random to resist hash-flood attacks. */
if (unlikely(!nf_conntrack_hash_rnd)) {
init_nf_conntrack_hash_rnd();
}
hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
(((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
(__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
return ((u64)hash * nf_ct_expect_hsize) >> 32;
}
/*
 * Find an expectation matching @tuple in @zone without taking a
 * reference. Walks the bucket under RCU; callers must hold
 * rcu_read_lock() (see nf_ct_expect_find_get()).
 */
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
struct hlist_node *n;
unsigned int h;
if (!net->ct.expect_count)
return NULL;
h = nf_ct_expect_dst_hash(tuple);
hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
nf_ct_zone(i->master) == zone)
return i;
}
return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
/* Just find a expectation corresponding to a tuple, taking a reference.
 * Returns NULL if none is found or if the entry is already being freed. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
rcu_read_lock();
i = __nf_ct_expect_find(net, zone, tuple);
/* Only return entries whose refcount hasn't already dropped to 0. */
if (i && !atomic_inc_not_zero(&i->use))
i = NULL;
rcu_read_unlock();
return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
/* If an expectation for this connection is found, it gets delete from
 * global list then returned. Permanent expectations are only
 * referenced, not unlinked, so they can match again later. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i, *exp = NULL;
struct hlist_node *n;
unsigned int h;
if (!net->ct.expect_count)
return NULL;
h = nf_ct_expect_dst_hash(tuple);
hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
nf_ct_zone(i->master) == zone) {
exp = i;
break;
}
}
if (!exp)
return NULL;
/* If master is not in hash table yet (ie. packet hasn't left
this machine yet), how can other end know about expected?
Hence these are not the droids you are looking for (if
master ct never got confirmed, we'd hold a reference to it
and weird things would happen to future packets). */
if (!nf_ct_is_confirmed(exp->master))
return NULL;
if (exp->flags & NF_CT_EXPECT_PERMANENT) {
atomic_inc(&exp->use);
return exp;
} else if (del_timer(&exp->timeout)) {
/* We won the race against the timeout handler; unlink it. */
nf_ct_unlink_expect(exp);
return exp;
}
return NULL;
}
/* delete all expectations for this conntrack.  Only entries whose
 * timer was still pending are unlinked here; expired ones are being
 * handled by their timeout callback. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
    struct nf_conn_help *help = nfct_help(ct);
    struct nf_conntrack_expect *exp;
    struct hlist_node *n, *next;

    /* Optimization: most connection never expect any others. */
    if (!help)
        return;

    hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
        if (del_timer(&exp->timeout)) {
            nf_ct_unlink_expect(exp);
            nf_ct_expect_put(exp); /* drop the timer's reference */
        }
    }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
/* Would two expected things clash?  Returns non-zero if @a and @b could
 * both match the same incoming packet. */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                   const struct nf_conntrack_expect *b)
{
    /* Part covered by intersection of masks must be unequal,
       otherwise they clash */
    struct nf_conntrack_tuple_mask intersect_mask;
    int count;

    intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

    /* Intersect the layer-3 source-address masks word by word. */
    for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
        intersect_mask.src.u3.all[count] =
            a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
    }

    return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}
/* True iff @a and @b describe the same expectation: same master
 * conntrack, same class, identical tuple and mask, matching zone. */
static inline int expect_matches(const struct nf_conntrack_expect *a,
                 const struct nf_conntrack_expect *b)
{
    if (a->master != b->master)
        return 0;
    if (a->class != b->class)
        return 0;
    if (!nf_ct_tuple_equal(&a->tuple, &b->tuple))
        return 0;
    if (!nf_ct_tuple_mask_equal(&a->mask, &b->mask))
        return 0;
    return nf_ct_zone(a->master) == nf_ct_zone(b->master);
}
/* Generally a bad idea to call this: could have matched already.
 * Removes @exp from the table if its timer had not fired yet. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
    spin_lock_bh(&nf_conntrack_lock);
    if (del_timer(&exp->timeout)) {
        nf_ct_unlink_expect(exp);
        nf_ct_expect_put(exp); /* drop the timer's reference */
    }
    spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself.
 *
 * Allocates a new expectation for master @me with a single (caller's)
 * reference; returns NULL on allocation failure. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
    struct nf_conntrack_expect *new;

    new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
    if (!new)
        return NULL;

    new->master = me;
    atomic_set(&new->use, 1); /* caller's reference */
    return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
/* Initialize @exp's tuple and mask from the supplied address/port
 * pieces.  A NULL @saddr or @src wildcards that field (zero mask);
 * unused tail bytes of the address unions are zeroed so that
 * nf_ct_tuple_equal() comparisons over the full union are well defined. */
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
               u_int8_t family,
               const union nf_inet_addr *saddr,
               const union nf_inet_addr *daddr,
               u_int8_t proto, const __be16 *src, const __be16 *dst)
{
    const int len = (family == AF_INET) ? 4 : 16;

    exp->flags = 0;
    exp->class = class;
    exp->expectfn = NULL;
    exp->helper = NULL;
    exp->tuple.src.l3num = family;
    exp->tuple.dst.protonum = proto;

    if (!saddr) {
        /* Wildcard source address. */
        memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
        memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
    } else {
        memcpy(&exp->tuple.src.u3, saddr, len);
        if (sizeof(exp->tuple.src.u3) > len)
            /* address needs to be cleared for nf_ct_tuple_equal */
            memset((void *)&exp->tuple.src.u3 + len, 0x00,
                   sizeof(exp->tuple.src.u3) - len);
        memset(&exp->mask.src.u3, 0xFF, len);
        if (sizeof(exp->mask.src.u3) > len)
            memset((void *)&exp->mask.src.u3 + len, 0x00,
                   sizeof(exp->mask.src.u3) - len);
    }

    /* Source port: exact match if given, otherwise wildcard. */
    exp->tuple.src.u.all = src ? *src : 0;
    exp->mask.src.u.all = src ? htons(0xFFFF) : 0;

    memcpy(&exp->tuple.dst.u3, daddr, len);
    if (sizeof(exp->tuple.dst.u3) > len)
        /* address needs to be cleared for nf_ct_tuple_equal */
        memset((void *)&exp->tuple.dst.u3 + len, 0x00,
               sizeof(exp->tuple.dst.u3) - len);

    exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
/* RCU callback: actually free an expectation after the grace period. */
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
    struct nf_conntrack_expect *exp;

    exp = container_of(head, struct nf_conntrack_expect, rcu);
    kmem_cache_free(nf_ct_expect_cachep, exp);
}
/* Drop one reference on @exp; the last one frees it via RCU so that
 * lockless readers in __nf_ct_expect_find() stay safe. */
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
    if (atomic_dec_and_test(&exp->use))
        call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
/* Insert @exp into the hash table and the master's (or userspace) list
 * and arm its timeout.  Caller holds nf_conntrack_lock (see the
 * rcu_dereference_protected() annotation below). */
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
    struct nf_conn_help *master_help = nfct_help(exp->master);
    struct net *net = nf_ct_exp_net(exp);
    const struct nf_conntrack_expect_policy *p;
    unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

    /* two references : one for hash insert, one for the timer */
    atomic_add(2, &exp->use);

    if (master_help) {
        hlist_add_head(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;
    } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
        /* Helperless userspace expectations live on a global list. */
        hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

    hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
    net->ct.expect_count++;

    setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
            (unsigned long)exp);
    if (master_help) {
        /* Timeout comes from the helper's per-class policy. */
        p = &rcu_dereference_protected(
            master_help->helper,
            lockdep_is_held(&nf_conntrack_lock)
            )->expect_policy[exp->class];
        exp->timeout.expires = jiffies + p->timeout * HZ;
    }
    add_timer(&exp->timeout);

    NF_CT_STAT_INC(net, expect_create);
}
/* Race with expectations being used means we could have none to find; OK.
 * Evict the oldest expectation of @new's class from @master to make
 * room (the list head is newest, so the last matching entry is oldest). */
static void evict_oldest_expect(struct nf_conn *master,
                struct nf_conntrack_expect *new)
{
    struct nf_conn_help *master_help = nfct_help(master);
    struct nf_conntrack_expect *exp, *last = NULL;
    struct hlist_node *n;

    hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
        if (exp->class == new->class)
            last = exp;
    }

    if (last && del_timer(&last->timeout)) {
        nf_ct_unlink_expect(last);
        nf_ct_expect_put(last);
    }
}
/* Validate @expect before insertion; caller holds nf_conntrack_lock.
 *
 * Return values:
 *   1          - OK to insert
 *   -ESHUTDOWN - kernel-originated expectation without a helper
 *   -EBUSY     - clashes with a different existing expectation
 *   -EMFILE    - per-helper or global expectation limit reached
 *
 * An identical existing expectation (expect_matches) is removed so the
 * new one can replace it. */
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
    const struct nf_conntrack_expect_policy *p;
    struct nf_conntrack_expect *i;
    struct nf_conn *master = expect->master;
    struct nf_conn_help *master_help = nfct_help(master);
    struct net *net = nf_ct_exp_net(expect);
    struct hlist_node *n, *next;
    unsigned int h;
    int ret = 1;

    /* Don't allow expectations created from kernel-space with no helper.
     * (Simplified: "!A || (A && !B)" is just "!A || !B".) */
    if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
        (!master_help || !master_help->helper)) {
        ret = -ESHUTDOWN;
        goto out;
    }
    h = nf_ct_expect_dst_hash(&expect->tuple);
    hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
        if (expect_matches(i, expect)) {
            /* Identical expectation: drop the old one (only if
             * its timer was still live, else it is on its way
             * out already). */
            if (del_timer(&i->timeout)) {
                nf_ct_unlink_expect(i);
                nf_ct_expect_put(i);
                break;
            }
        } else if (expect_clash(i, expect)) {
            ret = -EBUSY;
            goto out;
        }
    }
    /* Will be over limit? */
    if (master_help) {
        p = &rcu_dereference_protected(
            master_help->helper,
            lockdep_is_held(&nf_conntrack_lock)
            )->expect_policy[expect->class];
        if (p->max_expected &&
            master_help->expecting[expect->class] >= p->max_expected) {
            /* Try to make room before giving up. */
            evict_oldest_expect(master, expect);
            if (master_help->expecting[expect->class]
                >= p->max_expected) {
                ret = -EMFILE;
                goto out;
            }
        }
    }

    if (net->ct.expect_count >= nf_ct_expect_max) {
        if (net_ratelimit())
            printk(KERN_WARNING
                   "nf_conntrack: expectation table full\n");
        ret = -EMFILE;
    }
out:
    return ret;
}
/* Validate and insert @expect, then emit an IPEXP_NEW event.
 * Returns 0 on success or a negative errno from __nf_ct_expect_check()
 * (which returns 1 on success, so "ret <= 0" is the failure test). */
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                u32 pid, int report)
{
    int ret;

    spin_lock_bh(&nf_conntrack_lock);
    ret = __nf_ct_expect_check(expect);
    if (ret <= 0)
        goto out;

    ret = 0;
    nf_ct_expect_insert(expect);
    spin_unlock_bh(&nf_conntrack_lock);
    /* Event is reported outside the lock. */
    nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
    return ret;
out:
    spin_unlock_bh(&nf_conntrack_lock);
    return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
/* Flush every expectation on the global userspace list whose timer is
 * still pending.  NOTE(review): unlike the other removal paths this
 * walks the list without taking nf_conntrack_lock here — presumably the
 * caller serializes; confirm against call sites. */
void nf_ct_remove_userspace_expectations(void)
{
    struct nf_conntrack_expect *exp;
    struct hlist_node *n, *next;

    hlist_for_each_entry_safe(exp, n, next,
                  &nf_ct_userspace_expect_list, lnode) {
        if (del_timer(&exp->timeout)) {
            nf_ct_unlink_expect(exp);
            nf_ct_expect_put(exp);
        }
    }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);
#ifdef CONFIG_PROC_FS
/* seq_file iterator state: current bucket in the expectation hash. */
struct ct_expect_iter_state {
    struct seq_net_private p;
    unsigned int bucket;  /* index into net->ct.expect_hash */
};
/* Return the first node in the first non-empty hash bucket, or NULL.
 * Runs under rcu_read_lock() taken in exp_seq_start(). */
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
    struct net *net = seq_file_net(seq);
    struct ct_expect_iter_state *st = seq->private;
    struct hlist_node *n;

    for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
        n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        if (n)
            return n;
    }
    return NULL;
}
/* Advance to the node after @head, rolling over to the next non-empty
 * bucket; NULL when the whole table has been traversed. */
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                         struct hlist_node *head)
{
    struct net *net = seq_file_net(seq);
    struct ct_expect_iter_state *st = seq->private;

    head = rcu_dereference(hlist_next_rcu(head));
    while (head == NULL) {
        if (++st->bucket >= nf_ct_expect_hsize)
            return NULL;
        head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
    }
    return head;
}
/* Return the @pos'th node of the table (seq_file positioning), or NULL
 * if the table holds fewer than pos+1 entries. */
static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
    struct hlist_node *head = ct_expect_get_first(seq);

    if (head)
        while (pos && (head = ct_expect_get_next(seq, head)))
            pos--;
    return pos ? NULL : head;
}
/* seq_file start: take rcu_read_lock for the duration of the dump. */
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
    __acquires(RCU)
{
    rcu_read_lock();
    return ct_expect_get_idx(seq, *pos);
}
/* seq_file next: step the iterator and bump the position counter. */
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    (*pos)++;
    return ct_expect_get_next(seq, v);
}
/* seq_file stop: release the RCU read-side lock taken in start. */
static void exp_seq_stop(struct seq_file *seq, void *v)
    __releases(RCU)
{
    rcu_read_unlock();
}
/* Print one expectation as a line of /proc/net/nf_conntrack_expect:
 * remaining timeout, protocol numbers, tuple, flags and helper name. */
static int exp_seq_show(struct seq_file *s, void *v)
{
    struct nf_conntrack_expect *expect;
    struct nf_conntrack_helper *helper;
    struct hlist_node *n = v;
    char *delim = "";

    expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

    /* Remaining lifetime in seconds, "-" if no timer was armed. */
    if (expect->timeout.function)
        seq_printf(s, "%ld ", timer_pending(&expect->timeout)
               ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
    else
        seq_printf(s, "- ");
    seq_printf(s, "l3proto = %u proto=%u ",
           expect->tuple.src.l3num,
           expect->tuple.dst.protonum);
    print_tuple(s, &expect->tuple,
            __nf_ct_l3proto_find(expect->tuple.src.l3num),
            __nf_ct_l4proto_find(expect->tuple.src.l3num,
                     expect->tuple.dst.protonum));

    /* Comma-separated flag names. */
    if (expect->flags & NF_CT_EXPECT_PERMANENT) {
        seq_printf(s, "PERMANENT");
        delim = ",";
    }
    if (expect->flags & NF_CT_EXPECT_INACTIVE) {
        seq_printf(s, "%sINACTIVE", delim);
        delim = ",";
    }
    if (expect->flags & NF_CT_EXPECT_USERSPACE)
        seq_printf(s, "%sUSERSPACE", delim);

    helper = rcu_dereference(nfct_help(expect->master)->helper);
    if (helper) {
        seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
        if (helper->expect_policy[expect->class].name)
            seq_printf(s, "/%s",
                   helper->expect_policy[expect->class].name);
    }

    /* Old seq API: seq_putc's return value doubles as overflow status. */
    return seq_putc(s, '\n');
}
/* seq_file iterator ops for the expectation table dump. */
static const struct seq_operations exp_seq_ops = {
    .start = exp_seq_start,
    .next = exp_seq_next,
    .stop = exp_seq_stop,
    .show = exp_seq_show
};
/* open() for /proc/net/nf_conntrack_expect: per-net seq_file setup. */
static int exp_open(struct inode *inode, struct file *file)
{
    return seq_open_net(inode, file, &exp_seq_ops,
            sizeof(struct ct_expect_iter_state));
}
/* file_operations for the proc entry created in exp_proc_init(). */
static const struct file_operations exp_file_ops = {
    .owner = THIS_MODULE,
    .open = exp_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */
/* Create the per-net /proc/net/nf_conntrack_expect entry (mode 0440).
 * No-op success when CONFIG_PROC_FS is disabled. */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
    struct proc_dir_entry *proc;

    proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
    if (!proc)
        return -ENOMEM;
#endif /* CONFIG_PROC_FS */
    return 0;
}
/* Remove the per-net proc entry created by exp_proc_init(). */
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
    proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
/* Per-net init of the expectation subsystem: size the (global) hash
 * parameters once for init_net, allocate the per-net hash table, create
 * the slab cache (init_net only) and the proc entry.
 * Returns 0 or a negative errno; partial setup is unwound on failure. */
int nf_conntrack_expect_init(struct net *net)
{
    int err = -ENOMEM;

    if (net_eq(net, &init_net)) {
        /* Default: 1/256th of the conntrack table, at least 1. */
        if (!nf_ct_expect_hsize) {
            nf_ct_expect_hsize = net->ct.htable_size / 256;
            if (!nf_ct_expect_hsize)
                nf_ct_expect_hsize = 1;
        }
        nf_ct_expect_max = nf_ct_expect_hsize * 4;
    }

    net->ct.expect_count = 0;
    net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
    if (net->ct.expect_hash == NULL)
        goto err1;

    if (net_eq(net, &init_net)) {
        nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                    sizeof(struct nf_conntrack_expect),
                    0, 0, NULL);
        if (!nf_ct_expect_cachep)
            goto err2;
    }

    err = exp_proc_init(net);
    if (err < 0)
        goto err3;

    return 0;

/* goto-ladder cleanup: undo only what this net set up. */
err3:
    if (net_eq(net, &init_net))
        kmem_cache_destroy(nf_ct_expect_cachep);
err2:
    nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
    return err;
}
/* Per-net teardown: remove the proc entry, destroy the slab cache for
 * init_net (after waiting for pending call_rcu frees), free the hash. */
void nf_conntrack_expect_fini(struct net *net)
{
    exp_proc_remove(net);
    if (net_eq(net, &init_net)) {
        rcu_barrier(); /* Wait for call_rcu() before destroy */
        kmem_cache_destroy(nf_ct_expect_cachep);
    }
    nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}
| gpl-2.0 |
lacvapps/linux | arch/alpha/kernel/process.c | 787 | 9936 | /*
* linux/arch/alpha/kernel/process.c
*
* Copyright (C) 1995 Linus Torvalds
*/
/*
* This file handles the architecture-dependent parts of process handling.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/time.h>
#include <linux/major.h>
#include <linux/stat.h>
#include <linux/vt.h>
#include <linux/mman.h>
#include <linux/elfcore.h>
#include <linux/reboot.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/hwrpb.h>
#include <asm/fpu.h>
#include "proto.h"
#include "pci_impl.h"
/*
* Power off function, if any
*/
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
#ifdef CONFIG_ALPHA_WTINT
/*
* Sleep the CPU.
* EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts.
*/
/* Idle-loop hook: wait-for-interrupt (skip 0 timer ticks), then
 * re-enable interrupts as the generic idle loop expects. */
void arch_cpu_idle(void)
{
    wtint(0);
    local_irq_enable();
}
/* Offline-CPU idle: sleep as long as possible (skip INT_MAX ticks). */
void arch_cpu_idle_dead(void)
{
    wtint(INT_MAX);
}
#endif /* ALPHA_WTINT */
struct halt_info {
int mode;
char *restart_cmd;
};
/* Per-CPU shutdown worker, run on every CPU via on_each_cpu().
 * Updates this CPU's HWRPB per-cpu flags to tell the console firmware
 * what to do next (halt / cold or warm bootstrap), parks secondary
 * CPUs, and on the boot CPU restores the SRM console environment
 * before halting. */
static void
common_shutdown_1(void *generic_ptr)
{
    struct halt_info *how = (struct halt_info *)generic_ptr;
    struct percpu_struct *cpup;
    unsigned long *pflags, flags;
    int cpuid = smp_processor_id();

    /* No point in taking interrupts anymore. */
    local_irq_disable();

    /* Locate this CPU's slot in the HWRPB. */
    cpup = (struct percpu_struct *)
            ((unsigned long)hwrpb + hwrpb->processor_offset
             + hwrpb->processor_size * cpuid);
    pflags = &cpup->flags;
    flags = *pflags;

    /* Clear reason to "default"; clear "bootstrap in progress". */
    flags &= ~0x00ff0001UL;

#ifdef CONFIG_SMP
    /* Secondaries halt here. */
    if (cpuid != boot_cpuid) {
        flags |= 0x00040000UL; /* "remain halted" */
        *pflags = flags;
        set_cpu_present(cpuid, false);
        set_cpu_possible(cpuid, false);
        halt();
    }
#endif

    if (how->mode == LINUX_REBOOT_CMD_RESTART) {
        if (!how->restart_cmd) {
            flags |= 0x00020000UL; /* "cold bootstrap" */
        } else {
            /* For SRM, we could probably set environment
               variables to get this to work. We'd have to
               delay this until after srm_paging_stop unless
               we ever got srm_fixup working.

               At the moment, SRM will use the last boot device,
               but the file and flags will be the defaults, when
               doing a "warm" bootstrap. */
            flags |= 0x00030000UL; /* "warm bootstrap" */
        }
    } else {
        flags |= 0x00040000UL; /* "remain halted" */
    }
    *pflags = flags;

#ifdef CONFIG_SMP
    /* Wait for the secondaries to halt. */
    set_cpu_present(boot_cpuid, false);
    set_cpu_possible(boot_cpuid, false);
    while (cpumask_weight(cpu_present_mask))
        barrier();
#endif

    /* If booted from SRM, reset some of the original environment. */
    if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
        /* If we've gotten here after SysRq-b, leave interrupt
           context before taking over the console. */
        if (in_interrupt())
            irq_exit();
        /* This has the effect of resetting the VGA video origin.  */
        console_lock();
        do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
        console_unlock();
#endif
        pci_restore_srm_config();
        set_hae(srm_hae);
    }

    if (alpha_mv.kill_arch)
        alpha_mv.kill_arch(how->mode);

    if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
        /* Unfortunately, since MILO doesn't currently understand
           the hwrpb bits above, we can't reliably halt the
           processor and keep it halted.  So just loop.  */
        return;
    }

    if (alpha_using_srm)
        srm_paging_stop();

    halt();
}
/* Run common_shutdown_1() on every CPU with the requested mode. */
static void
common_shutdown(int mode, char *restart_cmd)
{
    struct halt_info args;
    args.mode = mode;
    args.restart_cmd = restart_cmd;
    on_each_cpu(common_shutdown_1, &args, 0);
}
/* Arch hook: reboot the machine (optionally with a restart command). */
void
machine_restart(char *restart_cmd)
{
    common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd);
}
/* Arch hook: halt the machine. */
void
machine_halt(void)
{
    common_shutdown(LINUX_REBOOT_CMD_HALT, NULL);
}
/* Arch hook: power the machine off. */
void
machine_power_off(void)
{
    common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL);
}
/* Used by sysrq-p, among others.  I don't believe r9-r15 are ever
   saved in the context it's used.  */
void
show_regs(struct pt_regs *regs)
{
    show_regs_print_info(KERN_DEFAULT);
    dik_show_regs(regs, NULL);
}
/*
 * Re-start a thread when doing execve(): set the new program counter,
 * user-mode PS (8) and user stack pointer.
 */
void
start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
    regs->pc = pc;
    regs->ps = 8; /* user mode */
    wrusp(sp);
}
EXPORT_SYMBOL(start_thread);
/*
 * Free current thread data structures etc..  Nothing to do on alpha.
 */
void
exit_thread(void)
{
}
/* Reset per-thread state on exec: clean FPU and TLS slate. */
void
flush_thread(void)
{
    /* Arrange for each exec'ed process to start off with a clean slate
       with respect to the FPU.  This is all exceptions disabled.  */
    current_thread_info()->ieee_state = 0;
    wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));

    /* Clean slate for TLS.  */
    current_thread_info()->pcb.unique = 0;
}
/* Arch hook for releasing a dead task's resources; nothing on alpha. */
void
release_thread(struct task_struct *dead_task)
{
}
/*
 * Copy architecture-specific thread state for fork/clone/kthread.
 *
 * @clone_flags: clone(2) flags (CLONE_SETTLS is honored here).
 * @usp:         user stack pointer, or the function to run for a
 *               kernel thread (PF_KTHREAD).
 * @kthread_arg: argument for a kernel thread's function.
 * @p:           the new task.
 *
 * Returns 0 (cannot fail on alpha).
 */
int
copy_thread(unsigned long clone_flags, unsigned long usp,
        unsigned long kthread_arg,
        struct task_struct *p)
{
    extern void ret_from_fork(void);
    extern void ret_from_kernel_thread(void);

    struct thread_info *childti = task_thread_info(p);
    struct pt_regs *childregs = task_pt_regs(p);
    struct pt_regs *regs = current_pt_regs();
    struct switch_stack *childstack, *stack;

    /* The switch_stack sits immediately below pt_regs on the kernel
       stack; the child resumes from there. */
    childstack = ((struct switch_stack *) childregs) - 1;
    childti->pcb.ksp = (unsigned long) childstack;
    childti->pcb.flags = 1; /* set FEN, clear everything else */

    if (unlikely(p->flags & PF_KTHREAD)) {
        /* kernel thread */
        memset(childstack, 0,
            sizeof(struct switch_stack) + sizeof(struct pt_regs));
        childstack->r26 = (unsigned long) ret_from_kernel_thread;
        childstack->r9 = usp; /* function */
        childstack->r10 = kthread_arg;
        /* Fixed: this statement ended in a stray comma operator
           (',' instead of ';'), harmless but misleading. */
        childregs->hae = alpha_mv.hae_cache;
        childti->pcb.usp = 0;
        return 0;
    }
    /* Note: if CLONE_SETTLS is not set, then we must inherit the
       value from the parent, which will have been set by the block
       copy in dup_task_struct.  This is non-intuitive, but is
       required for proper operation in the case of a threaded
       application calling fork.  */
    if (clone_flags & CLONE_SETTLS)
        childti->pcb.unique = regs->r20;
    childti->pcb.usp = usp ?: rdusp();
    *childregs = *regs;
    childregs->r0 = 0;
    childregs->r19 = 0;
    childregs->r20 = 1; /* OSF/1 has some strange fork() semantics.  */
    regs->r20 = 0;
    stack = ((struct switch_stack *) regs) - 1;
    *childstack = *stack;
    childstack->r26 = (unsigned long) ret_from_fork;
    return 0;
}
/*
 * Fill in the user structure for a ELF core dump: copy the 31 integer
 * registers, gp, usp and pc from pt_regs / switch_stack into the
 * elf_greg_t array, plus the thread's UNIQUE (TLS) value in slot 32.
 */
void
dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
{
    /* switch stack follows right below pt_regs: */
    struct switch_stack * sw = ((struct switch_stack *) pt) - 1;

    dest[ 0] = pt->r0;
    dest[ 1] = pt->r1;
    dest[ 2] = pt->r2;
    dest[ 3] = pt->r3;
    dest[ 4] = pt->r4;
    dest[ 5] = pt->r5;
    dest[ 6] = pt->r6;
    dest[ 7] = pt->r7;
    dest[ 8] = pt->r8;
    /* r9-r15 are callee-saved and live in the switch_stack. */
    dest[ 9] = sw->r9;
    dest[10] = sw->r10;
    dest[11] = sw->r11;
    dest[12] = sw->r12;
    dest[13] = sw->r13;
    dest[14] = sw->r14;
    dest[15] = sw->r15;
    dest[16] = pt->r16;
    dest[17] = pt->r17;
    dest[18] = pt->r18;
    dest[19] = pt->r19;
    dest[20] = pt->r20;
    dest[21] = pt->r21;
    dest[22] = pt->r22;
    dest[23] = pt->r23;
    dest[24] = pt->r24;
    dest[25] = pt->r25;
    dest[26] = pt->r26;
    dest[27] = pt->r27;
    dest[28] = pt->r28;
    dest[29] = pt->gp;
    /* Current task's usp is only up to date in the usp register. */
    dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
    dest[31] = pt->pc;

    /* Once upon a time this was the PS value.  Which is stupid
       since that is always 8 for usermode.  Usurped for the more
       useful value of the thread's UNIQUE field.  */
    dest[32] = ti->pcb.unique;
}
EXPORT_SYMBOL(dump_elf_thread);
/* Core-dump helper: dump @task's general registers; always returns 1. */
int
dump_elf_task(elf_greg_t *dest, struct task_struct *task)
{
    dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
    return 1;
}
EXPORT_SYMBOL(dump_elf_task);
/* Core-dump helper: copy @task's 32 saved FP registers (8 bytes each)
 * from its switch_stack; always returns 1. */
int
dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
{
    struct switch_stack *sw = (struct switch_stack *)task_pt_regs(task) - 1;
    memcpy(dest, sw->fp, 32 * 8);
    return 1;
}
EXPORT_SYMBOL(dump_elf_task_fp);
/*
 * Return saved PC of a blocked thread.  This assumes the frame
 * pointer is the 6th saved long on the kernel stack and that the
 * saved return address is the first long in the frame.  This all
 * holds provided the thread blocked through a call to schedule() ($15
 * is the frame pointer in schedule() and $15 is saved at offset 48 by
 * entry.S:do_switch_stack).
 *
 * Under heavy swap load I've seen this lose in an ugly way.  So do
 * some extra sanity checking on the ranges we expect these pointers
 * to be in so that we can fail gracefully.  This is just for ps after
 * all.  -- r~
 */
unsigned long
thread_saved_pc(struct task_struct *t)
{
    unsigned long base = (unsigned long)task_stack_page(t);
    unsigned long fp, sp = task_thread_info(t)->pcb.ksp;

    /* NOTE(review): 16*1024 hard-codes the kernel stack size;
       assumes THREAD_SIZE is 16KB on alpha -- confirm. */
    if (sp > base && sp+6*8 < base + 16*1024) {
        fp = ((unsigned long*)sp)[6];
        if (fp > sp && fp < base + 16*1024)
            return *(unsigned long *)fp;
    }

    return 0; /* couldn't locate a plausible frame */
}
/* Return the "wait channel" (blocking PC) of task @p for /proc, or 0
 * for the current/running task. */
unsigned long
get_wchan(struct task_struct *p)
{
    unsigned long schedule_frame;
    unsigned long pc;
    if (!p || p == current || p->state == TASK_RUNNING)
        return 0;
    /*
     * This one depends on the frame size of schedule().  Do a
     * "disass schedule" in gdb to find the frame size.  Also, the
     * code assumes that sleep_on() follows immediately after
     * interruptible_sleep_on() and that add_timer() follows
     * immediately after interruptible_sleep().  Ugly, isn't it?
     * Maybe adding a wchan field to task_struct would be better,
     * after all...
     */

    pc = thread_saved_pc(p);
    if (in_sched_functions(pc)) {
        /* Walk one frame up past the scheduler internals. */
        schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6];
        return ((unsigned long *)schedule_frame)[12];
    }
    return pc;
}
| gpl-2.0 |
alban/linux | tools/thermal/tmon/tmon.c | 1299 | 9053 | /*
* tmon.c Thermal Monitor (TMON) main function and entry point
*
* Copyright (C) 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 or later as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Author: Jacob Pan <jacob.jun.pan@linux.intel.com>
*
*/
#include <getopt.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <ncurses.h>
#include <ctype.h>
#include <time.h>
#include <signal.h>
#include <limits.h>
#include <sys/time.h>
#include <pthread.h>
#include <math.h>
#include <stdarg.h>
#include <syslog.h>
#include "tmon.h"
unsigned long ticktime = 1; /* seconds */
unsigned long no_control = 1; /* monitoring only or use cooling device for
* temperature control.
*/
double time_elapsed = 0.0;
unsigned long target_temp_user = 65; /* can be select by tui later */
int dialogue_on;
int tmon_exit;
static short daemon_mode;
static int logging; /* for recording thermal data to a file */
static int debug_on;
FILE *tmon_log;
/*cooling device used for the PID controller */
char ctrl_cdev[CDEV_NAME_SIZE] = "None";
int target_thermal_zone; /* user selected target zone instance */
static void start_daemon_mode(void);
pthread_t event_tid;
pthread_mutex_t input_lock;
/* Print command-line help and exit(0).  Must stay in sync with the
 * option handling in main(). */
void usage()
{
    printf("Usage: tmon [OPTION...]\n");
    printf("  -c, --control         cooling device in control\n");
    printf("  -d, --daemon          run as daemon, no TUI\n");
    printf("  -g, --debug           debug message in syslog\n");
    printf("  -h, --help            show this help message\n");
    printf("  -l, --log             log data to /var/tmp/tmon.log\n");
    printf("  -t, --time-interval   sampling time interval, > 1 sec.\n");
    printf("  -T, --target-temp     initial target temperature\n");
    printf("  -v, --version         show version\n");
    printf("  -z, --zone            target thermal zone id\n");

    exit(0);
}
/* Print the program version and exit successfully. */
void version()
{
    printf("TMON version %s\n", VERSION);
    exit(EXIT_SUCCESS);
}
/* Tear everything down: close the log, stop the TUI event thread,
 * relax cooling-device throttling, restore the terminal and free
 * thermal data.  Always terminates the process (exit(1)). */
static void tmon_cleanup(void)
{
    syslog(LOG_INFO, "TMON exit cleanup\n");
    fflush(stdout);
    refresh();
    if (tmon_log)
        fclose(tmon_log);
    if (event_tid) {
        /* Serialize with the TUI thread before cancelling it. */
        pthread_mutex_lock(&input_lock);
        pthread_cancel(event_tid);
        pthread_mutex_unlock(&input_lock);
        pthread_mutex_destroy(&input_lock);
    }
    closelog();
    /* relax control knobs, undo throttling */
    set_ctrl_state(0);

    /* Restore normal terminal modes and leave curses. */
    keypad(stdscr, FALSE);
    echo();
    nocbreak();
    close_windows();
    endwin();
    free_thermal_data();

    exit(1);
}
/* Signal handler: announce the signal and request a clean shutdown by
 * setting tmon_exit (the main loop exits and runs tmon_cleanup()).
 * NOTE(review): the SIGKILL case is dead code -- SIGKILL cannot be
 * caught; only SIGINT and SIGTERM are installed in main(). */
static void tmon_sig_handler(int sig)
{
    syslog(LOG_INFO, "TMON caught signal %d\n", sig);
    refresh();
    switch (sig) {
    case SIGTERM:
        printf("sigterm, exit and clean up\n");
        fflush(stdout);
        break;
    case SIGKILL:
        printf("sigkill, exit and clean up\n");
        fflush(stdout);
        break;
    case SIGINT:
        printf("ctrl-c, exit and clean up\n");
        fflush(stdout);
        break;
    default:
        break;
    }
    tmon_exit = true;
}
/* Open syslog; log everything up to DEBUG when -g was given,
 * otherwise only errors. */
static void start_syslog(void)
{
    if (debug_on)
        setlogmask(LOG_UPTO(LOG_DEBUG));
    else
        setlogmask(LOG_UPTO(LOG_ERR));
    openlog("tmon.log", LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL0);
    syslog(LOG_NOTICE, "TMON started by User %d", getuid());
}
/* If -l was given, open /var/tmp/tmon.log (refusing symlinks and files
 * we don't own), then write the thermal-system configuration header and
 * the column headings for the data rows. */
static void prepare_logging(void)
{
    int i;
    struct stat logstat;

    if (!logging)
        return;
    /* open local data log file */
    tmon_log = fopen(TMON_LOG_FILE, "w+");
    if (!tmon_log) {
        syslog(LOG_ERR, "failed to open log file %s\n", TMON_LOG_FILE);
        return;
    }

    if (lstat(TMON_LOG_FILE, &logstat) < 0) {
        syslog(LOG_ERR, "Unable to stat log file %s\n", TMON_LOG_FILE);
        fclose(tmon_log);
        tmon_log = NULL;
        return;
    }

    /* The log file must be a regular file owned by us */
    if (S_ISLNK(logstat.st_mode)) {
        syslog(LOG_ERR, "Log file is a symlink.  Will not log\n");
        fclose(tmon_log);
        tmon_log = NULL;
        return;
    }

    if (logstat.st_uid != getuid()) {
        syslog(LOG_ERR, "We don't own the log file. Not logging\n");
        fclose(tmon_log);
        tmon_log = NULL;
        return;
    }

    fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n");
    for (i = 0; i < ptdata.nr_tz_sensor; i++) {
        char binding_str[33]; /* size of long + 1 */
        int j;

        /* Render the 32-bit cdev binding mask as a bit string. */
        memset(binding_str, 0, sizeof(binding_str));
        for (j = 0; j < 32; j++)
            binding_str[j] = (ptdata.tzi[i].cdev_binding & 1<<j) ?
                '1' : '0';

        fprintf(tmon_log, "#thermal zone %s%02d cdevs binding: %32s\n",
            ptdata.tzi[i].type,
            ptdata.tzi[i].instance,
            binding_str);
        for (j = 0; j < ptdata.tzi[i].nr_trip_pts; j++) {
            fprintf(tmon_log, "#\tTP%02d type:%s, temp:%lu\n", j,
                trip_type_name[ptdata.tzi[i].tp[j].type],
                ptdata.tzi[i].tp[j].temp);
        }
    }

    for (i = 0; i < ptdata.nr_cooling_dev; i++)
        fprintf(tmon_log, "#cooling devices%02d: %s\n",
            i, ptdata.cdi[i].type);

    fprintf(tmon_log, "#---------- THERMAL DATA LOG STARTED -----------\n");
    fprintf(tmon_log, "Samples TargetTemp ");
    for (i = 0; i < ptdata.nr_tz_sensor; i++) {
        fprintf(tmon_log, "%s%d    ", ptdata.tzi[i].type,
            ptdata.tzi[i].instance);
    }

    for (i = 0; i < ptdata.nr_cooling_dev; i++)
        fprintf(tmon_log, "%s%d ", ptdata.cdi[i].type,
            ptdata.cdi[i].instance);

    fprintf(tmon_log, "\n");
}
/* Long-option table for getopt_long(); must stay in sync with the
 * "c:dlht:T:vgz:" short-option string in main() and with usage().
 * Fixed: "zone" was missing even though -z/--zone is advertised, so
 * "--zone N" previously failed to parse. */
static struct option opts[] = {
    { "control", 1, NULL, 'c' },
    { "daemon", 0, NULL, 'd' },
    { "time-interval", 1, NULL, 't' },
    { "target-temp", 1, NULL, 'T' },
    { "log", 0, NULL, 'l' },
    { "help", 0, NULL, 'h' },
    { "version", 0, NULL, 'v' },
    { "debug", 0, NULL, 'g' },
    { "zone", 1, NULL, 'z' },
    { 0, 0, NULL, 0 }
};
/* Entry point: parse options, initialize syslog/curses/thermal sysfs,
 * spawn the TUI event thread, then run the sample/display/control loop
 * every ticktime seconds until tmon_exit is set.
 * Fixes: the SIGTERM registration failure was logged as "SIGINT"
 * (copy/paste), and ctrl_cdev could be left unterminated by strncpy. */
int main(int argc, char **argv)
{
    int err = 0;
    int id2 = 0, c;
    double yk = 0.0, temp; /* controller output */
    int target_tz_index;

    if (geteuid() != 0) {
        printf("TMON needs to be run as root\n");
        exit(EXIT_FAILURE);
    }

    while ((c = getopt_long(argc, argv, "c:dlht:T:vgz:", opts, &id2)) != -1) {
        switch (c) {
        case 'c':
            no_control = 0;
            /* snprintf guarantees NUL-termination, unlike strncpy. */
            snprintf(ctrl_cdev, sizeof(ctrl_cdev), "%s", optarg);
            break;
        case 'd':
            start_daemon_mode();
            printf("Run TMON in daemon mode\n");
            break;
        case 't':
            ticktime = strtod(optarg, NULL);
            if (ticktime < 1)
                ticktime = 1;
            break;
        case 'T':
            temp = strtod(optarg, NULL);
            if (temp < 0) {
                fprintf(stderr, "error: temperature must be positive\n");
                return 1;
            }
            target_temp_user = temp;
            break;
        case 'l':
            printf("Logging data to /var/tmp/tmon.log\n");
            logging = 1;
            break;
        case 'h':
            usage();
            break;
        case 'v':
            version();
            break;
        case 'g':
            debug_on = 1;
            break;
        case 'z':
            target_thermal_zone = strtod(optarg, NULL);
            break;
        default:
            break;
        }
    }
    if (pthread_mutex_init(&input_lock, NULL) != 0) {
        fprintf(stderr, "\n mutex init failed, exit\n");
        return 1;
    }
    start_syslog();
    if (signal(SIGINT, tmon_sig_handler) == SIG_ERR)
        syslog(LOG_DEBUG, "Cannot handle SIGINT\n");
    if (signal(SIGTERM, tmon_sig_handler) == SIG_ERR)
        syslog(LOG_DEBUG, "Cannot handle SIGTERM\n");

    if (probe_thermal_sysfs()) {
        pthread_mutex_destroy(&input_lock);
        closelog();
        return -1;
    }
    initialize_curses();
    setup_windows();
    signal(SIGWINCH, resize_handler);
    show_title_bar();
    show_sensors_w();
    show_cooling_device();
    update_thermal_data();
    show_data_w();
    prepare_logging();
    init_thermal_controller();
    nodelay(stdscr, TRUE);
    err = pthread_create(&event_tid, NULL, &handle_tui_events, NULL);
    if (err != 0) {
        printf("\ncan't create thread :[%s]", strerror(err));
        tmon_cleanup();
        exit(EXIT_FAILURE);
    }

    /* validate range of user selected target zone, default to the first
     * instance if out of range
     */
    target_tz_index = zone_instance_to_index(target_thermal_zone);
    if (target_tz_index < 0) {
        target_thermal_zone = ptdata.tzi[0].instance;
        syslog(LOG_ERR, "target zone is not found, default to %d\n",
            target_thermal_zone);
    }
    while (1) {
        sleep(ticktime);
        show_title_bar();
        show_sensors_w();
        update_thermal_data();
        if (!dialogue_on) {
            show_data_w();
            show_cooling_device();
        }
        cur_thermal_record++;
        time_elapsed += ticktime;
        /* Feed the PID controller millidegree readings as degrees. */
        controller_handler(trec[0].temp[target_tz_index] / 1000,
                   &yk);
        trec[0].pid_out_pct = yk;
        if (!dialogue_on)
            show_control_w();
        if (tmon_exit)
            break;
    }
    tmon_cleanup();
    return 0;
}
/* Detach from the terminal: classic daemonization (fork, exit parent,
 * new session, chdir to /, close stdio).  Curses TUI is disabled. */
static void start_daemon_mode()
{
    daemon_mode = 1;
    /* fork */
    pid_t sid, pid = fork();
    if (pid < 0) {
        exit(EXIT_FAILURE);
    } else if (pid > 0)
        /* kill parent */
        exit(EXIT_SUCCESS);

    /* disable TUI, it may not be necessary, but saves some resource */
    disable_tui();

    /* change the file mode mask */
    umask(S_IWGRP | S_IWOTH);

    /* new SID for the daemon process */
    sid = setsid();
    if (sid < 0)
        exit(EXIT_FAILURE);

    /* change working directory */
    if ((chdir("/")) < 0)
        exit(EXIT_FAILURE);

    /* NOTE(review): purpose of this 10s delay is not evident from the
       code -- presumably lets the parent's shell settle; confirm. */
    sleep(10);

    close(STDIN_FILENO);
    close(STDOUT_FILENO);
    close(STDERR_FILENO);
}
| gpl-2.0 |
HashBang173/linux-kexec | tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c | 1299 | 2944 | /*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
/*
* Test various attributes of the EBB event are enforced.
*/
/*
 * Verify that the kernel enforces the required attributes on EBB events:
 * a PMC must be specified, the leader must be exclusive and pinned, and
 * sampling-style attributes (freq, sample_period, enable_on_exec,
 * inherit) must be rejected.  Also checks group-membership rules (leader
 * and member must agree on requesting EBB) and that EBB events are
 * task-bound only.  Returns 0 on success; FAIL_IF() returns early with
 * non-zero on the first failed expectation.
 */
int event_attributes(void)
{
	struct event event, leader;

	/* Baseline: a well-formed EBB leader event opens fine. */
	event_init(&event, 0x1001e);
	event_leader_ebb_init(&event);
	/* Expected to succeed */
	FAIL_IF(event_open(&event));
	event_close(&event);

	event_init(&event, 0x001e); /* CYCLES - no PMC specified */
	event_leader_ebb_init(&event);
	/* Expected to fail, no PMC specified */
	FAIL_IF(event_open(&event) == 0);

	event_init(&event, 0x2001e);
	event_leader_ebb_init(&event);
	event.attr.exclusive = 0;
	/* Expected to fail, not exclusive */
	FAIL_IF(event_open(&event) == 0);

	event_init(&event, 0x3001e);
	event_leader_ebb_init(&event);
	event.attr.freq = 1;
	/* Expected to fail, sets freq */
	FAIL_IF(event_open(&event) == 0);

	event_init(&event, 0x4001e);
	event_leader_ebb_init(&event);
	event.attr.sample_period = 1;
	/* Expected to fail, sets sample_period */
	FAIL_IF(event_open(&event) == 0);

	event_init(&event, 0x1001e);
	event_leader_ebb_init(&event);
	event.attr.enable_on_exec = 1;
	/* Expected to fail, sets enable_on_exec */
	FAIL_IF(event_open(&event) == 0);

	event_init(&event, 0x1001e);
	event_leader_ebb_init(&event);
	event.attr.inherit = 1;
	/* Expected to fail, sets inherit */
	FAIL_IF(event_open(&event) == 0);

	/* Group rules: EBB leader + EBB member is the valid combination. */
	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	FAIL_IF(event_open(&leader));

	event_init(&event, 0x20002);
	event_ebb_init(&event);

	/* Expected to succeed */
	FAIL_IF(event_open_with_group(&event, leader.fd));
	event_close(&leader);
	event_close(&event);

	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	FAIL_IF(event_open(&leader));

	event_init(&event, 0x20002);

	/* Expected to fail, event doesn't request EBB, leader does */
	FAIL_IF(event_open_with_group(&event, leader.fd) == 0);
	event_close(&leader);

	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	/* Clear the EBB flag */
	leader.attr.config &= ~(1ull << 63);

	FAIL_IF(event_open(&leader));

	event_init(&event, 0x20002);
	event_ebb_init(&event);

	/* Expected to fail, leader doesn't request EBB */
	FAIL_IF(event_open_with_group(&event, leader.fd) == 0);
	event_close(&leader);

	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	leader.attr.exclusive = 0;
	/* Expected to fail, leader isn't exclusive */
	FAIL_IF(event_open(&leader) == 0);

	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	leader.attr.pinned = 0;
	/* Expected to fail, leader isn't pinned */
	FAIL_IF(event_open(&leader) == 0);

	event_init(&event, 0x1001e);
	event_leader_ebb_init(&event);
	/* Expected to fail, not a task event */
	SKIP_IF(require_paranoia_below(1));
	FAIL_IF(event_open_with_cpu(&event, 0) == 0);

	return 0;
}
/* Entry point: run the attribute checks under the selftest harness. */
int main(void)
{
	return test_harness(event_attributes, "event_attributes");
}
| gpl-2.0 |
alexax66/LP-Kernel-SM-E500H | drivers/tty/serial/sccnxp.c | 2067 | 26783 | /*
* NXP (Philips) SCC+++(SCN+++) serial driver
*
* Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
*
* Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#if defined(CONFIG_SERIAL_SCCNXP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/platform_data/serial-sccnxp.h>
#include <linux/regulator/consumer.h>
#define SCCNXP_NAME "uart-sccnxp"
#define SCCNXP_MAJOR 204
#define SCCNXP_MINOR 205
#define SCCNXP_MR_REG (0x00)
# define MR0_BAUD_NORMAL (0 << 0)
# define MR0_BAUD_EXT1 (1 << 0)
# define MR0_BAUD_EXT2 (5 << 0)
# define MR0_FIFO (1 << 3)
# define MR0_TXLVL (1 << 4)
# define MR1_BITS_5 (0 << 0)
# define MR1_BITS_6 (1 << 0)
# define MR1_BITS_7 (2 << 0)
# define MR1_BITS_8 (3 << 0)
# define MR1_PAR_EVN (0 << 2)
# define MR1_PAR_ODD (1 << 2)
# define MR1_PAR_NO (4 << 2)
# define MR2_STOP1 (7 << 0)
# define MR2_STOP2 (0xf << 0)
#define SCCNXP_SR_REG (0x01)
#define SCCNXP_CSR_REG SCCNXP_SR_REG
# define SR_RXRDY (1 << 0)
# define SR_FULL (1 << 1)
# define SR_TXRDY (1 << 2)
# define SR_TXEMT (1 << 3)
# define SR_OVR (1 << 4)
# define SR_PE (1 << 5)
# define SR_FE (1 << 6)
# define SR_BRK (1 << 7)
#define SCCNXP_CR_REG (0x02)
# define CR_RX_ENABLE (1 << 0)
# define CR_RX_DISABLE (1 << 1)
# define CR_TX_ENABLE (1 << 2)
# define CR_TX_DISABLE (1 << 3)
# define CR_CMD_MRPTR1 (0x01 << 4)
# define CR_CMD_RX_RESET (0x02 << 4)
# define CR_CMD_TX_RESET (0x03 << 4)
# define CR_CMD_STATUS_RESET (0x04 << 4)
# define CR_CMD_BREAK_RESET (0x05 << 4)
# define CR_CMD_START_BREAK (0x06 << 4)
# define CR_CMD_STOP_BREAK (0x07 << 4)
# define CR_CMD_MRPTR0 (0x0b << 4)
#define SCCNXP_RHR_REG (0x03)
#define SCCNXP_THR_REG SCCNXP_RHR_REG
#define SCCNXP_IPCR_REG (0x04)
#define SCCNXP_ACR_REG SCCNXP_IPCR_REG
# define ACR_BAUD0 (0 << 7)
# define ACR_BAUD1 (1 << 7)
# define ACR_TIMER_MODE (6 << 4)
#define SCCNXP_ISR_REG (0x05)
#define SCCNXP_IMR_REG SCCNXP_ISR_REG
# define IMR_TXRDY (1 << 0)
# define IMR_RXRDY (1 << 1)
# define ISR_TXRDY(x) (1 << ((x * 4) + 0))
# define ISR_RXRDY(x) (1 << ((x * 4) + 1))
#define SCCNXP_IPR_REG (0x0d)
#define SCCNXP_OPCR_REG SCCNXP_IPR_REG
#define SCCNXP_SOP_REG (0x0e)
#define SCCNXP_ROP_REG (0x0f)
/* Route helpers */
#define MCTRL_MASK(sig) (0xf << (sig))
#define MCTRL_IBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_IP0)
#define MCTRL_OBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_OP0)
/* Supported chip types; enum values are the NXP part numbers. */
enum {
	SCCNXP_TYPE_SC2681	= 2681,
	SCCNXP_TYPE_SC2691	= 2691,
	SCCNXP_TYPE_SC2692	= 2692,
	SCCNXP_TYPE_SC2891	= 2891,
	SCCNXP_TYPE_SC2892	= 2892,
	SCCNXP_TYPE_SC28202	= 28202,
	SCCNXP_TYPE_SC68681	= 68681,
	SCCNXP_TYPE_SC68692	= 68692,
};
/* Per-chip driver state; one instance covers all UART channels. */
struct sccnxp_port {
	struct uart_driver	uart;			/* serial-core driver handle */
	struct uart_port	port[SCCNXP_MAX_UARTS];	/* one port per channel */
	bool			opened[SCCNXP_MAX_UARTS]; /* channel-open flags */

	const char		*name;			/* chip model string */
	int			irq;			/* chip IRQ (unused in poll mode) */

	u8			imr;			/* software shadow of IMR (write-only reg) */
	u8			addr_mask;		/* valid register-address mask */
	int			freq_std;		/* nominal crystal frequency (Hz) */

	int			flags;			/* SCCNXP_HAVE_* capability bits */
#define SCCNXP_HAVE_IO		0x00000001
#define SCCNXP_HAVE_MR0		0x00000002

#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE
	struct console		console;
#endif

	spinlock_t		lock;			/* serializes register access & imr */

	bool			poll;			/* true: timer polling instead of IRQ */
	struct timer_list	timer;

	struct sccnxp_pdata	pdata;			/* local copy of platform data */

	struct regulator	*regulator;		/* optional "VCC" supply */
};
/* Read one 8-bit chip register at its shifted bus offset. */
static inline u8 sccnxp_raw_read(void __iomem *base, u8 reg, u8 shift)
{
	void __iomem *addr = base + (reg << shift);

	return readb(addr);
}
/* Write one 8-bit chip register at its shifted bus offset. */
static inline void sccnxp_raw_write(void __iomem *base, u8 reg, u8 shift, u8 v)
{
	void __iomem *addr = base + (reg << shift);

	writeb(v, addr);
}
/* Chip-level register read; masks the address to the chip's valid range. */
static inline u8 sccnxp_read(struct uart_port *port, u8 reg)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	u8 masked = reg & s->addr_mask;

	return sccnxp_raw_read(port->membase, masked, port->regshift);
}
/* Chip-level register write; masks the address to the chip's valid range. */
static inline void sccnxp_write(struct uart_port *port, u8 reg, u8 v)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	u8 masked = reg & s->addr_mask;

	sccnxp_raw_write(port->membase, masked, port->regshift, v);
}
/* Per-channel register read: each channel's bank is 8 registers apart. */
static inline u8 sccnxp_port_read(struct uart_port *port, u8 reg)
{
	u8 offset = (port->line << 3) + reg;

	return sccnxp_read(port, offset);
}
/* Per-channel register write: each channel's bank is 8 registers apart. */
static inline void sccnxp_port_write(struct uart_port *port, u8 reg, u8 v)
{
	u8 offset = (port->line << 3) + reg;

	sccnxp_write(port, offset, v);
}
/*
 * Track the smallest |a - b| seen so far in *besterr (a negative value
 * means "no candidate yet").  Returns 0 when this pair improves (or
 * initializes) the best error, 1 otherwise.
 */
static int sccnxp_update_best_err(int a, int b, int *besterr)
{
	int delta = abs(a - b);

	if (*besterr >= 0 && *besterr <= delta)
		return 1;

	*besterr = delta;
	return 0;
}
/*
 * Standard baud-rate generator settings: CSR divisor selector, ACR baud
 * set select and, for chips that have MR0, the extended-rate bits.  The
 * table is scanned linearly by sccnxp_set_baud() (it stops only on an
 * exact match), so the entries need not be sorted.  Terminated by an
 * all-zero entry (baud == 0).
 */
static const struct {
	u8	csr;
	u8	acr;
	u8	mr0;
	int	baud;
} baud_std[] = {
	{ 0,	ACR_BAUD0,	MR0_BAUD_NORMAL,	50, },
	{ 0,	ACR_BAUD1,	MR0_BAUD_NORMAL,	75, },
	{ 1,	ACR_BAUD0,	MR0_BAUD_NORMAL,	110, },
	{ 2,	ACR_BAUD0,	MR0_BAUD_NORMAL,	134, },
	{ 3,	ACR_BAUD1,	MR0_BAUD_NORMAL,	150, },
	{ 3,	ACR_BAUD0,	MR0_BAUD_NORMAL,	200, },
	{ 4,	ACR_BAUD0,	MR0_BAUD_NORMAL,	300, },
	{ 0,	ACR_BAUD1,	MR0_BAUD_EXT1,		450, },
	{ 1,	ACR_BAUD0,	MR0_BAUD_EXT2,		880, },
	{ 3,	ACR_BAUD1,	MR0_BAUD_EXT1,		900, },
	{ 5,	ACR_BAUD0,	MR0_BAUD_NORMAL,	600, },
	{ 7,	ACR_BAUD0,	MR0_BAUD_NORMAL,	1050, },
	{ 2,	ACR_BAUD0,	MR0_BAUD_EXT2,		1076, },
	{ 6,	ACR_BAUD0,	MR0_BAUD_NORMAL,	1200, },
	{ 10,	ACR_BAUD1,	MR0_BAUD_NORMAL,	1800, },
	{ 7,	ACR_BAUD1,	MR0_BAUD_NORMAL,	2000, },
	{ 8,	ACR_BAUD0,	MR0_BAUD_NORMAL,	2400, },
	{ 5,	ACR_BAUD1,	MR0_BAUD_EXT1,		3600, },
	{ 9,	ACR_BAUD0,	MR0_BAUD_NORMAL,	4800, },
	{ 10,	ACR_BAUD0,	MR0_BAUD_NORMAL,	7200, },
	{ 11,	ACR_BAUD0,	MR0_BAUD_NORMAL,	9600, },
	{ 8,	ACR_BAUD0,	MR0_BAUD_EXT1,		14400, },
	{ 12,	ACR_BAUD1,	MR0_BAUD_NORMAL,	19200, },
	{ 9,	ACR_BAUD0,	MR0_BAUD_EXT1,		28800, },
	{ 12,	ACR_BAUD0,	MR0_BAUD_NORMAL,	38400, },
	{ 11,	ACR_BAUD0,	MR0_BAUD_EXT1,		57600, },
	{ 12,	ACR_BAUD1,	MR0_BAUD_EXT1,		115200, },
	{ 12,	ACR_BAUD0,	MR0_BAUD_EXT1,		230400, },
	{ 0, 0, 0, 0 }	/* terminator */
};
/*
 * Program the baud-rate generator.  Picks the baud_std[] entry whose
 * effective rate — scaled by the ratio of the actual clock (uartclk) to
 * the nominal crystal (freq_std) — is closest to the requested rate.
 * Entries that need MR0 are skipped on chips without it.  Returns the
 * baud rate actually achieved.
 */
static int sccnxp_set_baud(struct uart_port *port, int baud)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	int div_std, tmp_baud, bestbaud = baud, besterr = -1;
	u8 i, acr = 0, csr = 0, mr0 = 0;

	/* Find best baud from table (loop ends early on an exact match,
	 * i.e. when besterr reaches 0). */
	for (i = 0; baud_std[i].baud && besterr; i++) {
		if (baud_std[i].mr0 && !(s->flags & SCCNXP_HAVE_MR0))
			continue;
		div_std = DIV_ROUND_CLOSEST(s->freq_std, baud_std[i].baud);
		tmp_baud = DIV_ROUND_CLOSEST(port->uartclk, div_std);
		if (!sccnxp_update_best_err(baud, tmp_baud, &besterr)) {
			acr = baud_std[i].acr;
			csr = baud_std[i].csr;
			mr0 = baud_std[i].mr0;
			bestbaud = tmp_baud;
		}
	}

	if (s->flags & SCCNXP_HAVE_MR0) {
		/* Enable FIFO, set half level for TX */
		mr0 |= MR0_FIFO | MR0_TXLVL;
		/* Update MR0 (reachable only after the MRPTR0 command) */
		sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_MRPTR0);
		sccnxp_port_write(port, SCCNXP_MR_REG, mr0);
	}

	sccnxp_port_write(port, SCCNXP_ACR_REG, acr | ACR_TIMER_MODE);
	/* Same divisor selector for RX (high nibble) and TX (low nibble). */
	sccnxp_port_write(port, SCCNXP_CSR_REG, (csr << 4) | csr);

	if (baud != bestbaud)
		dev_dbg(port->dev, "Baudrate desired: %i, calculated: %i\n",
			baud, bestbaud);

	return bestbaud;
}
/* Set interrupt-mask bits for this channel and push the shadow to IMR. */
static void sccnxp_enable_irq(struct uart_port *port, int mask)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	int shift = port->line * 4;

	s->imr |= mask << shift;
	sccnxp_write(port, SCCNXP_IMR_REG, s->imr);
}
/* Clear interrupt-mask bits for this channel and push the shadow to IMR. */
static void sccnxp_disable_irq(struct uart_port *port, int mask)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	int shift = port->line * 4;

	s->imr &= ~(mask << shift);
	sccnxp_write(port, SCCNXP_IMR_REG, s->imr);
}
/*
 * Drive one routed output-port line for the given signal.  If the signal
 * is not routed in mctrl_cfg for this channel, do nothing.  Set/reset is
 * done through the SOP/ROP registers respectively.
 */
static void sccnxp_set_bit(struct uart_port *port, int sig, int state)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	u8 bitmask;

	if (!(s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(sig)))
		return;

	bitmask = 1 << MCTRL_OBIT(s->pdata.mctrl_cfg[port->line], sig);
	sccnxp_write(port, state ? SCCNXP_SOP_REG : SCCNXP_ROP_REG, bitmask);
}
/*
 * Drain the receiver for one port: read characters while RXRDY is set,
 * classify per-character error status, feed sysrq/break handling, and
 * finally push everything collected to the tty layer.
 */
static void sccnxp_handle_rx(struct uart_port *port)
{
	u8 sr;
	unsigned int ch, flag;

	for (;;) {
		sr = sccnxp_port_read(port, SCCNXP_SR_REG);
		if (!(sr & SR_RXRDY))
			break;

		/* Keep only the per-character error bits. */
		sr &= SR_PE | SR_FE | SR_OVR | SR_BRK;

		ch = sccnxp_port_read(port, SCCNXP_RHR_REG);

		port->icount.rx++;

		flag = TTY_NORMAL;

		if (unlikely(sr)) {
			if (sr & SR_BRK) {
				port->icount.brk++;
				/* Break condition must be acked by command */
				sccnxp_port_write(port, SCCNXP_CR_REG,
						  CR_CMD_BREAK_RESET);
				if (uart_handle_break(port))
					continue;
			} else if (sr & SR_PE)
				port->icount.parity++;
			else if (sr & SR_FE)
				port->icount.frame++;
			else if (sr & SR_OVR) {
				port->icount.overrun++;
				/* Overrun is sticky until status reset */
				sccnxp_port_write(port, SCCNXP_CR_REG,
						  CR_CMD_STATUS_RESET);
			}

			/* Only report conditions the tty layer asked for. */
			sr &= port->read_status_mask;

			if (sr & SR_BRK)
				flag = TTY_BREAK;
			else if (sr & SR_PE)
				flag = TTY_PARITY;
			else if (sr & SR_FE)
				flag = TTY_FRAME;
			else if (sr & SR_OVR)
				flag = TTY_OVERRUN;
		}

		if (uart_handle_sysrq_char(port, ch))
			continue;

		if (sr & port->ignore_status_mask)
			continue;

		uart_insert_char(port, sr, SR_OVR, ch, flag);
	}

	tty_flip_buffer_push(&port->state->port);
}
/*
 * Service the transmitter for one port: send any pending out-of-band
 * x_char first; if there is nothing to send, turn TX interrupts off once
 * the FIFO has drained; otherwise push bytes from the circular buffer
 * while the chip reports TXRDY.
 */
static void sccnxp_handle_tx(struct uart_port *port)
{
	u8 sr;
	struct circ_buf *xmit = &port->state->xmit;
	struct sccnxp_port *s = dev_get_drvdata(port->dev);

	/* x_char takes priority over normal data. */
	if (unlikely(port->x_char)) {
		sccnxp_port_write(port, SCCNXP_THR_REG, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		/* Disable TX if FIFO is empty */
		if (sccnxp_port_read(port, SCCNXP_SR_REG) & SR_TXEMT) {
			sccnxp_disable_irq(port, IMR_TXRDY);

			/* Set direction to input */
			if (s->flags & SCCNXP_HAVE_IO)
				sccnxp_set_bit(port, DIR_OP, 0);
		}
		return;
	}

	while (!uart_circ_empty(xmit)) {
		sr = sccnxp_port_read(port, SCCNXP_SR_REG);
		if (!(sr & SR_TXRDY))
			break;

		sccnxp_port_write(port, SCCNXP_THR_REG, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	/* Wake up writers once enough room has been freed. */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}
/*
 * Dispatch pending RX/TX work for every opened channel, re-reading the
 * masked interrupt status until nothing is pending.  Caller must hold
 * s->lock.
 */
static void sccnxp_handle_events(struct sccnxp_port *s)
{
	while (1) {
		u8 pending;
		int line;

		pending = sccnxp_read(&s->port[0], SCCNXP_ISR_REG) & s->imr;
		if (!pending)
			return;

		for (line = 0; line < s->uart.nr; line++) {
			if (!s->opened[line])
				continue;
			if (pending & ISR_RXRDY(line))
				sccnxp_handle_rx(&s->port[line]);
			if (pending & ISR_TXRDY(line))
				sccnxp_handle_tx(&s->port[line]);
		}
	}
}
/* Poll-mode tick: service the chip under the lock, then re-arm. */
static void sccnxp_timer(unsigned long data)
{
	struct sccnxp_port *s = (struct sccnxp_port *)data;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	sccnxp_handle_events(s);
	spin_unlock_irqrestore(&s->lock, flags);

	/* Schedule the next poll unless someone already re-armed us. */
	if (!timer_pending(&s->timer)) {
		unsigned long next;

		next = jiffies + usecs_to_jiffies(s->pdata.poll_time_us);
		mod_timer(&s->timer, next);
	}
}
/* Threaded IRQ handler: service all channels under the lock. */
static irqreturn_t sccnxp_ist(int irq, void *dev_id)
{
	struct sccnxp_port *s = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	sccnxp_handle_events(s);
	spin_unlock_irqrestore(&s->lock, flags);

	return IRQ_HANDLED;
}
/* Kick transmission: flip the DIR output (if routed) and unmask TXRDY. */
static void sccnxp_start_tx(struct uart_port *port)
{
	unsigned long flags;
	struct sccnxp_port *s = dev_get_drvdata(port->dev);

	spin_lock_irqsave(&s->lock, flags);

	/* Set direction to output before enabling the transmitter IRQ. */
	if (s->flags & SCCNXP_HAVE_IO)
		sccnxp_set_bit(port, DIR_OP, 1);

	sccnxp_enable_irq(port, IMR_TXRDY);

	spin_unlock_irqrestore(&s->lock, flags);
}
/* Intentionally empty: TX is stopped lazily in sccnxp_handle_tx(),
 * which masks TXRDY once the FIFO reports empty. */
static void sccnxp_stop_tx(struct uart_port *port)
{
	/* Do nothing */
}
/* Disable the receiver for this channel via the command register. */
static void sccnxp_stop_rx(struct uart_port *port)
{
	unsigned long flags;
	struct sccnxp_port *s = dev_get_drvdata(port->dev);

	spin_lock_irqsave(&s->lock, flags);
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE);
	spin_unlock_irqrestore(&s->lock, flags);
}
/* Report TIOCSER_TEMT once the transmitter is completely drained. */
static unsigned int sccnxp_tx_empty(struct uart_port *port)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	unsigned long flags;
	u8 sr;

	spin_lock_irqsave(&s->lock, flags);
	sr = sccnxp_port_read(port, SCCNXP_SR_REG);
	spin_unlock_irqrestore(&s->lock, flags);

	return (sr & SR_TXEMT) ? TIOCSER_TEMT : 0;
}
/* Intentionally empty: this driver has no modem-status-change interrupt
 * handling, so there is nothing to enable. */
static void sccnxp_enable_ms(struct uart_port *port)
{
	/* Do nothing */
}
/* Drive DTR/RTS through the chip's general-purpose outputs, if routed. */
static void sccnxp_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	unsigned long flags;
	struct sccnxp_port *s = dev_get_drvdata(port->dev);

	if (!(s->flags & SCCNXP_HAVE_IO))
		return;

	spin_lock_irqsave(&s->lock, flags);

	sccnxp_set_bit(port, DTR_OP, mctrl & TIOCM_DTR);
	sccnxp_set_bit(port, RTS_OP, mctrl & TIOCM_RTS);

	spin_unlock_irqrestore(&s->lock, flags);
}
/*
 * Map one routed modem-status input to its TIOCM_* bit in *mctrl.
 * @ipr is the inverted input-port state read from the chip; @sig is the
 * mctrl_cfg route slot (DSR_IP/CTS_IP/...); lines that are not routed
 * for this channel are left untouched.  Extracted to remove four copies
 * of identical mapping logic.
 */
static void sccnxp_mctrl_from_ipr(struct sccnxp_port *s,
				  struct uart_port *port, u8 ipr,
				  int sig, unsigned int tiocm,
				  unsigned int *mctrl)
{
	u8 bitmask;

	if (!(s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(sig)))
		return;

	bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line], sig);
	*mctrl &= ~tiocm;
	if (ipr & bitmask)
		*mctrl |= tiocm;
}

/*
 * Read modem-control state.  Without GPIO support, report DSR/CTS/CAR
 * permanently asserted (same defaults as before); otherwise sample the
 * input-port register and translate each routed line.
 */
static unsigned int sccnxp_get_mctrl(struct uart_port *port)
{
	u8 ipr;
	unsigned long flags;
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;

	if (!(s->flags & SCCNXP_HAVE_IO))
		return mctrl;

	spin_lock_irqsave(&s->lock, flags);

	/* Invert once up front, as the original per-line tests did. */
	ipr = ~sccnxp_read(port, SCCNXP_IPCR_REG);

	sccnxp_mctrl_from_ipr(s, port, ipr, DSR_IP, TIOCM_DSR, &mctrl);
	sccnxp_mctrl_from_ipr(s, port, ipr, CTS_IP, TIOCM_CTS, &mctrl);
	sccnxp_mctrl_from_ipr(s, port, ipr, DCD_IP, TIOCM_CAR, &mctrl);
	sccnxp_mctrl_from_ipr(s, port, ipr, RNG_IP, TIOCM_RNG, &mctrl);

	spin_unlock_irqrestore(&s->lock, flags);

	return mctrl;
}
static void sccnxp_break_ctl(struct uart_port *port, int break_state)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
sccnxp_port_write(port, SCCNXP_CR_REG, break_state ?
CR_CMD_START_BREAK : CR_CMD_STOP_BREAK);
spin_unlock_irqrestore(&s->lock, flags);
}
/*
 * Apply a new line discipline: resets the channel, programs word size,
 * parity and stop bits via the MR registers, rebuilds the read/ignore
 * status masks, and sets the closest achievable baud rate.  The sequence
 * of CR commands and MR writes follows the chip's indirect MR-pointer
 * access scheme, so statement order matters.
 */
static void sccnxp_set_termios(struct uart_port *port,
			       struct ktermios *termios, struct ktermios *old)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	unsigned long flags;
	u8 mr1, mr2;
	int baud;

	spin_lock_irqsave(&s->lock, flags);

	/* Mask termios capabilities we don't support */
	termios->c_cflag &= ~CMSPAR;

	/* Disable RX & TX, reset break condition, status and FIFOs */
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_RX_RESET |
					       CR_RX_DISABLE | CR_TX_DISABLE);
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_TX_RESET);
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_STATUS_RESET);
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_BREAK_RESET);

	/* Word size */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		mr1 = MR1_BITS_5;
		break;
	case CS6:
		mr1 = MR1_BITS_6;
		break;
	case CS7:
		mr1 = MR1_BITS_7;
		break;
	case CS8:
	default:
		mr1 = MR1_BITS_8;
		break;
	}

	/* Parity (even parity is the MR1 value 0, so nothing to OR in) */
	if (termios->c_cflag & PARENB) {
		if (termios->c_cflag & PARODD)
			mr1 |= MR1_PAR_ODD;
	} else
		mr1 |= MR1_PAR_NO;

	/* Stop bits */
	mr2 = (termios->c_cflag & CSTOPB) ? MR2_STOP2 : MR2_STOP1;

	/* Update desired format: MRPTR1 selects MR1, next write hits MR2 */
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_MRPTR1);
	sccnxp_port_write(port, SCCNXP_MR_REG, mr1);
	sccnxp_port_write(port, SCCNXP_MR_REG, mr2);

	/* Set read status mask */
	port->read_status_mask = SR_OVR;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= SR_PE | SR_FE;
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= SR_BRK;

	/* Set status ignore mask */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNBRK)
		port->ignore_status_mask |= SR_BRK;
	if (!(termios->c_cflag & CREAD))
		port->ignore_status_mask |= SR_PE | SR_OVR | SR_FE | SR_BRK;

	/* Setup baudrate (MR0-capable chips reach 230400) */
	baud = uart_get_baud_rate(port, termios, old, 50,
				  (s->flags & SCCNXP_HAVE_MR0) ?
				  230400 : 38400);
	baud = sccnxp_set_baud(port, baud);

	/* Update timeout according to new baud rate */
	uart_update_timeout(port, termios->c_cflag, baud);

	/* Report actual baudrate back to core */
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);

	/* Enable RX & TX */
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE);

	spin_unlock_irqrestore(&s->lock, flags);
}
/*
 * Open one channel: reset the channel state machines, enable RX/TX and
 * unmask the RX interrupt, then mark the line as opened so the event
 * loop will service it.  Always succeeds.
 */
static int sccnxp_startup(struct uart_port *port)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);

	if (s->flags & SCCNXP_HAVE_IO) {
		/* Outputs are controlled manually */
		sccnxp_write(port, SCCNXP_OPCR_REG, 0);
	}

	/* Reset break condition, status and FIFOs */
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_RX_RESET);
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_TX_RESET);
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_STATUS_RESET);
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_BREAK_RESET);

	/* Enable RX & TX */
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE);

	/* Enable RX interrupt */
	sccnxp_enable_irq(port, IMR_RXRDY);

	s->opened[port->line] = 1;

	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}
/*
 * Close one channel: mark it closed first so the event loop stops
 * touching it, then mask its interrupts, disable RX/TX, and park the
 * direction line (if routed) back to input.
 */
static void sccnxp_shutdown(struct uart_port *port)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);

	s->opened[port->line] = 0;

	/* Disable interrupts */
	sccnxp_disable_irq(port, IMR_TXRDY | IMR_RXRDY);

	/* Disable TX & RX */
	sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE | CR_TX_DISABLE);

	/* Leave direction to input */
	if (s->flags & SCCNXP_HAVE_IO)
		sccnxp_set_bit(port, DIR_OP, 0);

	spin_unlock_irqrestore(&s->lock, flags);
}
/* Return the chip model string, or NULL if the port type is foreign. */
static const char *sccnxp_type(struct uart_port *port)
{
	struct sccnxp_port *s = dev_get_drvdata(port->dev);

	if (port->type != PORT_SC26XX)
		return NULL;

	return s->name;
}
/* Intentionally empty: the MMIO region is claimed and released by the
 * devm-managed mapping set up in probe, not per-port. */
static void sccnxp_release_port(struct uart_port *port)
{
	/* Do nothing */
}
/* Intentionally empty (see sccnxp_release_port); always succeeds. */
static int sccnxp_request_port(struct uart_port *port)
{
	/* Do nothing */
	return 0;
}
/* Autoconfiguration hook: claim the port as a SC26xx-type device. */
static void sccnxp_config_port(struct uart_port *port, int flags)
{
	if (!(flags & UART_CONFIG_TYPE))
		return;

	port->type = PORT_SC26XX;
}
/* Validate user-supplied serial settings: accept matching/unknown type
 * or a matching IRQ; reject anything else. */
static int sccnxp_verify_port(struct uart_port *port, struct serial_struct *s)
{
	if (s->type == PORT_UNKNOWN || s->type == PORT_SC26XX)
		return 0;
	if (s->irq == port->irq)
		return 0;

	return -EINVAL;
}
/* serial-core operations table shared by all channels. */
static const struct uart_ops sccnxp_ops = {
	.tx_empty	= sccnxp_tx_empty,
	.set_mctrl	= sccnxp_set_mctrl,
	.get_mctrl	= sccnxp_get_mctrl,
	.stop_tx	= sccnxp_stop_tx,
	.start_tx	= sccnxp_start_tx,
	.stop_rx	= sccnxp_stop_rx,
	.enable_ms	= sccnxp_enable_ms,
	.break_ctl	= sccnxp_break_ctl,
	.startup	= sccnxp_startup,
	.shutdown	= sccnxp_shutdown,
	.set_termios	= sccnxp_set_termios,
	.type		= sccnxp_type,
	.release_port	= sccnxp_release_port,
	.request_port	= sccnxp_request_port,
	.config_port	= sccnxp_config_port,
	.verify_port	= sccnxp_verify_port,
};
#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE
/* Busy-wait (bounded) for TXRDY, then emit one console character.
 * The character is silently dropped if the transmitter never readies. */
static void sccnxp_console_putchar(struct uart_port *port, int c)
{
	int tries;

	for (tries = 100000; tries > 0; tries--) {
		if (sccnxp_port_read(port, SCCNXP_SR_REG) & SR_TXRDY) {
			sccnxp_port_write(port, SCCNXP_THR_REG, c);
			return;
		}
		barrier();
	}
}
/* Console write callback: serialize against the driver lock and hand
 * the buffer to the serial core's polled writer. */
static void sccnxp_console_write(struct console *co, const char *c, unsigned n)
{
	struct sccnxp_port *s = (struct sccnxp_port *)co->data;
	struct uart_port *port;
	unsigned long flags;

	port = &s->port[co->index];

	spin_lock_irqsave(&s->lock, flags);
	uart_console_write(port, c, n, sccnxp_console_putchar);
	spin_unlock_irqrestore(&s->lock, flags);
}
/* Console setup: parse "baud,parity,bits,flow" options (default 9600n8)
 * and apply them to the selected channel (falling back to channel 0). */
static int sccnxp_console_setup(struct console *co, char *options)
{
	struct sccnxp_port *s = (struct sccnxp_port *)co->data;
	int idx = (co->index > 0) ? co->index : 0;
	struct uart_port *port = &s->port[idx];
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, co, baud, parity, bits, flow);
}
#endif
/*
 * Probe one SCC/SCN chip: map its registers, register the UART driver
 * plus one uart_port per channel, then attach either a threaded IRQ
 * handler or a polling timer depending on platform data.
 *
 * Fixes vs. the previous version:
 *  - drop the redundant init_timer() (setup_timer() fully initializes);
 *  - regulator_enable() failure now goes through err_out so drvdata is
 *    cleared consistently with every other failure path;
 *  - IRQ-request failure now unregisters the ports and the UART driver
 *    instead of leaking them;
 *  - "reguest" typo fixed in the error message.
 */
static int sccnxp_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int chiptype = pdev->id_entry->driver_data;
	struct sccnxp_pdata *pdata = dev_get_platdata(&pdev->dev);
	int i, ret, fifosize, freq_min, freq_max;
	struct sccnxp_port *s;
	void __iomem *membase;

	if (!res) {
		dev_err(&pdev->dev, "Missing memory resource data\n");
		return -EADDRNOTAVAIL;
	}

	s = devm_kzalloc(&pdev->dev, sizeof(struct sccnxp_port), GFP_KERNEL);
	if (!s) {
		dev_err(&pdev->dev, "Error allocating port structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, s);

	spin_lock_init(&s->lock);

	/* Individual chip settings */
	switch (chiptype) {
	case SCCNXP_TYPE_SC2681:
		s->name		= "SC2681";
		s->uart.nr	= 2;
		s->freq_std	= 3686400;
		s->addr_mask	= 0x0f;
		s->flags	= SCCNXP_HAVE_IO;
		fifosize	= 3;
		freq_min	= 1000000;
		freq_max	= 4000000;
		break;
	case SCCNXP_TYPE_SC2691:
		s->name		= "SC2691";
		s->uart.nr	= 1;
		s->freq_std	= 3686400;
		s->addr_mask	= 0x07;
		s->flags	= 0;
		fifosize	= 3;
		freq_min	= 1000000;
		freq_max	= 4000000;
		break;
	case SCCNXP_TYPE_SC2692:
		s->name		= "SC2692";
		s->uart.nr	= 2;
		s->freq_std	= 3686400;
		s->addr_mask	= 0x0f;
		s->flags	= SCCNXP_HAVE_IO;
		fifosize	= 3;
		freq_min	= 1000000;
		freq_max	= 4000000;
		break;
	case SCCNXP_TYPE_SC2891:
		s->name		= "SC2891";
		s->uart.nr	= 1;
		s->freq_std	= 3686400;
		s->addr_mask	= 0x0f;
		s->flags	= SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0;
		fifosize	= 16;
		freq_min	= 100000;
		freq_max	= 8000000;
		break;
	case SCCNXP_TYPE_SC2892:
		s->name		= "SC2892";
		s->uart.nr	= 2;
		s->freq_std	= 3686400;
		s->addr_mask	= 0x0f;
		s->flags	= SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0;
		fifosize	= 16;
		freq_min	= 100000;
		freq_max	= 8000000;
		break;
	case SCCNXP_TYPE_SC28202:
		s->name		= "SC28202";
		s->uart.nr	= 2;
		s->freq_std	= 14745600;
		s->addr_mask	= 0x7f;
		s->flags	= SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0;
		fifosize	= 256;
		freq_min	= 1000000;
		freq_max	= 50000000;
		break;
	case SCCNXP_TYPE_SC68681:
		s->name		= "SC68681";
		s->uart.nr	= 2;
		s->freq_std	= 3686400;
		s->addr_mask	= 0x0f;
		s->flags	= SCCNXP_HAVE_IO;
		fifosize	= 3;
		freq_min	= 1000000;
		freq_max	= 4000000;
		break;
	case SCCNXP_TYPE_SC68692:
		s->name		= "SC68692";
		s->uart.nr	= 2;
		s->freq_std	= 3686400;
		s->addr_mask	= 0x0f;
		s->flags	= SCCNXP_HAVE_IO;
		fifosize	= 3;
		freq_min	= 1000000;
		freq_max	= 4000000;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported chip type %i\n", chiptype);
		ret = -ENOTSUPP;
		goto err_out;
	}

	if (!pdata) {
		dev_warn(&pdev->dev,
			 "No platform data supplied, using defaults\n");
		s->pdata.frequency = s->freq_std;
	} else
		memcpy(&s->pdata, pdata, sizeof(struct sccnxp_pdata));

	if (s->pdata.poll_time_us) {
		dev_info(&pdev->dev, "Using poll mode, resolution %u usecs\n",
			 s->pdata.poll_time_us);
		s->poll = 1;
	}

	if (!s->poll) {
		s->irq = platform_get_irq(pdev, 0);
		if (s->irq < 0) {
			dev_err(&pdev->dev, "Missing irq resource data\n");
			ret = -ENXIO;
			goto err_out;
		}
	}

	/* Check input frequency */
	if ((s->pdata.frequency < freq_min) ||
	    (s->pdata.frequency > freq_max)) {
		dev_err(&pdev->dev, "Frequency out of bounds\n");
		ret = -EINVAL;
		goto err_out;
	}

	/* The "VCC" supply is optional; only enable it when present. */
	s->regulator = devm_regulator_get(&pdev->dev, "VCC");
	if (!IS_ERR(s->regulator)) {
		ret = regulator_enable(s->regulator);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable regulator: %i\n", ret);
			goto err_out;
		}
	}

	membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(membase)) {
		ret = PTR_ERR(membase);
		goto err_out;
	}

	s->uart.owner		= THIS_MODULE;
	s->uart.dev_name	= "ttySC";
	s->uart.major		= SCCNXP_MAJOR;
	s->uart.minor		= SCCNXP_MINOR;
#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE
	s->uart.cons		= &s->console;
	s->uart.cons->device	= uart_console_device;
	s->uart.cons->write	= sccnxp_console_write;
	s->uart.cons->setup	= sccnxp_console_setup;
	s->uart.cons->flags	= CON_PRINTBUFFER;
	s->uart.cons->index	= -1;
	s->uart.cons->data	= s;
	strcpy(s->uart.cons->name, "ttySC");
#endif
	ret = uart_register_driver(&s->uart);
	if (ret) {
		dev_err(&pdev->dev, "Registering UART driver failed\n");
		goto err_out;
	}

	for (i = 0; i < s->uart.nr; i++) {
		s->port[i].line		= i;
		s->port[i].dev		= &pdev->dev;
		s->port[i].irq		= s->irq;
		s->port[i].type		= PORT_SC26XX;
		s->port[i].fifosize	= fifosize;
		s->port[i].flags	= UPF_SKIP_TEST | UPF_FIXED_TYPE;
		s->port[i].iotype	= UPIO_MEM;
		s->port[i].mapbase	= res->start;
		s->port[i].membase	= membase;
		s->port[i].regshift	= s->pdata.reg_shift;
		s->port[i].uartclk	= s->pdata.frequency;
		s->port[i].ops		= &sccnxp_ops;
		uart_add_one_port(&s->uart, &s->port[i]);
		/* Set direction to input */
		if (s->flags & SCCNXP_HAVE_IO)
			sccnxp_set_bit(&s->port[i], DIR_OP, 0);
	}

	/* Disable interrupts */
	s->imr = 0;
	sccnxp_write(&s->port[0], SCCNXP_IMR_REG, 0);

	if (!s->poll) {
		ret = devm_request_threaded_irq(&pdev->dev, s->irq, NULL,
						sccnxp_ist,
						IRQF_TRIGGER_FALLING |
						IRQF_ONESHOT,
						dev_name(&pdev->dev), s);
		if (!ret)
			return 0;

		dev_err(&pdev->dev, "Unable to request IRQ %i\n", s->irq);
		goto err_unreg;
	}

	/* setup_timer() fully initializes the timer; no init_timer() needed */
	setup_timer(&s->timer, sccnxp_timer, (unsigned long)s);
	mod_timer(&s->timer, jiffies +
		  usecs_to_jiffies(s->pdata.poll_time_us));
	return 0;

err_unreg:
	for (i = 0; i < s->uart.nr; i++)
		uart_remove_one_port(&s->uart, &s->port[i]);
	uart_unregister_driver(&s->uart);
err_out:
	/* NOTE(review): a regulator enabled above stays enabled on failure;
	 * consider regulator_disable() here — confirm power sequencing. */
	platform_set_drvdata(pdev, NULL);

	return ret;
}
/*
 * Tear down the device.  Event delivery (IRQ or poll timer) is stopped
 * first so no handler can touch ports that are being removed, then the
 * ports and the driver are unregistered and the supply is dropped.
 */
static int sccnxp_remove(struct platform_device *pdev)
{
	int i;
	struct sccnxp_port *s = platform_get_drvdata(pdev);

	/* Quiesce before removing ports. */
	if (!s->poll)
		devm_free_irq(&pdev->dev, s->irq, s);
	else
		del_timer_sync(&s->timer);

	for (i = 0; i < s->uart.nr; i++)
		uart_remove_one_port(&s->uart, &s->port[i]);

	uart_unregister_driver(&s->uart);
	platform_set_drvdata(pdev, NULL);

	/* Regulator was only enabled in probe when devm_regulator_get()
	 * succeeded, mirrored by the IS_ERR check here. */
	if (!IS_ERR(s->regulator))
		return regulator_disable(s->regulator);

	return 0;
}
/* Platform-device ids; driver_data carries the SCCNXP_TYPE_* chip type. */
static const struct platform_device_id sccnxp_id_table[] = {
	{ "sc2681",	SCCNXP_TYPE_SC2681 },
	{ "sc2691",	SCCNXP_TYPE_SC2691 },
	{ "sc2692",	SCCNXP_TYPE_SC2692 },
	{ "sc2891",	SCCNXP_TYPE_SC2891 },
	{ "sc2892",	SCCNXP_TYPE_SC2892 },
	{ "sc28202",	SCCNXP_TYPE_SC28202 },
	{ "sc68681",	SCCNXP_TYPE_SC68681 },
	{ "sc68692",	SCCNXP_TYPE_SC68692 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sccnxp_id_table);
/* Platform-driver glue; the chip variant is selected via the id table. */
static struct platform_driver sccnxp_uart_driver = {
	.driver = {
		.name	= SCCNXP_NAME,
		.owner	= THIS_MODULE,
	},
	.probe		= sccnxp_probe,
	.remove		= sccnxp_remove,
	.id_table	= sccnxp_id_table,
};
module_platform_driver(sccnxp_uart_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("SCCNXP serial driver");
| gpl-2.0 |
jdkoreclipse/incrediblec_2.6.38 | net/ipv4/netfilter/nf_defrag_ipv4.c | 2835 | 3301 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/ip.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/netfilter/nf_conntrack_zones.h>
/* Returns new sk_buff, or NULL */
/* Hand an IPv4 fragment to the reassembly queue.  Returns 0 once a full
 * datagram is available (checksum refreshed), non-zero otherwise. */
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	int err;

	skb_orphan(skb);

	local_bh_disable();
	err = ip_defrag(skb, user);
	local_bh_enable();

	if (err)
		return err;

	ip_send_check(ip_hdr(skb));
	return 0;
}
/* Pick the defrag-queue owner id for this hook/skb, offset by the
 * conntrack zone so different zones never share reassembly queues. */
static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
					      struct sk_buff *skb)
{
	u16 zone = NF_CT_DEFAULT_ZONE;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	if (skb->nfct)
		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge &&
	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
		return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
#endif
	return (hooknum == NF_INET_PRE_ROUTING) ?
	       IP_DEFRAG_CONNTRACK_IN + zone :
	       IP_DEFRAG_CONNTRACK_OUT + zone;
}
/*
 * Netfilter hook: reassemble IPv4 fragments before conntrack sees them.
 * Returns NF_STOLEN when the skb was consumed by the fragment queue
 * (no complete datagram yet), otherwise NF_ACCEPT.
 */
static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
					  struct sk_buff *skb,
					  const struct net_device *in,
					  const struct net_device *out,
					  int (*okfn)(struct sk_buff *))
{
	struct sock *sk = skb->sk;
	/* inet_sk() is a cast only; inet->nodefrag is read solely behind
	 * the sk != NULL check below. */
	struct inet_sock *inet = inet_sk(skb->sk);

	/* Locally attached sockets may opt out via IP_NODEFRAG. */
	if (sk && (sk->sk_family == PF_INET) &&
	    inet->nodefrag)
		return NF_ACCEPT;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
	/* Previously seen (loopback)?  Ignore.  Do this before
	   fragment check. */
	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
		return NF_ACCEPT;
#endif
#endif
	/* Gather fragments. */
	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
		enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
		if (nf_ct_ipv4_gather_frags(skb, user))
			return NF_STOLEN;
	}
	return NF_ACCEPT;
}
/* Hook registrations: defragment incoming (PRE_ROUTING) and locally
 * generated (LOCAL_OUT) packets at conntrack-defrag priority. */
static struct nf_hook_ops ipv4_defrag_ops[] = {
	{
		.hook		= ipv4_conntrack_defrag,
		.owner		= THIS_MODULE,
		.pf		= PF_INET,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_CONNTRACK_DEFRAG,
	},
	{
		.hook		= ipv4_conntrack_defrag,
		.owner		= THIS_MODULE,
		.pf		= PF_INET,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP_PRI_CONNTRACK_DEFRAG,
	},
};
/* Module init: install both defrag hooks atomically. */
static int __init nf_defrag_init(void)
{
	return nf_register_hooks(ipv4_defrag_ops,
				 ARRAY_SIZE(ipv4_defrag_ops));
}
/* Module exit: remove both defrag hooks. */
static void __exit nf_defrag_fini(void)
{
	nf_unregister_hooks(ipv4_defrag_ops,
			    ARRAY_SIZE(ipv4_defrag_ops));
}
/*
 * Intentionally empty.  NOTE(review): presumably callers invoke this so
 * the symbol reference pulls this module in as a dependency, which loads
 * the hooks registered at module init — confirm against the callers in
 * the conntrack code.
 */
void nf_defrag_ipv4_enable(void)
{
}
EXPORT_SYMBOL_GPL(nf_defrag_ipv4_enable);
module_init(nf_defrag_init);
module_exit(nf_defrag_fini);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Neph81/cm-kernel_lge_iproj | arch/mips/mm/sc-ip22.c | 2835 | 3889 | /*
* sc-ip22.c: Indy cache management functions.
*
* Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
* derived from r4xx0.c by David S. Miller (davem@davemloft.net).
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/bcache.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/mc.h>
/* Secondary cache size in bytes, if present. */
static unsigned long scache_size;
#undef DEBUG_CACHE
#define SC_SIZE 0x00080000
#define SC_LINE 32
#define CI_MASK (SC_SIZE - SC_LINE)
#define SC_INDEX(n) ((n) & CI_MASK)
/*
 * Flush the secondary cache index range [first, last] by storing zero
 * to each 32-byte line.  Saves CP0 Status ($12), writes 0x80 to it
 * (presumably setting KX to allow 64-bit addressing — confirm against
 * the R4600 manual), ORs the line indices into the
 * 0x9000000080000000 window, walks the range in 32-byte steps, then
 * restores the previous Status value.  Caller must hold IRQs off.
 */
static inline void indy_sc_wipe(unsigned long first, unsigned long last)
{
unsigned long tmp;
__asm__ __volatile__(
".set\tpush\t\t\t# indy_sc_wipe\n\t"
".set\tnoreorder\n\t"
".set\tmips3\n\t"
".set\tnoat\n\t"
"mfc0\t%2, $12\n\t"
"li\t$1, 0x80\t\t\t# Go 64 bit\n\t"
"mtc0\t$1, $12\n\t"
"dli\t$1, 0x9000000080000000\n\t"
"or\t%0, $1\t\t\t# first line to flush\n\t"
"or\t%1, $1\t\t\t# last line to flush\n\t"
".set\tat\n\t"
"1:\tsw\t$0, 0(%0)\n\t"
"bne\t%0, %1, 1b\n\t"
" daddu\t%0, 32\n\t"
"mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t"
"nop; nop; nop; nop;\n\t"
".set\tpop"
: "=r" (first), "=r" (last), "=&r" (tmp)
: "0" (first), "1" (last));
}
/*
 * Write back and invalidate the secondary-cache lines covering
 * [addr, addr + size).  Because SC_INDEX() maps addresses modulo the
 * cache size, a range may wrap around the index space; in that case it
 * is flushed in two pieces.  Runs with local interrupts disabled.
 */
static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size)
{
	unsigned long start_line, end_line;
	unsigned long flags;

#ifdef DEBUG_CACHE
	printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size);
#endif
	/* Catch bad driver code */
	BUG_ON(size == 0);

	/* Map the byte range onto cache-line indices. */
	start_line = SC_INDEX(addr);
	end_line = SC_INDEX(addr + size - 1);

	local_irq_save(flags);
	if (start_line > end_line) {
		/* Range wraps: flush tail of the cache, then the head. */
		indy_sc_wipe(start_line, SC_SIZE - SC_LINE);
		indy_sc_wipe(0, end_line);
	} else {
		indy_sc_wipe(start_line, end_line);
	}
	local_irq_restore(flags);
}
/*
 * Turn the R4600 secondary cache on.  Saves CP0 Status, switches
 * addressing mode (writes 0x80 to Status), builds the magic control
 * address 0x9000000080000000 and stores a byte there — presumably the
 * cache controller's enable strobe (confirm against IP22 docs) — then
 * restores Status.
 */
static void indy_sc_enable(void)
{
unsigned long addr, tmp1, tmp2;
/* This is really cool... */
#ifdef DEBUG_CACHE
printk("Enabling R4600 SCACHE\n");
#endif
__asm__ __volatile__(
".set\tpush\n\t"
".set\tnoreorder\n\t"
".set\tmips3\n\t"
"mfc0\t%2, $12\n\t"
"nop; nop; nop; nop;\n\t"
"li\t%1, 0x80\n\t"
"mtc0\t%1, $12\n\t"
"nop; nop; nop; nop;\n\t"
"li\t%0, 0x1\n\t"
"dsll\t%0, 31\n\t"
"lui\t%1, 0x9000\n\t"
"dsll32\t%1, 0\n\t"
"or\t%0, %1, %0\n\t"
"sb\t$0, 0(%0)\n\t"
"mtc0\t$0, $12\n\t"
"nop; nop; nop; nop;\n\t"
"mtc0\t%2, $12\n\t"
"nop; nop; nop; nop;\n\t"
".set\tpop"
: "=r" (tmp1), "=r" (tmp2), "=r" (addr));
}
/*
 * Turn the R4600 secondary cache off.  Mirror image of
 * indy_sc_enable(), except the store to the magic address is a
 * halfword (sh) rather than a byte — presumably that selects the
 * disable function in the controller (confirm against IP22 docs).
 */
static void indy_sc_disable(void)
{
unsigned long tmp1, tmp2, tmp3;
#ifdef DEBUG_CACHE
printk("Disabling R4600 SCACHE\n");
#endif
__asm__ __volatile__(
".set\tpush\n\t"
".set\tnoreorder\n\t"
".set\tmips3\n\t"
"li\t%0, 0x1\n\t"
"dsll\t%0, 31\n\t"
"lui\t%1, 0x9000\n\t"
"dsll32\t%1, 0\n\t"
"or\t%0, %1, %0\n\t"
"mfc0\t%2, $12\n\t"
"nop; nop; nop; nop\n\t"
"li\t%1, 0x80\n\t"
"mtc0\t%1, $12\n\t"
"nop; nop; nop; nop\n\t"
"sh\t$0, 0(%0)\n\t"
"mtc0\t$0, $12\n\t"
"nop; nop; nop; nop\n\t"
"mtc0\t%2, $12\n\t"
"nop; nop; nop; nop\n\t"
".set\tpop"
: "=r" (tmp1), "=r" (tmp2), "=r" (tmp3));
}
/*
 * Probe for a secondary cache.  MC EEPROM word 17 holds the scache
 * size in pages (0 means no scache fitted).  On success records the
 * size in bytes in scache_size and returns 1; returns 0 otherwise.
 */
static inline int __init indy_sc_probe(void)
{
	unsigned int sz;

	sz = ip22_eeprom_read(&sgimc->eeprom, 17);
	if (!sz)
		return 0;

	sz <<= PAGE_SHIFT;
	printk(KERN_INFO "R4600/R5000 SCACHE size %dK, linesize 32 bytes.\n",
	       sz >> 10);
	scache_size = sz;

	return 1;
}
/* XXX Check with wje if the Indy caches can differenciate between
   writeback + invalidate and just invalidate. */
/* bcache operations table installed by indy_sc_init(); note bc_inv
   falls back to the combined writeback+invalidate routine. */
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
.bc_disable = indy_sc_disable,
.bc_wback_inv = indy_sc_wback_invalidate,
.bc_inv = indy_sc_wback_invalidate
};
/*
 * Boot-time entry point: if a secondary cache is present, enable it
 * and install the Indy bcache operations.
 */
void __cpuinit indy_sc_init(void)
{
	if (!indy_sc_probe())
		return;

	indy_sc_enable();
	bcops = &indy_sc_ops;
}
| gpl-2.0 |
AD5GB/kernel_n5_3.10-experimental | drivers/leds/trigger/ledtrig-oneshot.c | 3091 | 5219 | /*
* One-shot LED Trigger
*
* Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
*
* Based on ledtrig-timer.c by Richard Purdie <rpurdie@openedhand.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include "../leds.h"
#define DEFAULT_DELAY 100
/* Per-LED trigger state, allocated on activate and freed on
 * deactivate.  invert: 0 = LED rests off and blinks on; nonzero =
 * rests on and blinks off. */
struct oneshot_trig_data {
unsigned int invert;
};
/*
 * sysfs "shot" write handler: fire a single blink cycle using the
 * currently configured delays and invert flag.  The written bytes are
 * ignored; any write triggers one shot.
 */
static ssize_t led_shot(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct oneshot_trig_data *otd = led_cdev->trigger_data;

	led_blink_set_oneshot(led_cdev,
		&led_cdev->blink_delay_on, &led_cdev->blink_delay_off,
		otd->invert);

	/* content is ignored */
	return size;
}
/* sysfs "invert" read handler: report the invert flag as 0 or 1. */
static ssize_t led_invert_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct oneshot_trig_data *otd = led_cdev->trigger_data;

	return sprintf(buf, "%u\n", otd->invert);
}
/*
 * sysfs "invert" write handler: parse an integer, store it as a 0/1
 * flag, and move the LED to the matching resting brightness (full when
 * inverted, off otherwise).
 */
static ssize_t led_invert_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct oneshot_trig_data *otd = led_cdev->trigger_data;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	otd->invert = !!val;

	/* Resting brightness follows the invert setting. */
	__led_set_brightness(led_cdev, otd->invert ? LED_FULL : LED_OFF);

	return size;
}
/* sysfs "delay_on" read handler: on-phase duration in milliseconds. */
static ssize_t led_delay_on_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);

	return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
}
/* sysfs "delay_on" write handler: set the on-phase duration (ms). */
static ssize_t led_delay_on_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	led_cdev->blink_delay_on = val;

	return size;
}
/* sysfs "delay_off" read handler: off-phase duration in milliseconds. */
static ssize_t led_delay_off_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);

	return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
}
/* sysfs "delay_off" write handler: set the off-phase duration (ms). */
static ssize_t led_delay_off_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	led_cdev->blink_delay_off = val;

	return size;
}
static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
static DEVICE_ATTR(shot, 0200, NULL, led_shot);
static void oneshot_trig_activate(struct led_classdev *led_cdev)
{
struct oneshot_trig_data *oneshot_data;
int rc;
oneshot_data = kzalloc(sizeof(*oneshot_data), GFP_KERNEL);
if (!oneshot_data)
return;
led_cdev->trigger_data = oneshot_data;
rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
if (rc)
goto err_out_trig_data;
rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
if (rc)
goto err_out_delayon;
rc = device_create_file(led_cdev->dev, &dev_attr_invert);
if (rc)
goto err_out_delayoff;
rc = device_create_file(led_cdev->dev, &dev_attr_shot);
if (rc)
goto err_out_invert;
led_cdev->blink_delay_on = DEFAULT_DELAY;
led_cdev->blink_delay_off = DEFAULT_DELAY;
led_cdev->activated = true;
return;
err_out_invert:
device_remove_file(led_cdev->dev, &dev_attr_invert);
err_out_delayoff:
device_remove_file(led_cdev->dev, &dev_attr_delay_off);
err_out_delayon:
device_remove_file(led_cdev->dev, &dev_attr_delay_on);
err_out_trig_data:
kfree(led_cdev->trigger_data);
}
static void oneshot_trig_deactivate(struct led_classdev *led_cdev)
{
struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
if (led_cdev->activated) {
device_remove_file(led_cdev->dev, &dev_attr_delay_on);
device_remove_file(led_cdev->dev, &dev_attr_delay_off);
device_remove_file(led_cdev->dev, &dev_attr_invert);
device_remove_file(led_cdev->dev, &dev_attr_shot);
kfree(oneshot_data);
led_cdev->activated = false;
}
/* Stop blinking */
led_set_brightness(led_cdev, LED_OFF);
}
/* Trigger descriptor registered with the LED core as "oneshot". */
static struct led_trigger oneshot_led_trigger = {
.name     = "oneshot",
.activate = oneshot_trig_activate,
.deactivate = oneshot_trig_deactivate,
};
/* Module init: register the oneshot trigger with the LED core. */
static int __init oneshot_trig_init(void)
{
return led_trigger_register(&oneshot_led_trigger);
}
/* Module exit: unregister the oneshot trigger. */
static void __exit oneshot_trig_exit(void)
{
led_trigger_unregister(&oneshot_led_trigger);
}
module_init(oneshot_trig_init);
module_exit(oneshot_trig_exit);
MODULE_AUTHOR("Fabio Baltieri <fabio.baltieri@gmail.com>");
MODULE_DESCRIPTION("One-shot LED trigger");
MODULE_LICENSE("GPL");
| gpl-2.0 |
OlegKyiashko/LGOGP-kernel | drivers/usb/serial/keyspan.c | 3603 | 68132 | /*
Keyspan USB to Serial Converter driver
(C) Copyright (C) 2000-2001 Hugh Blemings <hugh@blemings.org>
(C) Copyright (C) 2002 Greg Kroah-Hartman <greg@kroah.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
See http://blemings.org/hugh/keyspan.html for more information.
Code in this driver inspired by and in a number of places taken
from Brian Warner's original Keyspan-PDA driver.
This driver has been put together with the support of Innosys, Inc.
and Keyspan, Inc the manufacturers of the Keyspan USB-serial products.
Thanks Guys :)
Thanks to Paulus for miscellaneous tidy ups, some largish chunks
of much nicer and/or completely new code and (perhaps most uniquely)
having the patience to sit down and explain why and where he'd changed
stuff.
Tip 'o the hat to IBM (and previously Linuxcare :) for supporting
staff in their work on open source projects.
*/
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "keyspan.h"
static bool debug;
/*
* Version Information
*/
#define DRIVER_VERSION "v1.1.5"
#define DRIVER_AUTHOR "Hugh Blemings <hugh@misc.nu"
#define DRIVER_DESC "Keyspan USB to Serial Converter Driver"
#define INSTAT_BUFLEN 32
#define GLOCONT_BUFLEN 64
#define INDAT49W_BUFLEN 512
/* Per device and per port private data */
/* Per-USB-device state: the shared (non-per-port) URBs and their
 * buffers. */
struct keyspan_serial_private {
const struct keyspan_device_details *device_details;
/* shared interrupt-in status endpoint */
struct urb *instat_urb;
char instat_buf[INSTAT_BUFLEN];
/* added to support 49wg, where data from all 4 ports comes in
on 1 EP and high-speed supported */
struct urb *indat_urb;
char indat_buf[INDAT49W_BUFLEN];
/* XXX this one probably will need a lock */
struct urb *glocont_urb;
char glocont_buf[GLOCONT_BUFLEN];
char ctrl_buf[8]; /* for EP0 control message */
};
/* Per-port state: endpoint URBs, double buffers, modem-line shadow
 * state and the current termios-derived settings. */
struct keyspan_port_private {
/* Keep track of which input & output endpoints to use */
int in_flip;
int out_flip;
/* Keep duplicate of device details in each port
structure as well - simplifies some of the
callback functions etc. */
const struct keyspan_device_details *device_details;
/* Input endpoints and buffer for this port */
struct urb *in_urbs[2];
char in_buffer[2][64];
/* Output endpoints and buffer for this port */
struct urb *out_urbs[2];
char out_buffer[2][64];
/* Input ack endpoint */
struct urb *inack_urb;
char inack_buffer[1];
/* Output control endpoint */
struct urb *outcont_urb;
char outcont_buffer[64];
/* Settings for the port */
int baud;
int old_baud;
unsigned int cflag;
unsigned int old_cflag;
enum {flow_none, flow_cts, flow_xon} flow_control;
int rts_state; /* Handshaking pins (outputs) */
int dtr_state;
int cts_state; /* Handshaking pins (inputs) */
int dsr_state;
int dcd_state;
int ri_state;
int break_on;
/* jiffies when each out URB was last submitted (stall detect) */
unsigned long tx_start_time[2];
int resend_cont; /* need to resend control packet */
};
/* Include Keyspan message headers. All current Keyspan Adapters
make use of one of five message formats which are referred
to as USA-26, USA-28, USA-49, USA-90, USA-67 by Keyspan and
within this driver. */
#include "keyspan_usa26msg.h"
#include "keyspan_usa28msg.h"
#include "keyspan_usa49msg.h"
#include "keyspan_usa90msg.h"
#include "keyspan_usa67msg.h"
module_usb_serial_driver(keyspan_driver, serial_drivers);
/*
 * tty break_ctl hook: -1 turns break on, any other value clears it.
 * The new state is pushed to the adapter via a setup message.
 */
static void keyspan_break_ctl(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;
	struct keyspan_port_private *p_priv;

	dbg("%s", __func__);

	p_priv = usb_get_serial_port_data(port);

	p_priv->break_on = (break_state == -1) ? 1 : 0;

	keyspan_send_setup(port, 0);
}
/*
 * tty set_termios hook: validate and record the requested baud rate,
 * cache the new cflag, pick CTS or no flow control, and push the
 * settings to the adapter.  Mark/space parity is not supported and is
 * masked out of the termios.
 */
static void keyspan_set_termios(struct tty_struct *tty,
	struct usb_serial_port *port, struct ktermios *old_termios)
{
	int baud_rate, device_port;
	struct keyspan_port_private *p_priv;
	const struct keyspan_device_details *d_details;
	unsigned int cflag;

	dbg("%s", __func__);

	p_priv = usb_get_serial_port_data(port);
	d_details = p_priv->device_details;
	cflag = tty->termios->c_cflag;
	device_port = port->number - port->serial->minor;

	/* Baud rate calculation takes baud rate as an integer
	   so other rates can be generated if desired. */
	baud_rate = tty_get_baud_rate(tty);
	if (d_details->calculate_baud_rate(baud_rate, d_details->baudclk,
			NULL, NULL, NULL, device_port) != KEYSPAN_BAUD_RATE_OK) {
		/* No divisor for the requested rate: keep the old rate. */
		baud_rate = tty_termios_baud_rate(old_termios);
	} else {
		/* FIXME - more to do here to ensure rate changes cleanly */
		/* FIXME - calcuate exact rate from divisor ? */
		p_priv->baud = baud_rate;
	}
	tty_encode_baud_rate(tty, baud_rate, baud_rate);

	/* set CTS/RTS handshake etc. */
	p_priv->cflag = cflag;
	if (cflag & CRTSCTS)
		p_priv->flow_control = flow_cts;
	else
		p_priv->flow_control = flow_none;

	/* Mark/Space not supported */
	tty->termios->c_cflag &= ~CMSPAR;

	keyspan_send_setup(port, 0);
}
/*
 * tty tiocmget hook: fold the cached modem-line shadow state into a
 * TIOCM_* bitmask.  No hardware access; the cache is kept current by
 * the status-URB callbacks.
 */
static int keyspan_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);
	unsigned int bits = 0;

	if (p_priv->rts_state)
		bits |= TIOCM_RTS;
	if (p_priv->dtr_state)
		bits |= TIOCM_DTR;
	if (p_priv->cts_state)
		bits |= TIOCM_CTS;
	if (p_priv->dsr_state)
		bits |= TIOCM_DSR;
	if (p_priv->dcd_state)
		bits |= TIOCM_CAR;
	if (p_priv->ri_state)
		bits |= TIOCM_RNG;

	return bits;
}
/*
 * tty tiocmset hook: update the RTS/DTR shadow state from the set and
 * clear masks (clear wins if a bit appears in both), then push the new
 * state to the adapter.  Always returns 0.
 */
static int keyspan_tiocmset(struct tty_struct *tty,
	unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);

	if (set & TIOCM_RTS)
		p_priv->rts_state = 1;
	if (clear & TIOCM_RTS)
		p_priv->rts_state = 0;

	if (set & TIOCM_DTR)
		p_priv->dtr_state = 1;
	if (clear & TIOCM_DTR)
		p_priv->dtr_state = 0;

	keyspan_send_setup(port, 0);
	return 0;
}
/* Write function is similar for the four protocols used
   with only a minor change for usa90 (usa19hs) required */
/*
 * tty write hook.  Data is chunked into the two alternating ("flip")
 * out-URBs; usa90 frames are raw 64-byte payloads, the other formats
 * reserve byte 0 as a flag byte (sent as zero here).  If the current
 * URB is still in flight the write stops early — unless it has been
 * stuck for over 10 s, in which case it is unlinked.  Returns the
 * number of bytes accepted.
 */
static int keyspan_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
int flip;
int left, todo;
struct urb *this_urb;
int err, maxDataLen, dataOffset;
p_priv = usb_get_serial_port_data(port);
d_details = p_priv->device_details;
/* usa90 has no leading flag byte, so the full 64 bytes are data */
if (d_details->msg_format == msg_usa90) {
maxDataLen = 64;
dataOffset = 0;
} else {
maxDataLen = 63;
dataOffset = 1;
}
dbg("%s - for port %d (%d chars), flip=%d",
__func__, port->number, count, p_priv->out_flip);
for (left = count; left > 0; left -= todo) {
todo = left;
if (todo > maxDataLen)
todo = maxDataLen;
flip = p_priv->out_flip;
/* Check we have a valid urb/endpoint before we use it... */
this_urb = p_priv->out_urbs[flip];
if (this_urb == NULL) {
/* no bulk out, so return 0 bytes written */
dbg("%s - no output urb :(", __func__);
return count;
}
dbg("%s - endpoint %d flip %d",
__func__, usb_pipeendpoint(this_urb->pipe), flip);
if (this_urb->status == -EINPROGRESS) {
/* URB busy: give it 10 s before forcibly unlinking */
if (time_before(jiffies,
p_priv->tx_start_time[flip] + 10 * HZ))
break;
usb_unlink_urb(this_urb);
break;
}
/* First byte in buffer is "last flag" (except for usa19hx)
- unused so for now so set to zero */
((char *)this_urb->transfer_buffer)[0] = 0;
memcpy(this_urb->transfer_buffer + dataOffset, buf, todo);
buf += todo;
/* send the data out the bulk port */
this_urb->transfer_buffer_length = todo + dataOffset;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("usb_submit_urb(write bulk) failed (%d)", err);
p_priv->tx_start_time[flip] = jiffies;
/* Flip for next time if usa26 or usa28 interface
(not used on usa49) */
p_priv->out_flip = (flip + 1) & d_details->outdat_endp_flip;
}
return count - left;
}
/*
 * Bulk-in completion for USA-26 style adapters.  Byte 0 of each packet
 * is a flag byte: with bit 0x80 clear the rest is clean data (only a
 * packet-wide overrun may be flagged); with it set, the packet is a
 * sequence of <status, data> byte pairs, each carrying its own error
 * bits.  Data is pushed to the tty and the URB is always resubmitted
 * (except on nonzero completion status).
 */
static void usa26_indat_callback(struct urb *urb)
{
int i, err;
int endpoint;
struct usb_serial_port *port;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
dbg("%s", __func__);
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
dbg("%s - nonzero status: %x on endpoint %d.",
__func__, status, endpoint);
return;
}
port = urb->context;
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no errors on individual bytes, only
possible overrun err */
if (data[0] & RXERROR_OVERRUN)
err = TTY_OVERRUN;
else
err = 0;
for (i = 1; i < urb->actual_length ; ++i)
tty_insert_flip_char(tty, data[i], err);
} else {
/* some bytes had errors, every byte has status */
dbg("%s - RX error!!!!", __func__);
for (i = 0; i + 1 < urb->actual_length; i += 2) {
int stat = data[i], flag = 0;
if (stat & RXERROR_OVERRUN)
flag |= TTY_OVERRUN;
if (stat & RXERROR_FRAMING)
flag |= TTY_FRAME;
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
tty_insert_flip_char(tty, data[i+1], flag);
}
}
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
/* Outdat handling is common for all devices */
/* Bulk-out completion: wake the usb-serial core so it can feed the
 * port more data. */
static void usa2x_outdat_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);

	dbg("%s - urb %d", __func__, urb == p_priv->out_urbs[1]);
	usb_serial_port_softint(port);
}
/* Input-ack completion: nothing to do beyond the debug trace. */
static void usa26_inack_callback(struct urb *urb)
{
dbg("%s", __func__);
}
/*
 * Per-port control-out completion: if a setup message was queued while
 * the endpoint was busy (resend_cont nonzero), send it now.
 */
static void usa26_outcont_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);

	if (p_priv->resend_cont) {
		dbg("%s - sending setup", __func__);
		keyspan_usa26_send_setup(port->serial, port,
					 p_priv->resend_cont - 1);
	}
}
/*
 * Status (instat) completion for USA-26: parses a 9-byte
 * portStatusMessage, updates the cached CTS/DSR/DCD/RI line state for
 * the addressed port, and hangs up the tty on any DCD change unless
 * CLOCAL is set.  Resubmits the URB on all paths after a successful
 * completion.
 */
static void usa26_instat_callback(struct urb *urb)
{
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa26_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
struct tty_struct *tty;
int old_dcd_state, err;
int status = urb->status;
serial = urb->context;
if (status) {
dbg("%s - nonzero status: %x", __func__, status);
return;
}
if (urb->actual_length != 9) {
dbg("%s - %d byte report??", __func__, urb->actual_length);
goto exit;
}
msg = (struct keyspan_usa26_portStatusMessage *)data;
#if 0
dbg("%s - port status: port %d cts %d dcd %d dsr %d ri %d toff %d txoff %d rxen %d cr %d",
__func__, msg->port, msg->hskia_cts, msg->gpia_dcd, msg->dsr, msg->ri, msg->_txOff,
msg->_txXoff, msg->rxEnabled, msg->controlResponse);
#endif
/* Now do something useful with the data */
/* Check port number from message and retrieve private data */
if (msg->port >= serial->num_ports) {
dbg("%s - Unexpected port number %d", __func__, msg->port);
goto exit;
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->hskia_cts) ? 1 : 0);
p_priv->dsr_state = ((msg->dsr) ? 1 : 0);
p_priv->dcd_state = ((msg->gpia_dcd) ? 1 : 0);
p_priv->ri_state = ((msg->ri) ? 1 : 0);
if (old_dcd_state != p_priv->dcd_state) {
tty = tty_port_tty_get(&port->port);
if (tty && !C_CLOCAL(tty))
tty_hangup(tty);
tty_kref_put(tty);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
exit: ;
}
/* Global-control completion: nothing to do for USA-26. */
static void usa26_glocont_callback(struct urb *urb)
{
dbg("%s", __func__);
}
/*
 * Bulk-in completion for USA-28: raw data, no per-byte status.  The
 * port's two in-URBs alternate ("flip"); a completion for the URB we
 * are not currently expecting is ignored.  Otherwise completed URBs
 * are drained in order — push data, resubmit, flip — until one is
 * found still in flight.
 */
static void usa28_indat_callback(struct urb *urb)
{
int err;
struct usb_serial_port *port;
struct tty_struct *tty;
unsigned char *data;
struct keyspan_port_private *p_priv;
int status = urb->status;
dbg("%s", __func__);
port = urb->context;
p_priv = usb_get_serial_port_data(port);
data = urb->transfer_buffer;
if (urb != p_priv->in_urbs[p_priv->in_flip])
return;
do {
if (status) {
dbg("%s - nonzero status: %x on endpoint %d.",
__func__, status, usb_pipeendpoint(urb->pipe));
return;
}
port = urb->context;
p_priv = usb_get_serial_port_data(port);
data = urb->transfer_buffer;
tty =tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)",
__func__, err);
p_priv->in_flip ^= 1;
urb = p_priv->in_urbs[p_priv->in_flip];
} while (urb->status != -EINPROGRESS);
}
/* Input-ack completion: nothing to do beyond the debug trace. */
static void usa28_inack_callback(struct urb *urb)
{
dbg("%s", __func__);
}
/*
 * Per-port control-out completion for USA-28: resend a queued setup
 * message (resend_cont nonzero) now that the endpoint is free.
 */
static void usa28_outcont_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);

	if (p_priv->resend_cont) {
		dbg("%s - sending setup", __func__);
		keyspan_usa28_send_setup(port->serial, port,
					 p_priv->resend_cont - 1);
	}
}
/*
 * Status (instat) completion for USA-28: validates the message length,
 * updates the cached CTS/DSR/DCD/RI line state for the addressed port,
 * and hangs up the tty only on a DCD *drop* (old state was asserted)
 * unless CLOCAL.  Resubmits the URB after any successful completion.
 */
static void usa28_instat_callback(struct urb *urb)
{
int err;
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa28_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
struct tty_struct *tty;
int old_dcd_state;
int status = urb->status;
serial = urb->context;
if (status) {
dbg("%s - nonzero status: %x", __func__, status);
return;
}
if (urb->actual_length != sizeof(struct keyspan_usa28_portStatusMessage)) {
dbg("%s - bad length %d", __func__, urb->actual_length);
goto exit;
}
/*dbg("%s %x %x %x %x %x %x %x %x %x %x %x %x", __func__
data[0], data[1], data[2], data[3], data[4], data[5],
data[6], data[7], data[8], data[9], data[10], data[11]);*/
/* Now do something useful with the data */
msg = (struct keyspan_usa28_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
if (msg->port >= serial->num_ports) {
dbg("%s - Unexpected port number %d", __func__, msg->port);
goto exit;
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->cts) ? 1 : 0);
p_priv->dsr_state = ((msg->dsr) ? 1 : 0);
p_priv->dcd_state = ((msg->dcd) ? 1 : 0);
p_priv->ri_state = ((msg->ri) ? 1 : 0);
if( old_dcd_state != p_priv->dcd_state && old_dcd_state) {
tty = tty_port_tty_get(&port->port);
if (tty && !C_CLOCAL(tty))
tty_hangup(tty);
tty_kref_put(tty);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
exit: ;
}
/* Global-control completion: nothing to do for USA-28. */
static void usa28_glocont_callback(struct urb *urb)
{
dbg("%s", __func__);
}
/*
 * Global-control completion for USA-49: all ports share one control
 * endpoint, so scan the ports and resend the first pending setup
 * message (resend_cont nonzero).  Only one can be in flight at a time,
 * hence the break after sending.
 */
static void usa49_glocont_callback(struct urb *urb)
{
	struct usb_serial *serial = urb->context;
	struct usb_serial_port *port;
	struct keyspan_port_private *p_priv;
	int i;

	dbg("%s", __func__);

	for (i = 0; i < serial->num_ports; ++i) {
		port = serial->port[i];
		p_priv = usb_get_serial_port_data(port);
		if (!p_priv->resend_cont)
			continue;

		dbg("%s - sending setup", __func__);
		keyspan_usa49_send_setup(serial, port,
					 p_priv->resend_cont - 1);
		break;
	}
}
/* This is actually called glostat in the Keyspan
doco */
/*
 * Status completion for USA-49: validates the message length, updates
 * the cached CTS/DSR/DCD/RI state for the port named in the message,
 * and hangs up the tty only on a DCD drop unless CLOCAL.  Resubmits
 * the URB after any successful completion.
 */
static void usa49_instat_callback(struct urb *urb)
{
int err;
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa49_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state;
int status = urb->status;
dbg("%s", __func__);
serial = urb->context;
if (status) {
dbg("%s - nonzero status: %x", __func__, status);
return;
}
if (urb->actual_length !=
sizeof(struct keyspan_usa49_portStatusMessage)) {
dbg("%s - bad length %d", __func__, urb->actual_length);
goto exit;
}
/*dbg(" %x %x %x %x %x %x %x %x %x %x %x", __func__,
data[0], data[1], data[2], data[3], data[4], data[5],
data[6], data[7], data[8], data[9], data[10]);*/
/* Now do something useful with the data */
msg = (struct keyspan_usa49_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
if (msg->portNumber >= serial->num_ports) {
dbg("%s - Unexpected port number %d",
__func__, msg->portNumber);
goto exit;
}
port = serial->port[msg->portNumber];
p_priv = usb_get_serial_port_data(port);
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->cts) ? 1 : 0);
p_priv->dsr_state = ((msg->dsr) ? 1 : 0);
p_priv->dcd_state = ((msg->dcd) ? 1 : 0);
p_priv->ri_state = ((msg->ri) ? 1 : 0);
if (old_dcd_state != p_priv->dcd_state && old_dcd_state) {
struct tty_struct *tty = tty_port_tty_get(&port->port);
if (tty && !C_CLOCAL(tty))
tty_hangup(tty);
tty_kref_put(tty);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
exit: ;
}
/* Input-ack completion: nothing to do beyond the debug trace. */
static void usa49_inack_callback(struct urb *urb)
{
dbg("%s", __func__);
}
/*
 * Bulk-in completion for USA-49.  Byte 0 is a flag byte: with bit 0x80
 * clear the remaining bytes are clean data (copied in bulk); with it
 * set the packet is a sequence of <status, data> pairs with per-byte
 * error flags.  Data goes to the tty and the URB is resubmitted.
 */
static void usa49_indat_callback(struct urb *urb)
{
int i, err;
int endpoint;
struct usb_serial_port *port;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
dbg("%s", __func__);
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
dbg("%s - nonzero status: %x on endpoint %d.", __func__,
status, endpoint);
return;
}
port = urb->context;
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no error on any byte */
tty_insert_flip_string(tty, data + 1,
urb->actual_length - 1);
} else {
/* some bytes had errors, every byte has status */
for (i = 0; i + 1 < urb->actual_length; i += 2) {
int stat = data[i], flag = 0;
if (stat & RXERROR_OVERRUN)
flag |= TTY_OVERRUN;
if (stat & RXERROR_FRAMING)
flag |= TTY_FRAME;
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
tty_insert_flip_char(tty, data[i+1], flag);
}
}
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
/*
 * Bulk-in completion for the USA-49WG, where data for all four ports
 * arrives multiplexed on a single endpoint as repeated records of
 * <port#, len, status, data...>.  Clean records (status bit 0x80
 * clear) carry len-1 data bytes after the status byte; error records
 * carry <status, data> pairs with per-byte flags.
 *
 * NOTE(review): tty_port_tty_get() can return NULL but the result is
 * passed unchecked to tty_insert_flip_char()/tty_flip_buffer_push() —
 * verify those tolerate a NULL tty on this kernel.
 */
static void usa49wg_indat_callback(struct urb *urb)
{
int i, len, x, err;
struct usb_serial *serial;
struct usb_serial_port *port;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
dbg("%s", __func__);
serial = urb->context;
if (status) {
dbg("%s - nonzero status: %x", __func__, status);
return;
}
/* inbound data is in the form P#, len, status, data */
i = 0;
len = 0;
if (urb->actual_length) {
while (i < urb->actual_length) {
/* Check port number from message*/
if (data[i] >= serial->num_ports) {
dbg("%s - Unexpected port number %d",
__func__, data[i]);
return;
}
port = serial->port[data[i++]];
tty = tty_port_tty_get(&port->port);
len = data[i++];
/* 0x80 bit is error flag */
if ((data[i] & 0x80) == 0) {
/* no error on any byte */
i++;
for (x = 1; x < len ; ++x)
tty_insert_flip_char(tty, data[i++], 0);
} else {
/*
* some bytes had errors, every byte has status
*/
for (x = 0; x + 1 < len; x += 2) {
int stat = data[i], flag = 0;
if (stat & RXERROR_OVERRUN)
flag |= TTY_OVERRUN;
if (stat & RXERROR_FRAMING)
flag |= TTY_FRAME;
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
tty_insert_flip_char(tty,
data[i+1], flag);
i += 2;
}
}
tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
/* not used, usa-49 doesn't have per-port control endpoints */
static void usa49_outcont_callback(struct urb *urb)
{
dbg("%s", __func__);
}
/*
 * Bulk-in completion for USA-90 (usa19hs).  Above 57600 baud the
 * device streams raw data (usa28-like); at or below that rate packets
 * use the usa26 format — byte 0 is a flag byte selecting either clean
 * data with an optional packet-wide overrun, or <status, data> pairs
 * with per-byte error flags.  The URB is always resubmitted.
 */
static void usa90_indat_callback(struct urb *urb)
{
int i, err;
int endpoint;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
dbg("%s", __func__);
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
dbg("%s - nonzero status: %x on endpoint %d.",
__func__, status, endpoint);
return;
}
port = urb->context;
p_priv = usb_get_serial_port_data(port);
if (urb->actual_length) {
tty = tty_port_tty_get(&port->port);
/* if current mode is DMA, looks like usa28 format
otherwise looks like usa26 data format */
if (p_priv->baud > 57600)
tty_insert_flip_string(tty, data, urb->actual_length);
else {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no errors on individual bytes, only
possible overrun err*/
if (data[0] & RXERROR_OVERRUN)
err = TTY_OVERRUN;
else
err = 0;
for (i = 1; i < urb->actual_length ; ++i)
tty_insert_flip_char(tty, data[i],
err);
} else {
/* some bytes had errors, every byte has status */
dbg("%s - RX error!!!!", __func__);
for (i = 0; i + 1 < urb->actual_length; i += 2) {
int stat = data[i], flag = 0;
if (stat & RXERROR_OVERRUN)
flag |= TTY_OVERRUN;
if (stat & RXERROR_FRAMING)
flag |= TTY_FRAME;
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
tty_insert_flip_char(tty, data[i+1],
flag);
}
}
}
tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
/*
 * Status completion for USA-90: single-port device, so the message
 * always applies to port 0.  Requires at least 14 bytes, updates the
 * cached CTS/DSR/DCD/RI state, and hangs up the tty on a DCD drop
 * unless CLOCAL.  Resubmits the URB after any successful completion.
 */
static void usa90_instat_callback(struct urb *urb)
{
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa90_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
struct tty_struct *tty;
int old_dcd_state, err;
int status = urb->status;
serial = urb->context;
if (status) {
dbg("%s - nonzero status: %x", __func__, status);
return;
}
if (urb->actual_length < 14) {
dbg("%s - %d byte report??", __func__, urb->actual_length);
goto exit;
}
msg = (struct keyspan_usa90_portStatusMessage *)data;
/* Now do something useful with the data */
port = serial->port[0];
p_priv = usb_get_serial_port_data(port);
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->cts) ? 1 : 0);
p_priv->dsr_state = ((msg->dsr) ? 1 : 0);
p_priv->dcd_state = ((msg->dcd) ? 1 : 0);
p_priv->ri_state = ((msg->ri) ? 1 : 0);
if (old_dcd_state != p_priv->dcd_state && old_dcd_state) {
tty = tty_port_tty_get(&port->port);
if (tty && !C_CLOCAL(tty))
tty_hangup(tty);
tty_kref_put(tty);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
exit:
;
}
/*
 * Per-port control-out completion for USA-90: resend a queued setup
 * message (resend_cont nonzero) now that the endpoint is free.
 */
static void usa90_outcont_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);

	if (p_priv->resend_cont) {
		dbg("%s - sending setup", __func__);
		keyspan_usa90_send_setup(port->serial, port,
					 p_priv->resend_cont - 1);
	}
}
/* Status messages from the 28xg */
/*
 * Status completion for USA-67: validates length, updates the cached
 * CTS/DCD state (this format reports no DSR/RI) for the addressed
 * port, and hangs up the tty on a DCD drop unless CLOCAL.  Resubmits
 * the URB after a successful completion.
 */
static void usa67_instat_callback(struct urb *urb)
{
int err;
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa67_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state;
int status = urb->status;
dbg("%s", __func__);
serial = urb->context;
if (status) {
dbg("%s - nonzero status: %x", __func__, status);
return;
}
if (urb->actual_length !=
sizeof(struct keyspan_usa67_portStatusMessage)) {
dbg("%s - bad length %d", __func__, urb->actual_length);
return;
}
/* Now do something useful with the data */
msg = (struct keyspan_usa67_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
if (msg->port >= serial->num_ports) {
dbg("%s - Unexpected port number %d", __func__, msg->port);
return;
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->hskia_cts) ? 1 : 0);
p_priv->dcd_state = ((msg->gpia_dcd) ? 1 : 0);
if (old_dcd_state != p_priv->dcd_state && old_dcd_state) {
struct tty_struct *tty = tty_port_tty_get(&port->port);
if (tty && !C_CLOCAL(tty))
tty_hangup(tty);
tty_kref_put(tty);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
/* Global-control completion: find the first port with a pending resend
   and push its setup message. */
static void usa67_glocont_callback(struct urb *urb)
{
	struct usb_serial *serial = urb->context;
	int i;

	dbg("%s", __func__);

	for (i = 0; i < serial->num_ports; ++i) {
		struct usb_serial_port *port = serial->port[i];
		struct keyspan_port_private *p_priv =
					usb_get_serial_port_data(port);

		if (p_priv->resend_cont) {
			dbg("%s - sending setup", __func__);
			keyspan_usa67_send_setup(serial, port,
						p_priv->resend_cont - 1);
			break;
		}
	}
}
/* Report how much data we can accept: one full packet if either
   outdat urb is free, zero otherwise. */
static int keyspan_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);
	const struct keyspan_device_details *d_details;
	struct urb *this_urb;
	int flip;
	int data_len;

	dbg("%s", __func__);
	d_details = p_priv->device_details;

	/* FIXME: locking */
	data_len = (d_details->msg_format == msg_usa90) ? 64 : 63;

	/* Check both endpoints to see if any are available. */
	flip = p_priv->out_flip;
	this_urb = p_priv->out_urbs[flip];
	if (this_urb != NULL) {
		if (this_urb->status != -EINPROGRESS)
			return data_len;
		flip = (flip + 1) & d_details->outdat_endp_flip;
		this_urb = p_priv->out_urbs[flip];
		if (this_urb != NULL && this_urb->status != -EINPROGRESS)
			return data_len;
	}
	return 0;
}
/*
 * Open the port: set sane defaults, start the read urbs, pick up the
 * current termios baud rate (if a tty is attached), and send the
 * port-open setup message.  Always returns 0.
 *
 * Fix: the original fetched the serial-level private data into a local
 * (s_priv) that was never used afterwards — dead store removed.
 */
static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct keyspan_port_private *p_priv;
	const struct keyspan_device_details *d_details;
	int i, err;
	int baud_rate, device_port;
	struct urb *urb;
	unsigned int cflag = 0;

	p_priv = usb_get_serial_port_data(port);
	d_details = p_priv->device_details;

	dbg("%s - port%d.", __func__, port->number);

	/* Set some sane defaults */
	p_priv->rts_state = 1;
	p_priv->dtr_state = 1;
	p_priv->baud = 9600;

	/* force baud and lcr to be set on open */
	p_priv->old_baud = 0;
	p_priv->old_cflag = 0;

	p_priv->out_flip = 0;
	p_priv->in_flip = 0;

	/* Reset low level data toggle and start reading from endpoints */
	for (i = 0; i < 2; i++) {
		urb = p_priv->in_urbs[i];
		if (urb == NULL)
			continue;

		/* make sure endpoint data toggle is synchronized
		   with the device */
		usb_clear_halt(urb->dev, urb->pipe);
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err != 0)
			dbg("%s - submit urb %d failed (%d)",
							__func__, i, err);
	}

	/* Reset low level data toggle on out endpoints */
	for (i = 0; i < 2; i++) {
		urb = p_priv->out_urbs[i];
		if (urb == NULL)
			continue;
		/* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
						usb_pipeout(urb->pipe), 0); */
	}

	/* get the terminal config for the setup message now so we don't
	 * need to send 2 of them */

	device_port = port->number - port->serial->minor;
	if (tty) {
		cflag = tty->termios->c_cflag;
		/* Baud rate calculation takes baud rate as an integer
		   so other rates can be generated if desired. */
		baud_rate = tty_get_baud_rate(tty);
		/* If no match or invalid, leave as default */
		if (baud_rate >= 0
		    && d_details->calculate_baud_rate(baud_rate,
				d_details->baudclk, NULL, NULL, NULL,
				device_port) == KEYSPAN_BAUD_RATE_OK) {
			p_priv->baud = baud_rate;
		}
	}
	/* set CTS/RTS handshake etc. */
	p_priv->cflag = cflag;
	p_priv->flow_control = (cflag & CRTSCTS) ? flow_cts : flow_none;

	keyspan_send_setup(port, 1);
	/* mdelay(100); */
	/* keyspan_set_termios(port, NULL); */

	return 0;
}
/* Kill an urb, but only if one exists and is currently in flight. */
static inline void stop_urb(struct urb *urb)
{
	if (!urb)
		return;
	if (urb->status == -EINPROGRESS)
		usb_kill_urb(urb);
}
/* Raise or drop DTR and RTS together and push the change to the device. */
static void keyspan_dtr_rts(struct usb_serial_port *port, int on)
{
	struct keyspan_port_private *p_priv;

	p_priv = usb_get_serial_port_data(port);
	p_priv->dtr_state = on;
	p_priv->rts_state = on;
	keyspan_send_setup(port, 0);
}
/*
 * Close the port: drop DTR/RTS, send the port-close setup message, and
 * stop all in-flight read/write urbs.
 *
 * Fix: removed the dead store to s_priv (usb_get_serial_data result was
 * fetched but never used).
 */
static void keyspan_close(struct usb_serial_port *port)
{
	int i;
	struct usb_serial *serial = port->serial;
	struct keyspan_port_private *p_priv;

	dbg("%s", __func__);
	p_priv = usb_get_serial_port_data(port);

	p_priv->rts_state = 0;
	p_priv->dtr_state = 0;

	if (serial->dev) {
		keyspan_send_setup(port, 2);
		/* pilot-xfer seems to work best with this delay */
		mdelay(100);
		/* keyspan_set_termios(port, NULL); */
	}

	/*while (p_priv->outcont_urb->status == -EINPROGRESS) {
		dbg("%s - urb in progress", __func__);
	}*/

	p_priv->out_flip = 0;
	p_priv->in_flip = 0;

	if (serial->dev) {
		/* Stop reading/writing urbs */
		stop_urb(p_priv->inack_urb);
		/* stop_urb(p_priv->outcont_urb); */
		for (i = 0; i < 2; i++) {
			stop_urb(p_priv->in_urbs[i]);
			stop_urb(p_priv->out_urbs[i]);
		}
	}
}
/* download the firmware to a pre-renumeration device */
static int keyspan_fake_startup(struct usb_serial *serial)
{
int response;
const struct ihex_binrec *record;
char *fw_name;
const struct firmware *fw;
dbg("Keyspan startup version %04x product %04x",
le16_to_cpu(serial->dev->descriptor.bcdDevice),
le16_to_cpu(serial->dev->descriptor.idProduct));
if ((le16_to_cpu(serial->dev->descriptor.bcdDevice) & 0x8000)
!= 0x8000) {
dbg("Firmware already loaded. Quitting.");
return 1;
}
/* Select firmware image on the basis of idProduct */
switch (le16_to_cpu(serial->dev->descriptor.idProduct)) {
case keyspan_usa28_pre_product_id:
fw_name = "keyspan/usa28.fw";
break;
case keyspan_usa28x_pre_product_id:
fw_name = "keyspan/usa28x.fw";
break;
case keyspan_usa28xa_pre_product_id:
fw_name = "keyspan/usa28xa.fw";
break;
case keyspan_usa28xb_pre_product_id:
fw_name = "keyspan/usa28xb.fw";
break;
case keyspan_usa19_pre_product_id:
fw_name = "keyspan/usa19.fw";
break;
case keyspan_usa19qi_pre_product_id:
fw_name = "keyspan/usa19qi.fw";
break;
case keyspan_mpr_pre_product_id:
fw_name = "keyspan/mpr.fw";
break;
case keyspan_usa19qw_pre_product_id:
fw_name = "keyspan/usa19qw.fw";
break;
case keyspan_usa18x_pre_product_id:
fw_name = "keyspan/usa18x.fw";
break;
case keyspan_usa19w_pre_product_id:
fw_name = "keyspan/usa19w.fw";
break;
case keyspan_usa49w_pre_product_id:
fw_name = "keyspan/usa49w.fw";
break;
case keyspan_usa49wlc_pre_product_id:
fw_name = "keyspan/usa49wlc.fw";
break;
default:
dev_err(&serial->dev->dev, "Unknown product ID (%04x)\n",
le16_to_cpu(serial->dev->descriptor.idProduct));
return 1;
}
if (request_ihex_firmware(&fw, fw_name, &serial->dev->dev)) {
dev_err(&serial->dev->dev, "Required keyspan firmware image (%s) unavailable.\n", fw_name);
return(1);
}
dbg("Uploading Keyspan %s firmware.", fw_name);
/* download the firmware image */
response = ezusb_set_reset(serial, 1);
record = (const struct ihex_binrec *)fw->data;
while (record) {
response = ezusb_writememory(serial, be32_to_cpu(record->addr),
(unsigned char *)record->data,
be16_to_cpu(record->len), 0xa0);
if (response < 0) {
dev_err(&serial->dev->dev, "ezusb_writememory failed for Keyspan firmware (%d %04X %p %d)\n",
response, be32_to_cpu(record->addr),
record->data, be16_to_cpu(record->len));
break;
}
record = ihex_next_binrec(record);
}
release_firmware(fw);
/* bring device out of reset. Renumeration will occur in a
moment and the new device will bind to the real driver */
response = ezusb_set_reset(serial, 0);
/* we don't want this device to have a driver assigned to it. */
return 1;
}
/* Helper functions used by keyspan_setup_urbs */

/* Look up the endpoint descriptor with the given address on the current
   altsetting; warn and return NULL if it is absent. */
static struct usb_endpoint_descriptor const *find_ep(struct usb_serial const *serial,
						     int endpoint)
{
	struct usb_host_interface *iface_desc =
					serial->interface->cur_altsetting;
	int i;

	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		struct usb_endpoint_descriptor *ep =
					&iface_desc->endpoint[i].desc;

		if (ep->bEndpointAddress == endpoint)
			return ep;
	}
	dev_warn(&serial->interface->dev, "found no endpoint descriptor for "
		 "endpoint %x\n", endpoint);
	return NULL;
}
/*
 * Allocate and fill an urb for the given endpoint.
 *
 * endpoint == -1 means "not used on this device" (returns NULL);
 * endpoint == 0 is the control endpoint, returned empty and filled in
 * by the caller at use time.  The interrupt/bulk choice comes from the
 * endpoint descriptor.  `dir` (USB_DIR_IN/USB_DIR_OUT) is OR-ed into
 * the send-pipe value so a single pipe constructor serves both
 * directions.
 */
static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
				     int dir, void *ctx, char *buf, int len,
				     void (*callback)(struct urb *))
{
	struct urb *urb;
	struct usb_endpoint_descriptor const *ep_desc;
	char const *ep_type_name;

	if (endpoint == -1)
		return NULL;		/* endpoint not needed */

	dbg("%s - alloc for endpoint %d.", __func__, endpoint);
	urb = usb_alloc_urb(0, GFP_KERNEL);	/* No ISO */
	if (urb == NULL) {
		dbg("%s - alloc for endpoint %d failed.", __func__, endpoint);
		return NULL;
	}

	if (endpoint == 0) {
		/* control EP filled in when used */
		return urb;
	}

	ep_desc = find_ep(serial, endpoint);
	if (!ep_desc) {
		/* leak the urb, something's wrong and the callers don't care */
		return urb;
	}
	if (usb_endpoint_xfer_int(ep_desc)) {
		ep_type_name = "INT";
		usb_fill_int_urb(urb, serial->dev,
				 usb_sndintpipe(serial->dev, endpoint) | dir,
				 buf, len, callback, ctx,
				 ep_desc->bInterval);
	} else if (usb_endpoint_xfer_bulk(ep_desc)) {
		ep_type_name = "BULK";
		usb_fill_bulk_urb(urb, serial->dev,
				  usb_sndbulkpipe(serial->dev, endpoint) | dir,
				  buf, len, callback, ctx);
	} else {
		/* only interrupt and bulk endpoints are supported */
		dev_warn(&serial->interface->dev,
			 "unsupported endpoint type %x\n",
			 usb_endpoint_type(ep_desc));
		usb_free_urb(urb);
		return NULL;
	}

	dbg("%s - using urb %p for %s endpoint %x",
	    __func__, urb, ep_type_name, endpoint);
	return urb;
}
/*
 * Per-protocol urb completion handlers.  keyspan_setup_urbs() indexes
 * this array by d_details->msg_format, so the entry order must match
 * the msg_usa26/28/49/90/67 values.  The usa67 variant reuses the
 * usa26 data-path handlers with its own status handlers.
 */
static struct callbacks {
	void	(*instat_callback)(struct urb *);
	void	(*glocont_callback)(struct urb *);
	void	(*indat_callback)(struct urb *);
	void	(*outdat_callback)(struct urb *);
	void	(*inack_callback)(struct urb *);
	void	(*outcont_callback)(struct urb *);
} keyspan_callbacks[] = {
	{
		/* msg_usa26 callbacks */
		.instat_callback =	usa26_instat_callback,
		.glocont_callback =	usa26_glocont_callback,
		.indat_callback =	usa26_indat_callback,
		.outdat_callback =	usa2x_outdat_callback,
		.inack_callback =	usa26_inack_callback,
		.outcont_callback =	usa26_outcont_callback,
	}, {
		/* msg_usa28 callbacks */
		.instat_callback =	usa28_instat_callback,
		.glocont_callback =	usa28_glocont_callback,
		.indat_callback =	usa28_indat_callback,
		.outdat_callback =	usa2x_outdat_callback,
		.inack_callback =	usa28_inack_callback,
		.outcont_callback =	usa28_outcont_callback,
	}, {
		/* msg_usa49 callbacks */
		.instat_callback =	usa49_instat_callback,
		.glocont_callback =	usa49_glocont_callback,
		.indat_callback =	usa49_indat_callback,
		.outdat_callback =	usa2x_outdat_callback,
		.inack_callback =	usa49_inack_callback,
		.outcont_callback =	usa49_outcont_callback,
	}, {
		/* msg_usa90 callbacks */
		.instat_callback =	usa90_instat_callback,
		.glocont_callback =	usa28_glocont_callback,
		.indat_callback =	usa90_indat_callback,
		.outdat_callback =	usa2x_outdat_callback,
		.inack_callback =	usa28_inack_callback,
		.outcont_callback =	usa90_outcont_callback,
	}, {
		/* msg_usa67 callbacks */
		.instat_callback =	usa67_instat_callback,
		.glocont_callback =	usa67_glocont_callback,
		.indat_callback =	usa26_indat_callback,
		.outdat_callback =	usa2x_outdat_callback,
		.inack_callback =	usa26_inack_callback,
		.outcont_callback =	usa26_outcont_callback,
	}
};
/* Generic setup urbs function that uses
   data in device_details */
static void keyspan_setup_urbs(struct usb_serial *serial)
{
	int i, j;
	struct keyspan_serial_private *s_priv;
	const struct keyspan_device_details *d_details;
	struct usb_serial_port *port;
	struct keyspan_port_private *p_priv;
	struct callbacks *cback;
	int endp;

	dbg("%s", __func__);

	s_priv = usb_get_serial_data(serial);
	d_details = s_priv->device_details;

	/* Setup values for the various callback routines */
	cback = &keyspan_callbacks[d_details->msg_format];

	/* Allocate and set up urbs for each one that is in use,
	   starting with instat endpoints */
	s_priv->instat_urb = keyspan_setup_urb
		(serial, d_details->instat_endpoint, USB_DIR_IN,
		 serial, s_priv->instat_buf, INSTAT_BUFLEN,
		 cback->instat_callback);

	/* NOTE(review): the device-level indat urb is hard-wired to
	   usa49wg_indat_callback rather than cback->indat_callback;
	   presumably only the USA-49WG defines a device-level
	   indat_endpoint (-1 elsewhere yields a NULL urb) — confirm
	   against the device_details tables. */
	s_priv->indat_urb = keyspan_setup_urb
		(serial, d_details->indat_endpoint, USB_DIR_IN,
		 serial, s_priv->indat_buf, INDAT49W_BUFLEN,
		 usa49wg_indat_callback);

	s_priv->glocont_urb = keyspan_setup_urb
		(serial, d_details->glocont_endpoint, USB_DIR_OUT,
		 serial, s_priv->glocont_buf, GLOCONT_BUFLEN,
		 cback->glocont_callback);

	/* Setup endpoints for each port specific thing */
	for (i = 0; i < d_details->num_ports; i++) {
		port = serial->port[i];
		p_priv = usb_get_serial_port_data(port);

		/* Do indat endpoints first, once for each flip;
		   consecutive endpoint addresses serve the flip slots */
		endp = d_details->indat_endpoints[i];
		for (j = 0; j <= d_details->indat_endp_flip; ++j, ++endp) {
			p_priv->in_urbs[j] = keyspan_setup_urb
				(serial, endp, USB_DIR_IN, port,
				 p_priv->in_buffer[j], 64,
				 cback->indat_callback);
		}
		/* unused flip slots stay NULL */
		for (; j < 2; ++j)
			p_priv->in_urbs[j] = NULL;

		/* outdat endpoints also have flip */
		endp = d_details->outdat_endpoints[i];
		for (j = 0; j <= d_details->outdat_endp_flip; ++j, ++endp) {
			p_priv->out_urbs[j] = keyspan_setup_urb
				(serial, endp, USB_DIR_OUT, port,
				 p_priv->out_buffer[j], 64,
				 cback->outdat_callback);
		}
		for (; j < 2; ++j)
			p_priv->out_urbs[j] = NULL;

		/* inack endpoint */
		p_priv->inack_urb = keyspan_setup_urb
			(serial, d_details->inack_endpoints[i], USB_DIR_IN,
			 port, p_priv->inack_buffer, 1, cback->inack_callback);

		/* outcont endpoint */
		p_priv->outcont_urb = keyspan_setup_urb
			(serial, d_details->outcont_endpoints[i], USB_DIR_OUT,
			 port, p_priv->outcont_buffer, 64,
			 cback->outcont_callback);
	}
}
/* usa19 function doesn't require prescaler */
static int keyspan_usa19_calc_baud(u32 baud_rate, u32 baudclk, u8 *rate_hi,
				   u8 *rate_low, u8 *prescaler, int portnum)
{
	u32 b16;	/* baud rate times 16 (actual rate used internally) */
	u32 div;	/* divisor */
	u32 cnt;	/* inverse of divisor (programmed into 8051) */

	dbg("%s - %d.", __func__, baud_rate);

	/* prevent divide by zero... */
	b16 = baud_rate * 16L;
	if (b16 == 0)
		return KEYSPAN_INVALID_BAUD_RATE;
	/* Any "standard" rate over 57k6 is marginal on the USA-19
	   as we run out of divisor resolution. */
	if (baud_rate > 57600)
		return KEYSPAN_INVALID_BAUD_RATE;

	/* calculate the divisor and the counter (its inverse) */
	div = baudclk / b16;
	if (div == 0)
		return KEYSPAN_INVALID_BAUD_RATE;
	cnt = 0 - div;

	if (div > 0xffff)
		return KEYSPAN_INVALID_BAUD_RATE;

	/* return the counter values if non-null */
	if (rate_low)
		*rate_low = (u8) (cnt & 0xff);
	if (rate_hi)
		*rate_hi = (u8) ((cnt >> 8) & 0xff);
	if (rate_low && rate_hi)
		dbg("%s - %d %02x %02x.",
		    __func__, baud_rate, *rate_hi, *rate_low);
	return KEYSPAN_BAUD_RATE_OK;
}
/* usa19hs function doesn't require prescaler */
static int keyspan_usa19hs_calc_baud(u32 baud_rate, u32 baudclk, u8 *rate_hi,
				     u8 *rate_low, u8 *prescaler, int portnum)
{
	u32 b16;	/* baud rate times 16 (actual rate used internally) */
	u32 div;	/* divisor */

	dbg("%s - %d.", __func__, baud_rate);

	/* prevent divide by zero... */
	b16 = baud_rate * 16L;
	if (b16 == 0)
		return KEYSPAN_INVALID_BAUD_RATE;

	/* calculate the divisor; it must fit in 16 bits */
	div = baudclk / b16;
	if (div == 0 || div > 0xffff)
		return KEYSPAN_INVALID_BAUD_RATE;

	/* return the counter values if non-null */
	if (rate_low)
		*rate_low = (u8) (div & 0xff);
	if (rate_hi)
		*rate_hi = (u8) ((div >> 8) & 0xff);
	if (rate_low && rate_hi)
		dbg("%s - %d %02x %02x.",
		    __func__, baud_rate, *rate_hi, *rate_low);
	return KEYSPAN_BAUD_RATE_OK;
}
/* usa19w: exhaustively search prescaler values for the closest
   achievable rate, then report divisor and prescaler. */
static int keyspan_usa19w_calc_baud(u32 baud_rate, u32 baudclk, u8 *rate_hi,
				    u8 *rate_low, u8 *prescaler, int portnum)
{
	u32 b16;			/* baud rate times 16 (internal rate) */
	u32 clk;			/* clock with the candidate prescaler */
	u32 div;			/* divisor for that clock */
	u32 res;			/* resulting rate */
	u32 diff;			/* error for that prescaler */
	u32 smallest_diff = 0xffffffff;	/* start with worst possible error */
	u8 best_prescaler = 0;		/* 0 is invalid: "none found" flag */
	int i;

	dbg("%s - %d.", __func__, baud_rate);

	/* prevent divide by zero */
	b16 = baud_rate * 16L;
	if (b16 == 0)
		return KEYSPAN_INVALID_BAUD_RATE;

	/* Try every usable prescaler and keep the best fit */
	for (i = 8; i <= 0xff; ++i) {
		clk = (baudclk * 8) / (u32) i;
		div = clk / b16;
		if (div == 0)
			continue;
		res = clk / div;
		diff = (res > b16) ? (res - b16) : (b16 - res);
		if (diff < smallest_diff) {
			best_prescaler = i;
			smallest_diff = diff;
		}
	}

	if (best_prescaler == 0)
		return KEYSPAN_INVALID_BAUD_RATE;

	clk = (baudclk * 8) / (u32) best_prescaler;
	div = clk / b16;

	/* return the divisor and prescaler if non-null */
	if (rate_low)
		*rate_low = (u8) (div & 0xff);
	if (rate_hi)
		*rate_hi = (u8) ((div >> 8) & 0xff);
	if (prescaler)
		*prescaler = best_prescaler;
	return KEYSPAN_BAUD_RATE_OK;
}
/* USA-28 supports different maximum baud rates on each port */
static int keyspan_usa28_calc_baud(u32 baud_rate, u32 baudclk, u8 *rate_hi,
				   u8 *rate_low, u8 *prescaler, int portnum)
{
	u32 b16;	/* baud rate times 16 (actual rate used internally) */
	u32 div;	/* divisor */
	u32 cnt;	/* inverse of divisor (programmed into 8051) */

	dbg("%s - %d.", __func__, baud_rate);

	/* prevent divide by zero */
	b16 = baud_rate * 16L;
	if (b16 == 0)
		return KEYSPAN_INVALID_BAUD_RATE;

	/* calculate the divisor and the counter (its inverse);
	   note the baudclk argument is ignored here in favour of the
	   fixed KEYSPAN_USA28_BAUDCLK */
	div = KEYSPAN_USA28_BAUDCLK / b16;
	if (div == 0)
		return KEYSPAN_INVALID_BAUD_RATE;
	cnt = 0 - div;

	/* check for out of range, based on portnum,
	   and return result */
	switch (portnum) {
	case 0:
		if (div > 0xffff)
			return KEYSPAN_INVALID_BAUD_RATE;
		break;
	case 1:
		if (div > 0xff)
			return KEYSPAN_INVALID_BAUD_RATE;
		break;
	default:
		return KEYSPAN_INVALID_BAUD_RATE;
	}

	/* return the counter values if not NULL
	   (port 1 will ignore retHi) */
	if (rate_low)
		*rate_low = (u8) (cnt & 0xff);
	if (rate_hi)
		*rate_hi = (u8) ((cnt >> 8) & 0xff);
	dbg("%s - %d OK.", __func__, baud_rate);
	return KEYSPAN_BAUD_RATE_OK;
}
static int keyspan_usa26_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port)
{
struct keyspan_usa26_portControlMessage msg;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
int outcont_urb;
struct urb *this_urb;
int device_port, err;
dbg("%s reset=%d", __func__, reset_port);
s_priv = usb_get_serial_data(serial);
p_priv = usb_get_serial_port_data(port);
d_details = s_priv->device_details;
device_port = port->number - port->serial->minor;
outcont_urb = d_details->outcont_endpoints[port->number];
this_urb = p_priv->outcont_urb;
dbg("%s - endpoint %d", __func__, usb_pipeendpoint(this_urb->pipe));
/* Make sure we have an urb then send the message */
if (this_urb == NULL) {
dbg("%s - oops no urb.", __func__);
return -1;
}
/* Save reset port val for resend.
Don't overwrite resend for open/close condition. */
if ((reset_port + 1) > p_priv->resend_cont)
p_priv->resend_cont = reset_port + 1;
if (this_urb->status == -EINPROGRESS) {
/* dbg("%s - already writing", __func__); */
mdelay(5);
return -1;
}
memset(&msg, 0, sizeof(struct keyspan_usa26_portControlMessage));
/* Only set baud rate if it's changed */
if (p_priv->old_baud != p_priv->baud) {
p_priv->old_baud = p_priv->baud;
msg.setClocking = 0xff;
if (d_details->calculate_baud_rate
(p_priv->baud, d_details->baudclk, &msg.baudHi,
&msg.baudLo, &msg.prescaler, device_port) == KEYSPAN_INVALID_BAUD_RATE) {
dbg("%s - Invalid baud rate %d requested, using 9600.",
__func__, p_priv->baud);
msg.baudLo = 0;
msg.baudHi = 125; /* Values for 9600 baud */
msg.prescaler = 10;
}
msg.setPrescaler = 0xff;
}
msg.lcr = (p_priv->cflag & CSTOPB)? STOPBITS_678_2: STOPBITS_5678_1;
switch (p_priv->cflag & CSIZE) {
case CS5:
msg.lcr |= USA_DATABITS_5;
break;
case CS6:
msg.lcr |= USA_DATABITS_6;
break;
case CS7:
msg.lcr |= USA_DATABITS_7;
break;
case CS8:
msg.lcr |= USA_DATABITS_8;
break;
}
if (p_priv->cflag & PARENB) {
/* note USA_PARITY_NONE == 0 */
msg.lcr |= (p_priv->cflag & PARODD)?
USA_PARITY_ODD : USA_PARITY_EVEN;
}
msg.setLcr = 0xff;
msg.ctsFlowControl = (p_priv->flow_control == flow_cts);
msg.xonFlowControl = 0;
msg.setFlowControl = 0xff;
msg.forwardingLength = 16;
msg.xonChar = 17;
msg.xoffChar = 19;
/* Opening port */
if (reset_port == 1) {
msg._txOn = 1;
msg._txOff = 0;
msg.txFlush = 0;
msg.txBreak = 0;
msg.rxOn = 1;
msg.rxOff = 0;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0xff;
}
/* Closing port */
else if (reset_port == 2) {
msg._txOn = 0;
msg._txOff = 1;
msg.txFlush = 0;
msg.txBreak = 0;
msg.rxOn = 0;
msg.rxOff = 1;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0;
}
/* Sending intermediate configs */
else {
msg._txOn = (!p_priv->break_on);
msg._txOff = 0;
msg.txFlush = 0;
msg.txBreak = (p_priv->break_on);
msg.rxOn = 0;
msg.rxOff = 0;
msg.rxFlush = 0;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0x0;
}
/* Do handshaking outputs */
msg.setTxTriState_setRts = 0xff;
msg.txTriState_rts = p_priv->rts_state;
msg.setHskoa_setDtr = 0xff;
msg.hskoa_dtr = p_priv->dtr_state;
p_priv->resend_cont = 0;
memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed (%d)", __func__, err);
#if 0
else {
dbg("%s - usb_submit_urb(%d) OK %d bytes (end %d)", __func__
outcont_urb, this_urb->transfer_buffer_length,
usb_pipeendpoint(this_urb->pipe));
}
#endif
return 0;
}
/*
 * Build and send a USA-28 port-control message on the port's outcont
 * urb.  reset_port: 1 = opening, 2 = closing, anything else = update
 * the current config.  Returns 0 on success, -1 if the urb is missing
 * or still busy (caller/resend logic retries via resend_cont).
 */
static int keyspan_usa28_send_setup(struct usb_serial *serial,
				    struct usb_serial_port *port,
				    int reset_port)
{
	struct keyspan_usa28_portControlMessage	msg;
	struct keyspan_serial_private	 	*s_priv;
	struct keyspan_port_private 		*p_priv;
	const struct keyspan_device_details	*d_details;
	struct urb				*this_urb;
	int 					device_port, err;

	dbg("%s", __func__);

	s_priv = usb_get_serial_data(serial);
	p_priv = usb_get_serial_port_data(port);
	d_details = s_priv->device_details;
	device_port = port->number - port->serial->minor;

	/* only do something if we have a bulk out endpoint */
	this_urb = p_priv->outcont_urb;
	if (this_urb == NULL) {
		dbg("%s - oops no urb.", __func__);
		return -1;
	}

	/* Save reset port val for resend.
	   Don't overwrite resend for open/close condition. */
	if ((reset_port + 1) > p_priv->resend_cont)
		p_priv->resend_cont = reset_port + 1;
	if (this_urb->status == -EINPROGRESS) {
		dbg("%s already writing", __func__);
		mdelay(5);
		return -1;
	}

	memset(&msg, 0, sizeof(struct keyspan_usa28_portControlMessage));

	/* Baud rate is always (re)sent; fall back to 9600 on failure */
	msg.setBaudRate = 1;
	if (d_details->calculate_baud_rate(p_priv->baud, d_details->baudclk,
		&msg.baudHi, &msg.baudLo, NULL, device_port) == KEYSPAN_INVALID_BAUD_RATE) {
		dbg("%s - Invalid baud rate requested %d.",
						__func__, p_priv->baud);
		msg.baudLo = 0xff;
		msg.baudHi = 0xb2;	/* Values for 9600 baud */
	}

	/* If parity is enabled, we must calculate it ourselves. */
	msg.parity = 0;		/* XXX for now */

	msg.ctsFlowControl = (p_priv->flow_control == flow_cts);
	msg.xonFlowControl = 0;

	/* Do handshaking outputs, DTR is inverted relative to RTS */
	msg.rts = p_priv->rts_state;
	msg.dtr = p_priv->dtr_state;

	msg.forwardingLength = 16;
	msg.forwardMs = 10;
	msg.breakThreshold = 45;
	msg.xonChar = 17;
	msg.xoffChar = 19;

	/*msg.returnStatus = 1;
	msg.resetDataToggle = 0xff;*/
	/* Opening port */
	if (reset_port == 1) {
		msg._txOn = 1;
		msg._txOff = 0;
		msg.txFlush = 0;
		msg.txForceXoff = 0;
		msg.txBreak = 0;
		msg.rxOn = 1;
		msg.rxOff = 0;
		msg.rxFlush = 1;
		msg.rxForward = 0;
		msg.returnStatus = 0;
		msg.resetDataToggle = 0xff;
	}
	/* Closing port */
	else if (reset_port == 2) {
		msg._txOn = 0;
		msg._txOff = 1;
		msg.txFlush = 0;
		msg.txForceXoff = 0;
		msg.txBreak = 0;
		msg.rxOn = 0;
		msg.rxOff = 1;
		msg.rxFlush = 1;
		msg.rxForward = 0;
		msg.returnStatus = 0;
		msg.resetDataToggle = 0;
	}
	/* Sending intermediate configs */
	else {
		msg._txOn = (!p_priv->break_on);
		msg._txOff = 0;
		msg.txFlush = 0;
		msg.txForceXoff = 0;
		msg.txBreak = (p_priv->break_on);
		msg.rxOn = 0;
		msg.rxOff = 0;
		msg.rxFlush = 0;
		msg.rxForward = 0;
		msg.returnStatus = 0;
		msg.resetDataToggle = 0x0;
	}

	p_priv->resend_cont = 0;
	memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));

	/* send the data out the device on control endpoint */
	this_urb->transfer_buffer_length = sizeof(msg);

	err = usb_submit_urb(this_urb, GFP_ATOMIC);
	if (err != 0)
		dbg("%s - usb_submit_urb(setup) failed", __func__);
#if 0
	else {
		dbg("%s - usb_submit_urb(setup) OK %d bytes", __func__,
		    this_urb->transfer_buffer_length);
	}
#endif

	return 0;
}
/*
 * Build and send a USA-49 port-control message.  Unlike the other
 * variants the control message goes out on the shared glocont urb (or,
 * for the 49WG, as a vendor request on USB control endpoint 0).
 * reset_port: 1 = opening, 2 = closing, anything else = update the
 * current config.  Returns 0 on success, -1 if the urb is missing or
 * still busy.
 */
static int keyspan_usa49_send_setup(struct usb_serial *serial,
				    struct usb_serial_port *port,
				    int reset_port)
{
	struct keyspan_usa49_portControlMessage	msg;
	struct usb_ctrlrequest 			*dr = NULL;
	struct keyspan_serial_private 		*s_priv;
	struct keyspan_port_private 		*p_priv;
	const struct keyspan_device_details	*d_details;
	struct urb				*this_urb;
	int 					err, device_port;

	dbg("%s", __func__);

	s_priv = usb_get_serial_data(serial);
	p_priv = usb_get_serial_port_data(port);
	d_details = s_priv->device_details;

	this_urb = s_priv->glocont_urb;

	/* Work out which port within the device is being setup */
	device_port = port->number - port->serial->minor;

	/* Make sure we have an urb then send the message */
	if (this_urb == NULL) {
		dbg("%s - oops no urb for port %d.", __func__, port->number);
		return -1;
	}

	dbg("%s - endpoint %d port %d (%d)",
	    __func__, usb_pipeendpoint(this_urb->pipe),
	    port->number, device_port);

	/* Save reset port val for resend.
	   Don't overwrite resend for open/close condition. */
	if ((reset_port + 1) > p_priv->resend_cont)
		p_priv->resend_cont = reset_port + 1;

	if (this_urb->status == -EINPROGRESS) {
		/*  dbg("%s - already writing", __func__); */
		mdelay(5);
		return -1;
	}

	memset(&msg, 0, sizeof(struct keyspan_usa49_portControlMessage));

	/* the message carries the target port, since the urb is shared */
	/*msg.portNumber = port->number;*/
	msg.portNumber = device_port;

	/* Only set baud rate if it's changed */
	if (p_priv->old_baud != p_priv->baud) {
		p_priv->old_baud = p_priv->baud;
		msg.setClocking = 0xff;
		if (d_details->calculate_baud_rate
		    (p_priv->baud, d_details->baudclk, &msg.baudHi,
		     &msg.baudLo, &msg.prescaler, device_port) == KEYSPAN_INVALID_BAUD_RATE) {
			dbg("%s - Invalid baud rate %d requested, using 9600.",
						__func__, p_priv->baud);
			msg.baudLo = 0;
			msg.baudHi = 125;	/* Values for 9600 baud */
			msg.prescaler = 10;
		}
		/* msg.setPrescaler = 0xff; */
	}

	/* Line control: stop bits, character size, parity */
	msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1;
	switch (p_priv->cflag & CSIZE) {
	case CS5:
		msg.lcr |= USA_DATABITS_5;
		break;
	case CS6:
		msg.lcr |= USA_DATABITS_6;
		break;
	case CS7:
		msg.lcr |= USA_DATABITS_7;
		break;
	case CS8:
		msg.lcr |= USA_DATABITS_8;
		break;
	}
	if (p_priv->cflag & PARENB) {
		/* note USA_PARITY_NONE == 0 */
		msg.lcr |= (p_priv->cflag & PARODD) ?
			USA_PARITY_ODD : USA_PARITY_EVEN;
	}
	msg.setLcr = 0xff;

	msg.ctsFlowControl = (p_priv->flow_control == flow_cts);
	msg.xonFlowControl = 0;
	msg.setFlowControl = 0xff;

	msg.forwardingLength = 16;
	msg.xonChar = 17;
	msg.xoffChar = 19;

	/* Opening port */
	if (reset_port == 1) {
		msg._txOn = 1;
		msg._txOff = 0;
		msg.txFlush = 0;
		msg.txBreak = 0;
		msg.rxOn = 1;
		msg.rxOff = 0;
		msg.rxFlush = 1;
		msg.rxForward = 0;
		msg.returnStatus = 0;
		msg.resetDataToggle = 0xff;
		msg.enablePort = 1;
		msg.disablePort = 0;
	}
	/* Closing port */
	else if (reset_port == 2) {
		msg._txOn = 0;
		msg._txOff = 1;
		msg.txFlush = 0;
		msg.txBreak = 0;
		msg.rxOn = 0;
		msg.rxOff = 1;
		msg.rxFlush = 1;
		msg.rxForward = 0;
		msg.returnStatus = 0;
		msg.resetDataToggle = 0;
		msg.enablePort = 0;
		msg.disablePort = 1;
	}
	/* Sending intermediate configs */
	else {
		msg._txOn = (!p_priv->break_on);
		msg._txOff = 0;
		msg.txFlush = 0;
		msg.txBreak = (p_priv->break_on);
		msg.rxOn = 0;
		msg.rxOff = 0;
		msg.rxFlush = 0;
		msg.rxForward = 0;
		msg.returnStatus = 0;
		msg.resetDataToggle = 0x0;
		msg.enablePort = 0;
		msg.disablePort = 0;
	}

	/* Do handshaking outputs */
	msg.setRts = 0xff;
	msg.rts = p_priv->rts_state;

	msg.setDtr = 0xff;
	msg.dtr = p_priv->dtr_state;

	p_priv->resend_cont = 0;

	/* if the device is a 49wg, we send control message on usb
	   control EP 0; the setup packet lives in ctrl_buf and the
	   message payload in glocont_buf */
	if (d_details->product_id == keyspan_usa49wg_product_id) {
		dr = (void *)(s_priv->ctrl_buf);
		dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT;
		dr->bRequest = 0xB0;	/* 49wg control message */;
		dr->wValue = 0;
		dr->wIndex = 0;
		dr->wLength = cpu_to_le16(sizeof(msg));

		memcpy(s_priv->glocont_buf, &msg, sizeof(msg));

		usb_fill_control_urb(this_urb, serial->dev,
				usb_sndctrlpipe(serial->dev, 0),
				(unsigned char *)dr, s_priv->glocont_buf,
				sizeof(msg), usa49_glocont_callback, serial);

	} else {
		memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));

		/* send the data out the device on control endpoint */
		this_urb->transfer_buffer_length = sizeof(msg);
	}
	err = usb_submit_urb(this_urb, GFP_ATOMIC);
	if (err != 0)
		dbg("%s - usb_submit_urb(setup) failed (%d)", __func__, err);
#if 0
	else {
		dbg("%s - usb_submit_urb(%d) OK %d bytes (end %d)", __func__,
		    outcont_urb, this_urb->transfer_buffer_length,
		    usb_pipeendpoint(this_urb->pipe));
	}
#endif

	return 0;
}
/*
 * Build and send a USA-90 port-control message on the port's outcont
 * urb.  reset_port: 1 = opening, 2 = closing, anything else = update
 * the current config.  Unlike the other variants, an invalid baud rate
 * falls back by recomputing for a forced 9600 rather than hard-coding
 * divisor values.  Returns 0 on success, -1 if the urb is missing or
 * still busy.
 */
static int keyspan_usa90_send_setup(struct usb_serial *serial,
				    struct usb_serial_port *port,
				    int reset_port)
{
	struct keyspan_usa90_portControlMessage	msg;
	struct keyspan_serial_private 		*s_priv;
	struct keyspan_port_private 		*p_priv;
	const struct keyspan_device_details	*d_details;
	struct urb				*this_urb;
	int 					err;
	u8						prescaler;

	dbg("%s", __func__);

	s_priv = usb_get_serial_data(serial);
	p_priv = usb_get_serial_port_data(port);
	d_details = s_priv->device_details;

	/* only do something if we have a bulk out endpoint */
	this_urb = p_priv->outcont_urb;
	if (this_urb == NULL) {
		dbg("%s - oops no urb.", __func__);
		return -1;
	}

	/* Save reset port val for resend.
	   Don't overwrite resend for open/close condition. */
	if ((reset_port + 1) > p_priv->resend_cont)
		p_priv->resend_cont = reset_port + 1;
	if (this_urb->status == -EINPROGRESS) {
		dbg("%s already writing", __func__);
		mdelay(5);
		return -1;
	}

	memset(&msg, 0, sizeof(struct keyspan_usa90_portControlMessage));

	/* Only set baud rate if it's changed */
	if (p_priv->old_baud != p_priv->baud) {
		p_priv->old_baud = p_priv->baud;
		msg.setClocking = 0x01;
		if (d_details->calculate_baud_rate
		    (p_priv->baud, d_details->baudclk, &msg.baudHi,
		     &msg.baudLo, &prescaler, 0) == KEYSPAN_INVALID_BAUD_RATE) {
			dbg("%s - Invalid baud rate %d requested, using 9600.",
						__func__, p_priv->baud);
			p_priv->baud = 9600;
			d_details->calculate_baud_rate(p_priv->baud,
				d_details->baudclk, &msg.baudHi, &msg.baudLo,
				&prescaler, 0);
		}
		msg.setRxMode = 1;
		msg.setTxMode = 1;
	}

	/* modes must always be correctly specified;
	   DMA above 57600 baud, by-hand otherwise */
	if (p_priv->baud > 57600) {
		msg.rxMode = RXMODE_DMA;
		msg.txMode = TXMODE_DMA;
	} else {
		msg.rxMode = RXMODE_BYHAND;
		msg.txMode = TXMODE_BYHAND;
	}

	/* Line control: stop bits, character size, parity */
	msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1;
	switch (p_priv->cflag & CSIZE) {
	case CS5:
		msg.lcr |= USA_DATABITS_5;
		break;
	case CS6:
		msg.lcr |= USA_DATABITS_6;
		break;
	case CS7:
		msg.lcr |= USA_DATABITS_7;
		break;
	case CS8:
		msg.lcr |= USA_DATABITS_8;
		break;
	}
	if (p_priv->cflag & PARENB) {
		/* note USA_PARITY_NONE == 0 */
		msg.lcr |= (p_priv->cflag & PARODD) ?
			USA_PARITY_ODD : USA_PARITY_EVEN;
	}
	/* LCR is only pushed when termios flags actually changed */
	if (p_priv->old_cflag != p_priv->cflag) {
		p_priv->old_cflag = p_priv->cflag;
		msg.setLcr = 0x01;
	}

	if (p_priv->flow_control == flow_cts)
		msg.txFlowControl = TXFLOW_CTS;
	msg.setTxFlowControl = 0x01;
	msg.setRxFlowControl = 0x01;

	msg.rxForwardingLength = 16;
	msg.rxForwardingTimeout = 16;
	msg.txAckSetting = 0;
	msg.xonChar = 17;
	msg.xoffChar = 19;

	/* Opening port */
	if (reset_port == 1) {
		msg.portEnabled = 1;
		msg.rxFlush = 1;
		msg.txBreak = (p_priv->break_on);
	}
	/* Closing port */
	else if (reset_port == 2)
		msg.portEnabled = 0;
	/* Sending intermediate configs */
	else {
		msg.portEnabled = 1;
		msg.txBreak = (p_priv->break_on);
	}

	/* Do handshaking outputs */
	msg.setRts = 0x01;
	msg.rts = p_priv->rts_state;

	msg.setDtr = 0x01;
	msg.dtr = p_priv->dtr_state;

	p_priv->resend_cont = 0;
	memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));

	/* send the data out the device on control endpoint */
	this_urb->transfer_buffer_length = sizeof(msg);

	err = usb_submit_urb(this_urb, GFP_ATOMIC);
	if (err != 0)
		dbg("%s - usb_submit_urb(setup) failed (%d)", __func__, err);
	return 0;
}
/*
 * Build a usa67 port-control message and send it to the device over the
 * shared (global) control urb.  @reset_port selects the intent:
 * 1 = opening the port, 2 = closing it, anything else = sending an
 * intermediate configuration update.
 * Returns 0 on success, -1 if the control urb is missing or still busy.
 */
static int keyspan_usa67_send_setup(struct usb_serial *serial,
				    struct usb_serial_port *port,
				    int reset_port)
{
	struct keyspan_usa67_portControlMessage msg;
	struct keyspan_serial_private *s_priv;
	struct keyspan_port_private *p_priv;
	const struct keyspan_device_details *d_details;
	struct urb *this_urb;
	int err, device_port;
	dbg("%s", __func__);
	s_priv = usb_get_serial_data(serial);
	p_priv = usb_get_serial_port_data(port);
	d_details = s_priv->device_details;
	this_urb = s_priv->glocont_urb;
	/* Work out which port within the device is being setup */
	device_port = port->number - port->serial->minor;
	/* Make sure we have an urb then send the message */
	if (this_urb == NULL) {
		dbg("%s - oops no urb for port %d.", __func__,
			port->number);
		return -1;
	}
	/* Save reset port val for resend.
	   Don't overwrite resend for open/close condition. */
	if ((reset_port + 1) > p_priv->resend_cont)
		p_priv->resend_cont = reset_port + 1;
	if (this_urb->status == -EINPROGRESS) {
		/* Control urb still in flight: back off briefly and let the
		 * completion handler resend using resend_cont saved above. */
		/* dbg("%s - already writing", __func__); */
		mdelay(5);
		return -1;
	}
	memset(&msg, 0, sizeof(struct keyspan_usa67_portControlMessage));
	msg.port = device_port;
	/* Only set baud rate if it's changed */
	if (p_priv->old_baud != p_priv->baud) {
		p_priv->old_baud = p_priv->baud;
		msg.setClocking = 0xff;
		if (d_details->calculate_baud_rate
		    (p_priv->baud, d_details->baudclk, &msg.baudHi,
		     &msg.baudLo, &msg.prescaler, device_port) == KEYSPAN_INVALID_BAUD_RATE) {
			dbg("%s - Invalid baud rate %d requested, using 9600.",
			    __func__, p_priv->baud);
			msg.baudLo = 0;
			msg.baudHi = 125;	/* Values for 9600 baud */
			msg.prescaler = 10;
		}
		msg.setPrescaler = 0xff;
	}
	/* Line control: stop bits, data bits and parity from cached cflag. */
	msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1;
	switch (p_priv->cflag & CSIZE) {
	case CS5:
		msg.lcr |= USA_DATABITS_5;
		break;
	case CS6:
		msg.lcr |= USA_DATABITS_6;
		break;
	case CS7:
		msg.lcr |= USA_DATABITS_7;
		break;
	case CS8:
		msg.lcr |= USA_DATABITS_8;
		break;
	}
	if (p_priv->cflag & PARENB) {
		/* note USA_PARITY_NONE == 0 */
		msg.lcr |= (p_priv->cflag & PARODD)?
			USA_PARITY_ODD : USA_PARITY_EVEN;
	}
	msg.setLcr = 0xff;
	msg.ctsFlowControl = (p_priv->flow_control == flow_cts);
	msg.xonFlowControl = 0;
	msg.setFlowControl = 0xff;
	msg.forwardingLength = 16;
	msg.xonChar = 17;
	msg.xoffChar = 19;
	if (reset_port == 1) {
		/* Opening port */
		msg._txOn = 1;
		msg._txOff = 0;
		msg.txFlush = 0;
		msg.txBreak = 0;
		msg.rxOn = 1;
		msg.rxOff = 0;
		msg.rxFlush = 1;
		msg.rxForward = 0;
		msg.returnStatus = 0;
		msg.resetDataToggle = 0xff;
	} else if (reset_port == 2) {
		/* Closing port */
		msg._txOn = 0;
		msg._txOff = 1;
		msg.txFlush = 0;
		msg.txBreak = 0;
		msg.rxOn = 0;
		msg.rxOff = 1;
		msg.rxFlush = 1;
		msg.rxForward = 0;
		msg.returnStatus = 0;
		msg.resetDataToggle = 0;
	} else {
		/* Sending intermediate configs; tx reflects break state. */
		msg._txOn = (!p_priv->break_on);
		msg._txOff = 0;
		msg.txFlush = 0;
		msg.txBreak = (p_priv->break_on);
		msg.rxOn = 0;
		msg.rxOff = 0;
		msg.rxFlush = 0;
		msg.rxForward = 0;
		msg.returnStatus = 0;
		msg.resetDataToggle = 0x0;
	}
	/* Do handshaking outputs */
	msg.setTxTriState_setRts = 0xff;
	msg.txTriState_rts = p_priv->rts_state;
	msg.setHskoa_setDtr = 0xff;
	msg.hskoa_dtr = p_priv->dtr_state;
	/* Message built; clear the pending-resend marker before submit. */
	p_priv->resend_cont = 0;
	memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));
	/* send the data out the device on control endpoint */
	this_urb->transfer_buffer_length = sizeof(msg);
	err = usb_submit_urb(this_urb, GFP_ATOMIC);
	if (err != 0)
		dbg("%s - usb_submit_urb(setup) failed (%d)", __func__,
				err);
	return 0;
}
static void keyspan_send_setup(struct usb_serial_port *port, int reset_port)
{
struct usb_serial *serial = port->serial;
struct keyspan_serial_private *s_priv;
const struct keyspan_device_details *d_details;
dbg("%s", __func__);
s_priv = usb_get_serial_data(serial);
d_details = s_priv->device_details;
switch (d_details->msg_format) {
case msg_usa26:
keyspan_usa26_send_setup(serial, port, reset_port);
break;
case msg_usa28:
keyspan_usa28_send_setup(serial, port, reset_port);
break;
case msg_usa49:
keyspan_usa49_send_setup(serial, port, reset_port);
break;
case msg_usa90:
keyspan_usa90_send_setup(serial, port, reset_port);
break;
case msg_usa67:
keyspan_usa67_send_setup(serial, port, reset_port);
break;
}
}
/* Gets called by the "real" driver (ie once firmware is loaded
and renumeration has taken place. */
static int keyspan_startup(struct usb_serial *serial)
{
int i, err;
struct usb_serial_port *port;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
dbg("%s", __func__);
for (i = 0; (d_details = keyspan_devices[i]) != NULL; ++i)
if (d_details->product_id ==
le16_to_cpu(serial->dev->descriptor.idProduct))
break;
if (d_details == NULL) {
dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
__func__, le16_to_cpu(serial->dev->descriptor.idProduct));
return 1;
}
/* Setup private data for serial driver */
s_priv = kzalloc(sizeof(struct keyspan_serial_private), GFP_KERNEL);
if (!s_priv) {
dbg("%s - kmalloc for keyspan_serial_private failed.",
__func__);
return -ENOMEM;
}
s_priv->device_details = d_details;
usb_set_serial_data(serial, s_priv);
/* Now setup per port private data */
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
p_priv = kzalloc(sizeof(struct keyspan_port_private),
GFP_KERNEL);
if (!p_priv) {
dbg("%s - kmalloc for keyspan_port_private (%d) failed!.", __func__, i);
return 1;
}
p_priv->device_details = d_details;
usb_set_serial_port_data(port, p_priv);
}
keyspan_setup_urbs(serial);
if (s_priv->instat_urb != NULL) {
err = usb_submit_urb(s_priv->instat_urb, GFP_KERNEL);
if (err != 0)
dbg("%s - submit instat urb failed %d", __func__,
err);
}
if (s_priv->indat_urb != NULL) {
err = usb_submit_urb(s_priv->indat_urb, GFP_KERNEL);
if (err != 0)
dbg("%s - submit indat urb failed %d", __func__,
err);
}
return 0;
}
/*
 * Disconnect callback: quiesce and then free every urb owned by the
 * device and its ports.  All urbs are stopped first so none can
 * complete (and potentially resubmit) while we free them below.
 * Private data itself is freed later in keyspan_release().
 */
static void keyspan_disconnect(struct usb_serial *serial)
{
	int i, j;
	struct usb_serial_port *port;
	struct keyspan_serial_private *s_priv;
	struct keyspan_port_private *p_priv;
	dbg("%s", __func__);
	s_priv = usb_get_serial_data(serial);
	/* Stop reading/writing urbs */
	stop_urb(s_priv->instat_urb);
	stop_urb(s_priv->glocont_urb);
	stop_urb(s_priv->indat_urb);
	for (i = 0; i < serial->num_ports; ++i) {
		port = serial->port[i];
		p_priv = usb_get_serial_port_data(port);
		stop_urb(p_priv->inack_urb);
		stop_urb(p_priv->outcont_urb);
		/* Each port owns two in/out data urbs. */
		for (j = 0; j < 2; j++) {
			stop_urb(p_priv->in_urbs[j]);
			stop_urb(p_priv->out_urbs[j]);
		}
	}
	/* Now free them */
	usb_free_urb(s_priv->instat_urb);
	usb_free_urb(s_priv->indat_urb);
	usb_free_urb(s_priv->glocont_urb);
	for (i = 0; i < serial->num_ports; ++i) {
		port = serial->port[i];
		p_priv = usb_get_serial_port_data(port);
		usb_free_urb(p_priv->inack_urb);
		usb_free_urb(p_priv->outcont_urb);
		for (j = 0; j < 2; j++) {
			usb_free_urb(p_priv->in_urbs[j]);
			usb_free_urb(p_priv->out_urbs[j]);
		}
	}
}
/*
 * Release callback: free the device-level and per-port private data
 * allocated in keyspan_startup().  Urbs were already torn down in
 * keyspan_disconnect().
 */
static void keyspan_release(struct usb_serial *serial)
{
	struct keyspan_serial_private *s_priv;
	int i;

	dbg("%s", __func__);

	s_priv = usb_get_serial_data(serial);

	/* Device-level private data. */
	kfree(s_priv);

	/* Per-port private data. */
	for (i = 0; i < serial->num_ports; i++)
		kfree(usb_get_serial_port_data(serial->port[i]));
}
/* Module metadata and the firmware images this driver may request at
 * probe time (loaded via the firmware loader before renumeration). */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("keyspan/usa28.fw");
MODULE_FIRMWARE("keyspan/usa28x.fw");
MODULE_FIRMWARE("keyspan/usa28xa.fw");
MODULE_FIRMWARE("keyspan/usa28xb.fw");
MODULE_FIRMWARE("keyspan/usa19.fw");
MODULE_FIRMWARE("keyspan/usa19qi.fw");
MODULE_FIRMWARE("keyspan/mpr.fw");
MODULE_FIRMWARE("keyspan/usa19qw.fw");
MODULE_FIRMWARE("keyspan/usa18x.fw");
MODULE_FIRMWARE("keyspan/usa19w.fw");
MODULE_FIRMWARE("keyspan/usa49w.fw");
MODULE_FIRMWARE("keyspan/usa49wlc.fw");
/* Debug logging toggle, settable at load time or via sysfs. */
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
| gpl-2.0 |
TeamRegular/android_kernel_lge_e2nxx-stock | fs/ext4/indirect.c | 3603 | 45190 | /*
* linux/fs/ext4/indirect.c
*
* from
*
* linux/fs/ext4/inode.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/inode.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Goal-directed block allocation by Stephen Tweedie
* (sct@redhat.com), 1993, 1998
*/
#include "ext4_jbd2.h"
#include "truncate.h"
#include <trace/events/ext4.h>
/*
 * One step in a chain of indirect blocks: @p points at the block-number
 * slot (inside the inode's i_data or an indirect block's buffer), @key
 * caches the little-endian value read from that slot, and @bh is the
 * buffer_head hosting the slot (NULL when it lives in the inode itself).
 */
typedef struct {
	__le32 *p;
	__le32 key;
	struct buffer_head *bh;
} Indirect;
/* Record one chain step: remember the slot address, cache its current
 * value, and note which buffer (if any) hosts it. */
static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->p = v;
	p->key = *v;
	p->bh = bh;
}
/**
* ext4_block_to_path - parse the block number into array of offsets
* @inode: inode in question (we are only interested in its superblock)
* @i_block: block number to be parsed
* @offsets: array to store the offsets in
* @boundary: set this non-zero if the referred-to block is likely to be
* followed (on disk) by an indirect block.
*
* To store the locations of file's data ext4 uses a data structure common
* for UNIX filesystems - tree of pointers anchored in the inode, with
* data blocks at leaves and indirect blocks in intermediate nodes.
* This function translates the block number into path in that tree -
* return value is the path length and @offsets[n] is the offset of
* pointer to (n+1)th node in the nth one. If @block is out of range
* (negative or too large) warning is printed and zero returned.
*
* Note: function doesn't find node addresses, so no IO is needed. All
* we need to know is the capacity of indirect blocks (taken from the
* inode->i_sb).
*/
/*
* Portability note: the last comparison (check that we fit into triple
* indirect block) is spelled differently, because otherwise on an
* architecture with 32-bit longs and 8Kb pages we might get into trouble
* if our filesystem had 8Kb blocks. We might use long long, but that would
* kill us on x86. Oh, well, at least the sign propagation does not matter -
* i_block would have to be negative in the very beginning, so we would not
* get there at all.
*/
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	/* Pointers per indirect block, and its log2, for this blocksize. */
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;
	/* Each branch subtracts the preceding region's size from i_block
	 * (note the side-effecting comparisons) and records the path. */
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		/* Out of range: n stays 0, caller sees depth == 0. */
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		/* Distance to the end of the last-level indirect block. */
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
/**
* ext4_get_branch - read the chain of indirect blocks leading to data
* @inode: inode in question
* @depth: depth of the chain (1 - direct pointer, etc.)
* @offsets: offsets of pointers in inode/indirect blocks
* @chain: place to store the result
* @err: here we store the error value
*
* Function fills the array of triples <key, p, bh> and returns %NULL
* if everything went OK or the pointer to the last filled triple
* (incomplete one) otherwise. Upon the return chain[i].key contains
* the number of (i+1)-th block in the chain (as it is stored in memory,
* i.e. little-endian 32-bit), chain[i].p contains the address of that
* number (it points into struct inode for i==0 and into the bh->b_data
* for i>0) and chain[i].bh points to the buffer_head of i-th indirect
* block for i>0 and NULL for i==0. In other words, it holds the block
* numbers of the chain, addresses they were taken from (and where we can
* verify that chain did not change) and buffer_heads hosting these
* numbers.
*
* Function stops when it stumbles upon zero pointer (absent block)
* (pointer to last triple returned, *@err == 0)
* or when it gets an IO error reading an indirect block
* (ditto, *@err == -EIO)
* or when it reads all @depth-1 indirect blocks successfully and finds
* the whole chain, all way to the data (returns %NULL, *err == 0).
*
* Need to be called with
* down_read(&EXT4_I(inode)->i_data_sem)
*/
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	/* Walk down the tree, reading one indirect block per level. */
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}
		/* Record this level's slot; bh reference is kept in the
		 * chain and released by the caller via brelse(). */
		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;
failure:
	*err = -EIO;
no_block:
	/* Return the last (incomplete) triple; *err says why we stopped. */
	return p;
}
/**
* ext4_find_near - find a place for allocation with sufficient locality
* @inode: owner
* @ind: descriptor of indirect block.
*
* This function returns the preferred place for block allocation.
* It is used when heuristic for sequential allocation fails.
* Rules are:
* + if there is a block to the left of our position - allocate near it.
* + if pointer will live in indirect block - allocate near that block.
* + if pointer will live in inode - allocate in the same
* cylinder group.
*
* In the latter case we colour the starting block by the callers PID to
* prevent it from clashing with concurrent allocations for a different inode
* in the same block group. The PID is used here so that functionally related
* files will be close-by on-disk.
*
* Caller must make sure that @ind is valid and will stay that way.
*/
/*
 * Pick an allocation goal with good locality for the block whose slot
 * @ind describes: prefer the closest allocated block to the left, then
 * the indirect block hosting the slot, then the inode's own group.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *slot = ind->p;

	/* Scan leftwards for the nearest non-empty slot. */
	while (slot > start) {
		slot--;
		if (*slot)
			return le32_to_cpu(*slot);
	}

	/* No such thing, so try the location of the indirect block. */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * The pointer lives in the inode itself, so just aim at the same
	 * cylinder group as the inode.
	 */
	return ext4_inode_to_goal_block(inode);
}
/**
* ext4_find_goal - find a preferred place for allocation.
* @inode: owner
* @block: block we want
* @partial: pointer to the last triple within a chain
*
* Normally this function find the preferred place for block allocation,
* returns it.
* Because this is only used for non-extent files, we limit the block nr
* to 32 bits.
*/
/*
 * Find a preferred place for allocation.  Only used for non-extent
 * files, so the result is clamped to a 32-bit block number.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	/*
	 * XXX need to get goal block from mballoc's data structures
	 */
	return ext4_find_near(inode, partial) & EXT4_MAX_BLOCK_FILE_PHYS;
}
/**
* ext4_blks_to_allocate - Look up the block map and count the number
* of direct blocks need to be allocated for the given branch.
*
* @branch: chain of indirect blocks
* @k: number of blocks need for indirect blocks
* @blks: number of data blocks to be mapped.
* @blocks_to_boundary: the offset in the indirect block
*
 * return the total number of blocks to be allocated, including the
* direct and indirect blocks.
*/
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int room = blocks_to_boundary + 1;
	unsigned int count;

	/*
	 * If any [t,d]indirect block on the path is missing (k > 0), none
	 * of the data blocks under it can exist yet: take as many as fit
	 * up to the boundary (no cross-boundary allocation).
	 */
	if (k > 0)
		return blks < room ? blks : room;

	/* Leaf level exists: count consecutive empty slots after ours. */
	count = 1;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0)
		count++;
	return count;
}
/**
* ext4_alloc_blocks: multiple allocate blocks needed for a branch
* @handle: handle for this transaction
* @inode: inode which needs allocated blocks
* @iblock: the logical block to start allocated at
* @goal: preferred physical block of allocation
* @indirect_blks: the number of blocks need to allocate for indirect
* blocks
* @blks: number of desired blocks
* @new_blocks: on return it will store the new block numbers for
* the indirect blocks(if needed) and the first direct block,
* @err: on return it will store the error code
*
* This function will return the number of blocks allocated as
* requested by the passed-in parameters.
*/
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;
	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch. That's the
	 * minimum number of blocks need to allocate(required)
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode, goal,
						     0, &count, err);
		if (*err)
			goto failed_out;
		/* Sanity: non-extent files cannot reference blocks > 2^32. */
		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
			EXT4_ERROR_INODE(inode,
					 "current_block %llu + count %lu > %d!",
					 current_block, count,
					 EXT4_MAX_BLOCK_FILE_PHYS);
			*err = -EIO;
			goto failed_out;
		}
		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}
	/* count now holds direct blocks already obtained above (if any). */
	target = blks - count ;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;
	current_block = ext4_mb_new_blocks(handle, &ar, err);
	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
		EXT4_ERROR_INODE(inode,
				 "current_block %llu + ar.len %d > %d!",
				 current_block, ar.len,
				 EXT4_MAX_BLOCK_FILE_PHYS);
		*err = -EIO;
		goto failed_out;
	}
	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	/* Free any indirect blocks we had already obtained. */
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
	return ret;
}
/**
* ext4_alloc_branch - allocate and set up a chain of blocks.
* @handle: handle for this transaction
* @inode: owner
* @indirect_blks: number of allocated indirect blocks
* @blks: number of allocated direct blocks
* @goal: preferred place for allocation
* @offsets: offsets (in the blocks) to store the pointers to next.
* @branch: place to store the chain in.
*
* This function allocates blocks, zeroes out all but the last one,
* links them into chain and (if we are synchronous) writes them to disk.
* In other words, it prepares a branch that can be spliced onto the
* inode. It stores the information about that chain in the branch[], in
* the same format as ext4_get_branch() would do. We are calling it after
* we had read the existing part of chain and partial points to the last
* triple of that (one with zero ->key). Upon the exit we have the same
* picture as after the successful ext4_get_block(), except that in one
* place chain is disconnected - *branch->p is still zero (we did not
* set the last link), but branch->key contains the number that should
* be placed into *branch->p to fill that gap.
*
* If allocation fails we free all blocks we've allocated (and forget
* their buffer_heads) and return the error value the from failed
* ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
* as described above and return 0.
*/
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;
	/* Get the indirect blocks plus `num' direct blocks in one go. */
	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;
	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -EIO;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated.
	 * new_blocks[0..indirect_blks-1] are the indirect blocks,
	 * new_blocks[indirect_blks] is the first of `num' direct blocks. */
	ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
	for (i = 1; i <= n ; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	/* Indirect blocks never linked into a buffer. */
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
	/* Finally the run of direct blocks (i == indirect_blks here). */
	ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
	return err;
}
/**
* ext4_splice_branch - splice the allocated branch onto inode.
* @handle: handle for this transaction
* @inode: owner
* @block: (logical) number of block we are adding
* @chain: chain of indirect blocks (with a missing link - see
* ext4_alloc_branch)
* @where: location of missing link
* @num: number of indirect blocks we are adding
* @blks: number of direct blocks we are adding
*
* This function fills the missing link and does all housekeeping needed in
* inode (->i_blocks, etc.). In case of success we end up with the full
* chain to new block and return 0.
*/
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;
	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */
	*where->p = where->key;
	/*
	 * Update the host buffer_head or inode to point to more just allocated
	 * direct blocks blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}
	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode. Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size. But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;
err_out:
	/* Undo: free the new indirect blocks, then the direct run. */
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
			 blks, 0);
	return err;
}
/*
* The ext4_ind_map_blocks() function handles non-extents inodes
* (i.e., using the traditional indirect/double-indirect i_blocks
* scheme) for ext4_map_blocks().
*
* Allocation strategy is simple: if we have to allocate something, we will
* have to go the whole way to leaf. So let's do it before attaching anything
* to tree, set linkage between the newborn blocks, write them if sync is
* required, recheck the path, free and repeat if check fails, otherwise
* set the last missing link (that will protect us from any truncate-generated
* removals - all blocks on the path are immune now) and possibly force the
* write on the parent block.
* That has a nice additional property: no special recovery from the failed
* allocations is needed - we simply release blocks and do not touch anything
* reachable from inode.
*
* `handle' can be NULL if create == 0.
*
* return > 0, # of blocks mapped or allocated.
* return = 0, if plain lookup failed.
* return < 0, error case.
*
* The ext4_ind_get_blocks() function should be called with
* down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
* blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
* down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
* blocks.
*/
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* Map more blocks while they are physically contiguous. */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		/*
		 * Take the normal cleanup path: a bare return here would
		 * leak the buffer heads referenced by the partial chain
		 * and skip the exit tracepoint.
		 */
		err = -ENOSPC;
		goto cleanup;
	}

	goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      map->m_len, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, map->m_lblk,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	/* Drop the buffer heads pinned by ext4_get_branch(). */
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
				map->m_pblk, map->m_len, err);
	return err;
}
/*
* O_DIRECT for ext3 (or indirect map) based files
*
* If the O_DIRECT write will extend the file then add this inode to the
* orphan list. So recovery will truncate it back to the original size
* if the machine crashes during the write.
*
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
* crashes then stale disk data _may_ be exposed inside the file. But current
* VFS code falls back into buffered path in that case so we are safe.
*/
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
			   const struct iovec *iov, loff_t offset,
			   unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;
	if (rw == WRITE) {
		loff_t final_size = offset + count;
		if (final_size > inode->i_size) {
			/* Extending write: put the inode on the orphan list
			 * first so a crash mid-write gets truncated back on
			 * recovery.  Credits for sb + inode write. */
			handle = ext4_journal_start(inode, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}
retry:
	if (rw == READ && ext4_should_dioread_nolock(inode)) {
		/* Flush any pending unwritten-extent conversions before a
		 * lockless read so we see up-to-date mappings. */
		if (unlikely(!list_empty(&ei->i_completed_io_list))) {
			mutex_lock(&inode->i_mutex);
			ext4_flush_completed_IO(inode);
			mutex_unlock(&inode->i_mutex);
		}
		ret = __blockdev_direct_IO(rw, iocb, inode,
				 inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext4_get_block, NULL, NULL, 0);
	} else {
		ret = blockdev_direct_IO(rw, iocb, inode, iov,
				 offset, nr_segs, ext4_get_block);
		/* A failed extending write may leave instantiated blocks
		 * past i_size; truncate them away. */
		if (unlikely((rw & WRITE) && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + iov_length(iov, nr_segs);
			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (orphan) {
		int err;
		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
			goto out;
		}
		/* Write finished (or failed cleanly): take the inode back
		 * off the orphan list and, on success, push i_size out. */
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace. So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
/*
* Calculate the number of metadata blocks need to reserve
 * to allocate a new block at @lblock for a non-extent-based file
*/
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	/* Mask that rounds a logical block down to the start of the run of
	 * blocks sharing one indirect block. */
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	/* Direct blocks are mapped straight from the inode: no metadata. */
	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	/*
	 * Same indirect-block group as the previous call: the metadata was
	 * already charged then, so only extend the cached run length.
	 * NOTE(review): this per-inode cache presumably relies on the
	 * caller's locking (i_data_sem) — confirm before reuse.
	 */
	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}

	/* New run: remember where it starts and charge one metadata block
	 * per tree level needed to reach this logical block. */
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
/*
 * Worst-case number of indirect-tree blocks touched when mapping
 * @nrblocks data blocks.  @chunk is non-zero when the blocks are known
 * to be logically contiguous.
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	/*
	 * Contiguous run: N data blocks need at most
	 * N/EXT4_ADDR_PER_BLOCK + 1 indirect blocks, 2 dindirect blocks
	 * and 1 tindirect block.
	 */
	if (chunk)
		return DIV_ROUND_UP(nrblocks,
				    EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;

	/*
	 * Discontiguous worst case: every data block touches its own
	 * indirect and double-indirect block, plus one shared triple
	 * indirect block.
	 */
	return nrblocks * 2 + 1;
}
/*
* Truncate transactions can be complex and absolutely huge. So we need to
* be able to restart the transaction at a conventient checkpoint to make
* sure we don't overflow the journal.
*
* start_transaction gets us a new handle for a truncate transaction,
* and extend_transaction tries to extend the existing one a bit. If
* extend fails, we need to propagate the failure up and restart the
* transaction in the top-level truncate loop. --sct
*/
/*
 * Open a journal handle big enough for one leg of a truncate.  On failure
 * the error is reported via ext4_std_error() and the ERR_PTR is returned
 * unchanged for the caller to inspect.
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *handle;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode));
	if (IS_ERR(handle))
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
	return handle;
}
/*
* Try to extend this transaction for the purposes of truncation.
*
* Returns 0 if we managed to create more room. If we can't create more
* room, and the transaction must be restarted we return 1.
*/
/*
 * Try to make room in the current truncate transaction.
 *
 * Returns 0 when the handle can keep going (no journal, enough credits
 * left, or the extend succeeded); returns 1 when the caller must restart
 * the transaction.  The checks are ordered cheapest-first and must stay
 * short-circuited.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle) ||
	    ext4_handle_has_enough_credits(handle,
					   EXT4_RESERVE_TRANS_BLOCKS + 1) ||
	    ext4_journal_extend(handle,
				ext4_blocks_for_truncate(inode)) == 0)
		return 0;
	return 1;
}
/*
* Probably it should be a library function... search for first non-zero word
* or memcmp with zero_page, whatever is better for particular architecture.
* Linus?
*/
/*
 * Return 1 iff every 32-bit word in [p, q) is zero, i.e. the block-pointer
 * range contains no live mappings.
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	for (; p < q; p++)
		if (*p)
			return 0;
	return 1;
}
/**
* ext4_find_shared - find the indirect blocks for partial truncation.
* @inode: inode in question
* @depth: depth of the affected branch
* @offsets: offsets of pointers in that branch (see ext4_block_to_path)
* @chain: place to store the pointers to partial indirect blocks
* @top: place to the (detached) top of branch
*
* This is a helper function used by ext4_truncate().
*
* When we do truncate() we may have to clean the ends of several
* indirect blocks but leave the blocks themselves alive. Block is
* partially truncated if some data below the new i_size is referred
* from it (and it is on the path to the first completely truncated
* data block, indeed). We have to free the top of that path along
* with everything to the right of the path. Since no allocation
* past the truncation point is possible until ext4_truncate()
* finishes, we may safely do the latter, but top of branch may
* require special attention - pageout below the truncation point
* might try to populate it.
*
* We atomically detach the top of branch from the tree, store the
* block number of its root in *@top, pointers to buffer_heads of
* partially truncated blocks - in @chain[].bh and pointers to
* their last elements that should not be removed - in
* @chain[].p. Return value is the pointer to last filled element
* of @chain.
*
* The work left to caller to do the actual freeing of subtrees:
* a) free the subtree starting from *@top
* b) free the subtrees whose roots are stored in
* (@chain[i].p+1 .. end of @chain[i].bh->b_data)
* c) free the subtrees growing from the inode past the @chain[0].
* (no partially truncated stuff there). */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	/*
	 * Walk back towards the inode to the last block whose entries
	 * before the truncation point are not all zero — that block must
	 * survive; everything deeper can be detached.
	 */
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	/* Drop buffer references above the detach point; the caller still
	 * owns the ones from chain up to (and including) the return value. */
	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
/*
* Zero a number of block pointers in either an inode or an indirect block.
* If we restart the transaction we must again get write access to the
* indirect block for further modification.
*
* We release `count' blocks on disk, but (last - first) may be greater
* than `count' because there can be holes in there.
*
* Return 0 on success, 1 on invalid block range
* and < 0 on fatal error.
*/
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	/* Directory and symlink blocks are treated as metadata when freed. */
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	/* Refuse to free a range that lies outside valid data blocks. */
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	/*
	 * Running low on journal credits: flush what we have modified so
	 * far (indirect block and inode), restart the transaction, and
	 * re-acquire write access to the indirect block before continuing.
	 */
	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	/* Zero the on-disk pointers first, then release the blocks. */
	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
/**
* ext4_free_data - free a list of data blocks
* @handle: handle for this transaction
* @inode: inode we are dealing with
* @this_bh: indirect buffer_head which contains *@first and *@last
* @first: array of block numbers
* @last: points immediately past the end of array
*
* We are freeing all blocks referred from that array (numbers are stored as
* little-endian 32-bit) and updating @inode->i_blocks appropriately.
*
* We accumulate contiguous runs of blocks to free. Conveniently, if these
* blocks are contiguous then releasing them at one time will only affect one
* or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
* actually use a lot of journal space.
*
* @this_bh will be %NULL if @first and @last point into the inode's direct
* block pointers.
*/
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;		/* Starting block # of a run */
	unsigned long count = 0;		/* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;		/* Pointer into inode/ind
						   corresponding to
						   block_to_free */
	ext4_fsblk_t nr;			/* Current block # */
	__le32 *p;				/* Pointer into inode/ind
						   for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	/* Coalesce physically contiguous runs so ext4_clear_blocks() is
	 * called once per run instead of once per block. */
	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	/* Flush the final pending run, if any. */
	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
					 (unsigned long long) this_bh->b_blocknr);
	}
}
/**
* ext4_free_branches - free an array of branches
* @handle: JBD handle for this transaction
* @inode: inode we are dealing with
* @parent_bh: the buffer_head which contains *@first and *@last
* @first: array of block numbers
* @last: pointer immediately past the end of array
* @depth: depth of the branches to free
*
* We are freeing all blocks referred from these branches (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	/* Once the handle is aborted, nothing more may be journaled. */
	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);

		/* Walk the pointer array backwards so the tree is freed
		 * bottom-up, last entry first. */
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Sanity-check the block number before trusting it. */
			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block.  This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					      "call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
/*
 * Truncate an indirect-mapped (non-extent) inode down to i_size: zero the
 * partial tail page, then free all block-pointer subtrees past the new end.
 */
void ext4_ind_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	loff_t page_len;
	unsigned blocksize = inode->i_sb->s_blocksize;
	int err;

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		return;		/* AKPM: return what? */

	/* First block to free, and the largest block this format can map. */
	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	/* Zero the tail of the page straddling the new EOF, if any. */
	if (inode->i_size % PAGE_CACHE_SIZE != 0) {
		page_len = PAGE_CACHE_SIZE -
			(inode->i_size & (PAGE_CACHE_SIZE - 1));

		err = ext4_discard_partial_page_buffers(handle,
			mapping, inode->i_size, page_len, 0);

		if (err)
			goto out_stop;
	}

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			goto out_stop;	/* error */
	}

	/*
	 * OK. This truncate is going to happen. We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers. It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * From here we block out all ext4_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down_write(&ei->i_data_sem);

	ext4_discard_preallocations(inode);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		goto out_unlock;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop. No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees.  Each case deliberately
	 * falls through: starting at the level that contains last_block,
	 * every deeper top-level tree must go too. */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}

out_unlock:
	up_write(&ei->i_data_sem);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	ext4_journal_stop(handle);
	trace_ext4_truncate_exit(inode);
}
| gpl-2.0 |
johnnyslt/kernel_zte_v967s | net/ipv4/netfilter/nf_nat_pptp.c | 4371 | 9943 | /*
* nf_nat_pptp.c
*
* NAT support for PPTP (Point to Point Tunneling Protocol).
 * PPTP is a protocol for creating virtual private networks.
* It is a specification defined by Microsoft and some vendors
* working with Microsoft. PPTP is built on top of a modified
* version of the Internet Generic Routing Encapsulation Protocol.
* GRE is defined in RFC 1701 and RFC 1702. Documentation of
* PPTP can be found in RFC 2637
*
* (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*
* TODO: - NAT to a unique tuple, not to TCP source port
* (needs netfilter tuple reservation)
*/
#include <linux/module.h>
#include <linux/tcp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
#define NF_NAT_PPTP_VERSION "3.0"
#define REQ_CID(req, off) (*(__be16 *)((char *)(req) + (off)))
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
MODULE_ALIAS("ip_nat_pptp");
/*
 * Called when an expected GRE connection shows up: remove the mirror-image
 * expectation for the other direction and install SRC/DST NAT bindings on
 * the new conntrack entry so the GRE keys (call IDs) match what the master
 * PPTP control connection negotiated.
 */
static void pptp_nat_expected(struct nf_conn *ct,
			      struct nf_conntrack_expect *exp)
{
	struct net *net = nf_ct_net(ct);
	const struct nf_conn *master = ct->master;
	struct nf_conntrack_expect *other_exp;
	struct nf_conntrack_tuple t;
	const struct nf_ct_pptp_master *ct_pptp_info;
	const struct nf_nat_pptp *nat_pptp_info;
	struct nf_nat_ipv4_range range;

	ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
	nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;

	/* And here goes the grand finale of corrosion... */
	if (exp->dir == IP_CT_DIR_ORIGINAL) {
		pr_debug("we are PNS->PAC\n");
		/* therefore, build tuple for PAC->PNS */
		t.src.l3num = AF_INET;
		t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
		t.src.u.gre.key = ct_pptp_info->pac_call_id;
		t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
		t.dst.u.gre.key = ct_pptp_info->pns_call_id;
		t.dst.protonum = IPPROTO_GRE;
	} else {
		pr_debug("we are PAC->PNS\n");
		/* build tuple for PNS->PAC */
		t.src.l3num = AF_INET;
		t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
		t.src.u.gre.key = nat_pptp_info->pns_call_id;
		t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
		t.dst.u.gre.key = nat_pptp_info->pac_call_id;
		t.dst.protonum = IPPROTO_GRE;
	}

	/* Only one direction's expectation fires; unexpect the other one
	 * so it cannot be matched by a different flow later. */
	pr_debug("trying to unexpect other dir: ");
	nf_ct_dump_tuple_ip(&t);
	other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
	if (other_exp) {
		nf_ct_unexpect_related(other_exp);
		nf_ct_expect_put(other_exp);
		pr_debug("success\n");
	} else {
		pr_debug("not found!\n");
	}

	/* This must be a fresh one. */
	BUG_ON(ct->status & IPS_NAT_DONE_MASK);

	/* Change src to where master sends to */
	range.flags = NF_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
	if (exp->dir == IP_CT_DIR_ORIGINAL) {
		/* Also force the GRE key saved in the expectation. */
		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range.min = range.max = exp->saved_proto;
	}
	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);

	/* For DST manip, map port here to where it's expected. */
	range.flags = NF_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
	if (exp->dir == IP_CT_DIR_REPLY) {
		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range.min = range.max = exp->saved_proto;
	}
	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}
/* outbound packets == from PNS to PAC */
static int
pptp_outbound_pkt(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo,
		  struct PptpControlHeader *ctlh,
		  union pptp_ctrl_union *pptpReq)
{
	struct nf_ct_pptp_master *ct_pptp_info;
	struct nf_nat_pptp *nat_pptp_info;
	u_int16_t msg;
	__be16 new_callid;
	unsigned int cid_off;

	ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info;
	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;

	/* Rewrite the PNS call ID in outbound control messages; the new
	 * value defaults to the one already recorded by the helper. */
	new_callid = ct_pptp_info->pns_call_id;

	switch (msg = ntohs(ctlh->messageType)) {
	case PPTP_OUT_CALL_REQUEST:
		cid_off = offsetof(union pptp_ctrl_union, ocreq.callID);
		/* FIXME: ideally we would want to reserve a call ID
		 * here. current netfilter NAT core is not able to do
		 * this :( For now we use TCP source port. This breaks
		 * multiple calls within one control session */

		/* save original call ID in nat_info */
		nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;

		/* don't use tcph->source since we are at a DSTmanip
		 * hook (e.g. PREROUTING) and pkt is not mangled yet */
		new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;

		/* save new call ID in ct info */
		ct_pptp_info->pns_call_id = new_callid;
		break;
	case PPTP_IN_CALL_REPLY:
		cid_off = offsetof(union pptp_ctrl_union, icack.callID);
		break;
	case PPTP_CALL_CLEAR_REQUEST:
		cid_off = offsetof(union pptp_ctrl_union, clrreq.callID);
		break;
	default:
		pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
			 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
					       pptp_msg_name[0]);
		/* fall through */
	case PPTP_SET_LINK_INFO:
		/* only need to NAT in case PAC is behind NAT box */
	case PPTP_START_SESSION_REQUEST:
	case PPTP_START_SESSION_REPLY:
	case PPTP_STOP_SESSION_REQUEST:
	case PPTP_STOP_SESSION_REPLY:
	case PPTP_ECHO_REQUEST:
	case PPTP_ECHO_REPLY:
		/* no need to alter packet */
		return NF_ACCEPT;
	}

	/* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
	 * down to here */
	pr_debug("altering call id from 0x%04x to 0x%04x\n",
		 ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));

	/* mangle packet */
	if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
				     cid_off + sizeof(struct pptp_pkt_hdr) +
				     sizeof(struct PptpControlHeader),
				     sizeof(new_callid), (char *)&new_callid,
				     sizeof(new_callid)) == 0)
		return NF_DROP;
	return NF_ACCEPT;
}
/*
 * Adjust the pair of GRE expectations created by the conntrack PPTP helper
 * so their tuples and saved keys reflect the NATed call IDs in both
 * directions.
 */
static void
pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
	     struct nf_conntrack_expect *expect_reply)
{
	const struct nf_conn *ct = expect_orig->master;
	struct nf_ct_pptp_master *ct_pptp_info;
	struct nf_nat_pptp *nat_pptp_info;

	ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info;
	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;

	/* save original PAC call ID in nat_info */
	nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;

	/* alter expectation for PNS->PAC direction */
	expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id;
	expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id;
	expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id;
	expect_orig->dir = IP_CT_DIR_ORIGINAL;

	/* alter expectation for PAC->PNS direction */
	expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id;
	expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id;
	expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id;
	expect_reply->dir = IP_CT_DIR_REPLY;
}
/* inbound packets == from PAC to PNS */
static int
pptp_inbound_pkt(struct sk_buff *skb,
		 struct nf_conn *ct,
		 enum ip_conntrack_info ctinfo,
		 struct PptpControlHeader *ctlh,
		 union pptp_ctrl_union *pptpReq)
{
	const struct nf_nat_pptp *nat_pptp_info;
	u_int16_t msg;
	__be16 new_pcid;
	unsigned int pcid_off;

	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
	/* Inbound messages carry the peer's (our PNS's) call ID; rewrite
	 * it back to the original, pre-NAT value. */
	new_pcid = nat_pptp_info->pns_call_id;

	switch (msg = ntohs(ctlh->messageType)) {
	case PPTP_OUT_CALL_REPLY:
		pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID);
		break;
	case PPTP_IN_CALL_CONNECT:
		pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID);
		break;
	case PPTP_IN_CALL_REQUEST:
		/* only need to nat in case PAC is behind NAT box */
		return NF_ACCEPT;
	case PPTP_WAN_ERROR_NOTIFY:
		pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID);
		break;
	case PPTP_CALL_DISCONNECT_NOTIFY:
		pcid_off = offsetof(union pptp_ctrl_union, disc.callID);
		break;
	case PPTP_SET_LINK_INFO:
		pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
		break;
	default:
		pr_debug("unknown inbound packet %s\n",
			 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
					       pptp_msg_name[0]);
		/* fall through */
	case PPTP_START_SESSION_REQUEST:
	case PPTP_START_SESSION_REPLY:
	case PPTP_STOP_SESSION_REQUEST:
	case PPTP_STOP_SESSION_REPLY:
	case PPTP_ECHO_REQUEST:
	case PPTP_ECHO_REPLY:
		/* no need to alter packet */
		return NF_ACCEPT;
	}

	/* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST,
	 * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */

	/* mangle packet */
	pr_debug("altering peer call id from 0x%04x to 0x%04x\n",
		 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));

	if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
				     pcid_off + sizeof(struct pptp_pkt_hdr) +
				     sizeof(struct PptpControlHeader),
				     sizeof(new_pcid), (char *)&new_pcid,
				     sizeof(new_pcid)) == 0)
		return NF_DROP;
	return NF_ACCEPT;
}
static int __init nf_nat_helper_pptp_init(void)
{
	/* Pull in the GRE NAT protocol module needed for PPTP data flows. */
	nf_nat_need_gre();

	/* Publish our callbacks to the conntrack PPTP helper.  Each hook
	 * must be empty first — a non-NULL hook means another NAT helper
	 * is already registered, which would be a bug. */
	BUG_ON(nf_nat_pptp_hook_outbound != NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);

	BUG_ON(nf_nat_pptp_hook_inbound != NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);

	BUG_ON(nf_nat_pptp_hook_exp_gre != NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);

	BUG_ON(nf_nat_pptp_hook_expectfn != NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
	return 0;
}
static void __exit nf_nat_helper_pptp_fini(void)
{
	/* Unpublish the hooks (reverse of registration order), then wait
	 * for any in-flight RCU readers to finish before the module text
	 * can go away. */
	RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, NULL);
	synchronize_rcu();
}
module_init(nf_nat_helper_pptp_init);
module_exit(nf_nat_helper_pptp_fini);
| gpl-2.0 |
pavian/LITMUS_RT_WITH_HMP | arch/um/drivers/port_user.c | 4627 | 3728 | /*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
* Licensed under the GPL
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <termios.h>
#include <unistd.h>
#include <netinet/in.h>
#include "chan_user.h"
#include <os.h>
#include "port.h"
#include <um_malloc.h>
/* Per-channel state for a "port" console channel. */
struct port_chan {
	int raw;			/* put the tty into raw mode on open? */
	struct termios tt;		/* termios saved by port_open before
					 * switching to raw mode —
					 * NOTE(review): restore point not
					 * visible in this file; confirm */
	void *kernel_data;		/* opaque handle from port_data() */
	char dev[sizeof("32768\0")];	/* port number rendered as a string */
};
/*
 * Parse a "port:<number>" channel description and allocate the per-channel
 * state.  Returns NULL on parse or allocation failure; on success the
 * returned structure owns the kernel-side port handle.
 */
static void *port_init(char *str, int device, const struct chan_opts *opts)
{
	struct port_chan *chan;
	void *kern;
	char *endp;
	int port;

	if (*str != ':') {
		printk(UM_KERN_ERR "port_init : channel type 'port' must "
		       "specify a port number\n");
		return NULL;
	}
	str++;
	port = strtoul(str, &endp, 0);
	if ((*endp != '\0') || (endp == str)) {
		printk(UM_KERN_ERR "port_init : couldn't parse port '%s'\n",
		       str);
		return NULL;
	}

	kern = port_data(port);
	if (kern == NULL)
		return NULL;

	chan = uml_kmalloc(sizeof(*chan), UM_GFP_KERNEL);
	if (chan != NULL) {
		*chan = ((struct port_chan) { .raw		= opts->raw,
					      .kernel_data	= kern });
		sprintf(chan->dev, "%d", port);
		return chan;
	}

	/* Allocation failed: give the kernel-side handle back. */
	port_kern_free(kern);
	return NULL;
}
/* Release a channel's kernel-side handle and then the channel itself. */
static void port_free(void *d)
{
	struct port_chan *chan = d;

	port_kern_free(chan->kernel_data);
	kfree(chan);
}
/*
 * Wait for a connection on this channel's port and return its fd.
 * In raw mode, the current termios is saved into data->tt before the
 * terminal is switched to raw (presumably for later restore — the restore
 * site is not visible in this file; confirm).
 */
static int port_open(int input, int output, int primary, void *d,
		     char **dev_out)
{
	struct port_chan *data = d;
	int fd, err;

	/* Blocks until a telnet client connects to the port. */
	fd = port_wait(data->kernel_data);
	if ((fd >= 0) && data->raw) {
		CATCH_EINTR(err = tcgetattr(fd, &data->tt));
		if (err)
			return err;

		err = raw(fd);
		if (err)
			return err;
	}
	*dev_out = data->dev;
	return fd;
}
/* Detach the device from the port bookkeeping, then close the connection. */
static void port_close(int fd, void *d)
{
	struct port_chan *chan = d;

	port_remove_dev(chan->kernel_data);
	os_close_file(fd);
}
/*
 * Channel operations for "port" channels: a console line served to telnet
 * clients connecting to a TCP port.  Generic read/write/console helpers
 * are reused; only setup and teardown are port-specific.
 */
const struct chan_ops port_ops = {
	.type		= "port",
	.init		= port_init,
	.open		= port_open,
	.close		= port_close,
	.read		= generic_read,
	.write		= generic_write,
	.console_write	= generic_console_write,
	.window_size	= generic_window_size,
	.free		= port_free,
	.winch		= 1,	/* window-size change events supported */
};
/*
 * Create a non-blocking TCP listening socket bound to @port on all
 * interfaces.  Returns the listening fd on success or -errno on failure.
 *
 * Fix: the sockaddr_in used to be passed to bind() with only three fields
 * assigned, leaving sin_zero and any padding uninitialized.  Using a
 * designated initializer zero-fills the remainder of the structure.
 */
int port_listen_fd(int port)
{
	struct sockaddr_in addr = {
		.sin_family		= AF_INET,
		.sin_port		= htons(port),
		.sin_addr.s_addr	= htonl(INADDR_ANY),
	};
	int fd, err, arg;

	fd = socket(PF_INET, SOCK_STREAM, 0);
	if (fd == -1)
		return -errno;

	/* Allow fast restart after the previous listener is closed. */
	arg = 1;
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &arg, sizeof(arg)) < 0) {
		err = -errno;
		goto out;
	}

	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		err = -errno;
		goto out;
	}

	if (listen(fd, 1) < 0) {
		err = -errno;
		goto out;
	}

	/* The caller polls this fd, so it must be non-blocking. */
	err = os_set_fd_block(fd, 0);
	if (err < 0)
		goto out;

	return fd;
out:
	close(fd);
	return err;
}
/* fds handed to the telnetd helper child before exec. */
struct port_pre_exec_data {
	int sock_fd;	/* accepted telnet connection */
	int pipe_fd;	/* write end of the pipe back to UML */
};
/*
 * Runs in the helper child just before exec: wire the child's standard
 * fds to the accepted connection and expose the notification pipe on fd 3.
 */
static void port_pre_exec(void *arg)
{
	struct port_pre_exec_data *data = arg;

	/* stdin/stdout/stderr all go over the telnet socket. */
	dup2(data->sock_fd, 0);
	dup2(data->sock_fd, 1);
	dup2(data->sock_fd, 2);
	close(data->sock_fd);
	/* fd 3 is the pipe back to UML; shut down the read side so the
	 * helper can only write on it. */
	dup2(data->pipe_fd, 3);
	shutdown(3, SHUT_RD);
	close(data->pipe_fd);
}
/*
 * Accept one connection on the listening fd and spawn the telnetd helper
 * on it.  On success, returns the accepted fd, stores the pipe pair in
 * @socket and the helper's pid in @pid_out; the caller owns all of them.
 * On failure, returns -errno (or the helper error) with everything closed.
 */
int port_connection(int fd, int *socket, int *pid_out)
{
	int new, err;
	char *argv[] = { "/usr/sbin/in.telnetd", "-L",
			 "/usr/lib/uml/port-helper", NULL };
	struct port_pre_exec_data data;

	new = accept(fd, NULL, 0);
	if (new < 0)
		return -errno;

	err = os_pipe(socket, 0, 0);
	if (err < 0)
		goto out_close;

	data = ((struct port_pre_exec_data)
		{ .sock_fd		= new,
		  .pipe_fd		= socket[1] });

	/* run_helper() returns the child pid on success. */
	err = run_helper(port_pre_exec, &data, argv);
	if (err < 0)
		goto out_shutdown;

	*pid_out = err;
	return new;

out_shutdown:
	shutdown(socket[0], SHUT_RDWR);
	close(socket[0]);
	shutdown(socket[1], SHUT_RDWR);
	close(socket[1]);
out_close:
	close(new);
	return err;
}
| gpl-2.0 |
losfair/android_kernel_sony_u8500_3_4 | drivers/platform/x86/eeepc-laptop.c | 4883 | 38037 | /*
* eeepc-laptop.c - Asus Eee PC extras
*
* Based on asus_acpi.c as patched for the Eee PC by Asus:
* ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar
* Based on eee.c from eeepc-linux
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/slab.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/rfkill.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/leds.h>
#include <linux/dmi.h>
#define EEEPC_LAPTOP_VERSION "0.1"
#define EEEPC_LAPTOP_NAME "Eee PC Hotkey Driver"
#define EEEPC_LAPTOP_FILE "eeepc"
#define EEEPC_ACPI_CLASS "hotkey"
#define EEEPC_ACPI_DEVICE_NAME "Hotkey"
#define EEEPC_ACPI_HID "ASUS010"
MODULE_AUTHOR("Corentin Chary, Eric Cooper");
MODULE_DESCRIPTION(EEEPC_LAPTOP_NAME);
MODULE_LICENSE("GPL");
static bool hotplug_disabled;
module_param(hotplug_disabled, bool, 0444);
MODULE_PARM_DESC(hotplug_disabled,
"Disable hotplug for wireless device. "
"If your laptop need that, please report to "
"acpi4asus-user@lists.sourceforge.net.");
/*
* Definitions for Asus EeePC
*/
#define NOTIFY_BRN_MIN 0x20
#define NOTIFY_BRN_MAX 0x2f
/* Bits for the INIT method's argument: ASL features the driver asks the
 * BIOS not to handle by itself. */
enum {
DISABLE_ASL_WLAN = 0x0001,
DISABLE_ASL_BLUETOOTH = 0x0002,
DISABLE_ASL_IRDA = 0x0004,
DISABLE_ASL_CAMERA = 0x0008,
DISABLE_ASL_TV = 0x0010,
DISABLE_ASL_GPS = 0x0020,
DISABLE_ASL_DISPLAYSWITCH = 0x0040,
DISABLE_ASL_MODEM = 0x0080,
DISABLE_ASL_CARDREADER = 0x0100,
DISABLE_ASL_3G = 0x0200,
DISABLE_ASL_WIMAX = 0x0400,
DISABLE_ASL_HWCF = 0x0800
};
/* Control-method indices; also the bit positions reported by CMSG in
 * eeepc->cm_supported, and the indices into cm_getv[]/cm_setv[] below. */
enum {
CM_ASL_WLAN = 0,
CM_ASL_BLUETOOTH,
CM_ASL_IRDA,
CM_ASL_1394,
CM_ASL_CAMERA,
CM_ASL_TV,
CM_ASL_GPS,
CM_ASL_DVDROM,
CM_ASL_DISPLAYSWITCH,
CM_ASL_PANELBRIGHT,
CM_ASL_BIOSFLASH,
CM_ASL_ACPIFLASH,
CM_ASL_CPUFV,
CM_ASL_CPUTEMPERATURE,
CM_ASL_FANCPU,
CM_ASL_FANCHASSIS,
CM_ASL_USBPORT1,
CM_ASL_USBPORT2,
CM_ASL_USBPORT3,
CM_ASL_MODEM,
CM_ASL_CARDREADER,
CM_ASL_3G,
CM_ASL_WIMAX,
CM_ASL_HWCF,
CM_ASL_LID,
CM_ASL_TYPE,
CM_ASL_PANELPOWER, /*P901*/
CM_ASL_TPD
};
/* ACPI getter method names, indexed by CM_ASL_*; NULL = no getter. */
static const char *cm_getv[] = {
"WLDG", "BTHG", NULL, NULL,
"CAMG", NULL, NULL, NULL,
NULL, "PBLG", NULL, NULL,
"CFVG", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODG",
"CRDG", "M3GG", "WIMG", "HWCF",
"LIDG", "TYPE", "PBPG", "TPDG"
};
/* ACPI setter method names, indexed by CM_ASL_*; NULL = no setter. */
static const char *cm_setv[] = {
"WLDS", "BTHS", NULL, NULL,
"CAMS", NULL, NULL, NULL,
"SDSP", "PBLS", "HDPS", NULL,
"CFVS", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODS",
"CRDS", "M3GS", "WIMS", NULL,
NULL, NULL, "PBPS", "TPDS"
};
/* Sparse keymap: ACPI hotkey event codes -> input-layer key codes.
 * Brightness events 0x20-0x2f are remapped to the min/max codes in
 * eeepc_acpi_notify() before being reported. */
static const struct key_entry eeepc_keymap[] = {
{ KE_KEY, 0x10, { KEY_WLAN } },
{ KE_KEY, 0x11, { KEY_WLAN } },
{ KE_KEY, 0x12, { KEY_PROG1 } },
{ KE_KEY, 0x13, { KEY_MUTE } },
{ KE_KEY, 0x14, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x15, { KEY_VOLUMEUP } },
{ KE_KEY, 0x16, { KEY_DISPLAY_OFF } },
{ KE_KEY, 0x1a, { KEY_COFFEE } },
{ KE_KEY, 0x1b, { KEY_ZOOM } },
{ KE_KEY, 0x1c, { KEY_PROG2 } },
{ KE_KEY, 0x1d, { KEY_PROG3 } },
{ KE_KEY, NOTIFY_BRN_MIN, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, NOTIFY_BRN_MAX, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x30, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x31, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x32, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x37, { KEY_F13 } }, /* Disable Touchpad */
{ KE_KEY, 0x38, { KEY_F14 } },
{ KE_END, 0 },
};
/*
* This is the main structure, we can use it to store useful information
*/
struct eeepc_laptop {
acpi_handle handle; /* the handle of the acpi device */
u32 cm_supported; /* the control methods supported
by this BIOS */
bool cpufv_disabled; /* cpufv blacklisted by DMI (see eeepc_dmi_check) */
bool hotplug_disabled; /* wlan hotplug disabled (module param or DMI) */
u16 event_count[128]; /* count for each event */
struct platform_device *platform_device;
struct acpi_device *device; /* the device we are in */
struct device *hwmon_device;
struct backlight_device *backlight_device;
struct input_dev *inputdev;
struct rfkill *wlan_rfkill;
struct rfkill *bluetooth_rfkill;
struct rfkill *wwan3g_rfkill;
struct rfkill *wimax_rfkill;
struct hotplug_slot *hotplug_slot; /* wifi PCI hotplug slot, may be NULL */
struct mutex hotplug_lock; /* serializes eeepc_rfkill_hotplug() */
struct led_classdev tpd_led; /* touchpad LED */
int tpd_led_wk; /* brightness deferred to tpd_led_work */
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
};
/*
* ACPI Helpers
*/
/*
 * Evaluate an ACPI method that takes a single integer argument.
 * When @method is NULL, @handle itself is evaluated (used by
 * eeepc_rfkill_set()).  Returns 0 on success, -1 on ACPI failure.
 */
static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
	struct acpi_object_list params;
	union acpi_object in_obj;
	acpi_status status;

	params.count = 1;
	params.pointer = &in_obj;
	in_obj.type = ACPI_TYPE_INTEGER;
	in_obj.integer.value = val;

	/* "&params" had been mangled to the HTML entity "¶ms" */
	status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
	return (status == AE_OK ? 0 : -1);
}
/*
 * Evaluate an ACPI method returning an integer.  On success the value
 * is stored in *val and 0 is returned; on failure *val is set to -1
 * and -1 is returned.
 */
static int read_acpi_int(acpi_handle handle, const char *method, int *val)
{
	unsigned long long result;
	acpi_status status = acpi_evaluate_integer(handle, (char *)method,
						   NULL, &result);

	if (ACPI_FAILURE(status)) {
		*val = -1;
		return -1;
	}
	*val = result;
	return 0;
}
/*
 * Write @value through the CM_ASL setter for @cm, if this BIOS
 * advertises it.  Returns -ENODEV when the method is unavailable;
 * a failed write is only logged (mirrors get_acpi()).
 */
static int set_acpi(struct eeepc_laptop *eeepc, int cm, int value)
{
	const char *method = cm_setv[cm];

	if (!method || !(eeepc->cm_supported & (1 << cm)))
		return -ENODEV;

	if (write_acpi_int(eeepc->handle, method, value))
		pr_warn("Error writing %s\n", method);
	return 0;
}
/*
 * Read the CM_ASL getter for @cm.  Returns -ENODEV when the method is
 * unavailable; on a failed read the -1 left by read_acpi_int() is
 * returned, otherwise the value read from the BIOS.
 */
static int get_acpi(struct eeepc_laptop *eeepc, int cm)
{
	const char *method = cm_getv[cm];
	int value;

	if (!method || !(eeepc->cm_supported & (1 << cm)))
		return -ENODEV;

	if (read_acpi_int(eeepc->handle, method, &value))
		pr_warn("Error reading %s\n", method);
	return value;
}
/*
 * Resolve the ACPI handle of the setter method for @cm, storing it in
 * *handle.  Returns 0 on success, -ENODEV when the method does not
 * exist or is not advertised by this BIOS.
 */
static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
			      acpi_handle *handle)
{
	const char *method = cm_setv[cm];
	acpi_status status;

	if (!method || !(eeepc->cm_supported & (1 << cm)))
		return -ENODEV;

	status = acpi_get_handle(eeepc->handle, (char *)method, handle);
	if (status != AE_OK) {
		pr_warn("Error finding %s\n", method);
		return -ENODEV;
	}
	return 0;
}
/*
* Sys helpers
*/
/*
 * Parse one integer (any C base, via "%i") from a sysfs buffer.
 * Returns 0 for an empty write, -EINVAL on malformed input, and
 * otherwise echoes back @count (bytes consumed).
 */
static int parse_arg(const char *buf, unsigned long count, int *val)
{
	int matched;

	if (count == 0)
		return 0;

	matched = sscanf(buf, "%i", val);
	return (matched == 1) ? (int)count : -EINVAL;
}
/*
 * Generic sysfs store: parse an integer from @buf and feed it to the
 * CM_ASL setter @cm.  Returns the bytes consumed, -EIO if the setter
 * failed, or the parse error.
 */
static ssize_t store_sys_acpi(struct device *dev, int cm,
			      const char *buf, size_t count)
{
	struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
	int rv, value;

	rv = parse_arg(buf, count, &value);
	if (rv > 0) {
		/*
		 * Only check the setter's result when something was
		 * actually parsed; the original code read 'value'
		 * (uninitialized) even when parse_arg() failed.
		 */
		if (set_acpi(eeepc, cm, value) < 0)
			return -EIO;
	}
	return rv;
}
/* Generic sysfs show: read CM_ASL getter @cm; -EIO when unreadable. */
static ssize_t show_sys_acpi(struct device *dev, int cm, char *buf)
{
	struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
	int value = get_acpi(eeepc, cm);

	return (value < 0) ? -EIO : sprintf(buf, "%d\n", value);
}
/* Generate show_/store_ sysfs handlers and a device_attribute for one
 * CM_ASL control method, wired to show_sys_acpi()/store_sys_acpi(). */
#define EEEPC_CREATE_DEVICE_ATTR(_name, _mode, _cm) \
static ssize_t show_##_name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return show_sys_acpi(dev, _cm, buf); \
} \
static ssize_t store_##_name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
return store_sys_acpi(dev, _cm, buf, count); \
} \
static struct device_attribute dev_attr_##_name = { \
.attr = { \
.name = __stringify(_name), \
.mode = _mode }, \
.show = show_##_name, \
.store = store_##_name, \
}
EEEPC_CREATE_DEVICE_ATTR(camera, 0644, CM_ASL_CAMERA);
EEEPC_CREATE_DEVICE_ATTR(cardr, 0644, CM_ASL_CARDREADER);
EEEPC_CREATE_DEVICE_ATTR(disp, 0200, CM_ASL_DISPLAYSWITCH);
/* Decoded CPUFV register: number of available cpu frequency/voltage
 * states and the currently selected one. */
struct eeepc_cpufv {
int num; /* number of states (1..12) */
int cur; /* index of the current state */
};
/*
 * Read and decode CM_ASL_CPUFV: bits 8-15 hold the number of states,
 * bits 0-7 the current state.  Returns 0 on success, -ENODEV when the
 * method is unavailable or the decoded value is implausible.
 */
static int get_cpufv(struct eeepc_laptop *eeepc, struct eeepc_cpufv *c)
{
	int raw = get_acpi(eeepc, CM_ASL_CPUFV);

	/*
	 * Check the error before masking: after "&= 0xff" the value can
	 * never be negative, so the original "c->cur < 0" test was dead
	 * and a get_acpi() failure went undetected.
	 */
	if (raw < 0)
		return -ENODEV;

	c->num = (raw >> 8) & 0xff;
	c->cur = raw & 0xff;
	if (c->num == 0 || c->num > 12)
		return -ENODEV;
	return 0;
}
/* sysfs: list the selectable cpufv states as "0 1 ... num-1\n". */
static ssize_t show_available_cpufv(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
int i;
ssize_t len = 0;
if (get_cpufv(eeepc, &c))
return -ENODEV;
for (i = 0; i < c.num; i++)
len += sprintf(buf + len, "%d ", i);
len += sprintf(buf + len, "\n");
return len;
}
/* sysfs: show the raw CPUFV encoding (num << 8 | cur) in hex. */
static ssize_t show_cpufv(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
if (get_cpufv(eeepc, &c))
return -ENODEV;
return sprintf(buf, "%#x\n", (c.num << 8) | c.cur);
}
/* sysfs: select a cpufv state; refused on DMI-blacklisted models. */
static ssize_t store_cpufv(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
int rv, value;
if (eeepc->cpufv_disabled)
return -EPERM;
if (get_cpufv(eeepc, &c))
return -ENODEV;
rv = parse_arg(buf, count, &value);
if (rv < 0)
return rv;
/* reject empty writes and out-of-range state indices */
if (!rv || value < 0 || value >= c.num)
return -EINVAL;
set_acpi(eeepc, CM_ASL_CPUFV, value);
return rv;
}
/* sysfs: report whether cpufv writes are blocked (1) or allowed (0). */
static ssize_t show_cpufv_disabled(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", eeepc->cpufv_disabled);
}
/* sysfs: allow the user to re-enable cpufv (write 0) on blacklisted
 * models, at their own risk; re-disabling (write 1) is refused. */
static ssize_t store_cpufv_disabled(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv < 0)
return rv;
switch (value) {
case 0:
if (eeepc->cpufv_disabled)
pr_warn("cpufv enabled (not officially supported "
"on this model)\n");
eeepc->cpufv_disabled = false;
return rv;
case 1:
/* once enabled, the blacklist cannot be re-armed from userspace */
return -EPERM;
default:
return -EINVAL;
}
}
/* Manually defined attributes for cpufv: show and store operate on
 * different encodings, so the EEEPC_CREATE_DEVICE_ATTR macro does not fit. */
static struct device_attribute dev_attr_cpufv = {
.attr = {
.name = "cpufv",
.mode = 0644 },
.show = show_cpufv,
.store = store_cpufv
};
static struct device_attribute dev_attr_available_cpufv = {
.attr = {
.name = "available_cpufv",
.mode = 0444 },
.show = show_available_cpufv
};
static struct device_attribute dev_attr_cpufv_disabled = {
.attr = {
.name = "cpufv_disabled",
.mode = 0644 },
.show = show_cpufv_disabled,
.store = store_cpufv_disabled
};
/* All attributes exposed under the eeepc platform device. */
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_disp.attr,
&dev_attr_cpufv.attr,
&dev_attr_available_cpufv.attr,
&dev_attr_cpufv_disabled.attr,
NULL
};
static struct attribute_group platform_attribute_group = {
.attrs = platform_attributes
};
/*
 * Register the "eeepc" platform device and its sysfs attribute group.
 * Cleanup on failure unwinds in reverse order via gotos.
 */
static int eeepc_platform_init(struct eeepc_laptop *eeepc)
{
int result;
eeepc->platform_device = platform_device_alloc(EEEPC_LAPTOP_FILE, -1);
if (!eeepc->platform_device)
return -ENOMEM;
platform_set_drvdata(eeepc->platform_device, eeepc);
result = platform_device_add(eeepc->platform_device);
if (result)
goto fail_platform_device;
result = sysfs_create_group(&eeepc->platform_device->dev.kobj,
&platform_attribute_group);
if (result)
goto fail_sysfs;
return 0;
fail_sysfs:
platform_device_del(eeepc->platform_device);
fail_platform_device:
/* _put releases the device allocated above (del was already done) */
platform_device_put(eeepc->platform_device);
return result;
}
/* Tear down what eeepc_platform_init() registered. */
static void eeepc_platform_exit(struct eeepc_laptop *eeepc)
{
sysfs_remove_group(&eeepc->platform_device->dev.kobj,
&platform_attribute_group);
platform_device_unregister(eeepc->platform_device);
}
/*
* LEDs
*/
/*
* These functions actually update the LED's, and are called from a
* workqueue. By doing this as separate work rather than when the LED
* subsystem asks, we avoid messing with the Asus ACPI stuff during a
* potentially bad time, such as a timer interrupt.
*/
/* Workqueue body: push the cached LED state to the BIOS. */
static void tpd_led_update(struct work_struct *work)
{
struct eeepc_laptop *eeepc;
eeepc = container_of(work, struct eeepc_laptop, tpd_led_work);
set_acpi(eeepc, CM_ASL_TPD, eeepc->tpd_led_wk);
}
/*
 * LED-core brightness_set callback.  The ACPI write is deferred to the
 * workqueue (tpd_led_update) because this may be called from atomic
 * context; only on/off is supported.
 */
static void tpd_led_set(struct led_classdev *led_cdev,
			enum led_brightness value)
{
	struct eeepc_laptop *eeepc =
		container_of(led_cdev, struct eeepc_laptop, tpd_led);

	eeepc->tpd_led_wk = (value > 0) ? 1 : 0;
	queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
}
/* LED-core brightness_get callback: read the state back from the BIOS. */
static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
{
	struct eeepc_laptop *eeepc =
		container_of(led_cdev, struct eeepc_laptop, tpd_led);

	return get_acpi(eeepc, CM_ASL_TPD);
}
/*
 * Register the touchpad LED if the BIOS exposes CM_ASL_TPD.
 * Returns 0 (also when the LED is simply absent) or a negative error.
 */
static int eeepc_led_init(struct eeepc_laptop *eeepc)
{
int rv;
/* no TPD method at all: nothing to register, not an error */
if (get_acpi(eeepc, CM_ASL_TPD) == -ENODEV)
return 0;
eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue");
if (!eeepc->led_workqueue)
return -ENOMEM;
INIT_WORK(&eeepc->tpd_led_work, tpd_led_update);
eeepc->tpd_led.name = "eeepc::touchpad";
eeepc->tpd_led.brightness_set = tpd_led_set;
if (get_acpi(eeepc, CM_ASL_TPD) >= 0) /* if method is available */
eeepc->tpd_led.brightness_get = tpd_led_get;
eeepc->tpd_led.max_brightness = 1;
rv = led_classdev_register(&eeepc->platform_device->dev,
&eeepc->tpd_led);
if (rv) {
destroy_workqueue(eeepc->led_workqueue);
return rv;
}
return 0;
}
/* Unregister the LED and flush/destroy its workqueue; both guards make
 * this safe when eeepc_led_init() bailed out early. */
static void eeepc_led_exit(struct eeepc_laptop *eeepc)
{
if (!IS_ERR_OR_NULL(eeepc->tpd_led.dev))
led_classdev_unregister(&eeepc->tpd_led);
if (eeepc->led_workqueue)
destroy_workqueue(eeepc->led_workqueue);
}
/*
* PCI hotplug (for wlan rfkill)
*/
/* Blocked unless the BIOS explicitly reports the WLAN as enabled (1). */
static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc)
{
	return get_acpi(eeepc, CM_ASL_WLAN) != 1;
}
/*
 * Synchronize the wifi PCI slot with the rfkill state reported by the
 * BIOS: rescan/add the device when unblocked, remove it when blocked.
 * Serialized by hotplug_lock against concurrent ACPI notifications.
 *
 * Fixes vs. original: acpi_get_pci_dev() takes a reference on the port
 * that was never released (pci_dev_put added on all exit paths), and
 * the deprecated pr_warning() is replaced by pr_warn() to match the
 * rest of the file.
 */
static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
{
	struct pci_dev *port;
	struct pci_dev *dev;
	struct pci_bus *bus;
	bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
	bool absent;
	u32 l;

	if (eeepc->wlan_rfkill)
		rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);

	mutex_lock(&eeepc->hotplug_lock);

	if (eeepc->hotplug_slot) {
		port = acpi_get_pci_dev(handle);
		if (!port) {
			pr_warn("Unable to find port\n");
			goto out_unlock;
		}

		bus = port->subordinate;
		if (!bus) {
			pr_warn("Unable to find PCI bus 1?\n");
			goto out_put_dev;
		}

		if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
			pr_err("Unable to read PCI config space?\n");
			goto out_put_dev;
		}

		absent = (l == 0xffffffff);

		if (blocked != absent) {
			pr_warn("BIOS says wireless lan is %s, "
				"but the pci device is %s\n",
				blocked ? "blocked" : "unblocked",
				absent ? "absent" : "present");
			pr_warn("skipped wireless hotplug as probably "
				"inappropriate for this model\n");
			goto out_put_dev;
		}

		if (!blocked) {
			dev = pci_get_slot(bus, 0);
			if (dev) {
				/* Device already present */
				pci_dev_put(dev);
				goto out_put_dev;
			}
			dev = pci_scan_single_device(bus, 0);
			if (dev) {
				pci_bus_assign_resources(bus);
				if (pci_bus_add_device(dev))
					pr_err("Unable to hotplug wifi\n");
			}
		} else {
			dev = pci_get_slot(bus, 0);
			if (dev) {
				pci_stop_and_remove_bus_device(dev);
				pci_dev_put(dev);
			}
		}
out_put_dev:
		pci_dev_put(port);
	}

out_unlock:
	mutex_unlock(&eeepc->hotplug_lock);
}
/* Look up the ACPI node by path and, if it exists, resync the wifi slot. */
static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node)
{
	acpi_handle handle;

	if (ACPI_SUCCESS(acpi_get_handle(NULL, node, &handle)))
		eeepc_rfkill_hotplug(eeepc, handle);
}
/* ACPI notify handler on the wifi port: only bus-check events matter. */
static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
{
	struct eeepc_laptop *eeepc = data;

	if (event == ACPI_NOTIFY_BUS_CHECK)
		eeepc_rfkill_hotplug(eeepc, handle);
}
/*
 * Install eeepc_rfkill_notify() on the ACPI node named by @node.
 * Returns -ENODEV when the node does not exist on this machine
 * (expected: the P0P5/P0P6/P0P7 ports vary between models).
 */
static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
char *node)
{
acpi_status status;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
if (ACPI_SUCCESS(status)) {
status = acpi_install_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify,
eeepc);
if (ACPI_FAILURE(status))
pr_warn("Failed to register notify on %s\n", node);
/*
* Refresh pci hotplug in case the rfkill state was
* changed during setup.
*/
eeepc_rfkill_hotplug(eeepc, handle);
} else
return -ENODEV;
return 0;
}
/* Remove the notify handler installed by the register counterpart and
 * resync the slot one last time. */
static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
char *node)
{
acpi_status status = AE_OK;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
if (ACPI_SUCCESS(status)) {
status = acpi_remove_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify);
if (ACPI_FAILURE(status))
pr_err("Error removing rfkill notify handler %s\n",
node);
/*
* Refresh pci hotplug in case the rfkill
* state was changed after
* eeepc_unregister_rfkill_notifier()
*/
eeepc_rfkill_hotplug(eeepc, handle);
}
}
/*
 * Hotplug-slot callback: report the wifi adapter status (0/1) as read
 * from the BIOS.  Anything else (error codes) yields -EINVAL.
 */
static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
				    u8 *value)
{
	struct eeepc_laptop *eeepc = hotplug_slot->private;
	int val = get_acpi(eeepc, CM_ASL_WLAN);

	if (val != 0 && val != 1)
		return -EINVAL;
	*value = val;
	return 0;
}
/* Release callback invoked by the PCI hotplug core when the slot is
 * deregistered; frees what eeepc_setup_pci_hotplug() allocated. */
static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
{
kfree(hotplug_slot->info);
kfree(hotplug_slot);
}
/* Only status queries are supported; power is controlled via rfkill. */
static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
.owner = THIS_MODULE,
.get_adapter_status = eeepc_get_adapter_status,
.get_power_status = eeepc_get_adapter_status,
};
/*
 * Register a PCI hotplug slot for the wifi adapter (bus 1, slot 0).
 * The slot and its info are freed by eeepc_cleanup_pci_hotplug() via
 * the release callback on success, or by the error path here.
 */
static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
{
int ret = -ENOMEM;
struct pci_bus *bus = pci_find_bus(0, 1);
if (!bus) {
pr_err("Unable to find wifi PCI bus\n");
return -ENODEV;
}
eeepc->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
if (!eeepc->hotplug_slot)
goto error_slot;
eeepc->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
GFP_KERNEL);
if (!eeepc->hotplug_slot->info)
goto error_info;
eeepc->hotplug_slot->private = eeepc;
eeepc->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
eeepc->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
/* seed the initial adapter status before registering the slot */
eeepc_get_adapter_status(eeepc->hotplug_slot,
&eeepc->hotplug_slot->info->adapter_status);
ret = pci_hp_register(eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
}
return 0;
error_register:
kfree(eeepc->hotplug_slot->info);
error_info:
kfree(eeepc->hotplug_slot);
eeepc->hotplug_slot = NULL;
error_slot:
return ret;
}
/*
* Rfkill devices
*/
/* rfkill set_block callback: @data is the ACPI handle of the setter
 * method itself, so write_acpi_int() is called with a NULL method name
 * and evaluates the handle directly. */
static int eeepc_rfkill_set(void *data, bool blocked)
{
acpi_handle handle = data;
return write_acpi_int(handle, NULL, !blocked);
}
static const struct rfkill_ops eeepc_rfkill_ops = {
.set_block = eeepc_rfkill_set,
};
/*
 * Allocate and register one rfkill switch backed by the CM_ASL setter
 * @cm.  Returns -ENODEV when this BIOS lacks the method (callers treat
 * that as "feature absent", not a failure).  *rfkill is left NULL on
 * any error.
 */
static int eeepc_new_rfkill(struct eeepc_laptop *eeepc,
struct rfkill **rfkill,
const char *name,
enum rfkill_type type, int cm)
{
acpi_handle handle;
int result;
result = acpi_setter_handle(eeepc, cm, &handle);
if (result < 0)
return result;
*rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type,
&eeepc_rfkill_ops, handle);
if (!*rfkill)
return -EINVAL;
/* seed the software state from the BIOS before registration */
rfkill_init_sw_state(*rfkill, get_acpi(eeepc, cm) != 1);
result = rfkill_register(*rfkill);
if (result) {
rfkill_destroy(*rfkill);
*rfkill = NULL;
return result;
}
return 0;
}
/*
 * Tear down all rfkill state.  Notifiers are removed first so no
 * hotplug callback can run while the switches and slot are destroyed;
 * the wlan switch goes before the hotplug slot deregistration.
 */
static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
{
eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
if (eeepc->wlan_rfkill) {
rfkill_unregister(eeepc->wlan_rfkill);
rfkill_destroy(eeepc->wlan_rfkill);
eeepc->wlan_rfkill = NULL;
}
if (eeepc->hotplug_slot)
pci_hp_deregister(eeepc->hotplug_slot);
if (eeepc->bluetooth_rfkill) {
rfkill_unregister(eeepc->bluetooth_rfkill);
rfkill_destroy(eeepc->bluetooth_rfkill);
eeepc->bluetooth_rfkill = NULL;
}
if (eeepc->wwan3g_rfkill) {
rfkill_unregister(eeepc->wwan3g_rfkill);
rfkill_destroy(eeepc->wwan3g_rfkill);
eeepc->wwan3g_rfkill = NULL;
}
if (eeepc->wimax_rfkill) {
rfkill_unregister(eeepc->wimax_rfkill);
rfkill_destroy(eeepc->wimax_rfkill);
eeepc->wimax_rfkill = NULL;
}
}
/*
 * Register every rfkill switch the BIOS supports, plus (unless
 * disabled) the wifi PCI hotplug slot and its ACPI notifiers.
 * -ENODEV from a missing switch is tolerated throughout.
 */
static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
{
int result = 0;
mutex_init(&eeepc->hotplug_lock);
result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill,
"eeepc-wlan", RFKILL_TYPE_WLAN,
CM_ASL_WLAN);
if (result && result != -ENODEV)
goto exit;
result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill,
"eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH,
CM_ASL_BLUETOOTH);
if (result && result != -ENODEV)
goto exit;
result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill,
"eeepc-wwan3g", RFKILL_TYPE_WWAN,
CM_ASL_3G);
if (result && result != -ENODEV)
goto exit;
result = eeepc_new_rfkill(eeepc, &eeepc->wimax_rfkill,
"eeepc-wimax", RFKILL_TYPE_WIMAX,
CM_ASL_WIMAX);
if (result && result != -ENODEV)
goto exit;
if (eeepc->hotplug_disabled)
return 0;
result = eeepc_setup_pci_hotplug(eeepc);
/*
* If we get -EBUSY then something else is handling the PCI hotplug -
* don't fail in this case
*/
if (result == -EBUSY)
result = 0;
eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
exit:
if (result && result != -ENODEV)
eeepc_rfkill_exit(eeepc);
return result;
}
/*
* Platform driver - hibernate/resume callbacks
*/
/* PM thaw callback (aborted hibernation). */
static int eeepc_hotk_thaw(struct device *device)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(device);
if (eeepc->wlan_rfkill) {
bool wlan;
/*
* Work around bios bug - acpi _PTS turns off the wireless led
* during suspend. Normally it restores it on resume, but
* we should kick it ourselves in case hibernation is aborted.
*/
wlan = get_acpi(eeepc, CM_ASL_WLAN);
set_acpi(eeepc, CM_ASL_WLAN, wlan);
}
return 0;
}
/* PM restore callback: resync every rfkill switch with the BIOS state,
 * which may have changed while the image was loaded. */
static int eeepc_hotk_restore(struct device *device)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(device);
/* Refresh both wlan rfkill state and pci hotplug */
if (eeepc->wlan_rfkill) {
eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5");
eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6");
eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7");
}
if (eeepc->bluetooth_rfkill)
rfkill_set_sw_state(eeepc->bluetooth_rfkill,
get_acpi(eeepc, CM_ASL_BLUETOOTH) != 1);
if (eeepc->wwan3g_rfkill)
rfkill_set_sw_state(eeepc->wwan3g_rfkill,
get_acpi(eeepc, CM_ASL_3G) != 1);
if (eeepc->wimax_rfkill)
rfkill_set_sw_state(eeepc->wimax_rfkill,
get_acpi(eeepc, CM_ASL_WIMAX) != 1);
return 0;
}
/* Only hibernation hooks are needed; suspend/resume is handled by the
 * firmware itself. */
static const struct dev_pm_ops eeepc_pm_ops = {
.thaw = eeepc_hotk_thaw,
.restore = eeepc_hotk_restore,
};
static struct platform_driver platform_driver = {
.driver = {
.name = EEEPC_LAPTOP_FILE,
.owner = THIS_MODULE,
.pm = &eeepc_pm_ops,
}
};
/*
* Hwmon device
*/
#define EEEPC_EC_SC00 0x61
#define EEEPC_EC_FAN_PWM (EEEPC_EC_SC00 + 2) /* Fan PWM duty cycle (%) */
#define EEEPC_EC_FAN_HRPM (EEEPC_EC_SC00 + 5) /* High byte, fan speed (RPM) */
#define EEEPC_EC_FAN_LRPM (EEEPC_EC_SC00 + 6) /* Low byte, fan speed (RPM) */
#define EEEPC_EC_SFB0 0xD0
#define EEEPC_EC_FAN_CTRL (EEEPC_EC_SFB0 + 3) /* Byte containing SF25 */
/* Fan duty cycle rescaled from the EC's 0-100 (%) range to hwmon's 0-255. */
static int eeepc_get_fan_pwm(void)
{
	u8 pct = 0;

	ec_read(EEEPC_EC_FAN_PWM, &pct);
	return pct * 255 / 100;
}
/* Clamp the hwmon 0-255 pwm value and write it to the EC as a percentage. */
static void eeepc_set_fan_pwm(int value)
{
	int pct = SENSORS_LIMIT(value, 0, 255) * 100 / 255;

	ec_write(EEEPC_EC_FAN_PWM, pct);
}
/* Fan speed in RPM, assembled from the EC's high/low byte registers. */
static int eeepc_get_fan_rpm(void)
{
	u8 hi = 0, lo = 0;

	ec_read(EEEPC_EC_FAN_HRPM, &hi);
	ec_read(EEEPC_EC_FAN_LRPM, &lo);
	return (hi << 8) | lo;
}
/* pwm1_enable: 1 = manual (SF25 bit set), 2 = automatic fan control. */
static int eeepc_get_fan_ctrl(void)
{
	u8 byte = 0;

	ec_read(EEEPC_EC_FAN_CTRL, &byte);
	return (byte & 0x02) ? 1 : 2;
}
/* Toggle the SF25 manual-control bit, preserving the other bits. */
static void eeepc_set_fan_ctrl(int manual)
{
	u8 byte = 0;

	ec_read(EEEPC_EC_FAN_CTRL, &byte);
	if (manual == 1)
		byte |= 0x02;
	else
		byte &= ~0x02;
	ec_write(EEEPC_EC_FAN_CTRL, byte);
}
/* Parse an integer and hand it to the EC setter.
 * NOTE(review): fan1_input instantiates this with set == NULL; that is
 * only safe because its attribute is read-only in sysfs - confirm no
 * other caller can reach the store handler. */
static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
{
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
set(value);
return rv;
}
/* Format the EC getter's value for sysfs. */
static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
{
return sprintf(buf, "%d\n", get());
}
/*
 * Generate show/store handlers plus a SENSOR_DEVICE_ATTR for one hwmon
 * attribute, wired to the given EC getter and setter.
 *
 * The original macro named the getter "_set" and the setter "_get";
 * the parameters are renamed here to match how every call site already
 * passes them (getter third, setter fourth).  Expansion is unchanged.
 */
#define EEEPC_CREATE_SENSOR_ATTR(_name, _mode, _get, _set) \
static ssize_t show_##_name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return show_sys_hwmon(_get, buf); \
} \
static ssize_t store_##_name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
return store_sys_hwmon(_set, buf, count); \
} \
static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
eeepc_get_fan_pwm, eeepc_set_fan_pwm);
EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
/* The hwmon "name" attribute, required by the hwmon class. */
static ssize_t
show_name(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "eeepc\n");
}
static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
/* All attributes exposed by the hwmon device. */
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL
};
static struct attribute_group hwmon_attribute_group = {
.attrs = hwmon_attributes
};
/* Remove the hwmon attribute group and unregister the device; a no-op
 * when the device was never registered. */
static void eeepc_hwmon_exit(struct eeepc_laptop *eeepc)
{
	struct device *hwmon = eeepc->hwmon_device;

	if (!hwmon)
		return;

	sysfs_remove_group(&hwmon->kobj, &hwmon_attribute_group);
	hwmon_device_unregister(hwmon);
	eeepc->hwmon_device = NULL;
}
/* Register the hwmon device and its attribute group under the platform
 * device; on sysfs failure the device is unregistered again. */
static int eeepc_hwmon_init(struct eeepc_laptop *eeepc)
{
struct device *hwmon;
int result;
hwmon = hwmon_device_register(&eeepc->platform_device->dev);
if (IS_ERR(hwmon)) {
pr_err("Could not register eeepc hwmon device\n");
eeepc->hwmon_device = NULL;
return PTR_ERR(hwmon);
}
eeepc->hwmon_device = hwmon;
result = sysfs_create_group(&hwmon->kobj,
&hwmon_attribute_group);
if (result)
eeepc_hwmon_exit(eeepc);
return result;
}
/*
* Backlight device
*/
/* Current panel backlight level (0-15) straight from the BIOS. */
static int read_brightness(struct backlight_device *bd)
{
	return get_acpi(bl_get_data(bd), CM_ASL_PANELBRIGHT);
}
/* Write a panel backlight level through the BIOS setter. */
static int set_brightness(struct backlight_device *bd, int value)
{
	return set_acpi(bl_get_data(bd), CM_ASL_PANELBRIGHT, value);
}
/* Backlight-core update_status callback: push props.brightness out. */
static int update_bl_status(struct backlight_device *bd)
{
return set_brightness(bd, bd->props.brightness);
}
static const struct backlight_ops eeepcbl_ops = {
.get_brightness = read_brightness,
.update_status = update_bl_status,
};
/* Called on a brightness hotkey: force the backlight core to re-read
 * the hardware level and return the previous value so the caller can
 * decide whether brightness went up or down. */
static int eeepc_backlight_notify(struct eeepc_laptop *eeepc)
{
struct backlight_device *bd = eeepc->backlight_device;
int old = bd->props.brightness;
backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
return old;
}
/* Register the platform backlight device (levels 0-15) and seed it
 * with the current hardware brightness. */
static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
{
struct backlight_properties props;
struct backlight_device *bd;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = 15;
bd = backlight_device_register(EEEPC_LAPTOP_FILE,
&eeepc->platform_device->dev, eeepc,
&eeepcbl_ops, &props);
if (IS_ERR(bd)) {
pr_err("Could not register eeepc backlight device\n");
eeepc->backlight_device = NULL;
return PTR_ERR(bd);
}
eeepc->backlight_device = bd;
bd->props.brightness = read_brightness(bd);
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
return 0;
}
/* Unregister the backlight device, if one was registered. */
static void eeepc_backlight_exit(struct eeepc_laptop *eeepc)
{
	struct backlight_device *bd = eeepc->backlight_device;

	eeepc->backlight_device = NULL;
	if (bd)
		backlight_device_unregister(bd);
}
/*
* Input device (i.e. hotkeys)
*/
/* Allocate and register the hotkey input device with the sparse keymap. */
static int eeepc_input_init(struct eeepc_laptop *eeepc)
{
struct input_dev *input;
int error;
input = input_allocate_device();
if (!input) {
pr_info("Unable to allocate input device\n");
return -ENOMEM;
}
input->name = "Asus EeePC extra buttons";
input->phys = EEEPC_LAPTOP_FILE "/input0";
input->id.bustype = BUS_HOST;
input->dev.parent = &eeepc->platform_device->dev;
error = sparse_keymap_setup(input, eeepc_keymap, NULL);
if (error) {
pr_err("Unable to setup input device keymap\n");
goto err_free_dev;
}
error = input_register_device(input);
if (error) {
pr_err("Unable to register input device\n");
goto err_free_keymap;
}
eeepc->inputdev = input;
return 0;
err_free_keymap:
sparse_keymap_free(input);
err_free_dev:
input_free_device(input);
return error;
}
/* Free the sparse keymap and unregister the input device.
 * NOTE(review): the keymap is freed while the device is still
 * registered - verify against the sparse-keymap API whether the
 * unregister should come first. */
static void eeepc_input_exit(struct eeepc_laptop *eeepc)
{
if (eeepc->inputdev) {
sparse_keymap_free(eeepc->inputdev);
input_unregister_device(eeepc->inputdev);
}
eeepc->inputdev = NULL;
}
/*
* ACPI driver
*/
/* Report an ACPI hotkey event through the sparse keymap; log codes the
 * keymap does not know about. */
static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event)
{
	struct input_dev *idev = eeepc->inputdev;

	if (idev && !sparse_keymap_report_event(idev, event, 1, true))
		pr_info("Unknown key %x pressed\n", event);
}
/*
 * Main ACPI notify handler: forward the event to proc/netlink, then
 * translate brightness events (0x20-0x2f) into min/max key codes and
 * report everything else as a plain hotkey.
 */
static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
{
struct eeepc_laptop *eeepc = acpi_driver_data(device);
u16 count;
if (event > ACPI_MAX_SYS_NOTIFY)
return;
count = eeepc->event_count[event % 128]++;
acpi_bus_generate_proc_event(device, event, count);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
count);
/* Brightness events are special */
if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) {
/* Ignore them completely if the acpi video driver is used */
if (eeepc->backlight_device != NULL) {
int old_brightness, new_brightness;
/* Update the backlight device. */
old_brightness = eeepc_backlight_notify(eeepc);
/* Convert event to keypress (obsolescent hack) */
new_brightness = event - NOTIFY_BRN_MIN;
if (new_brightness < old_brightness) {
event = NOTIFY_BRN_MIN; /* brightness down */
} else if (new_brightness > old_brightness) {
event = NOTIFY_BRN_MAX; /* brightness up */
} else {
/*
* no change in brightness - already at min/max,
* event will be desired value (or else ignored)
*/
}
eeepc_input_notify(eeepc, event);
}
} else {
/* Everything else is a bona-fide keypress event */
eeepc_input_notify(eeepc, event);
}
}
/* Apply per-model DMI quirks: disable cpufv and/or wlan hotplug on
 * models known to misbehave. */
static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
{
const char *model;
model = dmi_get_system_info(DMI_PRODUCT_NAME);
if (!model)
return;
/*
* Blacklist for setting cpufv (cpu speed).
*
* EeePC 4G ("701") implements CFVS, but it is not supported
* by the pre-installed OS, and the original option to change it
* in the BIOS setup screen was removed in later versions.
*
* Judging by the lack of "Super Hybrid Engine" on Asus product pages,
* this applies to all "701" models (4G/4G Surf/2G Surf).
*
* So Asus made a deliberate decision not to support it on this model.
* We have several reports that using it can cause the system to hang
*
* The hang has also been reported on a "702" (Model name "8G"?).
*
* We avoid dmi_check_system() / dmi_match(), because they use
* substring matching. We don't want to affect the "701SD"
* and "701SDX" models, because they do support S.H.E.
*/
if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
eeepc->cpufv_disabled = true;
pr_info("model %s does not officially support setting cpu "
"speed\n", model);
pr_info("cpufv disabled to avoid instability\n");
}
/*
* Blacklist for wlan hotplug
*
* Eeepc 1005HA doesn't work like others models and don't need the
* hotplug code. In fact, current hotplug code seems to unplug another
* device...
*/
if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
strcmp(model, "1005PE") == 0) {
eeepc->hotplug_disabled = true;
pr_info("wlan hotplug disabled\n");
}
}
static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name)
{
int dummy;
/* Some BIOSes do not report cm although it is available.
Check if cm_getv[cm] works and, if yes, assume cm should be set. */
if (!(eeepc->cm_supported & (1 << cm))
&& !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) {
pr_info("%s (%x) not reported by BIOS,"
" enabling anyway\n", name, 1 << cm);
eeepc->cm_supported |= 1 << cm;
}
}
/* Probe the control methods commonly omitted from CMSG. */
static void cmsg_quirks(struct eeepc_laptop *eeepc)
{
cmsg_quirk(eeepc, CM_ASL_LID, "LID");
cmsg_quirk(eeepc, CM_ASL_TYPE, "TYPE");
cmsg_quirk(eeepc, CM_ASL_PANELPOWER, "PANELPOWER");
cmsg_quirk(eeepc, CM_ASL_TPD, "TPD");
}
/*
 * Initialize the firmware hotkey interface: call INIT with the feature
 * bits the OS will handle itself, then read CMSG to learn which
 * control methods this BIOS supports (with quirks applied).
 */
static int __devinit eeepc_acpi_init(struct eeepc_laptop *eeepc)
{
unsigned int init_flags;
int result;
result = acpi_bus_get_status(eeepc->device);
if (result)
return result;
if (!eeepc->device->status.present) {
pr_err("Hotkey device not present, aborting\n");
return -ENODEV;
}
init_flags = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
pr_notice("Hotkey init flags 0x%x\n", init_flags);
if (write_acpi_int(eeepc->handle, "INIT", init_flags)) {
pr_err("Hotkey initialization failed\n");
return -ENODEV;
}
/* get control methods supported */
if (read_acpi_int(eeepc->handle, "CMSG", &eeepc->cm_supported)) {
pr_err("Get control methods supported failed\n");
return -ENODEV;
}
cmsg_quirks(eeepc);
pr_info("Get control methods supported: 0x%x\n", eeepc->cm_supported);
return 0;
}
/* Power on the camera at probe time if it is present but off. */
static void __devinit eeepc_enable_camera(struct eeepc_laptop *eeepc)
{
/*
* If the following call to set_acpi() fails, it's because there's no
* camera so we can ignore the error.
*/
if (get_acpi(eeepc, CM_ASL_CAMERA) == 0)
set_acpi(eeepc, CM_ASL_CAMERA, 1);
}
/* Set once the first (and only) ACPI device instance has been probed. */
static bool eeepc_device_present;
/*
 * ACPI probe: allocate driver state, initialize the firmware interface
 * and register every sub-device (platform, backlight, input, hwmon,
 * led, rfkill).  Failures unwind in reverse order via gotos.
 */
static int __devinit eeepc_acpi_add(struct acpi_device *device)
{
struct eeepc_laptop *eeepc;
int result;
pr_notice(EEEPC_LAPTOP_NAME "\n");
eeepc = kzalloc(sizeof(struct eeepc_laptop), GFP_KERNEL);
if (!eeepc)
return -ENOMEM;
eeepc->handle = device->handle;
strcpy(acpi_device_name(device), EEEPC_ACPI_DEVICE_NAME);
strcpy(acpi_device_class(device), EEEPC_ACPI_CLASS);
device->driver_data = eeepc;
eeepc->device = device;
eeepc->hotplug_disabled = hotplug_disabled;
eeepc_dmi_check(eeepc);
result = eeepc_acpi_init(eeepc);
if (result)
goto fail_platform;
eeepc_enable_camera(eeepc);
/*
* Register the platform device first. It is used as a parent for the
* sub-devices below.
*
* Note that if there are multiple instances of this ACPI device it
* will bail out, because the platform device is registered with a
* fixed name. Of course it doesn't make sense to have more than one,
* and machine-specific scripts find the fixed name convenient. But
* It's also good for us to exclude multiple instances because both
* our hwmon and our wlan rfkill subdevice use global ACPI objects
* (the EC and the wlan PCI slot respectively).
*/
result = eeepc_platform_init(eeepc);
if (result)
goto fail_platform;
if (!acpi_video_backlight_support()) {
result = eeepc_backlight_init(eeepc);
if (result)
goto fail_backlight;
} else
pr_info("Backlight controlled by ACPI video driver\n");
result = eeepc_input_init(eeepc);
if (result)
goto fail_input;
result = eeepc_hwmon_init(eeepc);
if (result)
goto fail_hwmon;
result = eeepc_led_init(eeepc);
if (result)
goto fail_led;
result = eeepc_rfkill_init(eeepc);
if (result)
goto fail_rfkill;
eeepc_device_present = true;
return 0;
fail_rfkill:
eeepc_led_exit(eeepc);
fail_led:
eeepc_hwmon_exit(eeepc);
fail_hwmon:
eeepc_input_exit(eeepc);
fail_input:
eeepc_backlight_exit(eeepc);
fail_backlight:
eeepc_platform_exit(eeepc);
fail_platform:
kfree(eeepc);
return result;
}
/*
 * ACPI .remove callback: tear down all sub-devices registered by
 * eeepc_acpi_add() and free the per-device state.
 */
static int eeepc_acpi_remove(struct acpi_device *device, int type)
{
	struct eeepc_laptop *eeepc = acpi_driver_data(device);

	eeepc_backlight_exit(eeepc);
	eeepc_rfkill_exit(eeepc);
	eeepc_input_exit(eeepc);
	eeepc_hwmon_exit(eeepc);
	eeepc_led_exit(eeepc);
	eeepc_platform_exit(eeepc);

	kfree(eeepc);
	return 0;
}
/* ACPI IDs handled by this driver; exported for module autoloading. */
static const struct acpi_device_id eeepc_device_ids[] = {
	{EEEPC_ACPI_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
/*
 * ACPI driver glue.  ACPI_DRIVER_ALL_NOTIFY_EVENTS routes both system
 * and device notifications to eeepc_acpi_notify().
 */
static struct acpi_driver eeepc_acpi_driver = {
	.name = EEEPC_LAPTOP_NAME,
	.class = EEEPC_ACPI_CLASS,
	.owner = THIS_MODULE,
	.ids = eeepc_device_ids,
	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
	.ops = {
		.add = eeepc_acpi_add,
		.remove = eeepc_acpi_remove,
		.notify = eeepc_acpi_notify,
	},
};
/*
 * Module init: register the platform driver and the ACPI driver, then
 * fail with -ENODEV if no Eee PC ACPI device was bound, so the module
 * does not stay loaded on other machines.
 */
static int __init eeepc_laptop_init(void)
{
	int result;

	result = platform_driver_register(&platform_driver);
	if (result < 0)
		return result;

	result = acpi_bus_register_driver(&eeepc_acpi_driver);
	if (result < 0)
		goto fail_acpi_driver;

	if (!eeepc_device_present) {
		result = -ENODEV;
		goto fail_no_device;
	}

	return 0;

fail_no_device:
	acpi_bus_unregister_driver(&eeepc_acpi_driver);
fail_acpi_driver:
	platform_driver_unregister(&platform_driver);
	return result;
}
/* Module exit: unregister drivers in reverse order of registration. */
static void __exit eeepc_laptop_exit(void)
{
	acpi_bus_unregister_driver(&eeepc_acpi_driver);
	platform_driver_unregister(&platform_driver);
}

module_init(eeepc_laptop_init);
module_exit(eeepc_laptop_exit);
| gpl-2.0 |
HelllGuest/sprout_kernel | drivers/block/paride/pt.c | 8467 | 23780 | /*
pt.c (c) 1998 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
This is the high-level driver for parallel port ATAPI tape
drives based on chips supported by the paride module.
The driver implements both rewinding and non-rewinding
devices, filemarks, and the rewind ioctl. It allocates
a small internal "bounce buffer" for each open device, but
otherwise expects buffering and blocking to be done at the
user level. As with most block-structured tapes, short
writes are padded to full tape blocks, so reading back a file
may return more data than was actually written.
By default, the driver will autoprobe for a single parallel
port ATAPI tape drive, but if their individual parameters are
specified, the driver can handle up to 4 drives.
The rewinding devices are named /dev/pt0, /dev/pt1, ...
while the non-rewinding devices are /dev/npt0, /dev/npt1, etc.
The behaviour of the pt driver can be altered by setting
some parameters from the insmod command line. The following
parameters are adjustable:
drive0 These four arguments can be arrays of
drive1 1-6 integers as follows:
drive2
drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
Where,
<prt> is the base of the parallel port address for
the corresponding drive. (required)
<pro> is the protocol number for the adapter that
supports this drive. These numbers are
logged by 'paride' when the protocol modules
are initialised. (0 if not given)
<uni> for those adapters that support chained
devices, this is the unit selector for the
chain of devices on the given port. It should
be zero for devices that don't support chaining.
(0 if not given)
<mod> this can be -1 to choose the best mode, or one
of the mode numbers supported by the adapter.
(-1 if not given)
<slv> ATAPI devices can be jumpered to master or slave.
Set this to 0 to choose the master drive, 1 to
choose the slave, -1 (the default) to choose the
first drive found.
<dly> some parallel ports require the driver to
go more slowly. -1 sets a default value that
should work with the chosen protocol. Otherwise,
set this to a small integer, the larger it is
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
major You may use this parameter to override the
default major number (96) that this driver
will use. Be sure to change the device
name as well.
name This parameter is a character string that
contains the name the kernel will use for this
device (in /proc output, for instance).
(default "pt").
verbose This parameter controls the amount of logging
that the driver will do. Set it to 0 for
normal operation, 1 to see autoprobe progress
messages, or 2 to see additional debugging
output. (default 0)
If this driver is built into the kernel, you can use
the following command line parameters, with the same values
as the corresponding module parameters listed above:
pt.drive0
pt.drive1
pt.drive2
pt.drive3
In addition, you can use the parameter pt.disable to disable
the driver entirely.
*/
/* Changes:
1.01 GRG 1998.05.06 Round up transfer size, fix ready_wait,
loosed interpretation of ATAPI standard
for clearing error status.
Eliminate sti();
1.02 GRG 1998.06.16 Eliminate an Ugh.
1.03 GRG 1998.08.15 Adjusted PT_TMO, use HZ in loop timing,
extra debugging
1.04 GRG 1998.09.24 Repair minor coding error, added jumbo support
*/
#define PT_VERSION "1.04"
#define PT_MAJOR 96
#define PT_NAME "pt"
#define PT_UNITS 4
#include <linux/types.h>
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is off
by default.
*/
static bool verbose = 0;
static int major = PT_MAJOR;
static char *name = PT_NAME;
static int disable = 0;
static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
#define D_PRT 0
#define D_PRO 1
#define D_UNI 2
#define D_MOD 3
#define D_SLV 4
#define D_DLY 5
#define DU (*drives[unit])
/* end of parameters */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mtio.h>
#include <linux/device.h>
#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */
#include <linux/mutex.h>
#include <asm/uaccess.h>
module_param(verbose, bool, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);
#include "paride.h"
#define PT_MAX_RETRIES 5
#define PT_TMO 3000 /* interrupt timeout in jiffies */
#define PT_SPIN_DEL 50 /* spin delay in micro-seconds */
#define PT_RESET_TMO 30 /* 30 seconds */
#define PT_READY_TMO 60 /* 60 seconds */
#define PT_REWIND_TMO 1200 /* 20 minutes */
#define PT_SPIN ((1000000/(HZ*PT_SPIN_DEL))*PT_TMO)
#define STAT_ERR 0x00001
#define STAT_INDEX 0x00002
#define STAT_ECC 0x00004
#define STAT_DRQ 0x00008
#define STAT_SEEK 0x00010
#define STAT_WRERR 0x00020
#define STAT_READY 0x00040
#define STAT_BUSY 0x00080
#define STAT_SENSE 0x1f000
#define ATAPI_TEST_READY 0x00
#define ATAPI_REWIND 0x01
#define ATAPI_REQ_SENSE 0x03
#define ATAPI_READ_6 0x08
#define ATAPI_WRITE_6 0x0a
#define ATAPI_WFM 0x10
#define ATAPI_IDENTIFY 0x12
#define ATAPI_MODE_SENSE 0x1a
#define ATAPI_LOG_SENSE 0x4d
static DEFINE_MUTEX(pt_mutex);
static int pt_open(struct inode *inode, struct file *file);
static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int pt_release(struct inode *inode, struct file *file);
static ssize_t pt_read(struct file *filp, char __user *buf,
size_t count, loff_t * ppos);
static ssize_t pt_write(struct file *filp, const char __user *buf,
size_t count, loff_t * ppos);
static int pt_detect(void);
/* bits in tape->flags */
#define PT_MEDIA 1
#define PT_WRITE_OK 2
#define PT_REWIND 4
#define PT_WRITING 8
#define PT_READING 16
#define PT_EOF 32
#define PT_NAMELEN 8
#define PT_BUFSIZE 16384
/* Per-drive state; one entry per possible unit in pt[]. */
struct pt_unit {
	struct pi_adapter pia;	/* interface to paride layer */
	struct pi_adapter *pi;	/* points at pia (set up in pt_detect) */
	int flags;		/* various state flags (PT_MEDIA etc.) */
	int last_sense;		/* result of last request sense */
	int drive;		/* 0 master, 1 slave, -1 probe both */
	atomic_t available;	/* 1 if access is available 0 otherwise */
	int bs;			/* block size */
	int capacity;		/* Size of tape in KB */
	int present;		/* device present ? */
	char *bufptr;		/* PT_BUFSIZE bounce buffer, allocated at open */
	char name[PT_NAMELEN];	/* pt0, pt1, ... */
};
static int pt_identify(struct pt_unit *tape);
static struct pt_unit pt[PT_UNITS];
static char pt_scratch[512]; /* scratch block buffer */
/* kernel glue structures */

/* Character-device entry points; seeking is a no-op on the tape. */
static const struct file_operations pt_fops = {
	.owner = THIS_MODULE,
	.read = pt_read,
	.write = pt_write,
	.unlocked_ioctl = pt_ioctl,
	.open = pt_open,
	.release = pt_release,
	.llseek = noop_llseek,
};
/* sysfs class support */
static struct class *pt_class;
/* Read the alternate status register (register bank 1, offset 6). */
static inline int status_reg(struct pi_adapter *pi)
{
	return pi_read_regr(pi, 1, 6);
}
/* Read an ATA task-file register (register bank 0). */
static inline int read_reg(struct pi_adapter *pi, int reg)
{
	return pi_read_regr(pi, 0, reg);
}
/* Write an ATA task-file register (register bank 0). */
static inline void write_reg(struct pi_adapter *pi, int reg, int val)
{
	pi_write_regr(pi, 0, reg, val);
}
/* ATA drive-select byte: 0xa0 for master (drive 0), 0xb0 for slave. */
static inline u8 DRIVE(struct pt_unit *tape)
{
	return 0xa0+0x10*tape->drive;
}
/*
 * Busy-wait on the status register until all bits in 'go' clear and
 * (when 'stop' is non-zero) some bit in 'stop' is set, bounded by
 * PT_SPIN iterations of PT_SPIN_DEL microseconds.
 * Returns 0 on success, otherwise (err << 8) | status, with 0x100
 * or'ed into err on timeout.  'fun'/'msg' label the diagnostic line;
 * fun == NULL suppresses logging.
 */
static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg)
{
	int j, r, e, s, p;
	struct pi_adapter *pi = tape->pi;

	j = 0;
	while ((((r = status_reg(pi)) & go) || (stop && (!(r & stop))))
	       && (j++ < PT_SPIN))
		udelay(PT_SPIN_DEL);

	if ((r & (STAT_ERR & stop)) || (j > PT_SPIN)) {
		s = read_reg(pi, 7);
		e = read_reg(pi, 1);
		p = read_reg(pi, 2);
		if (j > PT_SPIN)
			e |= 0x100;	/* flag the timeout in the error byte */
		if (fun)
			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
			       " loop=%d phase=%d\n",
			       tape->name, fun, msg, r, s, e, j, p);
		return (e << 8) + s;
	}
	return 0;
}
/*
 * Start an ATAPI packet command: select the drive, program the
 * expected transfer length, issue the PACKET command and send the
 * 12-byte command block.  Leaves the adapter connected on success
 * (returns 0); disconnects and returns -1 on any failure.
 */
static int pt_command(struct pt_unit *tape, char *cmd, int dlen, char *fun)
{
	struct pi_adapter *pi = tape->pi;

	pi_connect(pi);

	write_reg(pi, 6, DRIVE(tape));

	if (pt_wait(tape, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
		pi_disconnect(pi);
		return -1;
	}

	/* byte count registers: low / high byte of expected data length */
	write_reg(pi, 4, dlen % 256);
	write_reg(pi, 5, dlen / 256);
	write_reg(pi, 7, 0xa0);	/* ATAPI packet command */

	if (pt_wait(tape, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
		pi_disconnect(pi);
		return -1;
	}

	/* interrupt-reason register must indicate the command phase */
	if (read_reg(pi, 2) != 1) {
		printk("%s: %s: command phase error\n", tape->name, fun);
		pi_disconnect(pi);
		return -1;
	}

	pi_write_block(pi, cmd, 12);

	return 0;
}
/*
 * Finish a packet command started by pt_command(): transfer any data
 * the drive requests (direction taken from the interrupt-reason
 * register: 0 = host-to-drive, 2 = drive-to-host; count rounded up to
 * a multiple of 4), wait for completion and disconnect.
 * Returns 0 or the first non-zero pt_wait() result.
 */
static int pt_completion(struct pt_unit *tape, char *buf, char *fun)
{
	struct pi_adapter *pi = tape->pi;
	int r, s, n, p;

	r = pt_wait(tape, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
		    fun, "completion");

	if (read_reg(pi, 7) & STAT_DRQ) {
		n = (((read_reg(pi, 4) + 256 * read_reg(pi, 5)) +
		      3) & 0xfffc);
		p = read_reg(pi, 2) & 3;
		if (p == 0)
			pi_write_block(pi, buf, n);
		if (p == 2)
			pi_read_block(pi, buf, n);
	}

	s = pt_wait(tape, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");

	pi_disconnect(pi);

	return (r ? r : s);
}
/*
 * Issue ATAPI REQUEST SENSE and cache key/ASC/ASCQ packed into
 * tape->last_sense (low byte = sense key, then ASC, then ASCQ), or -1
 * when the request itself failed.  'quiet' suppresses the log line.
 */
static void pt_req_sense(struct pt_unit *tape, int quiet)
{
	char rs_cmd[12] = { ATAPI_REQ_SENSE, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
	char buf[16];
	int r;

	r = pt_command(tape, rs_cmd, 16, "Request sense");
	mdelay(1);
	if (!r)
		pt_completion(tape, buf, "Request sense");

	tape->last_sense = -1;
	if (!r) {
		if (!quiet)
			printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
			       tape->name, buf[2] & 0xf, buf[12], buf[13]);
		tape->last_sense = (buf[2] & 0xf) | ((buf[12] & 0xff) << 8)
		    | ((buf[13] & 0xff) << 16);
	}
}
/*
 * Run a complete ATAPI command (packet + data/status phases).  On any
 * failure the sense data is fetched and cached, quietly when no
 * function label was supplied.  Returns 0 on success.
 */
static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *fun)
{
	int err = pt_command(tape, cmd, dlen, fun);

	mdelay(1);
	if (err == 0)
		err = pt_completion(tape, buf, fun);
	if (err != 0)
		pt_req_sense(tape, fun == NULL);

	return err;
}
/* Sleep interruptibly for cs jiffies. */
static void pt_sleep(int cs)
{
	schedule_timeout_interruptible(cs);
}
/*
 * Poll the drive every 'pause' jiffies, at most 'tmo' times, until it
 * raises DSC (STAT_SEEK, media command done) or reports an error.
 * Returns 1 on success; on timeout or error it logs, requests sense
 * and returns 0.
 */
static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg)
{
	struct pi_adapter *pi = tape->pi;
	int k, e, s;

	k = 0;
	e = 0;
	s = 0;
	while (k < tmo) {
		pt_sleep(pause);
		k++;
		pi_connect(pi);
		write_reg(pi, 6, DRIVE(tape));
		s = read_reg(pi, 7);
		e = read_reg(pi, 1);
		pi_disconnect(pi);
		if (s & (STAT_ERR | STAT_SEEK))
			break;
	}
	if ((k >= tmo) || (s & STAT_ERR)) {
		if (k >= tmo)
			printk("%s: %s DSC timeout\n", tape->name, msg);
		else
			printk("%s: %s stat=0x%x err=0x%x\n", tape->name, msg, s,
			       e);
		pt_req_sense(tape, 0);
		return 0;
	}
	return 1;
}
/*
 * Issue a media-access command (rewind, write filemark) that the
 * drive executes in the background, then poll DSC once per second for
 * up to tmo iterations until it completes.
 */
static void pt_media_access_cmd(struct pt_unit *tape, int tmo, char *cmd, char *fun)
{
	if (pt_command(tape, cmd, 0, fun)) {
		pt_req_sense(tape, 0);
		return;
	}
	pi_disconnect(tape->pi);
	pt_poll_dsc(tape, HZ, tmo, fun);
}
/* Rewind the tape; may take up to PT_REWIND_TMO (20 minutes). */
static void pt_rewind(struct pt_unit *tape)
{
	/* REWIND carries no parameter bytes beyond the opcode */
	char rw_cmd[12] = { [0] = ATAPI_REWIND };

	pt_media_access_cmd(tape, PT_REWIND_TMO, rw_cmd, "rewind");
}
/* Write a single filemark at the current tape position. */
static void pt_write_fm(struct pt_unit *tape)
{
	/* byte 4 is the filemark count: exactly one */
	char wm_cmd[12] = { [0] = ATAPI_WFM, [4] = 1 };

	pt_media_access_cmd(tape, PT_TMO, wm_cmd, "write filemark");
}
#define DBMSG(msg) ((verbose>1)?(msg):NULL)
/*
 * Issue an ATAPI DEVICE RESET (command 0x08), wait for BUSY to clear
 * (up to PT_RESET_TMO tenths of a second), then check the five
 * signature registers against the ATAPI signature (1,1,1,0x14,0xeb).
 * Returns 0 if the signature matched, -1 otherwise.
 */
static int pt_reset(struct pt_unit *tape)
{
	struct pi_adapter *pi = tape->pi;
	int i, k, flg;
	int expect[5] = { 1, 1, 1, 0x14, 0xeb };

	pi_connect(pi);
	write_reg(pi, 6, DRIVE(tape));
	write_reg(pi, 7, 8);

	pt_sleep(20 * HZ / 1000);

	k = 0;
	while ((k++ < PT_RESET_TMO) && (status_reg(pi) & STAT_BUSY))
		pt_sleep(HZ / 10);

	flg = 1;
	for (i = 0; i < 5; i++)
		flg &= (read_reg(pi, i + 1) == expect[i]);

	if (verbose) {
		printk("%s: Reset (%d) signature = ", tape->name, k);
		for (i = 0; i < 5; i++)
			printk("%3x", read_reg(pi, i + 1));
		if (!flg)
			printk(" (incorrect)");
		printk("\n");
	}

	pi_disconnect(pi);
	return flg - 1;	/* 0 when all five registers matched */
}
/*
 * Retry TEST UNIT READY once a second for up to tmo seconds while the
 * sense data indicates a transient state (ASC/key 0x0402 "becoming
 * ready" or sense key 6 "unit attention").  Returns 0 when ready, the
 * packed sense value on a hard error, or 0x000020 (sense key 2, not
 * ready) on timeout.
 */
static int pt_ready_wait(struct pt_unit *tape, int tmo)
{
	char tr_cmd[12] = { ATAPI_TEST_READY, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int k, p;

	k = 0;
	while (k < tmo) {
		tape->last_sense = 0;
		pt_atapi(tape, tr_cmd, 0, NULL, DBMSG("test unit ready"));
		p = tape->last_sense;
		if (!p)
			return 0;
		if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
			return p;
		k++;
		pt_sleep(HZ);
	}
	return 0x000020;	/* timeout */
}
/*
 * Copy len bytes starting at buf[offs] into targ as a NUL-terminated
 * string, collapsing runs of spaces to a single space (a space is
 * skipped only when the previously copied character was also a space)
 * and dropping one trailing space.
 */
static void xs(char *buf, char *targ, int offs, int len)
{
	int k, out;
	char last;

	out = 0;
	last = 0;
	for (k = 0; k < len; k++) {
		char c = buf[k + offs];

		if (c != 0x20 || c != last) {
			last = c;
			targ[out++] = c;
		}
	}
	if (last == 0x20)
		out--;
	targ[out] = 0;
}
/*
 * Decode 'size' bytes starting at buf[offs] as a big-endian unsigned
 * integer and return it.
 */
static int xn(char *buf, int offs, int size)
{
	int value = 0;
	int k;

	for (k = 0; k < size; k++)
		value = value * 256 + (buf[k + offs] & 0xff);
	return value;
}
static int pt_identify(struct pt_unit *tape)
{
int dt, s;
char *ms[2] = { "master", "slave" };
char mf[10], id[18];
char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
char ms_cmd[12] =
{ ATAPI_MODE_SENSE, 0, 0x2a, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
char ls_cmd[12] =
{ ATAPI_LOG_SENSE, 0, 0x71, 0, 0, 0, 0, 0, 36, 0, 0, 0 };
char buf[36];
s = pt_atapi(tape, id_cmd, 36, buf, "identify");
if (s)
return -1;
dt = buf[0] & 0x1f;
if (dt != 1) {
if (verbose)
printk("%s: Drive %d, unsupported type %d\n",
tape->name, tape->drive, dt);
return -1;
}
xs(buf, mf, 8, 8);
xs(buf, id, 16, 16);
tape->flags = 0;
tape->capacity = 0;
tape->bs = 0;
if (!pt_ready_wait(tape, PT_READY_TMO))
tape->flags |= PT_MEDIA;
if (!pt_atapi(tape, ms_cmd, 36, buf, "mode sense")) {
if (!(buf[2] & 0x80))
tape->flags |= PT_WRITE_OK;
tape->bs = xn(buf, 10, 2);
}
if (!pt_atapi(tape, ls_cmd, 36, buf, "log sense"))
tape->capacity = xn(buf, 24, 4);
printk("%s: %s %s, %s", tape->name, mf, id, ms[tape->drive]);
if (!(tape->flags & PT_MEDIA))
printk(", no media\n");
else {
if (!(tape->flags & PT_WRITE_OK))
printk(", RO");
printk(", blocksize %d, %d MB\n", tape->bs, tape->capacity / 1024);
}
return 0;
}
/*
* returns 0, with id set if drive is detected
* -1, if drive detection failed
*/
static int pt_probe(struct pt_unit *tape)
{
	/* drive == -1 means try master then slave; otherwise only the
	   configured position is reset and identified */
	if (tape->drive == -1) {
		for (tape->drive = 0; tape->drive <= 1; tape->drive++)
			if (!pt_reset(tape))
				return pt_identify(tape);
		return -1;
	}

	if (pt_reset(tape))
		return -1;
	return pt_identify(tape);
}
/*
 * Probe for drives.  Every unit configured via the driveN module
 * parameters (non-zero port) is probed at its given address; when no
 * unit was specified at all, unit 0 is autoprobed.  Returns 0 when at
 * least one drive was found, -1 otherwise.
 *
 * Fixes vs. previous revision: the inner loop declared a second
 * 'struct pt_unit *tape' shadowing the outer one, and 'specified' was
 * redundantly re-initialised after its declaration.
 */
static int pt_detect(void)
{
	struct pt_unit *tape;
	int specified = 0, found = 0;
	int unit;

	printk("%s: %s version %s, major %d\n", name, name, PT_VERSION, major);

	for (unit = 0; unit < PT_UNITS; unit++) {
		tape = &pt[unit];
		tape->pi = &tape->pia;
		atomic_set(&tape->available, 1);
		tape->flags = 0;
		tape->last_sense = 0;
		tape->present = 0;
		tape->bufptr = NULL;
		tape->drive = DU[D_SLV];
		snprintf(tape->name, PT_NAMELEN, "%s%d", name, unit);
		if (!DU[D_PRT])
			continue;	/* this unit was not configured */
		specified++;
		if (pi_init(tape->pi, 0, DU[D_PRT], DU[D_MOD], DU[D_UNI],
		    DU[D_PRO], DU[D_DLY], pt_scratch, PI_PT,
		    verbose, tape->name)) {
			if (!pt_probe(tape)) {
				tape->present = 1;
				found++;
			} else
				pi_release(tape->pi);
		}
	}
	if (specified == 0) {
		/* no explicit configuration: autoprobe unit 0 only */
		tape = pt;
		if (pi_init(tape->pi, 1, -1, -1, -1, -1, -1, pt_scratch,
			    PI_PT, verbose, tape->name)) {
			if (!pt_probe(tape)) {
				tape->present = 1;
				found++;
			} else
				pi_release(tape->pi);
		}
	}
	if (found)
		return 0;

	printk("%s: No ATAPI tape drive detected\n", name);
	return -1;
}
/*
 * Open a tape node.  The low 7 minor bits select the unit; minor bit
 * 7 clear selects the rewind-on-close device (pt%d), set selects the
 * non-rewinding one (pt%dn).  'available' acts as an exclusive-open
 * lock; the tape is re-identified and a bounce buffer allocated for
 * the duration of the open.
 */
static int pt_open(struct inode *inode, struct file *file)
{
	int unit = iminor(inode) & 0x7F;
	struct pt_unit *tape = pt + unit;
	int err;

	mutex_lock(&pt_mutex);
	if (unit >= PT_UNITS || (!tape->present)) {
		mutex_unlock(&pt_mutex);
		return -ENODEV;
	}

	err = -EBUSY;
	if (!atomic_dec_and_test(&tape->available))
		goto out;

	pt_identify(tape);

	err = -ENODEV;
	if (!(tape->flags & PT_MEDIA))
		goto out;

	err = -EROFS;
	if ((!(tape->flags & PT_WRITE_OK)) && (file->f_mode & FMODE_WRITE))
		goto out;

	if (!(iminor(inode) & 128))
		tape->flags |= PT_REWIND;	/* rewind on last close */

	err = -ENOMEM;
	tape->bufptr = kmalloc(PT_BUFSIZE, GFP_KERNEL);
	if (tape->bufptr == NULL) {
		printk("%s: buffer allocation failed\n", tape->name);
		goto out;
	}

	file->private_data = tape;
	mutex_unlock(&pt_mutex);
	return 0;

out:
	atomic_inc(&tape->available);	/* release the exclusive-open lock */
	mutex_unlock(&pt_mutex);
	return err;
}
/*
 * ioctl handler: only MTIOCTOP is supported, and of its operations
 * only MTREW (rewind) and MTWEOF (write filemark).  Anything else is
 * -EINVAL; unknown ioctls are -ENOTTY.
 */
static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pt_unit *tape = file->private_data;
	struct mtop __user *p = (void __user *)arg;
	struct mtop mtop;

	switch (cmd) {
	case MTIOCTOP:
		if (copy_from_user(&mtop, p, sizeof(struct mtop)))
			return -EFAULT;

		switch (mtop.mt_op) {
		case MTREW:
			mutex_lock(&pt_mutex);
			pt_rewind(tape);
			mutex_unlock(&pt_mutex);
			return 0;
		case MTWEOF:
			mutex_lock(&pt_mutex);
			pt_write_fm(tape);
			mutex_unlock(&pt_mutex);
			return 0;
		default:
			/* FIXME: rate limit ?? */
			printk(KERN_DEBUG "%s: Unimplemented mt_op %d\n", tape->name,
			       mtop.mt_op);
			return -EINVAL;
		}
	default:
		return -ENOTTY;
	}
}
/*
 * Close: terminate a write with a filemark, rewind if this was the
 * rewind-on-close node, free the bounce buffer and drop the
 * exclusive-open lock.
 */
static int
pt_release(struct inode *inode, struct file *file)
{
	struct pt_unit *tape = file->private_data;

	/* should never happen: 'available' was taken at open */
	if (atomic_read(&tape->available) > 1)
		return -EINVAL;

	if (tape->flags & PT_WRITING)
		pt_write_fm(tape);

	if (tape->flags & PT_REWIND)
		pt_rewind(tape);

	kfree(tape->bufptr);
	tape->bufptr = NULL;

	atomic_inc(&tape->available);

	return 0;
}
/*
 * Read from the tape.  The first read after open switches the drive
 * into read-ahead mode.  Each pass issues an ATAPI READ(6) for up to
 * 32 KB, rounded up to whole tape blocks, and drains the DRQ data
 * through the PT_BUFSIZE bounce buffer to user space.  Returns the
 * number of bytes copied, 0 at a filemark / end of data (PT_EOF), or
 * a negative errno.
 */
static ssize_t pt_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	struct pt_unit *tape = filp->private_data;
	struct pi_adapter *pi = tape->pi;
	char rd_cmd[12] = { ATAPI_READ_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int k, n, r, p, s, t, b;

	/* cannot mix reading and writing on one open */
	if (!(tape->flags & (PT_READING | PT_WRITING))) {
		tape->flags |= PT_READING;
		if (pt_atapi(tape, rd_cmd, 0, NULL, "start read-ahead"))
			return -EIO;
	} else if (tape->flags & PT_WRITING)
		return -EIO;

	if (tape->flags & PT_EOF)
		return 0;

	t = 0;	/* total bytes copied to user space */

	while (count > 0) {

		if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "read"))
			return -EIO;

		n = count;
		if (n > 32768)
			n = 32768;	/* max per command */
		b = (n - 1 + tape->bs) / tape->bs;
		n = b * tape->bs;	/* rounded up to even block */

		rd_cmd[4] = b;	/* block count for READ(6) */

		r = pt_command(tape, rd_cmd, n, "read");

		mdelay(1);

		if (r) {
			pt_req_sense(tape, 0);
			return -EIO;
		}

		while (1) {

			r = pt_wait(tape, STAT_BUSY,
				    STAT_DRQ | STAT_ERR | STAT_READY,
				    DBMSG("read DRQ"), "");

			if (r & STAT_SENSE) {
				pi_disconnect(pi);
				pt_req_sense(tape, 0);
				return -EIO;
			}

			/* any other non-zero wait result: treat as EOF */
			if (r)
				tape->flags |= PT_EOF;

			s = read_reg(pi, 7);

			if (!(s & STAT_DRQ))
				break;	/* no more data for this command */

			/* byte count offered by the drive for this burst */
			n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
			p = (read_reg(pi, 2) & 3);
			if (p != 2) {	/* must be drive-to-host */
				pi_disconnect(pi);
				printk("%s: Phase error on read: %d\n", tape->name,
				       p);
				return -EIO;
			}

			/* drain the burst through the bounce buffer */
			while (n > 0) {
				k = n;
				if (k > PT_BUFSIZE)
					k = PT_BUFSIZE;
				pi_read_block(pi, tape->bufptr, k);
				n -= k;
				b = k;
				if (b > count)
					b = count;	/* padding past user request is dropped */
				if (copy_to_user(buf + t, tape->bufptr, b)) {
					pi_disconnect(pi);
					return -EFAULT;
				}
				t += b;
				count -= b;
			}

		}
		pi_disconnect(pi);
		if (tape->flags & PT_EOF)
			break;
	}

	return t;
}
/*
 * Write to the tape.  The first write after open switches the drive
 * into buffer-available mode.  Each pass issues an ATAPI WRITE(6) for
 * up to 32 KB rounded up to whole tape blocks (short writes are
 * padded with whatever is in the bounce buffer).  Returns bytes
 * consumed from user space, -ENOSPC at end of media, or a negative
 * errno.
 */
static ssize_t pt_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
	struct pt_unit *tape = filp->private_data;
	struct pi_adapter *pi = tape->pi;
	char wr_cmd[12] = { ATAPI_WRITE_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int k, n, r, p, s, t, b;

	if (!(tape->flags & PT_WRITE_OK))
		return -EROFS;

	/* cannot mix reading and writing on one open */
	if (!(tape->flags & (PT_READING | PT_WRITING))) {
		tape->flags |= PT_WRITING;
		if (pt_atapi
		    (tape, wr_cmd, 0, NULL, "start buffer-available mode"))
			return -EIO;
	} else if (tape->flags & PT_READING)
		return -EIO;

	if (tape->flags & PT_EOF)
		return -ENOSPC;

	t = 0;	/* total bytes consumed from user space */

	while (count > 0) {

		if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "write"))
			return -EIO;

		n = count;
		if (n > 32768)
			n = 32768;	/* max per command */
		b = (n - 1 + tape->bs) / tape->bs;
		n = b * tape->bs;	/* rounded up to even block */

		wr_cmd[4] = b;	/* block count for WRITE(6) */

		r = pt_command(tape, wr_cmd, n, "write");

		mdelay(1);

		if (r) {	/* error delivering command only */
			pt_req_sense(tape, 0);
			return -EIO;
		}

		while (1) {

			r = pt_wait(tape, STAT_BUSY,
				    STAT_DRQ | STAT_ERR | STAT_READY,
				    DBMSG("write DRQ"), NULL);

			if (r & STAT_SENSE) {
				pi_disconnect(pi);
				pt_req_sense(tape, 0);
				return -EIO;
			}

			/* any other non-zero wait result: end of media */
			if (r)
				tape->flags |= PT_EOF;

			s = read_reg(pi, 7);

			if (!(s & STAT_DRQ))
				break;	/* drive wants no more data */

			/* byte count requested by the drive for this burst */
			n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
			p = (read_reg(pi, 2) & 3);
			if (p != 0) {	/* must be host-to-drive */
				pi_disconnect(pi);
				printk("%s: Phase error on write: %d \n",
				       tape->name, p);
				return -EIO;
			}

			/* feed the burst through the bounce buffer */
			while (n > 0) {
				k = n;
				if (k > PT_BUFSIZE)
					k = PT_BUFSIZE;
				b = k;
				if (b > count)
					b = count;	/* tail is padded from buffer contents */
				if (copy_from_user(tape->bufptr, buf + t, b)) {
					pi_disconnect(pi);
					return -EFAULT;
				}
				pi_write_block(pi, tape->bufptr, k);
				t += b;
				count -= b;
				n -= k;
			}

		}
		pi_disconnect(pi);
		if (tape->flags & PT_EOF)
			break;
	}

	return t;
}
/*
 * Module init: detect drives, register the character device and
 * create the sysfs class plus pt%d / pt%dn device nodes.
 *
 * Fixes vs. previous revision:
 *  - register_chrdev() returns 0 on success when a non-zero major was
 *    requested (and the allocated major only when major == 0), so the
 *    unconditional 'major = err;' reset the default static major (96)
 *    to 0 and the device nodes were then created with MKDEV(0, ...).
 *    Only adopt the returned value when one was actually allocated.
 *  - the error path unregistered the literal "pt" although
 *    registration used the (user-overridable) 'name' parameter, as
 *    pt_exit() already does.
 */
static int __init pt_init(void)
{
	int unit;
	int err;

	if (disable) {
		err = -EINVAL;
		goto out;
	}

	if (pt_detect()) {
		err = -ENODEV;
		goto out;
	}

	err = register_chrdev(major, name, &pt_fops);
	if (err < 0) {
		printk("pt_init: unable to get major number %d\n", major);
		for (unit = 0; unit < PT_UNITS; unit++)
			if (pt[unit].present)
				pi_release(pt[unit].pi);
		goto out;
	}
	if (err > 0)
		major = err;	/* dynamic major was requested */

	pt_class = class_create(THIS_MODULE, "pt");
	if (IS_ERR(pt_class)) {
		err = PTR_ERR(pt_class);
		goto out_chrdev;
	}

	for (unit = 0; unit < PT_UNITS; unit++)
		if (pt[unit].present) {
			device_create(pt_class, NULL, MKDEV(major, unit), NULL,
				      "pt%d", unit);
			device_create(pt_class, NULL, MKDEV(major, unit + 128),
				      NULL, "pt%dn", unit);
		}
	err = 0;
	goto out;

out_chrdev:
	unregister_chrdev(major, name);
out:
	return err;
}
/*
 * Module exit: destroy the device nodes and class, unregister the
 * character device and release the paride adapters.
 */
static void __exit pt_exit(void)
{
	int unit;

	for (unit = 0; unit < PT_UNITS; unit++)
		if (pt[unit].present) {
			device_destroy(pt_class, MKDEV(major, unit));
			device_destroy(pt_class, MKDEV(major, unit + 128));
		}
	class_destroy(pt_class);
	unregister_chrdev(major, name);
	for (unit = 0; unit < PT_UNITS; unit++)
		if (pt[unit].present)
			pi_release(pt[unit].pi);
}
MODULE_LICENSE("GPL");
module_init(pt_init)
module_exit(pt_exit)
| gpl-2.0 |
invisiblek/android_kernel_lge_g3 | drivers/input/touchscreen/wm9713.c | 8467 | 12515 | /*
* wm9713.c -- Codec touch driver for Wolfson WM9713 AC97 Codec.
*
* Copyright 2003, 2004, 2005, 2006, 2007, 2008 Wolfson Microelectronics PLC.
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
* Parts Copyright : Ian Molton <spyro@f2s.com>
* Andrew Zabolotny <zap@homelink.ru>
* Russell King <rmk@arm.linux.org.uk>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/wm97xx.h>
#define TS_NAME "wm97xx"
#define WM9713_VERSION "1.00"
#define DEFAULT_PRESSURE 0xb0c0
/*
* Module parameters
*/
/*
* Set internal pull up for pen detect.
*
* Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive)
* i.e. pull up resistance = 64k Ohms / rpu.
*
* Adjust this value if you are having problems with pen detect not
* detecting any down event.
*/
static int rpu = 8;
module_param(rpu, int, 0);
MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
/*
* Set current used for pressure measurement.
*
* Set pil = 2 to use 400uA
* pil = 1 to use 200uA and
* pil = 0 to disable pressure measurement.
*
* This is used to increase the range of values returned by the adc
* when measuring touchpanel pressure.
*/
static int pil;
module_param(pil, int, 0);
MODULE_PARM_DESC(pil, "Set current used for pressure measurement.");
/*
* Set threshold for pressure measurement.
*
* Pen down pressure below threshold is ignored.
*/
static int pressure = DEFAULT_PRESSURE & 0xfff;
module_param(pressure, int, 0);
MODULE_PARM_DESC(pressure, "Set threshold for pressure measurement.");
/*
* Set adc sample delay.
*
* For accurate touchpanel measurements, some settling time may be
* required between the switch matrix applying a voltage across the
* touchpanel plate and the ADC sampling the signal.
*
* This delay can be set by setting delay = n, where n is the array
* position of the delay in the array delay_table below.
* Long delays > 1ms are supported for completeness, but are not
* recommended.
*/
static int delay = 4;
module_param(delay, int, 0);
MODULE_PARM_DESC(delay, "Set adc sample delay.");
/*
* Set five_wire = 1 to use a 5 wire touchscreen.
*
* NOTE: Five wire mode does not allow for readback of pressure.
*/
static int five_wire;
module_param(five_wire, int, 0);
MODULE_PARM_DESC(five_wire, "Set to '1' to use 5-wire touchscreen.");
/*
* Set adc mask function.
*
* Sources of glitch noise, such as signals driving an LCD display, may feed
* through to the touch screen plates and affect measurement accuracy. In
* order to minimise this, a signal may be applied to the MASK pin to delay or
* synchronise the sampling.
*
* 0 = No delay or sync
* 1 = High on pin stops conversions
* 2 = Edge triggered, edge on pin delays conversion by delay param (above)
* 3 = Edge triggered, edge on pin starts conversion after delay param
*/
static int mask;
module_param(mask, int, 0);
MODULE_PARM_DESC(mask, "Set adc mask function.");
/*
* Coordinate Polling Enable.
*
* Set to 1 to enable coordinate polling. e.g. x,y[,p] is sampled together
* for every poll.
*/
static int coord;
module_param(coord, int, 0);
MODULE_PARM_DESC(coord, "Polling coordinate mode");
/*
* ADC sample delay times in uS
*/
/* ADC settling delays in microseconds, indexed by the 'delay' module
   parameter (0-15). */
static const int delay_table[] = {
	21,	/* 1 AC97 Link frames */
	42,	/* 2 */
	84,	/* 4 */
	167,	/* 8 */
	333,	/* 16 */
	667,	/* 32 */
	1000,	/* 48 */
	1333,	/* 64 */
	2000,	/* 96 */
	2667,	/* 128 */
	3333,	/* 160 */
	4000,	/* 192 */
	4667,	/* 224 */
	5333,	/* 256 */
	6000,	/* 288 */
	0	/* No delay, switch matrix always on */
};
/*
* Delay after issuing a POLL command.
*
* The delay is 3 AC97 link frames + the touchpanel settling delay
*/
/* 3 AC97 link frames for the POLL command plus the settling delay. */
static inline void poll_delay(int d)
{
	udelay(3 * AC97_LINK_FRAME + delay_table[d]);
}
/*
 * Set up the physical settings of the WM9713: pen-detect pull-up,
 * 5-wire mode, pressure-measurement current, ADC settling delay and
 * MASK pin function, as selected by the module parameters.
 *
 * Fix vs. previous revision: the "setting adc sample delay" message
 * sat inside the out-of-range branch, so the effective delay was only
 * logged when an invalid value had been supplied; it is now reported
 * unconditionally.
 */
static void wm9713_phy_init(struct wm97xx *wm)
{
	u16 dig1 = 0, dig2, dig3;

	/* default values */
	dig2 = WM97XX_DELAY(4) | WM97XX_SLT(5);
	dig3 = WM9712_RPU(1);

	/* rpu */
	if (rpu) {
		dig3 &= 0xffc0;
		dig3 |= WM9712_RPU(rpu);
		dev_info(wm->dev, "setting pen detect pull-up to %d Ohms\n",
			 64000 / rpu);
	}

	/* Five wire panel? */
	if (five_wire) {
		dig3 |= WM9713_45W;
		dev_info(wm->dev, "setting 5-wire touchscreen mode.");

		/* pressure readback needs the 4-wire switch matrix */
		if (pil) {
			dev_warn(wm->dev,
				 "Pressure measurement not supported in 5 "
				 "wire mode, disabling\n");
			pil = 0;
		}
	}

	/* touchpanel pressure */
	if (pil == 2) {
		dig3 |= WM9712_PIL;
		dev_info(wm->dev,
			 "setting pressure measurement current to 400uA.");
	} else if (pil)
		dev_info(wm->dev,
			 "setting pressure measurement current to 200uA.");
	if (!pil)
		pressure = 0;

	/* sample settling delay */
	if (delay < 0 || delay > 15) {
		dev_info(wm->dev, "supplied delay out of range.");
		delay = 4;
	}
	dev_info(wm->dev, "setting adc sample delay to %d u Secs.",
		 delay_table[delay]);
	dig2 &= 0xff0f;
	dig2 |= WM97XX_DELAY(delay);

	/* mask */
	dig3 |= ((mask & 0x3) << 4);

	if (coord)
		dig3 |= WM9713_WAIT;

	wm->misc = wm97xx_reg_read(wm, 0x5a);

	wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
	wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
	wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);
	wm97xx_reg_write(wm, AC97_GPIO_STICKY, 0x0);
}
/*
 * Enable or disable the digitiser.  Enabling clears bit 15 of
 * EXTENDED_MID before arming pen/data detection in DIG3; disabling
 * disarms first and sets bit 15 last, so the reverse order is kept.
 */
static void wm9713_dig_enable(struct wm97xx *wm, int enable)
{
	u16 val;

	if (enable) {
		val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
		wm97xx_reg_write(wm, AC97_EXTENDED_MID, val & 0x7fff);
		wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] |
				 WM97XX_PRP_DET_DIG);
		wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* dummy read */
	} else {
		wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] &
				 ~WM97XX_PRP_DET_DIG);
		val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
		wm97xx_reg_write(wm, AC97_EXTENDED_MID, val | 0x8000);
	}
}
/* Restore the three digitiser registers saved by wm9713_aux_prepare(). */
static void wm9713_dig_restore(struct wm97xx *wm)
{
	static const int dig_regs[] = {
		AC97_WM9713_DIG1, AC97_WM9713_DIG2, AC97_WM9713_DIG3,
	};
	int i;

	for (i = 0; i < 3; i++)
		wm97xx_reg_write(wm, dig_regs[i], wm->dig_save[i]);
}
/*
 * Prepare for an AUX ADC conversion: save the current digitiser
 * registers (restored later by wm9713_dig_restore()), zero DIG1/DIG2
 * and leave only pen detection active in DIG3.
 */
static void wm9713_aux_prepare(struct wm97xx *wm)
{
	memcpy(wm->dig_save, wm->dig, sizeof(wm->dig));
	wm97xx_reg_write(wm, AC97_WM9713_DIG1, 0);
	wm97xx_reg_write(wm, AC97_WM9713_DIG2, 0);
	wm97xx_reg_write(wm, AC97_WM9713_DIG3, WM97XX_PRP_DET_DIG);
}
/* Non-zero when pen-detect mode (PDEN) is enabled in the DIG3 shadow. */
static inline int is_pden(struct wm97xx *wm)
{
	return wm->dig[2] & WM9713_PDEN;
}
/*
 * Read a sample from the WM9713 adc in polling mode.
 *
 * adcsel selects the source (WM97XX_ADCSEL_*, optionally or'ed with
 * WM97XX_PEN_DOWN to require the pen to be down).  On success returns
 * RC_VALID with *sample filled in; returns RC_PENUP when the pen is
 * up, the conversion timed out, or the wrong source came back.
 */
static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
{
	u16 dig1;
	int timeout = 5 * delay;
	bool wants_pen = adcsel & WM97XX_PEN_DOWN;

	if (wants_pen && !wm->pen_probably_down) {
		u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
		if (!(data & WM97XX_PEN_DOWN))
			return RC_PENUP;
		wm->pen_probably_down = 1;
	}

	/* set up digitiser */
	dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
	dig1 &= ~WM9713_ADCSEL_MASK;
	/* WM97XX_ADCSEL_* channels need to be converted to WM9713 format */
	dig1 |= 1 << ((adcsel & WM97XX_ADCSEL_MASK) >> 12);

	if (wm->mach_ops && wm->mach_ops->pre_sample)
		wm->mach_ops->pre_sample(adcsel);
	wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | WM9713_POLL);

	/* wait 3 AC97 time slots + delay for conversion */
	poll_delay(delay);

	/* wait for POLL to go low */
	while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL) &&
	       timeout) {
		udelay(AC97_LINK_FRAME);
		timeout--;
	}

	if (timeout <= 0) {
		/* If PDEN is set, we can get a timeout when pen goes up */
		if (is_pden(wm))
			wm->pen_probably_down = 0;
		else
			dev_dbg(wm->dev, "adc sample timeout");
		return RC_PENUP;
	}

	*sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
	if (wm->mach_ops && wm->mach_ops->post_sample)
		wm->mach_ops->post_sample(adcsel);

	/* check we have correct sample */
	if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
		dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
			adcsel & WM97XX_ADCSEL_MASK,
			*sample & WM97XX_ADCSEL_MASK);
		return RC_PENUP;
	}

	if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
		wm->pen_probably_down = 0;
		return RC_PENUP;
	}

	return RC_VALID;
}
/*
 * Read a full coordinate (x, y and optionally pressure) from the WM9713
 * adc in one polled coordinate-mode (COO) conversion.
 *
 * Returns RC_VALID on success, RC_PENUP when the pen is up or the
 * conversion timed out, and 0 on a bad/mismatched sample.
 * NOTE(review): the err path returns 0 rather than an RC_* constant --
 * confirm callers treat any non-RC_VALID value as a retry.
 */
static int wm9713_poll_coord(struct wm97xx *wm, struct wm97xx_data *data)
{
u16 dig1;
int timeout = 5 * delay;
if (!wm->pen_probably_down) {
/* cheap pen-down pre-check before programming a conversion */
u16 val = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(val & WM97XX_PEN_DOWN))
return RC_PENUP;
wm->pen_probably_down = 1;
}
/* set up digitiser */
dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
dig1 &= ~WM9713_ADCSEL_MASK;
if (pil)
dig1 |= WM9713_ADCSEL_PRES;
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
/* start a coordinate-mode conversion (POLL + COO) */
wm97xx_reg_write(wm, AC97_WM9713_DIG1,
dig1 | WM9713_POLL | WM9713_COO);
/* wait 3 AC97 time slots + delay for conversion */
poll_delay(delay);
data->x = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
/* wait for POLL to go low */
while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL)
&& timeout) {
udelay(AC97_LINK_FRAME);
timeout--;
}
if (timeout <= 0) {
/* If PDEN is set, we can get a timeout when pen goes up */
if (is_pden(wm))
wm->pen_probably_down = 0;
else
dev_dbg(wm->dev, "adc sample timeout");
return RC_PENUP;
}
/* read back data */
data->y = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (pil)
data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
else
data->p = DEFAULT_PRESSURE;
if (wm->mach_ops && wm->mach_ops->post_sample)
wm->mach_ops->post_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
/* check we have correct sample */
if (!(data->x & WM97XX_ADCSEL_X) || !(data->y & WM97XX_ADCSEL_Y))
goto err;
if (pil && !(data->p & WM97XX_ADCSEL_PRES))
goto err;
if (!(data->x & WM97XX_PEN_DOWN) || !(data->y & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
return RC_VALID;
err:
return 0;
}
/*
 * Sample the WM9713 touchscreen in polling mode.
 *
 * With the "coord" module parameter set, x/y[/pressure] are read in one
 * coordinate-mode conversion; otherwise each channel is polled
 * individually with a pen-down requirement.  Returns the first
 * non-RC_VALID result, or RC_VALID when all reads succeed.
 */
static int wm9713_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
{
int rc;
if (coord) {
rc = wm9713_poll_coord(wm, data);
if (rc != RC_VALID)
return rc;
} else {
rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
if (rc != RC_VALID)
return rc;
rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
if (rc != RC_VALID)
return rc;
if (pil) {
rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
&data->p);
if (rc != RC_VALID)
return rc;
} else
/* pressure measurement disabled: report a fixed value */
data->p = DEFAULT_PRESSURE;
}
return RC_VALID;
}
/*
 * Enable/disable WM9713 continuous (accelerated) mode, i.e. touch data
 * is streamed across an AC97 slot instead of being polled.
 *
 * Returns 0 on success or the negative error from the machine-specific
 * acc_startup hook.
 */
static int wm9713_acc_enable(struct wm97xx *wm, int enable)
{
u16 dig1, dig2, dig3;
int ret = 0;
/* start from the cached digitiser settings */
dig1 = wm->dig[0];
dig2 = wm->dig[1];
dig3 = wm->dig[2];
if (enable) {
/* continuous mode */
if (wm->mach_ops->acc_startup &&
(ret = wm->mach_ops->acc_startup(wm)) < 0)
return ret;
/* continuous coordinate conversions on x/y (+pressure if pil) */
dig1 &= ~WM9713_ADCSEL_MASK;
dig1 |= WM9713_CTC | WM9713_COO | WM9713_ADCSEL_X |
WM9713_ADCSEL_Y;
if (pil)
dig1 |= WM9713_ADCSEL_PRES;
/* slot, rate and settling delay for the AC97 stream */
dig2 &= ~(WM97XX_DELAY_MASK | WM97XX_SLT_MASK |
WM97XX_CM_RATE_MASK);
dig2 |= WM97XX_SLEN | WM97XX_DELAY(delay) |
WM97XX_SLT(wm->acc_slot) | WM97XX_RATE(wm->acc_rate);
dig3 |= WM9713_PDEN;
} else {
dig1 &= ~(WM9713_CTC | WM9713_COO);
dig2 &= ~WM97XX_SLEN;
dig3 &= ~WM9713_PDEN;
if (wm->mach_ops->acc_shutdown)
wm->mach_ops->acc_shutdown(wm);
}
wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);
return ret;
}
/* Codec operations exported to the wm97xx touchscreen core. */
struct wm97xx_codec_drv wm9713_codec = {
.id = WM9713_ID2,
.name = "wm9713",
.poll_sample = wm9713_poll_sample,
.poll_touch = wm9713_poll_touch,
.acc_enable = wm9713_acc_enable,
.phy_init = wm9713_phy_init,
.dig_enable = wm9713_dig_enable,
.dig_restore = wm9713_dig_restore,
.aux_prepare = wm9713_aux_prepare,
};
EXPORT_SYMBOL_GPL(wm9713_codec);
/* Module information */
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
MODULE_DESCRIPTION("WM9713 Touch Screen Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
markbencze/android_kernel_lge_hammerhead | drivers/input/touchscreen/wm9713.c | 8467 | 12515 | /*
* wm9713.c -- Codec touch driver for Wolfson WM9713 AC97 Codec.
*
* Copyright 2003, 2004, 2005, 2006, 2007, 2008 Wolfson Microelectronics PLC.
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
* Parts Copyright : Ian Molton <spyro@f2s.com>
* Andrew Zabolotny <zap@homelink.ru>
* Russell King <rmk@arm.linux.org.uk>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/wm97xx.h>
#define TS_NAME "wm97xx"
#define WM9713_VERSION "1.00"
#define DEFAULT_PRESSURE 0xb0c0
/*
* Module parameters
*/
/*
* Set internal pull up for pen detect.
*
* Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive)
* i.e. pull up resistance = 64k Ohms / rpu.
*
* Adjust this value if you are having problems with pen detect not
* detecting any down event.
*/
static int rpu = 8;
module_param(rpu, int, 0);
MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
/*
* Set current used for pressure measurement.
*
* Set pil = 2 to use 400uA
* pil = 1 to use 200uA and
* pil = 0 to disable pressure measurement.
*
* This is used to increase the range of values returned by the adc
* when measureing touchpanel pressure.
*/
static int pil;
module_param(pil, int, 0);
MODULE_PARM_DESC(pil, "Set current used for pressure measurement.");
/*
* Set threshold for pressure measurement.
*
* Pen down pressure below threshold is ignored.
*/
static int pressure = DEFAULT_PRESSURE & 0xfff;
module_param(pressure, int, 0);
MODULE_PARM_DESC(pressure, "Set threshold for pressure measurement.");
/*
* Set adc sample delay.
*
* For accurate touchpanel measurements, some settling time may be
* required between the switch matrix applying a voltage across the
* touchpanel plate and the ADC sampling the signal.
*
* This delay can be set by setting delay = n, where n is the array
* position of the delay in the array delay_table below.
* Long delays > 1ms are supported for completeness, but are not
* recommended.
*/
static int delay = 4;
module_param(delay, int, 0);
MODULE_PARM_DESC(delay, "Set adc sample delay.");
/*
* Set five_wire = 1 to use a 5 wire touchscreen.
*
* NOTE: Five wire mode does not allow for readback of pressure.
*/
static int five_wire;
module_param(five_wire, int, 0);
MODULE_PARM_DESC(five_wire, "Set to '1' to use 5-wire touchscreen.");
/*
* Set adc mask function.
*
* Sources of glitch noise, such as signals driving an LCD display, may feed
* through to the touch screen plates and affect measurement accuracy. In
* order to minimise this, a signal may be applied to the MASK pin to delay or
* synchronise the sampling.
*
* 0 = No delay or sync
* 1 = High on pin stops conversions
* 2 = Edge triggered, edge on pin delays conversion by delay param (above)
* 3 = Edge triggered, edge on pin starts conversion after delay param
*/
static int mask;
module_param(mask, int, 0);
MODULE_PARM_DESC(mask, "Set adc mask function.");
/*
* Coordinate Polling Enable.
*
* Set to 1 to enable coordinate polling. e.g. x,y[,p] is sampled together
* for every poll.
*/
static int coord;
module_param(coord, int, 0);
MODULE_PARM_DESC(coord, "Polling coordinate mode");
/*
* ADC sample delay times in uS
*/
static const int delay_table[] = {
21, /* 1 AC97 Link frames */
42, /* 2 */
84, /* 4 */
167, /* 8 */
333, /* 16 */
667, /* 32 */
1000, /* 48 */
1333, /* 64 */
2000, /* 96 */
2667, /* 128 */
3333, /* 160 */
4000, /* 192 */
4667, /* 224 */
5333, /* 256 */
6000, /* 288 */
0 /* No delay, switch matrix always on */
};
/*
 * Wait out a polled conversion.
 *
 * The delay is 3 AC97 link frames plus the touchpanel settling delay;
 * @d is an index into delay_table[] (microseconds).
 */
static inline void poll_delay(int d)
{
udelay(3 * AC97_LINK_FRAME + delay_table[d]);
}
/*
 * Set up the physical settings of the WM9713 digitiser from the module
 * parameters (rpu, five_wire, pil, delay, mask, coord) and write the
 * resulting DIG1-DIG3 register values.
 */
static void wm9713_phy_init(struct wm97xx *wm)
{
u16 dig1 = 0, dig2, dig3;
/* default values */
dig2 = WM97XX_DELAY(4) | WM97XX_SLT(5);
dig3 = WM9712_RPU(1);
/* pen-detect pull-up: resistance is 64k / rpu Ohms */
if (rpu) {
dig3 &= 0xffc0;
dig3 |= WM9712_RPU(rpu);
dev_info(wm->dev, "setting pen detect pull-up to %d Ohms\n",
64000 / rpu);
}
/* Five wire panel? (no pressure readback in this mode) */
if (five_wire) {
dig3 |= WM9713_45W;
dev_info(wm->dev, "setting 5-wire touchscreen mode.");
if (pil) {
dev_warn(wm->dev,
"Pressure measurement not supported in 5 "
"wire mode, disabling\n");
pil = 0;
}
}
/* touchpanel pressure: pil==2 -> 400uA, pil==1 -> 200uA, 0 -> off */
if (pil == 2) {
dig3 |= WM9712_PIL;
dev_info(wm->dev,
"setting pressure measurement current to 400uA.");
} else if (pil)
dev_info(wm->dev,
"setting pressure measurement current to 200uA.");
if (!pil)
pressure = 0;
/* sample settling delay: clamp out-of-range values to index 4.
 * NOTE(review): the "setting adc sample delay" message only prints
 * when the supplied delay was out of range. */
if (delay < 0 || delay > 15) {
dev_info(wm->dev, "supplied delay out of range.");
delay = 4;
dev_info(wm->dev, "setting adc sample delay to %d u Secs.",
delay_table[delay]);
}
dig2 &= 0xff0f;
dig2 |= WM97XX_DELAY(delay);
/* adc mask function (bits 4-5 of DIG3) */
dig3 |= ((mask & 0x3) << 4);
if (coord)
dig3 |= WM9713_WAIT;
/* cache register 0x5a before programming the digitiser */
wm->misc = wm97xx_reg_read(wm, 0x5a);
wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);
wm97xx_reg_write(wm, AC97_GPIO_STICKY, 0x0);
}
/*
 * wm9713_dig_enable - switch the WM9713 digitiser section on or off.
 * @wm: wm97xx core context
 * @enable: non-zero to enable, 0 to disable
 */
static void wm9713_dig_enable(struct wm97xx *wm, int enable)
{
u16 val;
if (enable) {
/* clear bit 15 of EXTENDED_MID (digitiser power-down bit, per
 * the WM9713 datasheet -- confirm), then re-enable pen detect
 * and the digitiser on top of the cached DIG3 settings */
val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
wm97xx_reg_write(wm, AC97_EXTENDED_MID, val & 0x7fff);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] |
WM97XX_PRP_DET_DIG);
wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* dummy read */
} else {
/* reverse order: drop pen-detect/digitiser bits first, then
 * set bit 15 of EXTENDED_MID again */
wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] &
~WM97XX_PRP_DET_DIG);
val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
wm97xx_reg_write(wm, AC97_EXTENDED_MID, val | 0x8000);
}
}
/* Restore the three digitiser registers from wm->dig_save[] (the values
 * stashed by wm9713_aux_prepare). */
static void wm9713_dig_restore(struct wm97xx *wm)
{
wm97xx_reg_write(wm, AC97_WM9713_DIG1, wm->dig_save[0]);
wm97xx_reg_write(wm, AC97_WM9713_DIG2, wm->dig_save[1]);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig_save[2]);
}
/*
 * Prepare for an auxiliary ADC conversion: save the current digitiser
 * settings into wm->dig_save, then clear DIG1/DIG2 and leave only the
 * pen-detect/digitiser enable bits set in DIG3.
 * wm9713_dig_restore() undoes this.
 */
static void wm9713_aux_prepare(struct wm97xx *wm)
{
memcpy(wm->dig_save, wm->dig, sizeof(wm->dig));
wm97xx_reg_write(wm, AC97_WM9713_DIG1, 0);
wm97xx_reg_write(wm, AC97_WM9713_DIG2, 0);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, WM97XX_PRP_DET_DIG);
}
/* Non-zero when pen-detect (PDEN) is enabled in the cached DIG3 value.
 * Returns the raw masked bit, not 0/1. */
static inline int is_pden(struct wm97xx *wm)
{
return wm->dig[2] & WM9713_PDEN;
}
/*
 * Read a single sample from the WM9713 adc in polling mode.
 *
 * @adcsel: WM97XX_ADCSEL_* channel, optionally OR'ed with
 *          WM97XX_PEN_DOWN to require the pen to be down first.
 * @sample: out parameter receiving the raw digitiser reading.
 *
 * Returns RC_VALID on success, or RC_PENUP when the pen is up, the
 * conversion timed out, or the chip returned data for the wrong channel.
 */
static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
{
u16 dig1;
int timeout = 5 * delay;
bool wants_pen = adcsel & WM97XX_PEN_DOWN;
if (wants_pen && !wm->pen_probably_down) {
/* cheap pen-down pre-check before programming a conversion */
u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(data & WM97XX_PEN_DOWN))
return RC_PENUP;
wm->pen_probably_down = 1;
}
/* set up digitiser */
dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
dig1 &= ~WM9713_ADCSEL_MASK;
/* WM97XX_ADCSEL_* channels need to be converted to WM9713 format */
dig1 |= 1 << ((adcsel & WM97XX_ADCSEL_MASK) >> 12);
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(adcsel);
/* kick off the conversion */
wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | WM9713_POLL);
/* wait 3 AC97 time slots + delay for conversion */
poll_delay(delay);
/* wait for POLL to go low */
while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL) &&
timeout) {
udelay(AC97_LINK_FRAME);
timeout--;
}
if (timeout <= 0) {
/* If PDEN is set, we can get a timeout when pen goes up */
if (is_pden(wm))
wm->pen_probably_down = 0;
else
dev_dbg(wm->dev, "adc sample timeout");
return RC_PENUP;
}
*sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (wm->mach_ops && wm->mach_ops->post_sample)
wm->mach_ops->post_sample(adcsel);
/* check we have correct sample */
if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
adcsel & WM97XX_ADCSEL_MASK,
*sample & WM97XX_ADCSEL_MASK);
return RC_PENUP;
}
if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
return RC_VALID;
}
/*
 * Read a full coordinate (x, y and optionally pressure) from the WM9713
 * adc in one polled coordinate-mode (COO) conversion.
 *
 * Returns RC_VALID on success, RC_PENUP when the pen is up or the
 * conversion timed out, and 0 on a bad/mismatched sample.
 * NOTE(review): the err path returns 0 rather than an RC_* constant --
 * confirm callers treat any non-RC_VALID value as a retry.
 */
static int wm9713_poll_coord(struct wm97xx *wm, struct wm97xx_data *data)
{
u16 dig1;
int timeout = 5 * delay;
if (!wm->pen_probably_down) {
/* cheap pen-down pre-check before programming a conversion */
u16 val = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(val & WM97XX_PEN_DOWN))
return RC_PENUP;
wm->pen_probably_down = 1;
}
/* set up digitiser */
dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
dig1 &= ~WM9713_ADCSEL_MASK;
if (pil)
dig1 |= WM9713_ADCSEL_PRES;
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
/* start a coordinate-mode conversion (POLL + COO) */
wm97xx_reg_write(wm, AC97_WM9713_DIG1,
dig1 | WM9713_POLL | WM9713_COO);
/* wait 3 AC97 time slots + delay for conversion */
poll_delay(delay);
data->x = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
/* wait for POLL to go low */
while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL)
&& timeout) {
udelay(AC97_LINK_FRAME);
timeout--;
}
if (timeout <= 0) {
/* If PDEN is set, we can get a timeout when pen goes up */
if (is_pden(wm))
wm->pen_probably_down = 0;
else
dev_dbg(wm->dev, "adc sample timeout");
return RC_PENUP;
}
/* read back data */
data->y = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (pil)
data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
else
data->p = DEFAULT_PRESSURE;
if (wm->mach_ops && wm->mach_ops->post_sample)
wm->mach_ops->post_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
/* check we have correct sample */
if (!(data->x & WM97XX_ADCSEL_X) || !(data->y & WM97XX_ADCSEL_Y))
goto err;
if (pil && !(data->p & WM97XX_ADCSEL_PRES))
goto err;
if (!(data->x & WM97XX_PEN_DOWN) || !(data->y & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
return RC_VALID;
err:
return 0;
}
/*
 * Sample the WM9713 touchscreen in polling mode.
 *
 * With the "coord" module parameter set, x/y[/pressure] are read in one
 * coordinate-mode conversion; otherwise each channel is polled
 * individually with a pen-down requirement.  Returns the first
 * non-RC_VALID result, or RC_VALID when all reads succeed.
 */
static int wm9713_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
{
int rc;
if (coord) {
rc = wm9713_poll_coord(wm, data);
if (rc != RC_VALID)
return rc;
} else {
rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
if (rc != RC_VALID)
return rc;
rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
if (rc != RC_VALID)
return rc;
if (pil) {
rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
&data->p);
if (rc != RC_VALID)
return rc;
} else
/* pressure measurement disabled: report a fixed value */
data->p = DEFAULT_PRESSURE;
}
return RC_VALID;
}
/*
 * Enable/disable WM9713 continuous (accelerated) mode, i.e. touch data
 * is streamed across an AC97 slot instead of being polled.
 *
 * Returns 0 on success or the negative error from the machine-specific
 * acc_startup hook.
 */
static int wm9713_acc_enable(struct wm97xx *wm, int enable)
{
u16 dig1, dig2, dig3;
int ret = 0;
/* start from the cached digitiser settings */
dig1 = wm->dig[0];
dig2 = wm->dig[1];
dig3 = wm->dig[2];
if (enable) {
/* continuous mode */
if (wm->mach_ops->acc_startup &&
(ret = wm->mach_ops->acc_startup(wm)) < 0)
return ret;
/* continuous coordinate conversions on x/y (+pressure if pil) */
dig1 &= ~WM9713_ADCSEL_MASK;
dig1 |= WM9713_CTC | WM9713_COO | WM9713_ADCSEL_X |
WM9713_ADCSEL_Y;
if (pil)
dig1 |= WM9713_ADCSEL_PRES;
/* slot, rate and settling delay for the AC97 stream */
dig2 &= ~(WM97XX_DELAY_MASK | WM97XX_SLT_MASK |
WM97XX_CM_RATE_MASK);
dig2 |= WM97XX_SLEN | WM97XX_DELAY(delay) |
WM97XX_SLT(wm->acc_slot) | WM97XX_RATE(wm->acc_rate);
dig3 |= WM9713_PDEN;
} else {
dig1 &= ~(WM9713_CTC | WM9713_COO);
dig2 &= ~WM97XX_SLEN;
dig3 &= ~WM9713_PDEN;
if (wm->mach_ops->acc_shutdown)
wm->mach_ops->acc_shutdown(wm);
}
wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);
return ret;
}
/* Codec operations exported to the wm97xx touchscreen core. */
struct wm97xx_codec_drv wm9713_codec = {
.id = WM9713_ID2,
.name = "wm9713",
.poll_sample = wm9713_poll_sample,
.poll_touch = wm9713_poll_touch,
.acc_enable = wm9713_acc_enable,
.phy_init = wm9713_phy_init,
.dig_enable = wm9713_dig_enable,
.dig_restore = wm9713_dig_restore,
.aux_prepare = wm9713_aux_prepare,
};
EXPORT_SYMBOL_GPL(wm9713_codec);
/* Module information */
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
MODULE_DESCRIPTION("WM9713 Touch Screen Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Volidol1/android_kernel_lge_d605 | arch/arm/nwfpe/fpmodule.c | 8979 | 5389 |
/*
NetWinder Floating Point Emulator
(c) Rebel.com, 1998-1999
(c) Philip Blundell, 1998-1999
Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "fpa11.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
/* XXX */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/thread_notify.h>
#include "softfloat.h"
#include "fpopcode.h"
#include "fpmodule.h"
#include "fpa11.inl"
/* kernel symbols required for signal handling */
#ifdef CONFIG_FPE_NWFPE_XP
#define NWFPE_BITS "extended"
#else
#define NWFPE_BITS "double"
#endif
#ifdef MODULE
void fp_send_sig(unsigned long sig, struct task_struct *p, int priv);
#else
#define fp_send_sig send_sig
#define kern_fp_enter fp_enter
extern char fpe_type[];
#endif
/*
 * Thread notifier callback: re-initialise the emulated FPA state
 * whenever a thread's state is flushed.
 */
static int nwfpe_notify(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *ti = v;

	if (cmd != THREAD_NOTIFY_FLUSH)
		return NOTIFY_DONE;

	nwfpe_init_fpa(&ti->fpstate);
	return NOTIFY_DONE;
}
/* Registered in fpe_init() so per-thread FP state is reset on flush. */
static struct notifier_block nwfpe_notifier_block = {
.notifier_call = nwfpe_notify,
};
/* kernel function prototypes required */
void fp_setup(void);
/* external declarations for saved kernel symbols */
extern void (*kern_fp_enter)(void);
/* Original value of fp_enter from kernel before patched by fpe_init. */
static void (*orig_fp_enter)(void);
/* forward declarations */
extern void nwfpe_enter(void);
/*
 * Module init: sanity-check emulator structure sizes, register the
 * thread-flush notifier and hook nwfpe_enter into the kernel's
 * fp_enter vector (saving the previous handler for fpe_exit).
 */
static int __init fpe_init(void)
{
if (sizeof(FPA11) > sizeof(union fp_state)) {
printk(KERN_ERR "nwfpe: bad structure size\n");
return -EINVAL;
}
if (sizeof(FPREG) != 12) {
printk(KERN_ERR "nwfpe: bad register size\n");
return -EINVAL;
}
/* bail out silently if a different FP emulator was selected */
if (fpe_type[0] && strcmp(fpe_type, "nwfpe"))
return 0;
/* Display title, version and copyright information. */
printk(KERN_WARNING "NetWinder Floating Point Emulator V0.97 ("
NWFPE_BITS " precision)\n");
thread_register_notifier(&nwfpe_notifier_block);
/* Save pointer to the old FP handler and then patch ourselves in */
orig_fp_enter = kern_fp_enter;
kern_fp_enter = nwfpe_enter;
return 0;
}
/* Module exit: unregister the notifier first, then unhook ourselves by
 * restoring the original fp_enter handler saved in fpe_init(). */
static void __exit fpe_exit(void)
{
thread_unregister_notifier(&nwfpe_notifier_block);
/* Restore the values we saved earlier. */
kern_fp_enter = orig_fp_enter;
}
/*
ScottB: November 4, 1998
Moved this function out of softfloat-specialize into fpmodule.c.
This effectively isolates all the changes required for integrating with the
Linux kernel into fpmodule.c. Porting to NetBSD should only require modifying
fpmodule.c to integrate with the NetBSD kernel (I hope!).
[1/1/99: Not quite true any more unfortunately. There is Linux-specific
code to access data in user space in some other source files at the
moment (grep for get_user / put_user calls). --philb]
This function is called by the SoftFloat routines to raise a floating
point exception. We check the trap enable byte in the FPSR, and raise
a SIGFPE exception if necessary. If not the relevant bits in the
cumulative exceptions flag byte are set and we return.
*/
#ifdef CONFIG_DEBUG_USER
/* By default, ignore inexact errors as there are far too many of them to log */
static int debug = ~BIT_IXC;
#endif
/*
 * Accumulate floating point exception flags into the FPSR and deliver
 * SIGFPE when the corresponding trap-enable bit is set (see the block
 * comment above for the history of this function).
 *
 * @flags: BIT_*C exception flags raised by the SoftFloat routines.
 */
void float_raise(signed char flags)
{
register unsigned int fpsr, cumulativeTraps;
#ifdef CONFIG_DEBUG_USER
if (flags & debug)
printk(KERN_DEBUG
"NWFPE: %s[%d] takes exception %08x at %p from %08lx\n",
current->comm, current->pid, flags,
__builtin_return_address(0), GET_USERREG()->ARM_pc);
#endif
/* Read fpsr and initialize the cumulativeTraps. */
fpsr = readFPSR();
cumulativeTraps = 0;
/* For each type of exception, the cumulative trap exception bit is only
set if the corresponding trap enable bit is not set. */
if ((!(fpsr & BIT_IXE)) && (flags & BIT_IXC))
cumulativeTraps |= BIT_IXC;
if ((!(fpsr & BIT_UFE)) && (flags & BIT_UFC))
cumulativeTraps |= BIT_UFC;
if ((!(fpsr & BIT_OFE)) && (flags & BIT_OFC))
cumulativeTraps |= BIT_OFC;
if ((!(fpsr & BIT_DZE)) && (flags & BIT_DZC))
cumulativeTraps |= BIT_DZC;
if ((!(fpsr & BIT_IOE)) && (flags & BIT_IOC))
cumulativeTraps |= BIT_IOC;
/* Set the cumulative exceptions flags. */
if (cumulativeTraps)
writeFPSR(fpsr | cumulativeTraps);
/* Raise an exception if necessary: trap-enable bits sit 16 bits
above the corresponding flag bits. */
if (fpsr & (flags << 16))
fp_send_sig(SIGFPE, current, 1);
}
module_init(fpe_init);
module_exit(fpe_exit);
MODULE_AUTHOR("Scott Bambrough <scottb@rebel.com>");
MODULE_DESCRIPTION("NWFPE floating point emulator (" NWFPE_BITS " precision)");
MODULE_LICENSE("GPL");
#ifdef CONFIG_DEBUG_USER
module_param(debug, int, 0644);
#endif
| gpl-2.0 |
oloendithas/SM-T705_Exynos5420 | drivers/s390/cio/device_id.c | 9235 | 5853 | /*
* CCW device SENSE ID I/O handling.
*
* Copyright IBM Corp. 2002,2009
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/ccwdev.h>
#include <asm/setup.h>
#include <asm/cio.h>
#include <asm/diag.h>
#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"
#define SENSE_ID_RETRIES 256
#define SENSE_ID_TIMEOUT (10 * HZ)
#define SENSE_ID_MIN_LEN 4
#define SENSE_ID_BASIC_LEN 7
/**
 * diag210_to_senseid - convert diag 0x210 data to sense id information
 * @senseid: sense id
 * @diag: diag 0x210 data
 *
 * Return 0 on success, non-zero otherwise.
 */
static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
{
	/* Mapping from z/VM virtual device class/type to control-unit type. */
	static const struct {
		int class, type, cu_type;
	} vm_devices[] = {
		{ 0x08, 0x01, 0x3480 },
		{ 0x08, 0x02, 0x3430 },
		{ 0x08, 0x10, 0x3420 },
		{ 0x08, 0x42, 0x3424 },
		{ 0x08, 0x44, 0x9348 },
		{ 0x08, 0x81, 0x3490 },
		{ 0x08, 0x82, 0x3422 },
		{ 0x10, 0x41, 0x1403 },
		{ 0x10, 0x42, 0x3211 },
		{ 0x10, 0x43, 0x3203 },
		{ 0x10, 0x45, 0x3800 },
		{ 0x10, 0x47, 0x3262 },
		{ 0x10, 0x48, 0x3820 },
		{ 0x10, 0x49, 0x3800 },
		{ 0x10, 0x4a, 0x4245 },
		{ 0x10, 0x4b, 0x4248 },
		{ 0x10, 0x4d, 0x3800 },
		{ 0x10, 0x4e, 0x3820 },
		{ 0x10, 0x4f, 0x3820 },
		{ 0x10, 0x82, 0x2540 },
		{ 0x10, 0x84, 0x3525 },
		{ 0x20, 0x81, 0x2501 },
		{ 0x20, 0x82, 0x2540 },
		{ 0x20, 0x84, 0x3505 },
		{ 0x40, 0x01, 0x3278 },
		{ 0x40, 0x04, 0x3277 },
		{ 0x40, 0x80, 0x2250 },
		{ 0x40, 0xc0, 0x5080 },
		{ 0x80, 0x00, 0x3215 },
	};
	int idx;

	/* Special case for osa devices. */
	if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
		senseid->cu_type = 0x3088;
		senseid->cu_model = 0x60;
		senseid->reserved = 0xff;
		return 0;
	}
	for (idx = 0; idx < ARRAY_SIZE(vm_devices); idx++) {
		if (diag->vrdcvcla != vm_devices[idx].class ||
		    diag->vrdcvtyp != vm_devices[idx].type)
			continue;
		senseid->cu_type = vm_devices[idx].cu_type;
		senseid->reserved = 0xff;
		return 0;
	}
	return -ENODEV;
}
/**
 * diag210_get_dev_info - retrieve device information via diag 0x210
 * @cdev: ccw device
 *
 * Fills in cdev's senseid data from the z/VM diag 0x210 response.
 * Only works for subchannel set 0.
 *
 * Returns zero on success, non-zero otherwise.
 */
static int diag210_get_dev_info(struct ccw_device *cdev)
{
struct ccw_dev_id *dev_id = &cdev->private->dev_id;
struct senseid *senseid = &cdev->private->senseid;
struct diag210 diag_data;
int rc;
if (dev_id->ssid != 0)
return -ENODEV;
memset(&diag_data, 0, sizeof(diag_data));
diag_data.vrdcdvno = dev_id->devno;
diag_data.vrdclen = sizeof(diag_data);
rc = diag210(&diag_data);
CIO_TRACE_EVENT(4, "diag210");
CIO_HEX_EVENT(4, &rc, sizeof(rc));
CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
/* rc 0 and 2 both indicate usable data */
if (rc != 0 && rc != 2)
goto err_failed;
if (diag210_to_senseid(senseid, &diag_data))
goto err_unknown;
return 0;
err_unknown:
CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
dev_id->ssid, dev_id->devno);
return -ENODEV;
err_failed:
CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
dev_id->ssid, dev_id->devno, rc);
return -ENODEV;
}
/*
* Initialize SENSE ID data.
*/
static void snsid_init(struct ccw_device *cdev)
{
cdev->private->flags.esid = 0;
memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
cdev->private->senseid.cu_type = 0xffff;
}
/*
 * Check for complete SENSE ID data.
 *
 * Returns 0 when the received data is complete and compatible,
 * -EAGAIN (after re-initialising) when it is incomplete, and
 * -EOPNOTSUPP when the reserved byte indicates incompatible data.
 */
static int snsid_check(struct ccw_device *cdev, void *data)
{
struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
/* bytes actually transferred = buffer size - residual count */
int len = sizeof(struct senseid) - scsw->count;
/* Check for incomplete SENSE ID data. */
if (len < SENSE_ID_MIN_LEN)
goto out_restart;
if (cdev->private->senseid.cu_type == 0xffff)
goto out_restart;
/* Check for incompatible SENSE ID data. */
if (cdev->private->senseid.reserved != 0xff)
return -EOPNOTSUPP;
/* Check for extended-identification information. */
if (len > SENSE_ID_BASIC_LEN)
cdev->private->flags.esid = 1;
return 0;
out_restart:
snsid_init(cdev);
return -EAGAIN;
}
/*
 * Process SENSE ID request result.
 *
 * On failure under z/VM, fall back to diag 0x210 to obtain the device
 * information, then log the outcome and complete the sense-id phase.
 */
static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_dev_id *id = &cdev->private->dev_id;
struct senseid *senseid = &cdev->private->senseid;
int vm = 0;
if (rc && MACHINE_IS_VM) {
/* Try diag 0x210 fallback on z/VM. */
snsid_init(cdev);
if (diag210_get_dev_info(cdev) == 0) {
rc = 0;
vm = 1;
}
}
CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
"%04x/%02x%s\n", id->ssid, id->devno, rc,
senseid->cu_type, senseid->cu_model, senseid->dev_type,
senseid->dev_model, vm ? " (diag210)" : "");
ccw_device_sense_id_done(cdev, rc);
}
/**
 * ccw_device_sense_id_start - perform SENSE ID
 * @cdev: ccw device
 *
 * Execute a SENSE ID channel program on @cdev to update its sense id
 * information. When finished, call ccw_device_sense_id_done with a
 * return code specifying the result.
 */
void ccw_device_sense_id_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
struct ccw1 *cp = cdev->private->iccws;
CIO_TRACE_EVENT(4, "snsid");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
/* Data setup. */
snsid_init(cdev);
/* Channel program setup: single SENSE ID CCW targeting the senseid
 * buffer; SLI tolerates short reads, which snsid_check validates. */
cp->cmd_code = CCW_CMD_SENSE_ID;
cp->cda = (u32) (addr_t) &cdev->private->senseid;
cp->count = sizeof(struct senseid);
cp->flags = CCW_FLAG_SLI;
/* Request setup. */
memset(req, 0, sizeof(*req));
req->cp = cp;
req->timeout = SENSE_ID_TIMEOUT;
req->maxretries = SENSE_ID_RETRIES;
/* restrict to paths that are both installed and operational */
req->lpm = sch->schib.pmcw.pam & sch->opm;
req->check = snsid_check;
req->callback = snsid_callback;
ccw_request_start(cdev);
}
| gpl-2.0 |
jdheiner/SGH-T769_Kernel_ICS | drivers/scsi/sun3x_esp.c | 9235 | 7400 | /* sun3x_esp.c: ESP front-end for Sun3x systems.
*
* Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/sun3x.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/dvma.h>
/* DMA controller reg offsets */
#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
#include <scsi/scsi_host.h>
#include "esp_scsi.h"
#define DRV_MODULE_NAME "sun3x_esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "1.000"
#define DRV_MODULE_RELDATE "Nov 1, 2007"
/*
* m68k always assumes readl/writel operate on little endian
* mmio space; this is wrong at least for Sun3x, so we
* need to workaround this until a proper way is found
*/
#if 0
#define dma_read32(REG) \
readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
writel((VAL), esp->dma_regs + (REG))
#else
#define dma_read32(REG) \
*(volatile u32 *)(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0)
#endif
/* Write one ESP register; registers are spaced 4 bytes apart on Sun3x. */
static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	unsigned long offset = reg * 4UL;

	writeb(val, esp->regs + offset);
}
/* Read one ESP register; registers are spaced 4 bytes apart on Sun3x. */
static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
{
	unsigned long offset = reg * 4UL;

	return readb(esp->regs + offset);
}
/* Thin wrapper: map a single buffer for DMA via the generic DMA API. */
static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
size_t sz, int dir)
{
return dma_map_single(esp->dev, buf, sz, dir);
}
/* Thin wrapper: map a scatter-gather list for DMA via the generic DMA API. */
static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
return dma_map_sg(esp->dev, sg, num_sg, dir);
}
/* Thin wrapper: unmap a single DMA mapping via the generic DMA API. */
static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
size_t sz, int dir)
{
dma_unmap_single(esp->dev, addr, sz, dir);
}
/* Thin wrapper: unmap a scatter-gather DMA mapping via the generic DMA API. */
static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
dma_unmap_sg(esp->dev, sg, num_sg, dir);
}
static int sun3x_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
return 1;
return 0;
}
/* Pulse the SCSI-reset bit in the DMA CSR, then enable DMA interrupts. */
static void sun3x_esp_reset_dma(struct esp *esp)
{
u16 val;
val = dma_read32(DMA_CSR);
dma_write32(val | DMA_RST_SCSI, DMA_CSR);
dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
/* Enable interrupts. */
val = dma_read32(DMA_CSR);
dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
/*
 * If the DMA FIFO holds residual data, request a drain and busy-wait
 * (up to ~1000us) for the controller to finish.
 */
static void sun3x_esp_dma_drain(struct esp *esp)
{
u32 csr;
int lim;
csr = dma_read32(DMA_CSR);
/* nothing buffered -- no drain needed */
if (!(csr & DMA_FIFO_ISDRAIN))
return;
dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
lim = 1000;
while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
if (--lim == 0) {
printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
esp->host->unique_id);
break;
}
udelay(1);
}
}
/*
 * Wait for any pending DMA read to complete (up to ~1000us), then
 * disable DMA and pulse the FIFO-invalidate bit to discard stale data.
 */
static void sun3x_esp_dma_invalidate(struct esp *esp)
{
u32 val;
int lim;
lim = 1000;
while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
if (--lim == 0) {
printk(KERN_ALERT PFX "esp%d: DMA will not "
"invalidate!\n", esp->host->unique_id);
break;
}
udelay(1);
}
/* disable DMA and the write/byte-count controls, invalidate FIFO */
val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
val |= DMA_FIFO_INV;
dma_write32(val, DMA_CSR);
val &= ~DMA_FIFO_INV;
dma_write32(val, DMA_CSR);
}
/*
 * Program a DMA transfer and issue the ESP command.
 *
 * Only the low 16 bits of esp_count are written (TCLOW/TCMED); the
 * direction bit is set from @write before the address is loaded.
 * @cmd must have ESP_CMD_DMA set.
 */
static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
{
u32 csr;
BUG_ON(!(cmd & ESP_CMD_DMA));
sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
csr = dma_read32(DMA_CSR);
csr |= DMA_ENABLE;
if (write)
csr |= DMA_ST_WRITE;
else
csr &= ~DMA_ST_WRITE;
dma_write32(csr, DMA_CSR);
dma_write32(addr, DMA_ADDR);
scsi_esp_cmd(esp, cmd);
}
/* Return 1 if the DMA controller has flagged a handler error in the
 * CSR, 0 otherwise. */
static int sun3x_esp_dma_error(struct esp *esp)
{
    return (dma_read32(DMA_CSR) & DMA_HNDL_ERROR) ? 1 : 0;
}
/* Hook table handed to the shared ESP core driver (esp_scsi). */
static const struct esp_driver_ops sun3x_esp_ops = {
    .esp_write8 = sun3x_esp_write8,
    .esp_read8 = sun3x_esp_read8,
    .map_single = sun3x_esp_map_single,
    .map_sg = sun3x_esp_map_sg,
    .unmap_single = sun3x_esp_unmap_single,
    .unmap_sg = sun3x_esp_unmap_sg,
    .irq_pending = sun3x_esp_irq_pending,
    .reset_dma = sun3x_esp_reset_dma,
    .dma_drain = sun3x_esp_dma_drain,
    .dma_invalidate = sun3x_esp_dma_invalidate,
    .send_dma_cmd = sun3x_esp_send_dma_cmd,
    .dma_error = sun3x_esp_dma_error,
};
/*
 * Probe one sun3x ESP platform device: allocate the SCSI host, map the
 * ESP and DMA register windows, allocate the 16-byte command block in
 * coherent DMA memory, install the shared interrupt handler, and
 * register with the ESP core.  All acquired resources are released in
 * reverse order through the goto chain on failure.
 *
 * Fixes vs. the original:
 *  - a failed ioremap of the ESP registers now unwinds via fail_unlink
 *    instead of iounmap()ing a NULL pointer;
 *  - the ioremap of the DMA registers is checked (it was used
 *    unconditionally before);
 *  - platform_get_irq() failure is detected and propagated.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit esp_sun3x_probe(struct platform_device *dev)
{
    struct scsi_host_template *tpnt = &scsi_esp_template;
    struct Scsi_Host *host;
    struct esp *esp;
    struct resource *res;
    int err = -ENOMEM;

    host = scsi_host_alloc(tpnt, sizeof(struct esp));
    if (!host)
        goto fail;

    host->max_id = 8;
    esp = shost_priv(host);
    esp->host = host;
    esp->dev = dev;
    esp->ops = &sun3x_esp_ops;

    res = platform_get_resource(dev, IORESOURCE_MEM, 0);
    if (!res || !res->start)
        goto fail_unlink;

    esp->regs = ioremap_nocache(res->start, 0x20);
    if (!esp->regs)
        goto fail_unlink;

    res = platform_get_resource(dev, IORESOURCE_MEM, 1);
    if (!res || !res->start)
        goto fail_unmap_regs;

    esp->dma_regs = ioremap_nocache(res->start, 0x10);
    if (!esp->dma_regs)
        goto fail_unmap_regs;

    esp->command_block = dma_alloc_coherent(esp->dev, 16,
                                            &esp->command_block_dma,
                                            GFP_KERNEL);
    if (!esp->command_block)
        goto fail_unmap_regs_dma;

    host->irq = platform_get_irq(dev, 0);
    if (host->irq < 0) {
        err = host->irq;
        goto fail_unmap_command_block;
    }
    err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
                      "SUN3X ESP", esp);
    if (err < 0)
        goto fail_unmap_command_block;

    esp->scsi_id = 7;
    esp->host->this_id = esp->scsi_id;
    esp->scsi_id_mask = (1 << esp->scsi_id);
    esp->cfreq = 20000000;

    dev_set_drvdata(&dev->dev, esp);

    err = scsi_esp_register(esp, &dev->dev);
    if (err)
        goto fail_free_irq;

    return 0;

fail_free_irq:
    free_irq(host->irq, esp);
fail_unmap_command_block:
    dma_free_coherent(esp->dev, 16,
                      esp->command_block,
                      esp->command_block_dma);
fail_unmap_regs_dma:
    iounmap(esp->dma_regs);
fail_unmap_regs:
    iounmap(esp->regs);
fail_unlink:
    scsi_host_put(host);
fail:
    return err;
}
/*
 * Tear down a sun3x ESP instance: unregister from the ESP core, mask
 * DMA interrupts, release the IRQ and the coherent command block, and
 * unmap both register windows (the original leaked the ioremap()ed
 * regions) before dropping the host reference.
 */
static int __devexit esp_sun3x_remove(struct platform_device *dev)
{
    struct esp *esp = dev_get_drvdata(&dev->dev);
    unsigned int irq = esp->host->irq;
    u32 val;

    scsi_esp_unregister(esp);

    /* Disable interrupts.  Must precede iounmap of dma_regs. */
    val = dma_read32(DMA_CSR);
    dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
    free_irq(irq, esp);

    dma_free_coherent(esp->dev, 16,
                      esp->command_block,
                      esp->command_block_dma);

    /* Release the register mappings acquired in probe. */
    iounmap(esp->dma_regs);
    iounmap(esp->regs);

    scsi_host_put(esp->host);

    return 0;
}
/* Platform-bus glue: the device is instantiated by sun3x board code
 * under the name "sun3x_esp". */
static struct platform_driver esp_sun3x_driver = {
    .probe = esp_sun3x_probe,
    .remove = __devexit_p(esp_sun3x_remove),
    .driver = {
        .name = "sun3x_esp",
        .owner = THIS_MODULE,
    },
};

/* Module entry point: register the platform driver. */
static int __init sun3x_esp_init(void)
{
    return platform_driver_register(&esp_sun3x_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit sun3x_esp_exit(void)
{
    platform_driver_unregister(&esp_sun3x_driver);
}

MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sun3x_esp_init);
module_exit(sun3x_esp_exit);

MODULE_ALIAS("platform:sun3x_esp");
| gpl-2.0 |
getitnowmarketing/mecha_2.6.32 | arch/mips/txx9/generic/setup_tx3927.c | 9491 | 3728 | /*
* TX3927 setup routines
* Based on linux/arch/mips/txx9/jmr3927/setup.c
*
* Copyright 2001 MontaVista Software Inc.
* Copyright (C) 2000-2001 Toshiba Corporation
* Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/param.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
#include <asm/mipsregs.h>
#include <asm/txx9irq.h>
#include <asm/txx9tmr.h>
#include <asm/txx9pio.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/tx3927.h>
/* Register TMR channel 2 as the TX3927 watchdog timer. */
void __init tx3927_wdt_init(void)
{
    txx9_wdt_init(TX3927_TMR_REG(2));
}
/*
 * Core TX3927 SoC bring-up: register the chip's I/O resources, derive
 * clock-related globals, configure the chip-config (CCFG) register and
 * initialise the timer, DMA and PIO blocks.  Called once from board
 * setup code.
 */
void __init tx3927_setup(void)
{
    int i;
    unsigned int conf;

    txx9_reg_res_init(TX3927_REV_PCODE(), TX3927_REG_BASE,
                      TX3927_REG_SIZE);

    /* SDRAMC,ROMC are configured by PROM */
    for (i = 0; i < 8; i++) {
        if (!(tx3927_romcptr->cr[i] & 0x8))
            continue;    /* disabled */
        txx9_ce_res[i].start = (unsigned long)TX3927_ROMC_BA(i);
        txx9_ce_res[i].end =
            txx9_ce_res[i].start + TX3927_ROMC_SIZE(i) - 1;
        request_resource(&iomem_resource, &txx9_ce_res[i]);
    }

    /* clocks: the G-bus runs at half the CPU clock */
    txx9_gbus_clock = txx9_cpu_clock / 2;
    /* change default value to udelay/mdelay take reasonable time */
    loops_per_jiffy = txx9_cpu_clock / HZ / 2;

    /* CCFG */
    /* enable Timeout BusError */
    if (txx9_ccfg_toeon)
        tx3927_ccfgptr->ccfg |= TX3927_CCFG_TOE;

    /* clear BusErrorOnWrite flag */
    tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_BEOW;
    if (read_c0_conf() & TX39_CONF_WBON)
        /* Disable PCI snoop */
        tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_PSNP;
    else
        /* Enable PCI SNOOP - with write through only */
        tx3927_ccfgptr->ccfg |= TX3927_CCFG_PSNP;
    /* do reset on watchdog */
    tx3927_ccfgptr->ccfg |= TX3927_CCFG_WR;

    printk(KERN_INFO "TX3927 -- CRIR:%08lx CCFG:%08lx PCFG:%08lx\n",
           tx3927_ccfgptr->crir,
           tx3927_ccfgptr->ccfg, tx3927_ccfgptr->pcfg);

    /* TMR */
    for (i = 0; i < TX3927_NR_TMR; i++)
        txx9_tmr_init(TX3927_TMR_REG(i));

    /* DMA: master-disable, reset every channel, then re-enable */
    tx3927_dmaptr->mcr = 0;
    for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) {
        /* reset channel */
        tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST;
        tx3927_dmaptr->ch[i].ccr = 0;
    }
    /* enable DMA */
#ifdef __BIG_ENDIAN
    tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN;
#else
    tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN | TX3927_DMA_MCR_LE;
#endif

    /* PIO: unmask all pins and register the 16-line GPIO block */
    __raw_writel(0, &tx3927_pioptr->maskcpu);
    __raw_writel(0, &tx3927_pioptr->maskext);
    txx9_gpio_init(TX3927_PIO_REG, 0, 16);

    /* Report the D-cache write policy from the CP0 config register. */
    conf = read_c0_conf();
    if (conf & TX39_CONF_DCE) {
        if (!(conf & TX39_CONF_WBON))
            pr_info("TX3927 D-Cache WriteThrough.\n");
        else if (!(conf & TX39_CONF_CWFON))
            pr_info("TX3927 D-Cache WriteBack.\n");
        else
            pr_info("TX3927 D-Cache WriteBack (CWF) .\n");
    }
}
/* Wire one TMR channel up as the clockevent device (with its IRQ) and
 * another as the free-running clocksource, both clocked by IMCLK. */
void __init tx3927_time_init(unsigned int evt_tmrnr, unsigned int src_tmrnr)
{
    txx9_clockevent_init(TX3927_TMR_REG(evt_tmrnr),
                         TXX9_IRQ_BASE + TX3927_IR_TMR(evt_tmrnr),
                         TXX9_IMCLK);
    txx9_clocksource_init(TX3927_TMR_REG(src_tmrnr), TXX9_IMCLK);
}
/* Register both on-chip serial ports; bit i of @cts_mask enables CTS
 * handling for port i. */
void __init tx3927_sio_init(unsigned int sclk, unsigned int cts_mask)
{
    int i;

    for (i = 0; i < 2; i++)
        txx9_sio_init(TX3927_SIO_REG(i),
                      TXX9_IRQ_BASE + TX3927_IR_SIO(i),
                      i, sclk, (1 << i) & cts_mask);
}
/* Register a physmap flash device for ROMC channel @ch, using the
 * address window recorded in txx9_ce_res[] by tx3927_setup().  Bails
 * out silently if the channel is disabled in the ROMC control reg. */
void __init tx3927_mtd_init(int ch)
{
    struct physmap_flash_data pdata = {
        /* bus width in bytes, read from the ROMC config */
        .width = TX3927_ROMC_WIDTH(ch) / 8,
    };
    unsigned long start = txx9_ce_res[ch].start;
    unsigned long size = txx9_ce_res[ch].end - start + 1;

    if (!(tx3927_romcptr->cr[ch] & 0x8))
        return;    /* disabled */
    txx9_physmap_flash_init(ch, start, size, &pdata);
}
| gpl-2.0 |
ezeteze/android_kernel_huawei_u8815_slim | tools/perf/util/quote.c | 13587 | 1265 | #include "cache.h"
#include "quote.h"
/* Help to copy the thing properly quoted for the shell safety.
* any single quote is replaced with '\'', any exclamation point
 * is replaced with '\!', and the whole thing is enclosed in a
 * single quote pair.
* E.g.
* original sq_quote result
* name ==> name ==> 'name'
* a b ==> a b ==> 'a b'
* a'b ==> a'\''b ==> 'a'\''b'
* a!b ==> a'\!'b ==> 'a'\!'b'
*/
/* Does C need the close-quote/backslash treatment inside a
 * single-quoted shell word?  Only ' and ! do. */
static inline int need_bs_quote(char c)
{
    switch (c) {
    case '\'':
    case '!':
        return 1;
    default:
        return 0;
    }
}
/*
 * Append SRC to DST as one single-quoted shell word.  Runs of ordinary
 * characters are copied verbatim; each ' or ! is emitted as
 * close-quote, backslash, char, reopen-quote ('\'' / '\!').  If DST's
 * buffer aliases SRC, the buffer is detached first so the copy reads
 * from stable memory, and freed at the end.
 */
static void sq_quote_buf(struct strbuf *dst, const char *src)
{
    char *to_free = NULL;

    /* Guard against quoting a strbuf into itself. */
    if (dst->buf == src)
        to_free = strbuf_detach(dst, NULL);

    strbuf_addch(dst, '\'');
    while (*src) {
        size_t len = strcspn(src, "'!");
        strbuf_add(dst, src, len);
        src += len;
        while (need_bs_quote(*src)) {
            strbuf_addstr(dst, "'\\");
            strbuf_addch(dst, *src++);
            strbuf_addch(dst, '\'');
        }
    }
    strbuf_addch(dst, '\'');
    free(to_free);
}
/*
 * Append every element of the NULL-terminated ARGV to DST, each
 * preceded by a space and shell-quoted via sq_quote_buf().  If MAXLEN
 * is non-zero and the accumulated length exceeds it, die().
 */
void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
{
    int i;

    /* Copy into destination buffer. */
    strbuf_grow(dst, 255);
    for (i = 0; argv[i]; ++i) {
        strbuf_addch(dst, ' ');
        sq_quote_buf(dst, argv[i]);
        if (maxlen && dst->len > maxlen)
            die("Too many or long arguments");
    }
}
| gpl-2.0 |
CandyDevices/kernel_htc_msm8974 | fs/ext4/file.c | 20 | 6433 | /*
* linux/fs/ext4/file.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/file.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* ext4 fs regular file handling primitives
*
* 64-bit file support on 64-bit platforms by Jakub Jelinek
* (jj@sunsite.ms.mff.cuni.cz)
*/
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
/*
 * ->release for ext4 files.  Force delayed-allocation block allocation
 * if the inode was flagged for it at close, discard preallocations on
 * the last writer's close, and free any htree directory-scan state.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
    if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
        ext4_alloc_da_blocks(inode);
        ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
    }
    /* Only the last closing writer with no reserved delalloc blocks
     * discards the inode's preallocated extents. */
    if ((filp->f_mode & FMODE_WRITE) &&
        (atomic_read(&inode->i_writecount) == 1) &&
        !EXT4_I(inode)->i_reserved_data_blocks)
    {
        down_write(&EXT4_I(inode)->i_data_sem);
        ext4_discard_preallocations(inode);
        up_write(&EXT4_I(inode)->i_data_sem);
    }
    if (is_dx(inode) && filp->private_data)
        ext4_htree_free_dir_info(filp->private_data);

    return 0;
}
/* Block until all outstanding AIO/DIO on unwritten extents of this
 * inode has completed (i_aiodio_unwritten drops to zero). */
static void ext4_aiodio_wait(struct inode *inode)
{
    wait_queue_head_t *wq = ext4_ioend_wq(inode);

    wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0));
}
/*
* This tests whether the IO in question is block-aligned or not.
* Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
* are converted to written only after the IO is complete. Until they are
* mapped, these blocks appear as holes, so dio_zero_block() will assume that
* it needs to zero out portions of the start and/or end block. If 2 AIO
* threads are at work on the same unwritten block, they must be synchronized
* or one thread will zero the other's data, causing corruption.
*/
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
                   unsigned long nr_segs, loff_t pos)
{
    /* Mask of the in-block offset bits for this filesystem. */
    int blockmask = inode->i_sb->s_blocksize - 1;
    loff_t final_size = pos + iov_length(iov, nr_segs);

    /* Writes entirely beyond EOF cannot race over a shared block. */
    if (pos >= i_size_read(inode))
        return 0;

    /* Unaligned if either end of the range sits mid-block. */
    return ((pos | final_size) & blockmask) ? 1 : 0;
}
/*
 * ->aio_write for ext4.  For non-extent (bitmap-format) files, enforce
 * the s_bitmap_maxbytes size limit, shortening the iovec when a write
 * would cross it.  For unaligned O_DIRECT AIO, serialise against all
 * other AIO/DIO on the inode (see the comment above
 * ext4_unaligned_aio) to avoid block-zeroing corruption, and warn
 * (rate-limited to once per day) about the performance cost.
 */
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
{
    struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
    int unaligned_aio = 0;
    int ret;

    if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        size_t length = iov_length(iov, nr_segs);

        /* Reject writes starting at or beyond the format limit. */
        if ((pos > sbi->s_bitmap_maxbytes ||
             (pos == sbi->s_bitmap_maxbytes && length > 0)))
            return -EFBIG;

        /* Clip writes that would cross the limit. */
        if (pos + length > sbi->s_bitmap_maxbytes) {
            nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
                                  sbi->s_bitmap_maxbytes - pos);
        }
    } else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
                        !is_sync_kiocb(iocb))) {
        unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
    }

    if (unaligned_aio) {
        static unsigned long unaligned_warn_time;

        /* At most one warning per day. */
        if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
            ext4_msg(inode->i_sb, KERN_WARNING,
                     "Unaligned AIO/DIO on inode %ld by %s; "
                     "performance will be poor.",
                     inode->i_ino, current->comm);
        /* Exclude and drain all other AIO/DIO on this inode. */
        mutex_lock(ext4_aio_mutex(inode));
        ext4_aiodio_wait(inode);
    }

    ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

    if (unaligned_aio)
        mutex_unlock(ext4_aio_mutex(inode));

    return ret;
}
/* VM operations for ext4 mappings: generic fault handling plus ext4's
 * own page_mkwrite for writable shared mappings. */
static const struct vm_operations_struct ext4_file_vm_ops = {
    .fault = filemap_fault,
    .page_mkwrite = ext4_page_mkwrite,
};

/* ->mmap: refuse mappings when no readpage a_op exists, update atime,
 * and install the vm_ops above. */
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct address_space *mapping = file->f_mapping;

    if (!mapping->a_ops->readpage)
        return -ENOEXEC;
    file_accessed(file);
    vma->vm_ops = &ext4_file_vm_ops;
    vma->vm_flags |= VM_CAN_NONLINEAR;
    return 0;
}
/*
 * ->open for ext4 files.  On the first open of a writable filesystem,
 * sample the mount-point path into the superblock's s_last_mounted
 * (a post-mortem diagnostic aid).  For writable opens on a journalled
 * fs, lazily attach a jbd2_inode to the ext4 inode, racing safely via
 * i_lock (the loser frees its allocation).  Finally run quota's
 * file-open hook.
 */
static int ext4_file_open(struct inode * inode, struct file * filp)
{
    struct super_block *sb = inode->i_sb;
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    struct ext4_inode_info *ei = EXT4_I(inode);
    struct vfsmount *mnt = filp->f_path.mnt;
    struct path path;
    char buf[64], *cp;

    if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                 !(sb->s_flags & MS_RDONLY))) {
        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        if (!IS_ERR(cp)) {
            strlcpy(sbi->s_es->s_last_mounted, cp,
                    sizeof(sbi->s_es->s_last_mounted));
            ext4_mark_super_dirty(sb);
        }
    }
    if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
        /* Allocate outside the lock; GFP_KERNEL may sleep. */
        struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);

        spin_lock(&inode->i_lock);
        if (!ei->jinode) {
            if (!jinode) {
                spin_unlock(&inode->i_lock);
                return -ENOMEM;
            }
            ei->jinode = jinode;
            jbd2_journal_init_jbd_inode(ei->jinode, inode);
            jinode = NULL;
        }
        spin_unlock(&inode->i_lock);
        /* Another opener won the race; drop our allocation. */
        if (unlikely(jinode != NULL))
            jbd2_free_inode(jinode);
    }
    return dquot_file_open(inode, filp);
}
/*
 * ->llseek for ext4.  The maximum seekable offset depends on the file
 * layout: extent-mapped files go up to the sb-wide limit, bitmap
 * (indirect) files only up to s_bitmap_maxbytes.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
{
    struct inode *inode = file->f_mapping->host;
    loff_t maxbytes = ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)
        ? inode->i_sb->s_maxbytes
        : EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;

    return generic_file_llseek_size(file, offset, origin, maxbytes);
}
/* VFS file operations for regular ext4 files. */
const struct file_operations ext4_file_operations = {
    .llseek = ext4_llseek,
    .read = do_sync_read,
    .write = do_sync_write,
    .aio_read = generic_file_aio_read,
    .aio_write = ext4_file_write,
    .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = ext4_compat_ioctl,
#endif
    .mmap = ext4_file_mmap,
    .open = ext4_file_open,
    .release = ext4_release_file,
    .fsync = ext4_sync_file,
    .splice_read = generic_file_splice_read,
    .splice_write = generic_file_splice_write,
    .fallocate = ext4_fallocate,
};

/* VFS inode operations for regular ext4 files. */
const struct inode_operations ext4_file_inode_operations = {
    .setattr = ext4_setattr,
    .getattr = ext4_getattr,
#ifdef CONFIG_EXT4_FS_XATTR
    .setxattr = generic_setxattr,
    .getxattr = generic_getxattr,
    .listxattr = ext4_listxattr,
    .removexattr = generic_removexattr,
#endif
    .get_acl = ext4_get_acl,
    .fiemap = ext4_fiemap,
};
| gpl-2.0 |
jwpi/glibc | wcsmbs/wcscasecmp.c | 20 | 1860 | /* Copyright (C) 1991-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <wctype.h>
#include <wchar.h>
#ifndef weak_alias
# define __wcscasecmp wcscasecmp
# define TOLOWER(Ch) towlower (Ch)
#else
# ifdef USE_IN_EXTENDED_LOCALE_MODEL
# define __wcscasecmp __wcscasecmp_l
# define TOLOWER(Ch) __towlower_l ((Ch), loc)
# else
# define TOLOWER(Ch) towlower (Ch)
# endif
#endif
#ifdef USE_IN_EXTENDED_LOCALE_MODEL
# define LOCALE_PARAM , loc
# define LOCALE_PARAM_DECL __locale_t loc;
#else
# define LOCALE_PARAM
# define LOCALE_PARAM_DECL
#endif
/* Compare S1 and S2, ignoring case, returning less than, equal to or
greater than zero if S1 is lexicographically less than,
equal to or greater than S2. */
/* Compare S1 and S2, ignoring case, returning less than, equal to or
   greater than zero if S1 is lexicographically less than,
   equal to or greater than S2.

   NOTE: this is an old-style (K&R) definition because LOCALE_PARAM /
   LOCALE_PARAM_DECL expand to either nothing or an extra `loc'
   argument depending on whether the extended-locale variant is being
   built; TOLOWER likewise resolves to towlower or __towlower_l.  */
int
__wcscasecmp (s1, s2 LOCALE_PARAM)
     const wchar_t *s1;
     const wchar_t *s2;
     LOCALE_PARAM_DECL
{
  wint_t c1, c2;

  /* Identical pointers compare equal without scanning.  */
  if (s1 == s2)
    return 0;

  do
    {
      c1 = TOLOWER (*s1++);
      c2 = TOLOWER (*s2++);
      if (c1 == L'\0')
        break;
    }
  while (c1 == c2);

  /* Difference of the first mismatching (folded) characters, or 0 at
     a shared terminator.  */
  return c1 - c2;
}
#ifndef __wcscasecmp
weak_alias (__wcscasecmp, wcscasecmp)
#endif
| gpl-2.0 |
svn2github/valgrind-master-mirror | massif/tests/overloaded-new.cpp | 20 | 1174 | // operator new(unsigned)
// operator new[](unsigned)
// operator new(unsigned, std::nothrow_t const&)
// operator new[](unsigned, std::nothrow_t const&)
#include <stdlib.h>
#include <new>
using std::nothrow_t;
// A big structure. Its details don't matter.
typedef struct {
int array[1000];
} s;
// Global replacement allocators: each forwards straight to malloc/free
// so Massif attributes heap blocks to these frames.  noinline keeps
// every operator visible as its own stack entry in the profile.
__attribute__((noinline)) void* operator new (std::size_t n)
{
    return malloc(n);
}

__attribute__((noinline)) void* operator new (std::size_t n, std::nothrow_t const &)
{
    return malloc(n);
}

__attribute__((noinline)) void* operator new[] (std::size_t n)
{
    return malloc(n);
}

__attribute__((noinline)) void* operator new[] (std::size_t n, std::nothrow_t const &)
{
    return malloc(n);
}

// Matching deallocators, forwarding to free.
__attribute__((noinline)) void operator delete (void* p)
{
    return free(p);
}

__attribute__((noinline)) void operator delete[] (void* p)
{
    return free(p);
}
// Exercise each overloaded 'new' flavour exactly once (plain, nothrow,
// array, nothrow array), then release everything so the massif graph
// returns to zero at exit.
int main(void)
{
    s* p1 = new s;
    s* p2 = new (std::nothrow) s;
    char* c1 = new char[2000];
    char* c2 = new (std::nothrow) char[2000];
    delete p1;
    delete p2;
    delete [] c1;
    delete [] c2;
    return 0;
}
| gpl-2.0 |
Informatic/ofono | src/ctm.c | 20 | 7943 | /*
*
* oFono - Open Source Telephony
*
* Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
* Copyright (C) 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <glib.h>
#include <gdbus.h>
#include "ofono.h"
#include "common.h"
#define CTM_FLAG_CACHED 0x1
static GSList *g_drivers = NULL;
struct ofono_ctm {
DBusMessage *pending;
int flags;
ofono_bool_t enabled;
const struct ofono_ctm_driver *driver;
void *driver_data;
struct ofono_atom *atom;
};
/* Build a GetProperties method-return carrying the single "Enabled"
 * boolean property from the cached state.  Returns NULL on message
 * allocation failure. */
static DBusMessage *ctm_get_properties_reply(DBusMessage *msg,
                                             struct ofono_ctm *ctm)
{
    DBusMessage *reply;
    DBusMessageIter iter;
    DBusMessageIter dict;
    dbus_bool_t value;

    reply = dbus_message_new_method_return(msg);
    if (reply == NULL)
        return NULL;

    dbus_message_iter_init_append(reply, &iter);

    dbus_message_iter_open_container(&iter, DBUS_TYPE_ARRAY,
                                     OFONO_PROPERTIES_ARRAY_SIGNATURE,
                                     &dict);

    value = ctm->enabled;
    ofono_dbus_dict_append(&dict, "Enabled", DBUS_TYPE_BOOLEAN, &value);

    dbus_message_iter_close_container(&iter, &dict);

    return reply;
}
/* Emit a PropertyChanged D-Bus signal for "Enabled" with the currently
 * cached value. */
static void ctm_signal_enabled(struct ofono_ctm *ctm)
{
    DBusConnection *conn = ofono_dbus_get_connection();
    const char *path = __ofono_atom_get_path(ctm->atom);
    ofono_bool_t value = ctm->enabled;

    ofono_dbus_signal_property_changed(conn, path,
                                       OFONO_TEXT_TELEPHONY_INTERFACE,
                                       "Enabled",
                                       DBUS_TYPE_BOOLEAN, &value);
}
/* Driver callback for set_tty().  On failure, answer the pending
 * SetProperty call with an error; on success, flip the cached state,
 * reply, and broadcast the property change. */
static void ctm_set_enabled_callback(const struct ofono_error *error,
                                     void *data)
{
    struct ofono_ctm *ctm = data;
    DBusMessage *reply;

    if (error->type != OFONO_ERROR_TYPE_NO_ERROR) {
        DBG("Error setting ctm enabled property");

        reply = __ofono_error_failed(ctm->pending);
        __ofono_dbus_pending_reply(&ctm->pending, reply);

        return;
    }

    /* The request toggled the state, so the new value is the inverse. */
    ctm->enabled = !ctm->enabled;

    reply = dbus_message_new_method_return(ctm->pending);
    __ofono_dbus_pending_reply(&ctm->pending, reply);

    ctm_signal_enabled(ctm);
}
/* Driver callback for query_tty().  Cache the queried value, answer
 * the pending GetProperties call, and signal PropertyChanged only if
 * the value actually differs from what was cached before. */
static void ctm_query_enabled_callback(const struct ofono_error *error,
                                       ofono_bool_t enable, void *data)
{
    struct ofono_ctm *ctm = data;
    DBusMessage *reply;
    ofono_bool_t enabled_old;

    if (error->type != OFONO_ERROR_TYPE_NO_ERROR) {
        DBG("Error during ctm enabled query");

        reply = __ofono_error_failed(ctm->pending);
        __ofono_dbus_pending_reply(&ctm->pending, reply);

        return;
    }

    /* Mark the cache valid so later GetProperties calls are served
     * without hitting the driver. */
    ctm->flags |= CTM_FLAG_CACHED;

    enabled_old = ctm->enabled;
    ctm->enabled = enable;

    reply = ctm_get_properties_reply(ctm->pending, ctm);
    __ofono_dbus_pending_reply(&ctm->pending, reply);

    if (ctm->enabled != enabled_old)
        ctm_signal_enabled(ctm);
}
/* D-Bus GetProperties handler: serve from cache when valid, otherwise
 * query the driver asynchronously; the reply is then sent from
 * ctm_query_enabled_callback().  Returning NULL tells gdbus the reply
 * is deferred. */
static DBusMessage *ctm_get_properties(DBusConnection *conn,
                                       DBusMessage *msg, void *data)
{
    struct ofono_ctm *ctm = data;

    if (ctm->flags & CTM_FLAG_CACHED)
        return ctm_get_properties_reply(msg, ctm);

    /* Only one outstanding request at a time. */
    if (ctm->pending)
        return __ofono_error_busy(msg);

    ctm->pending = dbus_message_ref(msg);

    ctm->driver->query_tty(ctm, ctm_query_enabled_callback, ctm);

    return NULL;
}
/* D-Bus SetProperty handler.  Only the boolean "Enabled" property is
 * settable; a no-op set replies immediately, otherwise the driver's
 * set_tty() is invoked and the reply deferred to
 * ctm_set_enabled_callback(). */
static DBusMessage *ctm_set_property(DBusConnection *conn, DBusMessage *msg,
                                     void *data)
{
    struct ofono_ctm *ctm = data;
    DBusMessageIter iter;
    DBusMessageIter var;
    const char *property;

    if (ctm->pending)
        return __ofono_error_busy(msg);

    /* Expect the signature (s, v): property name plus variant value. */
    if (!dbus_message_iter_init(msg, &iter))
        return __ofono_error_invalid_args(msg);

    if (dbus_message_iter_get_arg_type(&iter) != DBUS_TYPE_STRING)
        return __ofono_error_invalid_args(msg);

    dbus_message_iter_get_basic(&iter, &property);
    dbus_message_iter_next(&iter);

    if (dbus_message_iter_get_arg_type(&iter) != DBUS_TYPE_VARIANT)
        return __ofono_error_invalid_args(msg);

    dbus_message_iter_recurse(&iter, &var);

    if (g_strcmp0(property, "Enabled") == 0) {
        dbus_bool_t value;
        int target;

        if (dbus_message_iter_get_arg_type(&var) != DBUS_TYPE_BOOLEAN)
            return __ofono_error_invalid_args(msg);

        dbus_message_iter_get_basic(&var, &value);
        target = value;

        /* Already in the requested state: succeed without touching
         * the driver. */
        if (ctm->enabled == target)
            return dbus_message_new_method_return(msg);

        ctm->pending = dbus_message_ref(msg);

        ctm->driver->set_tty(ctm, target,
                             ctm_set_enabled_callback, ctm);

        return NULL;
    }

    return __ofono_error_invalid_args(msg);
}
/* org.ofono.TextTelephony interface description: both methods may
 * reply asynchronously, hence GDBUS_ASYNC_METHOD. */
static const GDBusMethodTable ctm_methods[] = {
    { GDBUS_ASYNC_METHOD("GetProperties",
                         NULL, GDBUS_ARGS({ "properties", "a{sv}" }),
                         ctm_get_properties) },
    { GDBUS_ASYNC_METHOD("SetProperty",
                         GDBUS_ARGS({ "property", "s" }, { "value", "v" }), NULL,
                         ctm_set_property) },
    { }
};

/* Signals emitted by the interface. */
static const GDBusSignalTable ctm_signals[] = {
    { GDBUS_SIGNAL("PropertyChanged",
                   GDBUS_ARGS({ "name", "s" }, { "value", "v" })) },
    { }
};
/*
 * Register a CTM (text telephony) driver with the core.  The driver
 * must supply at least a probe() callback.  Returns 0 on success or
 * -EINVAL for an invalid descriptor.
 */
int ofono_ctm_driver_register(const struct ofono_ctm_driver *d)
{
    /* Validate before logging: the original DBG dereferenced d->name
     * before the NULL check on d. */
    if (d == NULL || d->probe == NULL)
        return -EINVAL;

    DBG("driver: %p, name: %s", d, d->name);

    g_drivers = g_slist_prepend(g_drivers, (void *)d);

    return 0;
}
/*
 * Remove a previously registered CTM driver from the core's list.
 * A NULL argument is silently ignored.
 */
void ofono_ctm_driver_unregister(const struct ofono_ctm_driver *d)
{
    /* Check before logging: the original DBG dereferenced d->name
     * before the NULL check on d. */
    if (d == NULL)
        return;

    DBG("driver: %p, name: %s", d, d->name);

    g_drivers = g_slist_remove(g_drivers, (void *)d);
}
/* Atom unregister hook: drop the TextTelephony interface from both the
 * modem's interface list and the D-Bus connection. */
static void text_telephony_unregister(struct ofono_atom *atom)
{
    struct ofono_ctm *ctm = __ofono_atom_get_data(atom);
    const char *path = __ofono_atom_get_path(ctm->atom);
    DBusConnection *conn = ofono_dbus_get_connection();
    struct ofono_modem *modem = __ofono_atom_get_modem(ctm->atom);

    ofono_modem_remove_interface(modem, OFONO_TEXT_TELEPHONY_INTERFACE);
    g_dbus_unregister_interface(conn, path, OFONO_TEXT_TELEPHONY_INTERFACE);
}

/* Atom destructor: let the driver clean up, then free the ctm object. */
static void text_telephony_remove(struct ofono_atom *atom)
{
    struct ofono_ctm *ctm = __ofono_atom_get_data(atom);

    DBG("atom: %p", atom);

    if (ctm == NULL)
        return;

    if (ctm->driver && ctm->driver->remove)
        ctm->driver->remove(ctm);

    g_free(ctm);
}
/*
 * Allocate a CTM atom on @modem and bind it to the first registered
 * driver whose name matches @driver and whose probe() succeeds.
 * Returns the new object (possibly with no driver bound if none
 * matched), or NULL on bad arguments / allocation failure.
 */
struct ofono_ctm *ofono_ctm_create(struct ofono_modem *modem,
                                   unsigned int vendor,
                                   const char *driver, void *data)
{
    struct ofono_ctm *ctm;
    GSList *l;

    if (driver == NULL)
        return NULL;

    ctm = g_try_new0(struct ofono_ctm, 1);
    if (ctm == NULL)
        return NULL;

    ctm->atom = __ofono_modem_add_atom(modem, OFONO_ATOM_TYPE_CTM,
                                       text_telephony_remove, ctm);

    /* First matching driver whose probe succeeds wins. */
    for (l = g_drivers; l; l = l->next) {
        const struct ofono_ctm_driver *drv = l->data;

        if (g_strcmp0(drv->name, driver) != 0)
            continue;

        if (drv->probe(ctm, vendor, data) < 0)
            continue;

        ctm->driver = drv;
        break;
    }

    return ctm;
}
/* Publish the TextTelephony D-Bus interface for this atom and add it
 * to the modem's interface list; registration failure is logged and
 * the atom left unregistered. */
void ofono_ctm_register(struct ofono_ctm *ctm)
{
    DBusConnection *conn = ofono_dbus_get_connection();
    struct ofono_modem *modem = __ofono_atom_get_modem(ctm->atom);
    const char *path = __ofono_atom_get_path(ctm->atom);

    if (!g_dbus_register_interface(conn, path,
                                   OFONO_TEXT_TELEPHONY_INTERFACE,
                                   ctm_methods, ctm_signals,
                                   NULL, ctm, NULL)) {
        ofono_error("Could not create %s interface",
                    OFONO_TEXT_TELEPHONY_INTERFACE);

        return;
    }

    ofono_modem_add_interface(modem, OFONO_TEXT_TELEPHONY_INTERFACE);

    __ofono_atom_register(ctm->atom, text_telephony_unregister);
}

/* Destroy the atom (triggers text_telephony_remove via the atom
 * destructor installed at creation). */
void ofono_ctm_remove(struct ofono_ctm *ctm)
{
    __ofono_atom_free(ctm->atom);
}
/* Accessors for the driver's private per-instance data pointer. */
void ofono_ctm_set_data(struct ofono_ctm *ctm, void *data)
{
    ctm->driver_data = data;
}

void *ofono_ctm_get_data(struct ofono_ctm *ctm)
{
    return ctm->driver_data;
}
| gpl-2.0 |
sigma-random/wireshark | epan/dissectors/packet-3g-a11.c | 20 | 101777 | /*
* packet-3g-a11.c
* Routines for CDMA2000 A11 packet trace
* Copyright 2002, Ryuji Somegawa <somegawa@wide.ad.jp>
* packet-3g-a11.c was written based on 'packet-mip.c'.
*
* packet-3g-a11.c updated by Ravi Valmikam for 3GPP2 TIA-878-A
* Copyright 2005, Ravi Valmikam <rvalmikam@airvananet.com>
*
* packet-mip.c
* Routines for Mobile IP dissection
* Copyright 2000, Stefan Raab <sraab@cisco.com>
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*Ref:
* http://www.3gpp2.org/Public_html/specs/A.S0009-C_v3.0_100621.pdf
* http://www.3gpp2.org/Public_html/specs/A.S0017-D_v1.0_070624.pdf (IOS 5.1)
* http://www.3gpp2.org/public_html/specs/A.S0017-D_v2.0_090825.pdf
* http://www.3gpp2.org/public_html/specs/A.S0017-D%20v3.0_Interoperability%20Specification%20%28IOS%29%20for%20cdma2000%20Access%20Network%20Interfaces%20-%20Part%207%20%28A10%20and%20A11%20Interfaces%29_20110701.pdf
* http://www.3gpp2.org/Public_html/specs/A00-20110419-002Er0%20A.S0008-C%20v4.0%20HRPD%20IOS-Pub_20110513.pdf
* http://www.3gpp2.org/Public_html/specs/A.S0022-0_v2.0_100426.pdf
*/
#include "config.h"
#include <epan/packet.h>
#include <epan/expert.h>
/* Include vendor id translation */
#include <epan/sminmpec.h>
#include <epan/to_str.h>
#include "packet-radius.h"
/* Forward declarations */
void proto_register_a11(void);
void proto_reg_handoff_a11(void);
static int registration_request_msg =0;
/* Initialize the protocol and registered fields */
static int proto_a11 = -1;
static int hf_a11_type = -1;
static int hf_a11_flags = -1;
static int hf_a11_s = -1;
static int hf_a11_b = -1;
static int hf_a11_d = -1;
static int hf_a11_m = -1;
static int hf_a11_g = -1;
static int hf_a11_v = -1;
static int hf_a11_t = -1;
static int hf_a11_code = -1;
static int hf_a11_status = -1;
static int hf_a11_life = -1;
static int hf_a11_homeaddr = -1;
static int hf_a11_haaddr = -1;
static int hf_a11_coa = -1;
static int hf_a11_ident = -1;
static int hf_a11_ext_type = -1;
static int hf_a11_ext_stype = -1;
static int hf_a11_ext_len = -1;
static int hf_a11_ext = -1;
static int hf_a11_aext_spi = -1;
static int hf_a11_aext_auth = -1;
static int hf_a11_next_nai = -1;
static int hf_a11_ses_key = -1;
static int hf_a11_ses_mnsrid = -1;
static int hf_a11_ses_sidver = -1;
static int hf_a11_ses_msid_type = -1;
static int hf_a11_ses_msid_len = -1;
static int hf_a11_ses_msid = -1;
static int hf_a11_ses_ptype = -1;
static int hf_a11_vse_vid = -1;
static int hf_a11_vse_apptype = -1;
static int hf_a11_vse_canid = -1;
static int hf_a11_vse_panid = -1;
static int hf_a11_vse_srvopt = -1;
static int hf_a11_vse_qosmode = -1;
static int hf_a11_vse_pdit = -1;
static int hf_a11_vse_session_parameter = -1;
static int hf_a11_vse_code = -1;
static int hf_a11_vse_dormant = -1;
static int hf_a11_vse_ehrpd_mode = -1;
static int hf_a11_vse_ehrpd_pmk = -1;
static int hf_a11_vse_ehrpd_handoff_info = -1;
static int hf_a11_vse_ehrpd_tunnel_mode = -1;
static int hf_a11_vse_ppaddr = -1;
/* Additional Session Information */
static int hf_a11_ase_len_type = -1;
static int hf_a11_ase_srid_type = -1;
static int hf_a11_ase_servopt_type = -1;
static int hf_a11_ase_gre_proto_type = -1;
static int hf_a11_ase_gre_key = -1;
static int hf_a11_ase_pcf_addr_key = -1;
static int hf_a11_ase_forward_rohc_info_len = -1;
static int hf_a11_ase_forward_maxcid = -1;
static int hf_a11_ase_forward_mrru = -1;
static int hf_a11_ase_forward_large_cids = -1;
static int hf_a11_ase_forward_profile_count = -1;
static int hf_a11_ase_forward_profile = -1;
static int hf_a11_ase_reverse_rohc_info_len = -1;
static int hf_a11_ase_reverse_maxcid = -1;
static int hf_a11_ase_reverse_mrru = -1;
static int hf_a11_ase_reverse_large_cids = -1;
static int hf_a11_ase_reverse_profile_count = -1;
static int hf_a11_ase_reverse_profile = -1;
static int hf_a11_aut_flow_prof_sub_type = -1;
static int hf_a11_aut_flow_prof_sub_type_len = -1;
static int hf_a11_aut_flow_prof_sub_type_value = -1;
static int hf_a11_serv_opt_prof_max_serv = -1;
static int hf_a11_sub_type = -1;
static int hf_a11_sub_type_length = -1;
static int hf_a11_serv_opt = -1;
static int hf_a11_max_num_serv_opt = -1;
static int hf_a11_bcmcs_stype = -1;
static int hf_a11_bcmcs_entry_len = -1;
/* Forward QoS Information */
static int hf_a11_fqi_srid = -1;
static int hf_a11_fqi_flags = -1;
static int hf_a11_fqi_flags_ip_flow = -1;
static int hf_a11_fqi_flags_dscp = -1;
static int hf_a11_fqi_entry_flag = -1;
static int hf_a11_fqi_entry_flag_dscp = -1;
static int hf_a11_fqi_entry_flag_flow_state = -1;
static int hf_a11_fqi_flowcount = -1;
static int hf_a11_fqi_flowid = -1;
static int hf_a11_fqi_entrylen = -1;
/* static int hf_a11_fqi_flowstate = -1; */
static int hf_a11_fqi_requested_qoslen = -1;
static int hf_a11_fqi_flow_priority = -1;
static int hf_a11_fqi_num_qos_attribute_set = -1;
static int hf_a11_fqi_qos_attribute_setlen = -1;
static int hf_a11_fqi_qos_attribute_setid = -1;
static int hf_a11_fqi_qos_granted_attribute_setid = -1;
static int hf_a11_fqi_verbose = -1;
static int hf_a11_fqi_flow_profileid = -1;
static int hf_a11_fqi_granted_qoslen = -1;
/* Reverse QoS Information */
static int hf_a11_rqi_srid = -1;
static int hf_a11_rqi_flowcount = -1;
static int hf_a11_rqi_flowid = -1;
static int hf_a11_rqi_entrylen = -1;
static int hf_a11_rqi_entry_flag = -1;
static int hf_a11_rqi_entry_flag_flow_state = -1;
/* static int hf_a11_rqi_flowstate = -1; */
static int hf_a11_rqi_requested_qoslen = -1;
static int hf_a11_rqi_flow_priority = -1;
static int hf_a11_rqi_num_qos_attribute_set = -1;
static int hf_a11_rqi_qos_attribute_setlen = -1;
static int hf_a11_rqi_qos_attribute_setid = -1;
static int hf_a11_rqi_qos_granted_attribute_setid = -1;
static int hf_a11_rqi_verbose = -1;
static int hf_a11_rqi_flow_profileid = -1;
/* static int hf_a11_rqi_requested_qos = -1; */
static int hf_a11_rqi_granted_qoslen = -1;
/* static int hf_a11_rqi_granted_qos = -1; */
/* QoS Update Information */
static int hf_a11_fqui_flowcount = -1;
static int hf_a11_rqui_flowcount = -1;
static int hf_a11_fqui_updated_qoslen = -1;
static int hf_a11_fqui_updated_qos = -1;
static int hf_a11_rqui_updated_qoslen = -1;
static int hf_a11_rqui_updated_qos = -1;
static int hf_a11_subsciber_profile = -1;
/* static int hf_a11_subsciber_profile_len = -1; */
/* Initialize the subtree pointers */
static gint ett_a11 = -1;
static gint ett_a11_flags = -1;
static gint ett_a11_ext = -1;
static gint ett_a11_exts = -1;
static gint ett_a11_radius = -1;
static gint ett_a11_radiuses = -1;
static gint ett_a11_ase = -1;
static gint ett_a11_fqi_flowentry = -1;
static gint ett_a11_fqi_requestedqos = -1;
static gint ett_a11_fqi_qos_attribute_set = -1;
static gint ett_a11_fqi_grantedqos = -1;
static gint ett_a11_rqi_flowentry = -1;
static gint ett_a11_rqi_requestedqos = -1;
static gint ett_a11_rqi_qos_attribute_set = -1;
static gint ett_a11_rqi_grantedqos = -1;
static gint ett_a11_fqi_flags = -1;
static gint ett_a11_fqi_entry_flags = -1;
static gint ett_a11_rqi_entry_flags = -1;
static gint ett_a11_fqui_flowentry = -1;
static gint ett_a11_rqui_flowentry = -1;
static gint ett_a11_subscriber_profile = -1;
static gint ett_a11_forward_rohc = -1;
static gint ett_a11_reverse_rohc = -1;
static gint ett_a11_forward_profile = -1;
static gint ett_a11_reverse_profile = -1;
static gint ett_a11_aut_flow_profile_ids = -1;
static gint ett_a11_bcmcs_entry = -1;
/* Expert-info fields used to flag malformed / undissected data below. */
static expert_field ei_a11_sub_type_length_not2 = EI_INIT;
static expert_field ei_a11_sse_too_short = EI_INIT;
static expert_field ei_a11_bcmcs_too_short = EI_INIT;
static expert_field ei_a11_entry_data_not_dissected = EI_INIT;
static expert_field ei_a11_session_data_not_dissected = EI_INIT;
/* Port used for Mobile IP based Tunneling Protocol (A11) */
#define UDP_PORT_3GA11 699
/* A11 message types; 0xb0..0xb5 are the broadcast (BCMCS) variants
 * added by 3GPP2 A.S0019-A v2.0. */
typedef enum {
REGISTRATION_REQUEST = 1,
REGISTRATION_REPLY = 3,
REGISTRATION_UPDATE = 20,
REGISTRATION_ACK = 21,
SESSION_UPDATE = 22,
SESSION_ACK = 23,
CAPABILITIES_INFO = 24,
CAPABILITIES_INFO_ACK = 25,
BC_SERVICE_REQUEST = 0xb0, /* 3GPP2 A.S0019-A v2.0 */
BC_SERVICE_REPLY = 0xb1,
BC_REGISTRATION_REQUEST = 0xb2,
BC_REGISTRATION_REPLY = 0xb3,
BC_REGISTRATION_UPDATE = 0xb4,
BC_REGISTRATION_ACK = 0xb5
} a11MessageTypes;
/* Display strings for the A11 message types above.
 * Fix: "BC Registration RequestT" had a stray trailing 'T'. */
static const value_string a11_types[] = {
{REGISTRATION_REQUEST, "Registration Request"},
{REGISTRATION_REPLY, "Registration Reply"},
{REGISTRATION_UPDATE, "Registration Update"},
{REGISTRATION_ACK, "Registration Ack"},
{SESSION_UPDATE, "Session Update"},
{SESSION_ACK, "Session Update Ack"},
{CAPABILITIES_INFO, "Capabilities Info"},
{CAPABILITIES_INFO_ACK, "Capabilities Info Ack"},
{BC_SERVICE_REQUEST, "BC Service Request"},
{BC_SERVICE_REPLY, "BC Service Response"},
{BC_REGISTRATION_REQUEST, "BC Registration Request"},
{BC_REGISTRATION_REPLY, "BC Registration Reply"},
{BC_REGISTRATION_UPDATE, "BC Registration Update"},
{BC_REGISTRATION_ACK, "BC Registration Acknowledge"},
{0, NULL},
};
static value_string_ext a11_types_ext = VALUE_STRING_EXT_INIT(a11_types);
/* Protocol Type values carried in the Session Specific Extension. */
static const value_string a11_ses_ptype_vals[] = {
{0x8881, "Unstructured Byte Stream"},
{0x88D2, "3GPP2 Packet"},
{0, NULL},
};
/* Registration Reply result codes. The #if 0 block keeps the generic
 * Mobile-IP (RFC 3344 style) codes for reference; only the 3GPP2-specific
 * values (A.S0017-D v4.0) are active. */
static const value_string a11_reply_codes[]= {
{0, "Reg Accepted"},
{9, "Connection Update"},
#if 0
{1, "Reg Accepted, but Simultaneous Bindings Unsupported."},
{64, "Reg Deny (FA)- Unspecified Reason"},
{65, "Reg Deny (FA)- Administratively Prohibited"},
{66, "Reg Deny (FA)- Insufficient Resources"},
{67, "Reg Deny (FA)- MN failed Authentication"},
{68, "Reg Deny (FA)- HA failed Authentication"},
{69, "Reg Deny (FA)- Requested Lifetime too Long"},
{70, "Reg Deny (FA)- Poorly Formed Request"},
{71, "Reg Deny (FA)- Poorly Formed Reply"},
{72, "Reg Deny (FA)- Requested Encapsulation Unavailable"},
{73, "Reg Deny (FA)- VJ Compression Unavailable"},
{74, "Reg Deny (FA)- Requested Reverse Tunnel Unavailable"},
{75, "Reg Deny (FA)- Reverse Tunnel is Mandatory and 'T' Bit Not Set"},
{76, "Reg Deny (FA)- Mobile Node Too Distant"},
{79, "Reg Deny (FA)- Delivery Style Not Supported"},
{80, "Reg Deny (FA)- Home Network Unreachable"},
{81, "Reg Deny (FA)- HA Host Unreachable"},
{82, "Reg Deny (FA)- HA Port Unreachable"},
{88, "Reg Deny (FA)- HA Unreachable"},
{96, "Reg Deny (FA)(NAI) - Non Zero Home Address Required"},
{97, "Reg Deny (FA)(NAI) - Missing NAI"},
{98, "Reg Deny (FA)(NAI) - Missing Home Agent"},
{99, "Reg Deny (FA)(NAI) - Missing Home Address"},
#endif
{128, "Registration Denied - Unspecified"}, /* 3GPP2 A.S0017-D v4.0 */
{129, "Registration Denied - Administratively Prohibited"},
{130, "Registration Denied - Insufficient Resources"},
{131, "Registration Denied - PCF Failed Authentication"},
/* {132, "Reg Deny (HA)- FA Failed Authentication"}, */
{133, "Registration Denied - Identification Mismatch"},
{134, "Registration Denied - Poorly Formed Request"},
/* {135, "Reg Deny (HA)- Too Many Simultaneous Bindings"}, */
{136, "Registration Denied - Unknown PDSN Address"},
{137, "Registration Denied - Requested Reverse Tunnel Unavailable"},
{138, "Registration Denied - Reverse Tunnel is Mandatory and 'T' Bit Not Set"},
{139, "Registration Denied - service option not supported"},
{140, "Registration Denied - no CID available"},
{141, "Registration Denied - unsupported Vendor ID / Application Type in CVSE"},
{142, "Registration Denied - nonexistent A10 or IP flow"},
{0, NULL},
};
/* Registration Update / Session Update acknowledge status codes.
 * Fix: removed stray ')' from the 0x85 "identification mismatch" string. */
static const value_string a11_ack_status[]= {
{0x00, "Update Accepted"},
{0x01, "Partial QoS updated"},
{0x80, "Update Denied - reason unspecified"},
{0x83, "Update Denied - sending node failed authentication"},
{0x85, "Update Denied - identification mismatch"},
{0x86, "Update Denied - poorly formed registration update"},
{0xc9, "Update Denied - Session Parameter Not Updated"},
{0xca, "Update Denied - PMK not requested"},
{0xfd, "Update Denied - QoS profileID not supported"},
{0xfe, "Update Denied - insufficient resources"},
{0xff, "Update Denied - handoff in progress"},
{0, NULL},
};
static value_string_ext a11_ack_status_ext = VALUE_STRING_EXT_INIT(a11_ack_status);
/* Mobile-IP extension types seen in A11 messages (RFC 3344 numbering,
 * plus RFC 3012/3115 and 3GPP2-specific additions). */
typedef enum {
MH_AUTH_EXT = 32,
MF_AUTH_EXT = 33,
FH_AUTH_EXT = 34,
GEN_AUTH_EXT = 36, /* RFC 3012 */
OLD_CVSE_EXT = 37, /* RFC 3115 */
CVSE_EXT = 38, /* RFC 3115 */
SS_EXT = 39, /* 3GPP2 IOS4.2 */
RU_AUTH_EXT = 40, /* 3GPP2 IOS4.2 */
MN_NAI_EXT = 131,
MF_CHALLENGE_EXT = 132, /* RFC 3012 */
OLD_NVSE_EXT = 133, /* RFC 3115 */
NVSE_EXT = 134, /* RFC 3115 */
BCMCS_EXT = 0xb0 /* 3GPP2 A.S0019-A v2.0 */
} MIP_EXTS;
static const value_string a11_ext_types[]= {
{MH_AUTH_EXT, "Mobile-Home Authentication Extension"},
{MF_AUTH_EXT, "Mobile-Foreign Authentication Extension"},
{FH_AUTH_EXT, "Foreign-Home Authentication Extension"},
{GEN_AUTH_EXT, "Generalized Mobile-IP Authentication Extension"},
{OLD_CVSE_EXT, "Critical Vendor/Organization Specific Extension (OLD)"},
{CVSE_EXT, "Critical Vendor/Organization Specific Extension"},
{SS_EXT, "Session Specific Extension"},
{RU_AUTH_EXT, "Registration Update Authentication Extension"},
{MN_NAI_EXT, "Mobile Node NAI Extension"},
{MF_CHALLENGE_EXT, "MN-FA Challenge Extension"},
{OLD_NVSE_EXT, "Normal Vendor/Organization Specific Extension (OLD)"},
{NVSE_EXT, "Normal Vendor/Organization Specific Extension"},
{BCMCS_EXT, "BCMCS Session Extension"},
{0, NULL},
};
static value_string_ext a11_ext_types_ext = VALUE_STRING_EXT_INIT(a11_ext_types);
/* Sub-types of the authentication extensions. */
static const value_string a11_ext_stypes[]= {
{1, "MN AAA Extension"},
{0, NULL},
};
/* NVSE QoS Mode values. */
static const value_string a11_ext_nvose_qosmode[]= {
{0x00, "QoS Disabled"},
{0x01, "QoS Enabled"},
{0, NULL},
};
/* Service Option values carried in NVSEs. */
static const value_string a11_ext_nvose_srvopt[]= {
{0x0021, "3G High Speed Packet Data"},
{0x003B, "HRPD Main Service Connection"},
{0x003C, "Link Layer Assisted Header Removal"},
{0x003D, "Link Layer Assisted Robust Header Compression"},
{0x0040, "HRPD Accounting Records Identifier"}, /* 3GPP2 A.S0009-C v4.0 */
{0x0043, "HRPD Packet Data IP Service where Higher Layer Protocol is IP or ROHC"}, /* 3GPP2 A.S0009-C v4.0 */
{0x0047, "HRPD AltPPP operation"}, /* 3GPP2 A.S0009-C v4.0 */
{0, NULL},
};
/* PDSN Code (connection release reason) values. */
static const value_string a11_ext_nvose_pdsn_code[]= {
{0xc1, "Connection Release - reason unspecified"},
{0xc2, "Connection Release - PPP time-out"},
{0xc3, "Connection Release - registration time-out"},
{0xc4, "Connection Release - PDSN error"},
{0xc5, "Connection Release - inter-PCF handoff"},
{0xc6, "Connection Release - inter-PDSN handoff"},
{0xc7, "Connection Release - PDSN OAM&P intervention"},
{0xc8, "Connection Release - accounting error"},
{0xca, "Connection Release - user (NAI) failed authentication"},
{0x00, NULL},
};
/* All Dormant Indicator value. */
static const value_string a11_ext_dormant[]= {
{0x0000, "all MS packet data service instances are dormant"},
{0, NULL},
};
/* 3GPP2 A.S0022-B v1.0: eHRPD Mode indicator bit. */
static const true_false_string a11_tfs_ehrpd_mode = {
"eAT is operating in evolved mode",
"eAT is operating in legacy mode"
};
/* 3GPP2 A.S0022-0 v2.0, section 4.2.14 */
static const true_false_string a11_tfs_ehrpd_pmk = {
"eAT is requesting PMK information",
"eAT is not requesting PMK information",
};
/* 3GPP2 A.S0022-0 v2.0, section 4.2.14 */
static const true_false_string a11_tfs_ehrpd_handoff_info = {
"eAT is requesting information for E-UTRAN handoff",
"eAT is not requesting information for E-UTRAN handoff",
};
/* 3GPP2 A.S0022-0 v2.0, section 4.2.14 */
static const true_false_string a11_tfs_ehrpd_tunnel_mode = {
"eAT is communicating via tunnel from non-eHRPD",
"eAT is communicating directly via eHRPD",
};
/* NVSE Application Type/Sub-Type pairs (high byte = application type,
 * low byte = application sub-type). */
static const value_string a11_ext_app[]= {
{0x0101, "Accounting (RADIUS)"},
{0x0102, "Accounting (DIAMETER)"},
{0x0201, "Mobility Event Indicator (Mobility)"},
{0x0301, "Data Available Indicator (Data Ready to Send)"},
{0x0401, "Access Network Identifiers (ANID)"},
{0x0501, "PDSN Identifiers (Anchor P-P Address)"},
{0x0601, "Indicators (All Dormant Indicator)"},
{0x0602, "Indicators (eHRPD Mode)"}, /* 3GPP2 A.S0022-B v1.0 */
{0x0603, "Indicators (eHRPD Indicators)"}, /* 3GPP2 A.S0009-C v4.0 */
{0x0701, "PDSN Code (PDSN Code)"},
{0x0801, "Session Parameter (RN-PDIT:Radio Network Packet Data Inactivity Timer)"},
{0x0802, "Session Parameter (Always On)"},
{0x0803, "Session Parameter (QoS Mode)"},
{0x0901, "Service Option (Service Option Value)"},
{0x0A01, "PDSN Enabled Features (Flow Control Enabled)"},
{0x0A02, "PDSN Enabled Features (Packet Boundary Enabled)"},
{0x0A03, "PDSN Enabled Features (GRE Segmentation Enabled)"},
{0x0B01, "PCF Enabled Features (Short Data Indication Supported)"},
{0x0B02, "PCF Enabled Features (GRE Segmentation Enabled)"},
{0x0C01, "Additional Session Info"},
{0x0D01, "QoS Information (Forward QoS Information)"},
{0x0D02, "QoS Information (Reverse QoS Information)"},
{0x0D03, "QoS Information (Subscriber QoS Profile)"},
{0x0D04, "QoS Information (Forward Flow Priority Update Information)"},
{0x0D05, "QoS Information (Reverse Flow Priority Update Information)"},
{0x0DFE, "QoS Information (Forward QoS Update Information)"},
{0x0DFF, "QoS Information (Reverse QoS Update Information)"},
{0x0E01, "Header Compression (ROHC Configuration Parameters)"},
{0x0F01, "Information (Cause Code)"},
{0x0F04, "Information (Additional HSGW Information)"},
{0x1001, "HRPD Indicators (Emergency Services)"},
{0xB001, "System Identifiers (BSID / HRPD Subnet)"},
{0, NULL},
};
static value_string_ext a11_ext_app_ext = VALUE_STRING_EXT_INIT(a11_ext_app);
#if 0
static const value_string a11_airlink_types[]= {
{1, "Session Setup (Y=1)"},
{2, "Active Start (Y=2)"},
{3, "Active Stop (Y=3)"},
{4, "Short Data Burst (Y=4)"},
{0, NULL},
};
#endif
/* BCMCS Session Extension sub-types. */
static const value_string a11_bcmcs_stype_vals[] = {
{1, "BCMCS Flow and Registration Information"},
{2, "Session Information"},
{3, "BCMCS Registration Result"},
{4, "Enhanced Session Information"},
{0, NULL},
};
/* MSID limits: at most 8 BCD octets on the wire -> at most 15 digits. */
#define A11_MSG_MSID_ELEM_LEN_MAX 8
#define A11_MSG_MSID_LEN_MAX 15
/* XXXX ToDo This should be imported from packet-rohc.h */
static const value_string a11_rohc_profile_vals[] =
{
{ 0x0000, "ROHC uncompressed" }, /*RFC 5795*/
{ 0x0001, "ROHC RTP" }, /*RFC 3095*/
{ 0x0002, "ROHC UDP" }, /*RFC 3095*/
{ 0x0003, "ROHC ESP" }, /*RFC 3095*/
{ 0x0004, "ROHC IP" }, /*RFC 3843*/
{ 0x0005, "ROHC LLA" }, /*RFC 3242*/
{ 0x0006, "ROHC TCP" }, /*RFC 4996*/
{ 0x0007, "ROHC RTP/UDP-Lite" }, /*RFC 4019*/
{ 0x0008, "ROHC UDP-Lite" }, /*RFC 4019*/
{ 0x0101, "ROHCv2 RTP" }, /*RFC 5225*/
{ 0x0102, "ROHCv2 UDP" }, /*RFC 5225*/
{ 0x0103, "ROHCv2 ESP" }, /*RFC 5225*/
{ 0x0104, "ROHCv2 IP" }, /*RFC 5225*/
{ 0x0105, "ROHC LLA with R-mode" }, /*RFC 3408*/
{ 0x0107, "ROHCv2 RTP/UDP-Lite" }, /*RFC 5225*/
{ 0x0108, "ROHCv2 UDP-Lite" }, /*RFC 5225*/
{ 0, NULL },
};
static value_string_ext a11_rohc_profile_vals_ext = VALUE_STRING_EXT_INIT(a11_rohc_profile_vals);
/* NOTE(review): 'attrs' is declared elsewhere in this file — NUM_ATTR is only
 * valid in scopes where that array is visible. */
#define NUM_ATTR (sizeof(attrs)/sizeof(struct radius_attribute))
#define RADIUS_VENDOR_SPECIFIC 26
#define SKIP_HDR_LEN 6
/* decode MSID from SSE */
/* MSID is encoded in Binary Coded Decimal format
First Byte: [odd-indicator] [Digit 1]
Second Byte: [Digit 3] [Digit 2]
..
if[odd]
Last Byte: [Digit N] [Digit N-1]
else
Last Byte: [F] [Digit N]
*/
/* 3GPP2 A.S0008-C v4.0, 3GPP2 A.S0019-A v2.0 */
static const value_string a11_ses_msid_type_vals[] =
{
{ 0x0000, "No Identity Code" },
{ 0x0001, "MEID" },
{ 0x0005, "ESN" },
{ 0x0006, "IMSI" },
{ 0x0008, "BCMCS Flow ID" },
{ 0, NULL },
};
/* Bitmask field list for the A11 message flags octet
 * (consumed by proto_tree_add_bitmask-style calls; NULL-terminated). */
static const int * a11_flags[] = {
&hf_a11_s,
&hf_a11_b,
&hf_a11_d,
&hf_a11_m,
&hf_a11_g,
&hf_a11_v,
&hf_a11_t,
NULL
};
/*
 * Dissect a Session Specific Extension (SSE): Protocol Type, Session Key,
 * Session Id Version, SRID, MSID Type, MSID Length and the BCD-coded MSID.
 * Each field is bounds-checked against the remaining extension length and a
 * "too short" expert info is attached to ext_len_item on truncation.
 *
 * Fix: msid_start_offset was declared guint8, silently truncating tvb
 * offsets greater than 255 and anchoring the MSID string item at the wrong
 * position for extensions deeper in the packet. It now uses int, matching
 * the 'offset' parameter.
 */
static void
decode_sse(proto_tree *ext_tree, packet_info *pinfo, tvbuff_t *tvb, int offset, guint ext_len, proto_item *ext_len_item)
{
    guint8      msid_len;
    int         msid_start_offset;   /* was guint8: truncated offsets > 255 */
    guint8      msid_num_digits;
    guint8      msid_index;
    char       *msid_digits;
    const char *p_msid;
    gboolean    odd_even_ind;

    /* Decode Protocol Type */
    if (ext_len < 2) {
        expert_add_info_format(pinfo, ext_len_item, &ei_a11_sse_too_short,
                               "Cannot decode Protocol Type - SSE too short");
        return;
    }
    proto_tree_add_item(ext_tree, hf_a11_ses_ptype, tvb, offset, 2, ENC_BIG_ENDIAN);
    offset  += 2;
    ext_len -= 2;

    /* Decode Session Key */
    if (ext_len < 4) {
        expert_add_info_format(pinfo, ext_len_item, &ei_a11_sse_too_short,
                               "Cannot decode Session Key - SSE too short");
        return;
    }
    proto_tree_add_item(ext_tree, hf_a11_ses_key, tvb, offset, 4, ENC_BIG_ENDIAN);
    offset  += 4;
    ext_len -= 4;

    /* Decode Session Id Version (only the 2nd octet carries the version) */
    if (ext_len < 2) {
        expert_add_info_format(pinfo, ext_len_item, &ei_a11_sse_too_short,
                               "Cannot decode Session Id Version - SSE too short");
        return;
    }
    proto_tree_add_item(ext_tree, hf_a11_ses_sidver, tvb, offset+1, 1, ENC_BIG_ENDIAN);
    offset  += 2;
    ext_len -= 2;

    /* Decode SRID */
    if (ext_len < 2) {
        expert_add_info_format(pinfo, ext_len_item, &ei_a11_sse_too_short,
                               "Cannot decode SRID - SSE too short");
        return;
    }
    proto_tree_add_item(ext_tree, hf_a11_ses_mnsrid, tvb, offset, 2, ENC_BIG_ENDIAN);
    offset  += 2;
    ext_len -= 2;

    /* MSID Type */
    if (ext_len < 2) {
        expert_add_info_format(pinfo, ext_len_item, &ei_a11_sse_too_short,
                               "Cannot decode MSID Type - SSE too short");
        return;
    }
    proto_tree_add_item(ext_tree, hf_a11_ses_msid_type, tvb, offset, 2, ENC_BIG_ENDIAN);
    offset  += 2;
    ext_len -= 2;

    /* MSID Len */
    if (ext_len < 1) {
        expert_add_info_format(pinfo, ext_len_item, &ei_a11_sse_too_short,
                               "Cannot decode MSID Length - SSE too short");
        return;
    }
    msid_len = tvb_get_guint8(tvb, offset);
    proto_tree_add_item(ext_tree, hf_a11_ses_msid_len, tvb, offset, 1, ENC_BIG_ENDIAN);
    offset  += 1;
    ext_len -= 1;

    /* Decode MSID (BCD; first nibble is the odd/even digit-count indicator) */
    if (ext_len < msid_len) {
        expert_add_info_format(pinfo, ext_len_item, &ei_a11_sse_too_short,
                               "Cannot decode MSID - SSE too short");
        return;
    }
    msid_digits = (char *)wmem_alloc(wmem_packet_scope(), A11_MSG_MSID_LEN_MAX+2);
    msid_start_offset = offset;

    if (msid_len > A11_MSG_MSID_ELEM_LEN_MAX) {
        p_msid = "MSID is too long";
    } else if (msid_len == 0) {
        p_msid = "MSID is too short";
    } else {
        /* Decode the BCD digits: low nibble first, then high nibble */
        for (msid_index = 0; msid_index < msid_len; msid_index++) {
            guint8 msid_digit = tvb_get_guint8(tvb, offset);
            offset  += 1;
            ext_len -= 1;
            msid_digits[msid_index*2]       = (msid_digit & 0x0F) + '0';
            msid_digits[(msid_index*2) + 1] = ((msid_digit & 0xF0) >> 4) + '0';
        }
        /* First decoded digit is the odd/even indicator ('1' = odd count) */
        odd_even_ind = (msid_digits[0] == '1');
        if (odd_even_ind) {
            msid_num_digits = ((msid_len-1) * 2) + 1;
        } else {
            msid_num_digits = (msid_len-1) * 2;
        }
        msid_digits[msid_num_digits + 1] = '\0';
        p_msid = msid_digits + 1;   /* skip the indicator digit */
    }

    proto_tree_add_string(ext_tree, hf_a11_ses_msid, tvb, msid_start_offset, msid_len, p_msid);
}
/*
 * Dissect a BCMCS Session Extension. Sub-type 1 (Flow and Registration
 * Information) is a list of length-prefixed entries; other sub-types are
 * flagged as not yet dissected.
 *
 * Fixes: (1) 'ext_len -= entry_len' could underflow the unsigned ext_len
 * when a malformed entry claimed more bytes than remain, making the loop
 * spin until a tvb bounds exception — the subtraction is now clamped.
 * (2) Removed the unreachable 'break' after 'return' in the default case.
 */
static void
decode_bcmcs(proto_tree* ext_tree, packet_info *pinfo, tvbuff_t* tvb, int offset, guint ext_len, proto_item *ext_len_item)
{
    guint8 bc_stype, entry_len;

    /* Decode Protocol Type */
    if (ext_len < 2) {
        expert_add_info_format(pinfo, ext_len_item, &ei_a11_bcmcs_too_short,
                               "Cannot decode Protocol Type - BCMCS too short");
        return;
    }

    bc_stype = tvb_get_guint8(tvb, offset);
    proto_tree_add_item(ext_tree, hf_a11_bcmcs_stype, tvb, offset, 1, ENC_BIG_ENDIAN);
    offset  += 1;
    ext_len -= 1;

    switch (bc_stype) {
    case 1:
    {
        int i = 0;
        proto_tree *entry_tree;
        while (ext_len > 0) {
            i++;
            entry_len = tvb_get_guint8(tvb, offset);
            if (entry_len == 0) {
                /* Zero-length entry: consume one octet to guarantee progress */
                ext_len  -= 1;
                entry_len = 1;
            } else if (entry_len >= ext_len) {
                /* Clamp: entry claims at least the rest of the extension */
                ext_len = 0;
            } else {
                ext_len = ext_len - entry_len;
            }
            entry_tree = proto_tree_add_subtree_format(ext_tree, tvb, offset, entry_len,
                             ett_a11_bcmcs_entry, NULL, "BCMCS Information Entry %u", i);
            proto_tree_add_item(entry_tree, hf_a11_bcmcs_entry_len, tvb, offset, 1, ENC_BIG_ENDIAN);
            proto_tree_add_expert(ext_tree, pinfo, &ei_a11_entry_data_not_dissected, tvb, offset, entry_len -1);
            offset = offset + entry_len;
        }
    }
    break;
    default:
        proto_tree_add_expert_format(ext_tree, pinfo, &ei_a11_session_data_not_dissected, tvb, offset, -1, "Session Data Type %u Not dissected yet", bc_stype);
        return;
    }
}
/* RADIUS attributes */
/*
 * Dissect an embedded airlink record as RADIUS attribute/value pairs.
 * Skips work entirely when no tree is being built or when fewer than 12
 * bytes remain (minimum sensible extension size).
 */
static void
dissect_a11_radius( tvbuff_t *tvb, packet_info *pinfo, int offset, proto_tree *tree, int app_len)
{
    proto_tree *radius_tree;

    /* Nothing to attach to, or extension too small to hold anything useful */
    if ((tree == NULL) || (tvb_reported_length_remaining(tvb, offset) < 12))
        return;

    /* The subtree starts 2 bytes back, at the application type/sub-type */
    radius_tree = proto_tree_add_subtree(tree, tvb, offset - 2, app_len,
                                         ett_a11_radiuses, NULL, "Airlink Record");
    dissect_attribute_value_pairs(radius_tree, pinfo, tvb, offset, app_len - 2);
}
/* X.S0011-005-D v2.0 Service Option Profile */
/*
 * Dissect an X.S0011-005-D Service Option Profile attribute: a 32-bit
 * max-connections field followed by TLV-style (sub-type, length, value)
 * entries. A length below 2 is flagged via expert info and treated as 2
 * so the loop always makes forward progress. Returns "" (no summary text).
 */
static const gchar *
dissect_3gpp2_service_option_profile(proto_tree *tree, tvbuff_t *tvb, packet_info *pinfo)
{
    int         offset = 0;
    proto_item *len_item;

    /* Maximum service connections / Link Flows total (32 bits) */
    proto_tree_add_item(tree, hf_a11_serv_opt_prof_max_serv, tvb, offset, 4, ENC_BIG_ENDIAN);
    offset += 4;

    while (tvb_reported_length_remaining(tvb, offset) > 0) {
        guint8 subtype     = tvb_get_guint8(tvb, offset);
        guint8 subtype_len = tvb_get_guint8(tvb, offset + 1);

        proto_tree_add_item(tree, hf_a11_sub_type, tvb, offset, 1, ENC_BIG_ENDIAN);
        offset++;

        len_item = proto_tree_add_item(tree, hf_a11_sub_type_length, tvb, offset, 1, ENC_BIG_ENDIAN);
        offset++;

        if (subtype_len < 2) {
            expert_add_info(pinfo, len_item, &ei_a11_sub_type_length_not2);
            subtype_len = 2;   /* guarantee loop progress */
        }

        if (subtype == 1) {
            proto_tree_add_item(tree, hf_a11_serv_opt, tvb, offset, 1, ENC_BIG_ENDIAN);
            offset++;
            /* Max number of service instances of Service Option n */
            proto_tree_add_item(tree, hf_a11_max_num_serv_opt, tvb, offset, 1, ENC_BIG_ENDIAN);
            offset++;
        }

        offset += subtype_len - 2;   /* skip any value bytes not dissected above */
    }
    return "";
}
/* Sub-types of the Authorized Flow Profile IDs RADIUS attribute. */
static const value_string a11_aut_flow_prof_subtype_vals[] = {
{0x1, "ProfileID-Forward"},
{0x2, "ProfileID-Reverse"},
{0x3, "ProfileID-Bi-direction"},
{0, NULL},
};
/* X.S0011-005-D v2.0 Authorized Flow Profile IDs for the User */
/* Dissect the X.S0011-005-D "Authorized Flow Profile IDs for the User"
 * attribute: repeated (sub-type, length, 16-bit value) triplets, each shown
 * in its own subtree. Returns "" (no summary text).
 * NOTE(review): the 16-bit value and the subtree length are read/used before
 * the length byte is validated; a sub_type_length < 2 only corrects the
 * offset arithmetic, not the already-created subtree span — presumably
 * acceptable for display purposes, but worth confirming. */
static const gchar *
dissect_3gpp2_radius_aut_flow_profile_ids(proto_tree *tree, tvbuff_t *tvb, packet_info *pinfo)
{
proto_tree *sub_tree;
int offset = 0;
proto_item *item;
guint8 sub_type, sub_type_length;
guint32 value;
while (tvb_reported_length_remaining(tvb,offset) > 0) {
sub_type = tvb_get_guint8(tvb,offset);
sub_type_length = tvb_get_guint8(tvb,offset+1);
/* value is 2 octets */
value = tvb_get_ntohs(tvb,offset+2);
sub_tree = proto_tree_add_subtree_format(tree, tvb, offset, sub_type_length, ett_a11_aut_flow_profile_ids, &item,
"%s = %u", val_to_str_const(sub_type, a11_aut_flow_prof_subtype_vals, "Unknown"), value);
proto_tree_add_item(sub_tree, hf_a11_aut_flow_prof_sub_type, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
/* 'item' is reused here as the length item for the expert check below */
item = proto_tree_add_item(sub_tree, hf_a11_aut_flow_prof_sub_type_len, tvb, offset, 1, ENC_BIG_ENDIAN);
if (sub_type_length < 2) {
expert_add_info(pinfo, item, &ei_a11_sub_type_length_not2);
sub_type_length = 2;
}
offset += 1;
proto_tree_add_item(sub_tree, hf_a11_aut_flow_prof_sub_type_value, tvb, offset, 2, ENC_BIG_ENDIAN);
offset = offset+sub_type_length - 2;
}
return "";
}
/* Code to dissect Additional Session Info */
/* Dissect the Additional Session Info extension: a list of GRE key entries
 * (entry length, SRID, service option, GRE protocol/key, PCF address),
 * optionally followed by forward/reverse ROHC parameter blocks for HRPD
 * IP service (service option 0x0043) in Registration Requests.
 * Relies on the file-global 'registration_request_msg' flag (set by the
 * message dissector, cleared here on exit). */
static void
dissect_ase(tvbuff_t *tvb, int offset, guint ase_len, proto_tree *ext_tree)
{
guint clen = 0; /* consumed length */
while (clen < ase_len) {
proto_tree *exts_tree;
guint8 srid = tvb_get_guint8(tvb, offset+1);
guint16 service_option = tvb_get_ntohs(tvb, offset+2);
guint8 entry_length;
int entry_start_offset;
/* Entry Length */
entry_start_offset = offset;
entry_length = tvb_get_guint8(tvb, offset);
/* For SO 64/67 in a Registration Request the length byte itself is not
 * counted in entry_length, hence the +1 on the subtree span */
if (registration_request_msg && ((service_option == 64) || (service_option == 67)))
exts_tree = proto_tree_add_subtree_format(ext_tree, tvb, offset, entry_length+1,
ett_a11_ase, NULL, "GRE Key Entry (SRID: %d)", srid);
else
exts_tree = proto_tree_add_subtree_format(ext_tree, tvb, offset, entry_length,
ett_a11_ase, NULL, "GRE Key Entry (SRID: %d)", srid);
proto_tree_add_item(exts_tree, hf_a11_ase_len_type, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
/* SRID */
proto_tree_add_item(exts_tree, hf_a11_ase_srid_type, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
/* Service Option */
proto_tree_add_item(exts_tree, hf_a11_ase_servopt_type, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
/* GRE Protocol Type*/
proto_tree_add_item(exts_tree, hf_a11_ase_gre_proto_type, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
/* GRE Key */
proto_tree_add_item(exts_tree, hf_a11_ase_gre_key, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
/* PCF IP Address */
proto_tree_add_item(exts_tree, hf_a11_ase_pcf_addr_key, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
/* Optional ROHC info follows only when the entry is long enough */
if ((entry_length>14)&&(registration_request_msg)) {
if (service_option == 0x0043) {
proto_tree *extv_tree;
guint8 profile_count = tvb_get_guint8(tvb, offset+6);
guint8 profile_index = 0;
guint8 reverse_profile_count;
proto_tree *extt_tree = proto_tree_add_subtree(exts_tree, tvb, offset,6+(profile_count*2)+1,
ett_a11_forward_rohc, NULL, "Forward ROHC Info");
proto_tree_add_item(extt_tree, hf_a11_ase_forward_rohc_info_len, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
proto_tree_add_item(extt_tree, hf_a11_ase_forward_maxcid, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
proto_tree_add_item(extt_tree, hf_a11_ase_forward_mrru, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
proto_tree_add_item(extt_tree, hf_a11_ase_forward_large_cids, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
profile_count=tvb_get_guint8(tvb, offset);
proto_tree_add_item(extt_tree, hf_a11_ase_forward_profile_count, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
/* NOTE(review): each per-profile subtree claims (2*profile_count) bytes
 * but only one 2-byte item is added — looks like it should be 2; kept
 * as-is since it only affects highlighting, not decoding. */
for (profile_index=0; profile_index<profile_count; profile_index++) {
proto_tree *extu_tree = proto_tree_add_subtree_format(extt_tree, tvb, offset, (2*profile_count),
ett_a11_forward_profile, NULL, "Forward Profile : %d", profile_index);
proto_tree_add_item(extu_tree, hf_a11_ase_forward_profile, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
}/*for*/
reverse_profile_count=tvb_get_guint8(tvb, offset+6);
extv_tree = proto_tree_add_subtree(exts_tree, tvb, offset,6+(reverse_profile_count*2)+1,
ett_a11_reverse_rohc, NULL, "Reverse ROHC Info");
proto_tree_add_item(extv_tree, hf_a11_ase_reverse_rohc_info_len, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
proto_tree_add_item(extv_tree, hf_a11_ase_reverse_maxcid, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
proto_tree_add_item(extv_tree, hf_a11_ase_reverse_mrru, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
proto_tree_add_item(extv_tree, hf_a11_ase_reverse_large_cids, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
profile_count=tvb_get_guint8(tvb, offset);
proto_tree_add_item(extv_tree, hf_a11_ase_reverse_profile_count, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
for (profile_index=0; profile_index<reverse_profile_count; profile_index++) {
proto_tree *extw_tree = proto_tree_add_subtree_format(extv_tree, tvb, offset, (2*profile_count),
ett_a11_reverse_profile, NULL, "Reverse Profile : %d", profile_index);
proto_tree_add_item(extw_tree, hf_a11_ase_reverse_profile, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
}/*for*/
}/* Service option */
}/* if */
clen += entry_length + 1;
/* Set offset = start of next entry in case of padding */
offset = entry_start_offset + entry_length+1;
} /* while */
registration_request_msg =0;
}
/* Forward QoS Information flags-octet bits (see dissect_fwd_qosinfo_flags) */
#define A11_FQI_IPFLOW_DISC_ENABLED 0x80
#define A11_FQI_DSCP_INCLUDED 0x40
/*
 * Dissect the Forward QoS Information flags octet (IP-flow discrimination
 * and DSCP-included bits) and report via *p_dscp_included (1/0) whether
 * per-entry DSCP fields are present in the flow entries that follow.
 */
static void
dissect_fwd_qosinfo_flags(tvbuff_t *tvb, int offset, proto_tree *ext_tree, guint8 *p_dscp_included)
{
    guint8      flag_byte  = tvb_get_guint8(tvb, offset);
    proto_item *flag_item  = proto_tree_add_item(ext_tree, hf_a11_fqi_flags, tvb, offset, 1, ENC_BIG_ENDIAN);
    proto_tree *flag_tree  = proto_item_add_subtree(flag_item, ett_a11_fqi_flags);

    proto_tree_add_item(flag_tree, hf_a11_fqi_flags_ip_flow, tvb, offset, 1, ENC_BIG_ENDIAN);
    proto_tree_add_item(flag_tree, hf_a11_fqi_flags_dscp,    tvb, offset, 1, ENC_BIG_ENDIAN);

    *p_dscp_included = (flag_byte & A11_FQI_DSCP_INCLUDED) ? 1 : 0;
}
/*
 * Dissect a forward flow entry's flags octet: the flow-state bit always,
 * plus the DSCP field only when the extension-level flags said DSCPs
 * are included (dscp_enabled != 0).
 */
static void
dissect_fqi_entry_flags(tvbuff_t *tvb, int offset, proto_tree *ext_tree, guint8 dscp_enabled)
{
    proto_item *entry_item = proto_tree_add_item(ext_tree, hf_a11_fqi_entry_flag, tvb, offset, 1, ENC_BIG_ENDIAN);
    proto_tree *entry_tree = proto_item_add_subtree(entry_item, ett_a11_fqi_entry_flags);

    if (dscp_enabled)
        proto_tree_add_item(entry_tree, hf_a11_fqi_entry_flag_dscp, tvb, offset, 1, ENC_BIG_ENDIAN);

    proto_tree_add_item(entry_tree, hf_a11_fqi_entry_flag_flow_state, tvb, offset, 1, ENC_BIG_ENDIAN);
}
/*
 * Dissect a reverse flow entry's flags octet; only a flow-state bit is
 * defined (no DSCP on the reverse link).
 */
static void
dissect_rqi_entry_flags(tvbuff_t *tvb, int offset, proto_tree *ext_tree)
{
    proto_item *entry_item = proto_tree_add_item(ext_tree, hf_a11_rqi_entry_flag, tvb, offset, 1, ENC_BIG_ENDIAN);
    proto_tree *entry_tree = proto_item_add_subtree(entry_item, ett_a11_rqi_entry_flags);

    proto_tree_add_item(entry_tree, hf_a11_rqi_entry_flag_flow_state, tvb, offset, 1, ENC_BIG_ENDIAN);
}
/* Code to dissect Forward QoS Info */
/* Dissect the Forward QoS Information NVSE: SRID, flags, flow count, then
 * one entry per flow with requested/granted QoS blobs.
 * NOTE(review): inside the QoS blobs, several multi-byte items are added
 * while clen advances by only 1 — the sub-fields are bit-packed across
 * byte boundaries, so item spans deliberately overlap; the exact clen
 * sequence must not be reordered. */
static void
dissect_fwd_qosinfo(tvbuff_t *tvb, int offset, proto_tree *ext_tree)
{
int clen = 0; /* consumed length */
guint8 flow_count;
guint8 flow_index;
guint8 dscp_enabled = 0;
/* SR Id */
proto_tree_add_item(ext_tree, hf_a11_fqi_srid, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* Flags */
dissect_fwd_qosinfo_flags(tvb, offset+clen, ext_tree, &dscp_enabled);
clen++;
/* Flow Count (low 5 bits only) */
flow_count = tvb_get_guint8(tvb, offset+clen);
flow_count &= 0x1F;
proto_tree_add_item(ext_tree, hf_a11_fqi_flowcount, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
for (flow_index=0; flow_index<flow_count; flow_index++) {
guint8 requested_qos_len = 0;
guint8 granted_qos_len = 0;
guint8 entry_len = tvb_get_guint8(tvb, offset+clen);
guint8 flow_id = tvb_get_guint8(tvb, offset+clen+1);
proto_tree *flow_tree = proto_tree_add_subtree_format(ext_tree, tvb, offset+clen,
entry_len+1, ett_a11_fqi_flowentry, NULL, "Forward Flow Entry (Flow Id: %d)", flow_id);
/* Entry Length */
proto_tree_add_item(flow_tree, hf_a11_fqi_entrylen, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* Flow Id */
proto_tree_add_item(flow_tree, hf_a11_fqi_flowid, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* DSCP and Flow State*/
dissect_fqi_entry_flags(tvb, offset+clen, flow_tree, dscp_enabled);
clen++;
/* Requested QoS Length */
requested_qos_len = tvb_get_guint8(tvb, offset+clen);
proto_tree_add_item(flow_tree, hf_a11_fqi_requested_qoslen, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* Requested QoS Blob */
if (requested_qos_len) {
proto_tree *exts_tree2;
proto_tree *exts_tree1 = proto_tree_add_subtree(flow_tree, tvb, offset+clen,requested_qos_len,
ett_a11_fqi_requestedqos, NULL, "Forward Requested QoS ");
/* Flow Priority */
proto_tree_add_item(exts_tree1, hf_a11_fqi_flow_priority, tvb,offset+clen , 1, ENC_BIG_ENDIAN);
/* Num of QoS attribute sets */
proto_tree_add_item(exts_tree1, hf_a11_fqi_num_qos_attribute_set, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
/* QoS attribute set length */
proto_tree_add_item(exts_tree1, hf_a11_fqi_qos_attribute_setlen, tvb, offset+clen, 2, ENC_BIG_ENDIAN);
clen++;
/* QoS attribute set */
exts_tree2 = proto_tree_add_subtree(exts_tree1, tvb, offset+clen, 4, ett_a11_fqi_qos_attribute_set,
NULL, "QoS Attribute Set");
/* QoS attribute setid */
proto_tree_add_item(exts_tree2, hf_a11_fqi_qos_attribute_setid, tvb, offset+clen, 2, ENC_BIG_ENDIAN);
clen++;
/* verbose */
proto_tree_add_item(exts_tree2, hf_a11_fqi_verbose, tvb,offset+clen, 1, ENC_BIG_ENDIAN);
/* Flow profile id */
proto_tree_add_item(exts_tree2, hf_a11_fqi_flow_profileid, tvb, offset+clen, 3, ENC_BIG_ENDIAN);
clen += 3;
}
/* Granted QoS Length */
granted_qos_len = tvb_get_guint8(tvb, offset+clen);
proto_tree_add_item(flow_tree, hf_a11_fqi_granted_qoslen, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* Granted QoS Blob */
if (granted_qos_len) {
proto_tree *exts_tree3;
exts_tree3 = proto_tree_add_subtree(flow_tree, tvb, offset+clen, granted_qos_len,
ett_a11_fqi_grantedqos, NULL, "Forward Granted QoS ");
/* QoS attribute setid */
proto_tree_add_item(exts_tree3, hf_a11_fqi_qos_granted_attribute_setid, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
}
} /* for (flow_index...) */
}
/* Code to dissect Reverse QoS Info */
/* Dissect the Reverse QoS Information NVSE: SRID, flow count, then one
 * entry per flow with requested/granted QoS blobs. Mirrors
 * dissect_fwd_qosinfo() but with no DSCP flags on the reverse link.
 * NOTE(review): as in the forward variant, multi-byte items are added
 * while clen advances by 1 — bit-packed fields; keep the clen sequence. */
static void
dissect_rev_qosinfo(tvbuff_t *tvb, int offset, proto_tree *ext_tree)
{
int clen = 0; /* consumed length */
guint8 flow_count;
guint8 flow_index;
/* SR Id */
proto_tree_add_item(ext_tree, hf_a11_rqi_srid, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* Flow Count (low 5 bits only) */
flow_count = tvb_get_guint8(tvb, offset+clen);
flow_count &= 0x1F;
proto_tree_add_item(ext_tree, hf_a11_rqi_flowcount, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
for (flow_index=0; flow_index<flow_count; flow_index++) {
guint8 requested_qos_len;
guint8 granted_qos_len;
guint8 entry_len = tvb_get_guint8(tvb, offset+clen);
guint8 flow_id = tvb_get_guint8(tvb, offset+clen+1);
proto_tree *flow_tree = proto_tree_add_subtree_format
(ext_tree, tvb, offset+clen, entry_len+1, ett_a11_rqi_flowentry, NULL,
"Reverse Flow Entry (Flow Id: %d)", flow_id);
/* Entry Length */
proto_tree_add_item(flow_tree, hf_a11_rqi_entrylen, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* Flow Id */
proto_tree_add_item(flow_tree, hf_a11_rqi_flowid, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* Flags */
dissect_rqi_entry_flags(tvb, offset+clen, flow_tree);
clen++;
/* Requested QoS Length */
requested_qos_len = tvb_get_guint8(tvb, offset+clen);
proto_tree_add_item(flow_tree, hf_a11_rqi_requested_qoslen, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* Requested QoS Blob */
if (requested_qos_len) {
proto_tree *exts_tree1, *exts_tree2;
exts_tree1 = proto_tree_add_subtree(flow_tree, tvb, offset+clen,requested_qos_len,
ett_a11_rqi_requestedqos, NULL, "Reverse Requested QoS ");
/* Flow Priority */
proto_tree_add_item(exts_tree1, hf_a11_rqi_flow_priority, tvb,offset+clen , 1, ENC_BIG_ENDIAN);
/* Num of QoS attribute sets */
proto_tree_add_item(exts_tree1, hf_a11_rqi_num_qos_attribute_set, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
/* QoS attribute set length */
proto_tree_add_item(exts_tree1, hf_a11_rqi_qos_attribute_setlen, tvb, offset+clen, 2, ENC_BIG_ENDIAN);
clen++;
/* QoS attribute set */
exts_tree2 = proto_tree_add_subtree(exts_tree1, tvb, offset+clen, 4,
ett_a11_rqi_qos_attribute_set, NULL, "QoS Attribute Set");
/* QoS attribute setid */
proto_tree_add_item(exts_tree2, hf_a11_rqi_qos_attribute_setid, tvb, offset+clen, 2, ENC_BIG_ENDIAN);
clen++;
/* verbose */
proto_tree_add_item(exts_tree2, hf_a11_rqi_verbose, tvb,offset+clen, 1, ENC_BIG_ENDIAN);
/* Flow profile id */
proto_tree_add_item(exts_tree2, hf_a11_rqi_flow_profileid, tvb, offset+clen, 3, ENC_BIG_ENDIAN);
clen += 3;
}
/* Granted QoS Length */
granted_qos_len = tvb_get_guint8(tvb, offset+clen);
proto_tree_add_item(flow_tree, hf_a11_rqi_granted_qoslen, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
/* Granted QoS Blob */
if (granted_qos_len) {
proto_tree *exts_tree3;
exts_tree3 = proto_tree_add_subtree(flow_tree, tvb, offset+clen,granted_qos_len,
ett_a11_rqi_grantedqos, NULL, "Reverse Granted QoS ");
/* QoS attribute setid */
proto_tree_add_item(exts_tree3, hf_a11_rqi_qos_granted_attribute_setid, tvb, offset+clen, 1, ENC_BIG_ENDIAN);
clen++;
}
}
}
/* Code to dissect Subscriber QoS Profile */
/*
 * Adds a "Subscriber Qos Profile" subtree under ext_tree, showing the raw
 * profile bytes and then dissecting the RADIUS-style attribute/value pairs
 * contained in them.  Does nothing beyond the subtree header when the
 * profile is empty.
 */
static void
dissect_subscriber_qos_profile(tvbuff_t *tvb, packet_info *pinfo, int offset, int ext_len, proto_tree *ext_tree)
{
    const int   profile_len = ext_len;
    proto_tree *profile_tree;

    profile_tree = proto_tree_add_subtree_format(ext_tree, tvb, offset, 0,
                                                 ett_a11_subscriber_profile, NULL,
                                                 "Subscriber Qos Profile (%d bytes)",
                                                 profile_len);

    if (profile_len == 0)
        return;

    /* Subscriber QoS profile: raw bytes, then the AVPs inside them */
    proto_tree_add_item(profile_tree, hf_a11_subsciber_profile, tvb, offset,
                        profile_len, ENC_NA);
    dissect_attribute_value_pairs(profile_tree, pinfo, tvb, offset, profile_len);
}
/* Code to dissect Forward QoS Update Info */
/*
 * Dissects the Forward QoS Update Info blob: a one-byte flow count followed
 * by, for each flow, a one-byte flow id, a one-byte sub-blob length, and the
 * (possibly empty) updated-QoS sub-blob itself.
 */
static void
dissect_fwd_qosupdate_info(tvbuff_t *tvb, int offset, proto_tree *ext_tree)
{
    int    clen = 0;    /* consumed length */
    guint8 flow_count;
    guint8 i;

    /* Flow Count */
    flow_count = tvb_get_guint8(tvb, offset + clen);
    proto_tree_add_item(ext_tree, hf_a11_fqui_flowcount, tvb, offset + clen, 1, ENC_BIG_ENDIAN);
    clen++;

    for (i = 0; i < flow_count; i++) {
        proto_tree *entry_tree;
        guint8      sub_blob_len;
        guint8      flow_id = tvb_get_guint8(tvb, offset + clen);

        /* One subtree per forward flow entry */
        entry_tree = proto_tree_add_subtree_format(ext_tree, tvb, offset + clen, 1,
                                                   ett_a11_fqui_flowentry, NULL,
                                                   "Forward Flow Entry (Flow Id: %d)", flow_id);
        clen++;

        /* Forward QoS Sub Blob Length */
        sub_blob_len = tvb_get_guint8(tvb, offset + clen);
        proto_tree_add_item(entry_tree, hf_a11_fqui_updated_qoslen, tvb,
                            offset + clen, 1, ENC_BIG_ENDIAN);
        clen++;

        /* Forward QoS Sub Blob */
        if (sub_blob_len != 0) {
            proto_tree_add_item(entry_tree, hf_a11_fqui_updated_qos, tvb,
                                offset + clen, sub_blob_len, ENC_NA);
            clen += sub_blob_len;
        }
    }
}
/* Code to dissect Reverse QoS Update Info */
/*
 * Dissects the Reverse QoS Update Info blob.  Wire layout mirrors the
 * forward direction: a one-byte flow count, then per flow a one-byte flow
 * id, a one-byte sub-blob length, and the updated-QoS sub-blob.
 */
static void
dissect_rev_qosupdate_info(tvbuff_t *tvb, int offset, proto_tree *ext_tree)
{
    int    clen = 0;    /* consumed length */
    guint8 flow_count;
    guint8 i;

    /* Flow Count */
    flow_count = tvb_get_guint8(tvb, offset + clen);
    proto_tree_add_item(ext_tree, hf_a11_rqui_flowcount, tvb, offset + clen, 1, ENC_BIG_ENDIAN);
    clen++;

    for (i = 0; i < flow_count; i++) {
        proto_tree *entry_tree;
        guint8      sub_blob_len;
        guint8      flow_id = tvb_get_guint8(tvb, offset + clen);

        /* One subtree per reverse flow entry */
        entry_tree = proto_tree_add_subtree_format(ext_tree, tvb, offset + clen, 1,
                                                   ett_a11_rqui_flowentry, NULL,
                                                   "Reverse Flow Entry (Flow Id: %d)", flow_id);
        clen++;

        /* Reverse QoS Sub Blob Length */
        sub_blob_len = tvb_get_guint8(tvb, offset + clen);
        proto_tree_add_item(entry_tree, hf_a11_rqui_updated_qoslen, tvb,
                            offset + clen, 1, ENC_BIG_ENDIAN);
        clen++;

        /* Reverse QoS Sub Blob */
        if (sub_blob_len != 0) {
            proto_tree_add_item(entry_tree, hf_a11_rqui_updated_qos, tvb,
                                offset + clen, sub_blob_len, ENC_NA);
            clen += sub_blob_len;
        }
    }
}
/* Code to dissect extensions */
/*
 * dissect_a11_extensions:
 * Walks the chain of Mobile-IP-style extensions that follows the A11 fixed
 * header, adding one subtree per extension until the tvb is exhausted.
 *
 * Header layouts differ by extension type:
 *  - GEN_AUTH_EXT:        type(1) subtype(1) length(2)          -> hdrLen 4
 *  - CVSE_EXT/OLD_CVSE:   type(1) reserved(1) length(2),
 *                         apptype read at offset+8 (after VID)  -> hdrLen 4
 *  - everything else:     type(1) length(1)                     -> hdrLen 2
 *
 * ext_len always counts the bytes FOLLOWING the header; the loop advances
 * by the header as it dissects it and by ext_len at the bottom.
 */
static void
dissect_a11_extensions( tvbuff_t *tvb, packet_info *pinfo, int offset, proto_tree *tree)
{
proto_tree *exts_tree;
proto_tree *ext_tree;
proto_item *ext_len_item = NULL;
guint ext_len;
guint8 ext_type;
guint8 ext_subtype = 0;
guint hdrLen;
gint16 apptype = -1;
/* Add our tree, if we have extensions */
exts_tree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_a11_exts, NULL, "Extensions");
/* And, handle each extension */
while (tvb_reported_length_remaining(tvb, offset) > 0) {
/* Get our extension info */
ext_type = tvb_get_guint8(tvb, offset);
if (ext_type == GEN_AUTH_EXT) {
/*
* Very nasty . . breaks normal extensions, since the length is
* in the wrong place :(
*/
ext_subtype = tvb_get_guint8(tvb, offset + 1);
ext_len = tvb_get_ntohs(tvb, offset + 2);
hdrLen = 4;
} else if ((ext_type == CVSE_EXT) || (ext_type == OLD_CVSE_EXT)) {
ext_len = tvb_get_ntohs(tvb, offset + 2);
/* CVSE sub-type lives after the 4-byte Vendor ID, i.e. at offset+8 */
ext_subtype = tvb_get_guint8(tvb, offset + 8);
hdrLen = 4;
} else {
ext_len = tvb_get_guint8(tvb, offset + 1);
hdrLen = 2;
}
ext_tree = proto_tree_add_subtree_format(exts_tree, tvb, offset, ext_len + hdrLen,
ett_a11_ext, NULL, "Extension: %s",
val_to_str_ext(ext_type, &a11_ext_types_ext,
"Unknown Extension %u"));
proto_tree_add_uint(ext_tree, hf_a11_ext_type, tvb, offset, 1, ext_type);
offset += 1;
/* Consume the rest of the header; GEN_AUTH_EXT's length field is
 * displayed inside its own case below instead. */
if (ext_type == SS_EXT) {
ext_len_item = proto_tree_add_uint(ext_tree, hf_a11_ext_len, tvb, offset, 1, ext_len);
offset += 1;
}
else if ((ext_type == CVSE_EXT) || (ext_type == OLD_CVSE_EXT)) {
offset += 1;
ext_len_item = proto_tree_add_uint(ext_tree, hf_a11_ext_len, tvb, offset, 2, ext_len);
offset += 2;
}
else if (ext_type != GEN_AUTH_EXT) {
/* Another nasty hack since GEN_AUTH_EXT broke everything */
ext_len_item = proto_tree_add_uint(ext_tree, hf_a11_ext_len, tvb, offset, 1, ext_len);
offset += 1;
}
switch (ext_type) {
case SS_EXT:
/* decode_sse() consumes the whole payload; zero ext_len so the
 * bottom-of-loop advance does not double-count it. */
decode_sse(ext_tree, pinfo, tvb, offset, ext_len, ext_len_item);
offset += ext_len;
ext_len = 0;
break;
case MH_AUTH_EXT:
case MF_AUTH_EXT:
case FH_AUTH_EXT:
case RU_AUTH_EXT:
/* All these extensions look the same. 4 byte SPI followed by a key */
if (ext_len < 4)
break;
proto_tree_add_item(ext_tree, hf_a11_aext_spi, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
ext_len -= 4;
if (ext_len == 0)
break;
proto_tree_add_item(ext_tree, hf_a11_aext_auth, tvb, offset, ext_len,
ENC_NA);
break;
case MN_NAI_EXT:
if (ext_len == 0)
break;
/*
* RFC 2486 speaks only of ASCII; RFC 4282 expands that to
* UTF-8.
*/
proto_tree_add_item(ext_tree, hf_a11_next_nai, tvb, offset,
ext_len, ENC_UTF_8|ENC_NA);
break;
case GEN_AUTH_EXT: /* RFC 3012 */
/*
* Very nasty . . breaks normal extensions, since the length is
* in the wrong place :(
*/
proto_tree_add_uint(ext_tree, hf_a11_ext_stype, tvb, offset, 1, ext_subtype);
offset += 1;
proto_tree_add_uint(ext_tree, hf_a11_ext_len, tvb, offset, 2, ext_len);
offset += 2;
/* SPI */
if (ext_len < 4)
break;
proto_tree_add_item(ext_tree, hf_a11_aext_spi, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
ext_len -= 4;
/* Key */
if (ext_len == 0)
break;
proto_tree_add_item(ext_tree, hf_a11_aext_auth, tvb, offset,
ext_len, ENC_NA);
break;
case OLD_CVSE_EXT: /* RFC 3115 */
case CVSE_EXT: /* RFC 3115 */
if (ext_len < 4)
break;
proto_tree_add_item(ext_tree, hf_a11_vse_vid, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
ext_len -= 4;
if (ext_len < 2)
break;
apptype = tvb_get_ntohs(tvb, offset);
proto_tree_add_uint(ext_tree, hf_a11_vse_apptype, tvb, offset, 2, apptype);
offset += 2;
ext_len -= 2;
/* apptype 0x0101: embedded RADIUS-style AVP payload */
if (apptype == 0x0101) {
if (tvb_reported_length_remaining(tvb, offset) > 0) {
dissect_a11_radius(tvb, pinfo, offset, ext_tree, ext_len + 2);
}
}
break;
case OLD_NVSE_EXT: /* RFC 3115 */
case NVSE_EXT: /* RFC 3115 */
if (ext_len < 6)
break;
/* Vendor ID sits after a 2-byte reserved field, hence offset+2 */
proto_tree_add_item(ext_tree, hf_a11_vse_vid, tvb, offset+2, 4, ENC_BIG_ENDIAN);
offset += 6;
ext_len -= 6;
/* NOTE(review): the apptype item is added BEFORE the ext_len < 2
 * check below, unlike the CVSE branch above which checks first —
 * verify this ordering is intentional. */
proto_tree_add_item(ext_tree, hf_a11_vse_apptype, tvb, offset, 2, ENC_BIG_ENDIAN);
if (ext_len < 2)
break;
apptype = tvb_get_ntohs(tvb, offset);
offset += 2;
ext_len -= 2;
/* Dispatch on the 3GPP2 NVSE application type */
switch (apptype) {
case 0x0401:
if (ext_len < 5)
break;
proto_tree_add_item(ext_tree, hf_a11_vse_panid, tvb, offset, 5, ENC_NA);
offset += 5;
ext_len -= 5;
if (ext_len < 5)
break;
proto_tree_add_item(ext_tree, hf_a11_vse_canid, tvb, offset, 5, ENC_NA);
break;
case 0x0501:
if (ext_len < 4)
break;
proto_tree_add_item(ext_tree, hf_a11_vse_ppaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
break;
case 0x0601:
if (ext_len < 2)
break;
proto_tree_add_item(ext_tree, hf_a11_vse_dormant, tvb, offset, 2, ENC_BIG_ENDIAN);
break;
case 0x0602:
/* eHRPD Mode */
if (ext_len < 1)
break;
proto_tree_add_item(ext_tree, hf_a11_vse_ehrpd_mode, tvb, offset, 1, ENC_BIG_ENDIAN);
break;
case 0x0603:
/* eHRPD Indicators */
if (ext_len < 1)
break;
/* Three single-bit flags packed into the same octet */
proto_tree_add_item(ext_tree, hf_a11_vse_ehrpd_pmk, tvb, offset, 1, ENC_BIG_ENDIAN);
proto_tree_add_item(ext_tree, hf_a11_vse_ehrpd_handoff_info, tvb, offset, 1, ENC_BIG_ENDIAN);
proto_tree_add_item(ext_tree, hf_a11_vse_ehrpd_tunnel_mode, tvb, offset, 1, ENC_BIG_ENDIAN);
break;
case 0x0701:
if (ext_len < 1)
break;
proto_tree_add_item(ext_tree, hf_a11_vse_code, tvb, offset, 1, ENC_BIG_ENDIAN);
break;
case 0x0801:
if (ext_len < 1)
break;
proto_tree_add_item(ext_tree, hf_a11_vse_pdit, tvb, offset, 1, ENC_BIG_ENDIAN);
break;
case 0x0802:
proto_tree_add_item(ext_tree, hf_a11_vse_session_parameter, tvb, offset, -1, ENC_NA);
break;
case 0x0803:
proto_tree_add_item(ext_tree, hf_a11_vse_qosmode, tvb, offset, 1, ENC_BIG_ENDIAN);
break;
case 0x0901:
if (ext_len < 2)
break;
proto_tree_add_item(ext_tree, hf_a11_vse_srvopt, tvb, offset, 2, ENC_BIG_ENDIAN);
break;
case 0x0C01:
dissect_ase(tvb, offset, ext_len, ext_tree);
break;
case 0x0D01:
dissect_fwd_qosinfo(tvb, offset, ext_tree);
break;
case 0x0D02:
dissect_rev_qosinfo(tvb, offset, ext_tree);
break;
case 0x0D03:
dissect_subscriber_qos_profile(tvb, pinfo, offset, ext_len, ext_tree);
break;
case 0x0DFE:
dissect_fwd_qosupdate_info(tvb, offset, ext_tree);
break;
case 0x0DFF:
dissect_rev_qosupdate_info(tvb, offset, ext_tree);
break;
}
break;
case BCMCS_EXT:
/* Like SS_EXT: fully consumed here, so zero ext_len afterwards */
decode_bcmcs(ext_tree, pinfo, tvb, offset, ext_len, ext_len_item);
offset += ext_len;
ext_len = 0;
break;
case MF_CHALLENGE_EXT: /* RFC 3012 */
/* The default dissector is good here. The challenge is all hex anyway. */
default:
proto_tree_add_item(ext_tree, hf_a11_ext, tvb, offset, ext_len, ENC_NA);
break;
} /* ext type */
/* Skip whatever payload the case above did not explicitly consume
 * (cases that consumed everything have set ext_len to 0). */
offset += ext_len;
} /* while data remaining */
} /* dissect_a11_extensions */
/* Code to actually dissect the packets */
/*
 * dissect_a11:
 * Top-level dissector for 3GPP2 A11 (Mobile-IP-derived) signalling.
 * Heuristically rejects non-A11 data (unknown first byte), sets the
 * protocol/info columns, dissects the fixed-length header appropriate to
 * the message type, and finally hands any trailing bytes to
 * dissect_a11_extensions().
 *
 * Returns the number of bytes dissected, or 0 if the message is rejected.
 *
 * Fix: the old code re-assigned registration_request_msg = 1 inside
 * "case REGISTRATION_REQUEST" even though the if/else before the switch
 * already sets it for every message type; the duplicate assignment has
 * been removed.
 */
static int
dissect_a11( tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
{
    /* Set up structures we will need to add the protocol subtree and manage it */
    proto_item *ti;
    proto_tree *a11_tree = NULL;
    guint8      type;
    guint       offset   = 0;

    if (!tvb_bytes_exist(tvb, offset, 1))
        return 0;       /* not enough data to check message type */

    type = tvb_get_guint8(tvb, offset);
    if (try_val_to_str_ext(type, &a11_types_ext) == NULL)
        return 0;       /* not a known message type */

    /* Make entries in Protocol column and Info column on summary display */
    col_set_str(pinfo->cinfo, COL_PROTOCOL, "3GPP2 A11");
    col_clear(pinfo->cinfo, COL_INFO);

    /* Record the message direction for extension dissection; this covers
     * every message type, so the individual cases below need not set it. */
    if (type == REGISTRATION_REQUEST)
        registration_request_msg = 1;
    else
        registration_request_msg = 0;

    switch (type) {
    case REGISTRATION_REQUEST:
        col_add_fstr(pinfo->cinfo, COL_INFO, "Reg Request: PDSN=%s PCF=%s",
                     tvb_ip_to_str(tvb, 8),
                     tvb_ip_to_str(tvb, 12));
        if (tree) {
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* flags */
            proto_tree_add_bitmask(a11_tree, tvb, offset, hf_a11_flags,
                                   ett_a11_flags, a11_flags, ENC_NA);
            offset += 1;

            /* lifetime */
            proto_tree_add_item(a11_tree, hf_a11_life, tvb, offset, 2, ENC_BIG_ENDIAN);
            offset += 2;

            /* home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* home agent address */
            proto_tree_add_item(a11_tree, hf_a11_haaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Care of Address */
            proto_tree_add_item(a11_tree, hf_a11_coa, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case REGISTRATION_REPLY:
        col_add_fstr(pinfo->cinfo, COL_INFO, "Reg Reply: PDSN=%s, Code=%u",
                     tvb_ip_to_str(tvb, 8), tvb_get_guint8(tvb,1));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reply Code */
            proto_tree_add_item(a11_tree, hf_a11_code, tvb, offset, 1, ENC_BIG_ENDIAN);
            offset += 1;

            /* Registration Lifetime */
            proto_tree_add_item(a11_tree, hf_a11_life, tvb, offset, 2, ENC_BIG_ENDIAN);
            offset += 2;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Home Agent Address */
            proto_tree_add_item(a11_tree, hf_a11_haaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case REGISTRATION_UPDATE:
        col_add_fstr(pinfo->cinfo, COL_INFO,"Reg Update: PDSN=%s",
                     tvb_ip_to_str(tvb, 8));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reserved */
            offset += 3;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Home Agent Address */
            proto_tree_add_item(a11_tree, hf_a11_haaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case REGISTRATION_ACK:
        col_add_fstr(pinfo->cinfo, COL_INFO, "Reg Ack: PCF=%s Status=%u",
                     tvb_ip_to_str(tvb, 8),
                     tvb_get_guint8(tvb,3));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reserved */
            offset += 2;

            /* Ack Status */
            proto_tree_add_item(a11_tree, hf_a11_status, tvb, offset, 1, ENC_BIG_ENDIAN);
            offset += 1;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Care of Address */
            proto_tree_add_item(a11_tree, hf_a11_coa, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case SESSION_UPDATE: /* IOS4.3 */
        col_add_fstr(pinfo->cinfo, COL_INFO,"Ses Update: PDSN=%s",
                     tvb_ip_to_str(tvb, 8));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reserved */
            offset += 3;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Home Agent Address */
            proto_tree_add_item(a11_tree, hf_a11_haaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case SESSION_ACK: /* IOS4.3 */
        col_add_fstr(pinfo->cinfo, COL_INFO, "Ses Upd Ack: PCF=%s, Status=%u",
                     tvb_ip_to_str(tvb, 8),
                     tvb_get_guint8(tvb,3));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reserved */
            offset += 2;

            /* Ack Status */
            proto_tree_add_item(a11_tree, hf_a11_status, tvb, offset, 1, ENC_BIG_ENDIAN);
            offset += 1;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Care of Address */
            proto_tree_add_item(a11_tree, hf_a11_coa, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case CAPABILITIES_INFO: /* IOS5.1 */
        col_add_fstr(pinfo->cinfo, COL_INFO, "Cap Info: PDSN=%s, PCF=%s",
                     tvb_ip_to_str(tvb, 8),
                     tvb_ip_to_str(tvb, 12));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reserved */
            offset += 3;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Home Agent Address */
            proto_tree_add_item(a11_tree, hf_a11_haaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Care of Address */
            proto_tree_add_item(a11_tree, hf_a11_coa, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case CAPABILITIES_INFO_ACK: /* IOS5.1 */
        col_add_fstr(pinfo->cinfo, COL_INFO, "Cap Info Ack: PCF=%s",
                     tvb_ip_to_str(tvb, 8));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reserved */
            offset += 3;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Care of Address */
            proto_tree_add_item(a11_tree, hf_a11_coa, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case BC_SERVICE_REQUEST:
        col_add_fstr(pinfo->cinfo, COL_INFO, "Service Request: PCF=%s ",
                     tvb_ip_to_str(tvb, offset + 8));
        if (tree) {
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* type (followed by 3 reserved bytes) */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 4;

            /* home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Care-of-Address */
            proto_tree_add_item(a11_tree, hf_a11_coa, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case BC_SERVICE_REPLY:
        col_add_fstr(pinfo->cinfo, COL_INFO, "Service Response: BSN=%s ",
                     tvb_ip_to_str(tvb, offset + 8));
        if (tree) {
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* type (followed by 2 reserved bytes) */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 3;

            /* Reply Code */
            proto_tree_add_item(a11_tree, hf_a11_code, tvb, offset, 1, ENC_BIG_ENDIAN);
            offset += 1;

            /* home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Home Agent */
            proto_tree_add_item(a11_tree, hf_a11_haaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case BC_REGISTRATION_REQUEST:
        col_add_fstr(pinfo->cinfo, COL_INFO, "BC Reg Request: BSN=%s ",
                     tvb_ip_to_str(tvb, offset + 8));
        if (tree) {
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* flags */
            proto_tree_add_bitmask(a11_tree, tvb, offset, hf_a11_flags,
                                   ett_a11_flags, a11_flags, ENC_NA);
            offset += 1;

            /* lifetime */
            proto_tree_add_item(a11_tree, hf_a11_life, tvb, offset, 2, ENC_BIG_ENDIAN);
            offset += 2;

            /* home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Home Agent */
            proto_tree_add_item(a11_tree, hf_a11_haaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Care-of-Address */
            proto_tree_add_item(a11_tree, hf_a11_coa, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case BC_REGISTRATION_REPLY:
        col_add_fstr(pinfo->cinfo, COL_INFO, "BC Reg Reply: BSN=%s, Code=%u",
                     tvb_ip_to_str(tvb, offset + 8),
                     tvb_get_guint8(tvb, offset + 1));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reply Code */
            proto_tree_add_item(a11_tree, hf_a11_code, tvb, offset, 1, ENC_BIG_ENDIAN);
            offset += 1;

            /* Registration Lifetime */
            proto_tree_add_item(a11_tree, hf_a11_life, tvb, offset, 2, ENC_BIG_ENDIAN);
            offset += 2;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Home Agent */
            proto_tree_add_item(a11_tree, hf_a11_haaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case BC_REGISTRATION_UPDATE:
        col_add_fstr(pinfo->cinfo, COL_INFO,"BC Reg Update: BSN=%s",
                     tvb_ip_to_str(tvb, offset + 8));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reserved */
            offset += 3;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Home Agent */
            proto_tree_add_item(a11_tree, hf_a11_haaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    case BC_REGISTRATION_ACK:
        col_add_fstr(pinfo->cinfo, COL_INFO, "BC Reg Acknowledge: PCF=%s Status=%u",
                     tvb_ip_to_str(tvb, offset + 8),
                     tvb_get_guint8(tvb, offset + 3));
        if (tree) {
            /* Add Subtree */
            ti = proto_tree_add_item(tree, proto_a11, tvb, offset, -1, ENC_NA);
            a11_tree = proto_item_add_subtree(ti, ett_a11);

            /* Type */
            proto_tree_add_uint(a11_tree, hf_a11_type, tvb, offset, 1, type);
            offset += 1;

            /* Reserved */
            offset += 2;

            /* Ack Status */
            proto_tree_add_item(a11_tree, hf_a11_status, tvb, offset, 1, ENC_BIG_ENDIAN);
            offset += 1;

            /* Home address */
            proto_tree_add_item(a11_tree, hf_a11_homeaddr, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Care-of-Address */
            proto_tree_add_item(a11_tree, hf_a11_coa, tvb, offset, 4, ENC_BIG_ENDIAN);
            offset += 4;

            /* Identifier - assumed to be an NTP time here */
            proto_tree_add_item(a11_tree, hf_a11_ident, tvb, offset, 8, ENC_TIME_NTP|ENC_BIG_ENDIAN);
            offset += 8;
        } /* if tree */
        break;
    default:
        /* try_val_to_str_ext() above guarantees a known type */
        DISSECTOR_ASSERT_NOT_REACHED();
        break;
    } /* End switch */

    /* Anything left after the fixed header is an extension chain */
    if (tree && a11_tree) {
        if (tvb_reported_length_remaining(tvb, offset) > 0)
            dissect_a11_extensions(tvb, pinfo, offset, a11_tree);
    }
    return tvb_reported_length(tvb);
} /* dissect_a11 */
/* Register the protocol with Wireshark */
void
proto_register_a11(void)
{
/* Setup list of header fields */
static hf_register_info hf[] = {
{ &hf_a11_type,
{ "Message Type", "a11.type",
FT_UINT8, BASE_DEC | BASE_EXT_STRING, &a11_types_ext, 0,
"A11 Message Type", HFILL }
},
{ &hf_a11_flags,
{ "Flags", "a11.flags",
FT_UINT8, BASE_HEX, NULL, 0x0,
NULL, HFILL}
},
{ &hf_a11_s,
{ "Simultaneous Bindings", "a11.s",
FT_BOOLEAN, 8, NULL, 128,
"Simultaneous Bindings Allowed", HFILL }
},
{ &hf_a11_b,
{ "Broadcast Datagrams", "a11.b",
FT_BOOLEAN, 8, NULL, 64,
"Broadcast Datagrams requested", HFILL }
},
{ &hf_a11_d,
{ "Co-located Care-of Address", "a11.d",
FT_BOOLEAN, 8, NULL, 32,
"MN using Co-located Care-of address", HFILL }
},
{ &hf_a11_m,
{ "Minimal Encapsulation", "a11.m",
FT_BOOLEAN, 8, NULL, 16,
"MN wants Minimal encapsulation", HFILL }
},
{ &hf_a11_g,
{ "GRE", "a11.g",
FT_BOOLEAN, 8, NULL, 8,
"MN wants GRE encapsulation", HFILL }
},
{ &hf_a11_v,
{ "Van Jacobson", "a11.v",
FT_BOOLEAN, 8, NULL, 4,
NULL, HFILL }
},
{ &hf_a11_t,
{ "Reverse Tunneling", "a11.t",
FT_BOOLEAN, 8, NULL, 2,
"Reverse tunneling requested", HFILL }
},
{ &hf_a11_code,
{ "Reply Code", "a11.code",
FT_UINT8, BASE_DEC | BASE_EXT_STRING, &a11_reply_codes_ext, 0,
"A11 Registration Reply code", HFILL }
},
{ &hf_a11_status,
{ "Reply Status", "a11.ackstat",
FT_UINT8, BASE_DEC | BASE_EXT_STRING, &a11_ack_status_ext, 0,
"A11 Registration Ack Status", HFILL }
},
{ &hf_a11_life,
{ "Lifetime", "a11.life",
FT_UINT16, BASE_DEC, NULL, 0,
"A11 Registration Lifetime", HFILL }
},
{ &hf_a11_homeaddr,
{ "Home Address", "a11.homeaddr",
FT_IPv4, BASE_NONE, NULL, 0,
"Mobile Node's home address", HFILL }
},
{ &hf_a11_haaddr,
{ "Home Agent", "a11.haaddr",
FT_IPv4, BASE_NONE, NULL, 0,
"Home agent IP Address", HFILL }
},
{ &hf_a11_coa,
{ "Care of Address", "a11.coa",
FT_IPv4, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ident,
{ "Identification", "a11.ident",
FT_ABSOLUTE_TIME, ABSOLUTE_TIME_UTC, NULL, 0,
"MN Identification", HFILL }
},
{ &hf_a11_ext_type,
{ "Extension Type", "a11.ext.type",
FT_UINT8, BASE_DEC | BASE_EXT_STRING, &a11_ext_types_ext, 0,
"Mobile IP Extension Type", HFILL }
},
{ &hf_a11_ext_stype,
{ "Gen Auth Ext SubType", "a11.ext.auth.subtype",
FT_UINT8, BASE_DEC, VALS(a11_ext_stypes), 0,
"Mobile IP Auth Extension Sub Type", HFILL }
},
{ &hf_a11_ext_len,
{ "Extension Length", "a11.ext.len",
FT_UINT16, BASE_DEC, NULL, 0,
"Mobile IP Extension Length", HFILL }
},
{ &hf_a11_ext,
{ "Extension", "a11.extension",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_aext_spi,
{ "SPI", "a11.auth.spi",
FT_UINT32, BASE_HEX, NULL, 0,
"Authentication Header Security Parameter Index", HFILL }
},
{ &hf_a11_aext_auth,
{ "Authenticator", "a11.auth.auth",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_next_nai,
{ "NAI", "a11.nai",
FT_STRING, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ses_key,
{ "Key", "a11.ext.key",
FT_UINT32, BASE_HEX, NULL, 0,
"Session Key", HFILL }
},
{ &hf_a11_ses_sidver,
{ "Session ID Version", "a11.ext.sidver",
FT_UINT8, BASE_DEC, NULL, 3,
NULL, HFILL}
},
{ &hf_a11_ses_mnsrid,
{ "MNSR-ID", "a11.ext.mnsrid",
FT_UINT16, BASE_HEX, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ses_msid_type,
{ "MSID Type", "a11.ext.msid_type",
FT_UINT16, BASE_DEC, VALS(a11_ses_msid_type_vals), 0,
NULL, HFILL }
},
{ &hf_a11_ses_msid_len,
{ "MSID Length", "a11.ext.msid_len",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ses_msid,
{ "MSID(BCD)", "a11.ext.msid",
FT_STRING, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ses_ptype,
{ "Protocol Type", "a11.ext.ptype",
FT_UINT16, BASE_HEX, VALS(a11_ses_ptype_vals), 0,
NULL, HFILL }
},
{ &hf_a11_vse_vid,
{ "Vendor ID", "a11.ext.vid",
FT_UINT32, BASE_HEX|BASE_EXT_STRING, &sminmpec_values_ext, 0,
NULL, HFILL }
},
{ &hf_a11_vse_apptype,
{ "Application Type", "a11.ext.apptype",
FT_UINT8, BASE_HEX | BASE_EXT_STRING, &a11_ext_app_ext, 0,
NULL, HFILL }
},
{ &hf_a11_vse_ppaddr,
{ "Anchor P-P Address", "a11.ext.ppaddr",
FT_IPv4, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_vse_dormant,
{ "All Dormant Indicator", "a11.ext.dormant",
FT_UINT16, BASE_HEX, VALS(a11_ext_dormant), 0,
NULL, HFILL }
},
{ &hf_a11_vse_ehrpd_mode,
{ "eHRPD Mode", "a11.ext.ehrpd.mode",
FT_BOOLEAN, 8, TFS(&a11_tfs_ehrpd_mode), 0,
NULL, HFILL }
},
{ &hf_a11_vse_ehrpd_pmk,
{ "PMK", "a11.ext.ehrpd.pmk",
FT_BOOLEAN, 8, TFS(&a11_tfs_ehrpd_pmk), 0x04,
NULL, HFILL }
},
{ &hf_a11_vse_ehrpd_handoff_info,
{ "E-UTRAN Handoff Info", "a11.ext.ehrpd.handoff_info",
FT_BOOLEAN, 8, TFS(&a11_tfs_ehrpd_handoff_info), 0x02,
NULL, HFILL }
},
{ &hf_a11_vse_ehrpd_tunnel_mode,
{ "Tunnel Mode", "a11.ext.ehrpd.tunnel_mode",
FT_BOOLEAN, 8, TFS(&a11_tfs_ehrpd_tunnel_mode), 0x01,
NULL, HFILL }
},
{ &hf_a11_vse_code,
{ "Reply Code", "a11.ext.code",
FT_UINT8, BASE_DEC | BASE_EXT_STRING, &a11_reply_codes_ext, 0,
NULL, HFILL }
},
/* XXX: Is this the correct filter name ?? */
{ &hf_a11_vse_pdit,
{ "PDSN Code", "a11.ext.code",
FT_UINT8, BASE_HEX, VALS(a11_ext_nvose_pdsn_code), 0,
NULL, HFILL }
},
{ &hf_a11_vse_session_parameter,
{ "Session Parameter - Always On", "a11.ext.session_parameter",
FT_NONE, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_vse_srvopt,
{ "Service Option", "a11.ext.srvopt",
FT_UINT16, BASE_HEX, VALS(a11_ext_nvose_srvopt), 0,
NULL, HFILL }
},
{ &hf_a11_vse_panid,
{ "PANID", "a11.ext.panid",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_vse_canid,
{ "CANID", "a11.ext.canid",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_vse_qosmode,
{ "QoS Mode", "a11.ext.qosmode",
FT_UINT8, BASE_HEX, VALS(a11_ext_nvose_qosmode), 0,
NULL, HFILL }
},
{ &hf_a11_ase_len_type,
{ "Entry Length", "a11.ext.ase.len",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_srid_type,
{ "Service Reference ID (SRID)", "a11.ext.ase.srid",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_servopt_type,
{ "Service Option", "a11.ext.ase.srvopt",
FT_UINT16, BASE_HEX, VALS(a11_ext_nvose_srvopt), 0,
NULL, HFILL }
},
{ &hf_a11_ase_gre_proto_type,
{ "GRE Protocol Type", "a11.ext.ase.ptype",
FT_UINT16, BASE_HEX, VALS(a11_ses_ptype_vals), 0,
NULL, HFILL }
},
{ &hf_a11_ase_gre_key,
{ "GRE Key", "a11.ext.ase.key",
FT_UINT32, BASE_HEX, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_pcf_addr_key,
{ "PCF IP Address", "a11.ext.ase.pcfip",
FT_IPv4, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_fqi_srid,
{ "SRID", "a11.ext.fqi.srid",
FT_UINT8, BASE_DEC, NULL, 0,
"Forward Flow Entry SRID", HFILL }
},
{ &hf_a11_fqi_flags,
{ "Flags", "a11.ext.fqi.flags",
FT_UINT8, BASE_HEX, NULL, 0,
"Forward Flow Entry Flags", HFILL }
},
{ &hf_a11_fqi_flags_ip_flow,
{ "IP Flow Discriminator", "a11.ext.fqi.flags.ip_flow",
FT_BOOLEAN, 8, TFS(&tfs_enabled_disabled), A11_FQI_IPFLOW_DISC_ENABLED,
NULL, HFILL }
},
{ &hf_a11_fqi_flags_dscp,
{ "DSCP", "a11.ext.fqi.flags.dscp",
FT_BOOLEAN, 8, TFS(&tfs_included_not_included), A11_FQI_DSCP_INCLUDED,
NULL, HFILL }
},
{ &hf_a11_fqi_entry_flag,
{ "DSCP and Flow State", "a11.ext.fqi.entry_flag",
FT_UINT8, BASE_HEX, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_fqi_entry_flag_dscp,
{ "DSCP", "a11.ext.fqi.entry_flag.dscp",
FT_UINT8, BASE_HEX, NULL, 0x7E,
NULL, HFILL }
},
{ &hf_a11_fqi_entry_flag_flow_state,
{ "Flow State", "a11.ext.fqi.entry_flag.flow_state",
FT_BOOLEAN, 8, TFS(&tfs_active_inactive), 0x01,
NULL, HFILL }
},
{ &hf_a11_fqi_flowcount,
{ "Forward Flow Count", "a11.ext.fqi.flowcount",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_fqi_flowid,
{ "Forward Flow Id", "a11.ext.fqi.flowid",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_fqi_entrylen,
{ "Entry Length", "a11.ext.fqi.entrylen",
FT_UINT8, BASE_DEC, NULL, 0,
"Forward Entry Length", HFILL }
},
#if 0
{ &hf_a11_fqi_flowstate,
{ "Forward Flow State", "a11.ext.fqi.flowstate",
FT_UINT8, BASE_HEX, NULL, 0,
NULL, HFILL }
},
#endif
{ &hf_a11_fqi_requested_qoslen,
{ "Requested QoS Length", "a11.ext.fqi.reqqoslen",
FT_UINT8, BASE_DEC, NULL, 0,
"Forward Requested QoS Length", HFILL }
},
{ &hf_a11_fqi_flow_priority,
{ "Flow Priority", "a11.ext.fqi.flow_priority",
FT_UINT8, BASE_DEC, NULL, 0xF0,
NULL, HFILL }
},
{ &hf_a11_fqi_num_qos_attribute_set,
{ "Number of QoS Attribute Sets", "a11.ext.fqi.num_qos_attribute_set",
FT_UINT8, BASE_DEC, NULL, 0x0E,
NULL, HFILL }
},
{ &hf_a11_fqi_qos_attribute_setlen,
{ "QoS Attribute Set Length", "a11.ext.fqi.qos_attribute_setlen",
FT_UINT16, BASE_DEC, NULL, 0x01E0,
NULL, HFILL }
},
{ &hf_a11_fqi_qos_attribute_setid,
{ "QoS Attribute SetID", "a11.ext.fqi.qos_attribute_setid",
FT_UINT16, BASE_DEC, NULL, 0x1FC0,
NULL, HFILL }
},
{ &hf_a11_fqi_verbose,
{ "Verbose", "a11.ext.fqi.verbose",
FT_UINT8, BASE_DEC, NULL, 0x20,
NULL, HFILL }
},
{ &hf_a11_fqi_flow_profileid,
{ "Flow Profile Id", "a11.ext.fqi.flow_profileid",
FT_UINT24, BASE_DEC, NULL, 0x1FFFE0,
NULL, HFILL }
},
{ &hf_a11_fqi_qos_granted_attribute_setid,
{ "QoS Attribute SetID", "a11.ext.fqi.qos_granted_attribute_setid",
FT_UINT8, BASE_DEC, NULL, 0xFE,
NULL, HFILL }
},
{ &hf_a11_fqi_granted_qoslen,
{ "Granted QoS Length", "a11.ext.fqi.graqoslen",
FT_UINT8, BASE_DEC, NULL, 0,
"Forward Granted QoS Length", HFILL }
},
{ &hf_a11_rqi_flow_priority,
{ "Flow Priority", "a11.ext.rqi.flow_priority",
FT_UINT8, BASE_DEC, NULL, 0xF0,
NULL, HFILL }
},
{ &hf_a11_rqi_num_qos_attribute_set,
{ "Number of QoS Attribute Sets", "a11.ext.rqi.num_qos_attribute_set",
FT_UINT8, BASE_DEC, NULL, 0x0E,
NULL, HFILL }
},
{ &hf_a11_rqi_qos_attribute_setlen,
{ "QoS Attribute Set Length", "a11.ext.rqi.qos_attribute_setlen",
FT_UINT16, BASE_DEC, NULL, 0x01E0,
NULL, HFILL }
},
{ &hf_a11_rqi_qos_attribute_setid,
{ "QoS Attribute SetID", "a11.ext.rqi.qos_attribute_setid",
FT_UINT16, BASE_DEC, NULL, 0x1FC0,
NULL, HFILL }
},
{ &hf_a11_rqi_verbose,
{ "Verbose", "a11.ext.rqi.verbose",
FT_UINT8, BASE_DEC, NULL, 0x20,
NULL, HFILL }
},
{ &hf_a11_rqi_flow_profileid,
{ "Flow Profile Id", "a11.ext.rqi.flow_profileid",
FT_UINT24, BASE_DEC, NULL, 0x1FFFE0,
NULL, HFILL }
},
{ &hf_a11_rqi_qos_granted_attribute_setid,
{ "QoS Attribute SetID", "a11.ext.rqi.qos_granted_attribute_setid",
FT_UINT8, BASE_DEC, NULL, 0xFE,
NULL, HFILL }
},
{ &hf_a11_rqi_srid,
{ "SRID", "a11.ext.rqi.srid",
FT_UINT8, BASE_DEC, NULL, 0,
"Reverse Flow Entry SRID", HFILL }
},
{ &hf_a11_rqi_flowcount,
{ "Reverse Flow Count", "a11.ext.rqi.flowcount",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_rqi_flowid,
{ "Reverse Flow Id", "a11.ext.rqi.flowid",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_rqi_entrylen,
{ "Entry Length", "a11.ext.rqi.entrylen",
FT_UINT8, BASE_DEC, NULL, 0,
"Reverse Flow Entry Length", HFILL }
},
{ &hf_a11_rqi_entry_flag,
{ "Flags", "a11.ext.rqi.entry_flag",
FT_UINT8, BASE_HEX, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_rqi_entry_flag_flow_state,
{ "Flow State", "a11.ext.rqi.entry_flag.flow_state",
FT_BOOLEAN, 8, TFS(&tfs_active_inactive), 0x01,
NULL, HFILL }
},
#if 0
{ &hf_a11_rqi_flowstate,
{ "Flow State", "a11.ext.rqi.flowstate",
FT_UINT8, BASE_HEX, NULL, 0,
"Reverse Flow State", HFILL }
},
#endif
{ &hf_a11_rqi_requested_qoslen,
{ "Requested QoS Length", "a11.ext.rqi.reqqoslen",
FT_UINT8, BASE_DEC, NULL, 0,
"Reverse Requested QoS Length", HFILL }
},
#if 0
{ &hf_a11_rqi_requested_qos,
{ "Requested QoS", "a11.ext.rqi.reqqos",
FT_BYTES, BASE_NONE, NULL, 0,
"Reverse Requested QoS", HFILL }
},
#endif
{ &hf_a11_rqi_granted_qoslen,
{ "Granted QoS Length", "a11.ext.rqi.graqoslen",
FT_UINT8, BASE_DEC, NULL, 0,
"Reverse Granted QoS Length", HFILL }
},
#if 0
{ &hf_a11_rqi_granted_qos,
{ "Granted QoS", "a11.ext.rqi.graqos",
FT_BYTES, BASE_NONE, NULL, 0,
"Reverse Granted QoS", HFILL }
},
#endif
{ &hf_a11_fqui_flowcount,
{ "Forward QoS Update Flow Count", "a11.ext.fqui.flowcount",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_rqui_flowcount,
{ "Reverse QoS Update Flow Count", "a11.ext.rqui.flowcount",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_fqui_updated_qoslen,
{ "Forward Updated QoS Sub-Blob Length", "a11.ext.fqui.updatedqoslen",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_fqui_updated_qos,
{ "Forward Updated QoS Sub-Blob", "a11.ext.fqui.updatedqos",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_rqui_updated_qoslen,
{ "Reverse Updated QoS Sub-Blob Length", "a11.ext.rqui.updatedqoslen",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_rqui_updated_qos,
{ "Reverse Updated QoS Sub-Blob", "a11.ext.rqui.updatedqos",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL }
},
#if 0
{ &hf_a11_subsciber_profile_len,
{ "Subscriber QoS Profile Length", "a11.ext.sqp.profilelen",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL }
},
#endif
{ &hf_a11_subsciber_profile,
{ "Subscriber QoS Profile", "a11.ext.sqp.profile",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_forward_rohc_info_len,
{ "Forward ROHC Info Length", "a11.ext.ase.forwardlen",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_forward_maxcid,
{ "Forward MAXCID", "a11.ext.ase.maxcid",
FT_UINT16, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_forward_mrru,
{ "Forward MRRU", "a11.ext.ase.mrru",
FT_UINT16, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_forward_large_cids,
{ "Forward Large CIDS", "a11.ext.ase.forwardlargecids",
FT_BOOLEAN, 8, NULL, 0x80,
NULL, HFILL }
},
{ &hf_a11_ase_forward_profile_count,
{ "Forward Profile Count", "a11.ext.ase.profilecount",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_forward_profile,
{ "Forward Profile", "a11.ext.ase.forwardprofile",
FT_UINT16, BASE_DEC | BASE_EXT_STRING, &a11_rohc_profile_vals_ext, 0,
NULL, HFILL }
},
{ &hf_a11_ase_reverse_rohc_info_len,
{ "Reverse ROHC Info Length", "a11.ext.ase.reverselen",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_reverse_maxcid,
{ "Reverse MAXCID", "a11.ext.ase.revmaxcid",
FT_UINT16, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_reverse_mrru,
{ "Reverse MRRU", "a11.ext.ase.revmrru",
FT_UINT16, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_reverse_large_cids,
{ "Reverse Large CIDS", "a11.ext.ase.reverselargecids",
FT_UINT8, BASE_DEC, NULL, 128,
NULL, HFILL }
},
{ &hf_a11_ase_reverse_profile_count,
{ "Reverse Profile Count", "a11.ext.ase.revprofilecount",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_ase_reverse_profile,
{ "Reverse Profile", "a11.ext.ase.reverseprofile",
FT_UINT16, BASE_DEC | BASE_EXT_STRING, &a11_rohc_profile_vals_ext, 0,
NULL, HFILL }
},
{ &hf_a11_aut_flow_prof_sub_type,
{ "Sub type", "a11.aut_flow_prof.sub_type",
FT_UINT8, BASE_DEC, VALS(a11_aut_flow_prof_subtype_vals), 0,
NULL, HFILL }
},
{ &hf_a11_aut_flow_prof_sub_type_len,
{ "Length", "a11.aut_flow_prof.length",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_aut_flow_prof_sub_type_value,
{ "Value", "a11.aut_flow_prof.value",
FT_UINT16, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_serv_opt_prof_max_serv,
{ "Service-Connections-Per-Link-flow", "a11.serv_opt_prof.scplf",
FT_UINT32, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_sub_type,
{ "Sub-Type", "a11.sub_type",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_sub_type_length,
{ "Sub-Type Length", "a11.sub_type_length",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_serv_opt,
{ "Service Option", "a11.serviceoption",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_max_num_serv_opt,
{ "Max number of service instances of Service Option", "a11.serviceoption",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
{ &hf_a11_bcmcs_stype,
{ "Protocol Type", "a11.ext.bcmcs.ptype",
FT_UINT8, BASE_HEX, VALS(a11_bcmcs_stype_vals), 0,
NULL, HFILL }
},
{ &hf_a11_bcmcs_entry_len,
{ "Entry length", "a11.ext.bcmcs.entry_len",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL }
},
};
/* Setup protocol subtree array */
static gint *ett[] = {
&ett_a11,
&ett_a11_flags,
&ett_a11_ext,
&ett_a11_exts,
&ett_a11_radius,
&ett_a11_radiuses,
&ett_a11_ase,
&ett_a11_fqi_flowentry,
&ett_a11_fqi_requestedqos,
&ett_a11_fqi_qos_attribute_set,
&ett_a11_fqi_grantedqos,
&ett_a11_rqi_flowentry,
&ett_a11_rqi_requestedqos,
&ett_a11_rqi_qos_attribute_set,
&ett_a11_rqi_grantedqos,
&ett_a11_fqi_flags,
&ett_a11_fqi_entry_flags,
&ett_a11_rqi_entry_flags,
&ett_a11_fqui_flowentry,
&ett_a11_rqui_flowentry,
&ett_a11_subscriber_profile,
&ett_a11_forward_rohc,
&ett_a11_reverse_rohc,
&ett_a11_forward_profile,
&ett_a11_reverse_profile,
&ett_a11_aut_flow_profile_ids,
&ett_a11_bcmcs_entry,
};
static ei_register_info ei[] = {
{ &ei_a11_sub_type_length_not2, { "a11.sub_type_length.bad", PI_PROTOCOL, PI_WARN, "Sub-Type Length should be at least 2", EXPFILL }},
{ &ei_a11_sse_too_short, { "a11.sse_too_short", PI_MALFORMED, PI_ERROR, "SSE too short", EXPFILL }},
{ &ei_a11_bcmcs_too_short, { "a11.bcmcs_too_short", PI_MALFORMED, PI_ERROR, "BCMCS too short", EXPFILL }},
{ &ei_a11_entry_data_not_dissected, { "a11.entry_data_not_dissected", PI_UNDECODED, PI_WARN, "Entry Data, Not dissected yet", EXPFILL }},
{ &ei_a11_session_data_not_dissected, { "a11.session_data_not_dissected", PI_UNDECODED, PI_WARN, "Session Data Type Not dissected yet", EXPFILL }},
};
expert_module_t* expert_a11;
/* Register the protocol name and description */
proto_a11 = proto_register_protocol("3GPP2 A11", "3GPP2 A11", "a11");
/* Register the dissector by name */
new_register_dissector("a11", dissect_a11, proto_a11);
/* Required function calls to register the header fields and subtrees used */
proto_register_field_array(proto_a11, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
expert_a11 = expert_register_protocol(proto_a11);
expert_register_field_array(expert_a11, ei, array_length(ei));
}
void
proto_reg_handoff_a11(void)
{
        /* Attach the A11 dissector to its well-known UDP port. */
        dissector_handle_t a11_handle = find_dissector("a11");

        dissector_add_uint("udp.port", UDP_PORT_3GA11, a11_handle);

        /* Hook RADIUS AVP sub-dissectors for the 3GPP2 vendor attributes. */
        /* 3GPP2-Service-Option-Profile(74) */
        radius_register_avp_dissector(VENDOR_THE3GPP2, 74, dissect_3gpp2_service_option_profile);
        /* 3GPP2-Authorized-Flow-Profile-IDs(131) */
        radius_register_avp_dissector(VENDOR_THE3GPP2, 131, dissect_3gpp2_radius_aut_flow_profile_ids);
}
/*
* Editor modelines - http://www.wireshark.org/tools/modelines.html
*
* Local variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* vi: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/
| gpl-2.0 |
binkybear/android_kernel_google_msm | drivers/cpufreq/cpufreq.c | 20 | 54391 | /*
* linux/drivers/cpufreq/cpufreq.c
*
* Copyright (C) 2001 Russell King
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
*
* Oct 2005 - Ashok Raj <ashok.raj@intel.com>
* Added handling for CPU hotplug
* Feb 2006 - Jacob Shin <jacob.shin@amd.com>
* Fix handling for CPU hotplug -- affected CPUs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <asm/cputime.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>
#include <trace/events/power.h>
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
* also protects the cpufreq_cpu_data array.
*/
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
struct cpufreq_cpu_save_data {
char gov[CPUFREQ_NAME_LEN];
unsigned int max, min;
};
static DEFINE_PER_CPU(struct cpufreq_cpu_save_data, cpufreq_policy_save);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);
static struct kset *cpufreq_kset;
static struct kset *cpudev_kset;
/*
* cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
* all cpufreq/hotplug/workqueue/etc related lock issues.
*
* The rules for this semaphore:
* - Any routine that wants to read from the policy structure will
* do a down_read on this semaphore.
* - Any routine that will write to the policy structure and/or may take away
* the policy altogether (eg. CPU hotplug), will hold this lock in write
* mode before doing so.
*
* Additional rules:
* - All holders of the lock should check to make sure that the CPU they
* are concerned with are online after they get the lock.
* - Governor routines that can be called in cpufreq hotplug path should not
* take this sem as top level hotplug notifier handler takes this.
* - Lock should not be held across
* __cpufreq_governor(data, CPUFREQ_GOV_STOP);
*/
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
/*
 * lock_policy_rwsem() - template generating lock_policy_rwsem_read() and
 * lock_policy_rwsem_write().
 *
 * Each generated function resolves @cpu to its policy-owning CPU (the rwsem
 * lives on the per-CPU slot of that owner, per cpufreq_policy_cpu), takes
 * that rwsem in the requested mode, then re-checks that @cpu is still
 * online.  Returns 0 with the lock held, or -1 with the lock already
 * dropped if the CPU went offline while we were acquiring it.
 */
#define lock_policy_rwsem(mode, cpu)                                    \
int lock_policy_rwsem_##mode                                            \
(int cpu)                                                               \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
        if (unlikely(!cpu_online(cpu))) {                               \
                up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));      \
                return -1;                                              \
        }                                                               \
                                                                        \
        return 0;                                                       \
}

/* Instantiate the read and write variants. */
lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);
/* Drop the read side of @cpu's policy rwsem — counterpart of
 * lock_policy_rwsem_read(). */
static void unlock_policy_rwsem_read(int cpu)
{
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
        BUG_ON(policy_cpu == -1);
        up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}

/* Drop the write side of @cpu's policy rwsem — counterpart of
 * lock_policy_rwsem_write().  Deliberately not static (unlike the read
 * variant), so code outside this file can release it. */
void unlock_policy_rwsem_write(int cpu)
{
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
        BUG_ON(policy_cpu == -1);
        up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
/**
* Two notifier lists: the "policy" list is involved in the
* validation process for a new CPU frequency policy; the
* "transition" list for kernel code that needs to handle
* changes to devices when the CPU clock speed changes.
* The mutex locks both lists.
*/
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;
static bool init_cpufreq_transition_notifier_list_called;
/*
 * One-time (pure_initcall) setup of the SRCU notifier head used for CPU
 * frequency transition notifications; the flag records that this ran, so
 * later code can verify initialization happened first.
 */
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
/* Global kill switch: non-zero once disable_cpufreq() has been called. */
static int off __read_mostly;

/* Return non-zero when the cpufreq core has been globally disabled. */
int cpufreq_disabled(void)
{
        return off;
}

/* Globally disable the cpufreq core.  One-way: no re-enable path exists
 * in this file. */
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
/*
 * Jiffy-based idle-time accounting: idle is computed as wall time minus
 * everything accounted busy (user, system, irq, softirq, steal, nice).
 * Returns idle time in microseconds; if @wall is non-NULL, also stores
 * the wall time there in microseconds.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        /* Sum every "busy" bucket from the per-CPU kernel stats. */
        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}
/*
 * Idle time in microseconds for @cpu.  Prefers the precise accounting
 * from get_cpu_idle_time_us(); when that returns -1ULL (unavailable),
 * falls back to the jiffy-based estimate.  With @io_busy == 0, iowait
 * time is added, i.e. counted as idle rather than busy.
 */
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
/*
 * Look up and pin the cpufreq policy for @cpu.
 *
 * Takes a reference on the driver module and, unless @sysfs is set, on
 * the policy kobject (when @sysfs is set the kobject_get is skipped —
 * presumably the sysfs layer already holds one; see show()/store()).
 * Returns the policy, or NULL on any failure with all partially taken
 * references dropped.  Pair with __cpufreq_cpu_put() with matching @sysfs.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, int sysfs)
{
        struct cpufreq_policy *data;
        unsigned long flags;

        if (cpu >= nr_cpu_ids)
                goto err_out;

        /* get the cpufreq driver */
        spin_lock_irqsave(&cpufreq_driver_lock, flags);

        if (!cpufreq_driver)
                goto err_out_unlock;

        if (!try_module_get(cpufreq_driver->owner))
                goto err_out_unlock;

        /* get the CPU */
        data = per_cpu(cpufreq_cpu_data, cpu);

        if (!data)
                goto err_out_put_module;

        if (!sysfs && !kobject_get(&data->kobj))
                goto err_out_put_module;

        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return data;

/* goto-based cleanup: unwind only what was actually acquired. */
err_out_put_module:
        module_put(cpufreq_driver->owner);
err_out_unlock:
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
        return NULL;
}
/* Public lookup: pins both the driver module and the policy kobject. */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        return __cpufreq_cpu_get(cpu, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/* Sysfs-path lookup: pins the driver module only (no kobject_get). */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
        return __cpufreq_cpu_get(cpu, 1);
}

/* Release the references taken by __cpufreq_cpu_get() with the same
 * @sysfs flag. */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, int sysfs)
{
        if (!sysfs)
                kobject_put(&data->kobj);
        module_put(cpufreq_driver->owner);
}

/* Counterpart of cpufreq_cpu_get(). */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
        __cpufreq_cpu_put(data, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/* Counterpart of cpufreq_cpu_get_sysfs(). */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
        __cpufreq_cpu_put(data, 1);
}
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        /* Delay loop is frequency-invariant on this hardware: no rescale. */
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        /* Lazily capture the reference (lpj, freq) pair on first use. */
        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; "
                        "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
        }
        /* Rescale after an actual change, or on suspend/resume events. */
        if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu "
                        "for frequency %u kHz\n", loops_per_jiffy, ci->new);
        }
}
#else
/* SMP: per-CPU loops_per_jiffy is maintained by arch code; no-op here. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects: once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE.  Must not be called with interrupts disabled
 * (BUG_ON below).
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
        struct cpufreq_policy *policy;

        BUG_ON(irqs_disabled());

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                state, freqs->new);

        policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
        switch (state) {

        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is"
                                        " %u, cpufreq assumed %u kHz.\n",
                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
                        (unsigned long)freqs->cpu);
                trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                /* Record the now-effective frequency and wake up anyone
                 * polling scaling_cur_freq in sysfs. */
                if (likely(policy) && likely(policy->cpu == freqs->cpu)) {
                        policy->cur = freqs->new;
                        sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq");
                }
                break;
        }
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
/**
 * cpufreq_notify_utilization - notify CPU userspace about CPU utilization
 * change
 *
 * This function is called everytime the CPU load is evaluated by the
 * ondemand governor. It stores the new utilization in the policy and
 * notifies userspace of cpu load changes via sysfs once the value reaches
 * MIN_CPU_UTIL_NOTIFY.
 *
 * Bug fix: the original checked @policy for NULL before the assignment
 * but then dereferenced it unconditionally in the threshold test, so a
 * NULL @policy would oops.  Bail out early instead.
 */
void cpufreq_notify_utilization(struct cpufreq_policy *policy,
                unsigned int util)
{
        if (!policy)
                return;

        policy->util = util;
        if (policy->util >= MIN_CPU_UTIL_NOTIFY)
                sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
}
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
/* Linear search of the registered-governor list by name (case-insensitive,
 * bounded at CPUFREQ_NAME_LEN); returns NULL when not found.  Most callers
 * here hold cpufreq_governor_mutex around this — NOTE(review):
 * cpufreq_add_dev_policy() calls it without the mutex; confirm that path
 * is otherwise serialized. */
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy-style drivers the string selects a fixed policy constant
 * ("performance"/"powersave" -> *@policy).  For target-style drivers it
 * is looked up in the registered-governor list (-> *@governor), with a
 * request_module("cpufreq_<name>") attempt if the governor is not yet
 * registered.  Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strnicmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (cpufreq_driver->target) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        /* The mutex is dropped around request_module() —
                         * presumably because loading the governor module
                         * re-enters registration and takes it; confirm. */
                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */
#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

/* One read-only sysfs "show" helper per exported policy field. */
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
show_one(cpu_utilization, util);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Reads an unsigned int from the sysfs buffer into a copy of the current
 * policy, records it in user_policy, and applies it via
 * __cpufreq_set_policy().
 *
 * NOTE(review): when cpufreq_driver->verify() fails this only logs an
 * error and still applies the policy — the common pattern is to return
 * -EINVAL there; confirm the fall-through is intentional in this tree.
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        unsigned int ret = -EINVAL;                                     \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = cpufreq_driver->verify(&new_policy);                      \
        if (ret)                                                        \
                pr_err("cpufreq: Frequency verification failed\n");     \
                                                                        \
        policy->user_policy.object = new_policy.object;                 \
        ret = __cpufreq_set_policy(policy, &new_policy);                \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 *
 * Queries the driver for the true hardware frequency; prints "<unknown>"
 * when the driver cannot report one (returns 0).
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int hw_freq = __cpufreq_get(policy->cpu);

        return hw_freq ? sprintf(buf, "%u\n", hw_freq)
                       : sprintf(buf, "<unknown>");
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 *
 * Setpolicy drivers report the fixed policy name; target drivers report
 * the attached governor's name (bounded by CPUFREQ_NAME_LEN).
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        switch (policy->policy) {
        case CPUFREQ_POLICY_POWERSAVE:
                return sprintf(buf, "powersave\n");
        case CPUFREQ_POLICY_PERFORMANCE:
                return sprintf(buf, "performance\n");
        default:
                if (policy->governor)
                        return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
                                         policy->governor->name);
                return -EINVAL;
        }
}
/**
 * store_scaling_governor - store policy for the specified CPU
 *
 * Parses a governor name from the sysfs buffer, resolves it via
 * cpufreq_parse_governor(), applies it with __cpufreq_set_policy(), then
 * records the outcome in user_policy and notifies sysfs watchers.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int ret = -EINVAL;
        char str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        /* %15s bounds the scan to the 16-byte buffer (incl. NUL). */
        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        /* Do not use cpufreq_set_policy here or the user_policy.max
           will be wrongly overridden */
        ret = __cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        sysfs_notify(&policy->kobj, NULL, "scaling_governor");

        /* NOTE(review): a KOBJ_ADD uevent on every governor write is
         * unusual — confirm userspace depends on it. */
        kobject_uevent(cpufreq_global_kobject, KOBJ_ADD);

        if (ret)
                return ret;
        else
                return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        /* Driver names are bounded by CPUFREQ_NAME_LEN, hence scnprintf. */
        return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * Setpolicy drivers (no ->target) only support the two fixed policies;
 * otherwise list every registered governor, space-separated.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!cpufreq_driver->target) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                /* Stop before another full name could overflow the page. */
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}
/* Render @mask as a space-separated list of CPU numbers into @buf,
 * bailing out before the PAGE_SIZE buffer could overflow. */
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                /* Separator before every entry except the first. */
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 *
 * Falls back to the online-cpus mask when no related mask was populated.
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        const struct cpumask *mask = cpumask_empty(policy->related_cpus)
                        ? policy->cpus : policy->related_cpus;

        return show_cpus(mask, buf);
}
/**
 * show_affected_cpus - show the CPUs affected by each transition
 *
 * Prints policy->cpus (the mask this policy currently governs).
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return show_cpus(policy->cpus, buf);
}
/* Forward a user-requested frequency to the governor; only governors
 * that implement ->store_setspeed accept this. */
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

/* Report the governor's set speed, or "<unsupported>" when the governor
 * provides no ->show_setspeed hook. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/* Start/stop a governor DVFS test run — appears to be a vendor extension
 * (upstream governors have no ->start_dvfs_test); TODO confirm. */
static ssize_t store_dvfs_test(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int enable= 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->start_dvfs_test)
                return -EINVAL;

        ret = sscanf(buf, "%u", &enable);
        if (ret != 1)
                return -EINVAL;

        policy->governor->start_dvfs_test(policy, enable);

        return count;
}

/* Report DVFS test state via the governor, or "<unsupported>". */
static ssize_t show_dvfs_test(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_dvfs_test)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_dvfs_test(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 * (the original header said "show_scaling_driver" — copy/paste slip)
 *
 * Asks the driver for a BIOS-imposed frequency cap; falls back to the
 * hardware maximum when the driver has no ->bios_limit hook or it fails.
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;

        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
/* Wrap the show_/store_ helpers above into struct freq_attr objects.
 * cpuinfo_cur_freq is root-readable only (0400): it pokes the hardware. */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_ro(cpu_utilization);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
cpufreq_freq_attr_rw(dvfs_test);

/* Attributes created by default under each policy's kobject.
 * Note: scaling_cur_freq and bios_limit are defined above but not listed
 * here — presumably added conditionally elsewhere; TODO confirm. */
static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &cpu_utilization.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        &dvfs_test.attr,
        NULL
};

/* Global /sys/devices/system/cpu/cpufreq kobject, shared with other files. */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Recover the containing policy / freq_attr from the generic sysfs objects. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
/* Generic sysfs read dispatcher: pin the policy (sysfs variant — no extra
 * kobject ref), take the read side of its rwsem, and call the attribute's
 * ->show hook. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_read(policy->cpu) < 0)
                goto fail;

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        unlock_policy_rwsem_read(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

/* Generic sysfs write dispatcher: like show() but with the write side of
 * the rwsem and the attribute's ->store hook. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_write(policy->cpu) < 0)
                goto fail;

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        unlock_policy_rwsem_write(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

/* Called when the last kobject reference is dropped: let whoever waits on
 * policy->kobj_unregister proceed with teardown. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

/* sysfs plumbing for per-policy kobjects. */
static const struct sysfs_ops sysfs_ops = {
        .show = show,
        .store = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops = &sysfs_ops,
        .default_attrs = default_attrs,
        .release = cpufreq_sysfs_release,
};
/*
 * Attach a newly added CPU either to an existing ("managed") policy that
 * already covers it, or leave it for the caller to finish as a fresh
 * policy.  On SMP with hotplug, also restores the governor and min/max
 * limits saved when this CPU was last removed.
 *
 * Returns:
 *   Negative: Failure
 *   0:        Success (no managing policy found; caller continues setup)
 *   Positive: When we have a managed CPU and the sysfs got symlinked
 */
static int cpufreq_add_dev_policy(unsigned int cpu,
                                  struct cpufreq_policy *policy,
                                  struct device *dev)
{
        int ret = 0;
#ifdef CONFIG_SMP
        unsigned long flags;
        unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_governor *gov;

        /* Restore the governor/limits remembered at hot-remove time. */
        gov = __find_governor(per_cpu(cpufreq_policy_save, cpu).gov);
        if (gov) {
                policy->governor = gov;
                pr_debug("Restoring governor %s for cpu %d\n",
                       policy->governor->name, cpu);
        }
        if (per_cpu(cpufreq_policy_save, cpu).min) {
                policy->min = per_cpu(cpufreq_policy_save, cpu).min;
                policy->user_policy.min = policy->min;
        }
        if (per_cpu(cpufreq_policy_save, cpu).max) {
                policy->max = per_cpu(cpufreq_policy_save, cpu).max;
                policy->user_policy.max = policy->max;
        }
        pr_debug("Restoring CPU%d min %d and max %d\n",
                cpu, policy->min, policy->max);
#endif

        for_each_cpu(j, policy->cpus) {
                struct cpufreq_policy *managed_policy;

                if (cpu == j)
                        continue;

                /* Check for existing affected CPUs.
                 * They may not be aware of it due to CPU Hotplug.
                 * cpufreq_cpu_put is called when the device is removed
                 * in __cpufreq_remove_dev()
                 */
                managed_policy = cpufreq_cpu_get(j);
                if (unlikely(managed_policy)) {

                        /* Set proper policy_cpu: re-point this CPU's rwsem
                         * owner at the managing CPU.  The write lock must
                         * be dropped and re-taken because the owning sem
                         * changes underneath us. */
                        unlock_policy_rwsem_write(cpu);
                        per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;

                        if (lock_policy_rwsem_write(cpu) < 0) {
                                /* Should not go through policy unlock path */
                                if (cpufreq_driver->exit)
                                        cpufreq_driver->exit(policy);
                                cpufreq_cpu_put(managed_policy);
                                return -EBUSY;
                        }

                        /* Fold this CPU into the managing policy's mask,
                         * restricted to CPUs that are actually online. */
                        spin_lock_irqsave(&cpufreq_driver_lock, flags);
                        cpumask_copy(managed_policy->cpus, policy->cpus);
                        cpumask_and(managed_policy->cpus,
                                    managed_policy->cpus, cpu_online_mask);
                        per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
                        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

                        pr_debug("CPU already managed, adding link\n");
                        ret = sysfs_create_link(&dev->kobj,
                                                &managed_policy->kobj,
                                                "cpufreq");
                        if (ret)
                                cpufreq_cpu_put(managed_policy);
                        /*
                         * Success. We only needed to be added to the mask.
                         * Call driver->exit() because only the cpu parent of
                         * the kobj needed to call init().
                         */
                        if (cpufreq_driver->exit)
                                cpufreq_driver->exit(policy);

                        if (!ret)
                                return 1;
                        else
                                return ret;
                }
        }
#endif
        return ret;
}
/*
 * Create a "cpufreq" symlink for every other online CPU in the policy's
 * mask, pointing at the managing CPU's policy kobject.
 */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		/*
		 * NOTE: this takes a reference on the managing CPU's policy
		 * (get(cpu), not get(j)) for each symlinked sibling; it is
		 * dropped again when the sibling is removed.
		 */
		managed_policy = cpufreq_cpu_get(cpu);
		/* NOTE(review): cpu_dev is not checked for NULL here —
		 * presumably every online CPU has a device; confirm. */
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
/*
 * Create the sysfs interface for a new policy: the "cpufreq" kobject and
 * its attribute files, symlinks for sibling CPUs, the per-cpu policy
 * pointers, and finally apply the default policy.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* create cpu device kset (once, lazily, for the first CPU added) */
	if (!cpudev_kset) {
		cpudev_kset = kset_create_and_add("kset", NULL, &dev->kobj);
		BUG_ON(!cpudev_kset);
		dev->kobj.kset = cpudev_kset;
	}

	/* send uevent when cpu device is added */
	kobject_uevent(&dev->kobj, KOBJ_ADD);

	/* set up driver-specific attribute files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* optional attributes, depending on driver capabilities */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for all CPUs it manages */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	/* dropping the last ref triggers cpufreq_sysfs_release() */
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret = 0, found = 0;
	struct cpufreq_policy *policy;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	/* Hold a module reference for the duration of the init sequence. */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	ret = -ENOMEM;
	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;
	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;
	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
#ifdef CONFIG_HOTPLUG_CPU
	/* Prefer the governor already used by a related sibling CPU. */
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cp->governor &&
		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
			policy->governor = cp->governor;
			found = 1;
			break;
		}
	}
#endif
	if (!found)
		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_unlock_policy;
	}
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	ret = cpufreq_add_dev_policy(cpu, policy, dev);
	if (ret) {
		if (ret > 0)
			/* This is a managed cpu, symlink created,
			   exit with 0 */
			ret = 0;
		goto err_unlock_policy;
	}

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_unlock_policy:
	unlock_policy_rwsem_write(cpu);
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
#ifdef CONFIG_SMP
	struct device *cpu_dev;
	unsigned int j;
#endif

	pr_debug("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);
	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		pr_debug("removing link\n");
		cpumask_clear_cpu(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		kobj = &dev->kobj;
		cpufreq_cpu_put(data);
		unlock_policy_rwsem_write(cpu);
		sysfs_remove_link(kobj, "cpufreq");
		return 0;
	}
#endif

#ifdef CONFIG_SMP
#ifdef CONFIG_HOTPLUG_CPU
	/* Save governor and user limits so they survive re-hotplug. */
	strncpy(per_cpu(cpufreq_policy_save, cpu).gov, data->governor->name,
			CPUFREQ_NAME_LEN);
	per_cpu(cpufreq_policy_save, cpu).min = data->user_policy.min;
	per_cpu(cpufreq_policy_save, cpu).max = data->user_policy.max;
	pr_debug("Saving CPU%d user policy min %d and max %d\n",
			cpu, data->user_policy.min, data->user_policy.max);
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
	 * the sysfs links afterwards.
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			per_cpu(cpufreq_cpu_data, j) = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			pr_debug("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			strncpy(per_cpu(cpufreq_policy_save, j).gov,
				data->governor->name, CPUFREQ_NAME_LEN);
			per_cpu(cpufreq_policy_save, j).min
						= data->user_policy.min;
			per_cpu(cpufreq_policy_save, j).max
						= data->user_policy.max;
			/*
			 * Print the values that were actually saved
			 * (user_policy.*), matching the sibling message
			 * above; the old code printed data->min/max.
			 */
			pr_debug("Saving CPU%d user policy min %d and max %d\n",
				j, data->user_policy.min,
				data->user_policy.max);
#endif
			cpu_dev = get_cpu_device(j);
			kobj = &cpu_dev->kobj;
			/* sysfs_remove_link() may sleep; drop the rwsem. */
			unlock_policy_rwsem_write(cpu);
			sysfs_remove_link(kobj, "cpufreq");
			lock_policy_rwsem_write(cpu);
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	kobj = &data->kobj;
	cmp = &data->kobj_unregister;
	unlock_policy_rwsem_write(cpu);
	kobject_put(kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");

	lock_policy_rwsem_write(cpu);
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);
	unlock_policy_rwsem_write(cpu);

#ifdef CONFIG_HOTPLUG_CPU
	/* when the CPU which is the parent of the kobj is hotplugged
	 * offline, check for siblings, and create cpufreq sysfs interface
	 * and symlinks
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		/* first sibling now owns the new sysfs dir */
		cpumask_clear_cpu(cpu, data->cpus);
		cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);

		/* finally remove our own symlink */
		lock_policy_rwsem_write(cpu);
		__cpufreq_remove_dev(dev, sif);
	}
#endif

	free_cpumask_var(data->related_cpus);
	free_cpumask_var(data->cpus);
	kfree(data);

	return 0;
}
/*
 * subsys_interface remove hook: take the policy rwsem (as required by
 * __cpufreq_remove_dev(), which releases it) and do the real removal.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		BUG();

	retval = __cpufreq_remove_dev(dev, sif);
	return retval;
}
/*
 * Deferred policy re-evaluation, scheduled via policy->update when the
 * actual frequency was found to diverge from the cached one.
 */
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;

	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	/* Announce the corrected frequency to transition listeners. */
	freqs.new = new_freq;
	freqs.old = old_freq;
	freqs.cpu = cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 * Returns 0 when no policy exists for @cpu.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	unsigned int freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	freq = policy->cur;
	cpufreq_cpu_put(policy);
	return freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 * Returns 0 when no policy exists for @cpu.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	unsigned int freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	freq = policy->max;
	cpufreq_cpu_put(policy);
	return freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
/*
 * Read the current frequency from the driver for @cpu and, if it differs
 * from the cached policy->cur, report the discrepancy and schedule a
 * policy update.  Caller must hold the policy rwsem (read is enough).
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;	/* driver can't report: 0 */

	ret_freq = cpufreq_driver->get(cpu);

	/* CONST_LOOPS drivers never change freq behind our back. */
	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency.  Returns 0 when the CPU has
 * no policy or the driver cannot report a frequency.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
/* Hook cpufreq into the generic cpu subsystem for device add/remove. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor. The other CPUs
 * have been put offline by means of CPU hotplug.
 *
 * Returns 0 on success or whatever the driver's ->suspend() returned.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored. It will verify that the current freq is in sync with
 *     what we believe it to be. This is a bit later than when it
 *     should be, but nonethteless it's better than calling
 *     cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU. The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	/* Re-check the frequency once the system is back up. */
	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}
/* System suspend/resume callbacks, run on the boot CPU only. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
/*********************************************************************
* NOTIFIER LISTS INTERFACE *
*********************************************************************/
/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	if (list == CPUFREQ_TRANSITION_NOTIFIER)
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
	else if (list == CPUFREQ_POLICY_NOTIFIER)
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
	else
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (list == CPUFREQ_TRANSITION_NOTIFIER)
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
	else if (list == CPUFREQ_POLICY_NOTIFIER)
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
	else
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
* GOVERNORS *
*********************************************************************/
/*
 * Ask the driver to move @policy's CPU to @target_freq according to
 * @relation.  Caller must hold the policy rwsem in write mode.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	if (cpufreq_disabled())
		return -ENODEV;

	pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);

	if (!cpu_online(policy->cpu) || !cpufreq_driver->target)
		return -EINVAL;

	return cpufreq_driver->target(policy, target_freq, relation);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/*
 * Locked wrapper around __cpufreq_driver_target(): pins the policy and
 * takes the policy rwsem in write mode around the frequency change.
 */
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/*
 * Query the driver's average-frequency estimate for @cpu (used by
 * governors).  Returns 0 when the driver has no ->getavg hook.
 */
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Pin the policy for the duration of the call. */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (cpu_online(cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/*
 * Dispatch a governor event (START/STOP/LIMITS) to the policy's governor,
 * falling back to the performance governor when the configured governor
 * cannot cope with the hardware's transition latency.
 * when "event" is CPUFREQ_GOV_LIMITS
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
/*
 * Add @governor to the global governor list, rejecting duplicates by
 * name.  Returns 0 on success, -EBUSY if the name is taken.
 */
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err = -EBUSY;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);
	if (!__find_governor(governor->name)) {
		list_add(&governor->governor_list, &cpufreq_governor_list);
		err = 0;
	}
	mutex_unlock(&cpufreq_governor_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/*
 * Remove @governor from the global list.  For offline CPUs, forget any
 * saved hotplug state that referenced this governor.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		/* Drop the saved governor name if it matches. */
		if (!strcmp(per_cpu(cpufreq_policy_save, cpu).gov,
					governor->name))
			strcpy(per_cpu(cpufreq_policy_save, cpu).gov, "\0");
		/* Note: min/max are cleared for every offline CPU here,
		 * regardless of whether the governor matched. */
		per_cpu(cpufreq_policy_save, cpu).min = 0;
		per_cpu(cpufreq_policy_save, cpu).max = 0;
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
* POLICY INTERFACE *
*********************************************************************/
/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.  Returns -EINVAL when @policy is
 * NULL or @cpu has no policy, 0 on success.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *src;

	if (!policy)
		return -EINVAL;

	src = cpufreq_cpu_get(cpu);
	if (!src)
		return -EINVAL;

	memcpy(policy, src, sizeof(*policy));
	cpufreq_cpu_put(src);

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * Apply a new policy to a CPU: validate the limits against the driver and
 * user policy, run the notifier chains, and start/stop governors as
 * needed.  Caller holds the policy rwsem in write mode.
 *
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* New limits must overlap the user-set limits. */
	if (policy->min > data->user_policy.max
	    || policy->max < data->user_policy.min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* setpolicy-style driver: hand the whole policy over. */
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Rebuild the target policy from the user-set values. */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
/*
 * CPU hotplug notifier: bring the cpufreq interface up/down as CPUs come
 * online or go offline.
 */
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			/* __cpufreq_remove_dev() expects the rwsem held
			 * and releases it itself. */
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();
			__cpufreq_remove_dev(dev, NULL);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			/* Offlining aborted: re-add the interface. */
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}
/* Hotplug notifier block, registered in cpufreq_register_driver(). */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
*********************************************************************/
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must supply verify/init and one of setpolicy/target. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Only one driver may be registered at a time. */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Only the currently-registered driver may unregister itself. */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
 * Core initialization: set up the per-CPU rwsems and policy_cpu markers,
 * create the global /sys/devices/system/cpu/cpufreq kobject and kset,
 * and register the suspend/resume hooks.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;	/* -1 == no policy */
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	/* create cpufreq kset */
	cpufreq_kset = kset_create_and_add("kset", NULL, cpufreq_global_kobject);
	BUG_ON(!cpufreq_kset);
	cpufreq_global_kobject->kset = cpufreq_kset;

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
| gpl-2.0 |
shinru2004/N860_Kernel | drivers/media/video/pvrusb2/pvrusb2-ioread.c | 1556 | 12501 | /*
*
*
* Copyright (C) 2005 Mike Isely <isely@pobox.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include "pvrusb2-ioread.h"
#include "pvrusb2-debug.h"
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#define BUFFER_COUNT 32
#define BUFFER_SIZE PAGE_ALIGN(0x4000)
/* Per-open read context: buffers, sync-key scan state, and the state of
 * the buffer currently being copied out to the client. */
struct pvr2_ioread {
	struct pvr2_stream *stream;
	char *buffer_storage[BUFFER_COUNT];
	char *sync_key_ptr;		/* marker bytes that start a stream */
	unsigned int sync_key_len;
	unsigned int sync_buf_offs;	/* progress matching the sync key */
	unsigned int sync_state;
	unsigned int sync_trashed_count; /* bytes discarded pre-sync */
	int enabled;		// Streaming is on
	int spigot_open;	// OK to pass data to client
	int stream_running;	// Passing data to client now
	/* State relevant to current buffer being read */
	struct pvr2_buffer *c_buf;
	char *c_data_ptr;
	unsigned int c_data_len;
	unsigned int c_data_offs;
	struct mutex mutex;	/* serializes client read operations */
};
/*
 * Initialize a freshly allocated pvr2_ioread: set up the mutex and
 * allocate all transfer buffers.  Returns 0 on success or -ENOMEM after
 * releasing any partially allocated buffers.
 *
 * Unlike the previous version, the error path only walks the slots that
 * were actually written here, so it no longer relies on the caller having
 * zeroed the structure, and freed slots are reset to NULL.
 */
static int pvr2_ioread_init(struct pvr2_ioread *cp)
{
	unsigned int idx;

	cp->stream = NULL;
	mutex_init(&cp->mutex);

	for (idx = 0; idx < BUFFER_COUNT; idx++) {
		cp->buffer_storage[idx] = kmalloc(BUFFER_SIZE, GFP_KERNEL);
		if (!cp->buffer_storage[idx]) {
			/* An allocation failed; release what we already
			   got, leaving the slots NULL behind us. */
			while (idx) {
				idx--;
				kfree(cp->buffer_storage[idx]);
				cp->buffer_storage[idx] = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}
/* Detach from any stream and release the reader's buffer pool.
 * kfree(NULL) is a no-op, so unused slots need no special casing. */
static void pvr2_ioread_done(struct pvr2_ioread *cp)
{
	unsigned int n;

	pvr2_ioread_setup(cp, NULL);
	for (n = 0; n < BUFFER_COUNT; n++)
		kfree(cp->buffer_storage[n]);
}
/* Allocate and initialize a new reader object; returns NULL on any
 * allocation failure. */
struct pvr2_ioread *pvr2_ioread_create(void)
{
	struct pvr2_ioread *cp = kzalloc(sizeof(*cp), GFP_KERNEL);

	if (!cp) return NULL;
	pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_create id=%p",cp);
	if (pvr2_ioread_init(cp) < 0) {
		kfree(cp);
		cp = NULL;
	}
	return cp;
}
/* Tear down a reader created by pvr2_ioread_create().  NULL is a no-op.
 * Cleanup: kfree(NULL) is itself a no-op, so the original NULL test
 * around the sync-key free was redundant, and clearing the pointer
 * immediately before freeing its container was a dead store. */
void pvr2_ioread_destroy(struct pvr2_ioread *cp)
{
	if (!cp) return;
	pvr2_ioread_done(cp);
	pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_destroy id=%p",cp);
	kfree(cp->sync_key_ptr);
	kfree(cp);
}
/*
 * Install (or clear) the synchronization key the read path scans for
 * before passing data to the client.  NULL pointer or zero length
 * clears the key.  Key bytes are copied into a private kmalloc'd
 * buffer; if that allocation fails the key is silently left cleared
 * (sync_key_len stays 0).
 */
void pvr2_ioread_set_sync_key(struct pvr2_ioread *cp,
const char *sync_key_ptr,
unsigned int sync_key_len)
{
if (!cp) return;
if (!sync_key_ptr) sync_key_len = 0;
/* No change?  Same length and (if nonzero) identical bytes. */
if ((sync_key_len == cp->sync_key_len) &&
((!sync_key_len) ||
(!memcmp(sync_key_ptr,cp->sync_key_ptr,sync_key_len)))) return;
if (sync_key_len != cp->sync_key_len) {
/* Length changed: drop the old buffer, allocate a new one. */
if (cp->sync_key_ptr) {
kfree(cp->sync_key_ptr);
cp->sync_key_ptr = NULL;
}
cp->sync_key_len = 0;
if (sync_key_len) {
cp->sync_key_ptr = kmalloc(sync_key_len,GFP_KERNEL);
if (cp->sync_key_ptr) {
cp->sync_key_len = sync_key_len;
}
}
}
if (!cp->sync_key_len) return;
memcpy(cp->sync_key_ptr,sync_key_ptr,cp->sync_key_len);
}
/* Halt streaming and reset all per-read state.  No-op when already
 * stopped.  Caller is expected to hold cp->mutex (as the other
 * internal callers do). */
static void pvr2_ioread_stop(struct pvr2_ioread *cp)
{
	if (!cp->enabled) return;
	pvr2_trace(PVR2_TRACE_START_STOP,
		   "/*---TRACE_READ---*/ pvr2_ioread_stop id=%p",cp);
	pvr2_stream_kill(cp->stream);
	/* Forget the in-progress buffer and clear every state flag. */
	cp->c_buf = NULL;
	cp->c_data_ptr = NULL;
	cp->c_data_offs = 0;
	cp->c_data_len = 0;
	cp->spigot_open = 0;
	cp->stream_running = 0;
	cp->enabled = 0;
	if (cp->sync_state) {
		pvr2_trace(PVR2_TRACE_DATA_FLOW,
			   "/*---TRACE_READ---*/ sync_state <== 0");
		cp->sync_state = 0;
	}
}
/* Begin streaming: queue every idle buffer to the stream and reset the
 * read-side state.  Returns 0 on success (or if already enabled / no
 * stream attached); on a queueing error, stops streaming and returns
 * the negative error. */
static int pvr2_ioread_start(struct pvr2_ioread *cp)
{
	struct pvr2_buffer *bp;
	int stat;

	if (cp->enabled) return 0;
	if (!cp->stream) return 0;
	pvr2_trace(PVR2_TRACE_START_STOP,
		   "/*---TRACE_READ---*/ pvr2_ioread_start id=%p",cp);
	for (;;) {
		bp = pvr2_stream_get_idle_buffer(cp->stream);
		if (!bp) break;
		stat = pvr2_buffer_queue(bp);
		if (stat < 0) {
			pvr2_trace(PVR2_TRACE_DATA_FLOW,
				   "/*---TRACE_READ---*/"
				   " pvr2_ioread_start id=%p"
				   " error=%d",
				   cp,stat);
			pvr2_ioread_stop(cp);
			return stat;
		}
	}
	cp->enabled = !0;
	cp->c_buf = NULL;
	cp->c_data_ptr = NULL;
	cp->c_data_len = 0;
	cp->c_data_offs = 0;
	cp->stream_running = 0;
	if (cp->sync_key_len) {
		/* A sync key is configured: arm the search state machine. */
		pvr2_trace(PVR2_TRACE_DATA_FLOW,
			   "/*---TRACE_READ---*/ sync_state <== 1");
		cp->sync_state = 1;
		cp->sync_trashed_count = 0;
		cp->sync_buf_offs = 0;
	}
	cp->spigot_open = 0;
	return 0;
}
/* Return the stream currently attached to this reader (NULL if none). */
struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *cp)
{
return cp->stream;
}
/* Attach (sp != NULL) or detach (sp == NULL) a stream.  On attach the
 * reader's preallocated storage is installed into the stream's
 * buffers.  Returns 0 on success or a negative error code.
 *
 * Bug fix: the original returned directly from inside the locked
 * region when pvr2_stream_set_buffer_count() failed, leaving
 * cp->mutex held forever.  All paths now exit through the unlock. */
int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
{
	int ret = 0;
	unsigned int idx;
	struct pvr2_buffer *bp;

	mutex_lock(&cp->mutex);
	do {
		if (cp->stream) {
			pvr2_trace(PVR2_TRACE_START_STOP,
				   "/*---TRACE_READ---*/"
				   " pvr2_ioread_setup (tear-down) id=%p",cp);
			pvr2_ioread_stop(cp);
			pvr2_stream_kill(cp->stream);
			if (pvr2_stream_get_buffer_count(cp->stream)) {
				pvr2_stream_set_buffer_count(cp->stream,0);
			}
			cp->stream = NULL;
		}
		if (sp) {
			pvr2_trace(PVR2_TRACE_START_STOP,
				   "/*---TRACE_READ---*/"
				   " pvr2_ioread_setup (setup) id=%p",cp);
			pvr2_stream_kill(sp);
			ret = pvr2_stream_set_buffer_count(sp,BUFFER_COUNT);
			if (ret < 0) break; /* was: return ret (mutex leak) */
			ret = 0;
			for (idx = 0; idx < BUFFER_COUNT; idx++) {
				bp = pvr2_stream_get_buffer(sp,idx);
				pvr2_buffer_set_buffer(bp,
						       cp->buffer_storage[idx],
						       BUFFER_SIZE);
			}
			cp->stream = sp;
		}
	} while (0);
	mutex_unlock(&cp->mutex);
	return ret;
}
/* Switch streaming on (fl nonzero) or off.  Returns 0, or the error
 * from pvr2_ioread_start() when enabling fails. */
int pvr2_ioread_set_enabled(struct pvr2_ioread *cp,int fl)
{
	int ret = 0;

	/* Already in the requested state?  Nothing to do. */
	if ((!fl) == (!(cp->enabled))) return 0;
	mutex_lock(&cp->mutex);
	if (fl) {
		ret = pvr2_ioread_start(cp);
	} else {
		pvr2_ioread_stop(cp);
	}
	mutex_unlock(&cp->mutex);
	return ret;
}
/*
 * Ensure cp->c_buf refers to a buffer with unconsumed data, requeueing
 * exhausted buffers back to the stream along the way.  Returns nonzero
 * normally (including when nothing is ready yet, in which case
 * c_data_len is 0); returns 0 only on a streaming error, after having
 * stopped streaming.  Caller must hold cp->mutex.
 */
static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
{
int stat;
while (cp->c_data_len <= cp->c_data_offs) {
if (cp->c_buf) {
// Flush out current buffer first.
stat = pvr2_buffer_queue(cp->c_buf);
if (stat < 0) {
// Streaming error...
pvr2_trace(PVR2_TRACE_DATA_FLOW,
"/*---TRACE_READ---*/"
" pvr2_ioread_read id=%p"
" queue_error=%d",
cp,stat);
pvr2_ioread_stop(cp);
return 0;
}
cp->c_buf = NULL;
cp->c_data_ptr = NULL;
cp->c_data_len = 0;
cp->c_data_offs = 0;
}
// Now get a freshly filled buffer.
cp->c_buf = pvr2_stream_get_ready_buffer(cp->stream);
if (!cp->c_buf) break; // Nothing ready; done.
cp->c_data_len = pvr2_buffer_get_count(cp->c_buf);
if (!cp->c_data_len) {
// Nothing transferred. Was there an error?
stat = pvr2_buffer_get_status(cp->c_buf);
if (stat < 0) {
// Streaming error...
pvr2_trace(PVR2_TRACE_DATA_FLOW,
"/*---TRACE_READ---*/"
" pvr2_ioread_read id=%p"
" buffer_error=%d",
cp,stat);
pvr2_ioread_stop(cp);
// Give up.
return 0;
}
// Start over...
continue;
}
/* Buffer has data: point c_data_ptr at the storage slot that was
   assigned to this buffer id in pvr2_ioread_setup(). */
cp->c_data_offs = 0;
cp->c_data_ptr = cp->buffer_storage[
pvr2_buffer_get_id(cp->c_buf)];
}
return !0;
}
/*
 * Sync-state-1 worker: scan the incoming stream, across buffer
 * boundaries, for the configured sync key.  Examined bytes are
 * consumed and tallied in sync_trashed_count.  When the full key has
 * matched, sync_state advances to 2 so the read path can replay the
 * key bytes to the client before resuming normal data.
 */
static void pvr2_ioread_filter(struct pvr2_ioread *cp)
{
unsigned int idx;
if (!cp->enabled) return;
if (cp->sync_state != 1) return;
// Search the stream for our synchronization key. This is made
// complicated by the fact that in order to be honest with
// ourselves here we must search across buffer boundaries...
mutex_lock(&cp->mutex); while (1) {
// Ensure we have a buffer
if (!pvr2_ioread_get_buffer(cp)) break;
if (!cp->c_data_len) break;
// Now walk the buffer contents until we match the key or
// run out of buffer data.
for (idx = cp->c_data_offs; idx < cp->c_data_len; idx++) {
if (cp->sync_buf_offs >= cp->sync_key_len) break;
if (cp->c_data_ptr[idx] ==
cp->sync_key_ptr[cp->sync_buf_offs]) {
// Found the next key byte
(cp->sync_buf_offs)++;
} else {
// Whoops, mismatched. Start key over...
cp->sync_buf_offs = 0;
}
}
// Consume what we've walked through
cp->c_data_offs += idx;
cp->sync_trashed_count += idx;
// If we've found the key, then update state and get out.
if (cp->sync_buf_offs >= cp->sync_key_len) {
/* Key bytes themselves don't count as trashed data. */
cp->sync_trashed_count -= cp->sync_key_len;
pvr2_trace(PVR2_TRACE_DATA_FLOW,
"/*---TRACE_READ---*/"
" sync_state <== 2 (skipped %u bytes)",
cp->sync_trashed_count);
cp->sync_state = 2;
cp->sync_buf_offs = 0;
break;
}
if (cp->c_data_offs < cp->c_data_len) {
// Sanity check - should NEVER get here
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"ERROR: pvr2_ioread filter sync problem"
" len=%u offs=%u",
cp->c_data_len,cp->c_data_offs);
// Get out so we don't get stuck in an infinite
// loop.
break;
}
continue; // (for clarity)
} mutex_unlock(&cp->mutex);
}
/* Poll-style availability check.  Returns 0 when data can be read,
 * -EAGAIN when the caller should retry later, -EIO when streaming is
 * not enabled.  Also maintains the spigot_open flag and traces its
 * transitions. */
int pvr2_ioread_avail(struct pvr2_ioread *cp)
{
	int ret;

	if (!cp->enabled) {
		/* Stream is not enabled; so this is an I/O error. */
		return -EIO;
	}
	if (cp->sync_state == 1) {
		/* Still hunting for the sync key; let the filter make
		   progress before reporting anything. */
		pvr2_ioread_filter(cp);
		if (cp->sync_state == 1) return -EAGAIN;
	}
	if (cp->stream_running) {
		/* Once running, any ready buffer means data available. */
		ret = pvr2_stream_get_ready_count(cp->stream) ? 0 : -EAGAIN;
	} else {
		/* Not running yet: buffer up half the pool first. */
		ret = (pvr2_stream_get_ready_count(cp->stream) >=
		       BUFFER_COUNT/2) ? 0 : -EAGAIN;
	}
	if ((!(cp->spigot_open)) != (!(ret == 0))) {
		cp->spigot_open = (ret == 0);
		pvr2_trace(PVR2_TRACE_DATA_FLOW,
			   "/*---TRACE_READ---*/ data is %s",
			   cp->spigot_open ? "available" : "pending");
	}
	return ret;
}
/*
 * Copy up to @cnt bytes of stream data to user buffer @buf.  Returns
 * the number of bytes copied, -EAGAIN when no data is available yet,
 * -EIO on a streaming error, or -EFAULT for a bad user pointer.  With
 * a NULL @buf the copy count is forced to 0 so the call only cycles
 * buffers.  While sync_state == 2 the previously matched sync key is
 * replayed into the output before normal buffer data resumes.
 */
int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
{
unsigned int copied_cnt;
unsigned int bcnt;
const char *src;
int stat;
int ret = 0;
unsigned int req_cnt = cnt;
if (!cnt) {
pvr2_trace(PVR2_TRACE_TRAP,
"/*---TRACE_READ---*/ pvr2_ioread_read id=%p"
" ZERO Request? Returning zero.",cp);
return 0;
}
stat = pvr2_ioread_avail(cp);
if (stat < 0) return stat;
cp->stream_running = !0;
mutex_lock(&cp->mutex); do {
// Suck data out of the buffers and copy to the user
copied_cnt = 0;
if (!buf) cnt = 0;
while (1) {
if (!pvr2_ioread_get_buffer(cp)) {
ret = -EIO;
break;
}
if (!cnt) break;
if (cp->sync_state == 2) {
// We're repeating the sync key data into
// the stream.
src = cp->sync_key_ptr + cp->sync_buf_offs;
bcnt = cp->sync_key_len - cp->sync_buf_offs;
} else {
// Normal buffer copy
src = cp->c_data_ptr + cp->c_data_offs;
bcnt = cp->c_data_len - cp->c_data_offs;
}
if (!bcnt) break;
// Don't run past user's buffer
if (bcnt > cnt) bcnt = cnt;
if (copy_to_user(buf,src,bcnt)) {
// User supplied a bad pointer?
// Give up - this *will* cause data
// to be lost.
ret = -EFAULT;
break;
}
cnt -= bcnt;
buf += bcnt;
copied_cnt += bcnt;
if (cp->sync_state == 2) {
// Update offset inside sync key that we're
// repeating back out.
cp->sync_buf_offs += bcnt;
if (cp->sync_buf_offs >= cp->sync_key_len) {
// Consumed entire key; switch mode
// to normal.
pvr2_trace(PVR2_TRACE_DATA_FLOW,
"/*---TRACE_READ---*/"
" sync_state <== 0");
cp->sync_state = 0;
}
} else {
// Update buffer offset.
cp->c_data_offs += bcnt;
}
}
} while (0); mutex_unlock(&cp->mutex);
if (!ret) {
if (copied_cnt) {
// If anything was copied, return that count
ret = copied_cnt;
} else {
// Nothing copied; suggest to caller that another
// attempt should be tried again later
ret = -EAGAIN;
}
}
pvr2_trace(PVR2_TRACE_DATA_FLOW,
"/*---TRACE_READ---*/ pvr2_ioread_read"
" id=%p request=%d result=%d",
cp,req_cnt,ret);
return ret;
}
/*
Stuff for Emacs to see, in order to encourage consistent editing style:
*** Local Variables: ***
*** mode: c ***
*** fill-column: 75 ***
*** tab-width: 8 ***
*** c-basic-offset: 8 ***
*** End: ***
*/
| gpl-2.0 |
aka-mccloud/ployer-momo7-kernel | block/blk-throttle.c | 2324 | 32716 | /*
* Interface for controlling IO bandwidth on a request queue
*
* Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/* Forward declaration; defined near the end of this file. */
static void throtl_schedule_delayed_work(struct throtl_data *td,
unsigned long delay);

/* Service tree: rb-tree of active groups keyed by dispatch time, with
 * a cached leftmost node and the minimum dispatch time. */
struct throtl_rb_root {
struct rb_root rb;
struct rb_node *left;      /* cached leftmost (earliest) node */
unsigned int count;        /* number of groups on the tree */
unsigned long min_disptime; /* disptime of the leftmost group */
};

/* Static initializer for an empty service tree. */
#define THROTL_RB_ROOT (struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
.count = 0, .min_disptime = 0}

/* Map an rb_node back to its containing throtl_grp. */
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
/* Per-(cgroup, request-queue) throttling group. */
struct throtl_grp {
/* List of throtl groups on the request queue*/
struct hlist_node tg_node;
/* active throtl group service_tree member */
struct rb_node rb_node;
/*
* Dispatch time in jiffies. This is the estimated time when group
* will unthrottle and is ready to dispatch more bio. It is used as
* key to sort active groups in service tree.
*/
unsigned long disptime;
/* Embedded blkio-cgroup group (stats, device id, cgroup linkage). */
struct blkio_group blkg;
/* Reference count; last put frees the group via RCU (throtl_free_tg). */
atomic_t ref;
/* THROTL_TG_FLAG_* bits, see enum tg_state_flags. */
unsigned int flags;
/* Two lists for READ and WRITE */
struct bio_list bio_lists[2];
/* Number of queued bios on READ and WRITE lists */
unsigned int nr_queued[2];
/* bytes per second rate limits */
uint64_t bps[2];
/* IOPS limits */
unsigned int iops[2];
/* Number of bytes disptached in current slice */
uint64_t bytes_disp[2];
/* Number of bio's dispatched in current slice */
unsigned int io_disp[2];
/* When did we start a new slice */
unsigned long slice_start[2];
unsigned long slice_end[2];
/* Some throttle limits got updated for the group */
int limits_changed;
/* Deferred-free callback head (see throtl_free_tg). */
struct rcu_head rcu_head;
};
/* Per-request-queue throttling context. */
struct throtl_data
{
/* List of throtl groups */
struct hlist_head tg_list;
/* service tree for active throtl groups */
struct throtl_rb_root tg_service_tree;
/* Group that absorbs IO when no cgroup-specific group applies. */
struct throtl_grp *root_tg;
/* Owning request queue. */
struct request_queue *queue;
/* Total Number of queued bios on READ and WRITE lists */
unsigned int nr_queued[2];
/*
* number of total undestroyed groups
*/
unsigned int nr_undestroyed_grps;
/* Work for dispatching throttled bios */
struct delayed_work throtl_work;
/* Set when any group's limits changed; consumed by
 * throtl_process_limit_change(). */
int limits_changed;
};
/* Bit numbers for throtl_grp->flags. */
enum tg_state_flags {
THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
};

/* Generate throtl_mark_tg_<name>, throtl_clear_tg_<name> and
 * throtl_tg_<name> (set / clear / test) helpers for one flag bit. */
#define THROTL_TG_FNS(name) \
static inline void throtl_mark_tg_##name(struct throtl_grp *tg) \
{ \
(tg)->flags |= (1 << THROTL_TG_FLAG_##name); \
} \
static inline void throtl_clear_tg_##name(struct throtl_grp *tg) \
{ \
(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name); \
} \
static inline int throtl_tg_##name(const struct throtl_grp *tg) \
{ \
return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0; \
}

THROTL_TG_FNS(on_rr);

/* blktrace helpers; the _tg variant tags messages with the group's
 * cgroup path.  NOTE(review): the macro below ends with a trailing
 * backslash, so the blank line that follows is (harmlessly) spliced
 * into its expansion — looks unintentional; verify against upstream. */
#define throtl_log_tg(td, tg, fmt, args...) \
blk_add_trace_msg((td)->queue, "throtl %s " fmt, \
blkg_path(&(tg)->blkg), ##args); \

#define throtl_log(td, fmt, args...) \
blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
/* Map an embedded blkio_group back to its throtl_grp (NULL-safe). */
static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
if (blkg)
return container_of(blkg, struct throtl_grp, blkg);
return NULL;
}

/* Total bios queued on this queue across both directions. */
static inline int total_nr_queued(struct throtl_data *td)
{
return (td->nr_queued[0] + td->nr_queued[1]);
}

/* Take an extra reference on @tg and return it, for call chaining. */
static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
atomic_inc(&tg->ref);
return tg;
}
/* RCU callback: release a group's per-cpu stats and the group itself
 * once all RCU readers are done with it. */
static void throtl_free_tg(struct rcu_head *head)
{
	struct throtl_grp *tg = container_of(head, struct throtl_grp,
					     rcu_head);

	free_percpu(tg->blkg.stats_cpu);
	kfree(tg);
}
/* Drop one reference; the final put schedules the group for RCU
 * freeing via throtl_free_tg(). */
static void throtl_put_tg(struct throtl_grp *tg)
{
BUG_ON(atomic_read(&tg->ref) <= 0);
if (!atomic_dec_and_test(&tg->ref))
return;
/*
* A group is freed in rcu manner. But having an rcu lock does not
* mean that one can access all the fields of blkg and assume these
* are valid. For example, don't try to follow throtl_data and
* request queue links.
*
* Having a reference to blkg under an rcu allows acess to only
* values local to groups like group stats and group rate limits
*/
call_rcu(&tg->rcu_head, throtl_free_tg);
}
/* Set up a freshly allocated group: empty queues, unlimited rates and
 * the initial joint reference. */
static void throtl_init_group(struct throtl_grp *tg)
{
	int i;

	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	for (i = 0; i < 2; i++) {
		bio_list_init(&tg->bio_lists[i]);
		/* -1 means practically unlimited bandwidth / iops. */
		tg->bps[i] = -1;
		tg->iops[i] = -1;
	}
	tg->limits_changed = false;
	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);
}
/* Should be called with rcu read lock held (needed for blkcg) */
static void
throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
{
/* Publish the group on the queue-wide list and count it as live. */
hlist_add_head(&tg->tg_node, &td->tg_list);
td->nr_undestroyed_grps++;
}
/* Record the backing device's major:minor in tg->blkg.dev, if it is
 * not already known and the driver has attached a device by now.
 * (Device details may be missing when the group was created while the
 * queue was still being instantiated.) */
static void
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	if (!tg || tg->blkg.dev)
		return;
	if (!bdi->dev || !dev_name(bdi->dev))
		return;
	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
	tg->blkg.dev = MKDEV(major, minor);
}
/*
* Should be called with without queue lock held. Here queue lock will be
* taken rarely. It will be taken only once during life time of a group
* if need be
*/
static void
throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
/* Fast path: device already known (or no group) — no locking needed. */
if (!tg || tg->blkg.dev)
return;
spin_lock_irq(td->queue->queue_lock);
__throtl_tg_fill_dev_details(td, tg);
spin_unlock_irq(td->queue->queue_lock);
}
/* Finish setting up a new group: fill in its device id, publish it to
 * the blkio cgroup, pull the per-device limits configured there, and
 * add it to the queue's group list. */
static void throtl_init_add_tg_lists(struct throtl_data *td,
struct throtl_grp *tg, struct blkio_cgroup *blkcg)
{
__throtl_tg_fill_dev_details(td, tg);
/* Add group onto cgroup list */
blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
tg->blkg.dev, BLKIO_POLICY_THROTL);
/* Seed the group's limits from the cgroup's per-device settings. */
tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
throtl_add_group_to_td_list(td, tg);
}
/* Allocate a group plus its per-cpu stats; NULL on failure.
 * Should be called without queue lock and outside of an rcu period
 * (the stats allocation can block). */
static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
{
	struct throtl_grp *tg;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		return NULL;
	if (blkio_alloc_blkg_stats(&tg->blkg)) {
		kfree(tg);
		return NULL;
	}
	throtl_init_group(tg);
	return tg;
}
/* Look up the existing group for @blkcg on this queue; returns NULL
 * when none exists yet.  Also opportunistically fills in the group's
 * device details. */
static struct
throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
{
struct throtl_grp *tg = NULL;
void *key = td;
/*
* This is the common case when there are no blkio cgroups.
* Avoid lookup in this case
*/
if (blkcg == &blkio_root_cgroup)
tg = td->root_tg;
else
tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
__throtl_tg_fill_dev_details(td, tg);
return tg;
}
/*
* This function returns with queue lock unlocked in case of error, like
* request queue is no more
*/
static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
{
struct throtl_grp *tg = NULL, *__tg = NULL;
struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;
rcu_read_lock();
blkcg = task_blkio_cgroup(current);
tg = throtl_find_tg(td, blkcg);
if (tg) {
rcu_read_unlock();
return tg;
}
/*
* Need to allocate a group. Allocation of group also needs allocation
* of per cpu stats which in-turn takes a mutex() and can block. Hence
* we need to drop rcu lock and queue_lock before we call alloc
*
* Take the request queue reference to make sure queue does not
* go away once we return from allocation.
*/
blk_get_queue(q);
rcu_read_unlock();
spin_unlock_irq(q->queue_lock);
tg = throtl_alloc_tg(td);
/*
* We might have slept in group allocation. Make sure queue is not
* dead
*/
if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
blk_put_queue(q);
if (tg)
kfree(tg);
return ERR_PTR(-ENODEV);
}
blk_put_queue(q);
/* Group allocated and queue is still alive. take the lock */
spin_lock_irq(q->queue_lock);
/*
* Initialize the new group. After sleeping, read the blkcg again.
*/
rcu_read_lock();
blkcg = task_blkio_cgroup(current);
/*
* If some other thread already allocated the group while we were
* not holding queue lock, free up the group
*/
__tg = throtl_find_tg(td, blkcg);
if (__tg) {
kfree(tg);
rcu_read_unlock();
return __tg;
}
/* Group allocation failed. Account the IO to root group */
if (!tg) {
tg = td->root_tg;
return tg;
}
throtl_init_add_tg_lists(td, tg, blkcg);
rcu_read_unlock();
return tg;
}
/* Leftmost (earliest-disptime) group on the service tree, or NULL when
 * the tree is empty. */
static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	if (!root->count)
		return NULL;
	/* Cache the leftmost node so repeated calls stay cheap. */
	if (!root->left)
		root->left = rb_first(&root->rb);
	return root->left ? rb_entry_tg(root->left) : NULL;
}
/* Remove @n from @root and reinitialize it so it can be re-inserted. */
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
rb_erase(n, root);
RB_CLEAR_NODE(n);
}

/* Remove a node from the service tree, invalidating the cached
 * leftmost pointer when it is the one going away. */
static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
if (root->left == n)
root->left = NULL;
rb_erase_init(n, &root->rb);
--root->count;
}
/* Refresh st->min_disptime from the current leftmost group, if any. */
static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *first = throtl_rb_first(st);

	if (first)
		st->min_disptime = first->disptime;
}
/* Insert @tg into the service tree keyed by its disptime, updating the
 * cached leftmost pointer when the new node sorts first.  Equal keys
 * go right, preserving FIFO order among same-time groups. */
static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
struct rb_node **node = &st->rb.rb_node;
struct rb_node *parent = NULL;
struct throtl_grp *__tg;
unsigned long key = tg->disptime;
int left = 1;
while (*node != NULL) {
parent = *node;
__tg = rb_entry_tg(parent);
if (time_before(key, __tg->disptime))
node = &parent->rb_left;
else {
node = &parent->rb_right;
/* Went right at least once: not the new leftmost. */
left = 0;
}
}
if (left)
st->left = &tg->rb_node;
rb_link_node(&tg->rb_node, parent, node);
rb_insert_color(&tg->rb_node, &st->rb);
}
/* Put @tg on the service tree and mark it busy (on_rr). */
static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
struct throtl_rb_root *st = &td->tg_service_tree;
tg_service_tree_add(st, tg);
throtl_mark_tg_on_rr(tg);
st->count++;
}

/* Enqueue only if not already on the service tree. */
static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
if (!throtl_tg_on_rr(tg))
__throtl_enqueue_tg(td, tg);
}

/* Remove @tg from the service tree and clear its busy flag. */
static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
throtl_clear_tg_on_rr(tg);
}

/* Dequeue only if currently on the service tree. */
static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
if (throtl_tg_on_rr(tg))
__throtl_dequeue_tg(td, tg);
}
/* Arm the dispatch work for the earliest pending group.  Runs the
 * work immediately when that time has already passed; does nothing
 * when no bios are queued anywhere. */
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;
	unsigned long delay;

	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);
	delay = time_before_eq(st->min_disptime, jiffies)
		? 0 : st->min_disptime - jiffies;
	throtl_schedule_delayed_work(td, delay);
}
/* Begin a fresh accounting slice for direction @rw starting now:
 * reset dispatched byte/io counters and set the slice window to
 * [jiffies, jiffies + throtl_slice]. */
static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0;
tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + throtl_slice;
throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', tg->slice_start[rw],
tg->slice_end[rw], jiffies);
}
/* Set the slice end to @jiffy_end rounded up to a slice boundary,
 * without logging. */
static inline void throtl_set_slice_end(struct throtl_data *td,
struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

/* Same as throtl_set_slice_end() but traces the extension. */
static inline void throtl_extend_slice(struct throtl_data *td,
struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', tg->slice_start[rw],
tg->slice_end[rw], jiffies);
}
/* Determine if the previously allocated or extended slice has expired,
 * i.e. jiffies now falls outside [slice_start, slice_end]. */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	return !time_in_range(jiffies, tg->slice_start[rw],
			      tg->slice_end[rw]);
}
/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
unsigned long nr_slices, time_elapsed, io_trim;
u64 bytes_trim, tmp;
BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
/*
* If bps are unlimited (-1), then time slice don't get
* renewed. Don't try to trim the slice if slice is used. A new
* slice will start when appropriate.
*/
if (throtl_slice_used(td, tg, rw))
return;
/*
* A bio has been dispatched. Also adjust slice_end. It might happen
* that initially cgroup limit was very low resulting in high
* slice_end, but later limit was bumped up and bio was dispached
* sooner, then we need to reduce slice_end. A high bogus slice_end
* is bad because it does not allow new slice to start.
*/
throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
time_elapsed = jiffies - tg->slice_start[rw];
nr_slices = time_elapsed / throtl_slice;
if (!nr_slices)
return;
/* Bytes/ios allowed over the elapsed whole slices; do_div because
 * bps is 64-bit. */
tmp = tg->bps[rw] * throtl_slice * nr_slices;
do_div(tmp, HZ);
bytes_trim = tmp;
io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
if (!bytes_trim && !io_trim)
return;
/* Subtract the trimmed allowance, clamping at zero. */
if (tg->bytes_disp[rw] >= bytes_trim)
tg->bytes_disp[rw] -= bytes_trim;
else
tg->bytes_disp[rw] = 0;
if (tg->io_disp[rw] >= io_trim)
tg->io_disp[rw] -= io_trim;
else
tg->io_disp[rw] = 0;
/* Advance the slice start past the whole slices just accounted. */
tg->slice_start[rw] += nr_slices * throtl_slice;
throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
" start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
/* Can one more bio be dispatched within the group's iops limit?
 * Returns 1 (and *wait = 0) if yes; otherwise returns 0 and stores
 * the estimated wait in jiffies into *wait (when non-NULL). */
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
struct bio *bio, unsigned long *wait)
{
bool rw = bio_data_dir(bio);
unsigned int io_allowed;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
u64 tmp;
jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
/* Slice has just started. Consider one slice interval */
if (!jiffy_elapsed)
jiffy_elapsed_rnd = throtl_slice;
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
/*
* jiffy_elapsed_rnd should not be a big value as minimum iops can be
* 1 then at max jiffy elapsed should be equivalent of 1 second as we
* will allow dispatch after 1 second and after that slice should
* have been trimmed.
*/
tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
do_div(tmp, HZ);
/* Clamp: io_allowed is only 32 bits wide. */
if (tmp > UINT_MAX)
io_allowed = UINT_MAX;
else
io_allowed = tmp;
if (tg->io_disp[rw] + 1 <= io_allowed) {
if (wait)
*wait = 0;
return 1;
}
/* Calc approx time to dispatch */
jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
if (jiffy_wait > jiffy_elapsed)
jiffy_wait = jiffy_wait - jiffy_elapsed;
else
jiffy_wait = 1;
if (wait)
*wait = jiffy_wait;
return 0;
}
/* Can @bio be dispatched within the group's bytes-per-second limit?
 * Returns 1 (and *wait = 0) if yes; otherwise returns 0 and stores
 * the estimated wait in jiffies into *wait (when non-NULL). */
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
struct bio *bio, unsigned long *wait)
{
bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
/* Slice has just started. Consider one slice interval */
if (!jiffy_elapsed)
jiffy_elapsed_rnd = throtl_slice;
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
/* Bytes permitted over the (rounded-up) elapsed time. */
tmp = tg->bps[rw] * jiffy_elapsed_rnd;
do_div(tmp, HZ);
bytes_allowed = tmp;
if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
if (wait)
*wait = 0;
return 1;
}
/* Calc approx time to dispatch */
extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
if (!jiffy_wait)
jiffy_wait = 1;
/*
* This wait time is without taking into consideration the rounding
* up we did. Add that time also.
*/
jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
if (wait)
*wait = jiffy_wait;
return 0;
}
/* True when @tg imposes neither a bps nor an iops limit for @rw. */
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	return tg->bps[rw] == -1 && tg->iops[rw] == -1;
}
/*
* Returns whether one can dispatch a bio or not. Also returns approx number
* of jiffies to wait before this bio is with-in IO rate and can be dispatched
*/
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
struct bio *bio, unsigned long *wait)
{
bool rw = bio_data_dir(bio);
unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
/*
* Currently whole state machine of group depends on first bio
* queued in the group bio list. So one should not be calling
* this function with a different bio if there are other bios
* queued.
*/
BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
/* If tg->bps = -1, then BW is unlimited */
if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
if (wait)
*wait = 0;
return 1;
}
/*
* If previous slice expired, start a new one otherwise renew/extend
* existing slice to make sure it is at least throtl_slice interval
* long since now.
*/
if (throtl_slice_used(td, tg, rw))
throtl_start_new_slice(td, tg, rw);
else {
if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
}
/* Dispatchable only when BOTH the bps and iops budgets allow it. */
if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
&& tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
if (wait)
*wait = 0;
return 1;
}
/* Must wait for whichever budget recovers last. */
max_wait = max(bps_wait, iops_wait);
if (wait)
*wait = max_wait;
/* Keep the slice open long enough to cover the wait. */
if (time_before(tg->slice_end[rw], jiffies + max_wait))
throtl_extend_slice(td, tg, rw, jiffies + max_wait);
return 0;
}
/* Account @bio against @tg's current slice and dispatch statistics. */
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = bio->bi_rw & REQ_SYNC;

	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;
	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}
/* Queue @bio on @tg's per-direction list.  Each queued bio pins the
 * group with a reference (dropped in tg_dispatch_one_bio), and the
 * group is placed on the service tree if not already there. */
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
struct bio *bio)
{
bool rw = bio_data_dir(bio);
bio_list_add(&tg->bio_lists[rw], bio);
/* Take a bio reference on tg */
throtl_ref_get_tg(tg);
tg->nr_queued[rw]++;
td->nr_queued[rw]++;
throtl_enqueue_tg(td, tg);
}
/* Recompute when @tg may next dispatch (the smaller of the read and
 * write waits; -1 i.e. "never" for an empty direction) and re-key the
 * group on the service tree. */
static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
struct bio *bio;
if ((bio = bio_list_peek(&tg->bio_lists[READ])))
tg_may_dispatch(td, tg, bio, &read_wait);
if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
tg_may_dispatch(td, tg, bio, &write_wait);
min_wait = min(read_wait, write_wait);
disptime = jiffies + min_wait;
/* Update dispatch time */
throtl_dequeue_tg(td, tg);
tg->disptime = disptime;
throtl_enqueue_tg(td, tg);
}
/* Move the head bio of @tg's @rw list onto @bl, charging it to the
 * group and trimming the used slice portion.  REQ_THROTTLED is set on
 * the bio — presumably so it is not throttled again when resubmitted
 * via generic_make_request; TODO confirm against blk-core. */
static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
bool rw, struct bio_list *bl)
{
struct bio *bio;
bio = bio_list_pop(&tg->bio_lists[rw]);
tg->nr_queued[rw]--;
/* Drop bio reference on tg */
throtl_put_tg(tg);
BUG_ON(td->nr_queued[rw] <= 0);
td->nr_queued[rw]--;
throtl_charge_bio(tg, bio);
bio_list_add(bl, bio);
bio->bi_rw |= REQ_THROTTLED;
throtl_trim_slice(td, tg, rw);
}
/* Dispatch up to throtl_grp_quantum bios from @tg onto @bl, biased
 * roughly 75% reads / 25% writes.  Returns the number dispatched. */
static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	unsigned int nr_reads = 0, nr_writes = 0;
	struct bio *bio;

	while ((bio = bio_list_peek(&tg->bio_lists[READ])) != NULL &&
	       tg_may_dispatch(td, tg, bio, NULL)) {
		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		if (++nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE])) != NULL &&
	       tg_may_dispatch(td, tg, bio, NULL)) {
		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		if (++nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
/* One service-tree round: walk groups in disptime order, dispatching
 * from each whose time has come, up to throtl_quantum bios in total.
 * Groups left with queued bios are re-keyed for their next turn.
 * Returns the number of bios moved onto @bl. */
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
unsigned int nr_disp = 0;
struct throtl_grp *tg;
struct throtl_rb_root *st = &td->tg_service_tree;
while (1) {
tg = throtl_rb_first(st);
if (!tg)
break;
/* Earliest group not due yet: everyone later isn't either. */
if (time_before(jiffies, tg->disptime))
break;
throtl_dequeue_tg(td, tg);
nr_disp += throtl_dispatch_tg(td, tg, bl);
if (tg->nr_queued[0] || tg->nr_queued[1]) {
tg_update_disptime(td, tg);
throtl_enqueue_tg(td, tg);
}
if (nr_disp >= throtl_quantum)
break;
}
return nr_disp;
}
/* Apply pending per-group limit updates flagged by the cgroup side.
 * Slices are restarted so IO already dispatched under an old (higher)
 * rate is not charged against a freshly lowered limit. */
static void throtl_process_limit_change(struct throtl_data *td)
{
struct throtl_grp *tg;
struct hlist_node *pos, *n;
if (!td->limits_changed)
return;
/* Atomically consume the queue-wide "something changed" flag. */
xchg(&td->limits_changed, false);
throtl_log(td, "limits changed");
hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
if (!tg->limits_changed)
continue;
/* Atomically claim this group's change; skip if raced away. */
if (!xchg(&tg->limits_changed, false))
continue;
throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
tg->iops[READ], tg->iops[WRITE]);
/*
* Restart the slices for both READ and WRITES. It
* might happen that a group's limit are dropped
* suddenly and we don't want to account recently
* dispatched IO with new low rate
*/
throtl_start_new_slice(td, tg, 0);
throtl_start_new_slice(td, tg, 1);
if (throtl_tg_on_rr(tg))
tg_update_disptime(td, tg);
}
}
/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
struct throtl_data *td = q->td;
unsigned int nr_disp = 0;
struct bio_list bio_list_on_stack;
struct bio *bio;
struct blk_plug plug;
spin_lock_irq(q->queue_lock);
throtl_process_limit_change(td);
if (!total_nr_queued(td))
goto out;
/* Collect dispatchable bios under the lock, issue them after. */
bio_list_init(&bio_list_on_stack);
throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
total_nr_queued(td), td->nr_queued[READ],
td->nr_queued[WRITE]);
nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
if (nr_disp)
throtl_log(td, "bios disp=%u", nr_disp);
throtl_schedule_next_dispatch(td);
out:
spin_unlock_irq(q->queue_lock);
/*
* If we dispatched some requests, unplug the queue to make sure
* immediate dispatch
*/
if (nr_disp) {
blk_start_plug(&plug);
while((bio = bio_list_pop(&bio_list_on_stack)))
generic_make_request(bio);
blk_finish_plug(&plug);
}
return nr_disp;
}
/* Delayed-work entry point: run one dispatch round on the owning
 * queue. */
void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      throtl_work.work);

	throtl_dispatch(td->queue);
}
/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) > 0 || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
/*
 * Unlink @tg from td->tg_list and drop the creation reference.
 * Caller must hold the queue lock (see throtl_release_tgs() and
 * throtl_unlink_blkio_group()).
 */
static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}
/*
 * Destroy every group still on td->tg_list at queue-exit time, racing
 * against the cgroup removal path; whichever unlinks the blkg first is
 * responsible for destroying the group.
 */
static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * cfqg also.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}
/* Free the per-queue throttle data (all groups must already be gone). */
static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}
/*
* Blk cgroup controller notification saying that blkio_group object is being
* delinked as associated cgroup object is going away. That also means that
* no new IO will come in this group. So get rid of this group as soon as
* any pending IO in the group is finished.
*
 * This function is called under rcu_read_lock(). "key" is the rcu-protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we
 * hold the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
/* Cgroup-removal callback: tear down the group under the queue lock. */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}
/*
 * Mark a limit change for @tg and kick the dispatcher.  The group flag is
 * set before the top-level flag so that throtl_process_limit_change()
 * (which tests td->limits_changed first) cannot miss the group.
 */
static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}
/*
* For all update functions, key should be a valid pointer because these
* update functions are called under blkcg_lock, that means, blkg is
* valid and in turn key is valid. queue exit path can not race because
* of blkcg_lock
*
* Can not take queue lock in update functions as queue lock under blkcg_lock
* is not allowed. Under other paths we take blkcg_lock under queue_lock.
*/
/* Cgroup callback: a new read bytes-per-second limit was configured. */
static void throtl_update_blkio_group_read_bps(void *key,
		struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common((struct throtl_data *)key, tg);
}
/* Cgroup callback: a new write bytes-per-second limit was configured. */
static void throtl_update_blkio_group_write_bps(void *key,
		struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common((struct throtl_data *)key, tg);
}
/* Cgroup callback: a new read IOPS limit was configured. */
static void throtl_update_blkio_group_read_iops(void *key,
		struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common((struct throtl_data *)key, tg);
}
/* Cgroup callback: a new write IOPS limit was configured. */
static void throtl_update_blkio_group_write_iops(void *key,
		struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common((struct throtl_data *)key, tg);
}
/* Flush any pending or running throttle work before queue teardown. */
static void throtl_shutdown_wq(struct request_queue *q)
{
	cancel_delayed_work_sync(&q->td->throtl_work);
}
/* Blkio policy hooks: group teardown plus the four limit-update callbacks. */
static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};
/*
 * Main throttling entry point.  Decides whether *biop may be dispatched
 * immediately or must be queued.  Returns 0 in both cases; when the bio
 * was queued, *biop is set to NULL so the caller does not submit it.
 * Returns -ENODEV only when the queue is going away.
 */
int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;

	/* Bio already passed through throttling once; strip the flag and go. */
	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in lockless manner and return.
	 */

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		throtl_tg_fill_dev_details(td, tg);

		if (tg_no_rule_group(tg, rw)) {
			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
				rw, bio->bi_rw & REQ_SYNC);
			rcu_read_unlock();
			return 0;
		}
	}
	rcu_read_unlock();

	/*
	 * Either group has not been allocated yet or it is not an unlimited
	 * IO group
	 */

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (IS_ERR(tg)) {
		if (PTR_ERR(tg) == -ENODEV) {
			/*
			 * Queue is gone. No queue lock held here.
			 */
			return -ENODEV;
		}
		/*
		 * NOTE(review): any error other than -ENODEV falls through
		 * and the error pointer is dereferenced below.  Presumably
		 * throtl_get_tg() can only fail with -ENODEV — confirm.
		 */
	}

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is with-in rate limit of group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at new
		 * low rate and * newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	/* Ownership of the bio moves to the throttle machinery. */
	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}
/*
 * Allocate and attach per-queue throttle data, including the root group.
 * Returns 0 on success or -ENOMEM.
 */
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	/* alloc and Init root group. */
	td->queue = q;
	tg = throtl_alloc_tg(td);

	if (!tg) {
		kfree(td);
		return -ENOMEM;
	}

	td->root_tg = tg;

	/* Group/cgroup linkage is published under rcu. */
	rcu_read_lock();
	throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	q->td = td;
	return 0;
}
/*
 * Queue-exit teardown: stop the worker, release all groups, wait out any
 * RCU readers if needed, and free the throttle data.
 */
void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than root group). This can happen if cgroup deletion
	 * path claimed the responsibility of cleaning up a group before
	 * queue cleanup code get to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queue hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe to make sure after previous flush if some body did
	 * update limits through cgroup and another work got queued, cancel
	 * it.
	 */
	throtl_shutdown_wq(q);
	throtl_td_free(td);
}
/* Module init: create the dispatch workqueue and register the blkio policy. */
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}
module_init(throtl_init);
| gpl-2.0 |
fredericgermain/linux-sunxi | tools/testing/selftests/kcmp/kcmp_test.c | 2836 | 2116 | #define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <limits.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <linux/unistd.h>
#include <linux/kcmp.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
/* Thin wrapper for the kcmp(2) syscall (no glibc wrapper exists). */
static long sys_kcmp(int pid1, int pid2, int type, int fd1, int fd2)
{
	return syscall(__NR_kcmp, pid1, pid2, type, fd1, fd2);
}
/*
 * kcmp(2) selftest: fork a child that opens the same file and compares
 * file/VM/fs/... resources against the parent.
 *
 * Fixes versus the original:
 *  - "%li" was used to print an int (undefined behavior); now "%d".
 *  - waitpid() was passed P_ALL, which is a waitid(2) idtype, not a
 *    waitpid option (it only worked because P_ALL happens to be 0).
 *  - the parent now propagates the child's verdict instead of always
 *    exiting 0, so the test can actually fail.
 */
int main(int argc, char **argv)
{
	const char kpath[] = "kcmp-test-file";
	int pid1, pid2;
	int fd1, fd2;
	int status;

	fd1 = open(kpath, O_RDWR | O_CREAT | O_TRUNC, 0644);
	pid1 = getpid();
	if (fd1 < 0) {
		perror("Can't create file");
		exit(1);
	}

	pid2 = fork();
	if (pid2 < 0) {
		perror("fork failed");
		exit(1);
	}

	if (!pid2) {
		int pid2 = getpid();	/* shadows parent's pid2 on purpose */
		int ret;

		fd2 = open(kpath, O_RDWR, 0644);
		if (fd2 < 0) {
			perror("Can't open file");
			exit(1);
		}

		/* An example of output and arguments */
		printf("pid1: %6d pid2: %6d FD: %2ld FILES: %2ld VM: %2ld "
		       "FS: %2ld SIGHAND: %2ld IO: %2ld SYSVSEM: %2ld "
		       "INV: %2ld\n",
		       pid1, pid2,
		       sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd2),
		       sys_kcmp(pid1, pid2, KCMP_FILES, 0, 0),
		       sys_kcmp(pid1, pid2, KCMP_VM, 0, 0),
		       sys_kcmp(pid1, pid2, KCMP_FS, 0, 0),
		       sys_kcmp(pid1, pid2, KCMP_SIGHAND, 0, 0),
		       sys_kcmp(pid1, pid2, KCMP_IO, 0, 0),
		       sys_kcmp(pid1, pid2, KCMP_SYSVSEM, 0, 0),

		       /* This one should fail */
		       sys_kcmp(pid1, pid2, KCMP_TYPES + 1, 0, 0));

		/* This one should return same fd */
		ret = sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd1);
		if (ret) {
			printf("FAIL: 0 expected but %d returned (%s)\n",
				ret, strerror(errno));
			ret = -1;
		} else
			printf("PASS: 0 returned as expected\n");

		/* Compare with self */
		ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
		if (ret) {
			printf("FAIL: 0 expected but %d returned (%s)\n",
				ret, strerror(errno));
			ret = -1;
		} else
			printf("PASS: 0 returned as expected\n");

		exit(ret);
	}

	if (waitpid(pid2, &status, 0) < 0) {
		perror("waitpid failed");
		exit(1);
	}

	/* Report failure if the child did not exit cleanly with status 0. */
	return (WIFEXITED(status) && WEXITSTATUS(status) == 0) ? 0 : 1;
}
| gpl-2.0 |
TEAM-Gummy/Gummy_kernel_grouper | drivers/parport/parport_sunbpp.c | 3092 | 10446 | /* parport_sunbpp.c: Parallel-port routines for SBUS
*
* Author: Derrick J. Brashear <shadow@dementia.org>
*
* based on work by:
* Phil Blundell <philb@gnu.org>
* Tim Waugh <tim@cyberelk.demon.co.uk>
* Jose Renau <renau@acm.org>
* David Campbell <campbell@tirian.che.curtin.edu.au>
* Grant Guenther <grant@torque.net>
* Eddie C. Dost <ecd@skynet.be>
* Stephen Williams (steve@icarus.com)
* Gus Baldauf (gbaldauf@ix.netcom.com)
* Peter Zaitcev
* Tom Dyas
*
* Updated to new SBUS device framework: David S. Miller <davem@davemloft.net>
*
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/parport.h>
#include <asm/ptrace.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/oplib.h> /* OpenProm Library */
#include <asm/dma.h> /* BPP uses LSI 64854 for DMA */
#include <asm/irq.h>
#include <asm/sunbpp.h>
#undef __SUNBPP_DEBUG
#ifdef __SUNBPP_DEBUG
#define dprintk(x) printk x
#else
#define dprintk(x)
#endif
/* Clear the DMA interrupt-enable bit in the port CSR. */
static void parport_sunbpp_disable_irq(struct parport *p)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;

	sbus_writel(sbus_readl(&regs->p_csr) & ~DMA_INT_ENAB, &regs->p_csr);
}
/* Set the DMA interrupt-enable bit in the port CSR. */
static void parport_sunbpp_enable_irq(struct parport *p)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;

	sbus_writel(sbus_readl(&regs->p_csr) | DMA_INT_ENAB, &regs->p_csr);
}
/* Write one byte to the data register. */
static void parport_sunbpp_write_data(struct parport *p, unsigned char d)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;

	sbus_writeb(d, &regs->p_dr);
	dprintk((KERN_DEBUG "wrote 0x%x\n", d));
}
/* Read one byte from the data register. */
static unsigned char parport_sunbpp_read_data(struct parport *p)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;

	return sbus_readb(&regs->p_dr);
}
#if 0
static void control_pc_to_sunbpp(struct parport *p, unsigned char status)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
unsigned char value_tcr = sbus_readb(®s->p_tcr);
unsigned char value_or = sbus_readb(®s->p_or);
if (status & PARPORT_CONTROL_STROBE)
value_tcr |= P_TCR_DS;
if (status & PARPORT_CONTROL_AUTOFD)
value_or |= P_OR_AFXN;
if (status & PARPORT_CONTROL_INIT)
value_or |= P_OR_INIT;
if (status & PARPORT_CONTROL_SELECT)
value_or |= P_OR_SLCT_IN;
sbus_writeb(value_or, ®s->p_or);
sbus_writeb(value_tcr, ®s->p_tcr);
}
#endif
/*
 * Translate the SunBPP TCR/IR status registers into PC-style
 * PARPORT_STATUS_* bits (several lines are active-low on this hardware,
 * hence the inverted tests).
 */
static unsigned char status_sunbpp_to_pc(struct parport *p)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
	unsigned char bits = 0;
	unsigned char value_tcr = sbus_readb(&regs->p_tcr);
	unsigned char value_ir = sbus_readb(&regs->p_ir);

	if (!(value_ir & P_IR_ERR))
		bits |= PARPORT_STATUS_ERROR;
	if (!(value_ir & P_IR_SLCT))
		bits |= PARPORT_STATUS_SELECT;
	if (!(value_ir & P_IR_PE))
		bits |= PARPORT_STATUS_PAPEROUT;
	if (value_tcr & P_TCR_ACK)
		bits |= PARPORT_STATUS_ACK;
	if (!(value_tcr & P_TCR_BUSY))
		bits |= PARPORT_STATUS_BUSY;

	dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", value_tcr, value_ir));
	dprintk((KERN_DEBUG "read status 0x%x\n", bits));

	return bits;
}
/*
 * Translate the SunBPP TCR/OR control registers into PC-style
 * PARPORT_CONTROL_* bits (inverse of the frob/write path below).
 */
static unsigned char control_sunbpp_to_pc(struct parport *p)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
	unsigned char bits = 0;
	unsigned char value_tcr = sbus_readb(&regs->p_tcr);
	unsigned char value_or = sbus_readb(&regs->p_or);

	if (!(value_tcr & P_TCR_DS))
		bits |= PARPORT_CONTROL_STROBE;
	if (!(value_or & P_OR_AFXN))
		bits |= PARPORT_CONTROL_AUTOFD;
	if (!(value_or & P_OR_INIT))
		bits |= PARPORT_CONTROL_INIT;
	if (value_or & P_OR_SLCT_IN)
		bits |= PARPORT_CONTROL_SELECT;

	dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", value_tcr, value_or));
	dprintk((KERN_DEBUG "read control 0x%x\n", bits));

	return bits;
}
/* parport op: read current control bits in PC representation. */
static unsigned char parport_sunbpp_read_control(struct parport *p)
{
	return control_sunbpp_to_pc(p);
}
/*
 * parport op: update only the control bits selected by @mask to the values
 * in @val, mapping each PC bit onto the corresponding (mostly active-low)
 * SunBPP OR/TCR bit, then return the resulting control state.
 */
static unsigned char parport_sunbpp_frob_control(struct parport *p,
						 unsigned char mask,
						 unsigned char val)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
	unsigned char value_tcr = sbus_readb(&regs->p_tcr);
	unsigned char value_or = sbus_readb(&regs->p_or);

	dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n",
		 value_tcr, value_or));
	if (mask & PARPORT_CONTROL_STROBE) {
		if (val & PARPORT_CONTROL_STROBE) {
			value_tcr &= ~P_TCR_DS;
		} else {
			value_tcr |= P_TCR_DS;
		}
	}
	if (mask & PARPORT_CONTROL_AUTOFD) {
		if (val & PARPORT_CONTROL_AUTOFD) {
			value_or &= ~P_OR_AFXN;
		} else {
			value_or |= P_OR_AFXN;
		}
	}
	if (mask & PARPORT_CONTROL_INIT) {
		if (val & PARPORT_CONTROL_INIT) {
			value_or &= ~P_OR_INIT;
		} else {
			value_or |= P_OR_INIT;
		}
	}
	if (mask & PARPORT_CONTROL_SELECT) {
		if (val & PARPORT_CONTROL_SELECT) {
			value_or |= P_OR_SLCT_IN;
		} else {
			value_or &= ~P_OR_SLCT_IN;
		}
	}

	/* OR is written before TCR, matching the #if 0 reference code. */
	sbus_writeb(value_or, &regs->p_or);
	sbus_writeb(value_tcr, &regs->p_tcr);

	dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n",
		 value_tcr, value_or));
	return parport_sunbpp_read_control(p);
}
/* parport op: overwrite all four writable control bits at once. */
static void parport_sunbpp_write_control(struct parport *p, unsigned char d)
{
	const unsigned char wm = (PARPORT_CONTROL_STROBE |
				  PARPORT_CONTROL_AUTOFD |
				  PARPORT_CONTROL_INIT |
				  PARPORT_CONTROL_SELECT);

	parport_sunbpp_frob_control (p, wm, d & wm);
}
/* parport op: read current status bits in PC representation. */
static unsigned char parport_sunbpp_read_status(struct parport *p)
{
	return status_sunbpp_to_pc(p);
}
static void parport_sunbpp_data_forward (struct parport *p)
{
struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
unsigned char value_tcr = sbus_readb(®s->p_tcr);
dprintk((KERN_DEBUG "forward\n"));
value_tcr &= ~P_TCR_DIR;
sbus_writeb(value_tcr, ®s->p_tcr);
}
/* Put the data lines into input (reverse) direction. */
static void parport_sunbpp_data_reverse (struct parport *p)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
	unsigned char value_tcr = sbus_readb(&regs->p_tcr);

	dprintk((KERN_DEBUG "reverse\n"));
	value_tcr |= P_TCR_DIR;
	sbus_writeb(value_tcr, &regs->p_tcr);
}
/* parport op: set the default saved register state for a new pardevice. */
static void parport_sunbpp_init_state(struct pardevice *dev, struct parport_state *s)
{
	s->u.pc.ctr = 0xc;
	s->u.pc.ecr = 0x0;
}
/* parport op: capture the current control register into @s. */
static void parport_sunbpp_save_state(struct parport *p, struct parport_state *s)
{
	s->u.pc.ctr = parport_sunbpp_read_control(p);
}
/* parport op: restore the control register previously saved in @s. */
static void parport_sunbpp_restore_state(struct parport *p, struct parport_state *s)
{
	parport_sunbpp_write_control(p, s->u.pc.ctr);
}
/*
 * Port operations: hardware accessors are local, the IEEE1284 transfer
 * modes fall back to the generic software implementations.
 */
static struct parport_operations parport_sunbpp_ops =
{
	.write_data	= parport_sunbpp_write_data,
	.read_data	= parport_sunbpp_read_data,

	.write_control	= parport_sunbpp_write_control,
	.read_control	= parport_sunbpp_read_control,
	.frob_control	= parport_sunbpp_frob_control,

	.read_status	= parport_sunbpp_read_status,

	.enable_irq	= parport_sunbpp_enable_irq,
	.disable_irq	= parport_sunbpp_disable_irq,

	.data_forward	= parport_sunbpp_data_forward,
	.data_reverse	= parport_sunbpp_data_reverse,

	.init_state	= parport_sunbpp_init_state,
	.save_state	= parport_sunbpp_save_state,
	.restore_state	= parport_sunbpp_restore_state,

	.epp_write_data	= parport_ieee1284_epp_write_data,
	.epp_read_data	= parport_ieee1284_epp_read_data,
	.epp_write_addr	= parport_ieee1284_epp_write_addr,
	.epp_read_addr	= parport_ieee1284_epp_read_addr,

	.ecp_write_data	= parport_ieee1284_ecp_write_data,
	.ecp_read_data	= parport_ieee1284_ecp_read_data,
	.ecp_write_addr	= parport_ieee1284_ecp_write_addr,

	.compat_write_data	= parport_ieee1284_write_compat,
	.nibble_read_data	= parport_ieee1284_read_nibble,
	.byte_read_data		= parport_ieee1284_read_byte,

	.owner		= THIS_MODULE,
};
/*
 * Probe a SUNW,bpp SBUS parallel port: map the registers, register a
 * parport, hook the shared interrupt and default to output direction.
 *
 * Fix: the kmalloc and parport_register_port failure paths previously fell
 * through to "return err" with err still 0, so a failed probe was reported
 * as success; both now return -ENOMEM.  kmemdup replaces the open-coded
 * kmalloc+memcpy.
 */
static int __devinit bpp_probe(struct platform_device *op)
{
	struct parport_operations *ops;
	struct bpp_regs __iomem *regs;
	int irq, dma, err = 0, size;
	unsigned char value_tcr;
	void __iomem *base;
	struct parport *p;

	irq = op->archdata.irqs[0];
	base = of_ioremap(&op->resource[0], 0,
			  resource_size(&op->resource[0]),
			  "sunbpp");
	if (!base)
		return -ENODEV;

	size = resource_size(&op->resource[0]);
	dma = PARPORT_DMA_NONE;

	/* Private copy of the ops table for this port instance. */
	ops = kmemdup(&parport_sunbpp_ops, sizeof(struct parport_operations),
		      GFP_KERNEL);
	if (!ops) {
		err = -ENOMEM;
		goto out_unmap;
	}

	dprintk(("register_port\n"));
	p = parport_register_port((unsigned long)base, irq, dma, ops);
	if (!p) {
		err = -ENOMEM;
		goto out_free_ops;
	}

	p->size = size;
	p->dev = &op->dev;

	err = request_irq(p->irq, parport_irq_handler,
			  IRQF_SHARED, p->name, p);
	if (err)
		goto out_put_port;

	parport_sunbpp_enable_irq(p);

	regs = (struct bpp_regs __iomem *)p->base;

	/* Start out in forward (output) direction. */
	value_tcr = sbus_readb(&regs->p_tcr);
	value_tcr &= ~P_TCR_DIR;
	sbus_writeb(value_tcr, &regs->p_tcr);

	printk(KERN_INFO "%s: sunbpp at 0x%lx\n", p->name, p->base);

	dev_set_drvdata(&op->dev, p);

	parport_announce_port(p);

	return 0;

out_put_port:
	parport_put_port(p);

out_free_ops:
	kfree(ops);

out_unmap:
	of_iounmap(&op->resource[0], base, size);

	return err;
}
/* Device removal: undo everything bpp_probe() set up, in reverse order. */
static int __devexit bpp_remove(struct platform_device *op)
{
	struct parport *p = dev_get_drvdata(&op->dev);
	struct parport_operations *ops = p->ops;

	parport_remove_port(p);

	if (p->irq != PARPORT_IRQ_NONE) {
		parport_sunbpp_disable_irq(p);
		free_irq(p->irq, p);
	}

	of_iounmap(&op->resource[0], (void __iomem *) p->base, p->size);
	parport_put_port(p);
	kfree(ops);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}
/* OpenFirmware match table: binds to nodes named "SUNW,bpp". */
static const struct of_device_id bpp_match[] = {
	{
		.name = "SUNW,bpp",
	},
	{},
};

MODULE_DEVICE_TABLE(of, bpp_match);
/* Platform driver glue for the SBUS bidirectional parallel port. */
static struct platform_driver bpp_sbus_driver = {
	.driver = {
		.name = "bpp",
		.owner = THIS_MODULE,
		.of_match_table = bpp_match,
	},
	.probe		= bpp_probe,
	.remove		= __devexit_p(bpp_remove),
};
/* Module entry: register the platform driver. */
static int __init parport_sunbpp_init(void)
{
	return platform_driver_register(&bpp_sbus_driver);
}

/* Module exit: unregister the platform driver. */
static void __exit parport_sunbpp_exit(void)
{
	platform_driver_unregister(&bpp_sbus_driver);
}
MODULE_AUTHOR("Derrick J Brashear");
MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port");
MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
module_init(parport_sunbpp_init)
module_exit(parport_sunbpp_exit)
| gpl-2.0 |
SamueleCiprietti/nova_kernel | drivers/media/video/bt866.c | 3348 | 6424 | /*
bt866 - BT866 Digital Video Encoder (Rockwell Part)
Copyright (C) 1999 Mike Bernson <mike@mlb.org>
Copyright (C) 1998 Dave Perks <dperks@ibm.net>
Modifications for LML33/DC10plus unified driver
Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
This code was modify/ported from the saa7111 driver written
by Dave Perks.
This code was adapted for the bt866 by Christer Weinigel and ported
to 2.6 by Martin Samuelsson.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
MODULE_DESCRIPTION("Brooktree-866 video encoder driver");
MODULE_AUTHOR("Mike Bernson & Dave Perks");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* ----------------------------------------------------------------------- */
/* Per-device state: v4l2 subdev plus a shadow copy of the chip registers. */
struct bt866 {
	struct v4l2_subdev sd;
	u8 reg[256];		/* last value written to each subaddress */
};

/* Recover the driver state from the embedded v4l2_subdev. */
static inline struct bt866 *to_bt866(struct v4l2_subdev *sd)
{
	return container_of(sd, struct bt866, sd);
}
/*
 * Write one register over I2C, retrying up to three times with a 100 ms
 * pause between attempts.  The shadow register copy is updated up front.
 * Returns 0 on success, -1 after three failed attempts.
 */
static int bt866_write(struct bt866 *encoder, u8 subaddr, u8 data)
{
	struct i2c_client *client = v4l2_get_subdevdata(&encoder->sd);
	u8 msg[2];
	int attempts = 0;

	msg[0] = subaddr;
	msg[1] = data;

	encoder->reg[subaddr] = data;

	v4l_dbg(1, debug, client, "write 0x%02x = 0x%02x\n", subaddr, data);

	while (attempts < 3) {
		if (i2c_master_send(client, msg, 2) == 2)
			break;
		attempts++;
		v4l_warn(client, "error #%d writing to 0x%02x\n",
				attempts, subaddr);
		schedule_timeout_interruptible(msecs_to_jiffies(100));
	}

	if (attempts == 3) {
		v4l_warn(client, "giving up\n");
		return -1;
	}

	return 0;
}
/* Accept only one output standard; anything else is rejected. */
static int bt866_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
	v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std);

	/*
	 * NOTE(review): the historic comment claimed "only PAL supported",
	 * but the check below actually accepts NTSC and rejects everything
	 * else — confirm which standard the register setup targets.
	 */
	if (!(std & V4L2_STD_NTSC))
		return -EINVAL;
	return 0;
}
/*
 * Select the encoder input and (re)load the static init table.
 *
 * Fix: the table walk previously used "i < ARRAY_SIZE(init) / 2" while
 * also stepping i by 2, so only the first half of the table (and thus
 * only part of the overlay palette) was ever written.  Each entry is a
 * (subaddress, value) pair, so the bound must be the full element count.
 */
static int bt866_s_routing(struct v4l2_subdev *sd,
			   u32 input, u32 output, u32 config)
{
	static const __u8 init[] = {
		0xc8, 0xcc, /* CRSCALE */
		0xca, 0x91, /* CBSCALE */
		0xcc, 0x24, /* YC16 | OSDNUM */
		0xda, 0x00, /*  */
		0xdc, 0x24, /* SETMODE | PAL */
		0xde, 0x02, /* EACTIVE */

		/* overlay colors */
		0x70, 0xEB, 0x90, 0x80, 0xB0, 0x80, /* white */
		0x72, 0xA2, 0x92, 0x8E, 0xB2, 0x2C, /* yellow */
		0x74, 0x83, 0x94, 0x2C, 0xB4, 0x9C, /* cyan */
		0x76, 0x70, 0x96, 0x3A, 0xB6, 0x48, /* green */
		0x78, 0x54, 0x98, 0xC6, 0xB8, 0xB8, /* magenta */
		0x7A, 0x41, 0x9A, 0xD4, 0xBA, 0x64, /* red */
		0x7C, 0x23, 0x9C, 0x72, 0xBC, 0xD4, /* blue */
		0x7E, 0x10, 0x9E, 0x80, 0xBE, 0x80, /* black */

		0x60, 0xEB, 0x80, 0x80, 0xc0, 0x80, /* white */
		0x62, 0xA2, 0x82, 0x8E, 0xc2, 0x2C, /* yellow */
		0x64, 0x83, 0x84, 0x2C, 0xc4, 0x9C, /* cyan */
		0x66, 0x70, 0x86, 0x3A, 0xc6, 0x48, /* green */
		0x68, 0x54, 0x88, 0xC6, 0xc8, 0xB8, /* magenta */
		0x6A, 0x41, 0x8A, 0xD4, 0xcA, 0x64, /* red */
		0x6C, 0x23, 0x8C, 0x72, 0xcC, 0xD4, /* blue */
		0x6E, 0x10, 0x8E, 0x80, 0xcE, 0x80, /* black */
	};
	struct bt866 *encoder = to_bt866(sd);
	u8 val;
	int i;

	/* Write every (subaddress, value) pair of the table. */
	for (i = 0; i < ARRAY_SIZE(init); i += 2)
		bt866_write(encoder, init[i], init[i+1]);

	val = encoder->reg[0xdc];

	if (input == 0)
		val |= 0x40; /* CBSWAP */
	else
		val &= ~0x40; /* !CBSWAP */
	bt866_write(encoder, 0xdc, val);

	val = encoder->reg[0xcc];
	if (input == 2)
		val |= 0x01; /* OSDBAR */
	else
		val &= ~0x01; /* !OSDBAR */
	bt866_write(encoder, 0xcc, val);

	v4l2_dbg(1, debug, sd, "set input %d\n", input);

	/* Only inputs 0..2 are valid. */
	switch (input) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
#if 0
/* Code to setup square pixels, might be of some use in the future,
   but is currently unused. */
/* NOTE(review): dead fragment, compiled out; references 'iarg'/'client'
   which do not exist at this scope — kept only as documentation. */
	val = encoder->reg[0xdc];
	if (*iarg)
		val |= 1; /* SQUARE */
	else
		val &= ~1; /* !SQUARE */
	bt866_write(client, 0xdc, val);
#endif
/* v4l2 core op: report the chip identity (BT866, revision 0). */
static int bt866_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_BT866, 0);
}
/* ----------------------------------------------------------------------- */
/* v4l2 subdev operation tables. */
static const struct v4l2_subdev_core_ops bt866_core_ops = {
	.g_chip_ident = bt866_g_chip_ident,
};

static const struct v4l2_subdev_video_ops bt866_video_ops = {
	.s_std_output = bt866_s_std_output,
	.s_routing = bt866_s_routing,
};

static const struct v4l2_subdev_ops bt866_ops = {
	.core = &bt866_core_ops,
	.video = &bt866_video_ops,
};
/* I2C probe: allocate driver state and register the v4l2 subdev. */
static int bt866_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct bt866 *encoder;
	struct v4l2_subdev *sd;

	v4l_info(client, "chip found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);

	encoder = kzalloc(sizeof(*encoder), GFP_KERNEL);
	if (encoder == NULL)
		return -ENOMEM;
	sd = &encoder->sd;
	v4l2_i2c_subdev_init(sd, client, &bt866_ops);
	return 0;
}
/* I2C remove: unregister the subdev and free the state allocated in probe. */
static int bt866_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);

	v4l2_device_unregister_subdev(sd);
	kfree(to_bt866(sd));
	return 0;
}
/* Supported I2C device name. */
static const struct i2c_device_id bt866_id[] = {
	{ "bt866", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, bt866_id);
/* I2C driver glue. */
static struct i2c_driver bt866_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "bt866",
	},
	.probe		= bt866_probe,
	.remove		= bt866_remove,
	.id_table	= bt866_id,
};
/* Module entry: register the I2C driver. */
static __init int init_bt866(void)
{
	return i2c_add_driver(&bt866_driver);
}

/* Module exit: unregister the I2C driver. */
static __exit void exit_bt866(void)
{
	i2c_del_driver(&bt866_driver);
}
module_init(init_bt866);
module_exit(exit_bt866);
| gpl-2.0 |
sub77/android_kernel_samsung_matissewifi | drivers/mfd/pm8018-core.c | 3348 | 19304 | /*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/msm_ssbi.h>
#include <linux/mfd/core.h>
#include <linux/mfd/pm8xxx/pm8018.h>
#include <linux/mfd/pm8xxx/core.h>
#include <linux/mfd/pm8xxx/regulator.h>
#include <linux/leds-pm8xxx.h>
/* PMIC PM8018 SSBI Addresses */
#define REG_HWREV 0x002 /* PMIC4 revision */
#define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */
#define REG_MPP_BASE 0x050
#define REG_IRQ_BASE 0x1BB
#define REG_RTC_BASE 0x11D
#define REG_TEMP_ALARM_CTRL 0x01B
#define REG_TEMP_ALARM_PWM 0x09B
#define PM8018_VERSION_MASK 0xFFF0
#define PM8018_VERSION_VALUE 0x08F0
#define PM8018_REVISION_MASK 0x000F
#define REG_PM8018_PON_CNTRL_3 0x01D
/* Build a one-IRQ IORESOURCE_IRQ entry (start == end == _irq). */
#define SINGLE_IRQ_RESOURCE(_name, _irq) \
{ \
	.name	= _name, \
	.start	= _irq, \
	.end	= _irq, \
	.flags	= IORESOURCE_IRQ, \
}
/* Per-chip state shared by all pm8xxx sub-devices via drvdata. */
struct pm8018 {
	struct device					*dev;
	struct pm_irq_chip				*irq_chip;
	struct mfd_cell					*mfd_regulators;
	struct pm8xxx_regulator_core_platform_data	*regulator_cdata;
	u32						rev_registers;	/* cached HWREV/HWREV_2 */
	u8						restart_reason;
};
/*
 * SSBI register accessors: route pm8xxx-core byte/buffer reads and writes
 * to the parent SSBI controller.  The chip pointer is recovered from the
 * pm8xxx drvdata attached to @dev.
 */
static int pm8018_readb(const struct device *dev, u16 addr, u8 *val)
{
	const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev);
	const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data;

	return msm_ssbi_read(pmic->dev->parent, addr, val, 1);
}

static int pm8018_writeb(const struct device *dev, u16 addr, u8 val)
{
	const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev);
	const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data;

	return msm_ssbi_write(pmic->dev->parent, addr, &val, 1);
}

/* Multi-byte read starting at @addr. */
static int pm8018_read_buf(const struct device *dev, u16 addr, u8 *buf,
									int cnt)
{
	const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev);
	const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data;

	return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt);
}

/* Multi-byte write starting at @addr. */
static int pm8018_write_buf(const struct device *dev, u16 addr, u8 *buf,
									int cnt)
{
	const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev);
	const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data;

	return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt);
}
/* Query the latched status of one PMIC interrupt via the irq chip. */
static int pm8018_read_irq_stat(const struct device *dev, int irq)
{
	const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev);
	const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data;

	return pm8xxx_get_irq_stat(pmic->irq_chip, irq);
}

/* Match the cached revision registers against the PM8018 signature. */
static enum pm8xxx_version pm8018_get_version(const struct device *dev)
{
	const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev);
	const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data;
	enum pm8xxx_version version = -ENODEV;

	if ((pmic->rev_registers & PM8018_VERSION_MASK) == PM8018_VERSION_VALUE)
		version = PM8XXX_VERSION_8018;

	return version;
}

/* Low nibble of the cached revision registers is the silicon revision. */
static int pm8018_get_revision(const struct device *dev)
{
	const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev);
	const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data;

	return pmic->rev_registers & PM8018_REVISION_MASK;
}

/* Reason code captured at probe time for the last PMIC restart. */
static u8 pm8018_restart_reason(const struct device *dev)
{
	const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev);
	const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data;

	return pmic->restart_reason;
}
/* pm8xxx core callback table; pm_chip_data is filled in at probe time. */
static struct pm8xxx_drvdata pm8018_drvdata = {
	.pmic_readb		= pm8018_readb,
	.pmic_writeb		= pm8018_writeb,
	.pmic_read_buf		= pm8018_read_buf,
	.pmic_write_buf		= pm8018_write_buf,
	.pmic_read_irq_stat	= pm8018_read_irq_stat,
	.pmic_get_version	= pm8018_get_version,
	.pmic_get_revision	= pm8018_get_revision,
	.pmic_restart_reason	= pm8018_restart_reason,
};
/* GPIO sub-device: one IRQ per GPIO, taken from the GPIO irq block. */
static const struct resource gpio_cell_resources[] __devinitconst = {
	[0] = {
		.start	= PM8018_IRQ_BLOCK_BIT(PM8018_GPIO_BLOCK_START, 0),
		.end	= PM8018_IRQ_BLOCK_BIT(PM8018_GPIO_BLOCK_START, 0)
			+ PM8018_NR_GPIOS - 1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct mfd_cell gpio_cell __devinitdata = {
	.name		= PM8XXX_GPIO_DEV_NAME,
	.id		= -1,
	.resources	= gpio_cell_resources,
	.num_resources	= ARRAY_SIZE(gpio_cell_resources),
};
/* ADC sub-device: end-of-conversion and battery-temperature IRQs. */
static const struct resource adc_cell_resources[] __devinitconst = {
	SINGLE_IRQ_RESOURCE(NULL, PM8018_ADC_EOC_USR_IRQ),
	SINGLE_IRQ_RESOURCE(NULL, PM8018_ADC_BATT_TEMP_WARM_IRQ),
	SINGLE_IRQ_RESOURCE(NULL, PM8018_ADC_BATT_TEMP_COLD_IRQ),
};

static struct mfd_cell adc_cell __devinitdata = {
	.name		= PM8XXX_ADC_DEV_NAME,
	.id		= -1,
	.resources	= adc_cell_resources,
	.num_resources	= ARRAY_SIZE(adc_cell_resources),
};
static const struct resource mpp_cell_resources[] __devinitconst = {
{
.start = PM8018_IRQ_BLOCK_BIT(PM8018_MPP_BLOCK_START, 0),
.end = PM8018_IRQ_BLOCK_BIT(PM8018_MPP_BLOCK_START, 0)
+ PM8018_NR_MPPS - 1,
.flags = IORESOURCE_IRQ,
},
};
static struct mfd_cell mpp_cell __devinitdata = {
.name = PM8XXX_MPP_DEV_NAME,
.id = -1,
.resources = mpp_cell_resources,
.num_resources = ARRAY_SIZE(mpp_cell_resources),
};
static const struct resource rtc_cell_resources[] __devinitconst = {
[0] = SINGLE_IRQ_RESOURCE(NULL, PM8018_RTC_ALARM_IRQ),
[1] = {
.name = "pmic_rtc_base",
.start = REG_RTC_BASE,
.end = REG_RTC_BASE,
.flags = IORESOURCE_IO,
},
};
static struct mfd_cell rtc_cell __devinitdata = {
.name = PM8XXX_RTC_DEV_NAME,
.id = -1,
.resources = rtc_cell_resources,
.num_resources = ARRAY_SIZE(rtc_cell_resources),
};
static const struct resource resources_pwrkey[] __devinitconst = {
SINGLE_IRQ_RESOURCE(NULL, PM8018_PWRKEY_REL_IRQ),
SINGLE_IRQ_RESOURCE(NULL, PM8018_PWRKEY_PRESS_IRQ),
};
static struct mfd_cell pwrkey_cell __devinitdata = {
.name = PM8XXX_PWRKEY_DEV_NAME,
.id = -1,
.num_resources = ARRAY_SIZE(resources_pwrkey),
.resources = resources_pwrkey,
};
static struct mfd_cell misc_cell __devinitdata = {
.name = PM8XXX_MISC_DEV_NAME,
.id = -1,
};
static struct mfd_cell debugfs_cell __devinitdata = {
.name = "pm8xxx-debug",
.id = -1,
.platform_data = "pm8018-dbg",
.pdata_size = sizeof("pm8018-dbg"),
};
static struct mfd_cell pwm_cell __devinitdata = {
.name = PM8XXX_PWM_DEV_NAME,
.id = -1,
};
static struct mfd_cell leds_cell __devinitdata = {
.name = PM8XXX_LEDS_DEV_NAME,
.id = -1,
};
static const struct resource thermal_alarm_cell_resources[] __devinitconst = {
SINGLE_IRQ_RESOURCE("pm8018_tempstat_irq", PM8018_TEMPSTAT_IRQ),
SINGLE_IRQ_RESOURCE("pm8018_overtemp_irq", PM8018_OVERTEMP_IRQ),
};
static struct pm8xxx_tm_core_data thermal_alarm_cdata = {
.adc_channel = CHANNEL_DIE_TEMP,
.adc_type = PM8XXX_TM_ADC_PM8XXX_ADC,
.reg_addr_temp_alarm_ctrl = REG_TEMP_ALARM_CTRL,
.reg_addr_temp_alarm_pwm = REG_TEMP_ALARM_PWM,
.tm_name = "pm8018_tz",
.irq_name_temp_stat = "pm8018_tempstat_irq",
.irq_name_over_temp = "pm8018_overtemp_irq",
};
static struct mfd_cell thermal_alarm_cell __devinitdata = {
.name = PM8XXX_TM_DEV_NAME,
.id = -1,
.resources = thermal_alarm_cell_resources,
.num_resources = ARRAY_SIZE(thermal_alarm_cell_resources),
.platform_data = &thermal_alarm_cdata,
.pdata_size = sizeof(struct pm8xxx_tm_core_data),
};
/*
 * Register map for every PM8018 regulator.  Entries are matched by name (or
 * pin-control name) in match_regulator(); the per-entry pc_lock mutexes are
 * initialized in pm8018_add_regulators().
 */
static struct pm8xxx_vreg regulator_data[] = {
	/*   name	       pc_name	       ctrl   test   hpm_min */
	PLDO("8018_l2",      "8018_l2_pc",  0x0B0, 0x0B1, LDO_50),
	PLDO("8018_l3",      "8018_l3_pc",  0x0B2, 0x0B3, LDO_50),
	PLDO("8018_l4",      "8018_l4_pc",  0x0B4, 0x0B5, LDO_300),
	PLDO("8018_l5",      "8018_l5_pc",  0x0B6, 0x0B7, LDO_150),
	PLDO("8018_l6",      "8018_l6_pc",  0x0B8, 0x0B9, LDO_150),
	PLDO("8018_l7",      "8018_l7_pc",  0x0BA, 0x0BB, LDO_300),
	NLDO("8018_l8",      "8018_l8_pc",  0x0BC, 0x0BD, LDO_150),
	NLDO1200("8018_l9",		    0x0BE, 0x0BF, LDO_1200),
	NLDO1200("8018_l10",		    0x0C0, 0x0C1, LDO_1200),
	NLDO1200("8018_l11",		    0x0C2, 0x0C3, LDO_1200),
	NLDO1200("8018_l12",		    0x0C4, 0x0C5, LDO_1200),
	PLDO("8018_l13",     "8018_l13_pc", 0x0C8, 0x0C9, LDO_50),
	PLDO("8018_l14",     "8018_l14_pc", 0x0CA, 0x0CB, LDO_50),
	/*   name	       pc_name	       ctrl   test2  clk    sleep  hpm_min */
	SMPS("8018_s1",      "8018_s1_pc",  0x1D0, 0x1D5, 0x009, 0x1D2, SMPS_1500),
	SMPS("8018_s2",      "8018_s2_pc",  0x1D8, 0x1DD, 0x00A, 0x1DA, SMPS_1500),
	SMPS("8018_s3",      "8018_s3_pc",  0x1E0, 0x1E5, 0x00B, 0x1E2, SMPS_1500),
	SMPS("8018_s4",      "8018_s4_pc",  0x1E8, 0x1ED, 0x00C, 0x1EA, SMPS_1500),
	SMPS("8018_s5",      "8018_s5_pc",  0x1F0, 0x1F5, 0x00D, 0x1F2, SMPS_1500),
	/* name		      pc_name	      ctrl   test */
	VS("8018_lvs1",      "8018_lvs1_pc", 0x060, 0x061),
};
#define MAX_NAME_COMPARISON_LEN 32
/*
 * match_regulator - find the regulator_data[] entry for a constraint name
 * @core_data: output; receives the matched vreg pointer and whether the
 *             match was against the pin-control name.
 * @name:      regulator constraint name from the board platform data.
 *
 * Compares at most MAX_NAME_COMPARISON_LEN characters.  Returns 1 on a
 * match, 0 (after logging an error) when no entry matches.
 *
 * Note: the source as imported had "&regulator_data[i]" mangled into
 * "(R)ulator_data[i]" by an HTML-entity conversion; restored here.
 */
static int __devinit match_regulator(
	struct pm8xxx_regulator_core_platform_data *core_data, const char *name)
{
	int found = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(regulator_data); i++) {
		if (regulator_data[i].rdesc.name
		    && strncmp(regulator_data[i].rdesc.name, name,
			       MAX_NAME_COMPARISON_LEN) == 0) {
			/* Matched the normal name. */
			core_data->is_pin_controlled = false;
			core_data->vreg = &regulator_data[i];
			found = 1;
			break;
		} else if (regulator_data[i].rdesc_pc.name
			   && strncmp(regulator_data[i].rdesc_pc.name, name,
				      MAX_NAME_COMPARISON_LEN) == 0) {
			/* Matched the pin-controlled name. */
			core_data->is_pin_controlled = true;
			core_data->vreg = &regulator_data[i];
			found = 1;
			break;
		}
	}

	if (!found)
		pr_err("could not find a match for regulator: %s\n", name);

	return found;
}
/*
 * pm8018_add_regulators - register one MFD child per board regulator
 * @pdata:    board platform data carrying regulator_pdatas[num_regulators].
 * @pmic:     chip instance; on success owns the allocated cell/core-data
 *            arrays (freed in pm8018_remove / probe error path).
 * @irq_base: base interrupt number passed through to mfd_add_devices().
 *
 * Returns 0 on success or a negative errno.  On failure all pc_lock
 * mutexes are destroyed again and the allocations are released.
 *
 * Fixes vs. the imported source: restored "&regulator_data[i]" (mangled by
 * HTML-entity conversion) and use %zu for the size_t results of sizeof
 * multiplications in the error messages (mismatched printf specifiers are
 * undefined behavior).
 */
static int __devinit
pm8018_add_regulators(const struct pm8018_platform_data *pdata,
		      struct pm8018 *pmic, int irq_base)
{
	int ret = 0;
	struct mfd_cell *mfd_regulators;
	struct pm8xxx_regulator_core_platform_data *cdata;
	int i;

	/* Add one device for each regulator used by the board. */
	mfd_regulators = kzalloc(sizeof(struct mfd_cell)
				 * (pdata->num_regulators), GFP_KERNEL);
	if (!mfd_regulators) {
		pr_err("Cannot allocate %zu bytes for pm8018 regulator "
			"mfd cells\n", sizeof(struct mfd_cell)
				       * (pdata->num_regulators));
		return -ENOMEM;
	}
	cdata = kzalloc(sizeof(struct pm8xxx_regulator_core_platform_data)
			* pdata->num_regulators, GFP_KERNEL);
	if (!cdata) {
		pr_err("Cannot allocate %zu bytes for pm8018 regulator "
			"core data\n", pdata->num_regulators
			* sizeof(struct pm8xxx_regulator_core_platform_data));
		kfree(mfd_regulators);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(regulator_data); i++)
		mutex_init(&regulator_data[i].pc_lock);

	for (i = 0; i < pdata->num_regulators; i++) {
		if (!pdata->regulator_pdatas[i].init_data.constraints.name) {
			pr_err("name missing for regulator %d\n", i);
			ret = -EINVAL;
			goto bail;
		}
		if (!match_regulator(&cdata[i],
		      pdata->regulator_pdatas[i].init_data.constraints.name)) {
			ret = -ENODEV;
			goto bail;
		}
		cdata[i].pdata = &(pdata->regulator_pdatas[i]);
		mfd_regulators[i].name = PM8XXX_REGULATOR_DEV_NAME;
		mfd_regulators[i].id = cdata[i].pdata->id;
		mfd_regulators[i].platform_data = &cdata[i];
		mfd_regulators[i].pdata_size =
			sizeof(struct pm8xxx_regulator_core_platform_data);
	}

	ret = mfd_add_devices(pmic->dev, 0, mfd_regulators,
			      pdata->num_regulators, NULL, irq_base);
	if (ret)
		goto bail;

	/* Ownership of the arrays passes to the chip instance. */
	pmic->mfd_regulators = mfd_regulators;
	pmic->regulator_cdata = cdata;
	return ret;

bail:
	for (i = 0; i < ARRAY_SIZE(regulator_data); i++)
		mutex_destroy(&regulator_data[i].pc_lock);
	kfree(mfd_regulators);
	kfree(cdata);
	return ret;
}
/*
 * pm8018_add_subdevices - register every MFD child described by pdata
 *
 * The IRQ chip is initialized first so that irq_base is valid for all
 * subsequent children.  Optional children (gpio, mpp, rtc, pwrkey, misc,
 * adc, leds, regulators) are added only when their platform data is
 * present; debugfs, pwm and the thermal alarm are always added.
 *
 * Returns 0 on success or a negative errno; on failure the IRQ chip is
 * torn down again (already-added cells are removed by the caller via
 * mfd_remove_devices()).
 *
 * Fix: the adc branch logged its error but fell through without
 * "goto bail", so a later successful registration could overwrite ret and
 * silently swallow the failure; made consistent with every other branch.
 */
static int __devinit
pm8018_add_subdevices(const struct pm8018_platform_data *pdata,
		      struct pm8018 *pmic)
{
	int ret = 0, irq_base = 0;
	struct pm_irq_chip *irq_chip;

	if (pdata->irq_pdata) {
		pdata->irq_pdata->irq_cdata.nirqs = PM8018_NR_IRQS;
		pdata->irq_pdata->irq_cdata.base_addr = REG_IRQ_BASE;
		irq_base = pdata->irq_pdata->irq_base;
		irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata);
		if (IS_ERR(irq_chip)) {
			pr_err("Failed to init interrupts ret=%ld\n",
				PTR_ERR(irq_chip));
			return PTR_ERR(irq_chip);
		}
		pmic->irq_chip = irq_chip;
	}

	if (pdata->gpio_pdata) {
		pdata->gpio_pdata->gpio_cdata.ngpios = PM8018_NR_GPIOS;
		gpio_cell.platform_data = pdata->gpio_pdata;
		gpio_cell.pdata_size = sizeof(struct pm8xxx_gpio_platform_data);
		ret = mfd_add_devices(pmic->dev, 0, &gpio_cell, 1,
				      NULL, irq_base);
		if (ret) {
			pr_err("Failed to add gpio subdevice ret=%d\n", ret);
			goto bail;
		}
	}

	if (pdata->mpp_pdata) {
		pdata->mpp_pdata->core_data.nmpps = PM8018_NR_MPPS;
		pdata->mpp_pdata->core_data.base_addr = REG_MPP_BASE;
		mpp_cell.platform_data = pdata->mpp_pdata;
		mpp_cell.pdata_size = sizeof(struct pm8xxx_mpp_platform_data);
		ret = mfd_add_devices(pmic->dev, 0, &mpp_cell, 1, NULL,
				      irq_base);
		if (ret) {
			pr_err("Failed to add mpp subdevice ret=%d\n", ret);
			goto bail;
		}
	}

	if (pdata->rtc_pdata) {
		rtc_cell.platform_data = pdata->rtc_pdata;
		rtc_cell.pdata_size = sizeof(struct pm8xxx_rtc_platform_data);
		ret = mfd_add_devices(pmic->dev, 0, &rtc_cell, 1, NULL,
				      irq_base);
		if (ret) {
			pr_err("Failed to add rtc subdevice ret=%d\n", ret);
			goto bail;
		}
	}

	if (pdata->pwrkey_pdata) {
		pwrkey_cell.platform_data = pdata->pwrkey_pdata;
		pwrkey_cell.pdata_size =
			sizeof(struct pm8xxx_pwrkey_platform_data);
		ret = mfd_add_devices(pmic->dev, 0, &pwrkey_cell, 1, NULL,
				      irq_base);
		if (ret) {
			pr_err("Failed to add pwrkey subdevice ret=%d\n", ret);
			goto bail;
		}
	}

	if (pdata->misc_pdata) {
		misc_cell.platform_data = pdata->misc_pdata;
		misc_cell.pdata_size = sizeof(struct pm8xxx_misc_platform_data);
		ret = mfd_add_devices(pmic->dev, 0, &misc_cell, 1, NULL,
				      irq_base);
		if (ret) {
			pr_err("Failed to add misc subdevice ret=%d\n", ret);
			goto bail;
		}
	}

	if (pdata->adc_pdata) {
		adc_cell.platform_data = pdata->adc_pdata;
		adc_cell.pdata_size = sizeof(struct pm8xxx_adc_platform_data);
		ret = mfd_add_devices(pmic->dev, 0, &adc_cell, 1, NULL,
				      irq_base);
		if (ret) {
			pr_err("Failed to add adc subdevice ret=%d\n", ret);
			goto bail;
		}
	}

	if (pdata->leds_pdata) {
		leds_cell.platform_data = pdata->leds_pdata;
		leds_cell.pdata_size = sizeof(struct pm8xxx_led_platform_data);
		/* LEDs take no PMIC interrupts, hence irq_base 0. */
		ret = mfd_add_devices(pmic->dev, 0, &leds_cell, 1, NULL, 0);
		if (ret) {
			pr_err("Failed to add leds subdevice ret=%d\n", ret);
			goto bail;
		}
	}

	ret = mfd_add_devices(pmic->dev, 0, &debugfs_cell, 1, NULL, irq_base);
	if (ret) {
		pr_err("Failed to add debugfs subdevice ret=%d\n", ret);
		goto bail;
	}

	ret = mfd_add_devices(pmic->dev, 0, &pwm_cell, 1, NULL, 0);
	if (ret) {
		pr_err("Failed to add pwm subdevice ret=%d\n", ret);
		goto bail;
	}

	if (pdata->num_regulators > 0 && pdata->regulator_pdatas) {
		ret = pm8018_add_regulators(pdata, pmic, irq_base);
		if (ret) {
			pr_err("Failed to add regulator subdevices ret=%d\n",
				ret);
			goto bail;
		}
	}

	ret = mfd_add_devices(pmic->dev, 0, &thermal_alarm_cell, 1, NULL,
			      irq_base);
	if (ret) {
		pr_err("Failed to add thermal alarm subdevice, ret=%d\n", ret);
		goto bail;
	}

	return 0;

bail:
	if (pmic->irq_chip) {
		pm8xxx_irq_exit(pmic->irq_chip);
		pmic->irq_chip = NULL;
	}
	return ret;
}
/* Human-readable revision names, indexed by pm8xxx_get_revision(). */
static const char * const pm8018_rev_names[] = {
	[PM8XXX_REVISION_8018_TEST] = "test",
	[PM8XXX_REVISION_8018_1p0]  = "1.0",
	[PM8XXX_REVISION_8018_2p0]  = "2.0",
	[PM8XXX_REVISION_8018_2p1]  = "2.1",
};
/*
 * pm8018_probe - bring up the PM8018 PMIC core.
 *
 * Reads the two hardware revision registers over SSBI, publishes the chip
 * callbacks via platform drvdata, logs the version/revision and the last
 * restart reason, then registers all MFD sub-devices.  Returns 0 on
 * success or a negative errno; on failure everything allocated here is
 * released again.
 */
static int __devinit pm8018_probe(struct platform_device *pdev)
{
	const struct pm8018_platform_data *pdata = pdev->dev.platform_data;
	const char *revision_name = "unknown";
	struct pm8018 *pmic;
	enum pm8xxx_version version;
	int revision;
	int rc;
	u8 val;

	if (!pdata) {
		pr_err("missing platform data\n");
		return -EINVAL;
	}

	pmic = kzalloc(sizeof(struct pm8018), GFP_KERNEL);
	if (!pmic) {
		pr_err("Cannot alloc pm8018 struct\n");
		return -ENOMEM;
	}

	/* Read PMIC chip revision */
	rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val));
	if (rc) {
		pr_err("Failed to read hw rev 1 reg %d:rc=%d\n", REG_HWREV, rc);
		goto err_read_rev;
	}
	pr_info("PMIC revision 1: %02X\n", val);
	pmic->rev_registers = val;

	/* Read PMIC chip revision 2 */
	rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val));
	if (rc) {
		pr_err("Failed to read hw rev 2 reg %d:rc=%d\n", REG_HWREV_2,
			rc);
		goto err_read_rev;
	}
	pr_info("PMIC revision 2: %02X\n", val);
	/* Revision register 2 forms the high byte of the combined word. */
	pmic->rev_registers |= val << BITS_PER_BYTE;

	/* Publish callbacks before using the pm8xxx_get_* accessors below,
	 * which reach back through this drvdata. */
	pmic->dev = &pdev->dev;
	pm8018_drvdata.pm_chip_data = pmic;
	platform_set_drvdata(pdev, &pm8018_drvdata);

	/* Print out human readable version and revision names. */
	version = pm8xxx_get_version(pmic->dev);
	if (version == PM8XXX_VERSION_8018) {
		revision = pm8xxx_get_revision(pmic->dev);
		if (revision >= 0 && revision < ARRAY_SIZE(pm8018_rev_names))
			revision_name = pm8018_rev_names[revision];
		pr_info("PMIC version: PM8018 rev %s\n", revision_name);
	} else {
		/* Unexpected chip: warn but continue with probing. */
		WARN_ON(version != PM8XXX_VERSION_8018);
	}

	/* Log human readable restart reason */
	rc = msm_ssbi_read(pdev->dev.parent, REG_PM8018_PON_CNTRL_3, &val, 1);
	if (rc) {
		pr_err("Cannot read restart reason rc=%d\n", rc);
		goto err_read_rev;
	}
	val &= PM8XXX_RESTART_REASON_MASK;
	pr_info("PMIC Restart Reason: %s\n", pm8xxx_restart_reason_str[val]);
	pmic->restart_reason = val;

	rc = pm8018_add_subdevices(pdata, pmic);
	if (rc) {
		pr_err("Cannot add subdevices rc=%d\n", rc);
		goto err;
	}

	/* gpio might not work if no irq device is found */
	WARN_ON(pmic->irq_chip == NULL);

	return 0;

err:
	mfd_remove_devices(pmic->dev);
	platform_set_drvdata(pdev, NULL);
	kfree(pmic->mfd_regulators);
	kfree(pmic->regulator_cdata);
err_read_rev:
	kfree(pmic);
	return rc;
}
/*
 * pm8018_remove - tear down everything pm8018_probe() set up
 *
 * Removes all MFD children, shuts down the IRQ chip, destroys the
 * per-regulator pin-control mutexes (only if regulators were registered),
 * and frees the chip instance and its regulator arrays.
 *
 * Fix: restored "&regulator_data[i]" in the mutex_destroy() call, which
 * the imported source had mangled via HTML-entity conversion.
 */
static int __devexit pm8018_remove(struct platform_device *pdev)
{
	struct pm8xxx_drvdata *drvdata;
	struct pm8018 *pmic = NULL;
	int i;

	drvdata = platform_get_drvdata(pdev);
	if (drvdata)
		pmic = drvdata->pm_chip_data;
	if (pmic) {
		if (pmic->dev)
			mfd_remove_devices(pmic->dev);
		if (pmic->irq_chip) {
			pm8xxx_irq_exit(pmic->irq_chip);
			pmic->irq_chip = NULL;
		}
		if (pmic->mfd_regulators) {
			for (i = 0; i < ARRAY_SIZE(regulator_data); i++)
				mutex_destroy(&regulator_data[i].pc_lock);
		}
		kfree(pmic->mfd_regulators);
		kfree(pmic->regulator_cdata);
		kfree(pmic);
	}
	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* Platform driver bound by name to the PM8018 core platform device. */
static struct platform_driver pm8018_driver = {
	.probe		= pm8018_probe,
	.remove		= __devexit_p(pm8018_remove),
	.driver		= {
		.name	= PM8018_CORE_DEV_NAME,
		.owner	= THIS_MODULE,
	},
};
static int __init pm8018_init(void)
{
	return platform_driver_register(&pm8018_driver);
}
/* postcore: the PMIC core must be up before ordinary drivers probe. */
postcore_initcall(pm8018_init);
static void __exit pm8018_exit(void)
{
	platform_driver_unregister(&pm8018_driver);
}
module_exit(pm8018_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC 8018 core driver");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:" PM8018_CORE_DEV_NAME);
| gpl-2.0 |
Thunderoar/android_kernel_samsung_goyave3g | drivers/ide/q40ide.c | 10260 | 4221 | /*
* Q40 I/O port IDE Driver
*
* (c) Richard Zidlicky
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
*
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/module.h>
#include <asm/ide.h>
/*
 * Bases of the IDE interfaces
 */
#define Q40IDE_NUM_HWIFS	2

#define PCIDE_BASE1	0x1f0
#define PCIDE_BASE2	0x170
#define PCIDE_BASE3	0x1e8
#define PCIDE_BASE4	0x168
#define PCIDE_BASE5	0x1e0
#define PCIDE_BASE6	0x160

/* Only the first two legacy bases are probed; the rest stay commented out. */
static const unsigned long pcide_bases[Q40IDE_NUM_HWIFS] = {
	PCIDE_BASE1, PCIDE_BASE2, /* PCIDE_BASE3, PCIDE_BASE4  , PCIDE_BASE5,
	PCIDE_BASE6 */
};
/*
 * Map a legacy IDE port base to its conventional ISA IRQ line;
 * returns 0 for bases with no known default interrupt.
 */
static int q40ide_default_irq(unsigned long base)
{
	if (base == 0x1f0)
		return 14;
	if (base == 0x170)
		return 15;
	if (base == 0x1e8)
		return 11;

	return 0;
}
/*
 * Addresses are pretranslated for Q40 ISA access.
 *
 * Fills in the ide_hw port layout for one interface: the data register is
 * translated for word (16-bit) access via Q40_ISA_IO_W, all other task-file
 * registers for byte access via Q40_ISA_IO_B, and the control register sits
 * at the ISA-standard base + 0x206 offset.
 */
static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base, int irq)
{
	memset(hw, 0, sizeof(*hw));
	/* BIG FAT WARNING: 
	   assumption: only DATA port is ever used in 16 bit mode */
	hw->io_ports.data_addr = Q40_ISA_IO_W(base);
	hw->io_ports.error_addr = Q40_ISA_IO_B(base + 1);
	hw->io_ports.nsect_addr = Q40_ISA_IO_B(base + 2);
	hw->io_ports.lbal_addr = Q40_ISA_IO_B(base + 3);
	hw->io_ports.lbam_addr = Q40_ISA_IO_B(base + 4);
	hw->io_ports.lbah_addr = Q40_ISA_IO_B(base + 5);
	hw->io_ports.device_addr = Q40_ISA_IO_B(base + 6);
	hw->io_ports.status_addr = Q40_ISA_IO_B(base + 7);
	hw->io_ports.ctl_addr = Q40_ISA_IO_B(base + 0x206);

	hw->irq = irq;
}
/*
 * Read @len bytes from the drive's data port into @buf (rounded up to a
 * whole number of 16-bit words).  Disk filesystem transfers (IDE_TFLAG_FS)
 * use the plain MMIO word reader; all other transfers go through the
 * byte-swapping reader to undo the Q40's byte-swapped IDE interface.
 */
static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
			      void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
		__ide_mm_insw(data_addr, buf, (len + 1) / 2);
		return;
	}

	raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
}
/*
 * Write @len bytes from @buf to the drive's data port; mirror image of
 * q40ide_input_data() — plain word writes for disk filesystem transfers,
 * byte-swapped writes for everything else.
 */
static void q40ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
			       void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
		__ide_mm_outsw(data_addr, buf, (len + 1) / 2);
		return;
	}

	raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
}
/* Q40 has a byte-swapped IDE interface: only the data-transfer hooks are
 * overridden; every other task-file operation uses the generic helpers. */
static const struct ide_tp_ops q40ide_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,
	.write_devctl		= ide_write_devctl,

	.dev_select		= ide_dev_select,
	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= q40ide_input_data,
	.output_data		= q40ide_output_data,
};
/* Port description for ide_host_add(): MMIO access, no DMA, shared IRQ. */
static const struct ide_port_info q40ide_port_info = {
	.tp_ops			= &q40ide_tp_ops,
	.host_flags		= IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
	.irq_flags		= IRQF_SHARED,
	.chipset		= ide_generic,
};
/* 
 * the static array is needed to have the name reported in /proc/ioports,
 * hwif->name unfortunately isn't available yet
 */
static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
	"ide0", "ide1"
};
/*
 *  Probe for Q40 IDE interfaces
 *
 * For each candidate base, reserve the 8 command ports and the single
 * control port (base + 0x206) before touching the hardware; an interface
 * whose ports cannot be reserved is skipped (its hws[] slot stays NULL).
 */
static int __init q40ide_init(void)
{
    int i;
    struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL };

    if (!MACH_IS_Q40)
      return -ENODEV;

    printk(KERN_INFO "ide: Q40 IDE controller\n");

    for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
	const char *name = q40_ide_names[i];

	if (!request_region(pcide_bases[i], 8, name)) {
		printk("could not reserve ports %lx-%lx for %s\n",
		       pcide_bases[i],pcide_bases[i]+8,name);
		continue;
	}
	if (!request_region(pcide_bases[i]+0x206, 1, name)) {
		printk("could not reserve port %lx for %s\n",
		       pcide_bases[i]+0x206,name);
		release_region(pcide_bases[i], 8);
		continue;
	}
	q40_ide_setup_ports(&hw[i], pcide_bases[i],
			q40ide_default_irq(pcide_bases[i]));

	hws[i] = &hw[i];
    }

    return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL);
}
module_init(q40ide_init);
MODULE_LICENSE("GPL");
| gpl-2.0 |
95A31/android_kernel_msm | fs/nls/nls_iso8859-2.c | 12564 | 13181 | /*
* linux/fs/nls/nls_iso8859-2.c
*
* Charset iso8859-2 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
/* 0x80*/
0x0080, 0x0081, 0x0082, 0x0083,
0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008a, 0x008b,
0x008c, 0x008d, 0x008e, 0x008f,
/* 0x90*/
0x0090, 0x0091, 0x0092, 0x0093,
0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009a, 0x009b,
0x009c, 0x009d, 0x009e, 0x009f,
/* 0xa0*/
0x00a0, 0x0104, 0x02d8, 0x0141,
0x00a4, 0x013d, 0x015a, 0x00a7,
0x00a8, 0x0160, 0x015e, 0x0164,
0x0179, 0x00ad, 0x017d, 0x017b,
/* 0xb0*/
0x00b0, 0x0105, 0x02db, 0x0142,
0x00b4, 0x013e, 0x015b, 0x02c7,
0x00b8, 0x0161, 0x015f, 0x0165,
0x017a, 0x02dd, 0x017e, 0x017c,
/* 0xc0*/
0x0154, 0x00c1, 0x00c2, 0x0102,
0x00c4, 0x0139, 0x0106, 0x00c7,
0x010c, 0x00c9, 0x0118, 0x00cb,
0x011a, 0x00cd, 0x00ce, 0x010e,
/* 0xd0*/
0x0110, 0x0143, 0x0147, 0x00d3,
0x00d4, 0x0150, 0x00d6, 0x00d7,
0x0158, 0x016e, 0x00da, 0x0170,
0x00dc, 0x00dd, 0x0162, 0x00df,
/* 0xe0*/
0x0155, 0x00e1, 0x00e2, 0x0103,
0x00e4, 0x013a, 0x0107, 0x00e7,
0x010d, 0x00e9, 0x0119, 0x00eb,
0x011b, 0x00ed, 0x00ee, 0x010f,
/* 0xf0*/
0x0111, 0x0144, 0x0148, 0x00f3,
0x00f4, 0x0151, 0x00f6, 0x00f7,
0x0159, 0x016f, 0x00fa, 0x0171,
0x00fc, 0x00fd, 0x0163, 0x02d9,
};
static const unsigned char page00[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0xa7, /* 0xa0-0xa7 */
0xa8, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
0xb0, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
0x00, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */
0x00, 0xc9, 0x00, 0xcb, 0x00, 0xcd, 0xce, 0x00, /* 0xc8-0xcf */
0x00, 0x00, 0x00, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */
0x00, 0x00, 0xda, 0x00, 0xdc, 0xdd, 0x00, 0xdf, /* 0xd8-0xdf */
0x00, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */
0x00, 0xe9, 0x00, 0xeb, 0x00, 0xed, 0xee, 0x00, /* 0xe8-0xef */
0x00, 0x00, 0x00, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */
0x00, 0x00, 0xfa, 0x00, 0xfc, 0xfd, 0x00, 0x00, /* 0xf8-0xff */
};
static const unsigned char page01[256] = {
0x00, 0x00, 0xc3, 0xe3, 0xa1, 0xb1, 0xc6, 0xe6, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0xcf, 0xef, /* 0x08-0x0f */
0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0xca, 0xea, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0xc5, 0xe5, 0x00, 0x00, 0xa5, 0xb5, 0x00, /* 0x38-0x3f */
0x00, 0xa3, 0xb3, 0xd1, 0xf1, 0x00, 0x00, 0xd2, /* 0x40-0x47 */
0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0xd5, 0xf5, 0x00, 0x00, 0xc0, 0xe0, 0x00, 0x00, /* 0x50-0x57 */
0xd8, 0xf8, 0xa6, 0xb6, 0x00, 0x00, 0xaa, 0xba, /* 0x58-0x5f */
0xa9, 0xb9, 0xde, 0xfe, 0xab, 0xbb, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xf9, /* 0x68-0x6f */
0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0xac, 0xbc, 0xaf, 0xbf, 0xae, 0xbe, 0x00, /* 0x78-0x7f */
};
static const unsigned char page02[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, /* 0xc0-0xc7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
0xa2, 0xff, 0x00, 0xb2, 0x00, 0xbd, 0x00, 0x00, /* 0xd8-0xdf */
};
/* Unicode high byte -> reverse-mapping page; NULL pages (0x03..0xff) have
 * no characters that map into iso8859-2. */
static const unsigned char *const page_uni2charset[256] = {
	page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,
};
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xb1, 0xa2, 0xb3, 0xa4, 0xb5, 0xb6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xbf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xa1, 0xb2, 0xa3, 0xb4, 0xa5, 0xa6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xaf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */
};
/*
 * Convert one Unicode character to its iso8859-2 byte.
 *
 * Writes a single byte to @out and returns 1 on success; returns
 * -ENAMETOOLONG when @boundlen leaves no room, or -EINVAL when the
 * character has no mapping (NULL page or zero table entry).
 */
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
	unsigned char row = (uni & 0xff00) >> 8;
	unsigned char col = uni & 0x00ff;
	const unsigned char *page;

	if (boundlen <= 0)
		return -ENAMETOOLONG;

	page = page_uni2charset[row];
	if (!page || !page[col])
		return -EINVAL;

	out[0] = page[col];
	return 1;
}
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
*uni = charset2uni[*rawstring];
if (*uni == 0x0000)
return -EINVAL;
return 1;
}
/* NLS operations table registered with the VFS under "iso8859-2". */
static struct nls_table table = {
	.charset	= "iso8859-2",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
	.owner		= THIS_MODULE,
};
static int __init init_nls_iso8859_2(void)
{
	return register_nls(&table);
}
static void __exit exit_nls_iso8859_2(void)
{
	unregister_nls(&table);
}
module_init(init_nls_iso8859_2)
module_exit(exit_nls_iso8859_2)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
parallella/parallella-linux-old | fs/nls/nls_cp874.c | 12564 | 10995 | /*
* linux/fs/nls/nls_cp874.c
*
* Charset cp874 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
/* 0x80*/
0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x2026, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
/* 0x90*/
0x0000, 0x2018, 0x2019, 0x201c,
0x201d, 0x2022, 0x2013, 0x2014,
0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
/* 0xa0*/
0x00a0, 0x0e01, 0x0e02, 0x0e03,
0x0e04, 0x0e05, 0x0e06, 0x0e07,
0x0e08, 0x0e09, 0x0e0a, 0x0e0b,
0x0e0c, 0x0e0d, 0x0e0e, 0x0e0f,
/* 0xb0*/
0x0e10, 0x0e11, 0x0e12, 0x0e13,
0x0e14, 0x0e15, 0x0e16, 0x0e17,
0x0e18, 0x0e19, 0x0e1a, 0x0e1b,
0x0e1c, 0x0e1d, 0x0e1e, 0x0e1f,
/* 0xc0*/
0x0e20, 0x0e21, 0x0e22, 0x0e23,
0x0e24, 0x0e25, 0x0e26, 0x0e27,
0x0e28, 0x0e29, 0x0e2a, 0x0e2b,
0x0e2c, 0x0e2d, 0x0e2e, 0x0e2f,
/* 0xd0*/
0x0e30, 0x0e31, 0x0e32, 0x0e33,
0x0e34, 0x0e35, 0x0e36, 0x0e37,
0x0e38, 0x0e39, 0x0e3a, 0x0000,
0x0000, 0x0000, 0x0000, 0x0e3f,
/* 0xe0*/
0x0e40, 0x0e41, 0x0e42, 0x0e43,
0x0e44, 0x0e45, 0x0e46, 0x0e47,
0x0e48, 0x0e49, 0x0e4a, 0x0e4b,
0x0e4c, 0x0e4d, 0x0e4e, 0x0e4f,
/* 0xf0*/
0x0e50, 0x0e51, 0x0e52, 0x0e53,
0x0e54, 0x0e55, 0x0e56, 0x0e57,
0x0e58, 0x0e59, 0x0e5a, 0x0e5b,
0x0000, 0x0000, 0x0000, 0x0000,
};
/*
 * Unicode page 0x00NN -> cp874 byte.  Identity map for ASCII 0x00-0x7f;
 * of Latin-1 only U+00A0 (no-break space) has a cp874 slot (0xa0).
 * A 0x00 entry (other than index 0) means "no cp874 equivalent";
 * entries beyond 0xa7 are implicitly zero-initialized.
 */
static const unsigned char page00[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
};
/*
 * Unicode page 0x0ENN (Thai) -> cp874 byte.  U+0E01..U+0E5B map onto
 * cp874 0xa1..0xfb; the 0x00 holes match unassigned Thai codepoints.
 */
static const unsigned char page0e[256] = {
	0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x08-0x0f */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */
	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0x38-0x3f */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */
	0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
};
/*
 * Unicode page 0x20NN (general punctuation) -> cp874 byte: dashes
 * (U+2013/U+2014), quotes (U+2018/2019/201C/201D), bullet (U+2022)
 * and ellipsis (U+2026) live at cp874 0x85, 0x91-0x97.
 */
static const unsigned char page20[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x91, 0x92, 0x00, 0x00, 0x93, 0x94, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */
};
/*
 * Unicode -> cp874 dispatch: indexed by the high byte of the codepoint.
 * Only pages 0x00, 0x0e and 0x20 contain characters cp874 can encode;
 * a NULL entry means the whole page is unmappable.
 */
static const unsigned char *const page_uni2charset[256] = {
	page00, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   page0e, NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	page20, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
};
/*
 * Lower-casing table for cp874 bytes: A-Z fold to a-z, everything else
 * (Thai has no case) maps to itself.  0x00 entries mark byte values the
 * charset does not define.
 */
static const unsigned char charset2lower[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/*
 * Upper-casing table for cp874 bytes: a-z fold to A-Z, everything else
 * maps to itself.  0x00 entries mark byte values the charset does not
 * define.
 */
static const unsigned char charset2upper[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
/*
 * Convert one cp874 byte to Unicode.  Always stores the looked-up
 * value into *uni; returns 1 on success or -EINVAL when the byte has
 * no Unicode mapping (table entry 0x0000).
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	*uni = charset2uni[*rawstring];
	return (*uni == 0x0000) ? -EINVAL : 1;
}
/*
 * NLS operations table for Windows code page 874 (Thai), also
 * reachable under the TIS-620 alias.
 */
static struct nls_table table = {
	.charset	= "cp874",
	.alias		= "tis-620",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
	.owner		= THIS_MODULE,
};
/* Module load: register the cp874 table with the NLS core. */
static int __init init_nls_cp874(void)
{
	return register_nls(&table);
}
/* Module unload: drop the cp874 table from the NLS core. */
static void __exit exit_nls_cp874(void)
{
	unregister_nls(&table);
}
module_init(init_nls_cp874)
module_exit(exit_nls_cp874)

MODULE_LICENSE("Dual BSD/GPL");
/* Allow auto-loading via the "tis-620" charset name as well. */
MODULE_ALIAS_NLS(tis-620);
| gpl-2.0 |
TheStrix/android_kernel_cyanogen_msm8916 | arch/microblaze/mm/mmu_context.c | 13844 | 2042 | /*
* This file contains the routines for handling the MMU.
*
* Copyright (C) 2007 Xilinx, Inc. All rights reserved.
*
* Derived from arch/ppc/mm/4xx_mmu.c:
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
/* Next hardware context number to consider for allocation/stealing. */
mm_context_t next_mmu_context;
/* Bitmap of allocated hardware contexts (bit set == in use). */
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
/* Number of hardware contexts still free. */
atomic_t nr_free_contexts;
/* Owning mm for each hardware context number. */
struct mm_struct *context_mm[LAST_CONTEXT+1];
/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/*
	 * The use of context zero is reserved for the kernel.
	 * This code assumes FIRST_CONTEXT < 32, so the reserved
	 * contexts [0, FIRST_CONTEXT) fit in the first bitmap word.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}
/*
 * Steal a context from a task that has one at the moment.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone were motivated to do it.
 */
void steal_context(void)
{
	struct mm_struct *mm;

	/* free up context `next_mmu_context' */
	/* if we shouldn't free context 0, don't... */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	/* Flushing the TLB and destroying the context returns it to
	 * the free pool for the victim's next fault to re-allocate. */
	flush_tlb_mm(mm);
	destroy_context(mm);
}
| gpl-2.0 |
eus/cbs_inheritance | kernel/time/timeconv.c | 13844 | 3580 | /*
* Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
* This file is part of the GNU C Library.
* Contributed by Paul Eggert (eggert@twinsun.com).
*
* The GNU C Library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* The GNU C Library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with the GNU C Library; see the file COPYING.LIB. If not,
* write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/*
* Converts the calendar time to broken-down time representation
* Based on code from glibc-2.6
*
* 2009-7-14:
* Moved from glibc-2.6 to kernel by Zhaolei<zhaolei@cn.fujitsu.com>
*/
#include <linux/time.h>
#include <linux/module.h>
/*
* Nonzero if YEAR is a leap year (every 4 years,
* except every 100th isn't, and every 400th is).
*/
/*
 * Nonzero if YEAR is a leap year: divisible by 4, except that
 * century years must also be divisible by 400.
 */
static int __isleap(long year)
{
	if (year % 4 != 0)
		return 0;
	if (year % 100 != 0)
		return 1;
	return year % 400 == 0;
}
/*
 * Floor-style division for long: C's `/' truncates toward zero, so
 * when the remainder is negative the quotient is nudged down by one.
 */
static long math_div(long a, long b)
{
	long q = a / b;

	if (a % b < 0)
		q--;
	return q;
}
/*
 * How many leap years fall in [y1, y2); y1 must be <= y2.
 * Counts leap years up to (but not including) each year with the
 * Gregorian rule, using floor division (quotient minus one when the
 * remainder is negative), then takes the difference.
 */
static long leaps_between(long y1, long y2)
{
	long a = y1 - 1;
	long b = y2 - 1;
	long leaps1 = (a / 4 - (a % 4 < 0))
		    - (a / 100 - (a % 100 < 0))
		    + (a / 400 - (a % 400 < 0));
	long leaps2 = (b / 4 - (b % 4 < 0))
		    - (b / 100 - (b % 100 < 0))
		    + (b / 400 - (b % 400 < 0));

	return leaps2 - leaps1;
}
/* How many days come before each month (0-12); index 12 is the year
 * length, making month lookups and range checks uniform. */
static const unsigned short __mon_yday[2][13] = {
	/* Normal years. */
	{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
	/* Leap years. */
	{0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
};
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
/**
 * time_to_tm - converts the calendar time to local broken-down time
 *
 * @totalsecs: the number of seconds elapsed since 00:00:00 on January 1,
 *	1970, Coordinated Universal Time (UTC).
 * @offset: offset seconds adding to totalsecs.
 * @result: pointer to struct tm variable to receive broken-down time
 */
void time_to_tm(time_t totalsecs, int offset, struct tm *result)
{
	long days, rem, y;
	const unsigned short *ip;

	/* Split into whole days plus seconds-within-day. */
	days = totalsecs / SECS_PER_DAY;
	rem = totalsecs % SECS_PER_DAY;
	rem += offset;
	/* Normalize rem into [0, SECS_PER_DAY); days absorbs the carry. */
	while (rem < 0) {
		rem += SECS_PER_DAY;
		--days;
	}
	while (rem >= SECS_PER_DAY) {
		rem -= SECS_PER_DAY;
		++days;
	}

	result->tm_hour = rem / SECS_PER_HOUR;
	rem %= SECS_PER_HOUR;
	result->tm_min = rem / 60;
	result->tm_sec = rem % 60;

	/* January 1, 1970 was a Thursday. */
	result->tm_wday = (4 + days) % 7;
	if (result->tm_wday < 0)
		result->tm_wday += 7;
	y = 1970;

	/* Find the year containing `days', refining year-sized guesses. */
	while (days < 0 || days >= (__isleap(y) ? 366 : 365)) {
		/* Guess a corrected year, assuming 365 days per year. */
		long yg = y + math_div(days, 365);

		/* Adjust DAYS and Y to match the guessed year. */
		days -= (yg - y) * 365 + leaps_between(y, yg);
		y = yg;
	}

	result->tm_year = y - 1900;

	result->tm_yday = days;

	/* Walk back from December to find the month containing `days'. */
	ip = __mon_yday[__isleap(y)];
	for (y = 11; days < ip[y]; y--)
		continue;
	days -= ip[y];

	result->tm_mon = y;
	result->tm_mday = days + 1;
}
EXPORT_SYMBOL(time_to_tm);
| gpl-2.0 |
elkay/LK_DNA_2 | arch/powerpc/boot/cuboot-yosemite.c | 14100 | 1095 | /*
* Old U-boot compatibility for Yosemite
*
* Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
*
* Copyright 2008 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "4xx.h"
#include "44x.h"
#include "cuboot.h"
#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"
/* Board info handed over by the old U-Boot (filled in by CUBOOT_INIT()). */
static bd_t bd;

/*
 * Patch the device tree with values only U-Boot knows: clock
 * frequencies, SDRAM size, and the two EMAC MAC addresses.
 */
static void yosemite_fixups(void)
{
	/* NOTE(review): 66.666 MHz sysclk is hard-coded, not probed --
	 * confirm this matches the board's oscillator. */
	unsigned long sysclk = 66666666;

	ibm440ep_fixup_clocks(sysclk, 11059200, 50000000);
	ibm4xx_sdram_fixup_memsize();
	/* Quiet both EMACs before the kernel's driver takes over. */
	ibm4xx_quiesce_eth((u32 *)0xef600e00, (u32 *)0xef600f00);
	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
}
/*
 * Bootwrapper entry point: capture the U-Boot board info, install the
 * Yosemite fixups and reset hook, then bring up the flattened device
 * tree and serial console.  r3-r7 are the register arguments passed by
 * the old U-Boot.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = yosemite_fixups;
	platform_ops.exit = ibm44x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
| gpl-2.0 |
SlimRoms/kernel_sony_msm8974 | arch/arm/mach-w90x900/nuc960.c | 14356 | 1140 | /*
* linux/arch/arm/mach-w90x900/nuc960.c
*
* Based on linux/arch/arm/plat-s3c24xx/s3c244x.c by Ben Dooks
*
* Copyright (c) 2008 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* NUC960 cpu support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation;version 2 of the License.
*
*/
#include <linux/platform_device.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include "cpu.h"
/* define specific CPU platform device */
/* NUC960-specific on-chip devices registered at board init. */
static struct platform_device *nuc960_dev[] __initdata = {
	&nuc900_device_kpi,
	&nuc900_device_fmi,
};

/* define specific CPU platform io map */
/* No NUC960-specific static mappings beyond the common nuc900 set. */
static struct map_desc nuc960evb_iodesc[] __initdata = {
};
/* Init NUC960 evb io: common nuc900 mappings plus the (empty) board set. */
void __init nuc960_map_io(void)
{
	nuc900_map_io(nuc960evb_iodesc, ARRAY_SIZE(nuc960evb_iodesc));
}

/* Init NUC960 clocks: delegates entirely to the shared nuc900 code. */
void __init nuc960_init_clocks(void)
{
	nuc900_init_clocks();
}

/* Init NUC960 board info: register the NUC960-specific devices. */
void __init nuc960_board_init(void)
{
	nuc900_board_init(nuc960_dev, ARRAY_SIZE(nuc960_dev));
}
| gpl-2.0 |
DevSwift/Kernel-3.4-NovaThor | drivers/dio/dio-sysfs.c | 15380 | 2250 | /*
* File Attributes for DIO Devices
*
* Copyright (C) 2004 Jochen Friedrich
*
* Loosely based on drivers/pci/pci-sysfs.c and drivers/zorro/zorro-sysfs.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/dio.h>
#include <linux/stat.h>
/* show configuration fields */
/* sysfs "id": low byte of the DIO board's ID register, hex-formatted. */
static ssize_t dio_show_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dio_dev *d = to_dio_dev(dev);

	return sprintf(buf, "0x%02x\n", (d->id & 0xff));
}
static DEVICE_ATTR(id, S_IRUGO, dio_show_id, NULL);
/* sysfs "ipl": the board's interrupt priority level, hex-formatted. */
static ssize_t dio_show_ipl(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dio_dev *d;

	d = to_dio_dev(dev);
	return sprintf(buf, "0x%02x\n", d->ipl);
}
static DEVICE_ATTR(ipl, S_IRUGO, dio_show_ipl, NULL);
/* sysfs "secid": secondary ID, the high byte of the board's ID word. */
static ssize_t dio_show_secid(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dio_dev *d;

	d = to_dio_dev(dev);
	return sprintf(buf, "0x%02x\n", ((d->id >> 8)& 0xff));
}
static DEVICE_ATTR(secid, S_IRUGO, dio_show_secid, NULL);
/* sysfs "name": the human-readable board name string. */
static ssize_t dio_show_name(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dio_dev *d;

	d = to_dio_dev(dev);
	return sprintf(buf, "%s\n", d->name);
}
static DEVICE_ATTR(name, S_IRUGO, dio_show_name, NULL);
/* sysfs "resource": "start end flags" of the board's address range. */
static ssize_t dio_show_resource(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dio_dev *d = to_dio_dev(dev);

	return sprintf(buf, "0x%08lx 0x%08lx 0x%08lx\n",
		       (unsigned long)dio_resource_start(d),
		       (unsigned long)dio_resource_end(d),
		       dio_resource_flags(d));
}
static DEVICE_ATTR(resource, S_IRUGO, dio_show_resource, NULL);
int dio_create_sysfs_dev_files(struct dio_dev *d)
{
struct device *dev = &d->dev;
int error;
/* current configuration's attributes */
if ((error = device_create_file(dev, &dev_attr_id)) ||
(error = device_create_file(dev, &dev_attr_ipl)) ||
(error = device_create_file(dev, &dev_attr_secid)) ||
(error = device_create_file(dev, &dev_attr_name)) ||
(error = device_create_file(dev, &dev_attr_resource)))
return error;
return 0;
}
| gpl-2.0 |
wjflyhigh/glibc-2.21 | sysdeps/powerpc/ffs.c | 21 | 1318 | /* Find first set bit in a word, counted from least significant end.
For PowerPC.
Copyright (C) 1991-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Torbjorn Granlund (tege@sics.se).
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#define ffsl __something_else
#include <limits.h>
#include <string.h>
#undef ffs
#ifdef __GNUC__
/*
 * ffs for PowerPC: isolate the lowest set bit with (x & -x), count its
 * leading zeros with cntlzw, and turn that into a 1-based bit index.
 * For x == 0, cntlzw yields 32 and the result is 0, matching ffs().
 */
int
__ffs (int x)
{
	int cnt;

	asm ("cntlzw %0,%1" : "=r" (cnt) : "r" (x & -x));
	return 32 - cnt;
}
weak_alias (__ffs, ffs)
libc_hidden_def (__ffs)
libc_hidden_builtin_def (ffs)
#if ULONG_MAX == UINT_MAX
#undef ffsl
weak_alias (__ffs, ffsl)
#endif
#else
#include <string/ffs.c>
#endif
| gpl-2.0 |
romanalexander/Trickles | drivers/char/machzwd.c | 21 | 11542 | /*
* MachZ ZF-Logic Watchdog Timer driver for Linux
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* The author does NOT admit liability nor provide warranty for
* any of this software. This material is provided "AS-IS" in
* the hope that it may be useful for others.
*
* Author: Fernando Fuganti <fuganti@conectiva.com.br>
*
* Based on sbc60xxwdt.c by Jakob Oestergaard
*
*
* We have two timers (wd#1, wd#2) driven by a 32 KHz clock with the
* following periods:
* wd#1 - 2 seconds;
* wd#2 - 7.2 ms;
* After the expiration of wd#1, it can generate a NMI, SCI, SMI, or
* a system RESET and it starts wd#2 that unconditionaly will RESET
* the system when the counter reaches zero.
*
* 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com>
* Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
/* ports */
#define ZF_IOBASE 0x218
#define INDEX 0x218
#define DATA_B 0x219
#define DATA_W 0x21A
#define DATA_D 0x21A
/* indexes */ /* size */
#define ZFL_VERSION 0x02 /* 16 */
#define CONTROL 0x10 /* 16 */
#define STATUS 0x12 /* 8 */
#define COUNTER_1 0x0C /* 16 */
#define COUNTER_2 0x0E /* 8 */
#define PULSE_LEN 0x0F /* 8 */
/* controls */
#define ENABLE_WD1 0x0001
#define ENABLE_WD2 0x0002
#define RESET_WD1 0x0010
#define RESET_WD2 0x0020
#define GEN_SCI 0x0100
#define GEN_NMI 0x0200
#define GEN_SMI 0x0400
#define GEN_RESET 0x0800
/* utilities */
#define WD1 0
#define WD2 1
#define zf_writew(port, data) { outb(port, INDEX); outw(data, DATA_W); }
#define zf_writeb(port, data) { outb(port, INDEX); outb(data, DATA_B); }
#define zf_get_ZFL_version() zf_readw(ZFL_VERSION)
/* Read a 16-bit ZF-Logic register: select it via INDEX, read DATA_W.
 * Caller must hold zf_port_lock (the two-port sequence is not atomic). */
static unsigned short zf_readw(unsigned char port)
{
	outb(port, INDEX);
	return inw(DATA_W);
}

/* Read an 8-bit ZF-Logic register: select it via INDEX, read DATA_B. */
static unsigned short zf_readb(unsigned char port)
{
	outb(port, INDEX);
	return inb(DATA_B);
}
MODULE_AUTHOR("Fernando Fuganti <fuganti@conectiva.com.br>");
MODULE_DESCRIPTION("MachZ ZF-Logic Watchdog driver");
MODULE_LICENSE("GPL");
MODULE_PARM(action, "i");
MODULE_PARM_DESC(action, "after watchdog resets, generate: 0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
#ifdef CONFIG_WATCHDOG_NOWAYOUT
static int nowayout = 1;
#else
static int nowayout = 0;
#endif
MODULE_PARM(nowayout,"i");
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
#define PFX "machzwd"
static struct watchdog_info zf_info = {
options: WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
firmware_version: 1,
identity: "ZF-Logic watchdog"
};
/*
 * action refers to action taken when watchdog resets
 * 0 = GEN_RESET
 * 1 = GEN_SMI
 * 2 = GEN_NMI
 * 3 = GEN_SCI
 * defaults to GEN_RESET (0)
 */
static int action = 0;			/* module parameter: action index */
static int zf_action = GEN_RESET;	/* resolved CONTROL action bit */
static int zf_is_open = 0;		/* single-open guard for the device */
static int zf_expect_close = 0;		/* set when magic 'V' was written */
static spinlock_t zf_lock;		/* protects open/close state */
static spinlock_t zf_port_lock;		/* serializes INDEX/DATA port access */
static struct timer_list zf_timer;	/* kernel timer that feeds the chip */
static unsigned long next_heartbeat = 0; /* deadline for userland pings */
/* timeout for user land heart beat (10 seconds) */
#define ZF_USER_TIMEO (HZ*10)
/* timeout for hardware watchdog (~500ms) */
#define ZF_HW_TIMEO (HZ/2)
/* number of ticks on WD#1 (driven by a 32KHz clock, 2s) */
#define ZF_CTIMEOUT 0xffff
#ifndef ZF_DEBUG
# define dprintk(format, args...)
#else
# define dprintk(format, args...) printk(KERN_DEBUG PFX; ":" __FUNCTION__ ":%d: " format, __LINE__ , ## args)
#endif
/* STATUS register functions */

/* Read the 8-bit STATUS register. */
static inline unsigned char zf_get_status(void)
{
	return zf_readb(STATUS);
}

/* Write the 8-bit STATUS register. */
static inline void zf_set_status(unsigned char new)
{
	zf_writeb(STATUS, new);
}

/* CONTROL register functions */

/* Read the 16-bit CONTROL register (enable/reset/action bits). */
static inline unsigned short zf_get_control(void)
{
	return zf_readw(CONTROL);
}

/* Write the 16-bit CONTROL register. */
static inline void zf_set_control(unsigned short new)
{
	zf_writew(CONTROL, new);
}
/* WD#? counter functions */
/*
 * Just get current counter value
 */
static inline unsigned short zf_get_timer(unsigned char n)
{
	switch(n){
	case WD1:
		return zf_readw(COUNTER_1);	/* 16-bit counter */
	case WD2:
		return zf_readb(COUNTER_2);	/* 8-bit counter */
	default:
		return 0;			/* unknown timer id */
	}
}
/*
 * Just set counter value: WD1 is the 16-bit counter, WD2 the 8-bit
 * counter (values above 0xff are clamped).
 *
 * NOTE(review): there are no break statements, so a WD1 store falls
 * through and also rewrites COUNTER_2 (clamped).  Later mainline
 * kernels annotate these as intentional fallthroughs rather than
 * adding breaks -- confirm against the hardware before "fixing".
 */
static inline void zf_set_timer(unsigned short new, unsigned char n)
{
	switch(n){
	case WD1:
		zf_writew(COUNTER_1, new);
		/* fallthrough */
	case WD2:
		zf_writeb(COUNTER_2, new > 0xff ? 0xff : new);
		/* fallthrough */
	default:
		return;
	}
}
/*
 * stop hardware timer
 *
 * Cancels the internal re-trigger timer, then clears both watchdog
 * enable bits in CONTROL under the port lock.
 */
static void zf_timer_off(void)
{
	unsigned int ctrl_reg = 0;
	unsigned long flags;

	/* stop internal ping */
	del_timer_sync(&zf_timer);

	spin_lock_irqsave(&zf_port_lock, flags);

	/*
	 * Disable wd1 and wd2.  (The old code OR-ed the enable bits in
	 * and immediately cleared them again before the single write to
	 * the chip; that intermediate value was never visible, so the
	 * redundant OR step has been dropped.)
	 */
	ctrl_reg = zf_get_control();
	ctrl_reg &= ~(ENABLE_WD1|ENABLE_WD2);
	zf_set_control(ctrl_reg);

	spin_unlock_irqrestore(&zf_port_lock, flags);

	printk(KERN_INFO PFX ": Watchdog timer is now disabled\n");
}
/*
 * start hardware timer
 *
 * Arms WD1 with the full 2-second count, primes the userland and
 * internal-ping deadlines, and enables WD1 with the configured
 * expiry action (RESET/SMI/NMI/SCI).
 */
static void zf_timer_on(void)
{
	unsigned int ctrl_reg = 0;
	unsigned long flags;

	spin_lock_irqsave(&zf_port_lock, flags);

	/* Maximum output-pulse length on expiry. */
	zf_writeb(PULSE_LEN, 0xff);

	zf_set_timer(ZF_CTIMEOUT, WD1);

	/* user land ping */
	next_heartbeat = jiffies + ZF_USER_TIMEO;

	/* start the timer for internal ping */
	zf_timer.expires = jiffies + ZF_HW_TIMEO;

	add_timer(&zf_timer);

	/* start watchdog timer */
	ctrl_reg = zf_get_control();
	ctrl_reg |= (ENABLE_WD1|zf_action);
	zf_set_control(ctrl_reg);

	spin_unlock_irqrestore(&zf_port_lock, flags);

	printk(KERN_INFO PFX ": Watchdog timer is now enabled\n");
}
/*
 * Internal heartbeat, run from zf_timer every ZF_HW_TIMEO jiffies.
 * Reloads WD2's counter and, while userland has pinged recently,
 * pulses RESET_WD1 to restart the hardware count and re-arms itself.
 * Once userland stops pinging, the hardware is left to expire.
 */
static void zf_ping(unsigned long data)
{
	unsigned int ctrl_reg = 0;
	unsigned long flags;

	/* Top up WD2's 8-bit counter. */
	zf_writeb(COUNTER_2, 0xff);

	if(time_before(jiffies, next_heartbeat)){
		dprintk("time_before: %ld\n", next_heartbeat - jiffies);

		/*
		 * reset event is activated by transition from 0 to 1 on
		 * RESET_WD1 bit and we assume that it is already zero...
		 */
		spin_lock_irqsave(&zf_port_lock, flags);
		ctrl_reg = zf_get_control();
		ctrl_reg |= RESET_WD1;
		zf_set_control(ctrl_reg);

		/* ...and nothing changes until here */
		ctrl_reg &= ~(RESET_WD1);
		zf_set_control(ctrl_reg);
		spin_unlock_irqrestore(&zf_port_lock, flags);

		/* Re-arm the software ping. */
		zf_timer.expires = jiffies + ZF_HW_TIMEO;
		add_timer(&zf_timer);
	}else{
		/* Userland timed out: let the hardware fire its action. */
		printk(KERN_CRIT PFX ": I will reset your machine\n");
	}
}
/*
 * Userland heartbeat: any write refreshes the soft timeout.  Unless
 * "nowayout" is set, writing the magic character 'V' anywhere in the
 * buffer arms the magic-close, so the next release really stops the
 * watchdog.  Returns 1 for a non-empty write, 0 otherwise.
 */
static ssize_t zf_write(struct file *file, const char *buf, size_t count,
								loff_t *ppos)
{
	/* Can't seek (pwrite) on this device */
	if (ppos != &file->f_pos)
		return -ESPIPE;

	/* See if we got the magic character */
	if(count){
		/*
		 * no need to check for close confirmation
		 * no way to disable watchdog ;)
		 */
		if (!nowayout) {
			size_t ofs;

			/*
			 * note: just in case someone wrote the
			 * magic character five months ago...
			 */
			zf_expect_close = 0;

			/* now scan */
			for(ofs = 0; ofs != count; ofs++){
				char c;
				if(get_user(c, buf+ofs))
					return -EFAULT;
				if(c == 'V'){
					zf_expect_close = 1;
					dprintk("zf_expect_close 1\n");
				}
			}
		}

		/*
		 * Well, anyhow someone wrote to us,
		 * we should return that favour
		 */
		next_heartbeat = jiffies + ZF_USER_TIMEO;
		dprintk("user ping at %ld\n", jiffies);

		return 1;
	}

	return 0;
}
/* Reading the watchdog device is not supported. */
static ssize_t zf_read(struct file *file, char *buf, size_t count,
							loff_t *ppos)
{
	return -EINVAL;
}
/*
 * Standard watchdog ioctls: report capabilities, report open state,
 * and accept keep-alive pings.  Everything else is -ENOTTY.
 */
static int zf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	unsigned long arg)
{
	switch(cmd){
	case WDIOC_GETSUPPORT:
		if (copy_to_user((struct watchdog_info *)arg,
				 &zf_info, sizeof(zf_info)))
			return -EFAULT;
		break;

	case WDIOC_GETSTATUS:
		if (copy_to_user((int *)arg, &zf_is_open, sizeof(int)))
			return -EFAULT;
		break;

	case WDIOC_KEEPALIVE:
		zf_ping(0);	/* same path as the internal heartbeat */
		break;

	default:
		return -ENOTTY;
	}

	return 0;
}
/*
 * Open the watchdog device.  Only one opener is allowed at a time;
 * a successful open starts the hardware timer.  With "nowayout" the
 * module refcount is pinned so the driver can never be unloaded.
 */
static int zf_open(struct inode *inode, struct file *file)
{
	switch(MINOR(inode->i_rdev)){
	case WATCHDOG_MINOR:
		spin_lock(&zf_lock);
		if(zf_is_open){
			spin_unlock(&zf_lock);
			return -EBUSY;
		}

		if (nowayout) {
			MOD_INC_USE_COUNT;
		}

		zf_is_open = 1;

		spin_unlock(&zf_lock);

		zf_timer_on();

		return 0;
	default:
		return -ENODEV;
	}
}
/*
 * Release the watchdog device.  Only a magic-close ('V' written
 * beforehand) stops the hardware; otherwise just the internal ping
 * timer is cancelled and the hardware is left armed, so the machine
 * will reset if nothing reopens the device in time.
 */
static int zf_close(struct inode *inode, struct file *file)
{
	if(MINOR(inode->i_rdev) == WATCHDOG_MINOR){

		if(zf_expect_close){
			zf_timer_off();
		} else {
			del_timer(&zf_timer);
			printk(KERN_ERR PFX ": device file closed unexpectedly. Will not stop the WDT!\n");
		}

		spin_lock(&zf_lock);
		zf_is_open = 0;
		spin_unlock(&zf_lock);

		zf_expect_close = 0;
	}

	return 0;
}
/*
 * Notifier for system down: disable the watchdog on shutdown/halt so
 * it cannot fire mid-poweroff.
 */
static int zf_notify_sys(struct notifier_block *this, unsigned long code,
								void *unused)
{
	if(code == SYS_DOWN || code == SYS_HALT){
		zf_timer_off();
	}

	return NOTIFY_DONE;
}
/* File operations for the /dev/watchdog misc device. */
static struct file_operations zf_fops = {
	owner:		THIS_MODULE,
	read:		zf_read,
	write:		zf_write,
	ioctl:		zf_ioctl,
	open:		zf_open,
	release:	zf_close,
};

/* Registered on the standard watchdog misc minor. */
static struct miscdevice zf_miscdev = {
	WATCHDOG_MINOR,
	"watchdog",
	&zf_fops
};

/*
 * The device needs to learn about soft shutdowns in order to
 * turn the timebomb registers off.
 */
static struct notifier_block zf_notifier = {
	zf_notify_sys,
	NULL,
	0
};
/* Log which expiry action (RESET/SMI/NMI/SCI) the watchdog will take. */
static void __init zf_show_action(int act)
{
	static const char *const act_name[] = { "RESET", "SMI", "NMI", "SCI" };

	printk(KERN_INFO PFX ": Watchdog using action = %s\n", act_name[act]);
}
/*
 * Probe for the ZF-Logic chip, resolve the configured expiry action,
 * and register the misc device, I/O region and reboot notifier.
 * Returns 0 on success or a negative errno, unwinding via gotos.
 */
static int __init zf_init(void)
{
	int ret;

	printk(KERN_INFO PFX ": MachZ ZF-Logic Watchdog driver initializing.\n");

	ret = zf_get_ZFL_version();
	printk("%#x\n", ret);
	/*
	 * 0x0000 and 0xffff are what reads from absent hardware return,
	 * so neither is a valid ZF-Logic version.  (The previous test
	 * used "ret != 0xffff", which rejected every real chip and only
	 * accepted the all-ones "nothing there" pattern.)
	 */
	if((!ret) || (ret == 0xffff)){
		printk(KERN_WARNING PFX ": no ZF-Logic found\n");
		return -ENODEV;
	}

	/* GEN_RESET >> action selects the SMI/NMI/SCI control bit. */
	if((action <= 3) && (action >= 0)){
		zf_action = zf_action>>action;
	} else
		action = 0;

	zf_show_action(action);

	spin_lock_init(&zf_lock);
	spin_lock_init(&zf_port_lock);

	ret = misc_register(&zf_miscdev);
	if (ret){
		printk(KERN_ERR "can't misc_register on minor=%d\n",
			WATCHDOG_MINOR);
		goto out;
	}

	if(!request_region(ZF_IOBASE, 3, "MachZ ZFL WDT")){
		printk(KERN_ERR "cannot reserve I/O ports at %d\n",
			ZF_IOBASE);
		ret = -EBUSY;
		goto no_region;
	}

	ret = register_reboot_notifier(&zf_notifier);
	if(ret){
		printk(KERN_ERR "can't register reboot notifier (err=%d)\n",
			ret);
		goto no_reboot;
	}

	/* Quiesce the chip until the device is opened. */
	zf_set_status(0);
	zf_set_control(0);

	/* this is the timer that will do the hard work */
	init_timer(&zf_timer);
	zf_timer.function = zf_ping;
	zf_timer.data = 0;

	return 0;

no_reboot:
	release_region(ZF_IOBASE, 3);
no_region:
	misc_deregister(&zf_miscdev);
out:
	return ret;
}
/* Module unload: stop the watchdog and release every registration. */
void __exit zf_exit(void)
{
	zf_timer_off();

	misc_deregister(&zf_miscdev);
	unregister_reboot_notifier(&zf_notifier);
	release_region(ZF_IOBASE, 3);
}
}
module_init(zf_init);
module_exit(zf_exit);
| gpl-2.0 |
kanpol/bibo | util-linux/swaponoff.c | 21 | 6912 | /* vi: set sw=4 ts=4: */
/*
* Mini swapon/swapoff implementation for busybox
*
* Copyright (C) 1999-2004 by Erik Andersen <andersen@codepoet.org>
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
//usage:#define swapon_trivial_usage
//usage: "[-a]" IF_FEATURE_SWAPON_DISCARD(" [-d[POL]]") IF_FEATURE_SWAPON_PRI(" [-p PRI]") " [DEVICE]"
//usage:#define swapon_full_usage "\n\n"
//usage: "Start swapping on DEVICE\n"
//usage: "\n -a Start swapping on all swap devices"
//usage: IF_FEATURE_SWAPON_DISCARD(
//usage: "\n -d[POL] Discard blocks at swapon (POL=once),"
//usage: "\n as freed (POL=pages), or both (POL omitted)"
//usage: )
//usage: IF_FEATURE_SWAPON_PRI(
//usage: "\n -p PRI Set swap device priority"
//usage: )
//usage:
//usage:#define swapoff_trivial_usage
//usage: "[-a] [DEVICE]"
//usage:#define swapoff_full_usage "\n\n"
//usage: "Stop swapping on DEVICE\n"
//usage: "\n -a Stop swapping on all swap devices"
#include "libbb.h"
#include <mntent.h>
#ifndef __BIONIC__
# include <sys/swap.h>
#endif
#if ENABLE_FEATURE_MOUNT_LABEL
# include "volume_id.h"
#else
# define resolve_mount_spec(fsname) ((void)0)
#endif
#ifndef MNTTYPE_SWAP
# define MNTTYPE_SWAP "swap"
#endif
#if ENABLE_FEATURE_SWAPON_DISCARD
#ifndef SWAP_FLAG_DISCARD
#define SWAP_FLAG_DISCARD 0x10000
#endif
#ifndef SWAP_FLAG_DISCARD_ONCE
#define SWAP_FLAG_DISCARD_ONCE 0x20000
#endif
#ifndef SWAP_FLAG_DISCARD_PAGES
#define SWAP_FLAG_DISCARD_PAGES 0x40000
#endif
#define SWAP_FLAG_DISCARD_MASK \
(SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | SWAP_FLAG_DISCARD_PAGES)
#endif
#if ENABLE_FEATURE_SWAPON_DISCARD || ENABLE_FEATURE_SWAPON_PRI
struct globals {
int flags;
} FIX_ALIASING;
#define G (*(struct globals*)&bb_common_bufsiz1)
#define g_flags (G.flags)
#define save_g_flags() int save_g_flags = g_flags
#define restore_g_flags() g_flags = save_g_flags
#else
#define g_flags 0
#define save_g_flags() ((void)0)
#define restore_g_flags() ((void)0)
#endif
#define INIT_G() do { } while (0)
#define do_swapoff (applet_name[5] == 'f')
/* Command line options */
enum {
	OPTBIT_a,                                /* -a all       */
	IF_FEATURE_SWAPON_DISCARD( OPTBIT_d ,)   /* -d discard   */
	IF_FEATURE_SWAPON_PRI    ( OPTBIT_p ,)   /* -p priority  */
	OPT_a = 1 << OPTBIT_a,
	/* The "+ 0" keeps these valid (as zero) when the feature is off */
	OPT_d = IF_FEATURE_SWAPON_DISCARD((1 << OPTBIT_d)) + 0,
	OPT_p = IF_FEATURE_SWAPON_PRI    ((1 << OPTBIT_p)) + 0,
};
/* getopt32() stores parsed option bits in option_mask32 */
#define OPT_ALL     (option_mask32 & OPT_a)
#define OPT_DISCARD (option_mask32 & OPT_d)
#define OPT_PRIO    (option_mask32 & OPT_p)
/*
 * Enable or disable swapping on one device/file, depending on which
 * applet we are running as (do_swapoff).
 * Returns 0 on success (or a suppressed "quiet" failure), 1 on error.
 */
static int swap_enable_disable(char *device)
{
	int err = 0;
	int quiet = 0;
	struct stat st;

	/* May rewrite device from LABEL=/UUID= to a real path
	 * (no-op unless FEATURE_MOUNT_LABEL is enabled) */
	resolve_mount_spec(&device);
	if (do_swapoff) {
		err = swapoff(device);
		/* Don't complain on OPT_ALL if not a swap device or if it doesn't exist.
		 * NB: errno is only meaningful here when err != 0; quiet is
		 * only consulted below in that case. */
		quiet = (OPT_ALL && (errno == EINVAL || errno == ENOENT));
	} else {
		/* swapon */
		err = stat(device, &st);
		if (!err) {
			if (ENABLE_DESKTOP && S_ISREG(st.st_mode)) {
				/* A sparse swap file would be unsafe: the kernel
				 * assumes the blocks exist. st_blocks is in
				 * 512-byte units regardless of filesystem. */
				if (st.st_blocks * (off_t)512 < st.st_size) {
					bb_error_msg("%s: file has holes", device);
					return 1;
				}
			}
			err = swapon(device, g_flags);
			/* Don't complain on swapon -a if device is already in use */
			quiet = (OPT_ALL && errno == EBUSY);
		}
	}

	if (err && !quiet) {
		bb_simple_perror_msg(device);
		return 1;
	}
	return 0;
}
#if ENABLE_FEATURE_SWAPON_DISCARD
/*
 * Parse a discard policy ("-d[POL]" on the command line, or the
 * "discard[=POL]" fstab option) into g_flags.
 * s == NULL means "no policy given" -> discard both ways.
 * POL "once"  -> discard only at swapon time;
 * POL "pages" -> discard freed pages as they are released.
 */
static void set_discard_flag(char *s)
{
	/* Clear any earlier policy first, so that a per-fstab-entry
	 * option can override what was given on the command line */
	g_flags = (g_flags & ~SWAP_FLAG_DISCARD_MASK) | SWAP_FLAG_DISCARD;

	if (!s) /* bare "-d" / "discard": no policy value */
		return;

	if (*s == '=') /* "discard=POL" form: step over the '=' */
		s++;

	/* fstab parsing may hand us "POL,otheropt": cut at the comma */
	*strchrnul(s, ',') = '\0';

	if (strcmp(s, "once") == 0)
		g_flags |= SWAP_FLAG_DISCARD_ONCE;
	else if (strcmp(s, "pages") == 0)
		g_flags |= SWAP_FLAG_DISCARD_PAGES;
}
#else
#define set_discard_flag(s) ((void)0)
#endif
#if ENABLE_FEATURE_SWAPON_PRI
/*
 * Parse a swap priority ("-p PRI" on the command line, or the
 * "pri=PRI" fstab option) into g_flags. Silently ignores values
 * that do not parse as a number; values above SWAP_FLAG_PRIO_MASK
 * are clamped to the maximum (32767).
 */
static void set_priority_flag(char *s)
{
	unsigned prio;

	/* fstab parsing may hand us "PRI,otheropt": cut at the comma */
	*strchrnul(s, ',') = '\0';

	/* Max allowed 32767 (== SWAP_FLAG_PRIO_MASK) */
	prio = bb_strtou(s, NULL, 10);
	if (errno)
		return; /* not a valid number: leave flags untouched */

	/* Drop any earlier priority so a later (fstab) setting
	 * overrides one given on the command line */
	g_flags = (g_flags & ~SWAP_FLAG_PRIO_MASK)
		| SWAP_FLAG_PREFER
		| MIN(prio, SWAP_FLAG_PRIO_MASK);
}
#else
#define set_priority_flag(s) ((void)0)
#endif
/*
 * Process every swap entry in /etc/fstab (the -a option).
 * Returns the OR of all per-device results: 0 if everything
 * succeeded, nonzero if any entry failed.
 */
static int do_em_all_in_fstab(void)
{
	struct mntent *m;
	int err = 0;
	FILE *f = xfopen_for_read("/etc/fstab"); /* dies if missing */

	while ((m = getmntent(f)) != NULL) {
		if (strcmp(m->mnt_type, MNTTYPE_SWAP) == 0) {
			/* swapon -a should ignore entries with noauto,
			 * but swapoff -a should process them
			 */
			if (do_swapoff || hasmntopt(m, MNTOPT_NOAUTO) == NULL) {
				/* each swap space might have different flags */
				/* save global flags for the next round */
				save_g_flags();
				if (ENABLE_FEATURE_SWAPON_DISCARD) {
					char *p = hasmntopt(m, "discard");
					if (p) {
						/* move to '=' or to end of string:
						 * strlen("discard") == 7 */
						p += 7;
						set_discard_flag(p);
					}
				}
				if (ENABLE_FEATURE_SWAPON_PRI) {
					char *p = hasmntopt(m, "pri");
					if (p) {
						/* skip "pri=" (4 chars) to the value */
						set_priority_flag(p + 4);
					}
				}
				err |= swap_enable_disable(m->mnt_fsname);
				/* undo any per-entry flag overrides */
				restore_g_flags();
			}
		}
	}

	if (ENABLE_FEATURE_CLEAN_UP)
		endmntent(f);

	return err;
}
/*
 * Turn off every swap area currently listed in /proc/swaps
 * (used by "swapoff -a" in addition to the fstab pass).
 * A missing /proc/swaps is not an error.
 * Returns the OR of all per-device results.
 */
static int do_all_in_proc_swaps(void)
{
	int ret = 0;
	char *line;
	FILE *f = fopen_for_read("/proc/swaps");

	if (!f) /* don't complain if missing */
		return ret;

	while ((line = xmalloc_fgetline(f)) != NULL) {
		/* device lines begin with '/'; the header line does not */
		if (line[0] == '/') {
			/* keep only the first (path) column */
			*strchrnul(line, ' ') = '\0';
			ret |= swap_enable_disable(line);
		}
		free(line);
	}
	if (ENABLE_FEATURE_CLEAN_UP)
		fclose(f);
	return ret;
}
/* swapon option string: -d takes an optional arg ("d::"),
 * -p a mandatory one ("p:"); swapoff only understands -a */
#define OPTSTR_SWAPON "a" \
	IF_FEATURE_SWAPON_DISCARD("d::") \
	IF_FEATURE_SWAPON_PRI("p:")

/*
 * Shared entry point for the swapon and swapoff applets
 * (which applet we are is decided by do_swapoff / applet_name).
 * Exit status is nonzero if any device failed.
 */
int swap_on_off_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int swap_on_off_main(int argc UNUSED_PARAM, char **argv)
{
	IF_FEATURE_SWAPON_PRI(char *prio;)
	IF_FEATURE_SWAPON_DISCARD(char *discard = NULL;)
	int ret = 0;

	INIT_G();

	getopt32(argv, do_swapoff ? "a" : OPTSTR_SWAPON
			IF_FEATURE_SWAPON_DISCARD(, &discard)
			IF_FEATURE_SWAPON_PRI(, &prio)
	);
	argv += optind;

	/* Command-line -d/-p set the baseline flags; fstab entries
	 * may still override them per device (see do_em_all_in_fstab) */
	if (OPT_DISCARD) {
		set_discard_flag(discard);
	}
	if (OPT_PRIO) {
		set_priority_flag(prio);
	}

	if (OPT_ALL) {
		/* swapoff -a does also /proc/swaps */
		if (do_swapoff)
			ret = do_all_in_proc_swaps();
		ret |= do_em_all_in_fstab();
	} else if (!*argv) {
		/* if not -a we need at least one arg */
		bb_show_usage();
	}
	/* Unset -a now to allow for more messages in swap_enable_disable */
	option_mask32 = option_mask32 & ~OPT_a;
	/* Now process devices on the commandline if any */
	while (*argv) {
		ret |= swap_enable_disable(*argv++);
	}
	return ret;
}
| gpl-2.0 |
patrickhwood/uboot-imx | net/net.c | 21 | 39410 | /*
* Copied from Linux Monitor (LiMon) - Networking.
*
* Copyright 1994 - 2000 Neil Russell.
* (See License)
* Copyright 2000 Roland Borde
* Copyright 2000 Paolo Scaffardi
* Copyright 2000-2002 Wolfgang Denk, wd@denx.de
*/
/*
* General Desription:
*
* The user interface supports commands for BOOTP, RARP, and TFTP.
* Also, we support ARP internally. Depending on available data,
* these interact as follows:
*
* BOOTP:
*
* Prerequisites: - own ethernet address
* We want: - own IP address
* - TFTP server IP address
* - name of bootfile
* Next step: ARP
*
* RARP:
*
* Prerequisites: - own ethernet address
* We want: - own IP address
* - TFTP server IP address
* Next step: ARP
*
* ARP:
*
* Prerequisites: - own ethernet address
* - own IP address
* - TFTP server IP address
* We want: - TFTP server ethernet address
* Next step: TFTP
*
* DHCP:
*
* Prerequisites: - own ethernet address
* We want: - IP, Netmask, ServerIP, Gateway IP
* - bootfilename, lease time
* Next step: - TFTP
*
* TFTP:
*
* Prerequisites: - own ethernet address
* - own IP address
* - TFTP server IP address
* - TFTP server ethernet address
* - name of bootfile (if unknown, we use a default name
* derived from our own IP address)
* We want: - load the boot file
* Next step: none
*
* NFS:
*
* Prerequisites: - own ethernet address
* - own IP address
* - name of bootfile (if unknown, we use a default name
* derived from our own IP address)
* We want: - load the boot file
* Next step: none
*
* SNTP:
*
* Prerequisites: - own ethernet address
* - own IP address
* We want: - network time
* Next step: none
*/
#include <common.h>
#include <watchdog.h>
#include <command.h>
#include <net.h>
#include "bootp.h"
#include "tftp.h"
#include "rarp.h"
#include "nfs.h"
#ifdef CONFIG_STATUS_LED
#include <status_led.h>
#include <miiphy.h>
#endif
#if defined(CONFIG_CMD_SNTP)
#include "sntp.h"
#endif
#if defined(CONFIG_CDP_VERSION)
#include <timestamp.h>
#endif
#if defined(CONFIG_CMD_DNS)
#include "dns.h"
#endif
#if defined(CONFIG_CMD_NET)
DECLARE_GLOBAL_DATA_PTR;
#ifndef CONFIG_ARP_TIMEOUT
# define ARP_TIMEOUT 5000UL /* Milliseconds before trying ARP again */
#else
# define ARP_TIMEOUT CONFIG_ARP_TIMEOUT
#endif
#ifndef CONFIG_NET_RETRY_COUNT
# define ARP_TIMEOUT_COUNT 5 /* # of timeouts before giving up */
#else
# define ARP_TIMEOUT_COUNT CONFIG_NET_RETRY_COUNT
#endif
/** BOOTP EXTENTIONS **/
IPaddr_t NetOurSubnetMask=0; /* Our subnet mask (0=unknown) */
IPaddr_t NetOurGatewayIP=0; /* Our gateways IP address */
IPaddr_t NetOurDNSIP=0; /* Our DNS IP address */
#if defined(CONFIG_BOOTP_DNS2)
IPaddr_t NetOurDNS2IP=0; /* Our 2nd DNS IP address */
#endif
char NetOurNISDomain[32]={0,}; /* Our NIS domain */
char NetOurHostName[32]={0,}; /* Our hostname */
char NetOurRootPath[64]={0,}; /* Our bootpath */
ushort NetBootFileSize=0; /* Our bootfile size in blocks */
#ifdef CONFIG_MCAST_TFTP /* Multicast TFTP */
IPaddr_t Mcast_addr;
#endif
/** END OF BOOTP EXTENTIONS **/
ulong NetBootFileXferSize; /* The actual transferred size of the bootfile (in bytes) */
uchar NetOurEther[6]; /* Our ethernet address */
uchar NetServerEther[6] = /* Boot server enet address */
{ 0, 0, 0, 0, 0, 0 };
IPaddr_t NetOurIP; /* Our IP addr (0 = unknown) */
IPaddr_t NetServerIP; /* Server IP addr (0 = unknown) */
volatile uchar *NetRxPacket; /* Current receive packet */
int NetRxPacketLen; /* Current rx packet length */
unsigned NetIPID; /* IP packet ID */
uchar NetBcastAddr[6] = /* Ethernet bcast address */
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
uchar NetEtherNullAddr[6] =
{ 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_API
void (*push_packet)(volatile void *, int len) = 0;
#endif
#if defined(CONFIG_CMD_CDP)
uchar NetCDPAddr[6] = /* Ethernet bcast address */
{ 0x01, 0x00, 0x0c, 0xcc, 0xcc, 0xcc };
#endif
int NetState; /* Network loop state */
#ifdef CONFIG_NET_MULTI
int NetRestartWrap = 0; /* Tried all network devices */
static int NetRestarted = 0; /* Network loop restarted */
static int NetDevExists = 0; /* At least one device configured */
#endif
/* XXX in both little & big endian machines 0xFFFF == ntohs(-1) */
ushort NetOurVLAN = 0xFFFF; /* default is without VLAN */
ushort NetOurNativeVLAN = 0xFFFF; /* ditto */
char BootFile[128]; /* Boot File name */
#if defined(CONFIG_CMD_PING)
IPaddr_t NetPingIP; /* the ip address to ping */
static void PingStart(void);
#endif
#if defined(CONFIG_CMD_CDP)
static void CDPStart(void);
#endif
#if defined(CONFIG_CMD_SNTP)
IPaddr_t NetNtpServerIP; /* NTP server IP address */
int NetTimeOffset=0; /* offset time from UTC */
#endif
#ifdef CONFIG_NETCONSOLE
void NcStart(void);
int nc_input_packet(uchar *pkt, unsigned dest, unsigned src, unsigned len);
#endif
volatile uchar PktBuf[(PKTBUFSRX+1) * PKTSIZE_ALIGN + PKTALIGN];
volatile uchar *NetRxPackets[PKTBUFSRX]; /* Receive packets */
static rxhand_f *packetHandler; /* Current RX packet handler */
static thand_f *timeHandler; /* Current timeout handler */
static ulong timeStart; /* Time base value */
static ulong timeDelta; /* Current timeout value */
volatile uchar *NetTxPacket = 0; /* THE transmit packet */
static int net_check_prereq (proto_t protocol);
/**********************************************************************/
IPaddr_t NetArpWaitPacketIP;
IPaddr_t NetArpWaitReplyIP;
uchar *NetArpWaitPacketMAC; /* MAC address of waiting packet's destination */
uchar *NetArpWaitTxPacket; /* THE transmit packet */
int NetArpWaitTxPacketSize;
uchar NetArpWaitPacketBuf[PKTSIZE_ALIGN + PKTALIGN];
ulong NetArpWaitTimerStart;
int NetArpWaitTry;
/*
 * Broadcast an ARP request for the IP in NetArpWaitPacketIP.
 * If that IP is outside our subnet, we ARP for the gateway instead
 * (classic "ARP for the next hop"); the address actually asked for
 * is recorded in NetArpWaitReplyIP so the reply can be matched.
 * The frame is built in place in NetTxPacket.
 */
void ArpRequest (void)
{
	int i;
	volatile uchar *pkt;
	ARP_t *arp;

	debug("ARP broadcast %d\n", NetArpWaitTry);

	pkt = NetTxPacket;

	/* Ethernet header (dest = broadcast), then the ARP payload */
	pkt += NetSetEther (pkt, NetBcastAddr, PROT_ARP);

	arp = (ARP_t *) pkt;

	arp->ar_hrd = htons (ARP_ETHER);
	arp->ar_pro = htons (PROT_IP);
	arp->ar_hln = 6;      /* Ethernet MAC length */
	arp->ar_pln = 4;      /* IPv4 address length */
	arp->ar_op = htons (ARPOP_REQUEST);

	/* ar_data layout: [0..5] src MAC, [6..9] src IP,
	 * [10..15] target MAC, [16..19] target IP */
	memcpy (&arp->ar_data[0], NetOurEther, 6);		/* source ET addr */
	NetWriteIP ((uchar *) & arp->ar_data[6], NetOurIP);	/* source IP addr */
	for (i = 10; i < 16; ++i) {
		arp->ar_data[i] = 0;				/* dest ET addr = 0 */
	}

	if ((NetArpWaitPacketIP & NetOurSubnetMask) !=
	    (NetOurIP & NetOurSubnetMask)) {
		/* Off-subnet target: resolve the gateway's MAC instead */
		if (NetOurGatewayIP == 0) {
			puts ("## Warning: gatewayip needed but not set\n");
			NetArpWaitReplyIP = NetArpWaitPacketIP;
		} else {
			NetArpWaitReplyIP = NetOurGatewayIP;
		}
	} else {
		NetArpWaitReplyIP = NetArpWaitPacketIP;
	}

	NetWriteIP ((uchar *) & arp->ar_data[16], NetArpWaitReplyIP);
	(void) eth_send (NetTxPacket, (pkt - NetTxPacket) + ARP_HDR_SIZE);
}
/*
 * Called from the main NetLoop poll: if an ARP request is pending
 * and has been unanswered for ARP_TIMEOUT ms, either retransmit it
 * or, after ARP_TIMEOUT_COUNT tries, restart the whole net operation.
 */
void ArpTimeoutCheck(void)
{
	ulong now;

	/* nothing outstanding -> nothing to do */
	if (!NetArpWaitPacketIP)
		return;

	now = get_timer(0);
	if ((now - NetArpWaitTimerStart) <= ARP_TIMEOUT)
		return; /* still within the current wait interval */

	NetArpWaitTry++;
	if (NetArpWaitTry >= ARP_TIMEOUT_COUNT) {
		puts("\nARP Retry count exceeded; starting again\n");
		NetArpWaitTry = 0;
		NetStartAgain();
	} else {
		NetArpWaitTimerStart = now;
		ArpRequest();
	}
}
/*
 * Refresh the network globals (our IP, netmask, gateway, server,
 * VLANs, DNS) from the U-Boot environment — but only when the
 * environment has actually changed since the last call, which is
 * detected via get_env_id().
 */
static void
NetInitLoop(proto_t protocol)
{
	static int env_changed_id = 0;
	bd_t *bd = gd->bd;
	int env_id = get_env_id ();

	/* environment unchanged: keep the cached values */
	if (env_changed_id == env_id)
		return;

	NetCopyIP(&NetOurIP, &bd->bi_ip_addr);
	NetOurGatewayIP = getenv_IPaddr ("gatewayip");
	NetOurSubnetMask = getenv_IPaddr ("netmask");
	NetServerIP = getenv_IPaddr ("serverip");
	NetOurNativeVLAN = getenv_VLAN("nvlan");
	NetOurVLAN = getenv_VLAN("vlan");
#if defined(CONFIG_CMD_DNS)
	NetOurDNSIP = getenv_IPaddr("dnsip");
#endif
	env_changed_id = env_id;
}
/**********************************************************************/
/*
* Main network processing loop.
*/
/*
 * Main network processing loop: set up packet buffers, initialize
 * the ethernet device, kick off the start function for the given
 * protocol, then poll for packets and timeouts until a handler sets
 * NetState to SUCCESS/FAIL/RESTART.
 * Returns the number of bytes transferred on success, -1 on failure.
 */
int
NetLoop(proto_t protocol)
{
	bd_t *bd = gd->bd;

#ifdef CONFIG_NET_MULTI
	NetRestarted = 0;
	NetDevExists = 0;
#endif

	/* XXX problem with bss workaround */
	NetArpWaitPacketMAC = NULL;
	NetArpWaitTxPacket = NULL;
	NetArpWaitPacketIP = 0;
	NetArpWaitReplyIP = 0;
	NetArpWaitTxPacket = NULL;
	NetTxPacket = NULL;

	if (!NetTxPacket) {
		int i;
		/*
		 * Setup packet buffers, aligned correctly.
		 * One TX buffer followed by PKTBUFSRX RX buffers,
		 * all carved out of PktBuf on PKTALIGN boundaries.
		 */
		NetTxPacket = &PktBuf[0] + (PKTALIGN - 1);
		NetTxPacket -= (ulong)NetTxPacket % PKTALIGN;
		for (i = 0; i < PKTBUFSRX; i++) {
			NetRxPackets[i] = NetTxPacket + (i+1)*PKTSIZE_ALIGN;
		}
	}

	if (!NetArpWaitTxPacket) {
		/* separate aligned buffer for the packet parked while ARP runs */
		NetArpWaitTxPacket = &NetArpWaitPacketBuf[0] + (PKTALIGN - 1);
		NetArpWaitTxPacket -= (ulong)NetArpWaitTxPacket % PKTALIGN;
		NetArpWaitTxPacketSize = 0;
	}

	eth_halt();
#ifdef CONFIG_NET_MULTI
	eth_set_current();
#endif
	if (eth_init(bd) < 0) {
		eth_halt();
		return(-1);
	}

restart:
#ifdef CONFIG_NET_MULTI
	memcpy (NetOurEther, eth_get_dev()->enetaddr, 6);
#else
	eth_getenv_enetaddr("ethaddr", NetOurEther);
#endif

	NetState = NETLOOP_CONTINUE;

	/*
	 * Start the ball rolling with the given start function. From
	 * here on, this code is a state machine driven by received
	 * packets and timer events.
	 */
	NetInitLoop(protocol);

	switch (net_check_prereq (protocol)) {
	case 1:
		/* network not configured */
		eth_halt();
		return (-1);

#ifdef CONFIG_NET_MULTI
	case 2:
		/* network device not configured */
		break;
#endif /* CONFIG_NET_MULTI */

	case 0:
#ifdef CONFIG_NET_MULTI
		NetDevExists = 1;
#endif
		/* dispatch to the per-protocol start routine */
		switch (protocol) {
		case TFTP:
			/* always use ARP to get server ethernet address */
			TftpStart();
			break;

#if defined(CONFIG_CMD_DHCP)
		case DHCP:
			BootpTry = 0;
			NetOurIP = 0;
			DhcpRequest();		/* Basically same as BOOTP */
			break;
#endif

		case BOOTP:
			BootpTry = 0;
			NetOurIP = 0;
			BootpRequest ();
			break;

		case RARP:
			RarpTry = 0;
			NetOurIP = 0;
			RarpRequest ();
			break;

#if defined(CONFIG_CMD_PING)
		case PING:
			PingStart();
			break;
#endif

#if defined(CONFIG_CMD_NFS)
		case NFS:
			NfsStart();
			break;
#endif

#if defined(CONFIG_CMD_CDP)
		case CDP:
			CDPStart();
			break;
#endif

#ifdef CONFIG_NETCONSOLE
		case NETCONS:
			NcStart();
			break;
#endif

#if defined(CONFIG_CMD_SNTP)
		case SNTP:
			SntpStart();
			break;
#endif

#if defined(CONFIG_CMD_DNS)
		case DNS:
			DnsStart();
			break;
#endif

		default:
			break;
		}

		NetBootFileXferSize = 0;
		break;
	}

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
#if defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN) && defined(CONFIG_STATUS_LED) && defined(STATUS_LED_RED)
	/*
	 * Echo the inverted link state to the fault LED.
	 */
	if(miiphy_link(eth_get_dev()->name, CONFIG_SYS_FAULT_MII_ADDR)) {
		status_led_set (STATUS_LED_RED, STATUS_LED_OFF);
	} else {
		status_led_set (STATUS_LED_RED, STATUS_LED_ON);
	}
#endif /* CONFIG_SYS_FAULT_ECHO_LINK_DOWN, ... */
#endif /* CONFIG_MII, ... */

	/*
	 * Main packet reception loop.  Loop receiving packets until
	 * someone sets `NetState' to a state that terminates.
	 */
	for (;;) {
		WATCHDOG_RESET();
#ifdef CONFIG_SHOW_ACTIVITY
		{
			extern void show_activity(int arg);
			show_activity(1);
		}
#endif
		/*
		 * Check the ethernet for a new packet.  The ethernet
		 * receive routine will process it.
		 */
		eth_rx();

		/*
		 * Abort if ctrl-c was pressed.
		 */
		if (ctrlc()) {
			eth_halt();
			puts ("\nAbort\n");
			return (-1);
		}

		ArpTimeoutCheck();

		/*
		 * Check for a timeout, and run the timeout handler
		 * if we have one.  The handler is cleared *before*
		 * it runs so it may re-arm itself via NetSetTimeout().
		 */
		if (timeHandler && ((get_timer(0) - timeStart) > timeDelta)) {
			thand_f *x;

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
#  if defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN) && \
    defined(CONFIG_STATUS_LED) && \
    defined(STATUS_LED_RED)
			/*
			 * Echo the inverted link state to the fault LED.
			 */
			if(miiphy_link(eth_get_dev()->name, CONFIG_SYS_FAULT_MII_ADDR)) {
				status_led_set (STATUS_LED_RED, STATUS_LED_OFF);
			} else {
				status_led_set (STATUS_LED_RED, STATUS_LED_ON);
			}
#  endif /* CONFIG_SYS_FAULT_ECHO_LINK_DOWN, ... */
#endif /* CONFIG_MII, ... */
			x = timeHandler;
			timeHandler = (thand_f *)0;
			(*x)();
		}


		switch (NetState) {

		case NETLOOP_RESTART:
#ifdef CONFIG_NET_MULTI
			NetRestarted = 1;
#endif
			goto restart;

		case NETLOOP_SUCCESS:
			if (NetBootFileXferSize > 0) {
				char buf[20];
				printf("Bytes transferred = %ld (%lx hex)\n",
					NetBootFileXferSize,
					NetBootFileXferSize);
				/* export transfer size and address for scripts */
				sprintf(buf, "%lX", NetBootFileXferSize);
				setenv("filesize", buf);

				sprintf(buf, "%lX", (unsigned long)load_addr);
				setenv("fileaddr", buf);
			}
			eth_halt();
			return NetBootFileXferSize;

		case NETLOOP_FAIL:
			return (-1);
		}
	}
}
/**********************************************************************/
/* Timeout handler installed by NetStartAgain(): after the retry
 * delay, flip the state machine back to RESTART. */
static void
startAgainTimeout(void)
{
	NetState = NETLOOP_RESTART;
}
/* RX handler installed during the retry delay: drop everything
 * until the restart timeout fires. */
static void
startAgainHandler(uchar * pkt, unsigned dest, unsigned src, unsigned len)
{
	/* Totally ignore the packet */
}
/*
 * Restart the current network operation after a failure/timeout.
 * Honors the "netretry" environment variable:
 *   "no"   -> give up immediately (NETLOOP_FAIL),
 *   "once" -> retry only until every device has been tried once.
 * With CONFIG_NET_MULTI this also rotates to the next ethernet
 * device before retrying.
 */
void NetStartAgain (void)
{
	char *nretry;
	int noretry = 0, once = 0;

	if ((nretry = getenv ("netretry")) != NULL) {
		noretry = (strcmp (nretry, "no") == 0);
		once = (strcmp (nretry, "once") == 0);
	}
	if (noretry) {
		eth_halt ();
		NetState = NETLOOP_FAIL;
		return;
	}
#ifndef CONFIG_NET_MULTI
	/* single device: just wait 10s, then restart */
	NetSetTimeout (10000UL, startAgainTimeout);
	NetSetHandler (startAgainHandler);
#else	/* !CONFIG_NET_MULTI*/
	eth_halt ();
#if !defined(CONFIG_NET_DO_NOT_TRY_ANOTHER)
	eth_try_another (!NetRestarted);
#endif
	eth_init (gd->bd);
	if (NetRestartWrap) {
		/* we have cycled through every device once */
		NetRestartWrap = 0;
		if (NetDevExists && !once) {
			/* delay before the next full round of devices */
			NetSetTimeout (10000UL, startAgainTimeout);
			NetSetHandler (startAgainHandler);
		} else {
			NetState = NETLOOP_FAIL;
		}
	} else {
		NetState = NETLOOP_RESTART;
	}
#endif	/* CONFIG_NET_MULTI */
}
/**********************************************************************/
/*
* Miscelaneous bits.
*/
/* Install the RX handler called by NetReceive() for payload packets. */
void
NetSetHandler(rxhand_f * f)
{
	packetHandler = f;
}
/*
 * Arm (or, with iv == 0, disarm) the single NetLoop timeout:
 * after iv milliseconds without being re-armed, handler f is run.
 */
void
NetSetTimeout(ulong iv, thand_f * f)
{
	if (iv == 0) {
		/* zero interval cancels any pending timeout */
		timeHandler = (thand_f *)0;
		return;
	}
	timeHandler = f;
	timeStart = get_timer(0);
	timeDelta = iv;
}
/* Thin wrapper: transmit a fully-built frame on the current device. */
void
NetSendPacket(volatile uchar * pkt, int len)
{
	(void) eth_send(pkt, len);
}
/*
 * Send a UDP packet.  The caller has already placed the payload at
 * NetTxPacket + (ether+IP header offset); this routine prepends the
 * headers.  If the destination MAC is still unknown (all zeroes),
 * the packet is copied into the ARP-wait buffer, an ARP request is
 * issued, and the packet is transmitted later from the ARP reply
 * handler in NetReceive().
 * Returns 0 if transmitted immediately, 1 if parked awaiting ARP.
 */
int
NetSendUDPPacket(uchar *ether, IPaddr_t dest, int dport, int sport, int len)
{
	uchar *pkt;

	/* convert to new style broadcast */
	if (dest == 0)
		dest = 0xFFFFFFFF;

	/* if broadcast, make the ether address a broadcast and don't do ARP */
	if (dest == 0xFFFFFFFF)
		ether = NetBcastAddr;

	/* if MAC address was not discovered yet, save the packet and do an ARP request */
	if (memcmp(ether, NetEtherNullAddr, 6) == 0) {

		debug("sending ARP for %08lx\n", dest);

		NetArpWaitPacketIP = dest;
		NetArpWaitPacketMAC = ether; /* filled in by the ARP reply handler */

		pkt = NetArpWaitTxPacket;
		pkt += NetSetEther (pkt, NetArpWaitPacketMAC, PROT_IP);

		NetSetIP (pkt, dest, dport, sport, len);
		/* payload was written into NetTxPacket by the caller;
		 * copy it over into the parked packet at the same offset */
		memcpy(pkt + IP_HDR_SIZE, (uchar *)NetTxPacket + (pkt - (uchar *)NetArpWaitTxPacket) + IP_HDR_SIZE, len);

		/* size of the waiting packet */
		NetArpWaitTxPacketSize = (pkt - NetArpWaitTxPacket) + IP_HDR_SIZE + len;

		/* and do the ARP request */
		NetArpWaitTry = 1;
		NetArpWaitTimerStart = get_timer(0);
		ArpRequest();
		return 1;	/* waiting */
	}

	debug("sending UDP to %08lx/%pM\n", dest, ether);

	pkt = (uchar *)NetTxPacket;
	pkt += NetSetEther (pkt, ether, PROT_IP);
	NetSetIP (pkt, dest, dport, sport, len);
	(void) eth_send(NetTxPacket, (pkt - NetTxPacket) + IP_HDR_SIZE + len);

	return 0;	/* transmitted */
}
#if defined(CONFIG_CMD_PING)
static ushort PingSeqNo;
/*
 * Build an ICMP echo-request for NetPingIP in the ARP-wait buffer
 * and kick off an ARP request for the target; the actual echo is
 * transmitted by the ARP reply handler.  Always returns 1 (waiting).
 */
int PingSend(void)
{
	static uchar mac[6];
	volatile IP_t *ip;
	volatile ushort *s;
	uchar *pkt;

	/* XXX always send arp request */

	memcpy(mac, NetEtherNullAddr, 6);

	debug("sending ARP for %08lx\n", NetPingIP);

	NetArpWaitPacketIP = NetPingIP;
	NetArpWaitPacketMAC = mac;

	pkt = NetArpWaitTxPacket;
	pkt += NetSetEther(pkt, mac, PROT_IP);

	ip = (volatile IP_t *)pkt;

	/*
	 * Construct an IP and ICMP header.  (need to set no fragment bit - XXX)
	 * The ICMP message (8 bytes) is overlaid on the UDP fields of IP_t.
	 */
	ip->ip_hl_v  = 0x45;		/* IP_HDR_SIZE / 4 (not including UDP) */
	ip->ip_tos   = 0;
	ip->ip_len   = htons(IP_HDR_SIZE_NO_UDP + 8);
	ip->ip_id    = htons(NetIPID++);
	ip->ip_off   = htons(IP_FLAGS_DFRAG);	/* Don't fragment */
	ip->ip_ttl   = 255;
	ip->ip_p     = 0x01;		/* ICMP */
	ip->ip_sum   = 0;
	NetCopyIP((void*)&ip->ip_src, &NetOurIP); /* already in network byte order */
	NetCopyIP((void*)&ip->ip_dst, &NetPingIP);	   /* - "" - */
	ip->ip_sum   = ~NetCksum((uchar *)ip, IP_HDR_SIZE_NO_UDP / 2);

	s = &ip->udp_src;		/* XXX ICMP starts here */
	s[0] = htons(0x0800);		/* echo-request, code */
	s[1] = 0;			/* checksum */
	s[2] = 0;			/* identifier */
	s[3] = htons(PingSeqNo++);	/* sequence number */
	s[1] = ~NetCksum((uchar *)s, 8/2); /* ICMP checksum over the 8 bytes */

	/* size of the waiting packet */
	NetArpWaitTxPacketSize = (pkt - NetArpWaitTxPacket) + IP_HDR_SIZE_NO_UDP + 8;

	/* and do the ARP request */
	NetArpWaitTry = 1;
	NetArpWaitTimerStart = get_timer(0);
	ArpRequest();
	return 1;	/* waiting */
}
/* Ping timed out with no reply: fail the net operation. */
static void
PingTimeout (void)
{
	eth_halt();
	NetState = NETLOOP_FAIL;	/* we did not get the reply */
}
/*
 * RX handler while pinging: any IP packet whose source address is
 * the host we pinged counts as success.
 * NOTE(review): the ICMP type is not checked here — any packet from
 * NetPingIP is accepted; this matches the original behavior.
 */
static void
PingHandler (uchar * pkt, unsigned dest, unsigned src, unsigned len)
{
	volatile IP_t *ip = (volatile IP_t *)pkt;
	IPaddr_t from = NetReadIP((void *)&ip->ip_src);

	if (from == NetPingIP)
		NetState = NETLOOP_SUCCESS;
	/* packets from anyone else are silently ignored */
}
/* Protocol start routine for PING: arm a 10s overall timeout,
 * install the reply handler, and send the first echo request. */
static void PingStart(void)
{
#if defined(CONFIG_NET_MULTI)
	printf ("Using %s device\n", eth_get_name());
#endif	/* CONFIG_NET_MULTI */
	NetSetTimeout (10000UL, PingTimeout);
	NetSetHandler (PingHandler);

	PingSend();
}
#endif
#if defined(CONFIG_CMD_CDP)
#define CDP_DEVICE_ID_TLV 0x0001
#define CDP_ADDRESS_TLV 0x0002
#define CDP_PORT_ID_TLV 0x0003
#define CDP_CAPABILITIES_TLV 0x0004
#define CDP_VERSION_TLV 0x0005
#define CDP_PLATFORM_TLV 0x0006
#define CDP_NATIVE_VLAN_TLV 0x000a
#define CDP_APPLIANCE_VLAN_TLV 0x000e
#define CDP_TRIGGER_TLV 0x000f
#define CDP_POWER_CONSUMPTION_TLV 0x0010
#define CDP_SYSNAME_TLV 0x0014
#define CDP_SYSOBJECT_TLV 0x0015
#define CDP_MANAGEMENT_ADDRESS_TLV 0x0016
#define CDP_TIMEOUT 250UL /* one packet every 250ms */
static int CDPSeq;
static int CDPOK;
ushort CDPNativeVLAN;
ushort CDPApplianceVLAN;
static const uchar CDP_SNAP_hdr[8] = { 0xAA, 0xAA, 0x03, 0x00, 0x00, 0x0C, 0x20, 0x00 };
/*
 * Compute the CDP checksum over buff[0..len-1].  This is the usual
 * ones'-complement Internet checksum, except that (per Cisco's
 * implementation) a trailing odd byte is *sign*-extended before
 * being summed, and an odd starting address is handled by byte
 * swapping at the end.  Returns the checksum in network byte order.
 */
static ushort CDP_compute_csum(const uchar *buff, ushort len)
{
	ushort csum;
	int     odd;
	ulong   result = 0;
	ushort  leftover;
	ushort *p;

	if (len > 0) {
		odd = 1 & (ulong)buff;
		if (odd) {
			/* align to a 16-bit boundary; fixed up after the loop */
			result = *buff << 8;
			len--;
			buff++;
		}

		while (len > 1) {
			p = (ushort *)buff;
			result += *p++;
			buff = (uchar *)p;
			if (result & 0x80000000)
				result = (result & 0xFFFF) + (result >> 16);
			len -= 2;
		}

		if (len) {
			leftover = (signed short)(*(const signed char *)buff);
			/* CISCO SUCKS big time! (and blows too):
			 * CDP uses the IP checksum algorithm with a twist;
			 * for the last byte it *sign* extends and sums.
			 */
			result = (result & 0xffff0000) | ((result + leftover) & 0x0000ffff);
		}
		while (result >> 16)
			result = (result & 0xFFFF) + (result >> 16);

		if (odd)
			result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
	}

	/* add up 16-bit and 17-bit words for 17+c bits */
	result = (result & 0xffff) + (result >> 16);

	/* add up 16-bit and 2-bit for 16+c bit */
	result = (result & 0xffff) + (result >> 16);

	/* add up carry.. */
	result = (result & 0xffff) + (result >> 16);

	/* negate */
	csum = ~(ushort)result;

	/* run time endian detection */
	if (csum != htons(csum))	/* little endian */
		csum = htons(csum);

	return csum;
}
/*
 * Build and transmit one CDP "trigger" frame advertising this
 * device.  The frame is: ethernet header (to the CDP multicast
 * address), SNAP header, 4-byte CDP header (version, TTL, checksum),
 * then a series of optional TLVs selected at compile time.
 * The checksum is computed last over the CDP portion and patched
 * back in via cp.  Always returns 0.
 */
int CDPSendTrigger(void)
{
	volatile uchar *pkt;
	volatile ushort *s;
	volatile ushort *cp;
	Ethernet_t *et;
	int len;
	ushort chksum;
#if	defined(CONFIG_CDP_DEVICE_ID) || defined(CONFIG_CDP_PORT_ID)   || \
	defined(CONFIG_CDP_VERSION)   || defined(CONFIG_CDP_PLATFORM)
	char buf[32];
#endif

	pkt = NetTxPacket;
	et = (Ethernet_t *)pkt;

	/* NOTE: trigger sent not on any VLAN */

	/* form ethernet header */
	memcpy(et->et_dest, NetCDPAddr, 6);
	memcpy(et->et_src, NetOurEther, 6);

	pkt += ETHER_HDR_SIZE;

	/* SNAP header */
	memcpy((uchar *)pkt, CDP_SNAP_hdr, sizeof(CDP_SNAP_hdr));
	pkt += sizeof(CDP_SNAP_hdr);

	/* CDP header */
	*pkt++ = 0x02;				/* CDP version 2 */
	*pkt++ = 180;				/* TTL */
	s = (volatile ushort *)pkt;
	cp = s;					/* remember checksum position */
	*s++ = htons(0);			/* checksum (0 for later calculation) */

	/* CDP fields */
#ifdef CONFIG_CDP_DEVICE_ID
	*s++ = htons(CDP_DEVICE_ID_TLV);
	*s++ = htons(CONFIG_CDP_DEVICE_ID);
	sprintf(buf, CONFIG_CDP_DEVICE_ID_PREFIX "%pm", NetOurEther);
	memcpy((uchar *)s, buf, 16);
	s += 16 / 2;
#endif

#ifdef CONFIG_CDP_PORT_ID
	*s++ = htons(CDP_PORT_ID_TLV);
	memset(buf, 0, sizeof(buf));
	sprintf(buf, CONFIG_CDP_PORT_ID, eth_get_dev_index());
	len = strlen(buf);
	if (len & 1)	/* make it even */
		len++;
	*s++ = htons(len + 4);		/* TLV length includes type+len fields */
	memcpy((uchar *)s, buf, len);
	s += len / 2;
#endif

#ifdef CONFIG_CDP_CAPABILITIES
	*s++ = htons(CDP_CAPABILITIES_TLV);
	*s++ = htons(8);
	*(ulong *)s = htonl(CONFIG_CDP_CAPABILITIES);
	s += 2;
#endif

#ifdef CONFIG_CDP_VERSION
	*s++ = htons(CDP_VERSION_TLV);
	memset(buf, 0, sizeof(buf));
	strcpy(buf, CONFIG_CDP_VERSION);
	len = strlen(buf);
	if (len & 1)	/* make it even */
		len++;
	*s++ = htons(len + 4);
	memcpy((uchar *)s, buf, len);
	s += len / 2;
#endif

#ifdef CONFIG_CDP_PLATFORM
	*s++ = htons(CDP_PLATFORM_TLV);
	memset(buf, 0, sizeof(buf));
	strcpy(buf, CONFIG_CDP_PLATFORM);
	len = strlen(buf);
	if (len & 1)	/* make it even */
		len++;
	*s++ = htons(len + 4);
	memcpy((uchar *)s, buf, len);
	s += len / 2;
#endif

#ifdef CONFIG_CDP_TRIGGER
	*s++ = htons(CDP_TRIGGER_TLV);
	*s++ = htons(8);
	*(ulong *)s = htonl(CONFIG_CDP_TRIGGER);
	s += 2;
#endif

#ifdef CONFIG_CDP_POWER_CONSUMPTION
	*s++ = htons(CDP_POWER_CONSUMPTION_TLV);
	*s++ = htons(6);
	*s++ = htons(CONFIG_CDP_POWER_CONSUMPTION);
#endif

	/* length of ethernet packet */
	len = (uchar *)s - ((uchar *)NetTxPacket + ETHER_HDR_SIZE);
	et->et_protlen = htons(len);

	/* checksum over the CDP portion (after ether + SNAP headers) */
	len = ETHER_HDR_SIZE + sizeof(CDP_SNAP_hdr);
	chksum = CDP_compute_csum((uchar *)NetTxPacket + len, (uchar *)s - (NetTxPacket + len));
	if (chksum == 0)
		chksum = 0xFFFF;	/* 0 is reserved to mean "not yet computed" */
	*cp = htons(chksum);

	(void) eth_send(NetTxPacket, (uchar *)s - NetTxPacket);
	return 0;
}
/*
 * CDP timer tick: re-send the trigger up to 3 times, one per
 * CDP_TIMEOUT interval.  After the last attempt, succeed if a
 * valid CDP packet was seen (CDPOK), otherwise restart.
 */
static void
CDPTimeout (void)
{
	CDPSeq++;

	if (CDPSeq < 3) {
		/* re-arm and send another trigger */
		NetSetTimeout (CDP_TIMEOUT, CDPTimeout);
		CDPSendTrigger();
		return;
	}

	/* if not OK try again */
	if (CDPOK)
		NetState = NETLOOP_SUCCESS;
	else
		NetStartAgain();
}
/* RX handler while CDP runs: non-CDP traffic is ignored (CDP frames
 * are intercepted earlier, in NetReceive, by destination MAC). */
static void
CDPDummyHandler (uchar * pkt, unsigned dest, unsigned src, unsigned len)
{
	/* nothing */
}
/*
 * Parse a received CDP frame (pkt points just past the ethernet
 * header).  Validates the SNAP header, CDP version/TTL and checksum,
 * then walks the TLV list looking for the native VLAN and appliance
 * VLAN TLVs; on success stores them in CDPNativeVLAN /
 * CDPApplianceVLAN and sets CDPOK.
 */
static void
CDPHandler(const uchar * pkt, unsigned len)
{
	const uchar *t;
	const ushort *ss;
	ushort type, tlen;
	uchar applid;
	ushort vlan, nvlan;

	/* minimum size? */
	if (len < sizeof(CDP_SNAP_hdr) + 4)
		goto pkt_short;

	/* check for valid CDP SNAP header */
	if (memcmp(pkt, CDP_SNAP_hdr, sizeof(CDP_SNAP_hdr)) != 0)
		return;

	pkt += sizeof(CDP_SNAP_hdr);
	len -= sizeof(CDP_SNAP_hdr);

	/* Version of CDP protocol must be >= 2 and TTL != 0 */
	if (pkt[0] < 0x02 || pkt[1] == 0)
		return;

	/* if version is greater than 0x02 maybe we'll have a problem; output a warning */
	if (pkt[0] != 0x02)
		printf("** WARNING: CDP packet received with a protocol version %d > 2\n",
				pkt[0] & 0xff);

	/* checksum covers the whole CDP portion and must sum to 0 */
	if (CDP_compute_csum(pkt, len) != 0)
		return;

	/* skip the 4-byte CDP header (version, TTL, checksum) */
	pkt += 4;
	len -= 4;

	/* sentinel: "no VLAN seen" */
	vlan = htons(-1);
	nvlan = htons(-1);
	while (len > 0) {
		if (len < 4)
			goto pkt_short;

		ss = (const ushort *)pkt;
		type = ntohs(ss[0]);
		tlen = ntohs(ss[1]);	/* TLV length includes the 4-byte type+len */
		if (tlen > len) {
			goto pkt_short;
		}

		pkt += tlen;
		len -= tlen;

		ss += 2;	/* point ss to the data of the TLV */
		tlen -= 4;

		switch (type) {
		case CDP_DEVICE_ID_TLV:
			break;
		case CDP_ADDRESS_TLV:
			break;
		case CDP_PORT_ID_TLV:
			break;
		case CDP_CAPABILITIES_TLV:
			break;
		case CDP_VERSION_TLV:
			break;
		case CDP_PLATFORM_TLV:
			break;
		case CDP_NATIVE_VLAN_TLV:
			nvlan = *ss;	/* kept in network byte order */
			break;
		case CDP_APPLIANCE_VLAN_TLV:
			/* data is a list of (appliance-id, vlan) triplets */
			t = (const uchar *)ss;
			while (tlen > 0) {
				if (tlen < 3)
					goto pkt_short;

				applid = t[0];
				ss = (const ushort *)(t + 1);

#ifdef CONFIG_CDP_APPLIANCE_VLAN_TYPE
				if (applid == CONFIG_CDP_APPLIANCE_VLAN_TYPE)
					vlan = *ss;
#else
				vlan = ntohs(*ss);	/* XXX will this work; dunno */
#endif
				t += 3; tlen -= 3;
			}
			break;
		case CDP_TRIGGER_TLV:
			break;
		case CDP_POWER_CONSUMPTION_TLV:
			break;
		case CDP_SYSNAME_TLV:
			break;
		case CDP_SYSOBJECT_TLV:
			break;
		case CDP_MANAGEMENT_ADDRESS_TLV:
			break;
		}
	}

	CDPApplianceVLAN = vlan;
	CDPNativeVLAN = nvlan;

	CDPOK = 1;
	return;

 pkt_short:
	printf("** CDP packet is too short\n");
	return;
}
/* Protocol start routine for CDP: reset discovery state, arm the
 * per-trigger timeout and send the first trigger frame. */
static void CDPStart(void)
{
#if defined(CONFIG_NET_MULTI)
	printf ("Using %s device\n", eth_get_name());
#endif
	CDPSeq = 0;
	CDPOK = 0;

	/* htons(-1) == "not discovered yet" */
	CDPNativeVLAN = htons(-1);
	CDPApplianceVLAN = htons(-1);

	NetSetTimeout (CDP_TIMEOUT, CDPTimeout);
	NetSetHandler (CDPDummyHandler);

	CDPSendTrigger();
}
#endif
void
NetReceive(volatile uchar * inpkt, int len)
{
Ethernet_t *et;
IP_t *ip;
ARP_t *arp;
IPaddr_t tmp;
int x;
uchar *pkt;
#if defined(CONFIG_CMD_CDP)
int iscdp;
#endif
ushort cti = 0, vlanid = VLAN_NONE, myvlanid, mynvlanid;
debug("packet received\n");
NetRxPacket = inpkt;
NetRxPacketLen = len;
et = (Ethernet_t *)inpkt;
/* too small packet? */
if (len < ETHER_HDR_SIZE)
return;
#ifdef CONFIG_API
if (push_packet) {
(*push_packet)(inpkt, len);
return;
}
#endif
#if defined(CONFIG_CMD_CDP)
/* keep track if packet is CDP */
iscdp = memcmp(et->et_dest, NetCDPAddr, 6) == 0;
#endif
myvlanid = ntohs(NetOurVLAN);
if (myvlanid == (ushort)-1)
myvlanid = VLAN_NONE;
mynvlanid = ntohs(NetOurNativeVLAN);
if (mynvlanid == (ushort)-1)
mynvlanid = VLAN_NONE;
x = ntohs(et->et_protlen);
debug("packet received\n");
if (x < 1514) {
/*
* Got a 802 packet. Check the other protocol field.
*/
x = ntohs(et->et_prot);
ip = (IP_t *)(inpkt + E802_HDR_SIZE);
len -= E802_HDR_SIZE;
} else if (x != PROT_VLAN) { /* normal packet */
ip = (IP_t *)(inpkt + ETHER_HDR_SIZE);
len -= ETHER_HDR_SIZE;
} else { /* VLAN packet */
VLAN_Ethernet_t *vet = (VLAN_Ethernet_t *)et;
debug("VLAN packet received\n");
/* too small packet? */
if (len < VLAN_ETHER_HDR_SIZE)
return;
/* if no VLAN active */
if ((ntohs(NetOurVLAN) & VLAN_IDMASK) == VLAN_NONE
#if defined(CONFIG_CMD_CDP)
&& iscdp == 0
#endif
)
return;
cti = ntohs(vet->vet_tag);
vlanid = cti & VLAN_IDMASK;
x = ntohs(vet->vet_type);
ip = (IP_t *)(inpkt + VLAN_ETHER_HDR_SIZE);
len -= VLAN_ETHER_HDR_SIZE;
}
debug("Receive from protocol 0x%x\n", x);
#if defined(CONFIG_CMD_CDP)
if (iscdp) {
CDPHandler((uchar *)ip, len);
return;
}
#endif
if ((myvlanid & VLAN_IDMASK) != VLAN_NONE) {
if (vlanid == VLAN_NONE)
vlanid = (mynvlanid & VLAN_IDMASK);
/* not matched? */
if (vlanid != (myvlanid & VLAN_IDMASK))
return;
}
switch (x) {
case PROT_ARP:
/*
* We have to deal with two types of ARP packets:
* - REQUEST packets will be answered by sending our
* IP address - if we know it.
* - REPLY packates are expected only after we asked
* for the TFTP server's or the gateway's ethernet
* address; so if we receive such a packet, we set
* the server ethernet address
*/
debug("Got ARP\n");
arp = (ARP_t *)ip;
if (len < ARP_HDR_SIZE) {
printf("bad length %d < %d\n", len, ARP_HDR_SIZE);
return;
}
if (ntohs(arp->ar_hrd) != ARP_ETHER) {
return;
}
if (ntohs(arp->ar_pro) != PROT_IP) {
return;
}
if (arp->ar_hln != 6) {
return;
}
if (arp->ar_pln != 4) {
return;
}
if (NetOurIP == 0) {
return;
}
if (NetReadIP(&arp->ar_data[16]) != NetOurIP) {
return;
}
switch (ntohs(arp->ar_op)) {
case ARPOP_REQUEST: /* reply with our IP address */
debug("Got ARP REQUEST, return our IP\n");
pkt = (uchar *)et;
pkt += NetSetEther(pkt, et->et_src, PROT_ARP);
arp->ar_op = htons(ARPOP_REPLY);
memcpy (&arp->ar_data[10], &arp->ar_data[0], 6);
NetCopyIP(&arp->ar_data[16], &arp->ar_data[6]);
memcpy (&arp->ar_data[ 0], NetOurEther, 6);
NetCopyIP(&arp->ar_data[ 6], &NetOurIP);
(void) eth_send((uchar *)et, (pkt - (uchar *)et) + ARP_HDR_SIZE);
return;
case ARPOP_REPLY: /* arp reply */
/* are we waiting for a reply */
if (!NetArpWaitPacketIP || !NetArpWaitPacketMAC)
break;
#ifdef CONFIG_KEEP_SERVERADDR
if (NetServerIP == NetArpWaitPacketIP) {
char buf[20];
sprintf(buf, "%pM", arp->ar_data);
setenv("serveraddr", buf);
}
#endif
debug("Got ARP REPLY, set server/gtwy eth addr (%pM)\n",
arp->ar_data);
tmp = NetReadIP(&arp->ar_data[6]);
/* matched waiting packet's address */
if (tmp == NetArpWaitReplyIP) {
debug("Got it\n");
/* save address for later use */
memcpy(NetArpWaitPacketMAC, &arp->ar_data[0], 6);
#ifdef CONFIG_NETCONSOLE
(*packetHandler)(0,0,0,0);
#endif
/* modify header, and transmit it */
memcpy(((Ethernet_t *)NetArpWaitTxPacket)->et_dest, NetArpWaitPacketMAC, 6);
(void) eth_send(NetArpWaitTxPacket, NetArpWaitTxPacketSize);
/* no arp request pending now */
NetArpWaitPacketIP = 0;
NetArpWaitTxPacketSize = 0;
NetArpWaitPacketMAC = NULL;
}
return;
default:
debug("Unexpected ARP opcode 0x%x\n", ntohs(arp->ar_op));
return;
}
break;
case PROT_RARP:
debug("Got RARP\n");
arp = (ARP_t *)ip;
if (len < ARP_HDR_SIZE) {
printf("bad length %d < %d\n", len, ARP_HDR_SIZE);
return;
}
if ((ntohs(arp->ar_op) != RARPOP_REPLY) ||
(ntohs(arp->ar_hrd) != ARP_ETHER) ||
(ntohs(arp->ar_pro) != PROT_IP) ||
(arp->ar_hln != 6) || (arp->ar_pln != 4)) {
puts ("invalid RARP header\n");
} else {
NetCopyIP(&NetOurIP, &arp->ar_data[16]);
if (NetServerIP == 0)
NetCopyIP(&NetServerIP, &arp->ar_data[ 6]);
memcpy (NetServerEther, &arp->ar_data[ 0], 6);
(*packetHandler)(0,0,0,0);
}
break;
case PROT_IP:
debug("Got IP\n");
if (len < IP_HDR_SIZE) {
debug("len bad %d < %lu\n", len, (ulong)IP_HDR_SIZE);
return;
}
if (len < ntohs(ip->ip_len)) {
printf("len bad %d < %d\n", len, ntohs(ip->ip_len));
return;
}
len = ntohs(ip->ip_len);
debug("len=%d, v=%02x\n", len, ip->ip_hl_v & 0xff);
if ((ip->ip_hl_v & 0xf0) != 0x40) {
return;
}
/* Can't deal with fragments */
if (ip->ip_off & htons(IP_OFFS | IP_FLAGS_MFRAG)) {
return;
}
/* can't deal with headers > 20 bytes */
if ((ip->ip_hl_v & 0x0f) > 0x05) {
return;
}
if (!NetCksumOk((uchar *)ip, IP_HDR_SIZE_NO_UDP / 2)) {
puts ("checksum bad\n");
return;
}
tmp = NetReadIP(&ip->ip_dst);
if (NetOurIP && tmp != NetOurIP && tmp != 0xFFFFFFFF) {
#ifdef CONFIG_MCAST_TFTP
if (Mcast_addr != tmp)
#endif
return;
}
/*
* watch for ICMP host redirects
*
* There is no real handler code (yet). We just watch
* for ICMP host redirect messages. In case anybody
* sees these messages: please contact me
* (wd@denx.de), or - even better - send me the
* necessary fixes :-)
*
* Note: in all cases where I have seen this so far
* it was a problem with the router configuration,
* for instance when a router was configured in the
* BOOTP reply, but the TFTP server was on the same
* subnet. So this is probably a warning that your
* configuration might be wrong. But I'm not really
* sure if there aren't any other situations.
*/
if (ip->ip_p == IPPROTO_ICMP) {
ICMP_t *icmph = (ICMP_t *)&(ip->udp_src);
switch (icmph->type) {
case ICMP_REDIRECT:
if (icmph->code != ICMP_REDIR_HOST)
return;
printf (" ICMP Host Redirect to %pI4 ", &icmph->un.gateway);
return;
#if defined(CONFIG_CMD_PING)
case ICMP_ECHO_REPLY:
/*
* IP header OK. Pass the packet to the current handler.
*/
/* XXX point to ip packet */
(*packetHandler)((uchar *)ip, 0, 0, 0);
return;
case ICMP_ECHO_REQUEST:
debug("Got ICMP ECHO REQUEST, return %d bytes \n",
ETHER_HDR_SIZE + len);
memcpy (&et->et_dest[0], &et->et_src[0], 6);
memcpy (&et->et_src[ 0], NetOurEther, 6);
ip->ip_sum = 0;
ip->ip_off = 0;
NetCopyIP((void*)&ip->ip_dst, &ip->ip_src);
NetCopyIP((void*)&ip->ip_src, &NetOurIP);
ip->ip_sum = ~NetCksum((uchar *)ip, IP_HDR_SIZE_NO_UDP >> 1);
icmph->type = ICMP_ECHO_REPLY;
icmph->checksum = 0;
icmph->checksum = ~NetCksum((uchar *)icmph,
(len - IP_HDR_SIZE_NO_UDP) >> 1);
(void) eth_send((uchar *)et, ETHER_HDR_SIZE + len);
return;
#endif
default:
return;
}
} else if (ip->ip_p != IPPROTO_UDP) { /* Only UDP packets */
return;
}
#ifdef CONFIG_UDP_CHECKSUM
if (ip->udp_xsum != 0) {
ulong xsum;
ushort *sumptr;
ushort sumlen;
xsum = ip->ip_p;
xsum += (ntohs(ip->udp_len));
xsum += (ntohl(ip->ip_src) >> 16) & 0x0000ffff;
xsum += (ntohl(ip->ip_src) >> 0) & 0x0000ffff;
xsum += (ntohl(ip->ip_dst) >> 16) & 0x0000ffff;
xsum += (ntohl(ip->ip_dst) >> 0) & 0x0000ffff;
sumlen = ntohs(ip->udp_len);
sumptr = (ushort *) &(ip->udp_src);
while (sumlen > 1) {
ushort sumdata;
sumdata = *sumptr++;
xsum += ntohs(sumdata);
sumlen -= 2;
}
if (sumlen > 0) {
ushort sumdata;
sumdata = *(unsigned char *) sumptr;
sumdata = (sumdata << 8) & 0xff00;
xsum += sumdata;
}
while ((xsum >> 16) != 0) {
xsum = (xsum & 0x0000ffff) + ((xsum >> 16) & 0x0000ffff);
}
if ((xsum != 0x00000000) && (xsum != 0x0000ffff)) {
printf(" UDP wrong checksum %08lx %08x\n",
xsum, ntohs(ip->udp_xsum));
return;
}
}
#endif
#ifdef CONFIG_NETCONSOLE
nc_input_packet((uchar *)ip +IP_HDR_SIZE,
ntohs(ip->udp_dst),
ntohs(ip->udp_src),
ntohs(ip->udp_len) - 8);
#endif
/*
* IP header OK. Pass the packet to the current handler.
*/
(*packetHandler)((uchar *)ip +IP_HDR_SIZE,
ntohs(ip->udp_dst),
ntohs(ip->udp_src),
ntohs(ip->udp_len) - 8);
break;
}
}
/**********************************************************************/
/*
 * net_check_prereq() - verify that the environment settings required by
 * the given protocol (server IP, our IP, ethernet address) are present
 * before the protocol is started.
 *
 * Returns 0 when everything needed is available, 1 on a fatal
 * configuration error, and 2 when a retry on another ethernet device
 * was scheduled (CONFIG_NET_MULTI only).
 */
static int net_check_prereq (proto_t protocol)
{
	switch (protocol) {
		/* Fall through */
#if defined(CONFIG_CMD_PING)
	case PING:
		if (NetPingIP == 0) {
			puts ("*** ERROR: ping address not given\n");
			return (1);
		}
		goto common;
#endif
#if defined(CONFIG_CMD_SNTP)
	case SNTP:
		if (NetNtpServerIP == 0) {
			puts ("*** ERROR: NTP server address not given\n");
			return (1);
		}
		goto common;
#endif
#if defined(CONFIG_CMD_DNS)
	case DNS:
		if (NetOurDNSIP == 0) {
			puts("*** ERROR: DNS server address not given\n");
			return 1;
		}
		goto common;
#endif
#if defined(CONFIG_CMD_NFS)
	case NFS:
#endif
	case NETCONS:
	case TFTP:
		if (NetServerIP == 0) {
			puts ("*** ERROR: `serverip' not set\n");
			return (1);
		}
	/* Fix: the label must also be compiled in when only CONFIG_CMD_DNS
	 * is enabled, otherwise the "goto common" in the DNS case above
	 * references a non-existent label and the build breaks. */
#if defined(CONFIG_CMD_PING) || defined(CONFIG_CMD_SNTP) || \
	defined(CONFIG_CMD_DNS)
common:
#endif

		if (NetOurIP == 0) {
			puts ("*** ERROR: `ipaddr' not set\n");
			return (1);
		}
		/* Fall through */

	case DHCP:
	case RARP:
	case BOOTP:
	case CDP:
		if (memcmp (NetOurEther, "\0\0\0\0\0\0", 6) == 0) {
#ifdef CONFIG_NET_MULTI
			extern int eth_get_dev_index (void);
			int num = eth_get_dev_index ();

			switch (num) {
			case -1:
				puts ("*** ERROR: No ethernet found.\n");
				return (1);
			case 0:
				puts ("*** ERROR: `ethaddr' not set\n");
				break;
			default:
				printf ("*** ERROR: `eth%daddr' not set\n",
					num);
				break;
			}

			NetStartAgain ();
			return (2);
#else
			puts ("*** ERROR: `ethaddr' not set\n");
			return (1);
#endif
		}
		/* Fall through */
	default:
		return (0);
	}
	return (0);		/* OK */
}
/**********************************************************************/
/*
 * NetCksumOk() - check an IP-style ones-complement checksum over 'len'
 * 16-bit words.  A correct checksum folds to 0x0000 or 0xffff.
 */
int
NetCksumOk(uchar * ptr, int len)
{
	unsigned sum = NetCksum(ptr, len);

	return (sum == 0x0000) || (sum == 0xffff);
}
/*
 * NetCksum() - ones-complement sum of 'len' 16-bit words starting at ptr,
 * with the carries folded back in.  Returns the 16-bit folded sum.
 */
unsigned
NetCksum(uchar * ptr, int len)
{
	ushort *wp = (ushort *)ptr;
	ulong sum = 0;

	for (; len > 0; len--)
		sum += *wp++;

	/* Fold twice: the first fold can itself produce a carry. */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return sum & 0xffff;
}
/*
 * NetEthHdrSize() - size of the link-layer header for outgoing frames:
 * a plain Ethernet header, or the larger 802.1q-tagged one when a VLAN
 * is configured.
 */
int
NetEthHdrSize(void)
{
	ushort vlan = ntohs(NetOurVLAN);

	if (vlan == (ushort)-1)		/* unset -> untagged */
		vlan = VLAN_NONE;

	if ((vlan & VLAN_IDMASK) == VLAN_NONE)
		return ETHER_HDR_SIZE;
	return VLAN_ETHER_HDR_SIZE;
}
/*
 * NetSetEther() - fill in the Ethernet (optionally 802.1q-tagged) header
 * at xet with destination 'addr', our source address and protocol 'prot'.
 * Returns the number of header bytes written.
 */
int
NetSetEther(volatile uchar * xet, uchar * addr, uint prot)
{
	Ethernet_t *eth = (Ethernet_t *)xet;
	ushort vlan = ntohs(NetOurVLAN);

	if (vlan == (ushort)-1)		/* unset -> untagged */
		vlan = VLAN_NONE;

	memcpy (eth->et_dest, addr, 6);
	memcpy (eth->et_src, NetOurEther, 6);

	if ((vlan & VLAN_IDMASK) != VLAN_NONE) {
		/* Tagged frame: insert the 802.1q type/tag pair. */
		VLAN_Ethernet_t *veth = (VLAN_Ethernet_t *)xet;

		veth->vet_vlan_type = htons(PROT_VLAN);
		veth->vet_tag = htons((0 << 5) | (vlan & VLAN_IDMASK));
		veth->vet_type = htons(prot);
		return VLAN_ETHER_HDR_SIZE;
	}

	eth->et_protlen = htons(prot);
	return ETHER_HDR_SIZE;
}
/*
 * NetSetIP() - build an IPv4 + UDP header at xip for a datagram carrying
 * 'len' payload bytes from our address to 'dest', ports sport -> dport.
 * The caller must already have placed the payload at xip + IP_HDR_SIZE.
 */
void
NetSetIP(volatile uchar * xip, IPaddr_t dest, int dport, int sport, int len)
{
	IP_t *ip = (IP_t *)xip;

	/*
	 * If the data is an odd number of bytes, zero the
	 * byte after the last byte so that the checksum
	 * will work.
	 */
	if (len & 1)
		xip[IP_HDR_SIZE + len] = 0;

	/*
	 * Construct an IP and UDP header.
	 * (need to set no fragment bit - XXX)
	 */
	ip->ip_hl_v = 0x45;	/* IP_HDR_SIZE / 4 (not including UDP) */
	ip->ip_tos = 0;
	ip->ip_len = htons(IP_HDR_SIZE + len);
	ip->ip_id = htons(NetIPID++);	/* per-datagram identification */
	ip->ip_off = htons(IP_FLAGS_DFRAG);	/* Don't fragment */
	ip->ip_ttl = 255;
	ip->ip_p = 17;		/* UDP */
	ip->ip_sum = 0;		/* must be zero while summing the header */
	NetCopyIP((void*)&ip->ip_src, &NetOurIP); /* already in network byte order */
	NetCopyIP((void*)&ip->ip_dst, &dest);	   /* - "" - */
	ip->udp_src = htons(sport);
	ip->udp_dst = htons(dport);
	ip->udp_len = htons(8 + len);	/* UDP length = 8-byte header + payload */
	ip->udp_xsum = 0;		/* UDP checksum is optional over IPv4 */
	/* IP header checksum over the 20-byte header (10 16-bit words) */
	ip->ip_sum = ~NetCksum((uchar *)ip, IP_HDR_SIZE_NO_UDP / 2);
}
/*
 * copy_filename() - copy a (possibly double-quoted) filename into a
 * bounded buffer.
 *
 * @dst:  destination buffer; always NUL-terminated on return when size > 0
 * @src:  source string; a leading '"' is skipped and a closing '"' (or
 *        the buffer limit, or NUL) ends the copy
 * @size: size of @dst in bytes, including room for the terminating NUL
 *
 * Fix: return immediately for size <= 0.  The original unconditionally
 * wrote the terminating NUL, overflowing a zero-sized buffer.
 */
void copy_filename (char *dst, char *src, int size)
{
	if (size <= 0)
		return;

	/* A leading quote is not part of the name. */
	if (*src && (*src == '"')) {
		++src;
		--size;
	}

	/* Copy at most size-1 characters, stopping at NUL or closing quote. */
	while ((--size > 0) && *src && (*src != '"')) {
		*dst++ = *src++;
	}
	*dst = '\0';
}
#endif
#if defined(CONFIG_CMD_NFS) || defined(CONFIG_CMD_SNTP) || defined(CONFIG_CMD_DNS)
/*
* make port a little random, but use something trivial to compute
*/
/*
 * random_port() - pick a pseudo-random local UDP port.
 *
 * Uses the free-running timer as a cheap entropy source; the result is
 * always in the non-privileged range [1024, 1024 + 0x7fff].
 * (Also drops the stray double semicolon from the original.)
 */
unsigned int random_port(void)
{
	return 1024 + (get_timer(0) % 0x8000);
}
#endif
/*
 * ip_to_string() - render a network-order IPv4 address as dotted-decimal
 * text into the caller-supplied buffer s.
 */
void ip_to_string (IPaddr_t x, char *s)
{
	IPaddr_t host = ntohl (x);

	sprintf (s, "%d.%d.%d.%d",
		 (int) ((host >> 24) & 0xff),
		 (int) ((host >> 16) & 0xff),
		 (int) ((host >> 8) & 0xff),
		 (int) (host & 0xff));
}
/*
 * string_to_ip() - parse a dotted-quad "a.b.c.d" string into a
 * network-order IPv4 address.  Returns 0 for a NULL input; out-of-range
 * or missing octets degrade gracefully (each octet is masked to 8 bits).
 *
 * Fix: drop the dead "s ? ... : 0" re-checks inside the loop - s has
 * already been verified non-NULL and is never set to NULL afterwards.
 */
IPaddr_t string_to_ip(char *s)
{
	IPaddr_t addr;
	char *e;
	int i;

	if (s == NULL)
		return 0;

	for (addr = 0, i = 0; i < 4; ++i) {
		ulong val = simple_strtoul(s, &e, 10);

		addr <<= 8;
		addr |= (val & 0xFF);
		/* step over the '.' separator, but never past the NUL */
		s = (*e) ? e + 1 : e;
	}

	return htonl(addr);
}
/*
 * VLAN_to_string() - render a network-order VLAN id as text: "none" when
 * no VLAN is configured, the decimal id otherwise.
 */
void VLAN_to_string(ushort x, char *s)
{
	ushort vid = ntohs(x);

	if (vid == (ushort)-1)	/* unset -> treated as "none" */
		vid = VLAN_NONE;

	if (vid == VLAN_NONE)
		strcpy(s, "none");
	else
		sprintf(s, "%d", vid & VLAN_IDMASK);
}
/*
 * string_to_VLAN() - parse a decimal VLAN id string into network byte
 * order.  NULL or non-numeric input yields VLAN_NONE.
 */
ushort string_to_VLAN(char *s)
{
	ushort id = VLAN_NONE;

	if (s == NULL)
		return htons(VLAN_NONE);

	/* Only a leading digit marks a real VLAN id. */
	if (*s >= '0' && *s <= '9')
		id = (ushort)simple_strtoul(s, NULL, 10);

	return htons(id);
}
/* getenv_IPaddr() - fetch an environment variable and parse it as an IP. */
IPaddr_t getenv_IPaddr (char *var)
{
	char *val = getenv(var);

	return string_to_ip(val);
}
/* getenv_VLAN() - fetch an environment variable and parse it as a VLAN id. */
ushort getenv_VLAN(char *var)
{
	char *val = getenv(var);

	return string_to_VLAN(val);
}
| gpl-2.0 |
ArthySundaram/chromeos-3.8 | arch/sparc/kernel/leon_smp.c | 277 | 12376 | /* leon_smp.c: Sparc-Leon SMP support.
*
* based on sun4m_smp.c
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
* Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
*/
#include <asm/head.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq_regs.h>
#include <asm/traps.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>
#include <asm/asi.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include <asm/timer.h>
#include "kernel.h"
#include "irq.h"
extern ctxd_t *srmmu_ctx_table_phys;
static int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
void __cpuinit leon_configure_cache_smp(void);
static void leon_ipi_init(void);
/* IRQ number of LEON IPIs */
int leon_ipi_irq = LEON3_IRQ_IPI_DEFAULT;
/*
 * Atomically exchange *ptr with val using the SPARC "swapa" instruction.
 * The ASI_LEON_DCACHE_MISS address-space identifier makes the access
 * bypass the data cache.  Returns the previous value of *ptr.
 */
static inline unsigned long do_swap(volatile unsigned long *ptr,
				    unsigned long val)
{
	__asm__ __volatile__("swapa [%2] %3, %0\n\t" : "=&r"(val)
			     : "0"(val), "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
			     : "memory");

	return val;
}
/*
 * Entry point for a secondary CPU started by leon_boot_one_cpu().
 * Flushes caches/TLB, registers the per-cpu clockevent, reports itself
 * via cpu_callin_map and then spins until the master adds this CPU to
 * smp_commenced_mask before going online.
 */
void __cpuinit leon_callin(void)
{
	int cpuid = hard_smp_processor_id();

	local_ops->cache_all();
	local_ops->tlb_all();
	leon_configure_cache_smp();

	notify_cpu_starting(cpuid);

	/* Get our local ticker going. */
	register_percpu_ce(cpuid);

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_ops->cache_all();
	local_ops->tlb_all();

	/*
	 * Unblock the master CPU _only_ when the scheduler state
	 * of all secondary CPUs will be up-to-date, so after
	 * the SMP initialization the master will be just allowed
	 * to call the scheduler code.
	 * Allow master to continue.
	 */
	do_swap(&cpu_callin_map[cpuid], 1);

	local_ops->cache_all();
	local_ops->tlb_all();

	/* Fix idle thread fields.  Fix: the operand had been mangled to
	 * "&curren;t_set" by an HTML-entity encoding pass; restore the
	 * original "&current_set[cpuid]" (see upstream leon_smp.c). */
	__asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
			     : "memory" /* paranoid */);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		mb();

	local_irq_enable();
	set_cpu_online(cpuid, true);
}
/*
* Cycle through the processors asking the PROM to start each one.
*/
extern struct linux_prom_registers smp_penguin_ctable;
/*
 * Verify that the data cache configuration works with SMP: LEON bus
 * snooping only functions with a 4k cache set size.  Caches are disabled
 * when the set size is larger or when snooping is not enabled in the
 * hardware design.
 */
void __cpuinit leon_configure_cache_smp(void)
{
	unsigned long cfg = sparc_leon3_get_dcachecfg();
	int me = smp_processor_id();

	if (ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg) > 4) {
		printk(KERN_INFO "Note: SMP with snooping only works on 4k cache, found %dk(0x%x) on cpu %d, disabling caches\n",
		       (unsigned int)ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg),
		       (unsigned int)cfg, (unsigned int)me);
		sparc_leon3_disable_cache();
	} else {
		if (cfg & ASI_LEON3_SYSCTRL_CFG_SNOOPING) {
			sparc_leon3_enable_snooping();
		} else {
			printk(KERN_INFO "Note: You have to enable snooping in the vhdl model cpu %d, disabling caches\n",
			       me);
			sparc_leon3_disable_cache();
		}
	}

	local_ops->cache_all();
	local_ops->tlb_all();
}
/*
 * Program the irqmp broadcast mask so the IRQs in 'mask' are delivered
 * to all CPUs.  If the hardware lacks broadcast support, SMP cannot work:
 * BUG() when more than one CPU is present, otherwise continue anyway.
 */
void leon_smp_setbroadcast(unsigned int mask)
{
	int broadcast =
	    ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
	      LEON3_IRQMPSTATUS_BROADCAST) & 1);
	if (!broadcast) {
		prom_printf("######## !!!! The irqmp-ctrl must have broadcast enabled, smp wont work !!!!! ####### nr cpus: %d\n",
			    leon_smp_nrcpus());
		if (leon_smp_nrcpus() > 1) {
			BUG();
		} else {
			prom_printf("continue anyway\n");
			return;
		}
	}
	LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask);
}
/* Read the current IRQ broadcast mask from the irqmp controller. */
unsigned int leon_smp_getbroadcast(void)
{
	return LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast));
}
/* Number of CPUs in the system: the CPUNR status field holds ncpus - 1. */
int leon_smp_nrcpus(void)
{
	unsigned long status =
	    LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus));

	return ((status >> LEON3_IRQMPSTATUS_CPUNR) & 0xf) + 1;
}
/*
 * Boot-CPU side of SMP bring-up: install the IPI trap handler, unmask
 * the ticker/cross-call/IPI interrupts for ourselves, enable ticker
 * broadcast and configure the caches for snooping.
 */
void __init leon_boot_cpus(void)
{
	int nrcpu = leon_smp_nrcpus();
	int me = smp_processor_id();

	/* Setup IPI */
	leon_ipi_init();

	printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me,
	       (unsigned int)nrcpu, (unsigned int)NR_CPUS,
	       (unsigned int)&(leon3_irqctrl_regs->mpstatus));

	leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me);
	leon_enable_irq_cpu(LEON3_IRQ_TICKER, me);
	leon_enable_irq_cpu(leon_ipi_irq, me);

	/* Ticker must reach every CPU, not just the boot CPU. */
	leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);

	leon_configure_cache_smp();
	local_ops->cache_all();
}
/*
 * Start secondary CPU 'i' running 'idle' and wait (up to ~2s) for it to
 * report in via cpu_callin_map.  Returns 0 on success, -ENODEV when the
 * CPU never shows up.
 */
int __cpuinit leon_boot_one_cpu(int i, struct task_struct *idle)
{
	int timeout;

	current_set[i] = task_thread_info(idle);

	/* See trampoline.S:leon_smp_cpu_startup for details...
	 * Initialize the contexts table
	 * Since the call to prom_startcpu() trashes the structure,
	 * we need to re-initialize it for each cpu
	 */
	smp_penguin_ctable.which_io = 0;
	smp_penguin_ctable.phys_addr = (unsigned int)srmmu_ctx_table_phys;
	smp_penguin_ctable.reg_size = 0;

	/* whirrr, whirrr, whirrrrrrrrr... */
	printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i,
	       (unsigned int)&leon3_irqctrl_regs->mpstatus);
	local_ops->cache_all();

	/* Make sure all IRQs are off from the start for this new CPU */
	LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);

	/* Wake one CPU */
	LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i);

	/* wheee... it's going... poll for the callin flag */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_callin_map[i])
			break;
		udelay(200);
	}
	printk(KERN_INFO "Started CPU %d\n", (unsigned int)i);

	if (!(cpu_callin_map[i])) {
		printk(KERN_ERR "Processor %d is stuck.\n", i);
		return -ENODEV;
	} else {
		/* Let timer, cross-call and IPI IRQs reach the new CPU. */
		leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i);
		leon_enable_irq_cpu(LEON3_IRQ_TICKER, i);
		leon_enable_irq_cpu(leon_ipi_irq, i);
	}

	local_ops->cache_all();
	return 0;
}
/*
 * Final SMP bring-up step: build the circular online-cpu list used for
 * IRQ rotation, free trap tables of CPUs that turned out to be absent,
 * and mark the system ready for cross calls.
 */
void __init leon_smp_done(void)
{

	int i, first;
	int *prev;

	/* setup cpu list for irq rotation */
	first = 0;
	prev = &first;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i)) {
			*prev = i;
			prev = &cpu_data(i).next;
		}
	}
	*prev = first;		/* close the ring back to the first CPU */
	local_ops->cache_all();

	/* Free unneeded trap tables */
	if (!cpu_present(1)) {
		ClearPageReserved(virt_to_page(&trapbase_cpu1));
		init_page_count(virt_to_page(&trapbase_cpu1));
		free_page((unsigned long)&trapbase_cpu1);
		totalram_pages++;
		num_physpages++;
	}
	if (!cpu_present(2)) {
		ClearPageReserved(virt_to_page(&trapbase_cpu2));
		init_page_count(virt_to_page(&trapbase_cpu2));
		free_page((unsigned long)&trapbase_cpu2);
		totalram_pages++;
		num_physpages++;
	}
	if (!cpu_present(3)) {
		ClearPageReserved(virt_to_page(&trapbase_cpu3));
		init_page_count(virt_to_page(&trapbase_cpu3));
		free_page((unsigned long)&trapbase_cpu3);
		totalram_pages++;
		num_physpages++;
	}
	/* Ok, they are spinning and ready to go. */
	smp_processors_ready = 1;
}
/* IRQ rotation is not implemented on LEON; stub kept for API symmetry. */
void leon_irq_rotate(int cpu)
{
}
/* Per-cpu pending-IPI flags; all three IPI types share a single IRQ, so
 * the handler inspects these to see which work was requested. */
struct leon_ipi_work {
	int single;	/* smp_call_function_single() pending */
	int msk;	/* smp_call_function() (mask) pending */
	int resched;	/* reschedule request pending */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct leon_ipi_work, leon_ipi_work);
/* Initialize IPIs on the LEON, in order to save IRQ resources only one IRQ
 * is used for all three types of IPIs.  The IRQ number may be overridden
 * through an "ipi_num" property on the /ambapp0 device-tree node.
 */
static void __init leon_ipi_init(void)
{
	int cpu, len;
	struct leon_ipi_work *work;
	struct property *pp;
	struct device_node *rootnp;
	struct tt_entry *trap_table;
	unsigned long flags;

	/* Find IPI IRQ or stick with default value */
	rootnp = of_find_node_by_path("/ambapp0");
	if (rootnp) {
		pp = of_find_property(rootnp, "ipi_num", &len);
		if (pp && (*(int *)pp->value))
			leon_ipi_irq = *(int *)pp->value;
	}
	printk(KERN_INFO "leon: SMP IPIs at IRQ %d\n", leon_ipi_irq);

	/* Adjust so that we jump directly to smpleon_ipi */
	local_irq_save(flags);
	trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
	trap_table->inst_three += smpleon_ipi - real_irq_entry;
	local_ops->cache_all();
	local_irq_restore(flags);

	/* No IPI work is pending on any CPU initially. */
	for_each_possible_cpu(cpu) {
		work = &per_cpu(leon_ipi_work, cpu);
		work->single = work->msk = work->resched = 0;
	}
}
/* Force interrupt 'level' on CPU 'cpu' via the irqmp force register. */
static void leon_send_ipi(int cpu, int level)
{
	unsigned long irqmask = leon_get_irqmask(level);

	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], irqmask);
}
/* Request a "call function single" IPI on the target CPU. */
static void leon_ipi_single(int cpu)
{
	struct leon_ipi_work *w = &per_cpu(leon_ipi_work, cpu);

	/* Flag the work first, then raise the shared IPI IRQ. */
	w->single = 1;
	leon_send_ipi(cpu, leon_ipi_irq);
}
/* Request a "call function" (mask) IPI on the target CPU. */
static void leon_ipi_mask_one(int cpu)
{
	struct leon_ipi_work *w = &per_cpu(leon_ipi_work, cpu);

	/* Flag the work first, then raise the shared IPI IRQ. */
	w->msk = 1;
	leon_send_ipi(cpu, leon_ipi_irq);
}
/* Request a reschedule IPI on the target CPU. */
static void leon_ipi_resched(int cpu)
{
	struct leon_ipi_work *w = &per_cpu(leon_ipi_work, cpu);

	/* Flag the work first, then raise the IRQ (any IRQ causes resched). */
	w->resched = 1;
	leon_send_ipi(cpu, leon_ipi_irq);
}
/* Shared IPI handler: run and clear whatever work was flagged for us. */
void leonsmp_ipi_interrupt(void)
{
	struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work);

	if (work->single) {
		work->single = 0;
		smp_call_function_single_interrupt();
	}
	if (work->msk) {
		work->msk = 0;
		smp_call_function_interrupt();
	}
	if (work->resched) {
		work->resched = 0;
		smp_resched_interrupt();
	}
}
/* Parameter area for cross calls.  The initiating CPU stores func+args
 * here (serialized by cross_call_lock); each target CPU flags its
 * arrival and completion in its processors_in[]/processors_out[] slot. */
static struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[NR_CPUS];	/* Set when ipi entered. */
	unsigned long processors_out[NR_CPUS];	/* Set when ipi exited. */
} ccall_info;

static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently.
 *
 * Run 'func(arg1..arg4)' on every online CPU in 'mask' (excluding
 * ourselves) and spin until all targets have entered and left their
 * handler (see leon_cross_call_irq()).
 */
static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
			    unsigned long arg2, unsigned long arg3,
			    unsigned long arg4)
{
	if (smp_processors_ready) {
		register int high = NR_CPUS - 1;
		unsigned long flags;

		spin_lock_irqsave(&cross_call_lock, flags);

		{
			/* If you make changes here, make sure gcc generates proper code... */
			register smpfunc_t f asm("i0") = func;
			register unsigned long a1 asm("i1") = arg1;
			register unsigned long a2 asm("i2") = arg2;
			register unsigned long a3 asm("i3") = arg3;
			register unsigned long a4 asm("i4") = arg4;
			register unsigned long a5 asm("i5") = 0;

			/* Publish func + args into ccall_info with three
			 * register-pair stores ("std" writes 8 bytes). */
			__asm__ __volatile__("std %0, [%6]\n\t"
					     "std %2, [%6 + 8]\n\t"
					     "std %4, [%6 + 16]\n\t" : :
					     "r"(f), "r"(a1), "r"(a2), "r"(a3),
					     "r"(a4), "r"(a5),
					     "r"(&ccall_info.func));
		}

		/* Init receive/complete mapping, plus fire the IPI's off. */
		{
			register int i;

			/* Never target ourselves or offline CPUs. */
			cpumask_clear_cpu(smp_processor_id(), &mask);
			cpumask_and(&mask, cpu_online_mask, &mask);
			for (i = 0; i <= high; i++) {
				if (cpumask_test_cpu(i, &mask)) {
					ccall_info.processors_in[i] = 0;
					ccall_info.processors_out[i] = 0;
					leon_send_ipi(i, LEON3_IRQ_CROSS_CALL);
				}
			}
		}

		{
			register int i;

			/* First wait until every target has entered the
			 * handler, then until all of them have finished. */
			i = 0;
			do {
				if (!cpumask_test_cpu(i, &mask))
					continue;

				while (!ccall_info.processors_in[i])
					barrier();
			} while (++i <= high);

			i = 0;
			do {
				if (!cpumask_test_cpu(i, &mask))
					continue;

				while (!ccall_info.processors_out[i])
					barrier();
			} while (++i <= high);
		}

		spin_unlock_irqrestore(&cross_call_lock, flags);
	}
}
/* Running cross calls: per-CPU side of leon_cross_call().  Signal entry,
 * execute the requested function, then signal completion so the caller
 * can stop spinning on our slots. */
void leon_cross_call_irq(void)
{
	int i = smp_processor_id();

	ccall_info.processors_in[i] = 1;
	ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
			ccall_info.arg4, ccall_info.arg5);
	ccall_info.processors_out[i] = 1;
}
/* sparc32 SMP IPI entry points for the LEON platform. */
static const struct sparc32_ipi_ops leon_ipi_ops = {
	.cross_call = leon_cross_call,
	.resched = leon_ipi_resched,
	.single = leon_ipi_single,
	.mask_one = leon_ipi_mask_one,
};
/* Platform SMP init: patch the ipi15 trap entry for LEON and install
 * our IPI operations. */
void __init leon_init_smp(void)
{
	/* Patch ipi15 trap table */
	t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m);

	sparc32_ipi_ops = &leon_ipi_ops;
}
| gpl-2.0 |
mrlambchop/imx23-kernel | arch/arm/plat-samsung/s5p-irq-eint.c | 277 | 5221 | /*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* S5P - IRQ EINT support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <asm/hardware/vic.h>
#include <plat/regs-irqtype.h>
#include <mach/map.h>
#include <plat/cpu.h>
#include <plat/pm.h>
#include <plat/gpio-cfg.h>
#include <mach/regs-gpio.h>
/* Disable an EINT by setting its bit in the bank's mask register. */
static inline void s5p_irq_eint_mask(struct irq_data *data)
{
	u32 val = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));

	val |= eint_irq_to_bit(data->irq);
	__raw_writel(val, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
}
/* Enable an EINT by clearing its bit in the bank's mask register. */
static void s5p_irq_eint_unmask(struct irq_data *data)
{
	u32 val = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));

	val &= ~(eint_irq_to_bit(data->irq));
	__raw_writel(val, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
}
/* Acknowledge an EINT: pending bits are write-one-to-clear. */
static inline void s5p_irq_eint_ack(struct irq_data *data)
{
	__raw_writel(eint_irq_to_bit(data->irq),
		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}
/* Mask the line first, then clear any latched pending bit. */
static void s5p_irq_eint_maskack(struct irq_data *data)
{
	/* compiler should in-line these */
	s5p_irq_eint_mask(data);
	s5p_irq_eint_ack(data);
}
/*
 * Program the trigger type (edge/level) for an external interrupt and
 * switch its pad to EINT mode.  Returns 0 on success or -EINVAL for an
 * unsupported trigger type.
 */
static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;

	default:
		printk(KERN_ERR "No such irq type %d", type);
		return -EINVAL;
	}

	/* Each EINT has a 4-bit field; eight EINTs per control register. */
	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));

	/* Route the pad to its EINT function (banks of 8 pins each). */
	if ((0 <= offs) && (offs < 8))
		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);

	else if ((8 <= offs) && (offs < 16))
		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);

	else if ((16 <= offs) && (offs < 24))
		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);

	else if ((24 <= offs) && (offs < 32))
		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);

	else
		printk(KERN_ERR "No such irq number %d", offs);

	return 0;
}
/* irq_chip for EINT16-31, which are reached through the chained demux
 * handler rather than directly through the VIC. */
static struct irq_chip s5p_irq_eint = {
	.name		= "s5p-eint",
	.irq_mask	= s5p_irq_eint_mask,
	.irq_unmask	= s5p_irq_eint_unmask,
	.irq_mask_ack	= s5p_irq_eint_maskack,
	.irq_ack	= s5p_irq_eint_ack,
	.irq_set_type	= s5p_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};
/* s5p_irq_demux_eint
*
* This function demuxes the IRQ from the group0 external interrupts,
* from EINTs 16 to 31. It is designed to be inlined into the specific
* handler s5p_irq_demux_eintX_Y.
*
* Each EINT pend/mask registers handle eight of them.
*/
static inline void s5p_irq_demux_eint(unsigned int start)
{
	u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
	u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
	unsigned int irq;

	/* Only consider pending sources that are not masked. */
	status &= ~mask;
	status &= 0xff;		/* eight EINTs per pend register */

	while (status) {
		irq = fls(status) - 1;	/* handle highest pending bit first */
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}
/* Chained handler for the shared EINT16_31 input: demux both banks. */
static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	s5p_irq_demux_eint(IRQ_EINT(16));
	s5p_irq_demux_eint(IRQ_EINT(24));
}
static inline void s5p_irq_vic_eint_mask(struct irq_data *data)
{
void __iomem *base = irq_data_get_irq_chip_data(data);
s5p_irq_eint_mask(data);
writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE_CLEAR);
}
static void s5p_irq_vic_eint_unmask(struct irq_data *data)
{
void __iomem *base = irq_data_get_irq_chip_data(data);
s5p_irq_eint_unmask(data);
writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE);
}
/* Acknowledge an EINT0-15 interrupt (write-one-to-clear pending bit). */
static inline void s5p_irq_vic_eint_ack(struct irq_data *data)
{
	__raw_writel(eint_irq_to_bit(data->irq),
		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}
/* Mask (GPIO controller + VIC) and then acknowledge the interrupt. */
static void s5p_irq_vic_eint_maskack(struct irq_data *data)
{
	s5p_irq_vic_eint_mask(data);
	s5p_irq_vic_eint_ack(data);
}
/* irq_chip for EINT0-15, which go through the VIC in addition to the
 * GPIO-side mask/pend registers. */
static struct irq_chip s5p_irq_vic_eint = {
	.name		= "s5p_vic_eint",
	.irq_mask	= s5p_irq_vic_eint_mask,
	.irq_unmask	= s5p_irq_vic_eint_unmask,
	.irq_mask_ack	= s5p_irq_vic_eint_maskack,
	.irq_ack	= s5p_irq_vic_eint_ack,
	.irq_set_type	= s5p_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};
/*
 * Register irq_chips for all 32 external interrupts: EINT0-15 each have
 * a dedicated VIC input, while EINT16-31 share one chained input that is
 * demuxed by s5p_irq_demux_eint16_31().
 */
static int __init s5p_init_irq_eint(void)
{
	int irq;

	for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
		irq_set_chip(irq, &s5p_irq_vic_eint);

	for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
		irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}

	irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
	return 0;
}

arch_initcall(s5p_init_irq_eint);
| gpl-2.0 |
exynos4-sdk/kernel | arch/sparc/kernel/leon_pmc.c | 277 | 1994 | /* leon_pmc.c: LEON Power-down cpu_idle() handler
*
* Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
*/
#include <linux/init.h>
#include <linux/pm.h>
#include <asm/leon_amba.h>
#include <asm/cpu_type.h>
#include <asm/leon.h>
/* List of Systems that need fixup instructions around power-down instruction */
unsigned int pmc_leon_fixup_ids[] = {
	AEROFLEX_UT699,
	GAISLER_GR712RC,
	LEON4_NEXTREME1,
	0	/* zero-terminates the list */
};
/*
 * Return 1 when the running system's AMBA system id is in the
 * zero-terminated fixup table above, i.e. when the power-down
 * instruction needs the workaround sequence, otherwise 0.
 */
int pmc_leon_need_fixup(void)
{
	unsigned int systemid = amba_system_id >> 16;
	unsigned int *id;

	for (id = pmc_leon_fixup_ids; *id != 0; id++) {
		if (*id == systemid)
			return 1;
	}

	return 0;
}
/*
* CPU idle callback function for systems that need some extra handling
* See .../arch/sparc/kernel/process.c
*/
void pmc_leon_idle_fixup(void)
{
	/* Prepare an address to a non-cachable region. APB is always
	 * none-cachable. One instruction is executed after the Sleep
	 * instruction, we make sure to read the bus and throw away the
	 * value by accessing a non-cachable area, also we make sure the
	 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
	 */
	register unsigned int address = (unsigned int)leon3_irqctrl_regs;

	/* Writing %asr19 enters power-down; the following load is the one
	 * instruction executed after wake-up (its result is discarded). */
	__asm__ __volatile__ (
		"mov %%g0, %%asr19\n"
		"lda [%0] %1, %%g0\n"
		:
		: "r"(address), "i"(ASI_LEON_BYPASS));
}
/*
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
/* Plain idle callback: enter power-down by writing %asr19. */
void pmc_leon_idle(void)
{
	/* For systems without power-down, this will be no-op */
	__asm__ __volatile__ ("mov %g0, %asr19\n\t");
}
/* Install LEON Power Down function */
/* Install LEON Power Down function: pick the fixup variant on chips that
 * need the workaround around the power-down instruction. */
static int __init leon_pmc_install(void)
{
	if (sparc_cpu_model == sparc_leon) {
		/* Assign power management IDLE handler */
		if (pmc_leon_need_fixup())
			pm_idle = pmc_leon_idle_fixup;
		else
			pm_idle = pmc_leon_idle;

		printk(KERN_INFO "leon: power management initialized\n");
	}

	return 0;
}

/* This driver is not critical to the boot process, don't care
 * if initialized late.
 */
late_initcall(leon_pmc_install);
| gpl-2.0 |
figue/raspberry-pi-kernel | fs/xfs/xfs_qm_bhv.c | 277 | 4534 | /*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_qm.h"
/*
 * Clamp the statfs counters to a project dquot's soft (preferred) or
 * hard block/inode limits, so that statfs on a project-quota directory
 * reports the project's limits instead of the whole filesystem's.
 */
STATIC void
xfs_fill_statvfs_from_dquot(
	struct kstatfs		*statp,
	struct xfs_dquot	*dqp)
{
	__uint64_t		limit;

	limit = dqp->q_core.d_blk_softlimit ?
		be64_to_cpu(dqp->q_core.d_blk_softlimit) :
		be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	if (limit && statp->f_blocks > limit) {
		statp->f_blocks = limit;
		statp->f_bfree = statp->f_bavail =
			(statp->f_blocks > dqp->q_res_bcount) ?
			 (statp->f_blocks - dqp->q_res_bcount) : 0;
	}

	limit = dqp->q_core.d_ino_softlimit ?
		be64_to_cpu(dqp->q_core.d_ino_softlimit) :
		be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	if (limit && statp->f_files > limit) {
		statp->f_files = limit;
		/*
		 * Fix: free inodes are the project's limit minus its usage.
		 * The original subtracted the usage from the stale,
		 * filesystem-wide f_ffree, mixing two unrelated counters
		 * (same pattern as the block clamp above; fixed upstream).
		 */
		statp->f_ffree =
			(statp->f_files > dqp->q_res_icount) ?
			 (statp->f_files - dqp->q_res_icount) : 0;
	}
}
/*
* Directory tree accounting is implemented using project quotas, where
* the project identifier is inherited from parent directories.
* A statvfs (df, etc.) of a directory that is using project quota should
* return a statvfs of the project, not the entire filesystem.
* This makes such trees appear as if they are filesystems in themselves.
*/
void
xfs_qm_statvfs(
	xfs_inode_t		*ip,
	struct kstatfs		*statp)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_dquot_t		*dqp;

	/* Look up the project dquot for this inode; when found, shrink the
	 * statfs numbers to the project's quota limits and drop the ref. */
	if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) {
		xfs_fill_statvfs_from_dquot(statp, dqp);
		xfs_qm_dqput(dqp);
	}
}
/*
 * xfs_qm_newmount - decide how quota should be brought up at mount time.
 *
 * Compares the on-disk superblock quota flags with the quota state
 * requested by the mount options.  On return, *needquotamount and
 * *quotaflags tell the caller whether quota mounting must be deferred
 * until after log recovery (because a quotacheck will be needed).
 *
 * Returns 0 on success, or EPERM when the requested state differs from
 * the on-disk state and the device is read-only (changing it would
 * require a transaction on the ro device).
 */
int
xfs_qm_newmount(
	xfs_mount_t	*mp,
	uint		*needquotamount,
	uint		*quotaflags)
{
	uint		quotaondisk;
	uint		uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0;

	/* Quota is on disk only if the feature bit and accounting flags are set. */
	quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) &&
				(mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT);

	if (quotaondisk) {
		uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT;
		pquotaondisk = mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT;
		gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT;
	}

	/*
	 * If the device itself is read-only, we can't allow
	 * the user to change the state of quota on the mount -
	 * this would generate a transaction on the ro device,
	 * which would lead to an I/O error and shutdown
	 */
	if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
	    (!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) ||
	    (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
	    (!pquotaondisk && XFS_IS_PQUOTA_ON(mp)) ||
	    (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
	    (!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) &&
	    xfs_dev_is_read_only(mp, "changing quota state")) {
		/* Tell the user which combination would match the disk. */
		xfs_warn(mp, "please mount with%s%s%s%s.",
			(!quotaondisk ? "out quota" : ""),
			(uquotaondisk ? " usrquota" : ""),
			(pquotaondisk ? " prjquota" : ""),
			(gquotaondisk ? " grpquota" : ""));
		return XFS_ERROR(EPERM);
	}

	if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
		/*
		 * Call mount_quotas at this point only if we won't have to do
		 * a quotacheck.
		 */
		if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
			/*
			 * If an error occurred, qm_mount_quotas code
			 * has already disabled quotas. So, just finish
			 * mounting, and get on with the boring life
			 * without disk quotas.
			 */
			xfs_qm_mount_quotas(mp);
		} else {
			/*
			 * Clear the quota flags, but remember them. This
			 * is so that the quota code doesn't get invoked
			 * before we're ready. This can happen when an
			 * inode goes inactive and wants to free blocks,
			 * or via xfs_log_mount_finish.
			 */
			*needquotamount = B_TRUE;
			*quotaflags = mp->m_qflags;
			mp->m_qflags = 0;
		}
	}

	return 0;
}
| gpl-2.0 |
barome/Nexus-S | drivers/serial/clps711x.c | 1813 | 13342 | /*
* linux/drivers/char/clps711x.c
*
* Driver for CLPS711x serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_CLPS711X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/hardware/clps7111.h>
#define UART_NR 2
#define SERIAL_CLPS711X_MAJOR 204
#define SERIAL_CLPS711X_MINOR 40
#define SERIAL_CLPS711X_NR UART_NR
/*
* We use the relevant SYSCON register as a base address for these ports.
*/
#define UBRLCR(port) ((port)->iobase + UBRLCR1 - SYSCON1)
#define UARTDR(port) ((port)->iobase + UARTDR1 - SYSCON1)
#define SYSFLG(port) ((port)->iobase + SYSFLG1 - SYSCON1)
#define SYSCON(port) ((port)->iobase + SYSCON1 - SYSCON1)
#define TX_IRQ(port) ((port)->irq)
#define RX_IRQ(port) ((port)->irq + 1)
#define UART_ANY_ERR (UARTDR_FRMERR | UARTDR_PARERR | UARTDR_OVERR)
#define tx_enabled(port) ((port)->unused[0])
/* Stop transmission: mask the TX interrupt if it is currently enabled. */
static void clps711xuart_stop_tx(struct uart_port *port)
{
	if (!tx_enabled(port))
		return;

	disable_irq(TX_IRQ(port));
	tx_enabled(port) = 0;
}
/* Start transmission: unmask the TX interrupt; the ISR drains the ring. */
static void clps711xuart_start_tx(struct uart_port *port)
{
	if (tx_enabled(port))
		return;

	enable_irq(TX_IRQ(port));
	tx_enabled(port) = 1;
}
/* Stop receiving: mask the RX interrupt line for this port. */
static void clps711xuart_stop_rx(struct uart_port *port)
{
	disable_irq(RX_IRQ(port));
}
/* Modem-status interrupts are not supported: intentional no-op. */
static void clps711xuart_enable_ms(struct uart_port *port)
{
}
/*
 * RX interrupt handler: drain every character from the receive FIFO,
 * account errors, and push the data up to the tty layer.
 */
static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	struct tty_struct *tty = port->state->port.tty;
	unsigned int status, ch, flg;

	status = clps_readl(SYSFLG(port));
	/* Loop until the "RX FIFO empty" flag is raised. */
	while (!(status & SYSFLG_URXFE)) {
		/* The data register carries the error flags in its high bits. */
		ch = clps_readl(UARTDR(port));

		port->icount.rx++;

		flg = TTY_NORMAL;

		/*
		 * Note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(ch & UART_ANY_ERR)) {
			if (ch & UARTDR_PARERR)
				port->icount.parity++;
			else if (ch & UARTDR_FRMERR)
				port->icount.frame++;
			if (ch & UARTDR_OVERR)
				port->icount.overrun++;

			/* Strip error bits the user asked us not to report. */
			ch &= port->read_status_mask;

			if (ch & UARTDR_PARERR)
				flg = TTY_PARITY;
			else if (ch & UARTDR_FRMERR)
				flg = TTY_FRAME;

#ifdef SUPPORT_SYSRQ
			port->sysrq = 0;
#endif
		}

		if (uart_handle_sysrq_char(port, ch))
			goto ignore_char;

		/*
		 * CHECK: does overrun affect the current character?
		 * ASSUMPTION: it does not.
		 */
		uart_insert_char(port, ch, UARTDR_OVERR, ch, flg);

	ignore_char:
		status = clps_readl(SYSFLG(port));
	}
	tty_flip_buffer_push(tty);
	return IRQ_HANDLED;
}
/*
 * TX interrupt handler: send any pending flow-control (x_char) byte
 * first, then feed up to half a FIFO of data from the circular transmit
 * buffer, waking writers and masking the interrupt when the buffer drains.
 */
static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	struct circ_buf *xmit = &port->state->xmit;
	int count;

	/* A pending XON/XOFF character takes priority over buffered data. */
	if (port->x_char) {
		clps_writel(port->x_char, UARTDR(port));
		port->icount.tx++;
		port->x_char = 0;
		return IRQ_HANDLED;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		clps711xuart_stop_tx(port);
		return IRQ_HANDLED;
	}

	/* Write at most half the FIFO per interrupt. */
	count = port->fifosize >> 1;
	do {
		clps_writel(xmit->buf[xmit->tail], UARTDR(port));
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (--count > 0);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		clps711xuart_stop_tx(port);

	return IRQ_HANDLED;
}
/* Report TIOCSER_TEMT when the UART transmitter is completely idle. */
static unsigned int clps711xuart_tx_empty(struct uart_port *port)
{
	if (clps_readl(SYSFLG(port)) & SYSFLG_UBUSY)
		return 0;

	return TIOCSER_TEMT;
}
/*
 * Read the modem-control inputs.  Only UART1 has DCD/DSR/CTS lines
 * wired up; the second port always reports no signals asserted.
 */
static unsigned int clps711xuart_get_mctrl(struct uart_port *port)
{
	unsigned int result = 0;

	if (SYSFLG(port) == SYSFLG1) {
		unsigned int status = clps_readl(SYSFLG1);

		if (status & SYSFLG1_DCD)
			result |= TIOCM_CAR;
		if (status & SYSFLG1_DSR)
			result |= TIOCM_DSR;
		if (status & SYSFLG1_CTS)
			result |= TIOCM_CTS;
	}

	return result;
}
/* The hardware cannot drive modem-control outputs: intentional no-op. */
static void
clps711xuart_set_mctrl_null(struct uart_port *port, unsigned int mctrl)
{
}
/* Assert (break_state == -1) or clear the break condition on the line. */
static void clps711xuart_break_ctl(struct uart_port *port, int break_state)
{
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&port->lock, flags);

	val = clps_readl(UBRLCR(port));
	if (break_state == -1)
		val |= UBRLCR_BREAK;
	else
		val &= ~UBRLCR_BREAK;
	clps_writel(val, UBRLCR(port));

	spin_unlock_irqrestore(&port->lock, flags);
}
/*
 * Bring the port up: mark TX as enabled, claim the TX and RX interrupt
 * lines, then set the UART enable bit in SYSCON.
 * Returns 0 on success or the request_irq() error code.
 */
static int clps711xuart_startup(struct uart_port *port)
{
	unsigned int syscon;
	int retval;

	/* IRQs start unmasked, so record TX as currently enabled. */
	tx_enabled(port) = 1;

	/*
	 * Allocate the IRQs
	 */
	retval = request_irq(TX_IRQ(port), clps711xuart_int_tx, 0,
			     "clps711xuart_tx", port);
	if (retval)
		return retval;

	retval = request_irq(RX_IRQ(port), clps711xuart_int_rx, 0,
			     "clps711xuart_rx", port);
	if (retval) {
		/* Undo the TX allocation on partial failure. */
		free_irq(TX_IRQ(port), port);
		return retval;
	}

	/*
	 * enable the port
	 */
	syscon = clps_readl(SYSCON(port));
	syscon |= SYSCON_UARTEN;
	clps_writel(syscon, SYSCON(port));

	return 0;
}
/*
 * Shut the port down: release both interrupt lines, clear the UART
 * enable bit, and disable break signalling and the FIFOs.
 */
static void clps711xuart_shutdown(struct uart_port *port)
{
	unsigned int ubrlcr, syscon;

	/*
	 * Free the interrupt
	 */
	free_irq(TX_IRQ(port), port);	/* TX interrupt */
	free_irq(RX_IRQ(port), port);	/* RX interrupt */

	/*
	 * disable the port
	 */
	syscon = clps_readl(SYSCON(port));
	syscon &= ~SYSCON_UARTEN;
	clps_writel(syscon, SYSCON(port));

	/*
	 * disable break condition and fifos
	 */
	ubrlcr = clps_readl(UBRLCR(port));
	ubrlcr &= ~(UBRLCR_FIFOEN | UBRLCR_BREAK);
	clps_writel(ubrlcr, UBRLCR(port));
}
/*
 * Apply new termios settings: program word length, stop bits, parity
 * and baud divisor into UBRLCR, and recompute the status masks used by
 * the RX handler to report or ignore error characters.
 */
static void
clps711xuart_set_termios(struct uart_port *port, struct ktermios *termios,
			 struct ktermios *old)
{
	unsigned int ubrlcr, baud, quot;
	unsigned long flags;

	/*
	 * We don't implement CREAD.
	 */
	termios->c_cflag |= CREAD;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		ubrlcr = UBRLCR_WRDLEN5;
		break;
	case CS6:
		ubrlcr = UBRLCR_WRDLEN6;
		break;
	case CS7:
		ubrlcr = UBRLCR_WRDLEN7;
		break;
	default: // CS8
		ubrlcr = UBRLCR_WRDLEN8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		ubrlcr |= UBRLCR_XSTOP;
	if (termios->c_cflag & PARENB) {
		ubrlcr |= UBRLCR_PRTEN;
		if (!(termios->c_cflag & PARODD))
			ubrlcr |= UBRLCR_EVENPRT;
	}
	/* Enable the FIFOs on FIFO-capable ports. */
	if (port->fifosize > 1)
		ubrlcr |= UBRLCR_FIFOEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	port->read_status_mask = UARTDR_OVERR;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UARTDR_PARERR | UARTDR_FRMERR;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UARTDR_FRMERR | UARTDR_PARERR;
	if (termios->c_iflag & IGNBRK) {
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns to (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UARTDR_OVERR;
	}

	/* The UBRLCR baud field stores (divisor - 1). */
	quot -= 1;

	clps_writel(ubrlcr | quot, UBRLCR(port));

	spin_unlock_irqrestore(&port->lock, flags);
}
/* Human-readable port type for /proc and setserial. */
static const char *clps711xuart_type(struct uart_port *port)
{
	if (port->type == PORT_CLPS711X)
		return "CLPS711x";

	return NULL;
}
/*
* Configure/autoconfigure the port.
*/
/* Autoconfiguration: there is only one possible type for these ports. */
static void clps711xuart_config_port(struct uart_port *port, int flags)
{
	if (!(flags & UART_CONFIG_TYPE))
		return;

	port->type = PORT_CLPS711X;
}
/* Registers live in the shared SYSCON region: nothing to release. */
static void clps711xuart_release_port(struct uart_port *port)
{
}
/* Registers live in the shared SYSCON region: nothing to claim. */
static int clps711xuart_request_port(struct uart_port *port)
{
	return 0;
}
/* uart_ops glue binding the serial core to this driver's callbacks. */
static struct uart_ops clps711x_pops = {
	.tx_empty	= clps711xuart_tx_empty,
	.set_mctrl	= clps711xuart_set_mctrl_null,
	.get_mctrl	= clps711xuart_get_mctrl,
	.stop_tx	= clps711xuart_stop_tx,
	.start_tx	= clps711xuart_start_tx,
	.stop_rx	= clps711xuart_stop_rx,
	.enable_ms	= clps711xuart_enable_ms,
	.break_ctl	= clps711xuart_break_ctl,
	.startup	= clps711xuart_startup,
	.shutdown	= clps711xuart_shutdown,
	.set_termios	= clps711xuart_set_termios,
	.type		= clps711xuart_type,
	.config_port	= clps711xuart_config_port,
	.release_port	= clps711xuart_release_port,
	.request_port	= clps711xuart_request_port,
};
/*
 * The two on-chip UARTs.  iobase holds the port's SYSCON register
 * address, used as the base for the register-offset macros above;
 * irq is the TX interrupt, with RX on the following line (see RX_IRQ()).
 */
static struct uart_port clps711x_ports[UART_NR] = {
	{
		.iobase		= SYSCON1,
		.irq		= IRQ_UTXINT1, /* IRQ_URXINT1, IRQ_UMSINT */
		.uartclk	= 3686400,
		.fifosize	= 16,
		.ops		= &clps711x_pops,
		.line		= 0,
		.flags		= UPF_BOOT_AUTOCONF,
	},
	{
		.iobase		= SYSCON2,
		.irq		= IRQ_UTXINT2, /* IRQ_URXINT2 */
		.uartclk	= 3686400,
		.fifosize	= 16,
		.ops		= &clps711x_pops,
		.line		= 1,
		.flags		= UPF_BOOT_AUTOCONF,
	}
};
#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
/* Busy-wait for TX FIFO space, then queue one console character. */
static void clps711xuart_console_putchar(struct uart_port *port, int ch)
{
	do {
		barrier();
	} while (clps_readl(SYSFLG(port)) & SYSFLG_UTXFF);

	clps_writel(ch, UARTDR(port));
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console_lock must be held when we get here.
*
* Note that this is called with interrupts already disabled
*/
static void
clps711xuart_console_write(struct console *co, const char *s,
			   unsigned int count)
{
	struct uart_port *port = clps711x_ports + co->index;
	unsigned int status, syscon;

	/*
	 * Ensure that the port is enabled.
	 */
	syscon = clps_readl(SYSCON(port));
	clps_writel(syscon | SYSCON_UARTEN, SYSCON(port));

	uart_console_write(port, s, count, clps711xuart_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore the uart state.
	 */
	do {
		status = clps_readl(SYSFLG(port));
	} while (status & SYSFLG_UBUSY);

	/* Restore the enable state saved before we forced UARTEN on. */
	clps_writel(syscon, SYSCON(port));
}
/*
 * Recover baud/parity/word-length from the hardware as programmed by
 * the boot loader.  The out-parameters are left untouched if the UART
 * is not enabled.
 */
static void __init
clps711xuart_console_get_options(struct uart_port *port, int *baud,
				 int *parity, int *bits)
{
	unsigned int ubrlcr, quot;

	/* Only trust the register contents if the UART is running. */
	if (!(clps_readl(SYSCON(port)) & SYSCON_UARTEN))
		return;

	ubrlcr = clps_readl(UBRLCR(port));

	if (ubrlcr & UBRLCR_PRTEN)
		*parity = (ubrlcr & UBRLCR_EVENPRT) ? 'e' : 'o';
	else
		*parity = 'n';

	*bits = ((ubrlcr & UBRLCR_WRDLEN_MASK) == UBRLCR_WRDLEN7) ? 7 : 8;

	quot = ubrlcr & UBRLCR_BAUD_MASK;
	*baud = port->uartclk / (16 * (quot + 1));
}
/*
 * Console setup: resolve the port for co->index, then apply either the
 * user-supplied option string or whatever the boot loader has already
 * programmed into the hardware.
 */
static int __init clps711xuart_console_setup(struct console *co, char *options)
{
	struct uart_port *port;
	int baud = 38400;	/* defaults used when nothing else is known */
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	port = uart_get_console(clps711x_ports, UART_NR, co);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		clps711xuart_console_get_options(port, &baud, &parity, &bits);

	return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver clps711x_reg;
/* Console descriptor; index -1 lets the core pick the port at register time. */
static struct console clps711x_console = {
	.name		= "ttyCL",
	.write		= clps711xuart_console_write,
	.device		= uart_console_device,
	.setup		= clps711xuart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &clps711x_reg,
};
/* Early console registration; run via console_initcall below. */
static int __init clps711xuart_console_init(void)
{
	register_console(&clps711x_console);
	return 0;
}
console_initcall(clps711xuart_console_init);
#define CLPS711X_CONSOLE &clps711x_console
#else
#define CLPS711X_CONSOLE NULL
#endif
/* uart_driver descriptor: two ttyCL ports under major 204, minor 40. */
static struct uart_driver clps711x_reg = {
	.driver_name		= "ttyCL",
	.dev_name		= "ttyCL",
	.major			= SERIAL_CLPS711X_MAJOR,
	.minor			= SERIAL_CLPS711X_MINOR,
	.nr			= UART_NR,

	.cons			= CLPS711X_CONSOLE,
};
/*
 * Module init: register the uart driver, then add both ports.
 * Returns 0 on success or the uart_register_driver() error.
 */
static int __init clps711xuart_init(void)
{
	int ret, i;

	printk(KERN_INFO "Serial: CLPS711x driver\n");

	ret = uart_register_driver(&clps711x_reg);
	if (ret)
		return ret;

	for (i = 0; i < UART_NR; i++) {
		/*
		 * Fix: the return value of uart_add_one_port() was
		 * silently discarded; at least report a failed port.
		 */
		ret = uart_add_one_port(&clps711x_reg, &clps711x_ports[i]);
		if (ret)
			printk(KERN_WARNING
			       "clps711x: failed to add port %d: %d\n",
			       i, ret);
	}

	return 0;
}
/* Module exit: remove every port, then unregister the uart driver. */
static void __exit clps711xuart_exit(void)
{
	int i;

	for (i = 0; i < UART_NR; i++)
		uart_remove_one_port(&clps711x_reg, &clps711x_ports[i]);

	uart_unregister_driver(&clps711x_reg);
}
module_init(clps711xuart_init);
module_exit(clps711xuart_exit);
MODULE_AUTHOR("Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("CLPS-711x generic serial driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(SERIAL_CLPS711X_MAJOR, SERIAL_CLPS711X_MINOR);
| gpl-2.0 |
Menpiko/SnaPKernel-N | drivers/gpio/gpio-wm8350.c | 2325 | 4525 | /*
* gpiolib support for Wolfson WM835x PMICs
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/mfd/wm8350/core.h>
#include <linux/mfd/wm8350/gpio.h>
/* Per-device state: parent PMIC handle plus this instance's gpio_chip. */
struct wm8350_gpio_data {
	struct wm8350 *wm8350;		/* parent MFD device */
	struct gpio_chip gpio_chip;	/* gpiolib registration, embeds us */
};
/* Recover the driver state that embeds @chip as its gpio_chip member. */
static inline struct wm8350_gpio_data *to_wm8350_gpio(struct gpio_chip *chip)
{
	return container_of(chip, struct wm8350_gpio_data, gpio_chip);
}
/* Configure GPIO @offset as an input by setting its I/O-configuration bit. */
static int wm8350_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
	struct wm8350_gpio_data *data = to_wm8350_gpio(chip);

	return wm8350_set_bits(data->wm8350, WM8350_GPIO_CONFIGURATION_I_O,
			       1 << offset);
}
/*
 * Read the current level of GPIO @offset.
 * Returns 0/1, or a negative errno if the register read fails.
 */
static int wm8350_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct wm8350_gpio_data *data = to_wm8350_gpio(chip);
	int level;

	level = wm8350_reg_read(data->wm8350, WM8350_GPIO_LEVEL);
	if (level < 0)
		return level;

	return !!(level & (1 << offset));
}
static void wm8350_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct wm8350_gpio_data *wm8350_gpio = to_wm8350_gpio(chip);
struct wm8350 *wm8350 = wm8350_gpio->wm8350;
if (value)
wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
else
wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
}
/*
 * Configure GPIO @offset as an output driving @value.
 * Returns 0 on success or a negative errno from the register write.
 */
static int wm8350_gpio_direction_out(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	struct wm8350_gpio_data *data = to_wm8350_gpio(chip);
	int ret;

	ret = wm8350_clear_bits(data->wm8350, WM8350_GPIO_CONFIGURATION_I_O,
				1 << offset);
	if (ret < 0)
		return ret;

	/* There is no atomic direction+level write; set the level after. */
	wm8350_gpio_set(chip, offset, value);

	return 0;
}
static int wm8350_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct wm8350_gpio_data *wm8350_gpio = to_wm8350_gpio(chip);
struct wm8350 *wm8350 = wm8350_gpio->wm8350;
if (!wm8350->irq_base)
return -EINVAL;
return wm8350->irq_base + WM8350_IRQ_GPIO(offset);
}
/*
 * Template gpio_chip copied into each probed instance.  can_sleep is set
 * because every callback goes through wm8350 register I/O, which may sleep.
 */
static struct gpio_chip template_chip = {
	.label			= "wm8350",
	.owner			= THIS_MODULE,
	.direction_input	= wm8350_gpio_direction_in,
	.get			= wm8350_gpio_get,
	.direction_output	= wm8350_gpio_direction_out,
	.set			= wm8350_gpio_set,
	.to_irq			= wm8350_gpio_to_irq,
	.can_sleep		= 1,
};
/*
 * Bind to the wm8350 MFD child device: allocate per-instance state,
 * clone the template chip (13 GPIOs), honour a platform-supplied GPIO
 * base (dynamic otherwise) and register with gpiolib.
 */
static int wm8350_gpio_probe(struct platform_device *pdev)
{
	struct wm8350 *wm8350 = dev_get_drvdata(pdev->dev.parent);
	struct wm8350_platform_data *pdata = wm8350->dev->platform_data;
	struct wm8350_gpio_data *wm8350_gpio;
	int ret;

	/* devm allocation: freed automatically when the device detaches. */
	wm8350_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm8350_gpio),
				   GFP_KERNEL);
	if (wm8350_gpio == NULL)
		return -ENOMEM;

	wm8350_gpio->wm8350 = wm8350;
	wm8350_gpio->gpio_chip = template_chip;
	wm8350_gpio->gpio_chip.ngpio = 13;
	wm8350_gpio->gpio_chip.dev = &pdev->dev;
	if (pdata && pdata->gpio_base)
		wm8350_gpio->gpio_chip.base = pdata->gpio_base;
	else
		wm8350_gpio->gpio_chip.base = -1;	/* dynamic numbering */

	ret = gpiochip_add(&wm8350_gpio->gpio_chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, wm8350_gpio);

	return ret;
}
/* Unregister from gpiolib; state itself is devm-managed. */
static int wm8350_gpio_remove(struct platform_device *pdev)
{
	struct wm8350_gpio_data *wm8350_gpio = platform_get_drvdata(pdev);

	return gpiochip_remove(&wm8350_gpio->gpio_chip);
}
/* Platform driver matching the "wm8350-gpio" MFD cell. */
static struct platform_driver wm8350_gpio_driver = {
	.driver.name	= "wm8350-gpio",
	.driver.owner	= THIS_MODULE,
	.probe		= wm8350_gpio_probe,
	.remove		= wm8350_gpio_remove,
};
/* Registered at subsys_initcall so GPIOs exist before consumers probe. */
static int __init wm8350_gpio_init(void)
{
	return platform_driver_register(&wm8350_gpio_driver);
}
subsys_initcall(wm8350_gpio_init);
/* Module exit: drop the platform driver registration. */
static void __exit wm8350_gpio_exit(void)
{
	platform_driver_unregister(&wm8350_gpio_driver);
}
module_exit(wm8350_gpio_exit);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("GPIO interface for WM8350 PMICs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8350-gpio");
| gpl-2.0 |
engine95/navelC-990 | drivers/net/wireless/iwlegacy/4965-mac.c | 2837 | 187964 | /******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/mac80211.h>
#include <asm/div64.h>
#define DRV_NAME "iwl4965"
#include "common.h"
#include "4965.h"
/******************************************************************************
*
* module boiler plate
*
******************************************************************************/
/*
* module name, copyright, version, etc.
*/
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
#ifdef CONFIG_IWLEGACY_DEBUG
#define VD "d"
#else
#define VD
#endif
#define DRV_VERSION IWLWIFI_VERSION VD
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
/*
 * If a single frame failed with the RFKILL-flush status, firmware wants
 * all pending frames flushed: queue the tx_flush work, unless the driver
 * is already shutting down.
 */
void
il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IL_ERR("Tx flush command to flush out all frames\n");
		if (!test_bit(S_EXIT_PENDING, &il->status))
			queue_work(il->workqueue, &il->tx_flush);
	}
}
/*
* EEPROM
*/
/* Module-parameter defaults: 8K A-MSDU RX buffers and fw restart enabled. */
struct il_mod_params il4965_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};
/*
 * Return the Rx queue to its pristine state: unmap and free any page
 * still attached to a pool entry, move every pool entry onto rx_used,
 * clear the device-visible RBD slots and zero the read/write/free
 * counters.  The queue must be replenished before use again.
 */
void
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
/*
 * Program the device's Rx DMA channel: stop it, point it at the RBD
 * circular buffer and the Rx-status write-back area, then re-enable it
 * with the configured buffer size (4K or 8K per the amsdu_size_8K
 * module parameter).  Always returns 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* 256 RBDs */
	u32 rb_timeout = 0;

	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write idx */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
/* Select V_MAIN as the device power source via the APMG control register. */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
	/*
	 * (for documentation purposes)
	 * to set power to V_AUX, do:

	 if (pci_pme_capable(il->pci_dev, PCI_D3cold))
	 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
	 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
	 ~APMG_PS_CTRL_MSK_PWR_SRC);
	 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/*
 * Bring the NIC hardware up: APM init, power-source selection, NIC
 * config, then allocate (or reset) the Rx queue and all Tx/command
 * queues.  Returns 0 on success or a negative errno on allocation
 * failure.
 */
int
il4965_hw_nic_init(struct il_priv *il)
{
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;
	int ret;

	spin_lock_irqsave(&il->lock, flags);
	il_apm_init(il);
	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_set_pwr_vmain(il);
	il4965_nic_config(il);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = il_rx_queue_alloc(il);
		if (ret) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il4965_rx_queue_reset(il, rxq);

	il4965_rx_replenish(il);

	il4965_rx_init(il, rxq);

	spin_lock_irqsave(&il->lock, flags);

	rxq->need_update = 1;
	il_rx_queue_update_write_ptr(il, rxq);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!il->txq) {
		ret = il4965_txq_ctx_alloc(il);
		if (ret)
			return ret;
	} else
		il4965_txq_ctx_reset(il);

	set_bit(S_INIT, &il->status);

	return 0;
}
/**
* il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
static inline __le32
il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
{
	/* The device stores 256-byte-aligned addresses >> 8, little-endian. */
	u32 rbd_ptr = (u32) (dma_addr >> 8);

	return cpu_to_le32(rbd_ptr);
}
/**
* il4965_rx_queue_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
* and we have free pre-allocated buffers, fill the ranks as much
* as we can, pulling from rx_free.
*
* This moves the 'write' idx forward to catch up with 'processed', and
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
/**
* il4965_rx_replenish - Move all used packet from rx_used to rx_free
*
* When moving to rx_free an SKB is allocated for the slot.
*
* Also restock the Rx queue via il_rx_queue_restock.
* This is called as a scheduled work item (except for during initialization)
*/
/*
 * Move entries from rx_used to rx_free, allocating and DMA-mapping a
 * fresh page (or multi-page block, per rx_page_order) for each.
 * @priority selects GFP_KERNEL (may sleep) or GFP_ATOMIC.  Stops when
 * rx_used is empty or allocation fails; the caller restocks afterwards.
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Only warn about allocation failures when buffers run low. */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* Re-check: rx_used may have drained while unlocked. */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
/* Sleeping replenish: allocate with GFP_KERNEL, then restock under the lock. */
void
il4965_rx_replenish(struct il_priv *il)
{
	unsigned long flags;

	il4965_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il4965_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}
/* Atomic-context replenish (e.g. from the RX path): GFP_ATOMIC, no lock. */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
* If an SKB has been detached, the POOL needs to have its SKB set to NULL
* This free routine walks the list of POOL entries and if SKB is set to
* non NULL it is unmapped and freed
*/
void
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;

	/* Unmap and release any page still held by a pool entry. */
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* Release the RBD ring and the status write-back area. */
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
/*
 * Stop the Rx DMA channel and wait (up to 1 ms) for it to go idle.
 * A timeout is only logged; the function always returns 0.
 */
int
il4965_rxq_stop(struct il_priv *il)
{
	int ret;

	_il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   1000);
	if (ret < 0)
		IL_ERR("Can't stop Rx DMA.\n");

	return 0;
}
/*
 * Translate a hardware rate_n_flags value into a mac80211 rate index.
 * HT rates return the MCS number directly; legacy rates are looked up
 * in the PLCP table, offset for the 5 GHz band.  Returns -1 when no
 * legacy rate matches.
 */
int
il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
	int band_offset;
	int i;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		return rate_n_flags & 0xff;

	/* Legacy rate format, search for match in table */
	band_offset = (band == IEEE80211_BAND_5GHZ) ? IL_FIRST_OFDM_RATE : 0;
	for (i = band_offset; i < RATE_COUNT_LEGACY; i++)
		if (il_rates[i].plcp == (rate_n_flags & 0xFF))
			return i - band_offset;

	return -1;
}
/*
 * Compute received signal strength in dBm from the PHY's non-config
 * data: take the maximum per-antenna RSSI, subtract the AGC gain and a
 * fixed hardware offset.
 */
static int
il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
{
	/* data from PHY/DSP regarding signal strength, etc.,
	 * contents are always there, not configurable by host. */
	struct il4965_rx_non_cfg_phy *ncphy =
	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 agc =
	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
	    IL49_AGC_DB_POS;

	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* Find max rssi among 3 possible receivers.
	 * These values are measured by the digital signal processor (DSP).
	 * They should stay fairly constant even as the signal strength varies,
	 * if the radio's automatic gain control (AGC) is working right.
	 * AGC value (see below) will provide the "interesting" info. */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max_rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal. */
	return max_rssi - agc - IL4965_RSSI_OFFSET;
}
/*
 * Translate the raw hardware decryption status bits (decrypt_in) into
 * the canonical RX_RES_STATUS_* form consumed by the Rx path (used for
 * N_RX_MPDU frames, see il4965_hdl_rx()).
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;
	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
	/* Carry the security-type bits through unchanged */
	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;
	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;
	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;
	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		/* TKIP with good TTAK, WEP, etc.: ICV decides */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}
	D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
	return decrypt_out;
}
/*
 * Hand a received frame up to mac80211.
 *
 * Builds a small skb whose data lives in the Rx buffer page (attached
 * as a page fragment rather than copied); on success ownership of the
 * page passes to the skb, so rxb->page is cleared and the driver's
 * page accounting is decremented.
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}
	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;
	/* Small skb: the payload stays in the Rx page, added as a frag */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len,
			len);
	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
	ieee80211_rx(il->hw, skb);
	/* Page now belongs to the skb/mac80211; drop our reference */
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
/* Called for N_RX (legacy ABG frames), or
* N_RX_MPDU (HT high-throughput N frames). */
/*
 * Handle an Rx frame notification: N_RX (legacy ABG frames) or
 * N_RX_MPDU (HT high-throughput N frames).
 *
 * Validates the PHY data and CRC/FIFO status, fills an
 * ieee80211_rx_status from the PHY result, and passes the frame to
 * mac80211 via il4965_pass_packet_to_mac80211().
 *
 * Fix: the "dsp size out of range" debug message used a broken newline
 * escape ("/n" instead of "\n").
 */
void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct il_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;
	/*
	 * N_RX and N_RX_MPDU are handled differently.
	 * N_RX: physical layer info is in this buffer
	 * N_RX_MPDU: physical layer info was sent in separate
	 * command and cached in il->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == N_RX) {
		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
		header =
		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
					     phy_res->cfg_phy_cnt);
		len = le16_to_cpu(phy_res->byte_count);
		/* Rx status word follows the frame payload */
		rx_pkt_status =
		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
				 phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!il->_4965.last_phy_res_valid) {
			IL_ERR("MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &il->_4965.last_phy_res;
		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status =
		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
	}
	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		D_DROP("dsp size out of range [0,20]: %d\n",
		       phy_res->cfg_phy_cnt);
		return;
	}
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
		return;
	}
	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
	/* rx_status carries information about the packet to mac80211.
	 * NOTE(review): rx_status is stack-allocated and only a subset of
	 * its fields is assigned below -- confirm the remaining fields are
	 * ignored by this mac80211 version or zeroed elsewhere. */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band =
	    (phy_res->
	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
	    IEEE80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					   rx_status.band);
	rx_status.rate_idx =
	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;
	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = il4965_calc_rssi(il, phy_res);
	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
		(unsigned long long)rx_status.mactime);
	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
	    RX_RES_PHY_FLAGS_ANTENNA_POS;
	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;
	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;
	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
				       &rx_status);
}
/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
* This will be used later in il_hdl_rx() for N_RX_MPDU. */
/*
 * Cache the PHY result (signal strength etc.) carried by an N_RX_PHY
 * notification so the subsequent N_RX_MPDU can be matched with it in
 * il4965_hdl_rx().
 */
void
il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	il->_4965.last_phy_res_valid = true;
	memcpy(&il->_4965.last_phy_res, pkt->u.raw,
	       sizeof(struct il_rx_phy_res));
}
/*
 * Fill the scan command's channel list from the current scan request.
 *
 * For every requested channel on 'band', decide active vs. passive
 * scanning, set dwell times and default tx power, and append an
 * il_scan_channel entry at *scan_ch.
 *
 * Returns the number of channels written (0 if the band is unavailable).
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum ieee80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;
	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;
	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);
	/* passive dwell must exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;
	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];
		if (chan->band != band)
			continue;
		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);
		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}
		/* Passive scan if not requested active, or if the channel
		 * itself forbids active probing */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;
		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);
		scan_ch++;
		added++;
	}
	D_SCAN("total channels to scan %d\n", added);
	return added;
}
/*
 * Advance *ant to the next antenna allowed by the 'valid' bitmask,
 * cycling through 0 .. RATE_ANT_NUM-1. If no other valid antenna is
 * found, *ant is left untouched.
 */
static void
il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
{
	u8 next = *ant;
	int tries;

	for (tries = RATE_ANT_NUM - 1; tries > 0; tries--) {
		next = (next + 1 < RATE_ANT_NUM) ? next + 1 : 0;
		if (valid & BIT(next)) {
			*ant = next;
			break;
		}
	}
}
/*
 * Build and synchronously send a C_SCAN command for the current
 * mac80211 scan request.
 *
 * Chooses active/passive scanning per requested SSIDs, picks tx rate
 * and antennas per band, restricts rx chains in power-save, appends the
 * probe request template and per-channel list, then issues the command.
 *
 * Returns 0 on success, -ENOMEM if the command buffer cannot be
 * allocated, -EIO on an invalid band or empty channel list, or the
 * error from il_send_cmd_sync(). Caller must hold il->mutex.
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;
	lockdep_assert_held(&il->mutex);
	/* Lazily allocate the (huge) scan command buffer; kept for reuse */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;
	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;
		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;
		scan->suspend_time = 0;
		/* Limit off-channel time so we don't starve the BSS */
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;
		/* suspend_time encoded as beacon intervals (upper bits)
		 * plus remainder in usec (lower bits) */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}
	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");
	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		/* In HT40-only mode CCK is unavailable; probe at 6M OFDM */
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}
	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
	 * here instead of IL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
	band = il->scan_band;
	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];
	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);
	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;
		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);
		rx_ant = il4965_first_antenna(active_chains);
	}
	/* MIMO is not used here, but value is required */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);
	/* Probe request template goes right after the fixed command part */
	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);
	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
	/* Channel list follows the probe request template */
	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}
	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);
	set_bit(S_SCAN_HW, &il->status);
	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);
	return ret;
}
/*
 * Add or remove the station entry for the current IBSS BSSID.
 * Returns the result of the underlying station-table operation.
 */
int
il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
			   bool add)
{
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (!add)
		return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
					 vif->bss_conf.bssid);

	return il4965_add_bssid_station(il, vif->bss_conf.bssid,
					&vif_priv->ibss_bssid_sta_id);
}
/*
 * Account for 'freed' TFDs released from the given station/TID queue,
 * clamping the counter at zero (with a debug note) if the bookkeeping
 * would otherwise go negative. Caller must hold il->sta_lock.
 */
void
il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
{
	lockdep_assert_held(&il->sta_lock);

	if (il->stations[sta_id].tid[tid].tfds_in_queue < freed) {
		D_TX("free more than tfds_in_queue (%u:%d)\n",
		     il->stations[sta_id].tid[tid].tfds_in_queue, freed);
		il->stations[sta_id].tid[tid].tfds_in_queue = 0;
		return;
	}

	il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
}
#define IL_TX_QUEUE_MSK 0xfffff
static bool
il4965_is_single_rx_stream(struct il_priv *il)
{
return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
il->current_ht_config.single_chain_sufficient;
}
#define IL_NUM_RX_CHAINS_MULTIPLE 3
#define IL_NUM_RX_CHAINS_SINGLE 2
#define IL_NUM_IDLE_CHAINS_DUAL 2
#define IL_NUM_IDLE_CHAINS_SINGLE 1
/*
* Determine how many receiver/antenna chains to use.
*
* More provides better reception via diversity. Fewer saves power
* at the expense of throughput, but only when not in powersave to
* start with.
*
* MIMO (dual stream) requires at least 2, but works better with 3.
* This does not determine *which* chains to use, just how many.
*/
/* # of Rx chains to use when expecting MIMO: fewer for a single stream,
 * more for multi-stream reception. */
static int
il4965_get_active_rx_chain_count(struct il_priv *il)
{
	return il4965_is_single_rx_stream(il) ? IL_NUM_RX_CHAINS_SINGLE :
	    IL_NUM_RX_CHAINS_MULTIPLE;
}
/*
* When we are in power saving mode, unless device support spatial
* multiplexing power save, use the active count for rx chain count.
*/
/*
 * # of Rx chains to keep enabled while idle, depending on SMPS mode:
 * one chain for static/dynamic SMPS, the full active count otherwise.
 * An unknown mode is reported via WARN and treated like SMPS off.
 */
static int
il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
{
	if (il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	    il->current_ht_config.smps == IEEE80211_SMPS_DYNAMIC)
		return IL_NUM_IDLE_CHAINS_SINGLE;

	if (il->current_ht_config.smps == IEEE80211_SMPS_OFF)
		return active_cnt;

	WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
	return active_cnt;
}
/* up to 4 chains */
/* Population count over the low 4 bits of the chain bitmap. */
static u8
il4965_count_chain_bitmap(u32 chain_bitmap)
{
	u8 count = 0;
	int bit;

	for (bit = 0; bit < 4; bit++)
		if (chain_bitmap & BIT(bit))
			count++;

	return count;
}
/**
* il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
*
* Selects how many and which Rx receivers/antennas/chains to use.
* This should not be used for scan command ... it puts data in wrong place.
*/
void
il4965_set_rxon_chain(struct il_priv *il)
{
	bool is_single = il4965_is_single_rx_stream(il);
	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;
	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, il4965_chain_noise_calibration()
	 * checks which antennas actually *are* connected. */
	if (il->chain_noise_data.active_chains)
		active_chains = il->chain_noise_data.active_chains;
	else
		active_chains = il->hw_params.valid_rx_ant;
	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
	/* How many receivers should we use? */
	active_rx_cnt = il4965_get_active_rx_chain_count(il);
	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;
	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;
	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
	il->staging.rx_chain = cpu_to_le16(rx_chain);
	/* Force MIMO only when multiple streams are wanted, enough chains
	 * are active, and we are not in power-save (CAM = always awake) */
	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
		il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
		active_rx_cnt, idle_rx_cnt);
	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
/*
 * Map an FH49 register address to its symbolic name for debug dumps.
 * IL_CMD presumably expands each entry to "case reg: return #reg" --
 * macro defined elsewhere; verify against its definition.
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
		IL_CMD(FH49_TSSR_TX_STATUS_REG);
		IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
/*
 * Dump the FH (flow handler) register values.
 *
 * With CONFIG_IWLEGACY_DEBUG and display == true: formats the dump into
 * a freshly kmalloc'd buffer returned via *buf (ownership passes to the
 * caller, who must kfree it) and returns the number of bytes written,
 * or -ENOMEM. Otherwise the registers are logged via IL_ERR and 0 is
 * returned.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* ~48 bytes per register line plus header slack */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      "  %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR("  %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
/*
 * Handle a missed-beacon notification: if the consecutive-miss count
 * exceeds the configured threshold, log the details and (unless a scan
 * is running) re-initialize the sensitivity calibration.
 */
void
il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_missed_beacon_notif *mb = &pkt->u.missed_beacon;

	if (le32_to_cpu(mb->consecutive_missed_beacons) <=
	    il->missed_beacon_threshold)
		return;

	D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
		le32_to_cpu(mb->consecutive_missed_beacons),
		le32_to_cpu(mb->total_missed_becons),
		le32_to_cpu(mb->num_recvd_beacons),
		le32_to_cpu(mb->num_expected_beacons));

	if (!test_bit(S_SCANNING, &il->status))
		il4965_init_sensitivity(il);
}
/* Calculate noise level, based on measurements during network silence just
* before arriving beacon. This measurement can be done only if we know
* exactly when to expect beacons, therefore only when we're associated. */
/*
 * Compute the in-band noise estimate from the beacon-silence RSSI
 * measurements of the (up to) three Rx chains, averaging only the
 * chains that reported a non-zero value.
 */
static void
il4965_rx_calc_noise(struct il_priv *il)
{
	struct stats_rx_non_phy *rx_info = &(il->_4965.stats.rx.general);
	int silence[3];
	int total = 0;
	int active = 0;
	int noise_dbm;
	int i;

	silence[0] =
	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	silence[1] =
	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	silence[2] =
	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	for (i = 0; i < 3; i++) {
		if (silence[i]) {
			total += silence[i];
			active++;
		}
	}

	/* Average among active antennas */
	if (active)
		noise_dbm = (total / active) - 107;
	else
		noise_dbm = IL_NOISE_MEAS_NOT_AVAILABLE;

	D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", silence[0],
		silence[1], silence[2], noise_dbm);
}
#ifdef CONFIG_IWLEGACY_DEBUGFS
/*
* based on the assumption of all stats counter are in DWORD
* FIXME: This function is for debugging, do not deal with
* the case of counters roll-over.
*/
/*
 * Accumulate the uCode statistics notification into running totals.
 *
 * Treats the whole il_notif_stats structure as an array of __le32
 * counters (skipping the leading flag word) and, for each counter that
 * increased since the previous notification, adds the delta to the
 * accumulated copy and tracks the maximum per-interval delta.
 */
static void
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct stats_general_common *general, *accum_general;
	struct stats_tx *tx, *accum_tx;
	prev_stats = (__le32 *) &il->_4965.stats;
	accum_stats = (u32 *) &il->_4965.accum_stats;
	size = sizeof(struct il_notif_stats);
	general = &il->_4965.stats.general.common;
	accum_general = &il->_4965.accum_stats.general.common;
	tx = &il->_4965.stats.tx;
	accum_tx = &il->_4965.accum_stats.tx;
	delta = (u32 *) &il->_4965.delta_stats;
	max_delta = (u32 *) &il->_4965.max_delta;
	/* start at sizeof(__le32) to skip the first (non-counter) word */
	for (i = sizeof(__le32); i < size;
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		/* only count monotonic increases; decreases (resets,
		 * roll-overs) are deliberately ignored -- see FIXME above */
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}
	/* reset accumulative stats for "no-counter" type stats */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
#endif
/*
 * Handle a uCode statistics notification (N_STATS).
 *
 * Snapshots the new statistics into il->_4965.stats, reschedules the
 * periodic stats timer, kicks off noise calculation and run-time
 * calibration when not scanning, and triggers temperature calibration
 * if temperature or HT40 mode changed.
 */
void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	const int recalib_seconds = 60;
	bool change;
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
	/* detect temperature or HT40-mode change before overwriting the
	 * cached statistics below */
	change =
	    ((il->_4965.stats.general.common.temperature !=
	      pkt->u.stats.general.common.temperature) ||
	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif
	/* TODO: reading some of stats is unneeded */
	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
	set_bit(S_STATS, &il->status);
	/*
	 * Reschedule the stats timer to occur in recalib_seconds to ensure
	 * we get a thermal update even if the uCode doesn't give us one
	 */
	mod_timer(&il->stats_periodic,
		  jiffies + msecs_to_jiffies(recalib_seconds * 1000));
	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    (pkt->hdr.cmd == N_STATS)) {
		il4965_rx_calc_noise(il);
		queue_work(il->workqueue, &il->run_time_calib_work);
	}
	if (change)
		il4965_temperature_calib(il);
}
/*
 * Handle the reply to a statistics request: if the uCode indicates its
 * counters were cleared, zero the debugfs accumulators to match, then
 * process the payload like a regular statistics notification.
 */
void
il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
#ifdef CONFIG_IWLEGACY_DEBUGFS
		memset(&il->_4965.accum_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.delta_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
#endif
		D_RX("Statistics have been cleared\n");
	}
	il4965_hdl_stats(il, rxb);
}
/*
* mac80211 queues, ACs, hardware queues, FIFOs.
*
* Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
*
* Mac80211 uses the following numbers, which we get as from it
* by way of skb_get_queue_mapping(skb):
*
* VO 0
* VI 1
* BE 2
* BK 3
*
*
* Regular (not A-MPDU) frames are put into hardware queues corresponding
* to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
* own queue per aggregation session (RA/TID combination), such queues are
* set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
* order to map frames to the right queue, we also need an AC->hw queue
* mapping. This is implemented here.
*
* Due to the way hw queues are set up (by the hw specific modules like
* 4965.c), the AC->hw queue mapping is the identity
* mapping.
*/
/* TID -> access category mapping (802.11 UP-to-AC); indexed by TID 0-7.
 * TIDs 8-15 are not supported (see il4965_get_ac_from_tid()). */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,	/* TID 0 */
	IEEE80211_AC_BK,	/* TID 1 */
	IEEE80211_AC_BK,	/* TID 2 */
	IEEE80211_AC_BE,	/* TID 3 */
	IEEE80211_AC_VI,	/* TID 4 */
	IEEE80211_AC_VI,	/* TID 5 */
	IEEE80211_AC_VO,	/* TID 6 */
	IEEE80211_AC_VO		/* TID 7 */
};
/* Map a TID to its access category; -EINVAL for unsupported TIDs 8-15. */
static inline int
il4965_get_ac_from_tid(u16 tid)
{
	if (unlikely(tid >= ARRAY_SIZE(tid_to_ac)))
		return -EINVAL;	/* no support for TIDs 8-15 yet */

	return tid_to_ac[tid];
}
/* Map a TID to its tx FIFO via the AC mapping; -EINVAL for TIDs 8-15. */
static inline int
il4965_get_fifo_from_tid(u16 tid)
{
	const u8 ac_to_fifo[] = {
		IL_TX_FIFO_VO,
		IL_TX_FIFO_VI,
		IL_TX_FIFO_BE,
		IL_TX_FIFO_BK,
	};

	if (unlikely(tid >= ARRAY_SIZE(tid_to_ac)))
		return -EINVAL;	/* no support for TIDs 8-15 yet */

	return ac_to_fifo[tid_to_ac[tid]];
}
/*
* handle build C_TX command notification.
*/
/*
 * Fill the basic (rate-independent) part of a C_TX command: ACK/BA
 * flags, sequence-control handling, station id, TID, protection flags
 * and power-management frame timeout.
 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
			  struct il_tx_cmd *tx_cmd,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;
	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* First fragment of a probe response gets the TSF flag */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}
	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: TID comes from the QoS control field; uCode
		 * manages the sequence number itself */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}
	il_tx_cmd_protection(il, info, fc, &tx_flags);
	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* (re)assoc frames get a slightly longer PM timeout */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}
	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
/*
 * Fill the rate-related part of a C_TX command: retry limits, rate
 * selection (uCode station table for data frames, lowest/validated
 * legacy rate otherwise), CCK flag and tx antenna.
 */
static void
il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
			 struct ieee80211_tx_info *info, __le16 fc)
{
	const u8 rts_retry_limit = 60;
	u32 rate_flags;
	int rate_idx;
	u8 data_retry_limit;
	u8 rate_plcp;
	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IL4965_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;
	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_idx = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}
	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * idx is invalid.
	 *
	 * Bound check is ">=": valid legacy indices are
	 * 0 .. RATE_COUNT_LEGACY - 1. The previous ">" let
	 * rate_idx == RATE_COUNT_LEGACY through to the il_rates[] lookup
	 * (and the 5 GHz remap below) -- off-by-one.
	 */
	rate_idx = info->control.rates[0].idx;
	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) ||
	    rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY)
		rate_idx =
		    rate_lowest_index(&il->bands[info->band],
				      info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = il_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;
	/* Set CCK flag as needed */
	if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;
	/* Set up antennas */
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
}
/*
 * Fill the security portion of a C_TX command for hardware crypto,
 * according to the cipher configured on the frame's hw_key.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
			     int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		D_TX("tx_cmd with AES hwcrypto\n");
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* derive the per-packet phase-2 TKIP key */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		D_TX("tx_cmd with tkip hwcrypto\n");
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through -- WEP104 adds only the 128-bit-key flag */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
		     TX_CMD_SEC_SHIFT);
		/* WEP key material starts at offset 3 in the key field */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     keyconf->keyidx);
		break;
	default:
		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
/*
 * il4965_tx_skb - start C_TX command process: build and queue one frame
 *
 * Prepares the Tx command for @skb, maps the command+MAC header and the
 * payload for DMA, attaches both to a TFD in the target Tx queue and
 * advances the queue write pointer.
 *
 * Return: 0 when the frame was handed to the hardware, -1 when it was
 * dropped (RF-kill, unknown station, or the chosen queue is full).
 */
int
il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct il_station_priv *sta_priv = NULL;
	struct il_tx_queue *txq;
	struct il_queue *q;
	struct il_device_cmd *out_cmd;
	struct il_cmd_meta *out_meta;
	struct il_tx_cmd *tx_cmd;
	int txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	bool is_agg = false;

	spin_lock_irqsave(&il->lock, flags);
	if (il_is_rfkill(il)) {
		D_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLEGACY_DEBUG
	if (ieee80211_is_auth(fc))
		D_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		D_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		D_TX("Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id to do not break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = il->hw_params.bcast_id;
	else {
		/* Find idx into station table for destination station */
		sta_id = il_sta_id_or_broadcast(il, info->control.sta);

		if (sta_id == IL_INVALID_STATION) {
			D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
			goto drop_unlock;
		}
	}

	D_TX("station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
	}

	/* FIXME: remove me ? */
	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	/* Access category (AC) is also the queue number */
	txq_id = skb_get_queue_mapping(skb);

	/* irqs already disabled/saved above when locking il->lock */
	spin_lock(&il->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&il->sta_lock);
			goto drop_unlock;
		}
		/* The driver owns the sequence counter per <sta,tid>;
		 * stamp it into the MAC header, preserving the fragment
		 * bits the stack already set. */
		seq_number = il->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl =
		    hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
			txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	txq = &il->txq[txq_id];
	q = &txq->q;

	if (unlikely(il_queue_space(q) < q->high_mark)) {
		spin_unlock(&il->sta_lock);
		goto drop_unlock;
	}

	if (ieee80211_is_data_qos(fc)) {
		il->stations[sta_id].tid[tid].tfds_in_queue++;
		/* Commit the bumped sequence number only once the last
		 * fragment of the MSDU is queued */
		if (!ieee80211_has_morefrags(fc))
			il->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&il->sta_lock);

	txq->skbs[q->write_ptr] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct il_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD idx within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = C_TX;
	out_cmd->hdr.sequence =
	    cpu_to_le16((u16)
			(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16) skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);

	il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);

	il_update_stats(il, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys =
	    pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
			   PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr =
		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
				   PCI_DMA_TODEVICE);
		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
					       0, 0);
	}

	/* Point the device at this command's own scratch field so it can
	 * report progress back into the mapped command buffer */
	scratch_phys =
	    txcmd_phys + sizeof(struct il_cmd_header) +
	    offsetof(struct il_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
				    PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);

	D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		il->ops->txq_update_byte_cnt_tbl(il, txq,
						 le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
				       PCI_DMA_BIDIRECTIONAL);

	/* Tell device the write idx *just past* this latest filled TFD */
	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
	il_txq_update_write_ptr(il, txq);
	spin_unlock_irqrestore(&il->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&il->lock, flags);
			txq->need_update = 1;
			il_txq_update_write_ptr(il, txq);
			spin_unlock_irqrestore(&il->lock, flags);
		} else {
			il_stop_queue(il, txq);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&il->lock, flags);
	return -1;
}
/* Allocate a coherent DMA buffer of @size bytes and record both the
 * CPU and bus addresses in @ptr.  Returns 0 on success, -ENOMEM when
 * the allocation fails (in which case ptr->addr is NULL). */
static inline int
il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
{
	ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
				       GFP_KERNEL);
	if (ptr->addr == NULL)
		return -ENOMEM;

	ptr->size = size;
	return 0;
}
/* Release a buffer set up by il4965_alloc_dma_ptr() and wipe the
 * descriptor so that a second call on the same @ptr is a no-op. */
static inline void
il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
{
	if (likely(ptr->addr)) {
		dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr,
				  ptr->dma);
		memset(ptr, 0, sizeof(*ptr));
	}
}
/**
 * il4965_hw_txq_ctx_free - Free TXQ Context
 *
 * Tear down every Tx DMA queue (the command queue has its own
 * destructor), release the keep-warm and scheduler byte-count DMA
 * buffers, then free the queue array itself.
 */
void
il4965_hw_txq_ctx_free(struct il_priv *il)
{
	int i;

	/* Tx queues */
	if (il->txq) {
		for (i = 0; i < il->hw_params.max_txq_num; i++) {
			if (i == il->cmd_queue)
				il_cmd_queue_free(il);
			else
				il_tx_queue_free(il, i);
		}
	}

	il4965_free_dma_ptr(il, &il->kw);
	il4965_free_dma_ptr(il, &il->scd_bc_tbls);

	/* free tx queue structure */
	il_free_txq_mem(il);
}
/**
 * il4965_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param il
 * @return error code
 */
int
il4965_txq_ctx_alloc(struct il_priv *il)
{
	int ret, txq_id;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	il4965_hw_txq_ctx_free(il);

	/* DMA table for the scheduler's per-queue byte counts */
	ret =
	    il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
				 il->hw_params.scd_bc_tbls_size);
	if (ret) {
		IL_ERR("Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
	if (ret) {
		IL_ERR("Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = il_alloc_txq_mem(il);
	if (ret)
		goto error;

	spin_lock_irqsave(&il->lock, flags);

	/* Turn off all Tx DMA fifos */
	il4965_txq_set_sched(il, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		ret = il_tx_queue_init(il, txq_id);
		if (ret) {
			IL_ERR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	/* hw_txq_ctx_free() already releases kw and scd_bc_tbls; the
	 * fall-through frees below are harmless no-ops because
	 * il4965_free_dma_ptr() clears the descriptor after freeing */
	il4965_hw_txq_ctx_free(il);
	il4965_free_dma_ptr(il, &il->kw);
error_kw:
	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
error_bc_tbls:
	return ret;
}
/* Reset the Tx queue context: stop the scheduler, re-point the NIC at
 * the keep-warm buffer and reset every queue (command queue included). */
void
il4965_txq_ctx_reset(struct il_priv *il)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&il->lock, flags);

	/* All Tx DMA fifos off */
	il4965_txq_set_sched(il, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Reset all Tx queues, including the command queue (#4) */
	for (i = 0; i < il->hw_params.max_txq_num; i++)
		il_tx_queue_reset(il, i);
}
void
il4965_txq_ctx_unmap(struct il_priv *il)
{
int txq_id;
if (!il->txq)
return;
/* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
if (txq_id == il->cmd_queue)
il_cmd_queue_unmap(il);
else
il_tx_queue_unmap(il, txq_id);
}
/**
 * il4965_txq_ctx_stop - Stop all Tx DMA channels
 */
void
il4965_txq_ctx_stop(struct il_priv *il)
{
	int ch, ret;

	/* Turn off the scheduler so no further TFDs are dispatched */
	_il_wr_prph(il, IL49_SCD_TXFACT, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
		_il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		/* Poll (up to 1000 us) for the channel's idle bit */
		ret =
		    _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 1000);
		if (ret < 0)
			IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
			       ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
	}
}
/*
 * Claim the first unused Tx queue and mark it active.
 * Only used when picking a queue for aggregation, so the result
 * should never be < 7: queues 0-3 are the EDCA ACs, 4 is the
 * command queue and 5/6 are reserved.
 * Returns the queue number, or -1 when no queue is free.
 */
static int
il4965_txq_ctx_activate_free(struct il_priv *il)
{
	int i;

	for (i = 0; i < il->hw_params.max_txq_num; i++) {
		if (!test_and_set_bit(i, &il->txq_ctx_active_msk))
			return i;
	}
	return -1;
}
/**
 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
 */
static void
il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit.
	 * Clearing ACTIVE with ACT_EN set deactivates the queue without
	 * touching its window/frame-limit setup. */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
/**
 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
 */
static int
il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
{
	u32 addr;
	u32 val;
	u16 ratid;

	ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	/* Two queues share each dword of the translate table:
	 * odd queue numbers occupy the upper 16 bits, even ones
	 * the lower 16, so read-modify-write the containing word */
	addr = il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	val = il_read_targ_mem(il, addr);

	if (txq_id & 0x1)
		val = (val & 0x0000FFFF) | ((u32)ratid << 16);
	else
		val = (val & 0xFFFF0000) | ratid;

	il_write_targ_mem(il, addr, val);

	return 0;
}
/**
 * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
 *
 * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
 * i.e. it must be one of the higher queues used for aggregation
 */
static int
il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
		      int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	/* Reject queue numbers outside the AMPDU queue range */
	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->lock, flags);

	/* Stop this Tx queue before configuring it */
	il4965_tx_queue_stop_scheduler(il, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at idx corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	/* Set up Tx win size and frame limit for this queue */
	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
			  (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
			  & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			  (SCD_FRAME_LIMIT <<
			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	/* Enable scheduler interrupts for this queue */
	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
/*
 * il4965_tx_agg_start - begin an aggregation (ADDBA) session for <sta,tid>
 *
 * Picks a free AMPDU queue, maps it to the station/TID pair and enables
 * it in the scheduler.  If frames for this TID are still in flight the
 * session is parked in IL_EMPTYING_HW_QUEUE_ADDBA until the HW queue
 * drains (finished from il4965_txq_check_empty()); otherwise mac80211
 * is notified right away that the session is up.
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	/* FIXME: warning if tx fifo not found ? */
	tx_fifo = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Claim the first unused AMPDU queue */
	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	/* The block-ack win starts at the driver's next sequence number */
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
/**
 * il4965_txq_agg_disable - deactivate an AMPDU queue in the scheduler
 *
 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
 * il->lock must be held by the caller
 */
static int
il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
{
	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	il4965_tx_queue_stop_scheduler(il, txq_id);

	/* Queue no longer participates in chain building */
	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Rewind both pointers to the SSN so the queue looks empty */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	/* Return the queue to the free pool */
	il_txq_ctx_deactivate(il, txq_id);
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);

	return 0;
}
/*
 * il4965_tx_agg_stop - tear down an aggregation (DELBA) session
 *
 * If the HW queue still holds frames, only mark the session as
 * IL_EMPTYING_HW_QUEUE_DELBA; the actual queue shutdown then happens
 * later from il4965_txq_check_empty() once the last frame is reclaimed.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	/* FIXME: warning if tx_fifo_id not found ? */
	tx_fifo_id = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* do not restore/save irqs: sta_lock is swapped for il->lock with
	 * interrupts still disabled; the final irqrestore reuses the
	 * flags saved when sta_lock was taken */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 *  mac80211 to clean up it own data.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
/*
 * il4965_txq_check_empty - finish a deferred ADDBA/DELBA transition
 *
 * Called after frames are reclaimed: once the aggregation queue for
 * <sta,tid> has fully drained, completes whichever transition was
 * pending (queue teardown for DELBA, session start for ADDBA).
 * Caller must hold il->sta_lock.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
	struct il_queue *q = &il->txq[txq_id].q;
	u8 *addr = il->stations[sta_id].sta.sta.addr;
	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];

	lockdep_assert_held(&il->sta_lock);

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = il4965_get_fifo_from_tid(tid);
			D_HT("HW queue empty: continue DELBA flow\n");
			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			D_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	}

	return 0;
}
static void
il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
{
struct ieee80211_sta *sta;
struct il_station_priv *sta_priv;
rcu_read_lock();
sta = ieee80211_find_sta(il->vif, addr1);
if (sta) {
sta_priv = (void *)sta->drv_priv;
/* avoid atomic ops if this isn't a client */
if (sta_priv->client &&
atomic_dec_return(&sta_priv->pending_frames) == 0)
ieee80211_sta_block_awake(il->hw, sta, false);
}
rcu_read_unlock();
}
/* Hand a completed skb's status back to mac80211.  Frames from
 * aggregation queues skip the pending-frames accounting, which is
 * handled by the block-ack flow instead. */
static void
il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
{
	if (!is_agg) {
		struct ieee80211_hdr *hdr =
		    (struct ieee80211_hdr *)skb->data;

		il4965_non_agg_tx_status(il, hdr->addr1);
	}

	ieee80211_tx_status_irqsafe(il->hw, skb);
}
/*
 * il4965_tx_queue_reclaim - free completed TFDs up to and including @idx
 *
 * Walks read_ptr forward to just past @idx, handing each skb's status
 * to mac80211 and releasing its TFD.
 *
 * Return: the number of reclaimed QoS-data frames (used by callers to
 * adjust the tfds_in_queue accounting), or 0 when @idx is invalid.
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		skb = txq->skbs[txq->q.read_ptr];

		if (WARN_ON_ONCE(skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		/* Only QoS-data frames count against tfds_in_queue */
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);

		txq->skbs[txq->q.read_ptr] = NULL;
		il->ops->txq_free_tfd(il, txq);
	}
	return nfreed;
}
/**
 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int
il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
				     struct il_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IL_ERR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx win bits */
	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
	if (sh < 0)		/* tbw something is wrong with indices */
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		D_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
			   i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	/* Report the aggregate result on the first frame's tx_info;
	 * mac80211 passes it on to the rate-scaling algorithm */
	info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);

	return 0;
}
/* True when the masked Tx completion code reports a successful
 * transmission (normal success or direct-done). */
static inline bool
il4965_is_tx_success(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
	case TX_STATUS_DIRECT_DONE:
		return true;
	default:
		return false;
	}
}
/*
 * il4965_find_station - look @addr up in the driver's station table
 *
 * Return: the station index, or IL_INVALID_STATION when the address is
 * unknown or the entry is not yet usable (still being added to uCode).
 */
static u8
il4965_find_station(struct il_priv *il, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IL_INVALID_STATION;
	unsigned long flags;

	/* In IBSS the low entries are reserved; peers start at IL_STA_ID */
	if (il->iw_mode == NL80211_IFTYPE_ADHOC)
		start = IL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return il->hw_params.bcast_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = start; i < il->hw_params.max_stations; i++)
		if (il->stations[i].used &&
		    (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
			ret = i;
			goto out;
		}

	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);

out:
	/*
	 * It may be possible that more commands interacting with stations
	 * arrive before we completed processing the adding of
	 * station
	 */
	if (ret != IL_INVALID_STATION &&
	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
	     ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
	      (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
		IL_ERR("Requested station info for sta %d before ready.\n",
		       ret);
		ret = IL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
/* Resolve the receiver-address station index for @hdr: in station
 * mode everything goes to the AP, otherwise the destination address
 * is looked up in the station table. */
static int
il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
{
	if (il->iw_mode == NL80211_IFTYPE_STATION)
		return IL_AP_ID;

	return il4965_find_station(il, ieee80211_get_DA(hdr));
}
/* Extract the scheduler SSN from a Tx response: it is the 32-bit word
 * that follows the per-frame status entries (one entry per attempted
 * frame, hence the pointer offset by frame_count), masked to a valid
 * sequence number. */
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
	return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
}
/* Translate a device Tx completion code into the mac80211 status
 * flag(s) to set on the frame's tx_info; 0 when the frame failed. */
static inline u32
il4965_tx_status_to_mac80211(u32 status)
{
	u32 code = status & TX_STATUS_MSK;

	if (code == TX_STATUS_SUCCESS || code == TX_STATUS_DIRECT_DONE)
		return IEEE80211_TX_STAT_ACK;
	if (code == TX_STATUS_FAIL_DEST_PS)
		return IEEE80211_TX_STAT_TX_FILTERED;
	return 0;
}
/**
 * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
 *
 * Records this Tx attempt in @agg.  With a single attempted frame the
 * final status is known immediately; with several frames a bitmap of
 * still-pending frames is built and a block-ack is awaited.
 */
static int
il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
			  struct il4965_tx_resp *tx_resp, int txq_id,
			  u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		D_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* num frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
			   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il, rate_n_flags, info);

		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
			   tx_resp->failure_frame);
		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;
		struct sk_buff *skb;

		/* Construct bit-map of pending frames within Tx win */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			/* The sequence field encodes the frame's queue and
			 * TFD idx (stamped by il4965_tx_skb) */
			idx = SEQ_TO_IDX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* Frames never sent on air don't enter the bitmap */
			if (status &
			    (AGG_TX_STATE_FEW_BYTES_MSK |
			     AGG_TX_STATE_ABORT_MSK))
				continue;

			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
				   agg->frame_count, txq_id, idx);

			skb = il->txq[txq_id].skbs[idx];
			if (WARN_ON_ONCE(skb == NULL))
				return -1;
			hdr = (struct ieee80211_hdr *) skb->data;

			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IL_ERR("BUG_ON idx doesn't match seq control"
				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
				       SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
				   SEQ_TO_SN(sc));

			/* Keep the win anchored at the lowest idx seen,
			 * shifting the bitmap when idx wraps around or
			 * precedes the current start */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
				   (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
			   agg->frame_count, agg->start_idx,
			   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
/**
 * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
 */
static void
il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	/* The sequence field carries the originating queue and TFD idx,
	 * stamped into the Tx command by il4965_tx_skb */
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info;
	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	int uninitialized_var(tid);
	int sta_id;
	int freed;
	u8 *qc = NULL;
	unsigned long flags;

	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;

	skb = txq->skbs[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(skb);
	memset(&info->status, 0, sizeof(info->status));

	hdr = (struct ieee80211_hdr *) skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		/* tid is only meaningful while qc != NULL */
		tid = qc[0] & 0xf;
	}

	sta_id = il4965_get_ra_sta_id(il, hdr);
	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
		IL_ERR("Station not known\n");
		return;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (txq->sched_retry) {
		/* Aggregation queue: reclaim up to the scheduler SSN */
		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
		struct il_ht_agg *agg = NULL;
		WARN_ON(!qc);

		agg = &il->stations[sta_id].tid[tid].agg;

		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);

		/* check if BAR is needed */
		if (tx_resp->frame_count == 1 &&
		    !il4965_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
				   "%d idx %d\n", scd_ssn, idx);
			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
			if (qc)
				il4965_free_tfds_in_queue(il, sta_id, tid,
							  freed);

			if (il->mac80211_registered &&
			    il_queue_space(&txq->q) > txq->q.low_mark &&
			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
				il_wake_queue(il, txq);
		}
	} else {
		/* Plain queue: report this frame's status and reclaim it */
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il,
					    le32_to_cpu(tx_resp->rate_n_flags),
					    info);

		D_TX_REPLY("TXQ %d status %s (0x%08x) "
			   "rate_n_flags 0x%x retries %d\n", txq_id,
			   il4965_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);

		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
		if (qc && likely(sta_id != IL_INVALID_STATION))
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
		else if (sta_id == IL_INVALID_STATION)
			D_TX_REPLY("Station not known\n");

		if (il->mac80211_registered &&
		    il_queue_space(&txq->q) > txq->q.low_mark)
			il_wake_queue(il, txq);
	}
	/* A drained queue may complete a pending ADDBA/DELBA transition */
	if (qc && likely(sta_id != IL_INVALID_STATION))
		il4965_txq_check_empty(il, sta_id, tid, txq_id);

	il4965_check_abort_status(il, tx_resp->frame_count, status);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
/**
 * il4965_hwrate_to_tx_control - translate ucode rate/flags to mac80211
 *
 * Copies the Tx antenna selection and maps each device rate flag to
 * its mac80211 tx_rate counterpart, then converts the rate index.
 */
void
il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
			    struct ieee80211_tx_info *info)
{
	static const struct {
		u32 hw_msk;
		u16 mac_flag;
	} map[] = {
		{ RATE_MCS_HT_MSK, IEEE80211_TX_RC_MCS },
		{ RATE_MCS_GF_MSK, IEEE80211_TX_RC_GREEN_FIELD },
		{ RATE_MCS_HT40_MSK, IEEE80211_TX_RC_40_MHZ_WIDTH },
		{ RATE_MCS_DUP_MSK, IEEE80211_TX_RC_DUP_DATA },
		{ RATE_MCS_SGI_MSK, IEEE80211_TX_RC_SHORT_GI },
	};
	struct ieee80211_tx_rate *r = &info->control.rates[0];
	int i;

	info->antenna_sel_tx =
	    ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (rate_n_flags & map[i].hw_msk)
			r->flags |= map[i].mac_flag;

	r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
/**
 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void
il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct il_tx_queue *txq = NULL;
	struct il_ht_agg *agg;
	int idx;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx win, corresponds to idx
	 * (in Tx queue's circular buffer) of first TFD/frame in win */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	/* Reject queue numbers the hardware could not legitimately report. */
	if (scd_flow >= il->hw_params.max_txq_num) {
		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &il->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &il->stations[sta_id].tid[tid].agg;

	/* Consistency check: the BA must refer to the queue on which the
	 * aggregation session was actually started. */
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now!
		 * since it is possible happen very often and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
			   scd_flow, agg->txq_id);
		return;
	}

	/* Find idx just before block-ack win */
	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&il->sta_lock, flags);

	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
		   ba_resp->sta_id);
	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
		   ba_resp->scd_flow, ba_resp->scd_ssn);
	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
		   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in win */
	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack win (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
		il4965_free_tfds_in_queue(il, sta_id, tid, freed);

		/* Wake the queue only if mac80211 is up and we are not in
		 * the middle of tearing the aggregation session down. */
		if (il_queue_space(&txq->q) > txq->q.low_mark &&
		    il->mac80211_registered &&
		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
			il_wake_queue(il, txq);

		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il4965_get_tx_fail_reason - map a Tx status word to a printable name
 *
 * Only the status code (masked with TX_STATUS_MSK) is examined; any code
 * not covered by the case table yields "UNKNOWN".  Debug builds only.
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
/* The helper macros expand to 'case CONSTANT: return "NAME";' pairs,
 * keeping the case table in sync with the constant names by construction. */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLEGACY_DEBUG */
/*
 * il4965_sta_alloc_lq - build an initial link-quality (rate-scale) command
 *
 * Allocates (GFP_KERNEL) a link-quality command whose whole retry table is
 * pinned to the lowest mandatory rate of the current band (6M on 5GHz,
 * 1M otherwise), with sane single/dual-stream antenna masks.  The caller
 * owns the returned buffer; returns NULL on allocation failure.
 */
static struct il_link_quality_cmd *
il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
{
	struct il_link_quality_cmd *lq;
	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
	u8 first_ant = il4965_first_antenna(valid_tx_ant);
	u32 flags = 0;
	__le32 rate;
	int start_rate, i;

	lq = kzalloc(sizeof(*lq), GFP_KERNEL);
	if (!lq) {
		IL_ERR("Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}

	/* Start at the band's lowest mandatory rate and spin there;
	 * real rate scaling replaces this table later. */
	start_rate = (il->band == IEEE80211_BAND_5GHZ) ?
	    RATE_6M_IDX : RATE_1M_IDX;

	if (start_rate >= IL_FIRST_CCK_RATE && start_rate <= IL_LAST_CCK_RATE)
		flags |= RATE_MCS_CCK_MSK;
	flags |= first_ant << RATE_MCS_ANT_POS;

	rate = cpu_to_le32(il_rates[start_rate].plcp | flags);
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		lq->rs_table[i].rate_n_flags = rate;

	lq->general_params.single_stream_ant_msk = first_ant;

	lq->general_params.dual_stream_ant_msk = valid_tx_ant & ~first_ant;
	if (!lq->general_params.dual_stream_ant_msk)
		lq->general_params.dual_stream_ant_msk = ANT_AB;
	else if (il4965_num_of_ant(valid_tx_ant) == 2)
		lq->general_params.dual_stream_ant_msk = valid_tx_ant;

	lq->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	lq->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	lq->sta_id = sta_id;
	return lq;
}
/*
 * il4965_add_bssid_station - Add the special IBSS BSSID station
 *
 * Function sleeps.
 */
int
il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;

	/* Report "invalid" until the station has actually been added. */
	if (sta_id_r)
		*sta_id_r = IL_INVALID_STATION;

	ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].used |= IL_STA_LOCAL;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
		       addr);
		return -ENOMEM;
	}

	/* A command failure is logged but not treated as fatal; the LQ
	 * table ownership still passes to the station entry below. */
	ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
	if (ret)
		IL_ERR("Link quality command failed (%d)\n", ret);

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
/*
 * il4965_static_wepkey_cmd - upload the default (static) WEP key table
 *
 * Builds a C_WEPKEY host command covering all WEP_KEYS_MAX key slots from
 * the driver's cache and sends it synchronously.  Empty slots are tagged
 * with WEP_INVALID_OFFSET.  If every slot is empty, the command is only
 * sent when @send_if_empty is set.  May sleep.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
{
	int i;
	/* Room for the command header plus one entry per key slot. */
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = C_WEPKEY,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};
	bool not_empty = false;	/* becomes true if any slot holds a key */

	might_sleep();

	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		u8 key_size = il->_4965.wep_keys[i].key_size;

		wep_cmd->key[i].key_idx = i;
		if (key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = true;
		} else
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;

		wep_cmd->key[i].key_size = key_size;
		/* Key material starts at byte 3 of the entry's key array —
		 * NOTE(review): presumably a uCode command-layout
		 * requirement; confirm against the C_WEPKEY format before
		 * changing. */
		memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;
	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;

	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
/*
 * il4965_restore_default_wep_keys - re-upload the cached WEP key table
 *
 * Thin wrapper around il4965_static_wepkey_cmd() that skips the upload
 * entirely when no static key is configured.  Caller must hold il->mutex.
 */
int
il4965_restore_default_wep_keys(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	return il4965_static_wepkey_cmd(il, false);
}
int
il4965_remove_default_wep_key(struct il_priv *il,
struct ieee80211_key_conf *keyconf)
{
int ret;
int idx = keyconf->keyidx;
lockdep_assert_held(&il->mutex);
D_WEP("Removing default WEP key: idx=%d\n", idx);
memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
if (il_is_rfkill(il)) {
D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
/* but keys in device are clear anyway so return success */
return 0;
}
ret = il4965_static_wepkey_cmd(il, 1);
D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
return ret;
}
/*
 * il4965_set_default_wep_key - cache a static WEP key and upload the table
 *
 * Validates the key length (only 40-bit and 104-bit WEP are supported),
 * stores the key in the driver's default-key cache, and pushes the whole
 * table via C_WEPKEY.  GENERATE_IV is cleared so mac80211 does not build
 * IVs in software.  Caller must hold il->mutex.  Returns 0, -EINVAL for a
 * bad length, or the host-command error.
 */
int
il4965_set_default_wep_key(struct il_priv *il,
			   struct ieee80211_key_conf *keyconf)
{
	int ret;
	int len = keyconf->keylen;
	int idx = keyconf->keyidx;

	lockdep_assert_held(&il->mutex);

	if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	/* Default keys are tracked against the AP station entry. */
	il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;

	il->_4965.wep_keys[idx].key_size = len;
	memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);

	ret = il4965_static_wepkey_cmd(il, false);
	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);

	return ret;
}
/*
 * il4965_set_wep_dynamic_key_info - program a pairwise/group WEP key
 *
 * Fills the station's key info and ADD_STA key fields under sta_lock,
 * allocates a uCode key-table slot if the station does not already have
 * one, then sends the snapshotted ADD_STA command synchronously outside
 * the lock.  Caller must hold il->mutex.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Hardware handles the IV; mac80211 must not generate it. */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* WEP key material starts at byte 3 of the ADD_STA key array —
	 * NOTE(review): presumably a uCode layout requirement; mirrors
	 * il4965_static_wepkey_cmd(). */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	/* Only grab a fresh uCode key-table slot when the station has no
	 * encryption key yet; otherwise reuse the existing offset. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot the command so it can be sent after dropping the lock. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
/*
 * il4965_set_ccmp_dynamic_key_info - program a CCMP (AES) pairwise/group key
 *
 * Same flow as the WEP variant: stage the key under sta_lock, allocate a
 * uCode key slot if needed, and send the snapshotted ADD_STA command
 * outside the lock.  GENERATE_IV is set so mac80211 provides the PN/IV.
 * Caller must hold il->mutex.
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* Unlike WEP, CCMP key material starts at offset 0. */
	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* Allocate a uCode key-table slot only for a first-time key. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot the command so it can be sent after dropping the lock. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
/*
 * il4965_set_tkip_dynamic_key_info - stage a TKIP key in the station table
 *
 * Records the key and its flags locally but deliberately does NOT send an
 * ADD_STA command here — no il_send_add_sta() call — so the function
 * always returns 0.  NOTE(review): presumably the device is updated later
 * once phase-1 keys arrive (see il4965_update_tkip_key); confirm before
 * relying on this.  Caller should hold il->mutex (note: not asserted here,
 * unlike the WEP/CCMP variants).
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	int ret = 0;		/* no failure path below; always 0 */
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* mac80211 supplies IV and Michael MIC; hardware does the rest. */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = 16;

	/* Allocate a uCode key-table slot only for a first-time key. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* This copy is actually not needed: we get the key with each TX */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
/*
 * il4965_update_tkip_key - push fresh TKIP phase-1 (TTAK) keys to the device
 *
 * Called by mac80211 when the TKIP IV32 rolls over.  If an active scan
 * cannot be cancelled, the update is skipped and software decryption
 * covers the gap.  Sends ADD_STA asynchronously while holding sta_lock
 * (CMD_ASYNC does not sleep).
 */
void
il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	/* NULL sta maps to the broadcast station. */
	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* The phase-1 key is five 16-bit words. */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
/*
 * il4965_remove_dynamic_key - remove a pairwise/group key from the device
 *
 * Validates that the key being removed is the one currently programmed
 * (it may already have been replaced), releases its uCode key-table slot,
 * invalidates the station's key fields, and sends ADD_STA synchronously
 * to sync the device — unless RF-kill is asserted, in which case the
 * device state is moot and the local cleanup suffices.  Caller must hold
 * il->mutex.  Returns 0 or the command error.
 */
int
il4965_remove_dynamic_key(struct il_priv *il,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with idx different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different idx.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Give the uCode key-table slot back to the allocator bitmap. */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);

	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Snapshot the command so it can be sent after dropping the lock. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
/*
 * il4965_set_dynamic_key - install a pairwise/group key by cipher type
 *
 * Dispatches to the WEP/TKIP/CCMP helper matching keyconf->cipher and
 * bumps the mapping-key count.  Caller must hold il->mutex.  Returns the
 * helper's result, or -EINVAL for an unsupported cipher.
 */
int
il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       u8 sta_id)
{
	int ret = -EINVAL;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys++;
	keyconf->hw_key_idx = HW_KEY_DYNAMIC;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		ret = il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ret = il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
		break;
	default:
		IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
		       keyconf->cipher);
		break;
	}

	D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);

	return ret;
}
/**
 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
 *
 * This adds the broadcast station into the driver's station table
 * and marks it driver active, so that it will be restored to the
 * device at the next best time.
 */
int
il4965_alloc_bcast_station(struct il_priv *il)
{
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	/* Reserve a station-table slot for the broadcast address; no host
	 * command is sent here, only the local table is updated. */
	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);

		return -EINVAL;
	}

	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
	il->stations[sta_id].used |= IL_STA_BCAST;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Allocation sleeps, so it must happen outside the spinlock. */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR
		    ("Unable to initialize rate scaling for bcast station.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
/**
 * il4965_update_bcast_station - refresh the broadcast station's LQ command
 *
 * Allocates a fresh default link-quality table and swaps it in under
 * sta_lock, freeing the previous one.  Only used by iwl4965; placed here
 * to keep all bcast station management code together.
 */
static int
il4965_update_bcast_station(struct il_priv *il)
{
	u8 sta_id = il->hw_params.bcast_id;
	struct il_link_quality_cmd *new_lq;
	unsigned long flags;

	/* Build the replacement outside the lock (allocation may sleep). */
	new_lq = il4965_sta_alloc_lq(il, sta_id);
	if (!new_lq) {
		IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (il->stations[sta_id].lq)
		kfree(il->stations[sta_id].lq);
	else
		D_INFO("Bcast sta rate scaling has not been initialized.\n");
	il->stations[sta_id].lq = new_lq;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
/*
 * il4965_update_bcast_stations - refresh LQ for the broadcast station(s)
 *
 * The 4965 driver has a single broadcast station, so this simply forwards
 * to il4965_update_bcast_station().
 */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il);
}
/**
 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
 */
int
il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
{
	unsigned long flags;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Remove "disable" flag, to enable Tx for this TID */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot the command under the lock and send it after unlocking;
	 * a synchronous host command cannot be issued under a spinlock. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
/*
 * il4965_sta_rx_agg_start - tell the device to accept an Rx BA session
 *
 * Programs the station entry with the TID and starting sequence number of
 * an incoming block-ack agreement and sends ADD_STA synchronously.
 * Caller must hold il->mutex.  Returns 0, -ENXIO for an unknown station,
 * or the command error.
 */
int
il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
			u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot the command; the sync send happens outside the lock. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
/*
 * il4965_sta_rx_agg_stop - tell the device an Rx BA session has ended
 *
 * Counterpart of il4965_sta_rx_agg_start(): marks the TID's block-ack
 * agreement for removal and sends ADD_STA synchronously.  Caller must
 * hold il->mutex.  Returns 0, -ENXIO for an unknown station, or the
 * command error.
 */
int
il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot the command; the sync send happens outside the lock. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
/*
 * il4965_sta_modify_sleep_tx_count - allow @cnt frames to a sleeping station
 *
 * Marks the station as power-saving and programs the number of frames the
 * uCode may release to it.  The ADD_STA command is sent asynchronously
 * while sta_lock is held (CMD_ASYNC does not sleep).
 */
void
il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.sta.modify_mask =
	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&il->sta_lock, flags);
}
/*
 * il4965_update_chain_flags - recompute RXON chain bits, commit if changed
 *
 * Delegates the chain calculation to the ops hook (when present) and only
 * issues a RXON commit if the staged chain mask actually differs from the
 * active one.
 */
void
il4965_update_chain_flags(struct il_priv *il)
{
	if (!il->ops->set_rxon_chain)
		return;

	il->ops->set_rxon_chain(il);
	if (il->active.rx_chain != il->staging.rx_chain)
		il_commit_rxon(il);
}
static void
il4965_clear_free_frames(struct il_priv *il)
{
struct list_head *element;
D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
while (!list_empty(&il->free_frames)) {
element = il->free_frames.next;
list_del(element);
kfree(list_entry(element, struct il_frame, list));
il->frames_count--;
}
if (il->frames_count) {
IL_WARN("%d frames still in use. Did we lose one?\n",
il->frames_count);
il->frames_count = 0;
}
}
static struct il_frame *
il4965_get_free_frame(struct il_priv *il)
{
struct il_frame *frame;
struct list_head *element;
if (list_empty(&il->free_frames)) {
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
if (!frame) {
IL_ERR("Could not allocate frame!\n");
return NULL;
}
il->frames_count++;
return frame;
}
element = il->free_frames.next;
list_del(element);
return list_entry(element, struct il_frame, list);
}
/*
 * il4965_free_frame - return a frame buffer to the pre-allocated free list
 *
 * The frame is zeroed first so the next user starts from a clean slate.
 */
static void
il4965_free_frame(struct il_priv *il, struct il_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
static u32
il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
int left)
{
lockdep_assert_held(&il->mutex);
if (!il->beacon_skb)
return 0;
if (il->beacon_skb->len > left)
return 0;
memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
return il->beacon_skb->len;
}
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The idx is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM.
	 * Each IE is <id><len><data...>, so stepping by beacon[tim_idx+1]+2
	 * walks IE by IE; the (frame_size - 2) bound guarantees the length
	 * byte of the candidate IE is still inside the frame. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
/*
 * il4965_hw_get_beacon_cmd - build a complete TX_BEACON command in @frame
 *
 * Copies the cached beacon into the command, fills in the Tx command
 * fields (length, bcast station, infinite lifetime), locates the TIM
 * element, and picks the lowest PLCP rate with a rotated Tx antenna.
 * Returns the total command size in bytes, or 0 when beaconing is
 * disabled or no (fitting) beacon is cached.  Caller must hold il->mutex.
 */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("Trying to build beacon without beaconing enabled\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Set up packet rate and flags; rotate the management-frame Tx
	 * antenna among the valid ones for diversity. */
	rate = il_get_lowest_plcp(il);
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
/*
 * il4965_send_beacon_cmd - build and submit a TX_BEACON host command
 *
 * Obtains a frame buffer, fills it via il4965_hw_get_beacon_cmd(), sends
 * it as a command PDU and returns the buffer to the free list regardless
 * of outcome.  Returns 0, -ENOMEM, -EINVAL, or the send error.
 */
int
il4965_send_beacon_cmd(struct il_priv *il)
{
	struct il_frame *frame;
	unsigned int size;
	int ret;

	frame = il4965_get_free_frame(il);
	if (!frame) {
		IL_ERR("Could not obtain free frame buffer for beacon "
		       "command.\n");
		return -ENOMEM;
	}

	size = il4965_hw_get_beacon_cmd(il, frame);
	if (!size) {
		IL_ERR("Error configuring the beacon command\n");
		il4965_free_frame(il, frame);
		return -EINVAL;
	}

	ret = il_send_cmd_pdu(il, C_TX_BEACON, size, &frame->u.cmd[0]);

	il4965_free_frame(il, frame);
	return ret;
}
/*
 * il4965_tfd_tb_get_addr - extract a Tx-buffer DMA address from a TFD entry
 *
 * The low 32 address bits live in 'lo' (unaligned little-endian); on
 * configs where dma_addr_t is wider than 32 bits, address bits 35:32 sit
 * in the low nibble of hi_n_len.
 */
static inline dma_addr_t
il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);

	/* Two 16-bit shifts instead of a single << 32: the branch is
	 * compiled even when dma_addr_t is 32-bit, and shifting a type by
	 * its full width would be undefined behaviour. */
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
		    16;

	return addr;
}
/*
 * il4965_tfd_tb_get_len - extract a Tx-buffer length from a TFD entry
 *
 * The 12-bit length occupies bits 15:4 of the little-endian hi_n_len
 * field (bits 3:0 hold high address bits).
 */
static inline u16
il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
{
	return le16_to_cpu(tfd->tbs[idx].hi_n_len) >> 4;
}
/*
 * il4965_tfd_set_tb - record one Tx buffer (addr, len) in a TFD entry
 *
 * Counterpart of the two getters above: packs the 12-bit length into
 * bits 15:4 of hi_n_len and, on wide-DMA configs, address bits 35:32
 * into bits 3:0.  Also updates the TFD's buffer count.
 */
static inline void
il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	/* Split shift avoids UB when dma_addr_t is only 32 bits wide. */
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	/* Writing entry idx means entries 0..idx are now in use. */
	tfd->num_tbs = idx + 1;
}
/*
 * il4965_tfd_get_num_tbs - number of Tx buffers attached to a TFD
 *
 * Only the low 5 bits of num_tbs carry the count.
 */
static inline u8
il4965_tfd_get_num_tbs(struct il_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
/**
 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @il - driver ilate data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write idxes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[idx];

	/* Sanity check on number of chunks */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd — buffer 0 is always the command itself, whose
	 * mapping is tracked in the queue's meta array rather than read
	 * back out of the TFD. */
	if (num_tbs)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any — buffers 1..num_tbs-1 are payload
	 * fragments whose addresses/lengths come from the TFD. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
/*
 * il4965_hw_txq_attach_buf_to_tfd - append one DMA buffer to the write TFD
 *
 * Adds (@addr, @len) as the next Tx buffer of the TFD at the queue's
 * write pointer, optionally zeroing the TFD first (@reset).  @pad is
 * accepted for interface compatibility but unused here.  Returns 0 or
 * -EINVAL when the TFD already holds IL_NUM_OF_TBS buffers.
 */
int
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	struct il_queue *q;
	struct il_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",
		       IL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* The device can only address 36 bits of DMA space. */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
int
il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address.
	 * The register takes the address in 256-byte units, hence >> 8. */
	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);

	return 0;
}
/******************************************************************************
 *
 * Generic RX handler implementations
 *
 ******************************************************************************/
/*
 * il4965_hdl_alive - handle the uCode ALIVE notification
 *
 * Distinguishes the init-image ALIVE from the runtime-image ALIVE by
 * ver_subtype, caches the corresponding response structure, and — when
 * the uCode reports a valid load — schedules the matching start work.
 */
static void
il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct il_init_alive_resp));
		pwork = &il->init_alive_start;
	} else {
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
	else
		IL_WARN("uCode did not respond OK.\n");
}
/**
 * il4965_bg_stats_periodic - Timer callback to queue stats
 *
 * This callback is provided in order to send a stats request.
 *
 * This timer function is continually reset to execute within
 * 60 seconds since the last N_STATS was received. We need to
 * ensure we receive the stats in order to update the temperature
 * used for calibrating the TXPOWER.
 */
static void
il4965_bg_stats_periodic(unsigned long data)
{
	struct il_priv *il = (struct il_priv *)data;

	/* Nothing to do while the driver is shutting down. */
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* dont send host command if rf-kill is on */
	if (!il_is_ready_rf(il))
		return;

	il_send_stats_request(il, CMD_ASYNC, false);
}
/*
 * il4965_hdl_beacon - handle a beacon Tx notification from the uCode
 *
 * Records whether this interface is currently the IBSS beacon manager;
 * the rest of the notification is only interesting for debug output.
 */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif
	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
/*
 * il4965_perform_ct_kill_task - react to a critical-temperature kill
 *
 * Stops all mac80211 queues, asks the uCode to exit CT-kill via the
 * driver GP1 register (read back to flush the write), then briefly grabs
 * and releases NIC access under reg_lock — NOTE(review): presumably to
 * let the device settle into its low-power state; confirm against the
 * CSR programming flow.
 */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	_il_rd(il, CSR_UCODE_DRV_GP1);

	spin_lock_irqsave(&il->reg_lock, flags);
	if (likely(_il_grab_nic_access(il)))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of the status bits before this notification, used below
	 * to detect an RF-kill state transition. */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block host commands while the card is disabled ... */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ... but lift the block again when only the RXON path is
		 * disabled, not the whole card. */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* Only notify wiphy when the RF-kill state actually changed;
	 * otherwise just wake anyone waiting on the command queue. */
	if ((test_bit(S_RFKILL, &status) !=
	     test_bit(S_RFKILL, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RFKILL, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
/**
 * il4965_setup_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.  Entries not assigned here stay NULL and are reported as
 * "No handler needed" by il4965_rx_handle().
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void
il4965_setup_handlers(struct il_priv *il)
{
	il->handlers[N_ALIVE] = il4965_hdl_alive;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il4965_hdl_beacon;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * stats request from the host as well as for the periodic
	 * stats notifications (after received beacons) from the uCode.
	 */
	il->handlers[C_STATS] = il4965_hdl_c_stats;
	il->handlers[N_STATS] = il4965_hdl_stats;

	il_setup_rx_scan_handlers(il);

	/* status change handler */
	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;

	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
	/* Rx handlers */
	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
	il->handlers[N_RX] = il4965_hdl_rx;
	/* block ack */
	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
	/* Tx response */
	il->handlers[C_TX] = il4965_hdl_tx;
}
/**
 * il4965_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the il->handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
void
il4965_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	/* Starts at 8 so that, once fill_rx is set, the very first
	 * processed buffer triggers an immediate replenish below. */
	u32 count = 8;
	int total_empty;

	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* More than half the ring is empty: replenish inside the loop too. */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);	/* account for status word */

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
		    (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
		    (pkt->hdr.cmd != N_RX_MPDU) &&
		    (pkt->hdr.cmd != N_COMPRESSED_BA) &&
		    (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   handlers table.  See il4965_setup_handlers() */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
		} else {
			/* No handling needed */
			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking il_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				il_tx_cmd_complete(il, rxb);
			else
				IL_WARN("Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_used list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma =
			    pci_map_page(il->pci_dev, rxb->page, 0,
					 PAGE_SIZE << il->hw_params.
					 rx_page_order, PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				il4965_rx_replenish_now(il);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		il4965_rx_replenish_now(il);
	else
		il4965_rx_queue_restock(il);
}
/* call this function to flush any scheduled tasklet:
 * first wait out a possibly in-flight hard IRQ (which may schedule the
 * tasklet), then kill the tasklet itself -- the order matters. */
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
/* Bottom half of the interrupt handler: ack all pending interrupt causes
 * from CSR_INT / CSR_FH_INT_STATUS and service each discovered bit in
 * turn.  On a hardware error this returns early WITHOUT re-enabling
 * interrupts (il_irq_handle_error() schedules the recovery path). */
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Early return: interrupts stay disabled during recovery. */
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;

		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(S_ALIVE, &il->status)) {
			if (hw_rf_kill)
				set_bit(S_RFKILL, &il->status);
			else
				clear_bit(S_RFKILL, &il->status);
			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
/*****************************************************************************
*
* sysfs attributes
*
*****************************************************************************/
#ifdef CONFIG_IWLEGACY_DEBUG

/*
 * The following adds a new attribute to the sysfs representation
 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
 * used for controlling the debug level.
 *
 * See the level definitions in iwl for details.
 *
 * The debug_level being managed using sysfs below is a per device debug
 * level that is used instead of the global debug level if it (the per
 * device debug level) is set.
 */

/* sysfs read: print the effective debug level as 0x-prefixed hex. */
static ssize_t
il4965_show_debug_level(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
}

/* sysfs write: parse the buffer (hex with 0x prefix, or decimal) into
 * the per-device debug level.  On parse failure only a message is
 * logged; the full input length is reported as consumed either way. */
static ssize_t
il4965_store_debug_level(struct device *d, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct il_priv *il = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		IL_ERR("%s is not in hex or decimal form.\n", buf);
	else
		il->debug_level = val;

	return strnlen(buf, count);
}

static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
		   il4965_store_debug_level);

#endif /* CONFIG_IWLEGACY_DEBUG */
/* sysfs read: report the current chip temperature, or -EAGAIN while the
 * uCode is not alive (no valid measurement available yet). */
static ssize_t
il4965_show_temperature(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);

	if (il_is_alive(il))
		return sprintf(buf, "%d\n", il->temperature);

	return -EAGAIN;
}

static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
/* sysfs read: report the user tx-power limit, or "off" while the radio
 * is not ready (rf-kill or not initialized). */
static ssize_t
il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);

	if (il_is_ready_rf(il))
		return sprintf(buf, "%d\n", il->tx_power_user_lmt);

	return sprintf(buf, "off\n");
}
/* sysfs write: parse a decimal tx-power value and apply it via
 * il_set_tx_power().  Returns the consumed byte count on success, or the
 * (negative) error from parsing / il_set_tx_power() on failure. */
static ssize_t
il4965_store_tx_power(struct device *d, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct il_priv *il = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 10, &val);
	if (ret) {
		IL_INFO("%s is not in decimal form.\n", buf);
		return ret;
	}

	ret = il_set_tx_power(il, val, false);
	if (ret) {
		IL_ERR("failed setting tx power (0x%d).\n", ret);
		return ret;
	}

	return count;
}

static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
		   il4965_store_tx_power);
/* Attributes exposed in the device's sysfs directory; debug_level is
 * present only in CONFIG_IWLEGACY_DEBUG builds. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL			/* sentinel required by sysfs */
};

static struct attribute_group il_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il_sysfs_entries,
};
/******************************************************************************
 *
 * uCode download functions
 *
 ******************************************************************************/

/* Free every DMA-coherent firmware image buffer previously set up by
 * il_alloc_fw_desc() (il_free_fw_desc is safe on never-allocated descs). */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
/* Release the NIC from reset so it can begin executing. */
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
static void il4965_ucode_callback(const struct firmware *ucode_raw,
void *context);
static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
/*
 * Kick off an asynchronous firmware load.  On the first call the API
 * index starts at the maximum the driver supports; each retry steps one
 * API version down until ucode_api_min is passed, at which point -ENOENT
 * is returned.  Completion is delivered to il4965_ucode_callback().
 */
static int __must_check
il4965_request_firmware(struct il_priv *il, bool first)
{
	const char *name_pre = il->cfg->fw_name_pre;
	char tag[8];

	if (first)
		il->fw_idx = il->cfg->ucode_api_max;
	else
		il->fw_idx--;

	if (il->fw_idx < il->cfg->ucode_api_min) {
		IL_ERR("no suitable firmware found!\n");
		return -ENOENT;
	}

	sprintf(tag, "%d", il->fw_idx);
	sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");

	D_INFO("attempting to load firmware '%s'\n", il->firmware_name);

	return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
				       &il->pci_dev->dev, GFP_KERNEL, il,
				       il4965_ucode_callback);
}
/* Pointers into (and sizes of) the five image sections of a raw ucode
 * file: runtime inst/data, init inst/data, and bootstrap inst. */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
/*
 * Parse the raw ucode file header (v1 layout, 24 bytes) and fill in
 * @pieces with pointers/sizes of the five image sections that follow it.
 * Returns 0 on success or -EINVAL if the file is too small or its total
 * size disagrees with the per-section sizes in the header.
 */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* All supported API versions use the 24-byte v1 header. */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header.
	 * NOTE(review): the sum of five untrusted u32 sizes could wrap;
	 * a corrupted image with huge section sizes might still match a
	 * small file size here -- confirm whether an overflow check is
	 * warranted. */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* Sections are laid out back-to-back after the header. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
/**
 * il4965_ucode_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers
 * for the card to fetch (via DMA).
 *
 * Error paths:
 *  - try_again:    retry with the next-lower firmware API version;
 *  - err_pci_alloc: free DMA buffers, then fall through to out_unbind;
 *  - out_unbind:   give up entirely and release the driver binding.
 */
static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct il_priv *il = context;
	struct il_ucode_header *ucode;
	int err;
	struct il4965_firmware_pieces pieces;
	const unsigned int api_max = il->cfg->ucode_api_max;
	const unsigned int api_min = il->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	/* NULL ucode_raw means the request itself failed; retry with a
	 * lower API version (release_firmware(NULL) below is a no-op). */
	if (!ucode_raw) {
		if (il->fw_idx <= il->cfg->ucode_api_max)
			IL_ERR("request for firmware file '%s' failed.\n",
			       il->firmware_name);
		goto try_again;
	}

	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
	       ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IL_ERR("File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file:  header followed by uCode images */
	ucode = (struct il_ucode_header *)ucode_raw->data;

	err = il4965_load_firmware(il, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IL_UCODE_API(il->ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	if (api_ver < api_min || api_ver > api_max) {
		IL_ERR("Driver unable to support your firmware API. "
		       "Driver supports v%u, firmware is v%u.\n", api_max,
		       api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IL_ERR("Firmware has old API version. Expected v%u, "
		       "got v%u. New firmware can be obtained "
		       "from http://www.intellinuxwireless.org.\n", api_max,
		       api_ver);

	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));

	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
		 IL_UCODE_SERIAL(il->ucode_ver));

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
	D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
	D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
	D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
	D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
	D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode instr len %Zd too large to fit in\n",
		       pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode data len %Zd too large to fit in\n",
		       pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode init instr len %Zd too large to fit in\n",
		       pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode init data len %Zd too large to fit in\n",
		       pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > il->hw_params.max_bsm_size) {
		IL_ERR("uCode boot instr len %Zd too large to fit in\n",
		       pieces.boot_size);
		goto try_again;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	il->ucode_code.len = pieces.inst_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);

	il->ucode_data.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);

	il->ucode_data_backup.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);

	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
	    !il->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		il->ucode_init.len = pieces.init_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);

		il->ucode_init_data.len = pieces.init_data_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);

		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		il->ucode_boot.len = pieces.boot_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);

		if (!il->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that we can no longer fail, copy information */

	il->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into buffers for card's bus-master reads ... */

	/* Runtime instructions (first block of data in file) */
	D_INFO("Copying (but not loading) uCode instr len %Zd\n",
	       pieces.inst_size);
	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);

	/*
	 * Runtime data
	 * NOTE:  Copy into backup buffer will be done in il_up()
	 */
	D_INFO("Copying (but not loading) uCode data len %Zd\n",
	       pieces.data_size);
	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions */
	if (pieces.init_size) {
		D_INFO("Copying (but not loading) init instr len %Zd\n",
		       pieces.init_size);
		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data */
	if (pieces.init_data_size) {
		D_INFO("Copying (but not loading) init data len %Zd\n",
		       pieces.init_data_size);
		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions */
	D_INFO("Copying (but not loading) boot instr len %Zd\n",
	       pieces.boot_size);
	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	il->_4965.phy_calib_chain_noise_reset_cmd =
	    standard_phy_calibration_size;
	il->_4965.phy_calib_chain_noise_gain_cmd =
	    standard_phy_calibration_size + 1;

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 9. Setup and register with mac80211 and debugfs
	 **************************************************/
	err = il4965_mac_setup_register(il, max_probe_length);
	if (err)
		goto out_unbind;

	err = il_dbgfs_register(il, DRV_NAME);
	if (err)
		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
		       err);

	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	complete(&il->_4965.firmware_loading_complete);
	return;

try_again:
	/* try next, if any */
	if (il4965_request_firmware(il, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

err_pci_alloc:
	IL_ERR("failed to allocate pci memory\n");
	il4965_dealloc_ucode_pci(il);
out_unbind:
	complete(&il->_4965.firmware_loading_complete);
	device_release_driver(&il->pci_dev->dev);
	release_firmware(ucode_raw);
}
/* Basic uCode error descriptions, indexed directly by error code
 * (see il4965_desc_lookup()). */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
/* Sparse error-code -> name table for codes beyond the basic list.
 * The last entry ("ADVANCED_SYSASSERT", 0) is the catch-all default
 * returned when no code matches. */
static struct {
	char *name;
	u8 num;
} advanced_lookup[] = {
	{
	"NMI_INTERRUPT_WDG", 0x34}, {
	"SYSASSERT", 0x35}, {
	"UCODE_VERSION_MISMATCH", 0x37}, {
	"BAD_COMMAND", 0x38}, {
	"NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
	"FATAL_ERROR", 0x3D}, {
	"NMI_TRM_HW_ERR", 0x46}, {
	"NMI_INTERRUPT_TRM", 0x4C}, {
	"NMI_INTERRUPT_BREAK_POINT", 0x54}, {
	"NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
	"NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
	"NMI_INTERRUPT_HOST", 0x66}, {
	"NMI_INTERRUPT_ACTION_PT", 0x7C}, {
	"NMI_INTERRUPT_UNKNOWN", 0x84}, {
	"NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
	"ADVANCED_SYSASSERT", 0},};
/* Map a uCode error code to a human-readable name: small codes index
 * desc_lookup_text directly; larger ones are searched for in
 * advanced_lookup, whose final entry is the catch-all default. */
static const char *
il4965_desc_lookup(u32 num)
{
	size_t idx;

	if (num < ARRAY_SIZE(desc_lookup_text))
		return desc_lookup_text[num];

	for (idx = 0; idx < ARRAY_SIZE(advanced_lookup) - 1; idx++)
		if (advanced_lookup[idx].num == num)
			return advanced_lookup[idx].name;

	/* Not found: report "ADVANCED_SYSASSERT" (last table entry). */
	return advanced_lookup[ARRAY_SIZE(advanced_lookup) - 1].name;
}
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

/* Dump the uCode's error-event table (located via the alive response's
 * error_event_table_ptr) to the kernel log. */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* The init and runtime images keep separate error tables. */
	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->ops->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* First word of the table is the number of error entries. */
	count = il_read_targ_mem(il, base);

	/* NOTE(review): this condition is true for any count >= 1; it
	 * looks like it was meant to validate count, confirm intent. */
	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* Fixed word offsets within the first error record. */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc                                  Time       "
	       "data1      data2      line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
/*
 * Configure the critical-temperature (CT-kill) threshold in the uCode:
 * clear the CT-kill exit bit, then send C_CT_KILL_CONFIG carrying the
 * threshold from hw_params.
 */
static void
il4965_rf_kill_ct_config(struct il_priv *il)
{
	struct il_ct_kill_config cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&il->lock, flags);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&il->lock, flags);

	/*
	 * Zero the whole command before filling it in: only
	 * critical_temperature_R is assigned here, so any other members
	 * of struct il_ct_kill_config (reserved words etc.) would
	 * otherwise be sent to the device as uninitialized stack data.
	 * NOTE(review): confirm the struct layout in commands.h.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.critical_temperature_R =
	    cpu_to_le32(il->hw_params.ct_kill_threshold);

	ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
	if (ret)
		IL_ERR("C_CT_KILL_CONFIG failed\n");
	else
		D_INFO("C_CT_KILL_CONFIG " "succeeded, "
		       "critical temperature is %d\n",
		       il->hw_params.ct_kill_threshold);
}
/* Default Tx-queue -> FIFO mapping used by il4965_alive_notify():
 * four AC queues, the command FIFO, and two unused slots. */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};

/* Bitmask with bits lo..hi (inclusive) set. */
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
/* Program the 4965's Tx scheduler after the ALIVE notification: clear
 * scheduler SRAM, point the device at the byte-count tables, enable the
 * Tx DMA channels, initialize every Tx queue's context, and map each
 * queue to its FIFO per default_queue_to_tx_fifo[].  Returns 0. */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tel 4965 where to find Tx byte count tables */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write idxes */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	il->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
/**
 * il4965_alive_start - called after N_ALIVE notification received
 *                 from protocol/runtime uCode (initialization uCode's
 *                 Alive gets handled by il_init_alive_start()).
 *
 * Validates the alive response and the loaded image, programs the Tx
 * scheduler, then brings the device to operational state (S_ALIVE and
 * S_READY).  Any failure queues the restart work.
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded.  */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(S_ALIVE, &il->status);

	/* Enable watchdog to monitor the driver tx queues */
	il_setup_watchdog(il);

	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;
		/* apply any changes in staging */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		il_connection_init_rx_config(il);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* Configure bluetooth coexistence if enabled */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter for unassociated operation */
	il_commit_rxon(il);

	/* At this point, the NIC is initialized and operational */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
static void il4965_cancel_deferred_work(struct il_priv *il);
/*
 * Take the NIC all the way down: cancel scanning, stop the watchdog,
 * drop station/key state, quiesce interrupts, stop DMA and put the
 * device into low power.  Must be called with il->mutex held (the
 * il4965_down() wrapper takes it).  Statement order here mirrors the
 * hardware shutdown sequence and must not be reordered.
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	/* Remember whether EXIT_PENDING was already set (module unload);
	 * if we set it here ourselves, we clear it again further down. */
	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il);

	/* FIXME: race conditions ? */
	spin_lock_irq(&il->sta_lock);
	/*
	 * Remove all key information that is not stored as part
	 * of station information since mac80211 may not have had
	 * a chance to remove all the keys. When device is
	 * reconfigured by mac80211 after an error all keys will
	 * be reconfigured.
	 */
	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
	il->_4965.key_mapping_keys = 0;
	spin_unlock_irq(&il->sta_lock);

	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any waiting calls */
	wake_up_all(&il->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If we have not previously called il_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	il->status &=
	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/*
	 * We disabled and synchronized interrupt, and priv->mutex is taken, so
	 * here is the only thread which will program device registers, but
	 * still have lockdep assertions, so we are taking reg_lock.
	 */
	spin_lock_irq(&il->reg_lock);
	/* FIXME: il_grab_nic_access if rfkill is off ? */

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);
	/* Power-down device's busmaster DMA clocks */
	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);
	/* Make sure (redundant) we've released our request to stay awake */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/* Stop the device, and put it in low power state */
	_il_apm_stop(il);

	spin_unlock_irq(&il->reg_lock);

	il4965_txq_ctx_unmap(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il4965_clear_free_frames(il);
}
/*
 * Mutex-taking wrapper around __il4965_down().  Deferred work is
 * cancelled only after the mutex is dropped, since some work items
 * themselves take il->mutex (cancelling under the lock would deadlock).
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
/*
 * Request the NIC_READY handshake bit and poll (up to 100 us) for the
 * hardware to acknowledge it.  On success il->hw_ready is set to true;
 * on timeout the flag is left untouched (the caller initializes it).
 */
static void
il4965_set_hw_ready(struct il_priv *il)
{
	int poll;

	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	poll = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 100);
	if (poll >= 0)
		il->hw_ready = true;

	D_INFO("hardware %s ready\n", il->hw_ready ? "" : "not");
}
/*
 * Bring the card to the "ready" handshake state, retrying via the
 * PREPARE mechanism if the first attempt fails.  The result is reported
 * through il->hw_ready rather than a return value.
 */
static void
il4965_prepare_card_hw(struct il_priv *il)
{
	int ret;

	il->hw_ready = false;

	il4965_set_hw_ready(il);
	if (il->hw_ready)
		return;

	/* If HW is not ready, prepare the conditions to check again */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);

	/* Poll with inverted bits/mask — presumably this waits (up to
	 * 150 ms) for PREPARE_DONE to *clear*; confirm against the
	 * _il_poll_bit() contract before changing. */
	ret =
	    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		il4965_set_hw_ready(il);
}
#define MAX_HW_RESTARTS 5
/*
 * Bring the NIC up: allocate the broadcast station, ready the card,
 * check the HW rfkill switch, init the NIC and load the bootstrap
 * uCode (retried up to MAX_HW_RESTARTS times).  Returns 0 on success
 * or when rfkill is asserted (bring-up resumes when it is released),
 * negative errno otherwise.  Caller must hold il->mutex.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_ERR("HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else {
		set_bit(S_RFKILL, &il->status);
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		il_enable_rfkill_int(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		/* Not an error: the rfkill interrupt restarts bring-up */
		return 0;
	}

	/* Ack/clear all pending interrupts before enabling anything */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before il_hw_nic_init */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = il->ops->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All restart attempts failed: take the NIC back down.
	 * EXIT_PENDING is set only around the down call so a later
	 * bring-up attempt is still possible. */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
/*****************************************************************************
*
* Workqueue callbacks
*
*****************************************************************************/
/*
 * Deferred-work handler for the "init" uCode Alive notification;
 * dispatches to the per-device init_alive_start op under il->mutex
 * unless the driver is shutting down.
 */
static void
il4965_bg_init_alive_start(struct work_struct *data)
{
	struct il_priv *il;

	il = container_of(data, struct il_priv, init_alive_start.work);

	mutex_lock(&il->mutex);
	if (!test_bit(S_EXIT_PENDING, &il->status))
		il->ops->init_alive_start(il);
	mutex_unlock(&il->mutex);
}
/*
 * Deferred-work handler for the runtime uCode Alive notification;
 * runs il4965_alive_start() under il->mutex unless the driver is
 * shutting down.
 */
static void
il4965_bg_alive_start(struct work_struct *data)
{
	struct il_priv *il;

	il = container_of(data, struct il_priv, alive_start.work);

	mutex_lock(&il->mutex);
	if (!test_bit(S_EXIT_PENDING, &il->status))
		il4965_alive_start(il);
	mutex_unlock(&il->mutex);
}
/*
 * Deferred-work handler for runtime calibration.  Skipped while the
 * driver is exiting or a scan is in progress; otherwise runs chain
 * noise and sensitivity calibration once calibration has been started.
 */
static void
il4965_bg_run_time_calib_work(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv,
					  run_time_calib_work);

	mutex_lock(&il->mutex);

	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status))
		goto unlock;

	if (il->start_calib) {
		il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
		il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
	}

unlock:
	mutex_unlock(&il->mutex);
}
/*
 * Deferred-work handler for device restart.  Two distinct paths:
 * on a firmware error, tear down locally and let mac80211 drive the
 * restart via ieee80211_restart_hw(); otherwise perform a full local
 * down/up cycle.  Note the mutex is dropped and re-taken between the
 * down and up halves of the second path, so EXIT_PENDING is rechecked.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		/* mark closed so mac80211's restart path reopens us */
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		il4965_cancel_deferred_work(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		mutex_lock(&il->mutex);
		/* Shutdown may have started while we were down */
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
/*
 * Deferred-work handler that refills the RX queue with fresh buffers;
 * skipped entirely when the driver is shutting down.
 */
static void
il4965_bg_rx_replenish(struct work_struct *data)
{
	struct il_priv *il;

	il = container_of(data, struct il_priv, rx_replenish);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	mutex_lock(&il->mutex);
	il4965_rx_replenish(il);
	mutex_unlock(&il->mutex);
}
/*****************************************************************************
*
* mac80211 entry point functions
*
*****************************************************************************/
#define UCODE_READY_TIMEOUT (4 * HZ)
/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 *
 * Advertises driver/hardware capabilities to mac80211 and registers
 * the ieee80211_hw.  @max_probe_length bounds the scan IE space the
 * firmware can carry.  Returns 0 on success or the negative errno
 * from ieee80211_register_hw().
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags =
	    IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
	    IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
	    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS support only makes sense on 802.11n-capable SKUs */
	if (il->cfg->sku & IL_SKU_N)
		hw->flags |=
		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |=
	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element
	 * ourselves: 24-byte header + 2-byte empty SSID IE */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	/* Only export bands that actually have channels */
	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		    &il->bands[IEEE80211_BAND_2GHZ];
	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		    &il->bands[IEEE80211_BAND_5GHZ];

	il_leds_init(il);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
/*
 * mac80211 start callback: bring the NIC up and, unless rfkill is
 * asserted, wait for the runtime uCode's ALIVE handshake (S_READY)
 * before reporting the interface open.  Returns 0 on success (or
 * under rfkill), negative errno on bring-up failure or timeout.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	/* Under rfkill __il4965_up() returns 0 without loading uCode;
	 * just mark the interface open and wait for the switch. */
	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	/* wait_event_timeout() returns 0 on timeout; re-check S_READY to
	 * tolerate a wakeup that raced with the timer */
	if (!ret) {
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
/*
 * mac80211 stop callback: take the NIC down and flush deferred work.
 * The rfkill interrupt is re-enabled afterwards so user space keeps
 * seeing kill-switch changes while the interface is down.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	flush_workqueue(il->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
/*
 * mac80211 TX callback: hand the frame to the 4965 TX path.  mac80211
 * expects the skb to always be consumed, so on failure we free it here.
 */
void
il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct il_priv *priv = hw->priv;

	D_MACDUMP("enter\n");

	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	/* Non-zero means the TX path did not take ownership of the skb */
	if (il4965_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	D_MACDUMP("leave\n");
}
/*
 * mac80211 callback: refresh the hardware TKIP phase-1 key material
 * for the given station when the IV32 rolls over.
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *priv = hw->priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(priv, keyconf, sta, iv32, phase1key);

	D_MAC80211("leave\n");
}
/*
 * mac80211 set_key callback.  Distinguishes "default" (legacy,
 * group-only) WEP keys from per-station mapping keys: the two are
 * programmed into the uCode with different host commands.  Returns
 * -EOPNOTSUPP when software crypto is forced via module parameter.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* NULL sta maps to the broadcast station (group keys) */
	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !il->_4965.key_mapping_keys;
		else
			/* On removal, trust how the key was installed */
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = il4965_set_default_wep_key(il, key);
		else
			ret = il4965_set_dynamic_key(il, key, sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, key);
		else
			ret = il4965_remove_dynamic_key(il, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
/*
 * mac80211 A-MPDU (aggregation) control callback.  Only available on
 * 802.11n-capable SKUs.  During shutdown (S_EXIT_PENDING) the RX/TX
 * stop paths report success so mac80211's teardown can proceed.
 * Actions not handled by the switch fall through with the initial
 * ret = -EINVAL (there is deliberately no default case).
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum ieee80211_ampdu_mlme_action action,
			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
			u8 buf_size)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* nothing to program; aggregation is already active */
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
/*
 * mac80211 sta_add callback: create a uCode station entry for the
 * peer and initialize rate scaling for it.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	/* NOTE(review): on a STATION vif the peer being added is our AP,
	 * hence the is_ap flag — confirm against il_add_station_common()
	 * before relying on this reading. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
/*
 * mac80211 channel_switch callback: stage the new channel/HT
 * configuration in il->staging and ask the device to perform the
 * switch.  Bails out silently (goto out) for any state in which a
 * switch is not possible; on a failed device command the staged
 * switch is rolled back and mac80211 is told the switch failed.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated(il))
		goto out;

	if (!il->ops->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* Already on the requested channel: nothing to do */
	if (le16_to_cpu(il->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	il->ht.enabled = conf_is_ht(conf);
	if (il->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			il->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			il->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			il->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			il->ht.is_40mhz = true;
		} else {
			il->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_NONE;
			il->ht.is_40mhz = false;
		}
	} else
		il->ht.is_40mhz = false;

	/* Band/flags must be re-derived when the channel changes */
	if ((le16_to_cpu(il->staging.channel) != ch))
		il->staging.flags = 0;

	il_set_rxon_channel(il, channel);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, channel->band, il->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->ops->set_channel_switch(il, ch_switch)) {
		/* Device refused the switch: undo the pending state and
		 * report failure to mac80211 */
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(il->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
/*
 * mac80211 configure_filter callback: translate FIF_* filter flags
 * into RXON filter bits staged in il->staging.  The change is not
 * committed here (a scan may be running); it is picked up by the next
 * RXON commit.  On return *total_flags reflects what we can support.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* Accumulate the RXON bits to set (filter_or) and to clear
 * (filter_nand) for each requested FIF_* flag */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
/*****************************************************************************
*
* driver setup and teardown
*
*****************************************************************************/
/*
 * Deferred-work handler that reprograms TX power after a temperature
 * change.  Skipped while exiting or scanning; in that case the stats
 * notification will schedule another pass to pick up the delta.
 */
static void
il4965_bg_txpower_work(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv,
					  txpower_work);

	mutex_lock(&il->mutex);

	if (!test_bit(S_EXIT_PENDING, &il->status) &&
	    !test_bit(S_SCANNING, &il->status)) {
		/* Regardless of if we are associated, we must reconfigure the
		 * TX power since frames can be sent on non-radar channels while
		 * not associated */
		il->ops->send_tx_power(il);

		/* Update last_temperature to keep is_calib_needed from running
		 * when it isn't needed... */
		il->last_temperature = il->temperature;
	}

	mutex_unlock(&il->mutex);
}
/*
 * Create the driver workqueue and wire up all deferred work items,
 * timers and the IRQ tasklet.  Runs once during probe.
 *
 * NOTE(review): the return value of create_singlethread_workqueue()
 * is not checked; if allocation failed, il->workqueue would be NULL
 * and later queue_work() calls would oops — confirm whether callers
 * guard against this.
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	/* Periodic statistics timer */
	init_timer(&il->stats_periodic);
	il->stats_periodic.data = (unsigned long)il;
	il->stats_periodic.function = il4965_bg_stats_periodic;

	/* TX queue stall watchdog */
	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
/*
 * Cancel all deferred work scheduled by il4965_setup_deferred_work().
 * NOTE(review): alive_start uses the non-sync cancel while the others
 * use *_sync — presumably because this can be called from paths where
 * waiting on alive_start would deadlock; confirm before changing.
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
/*
 * Fill the mac80211 legacy rate table.  Hardware rate values are
 * simply the table indexes (rate scaling works on indexes), bitrates
 * come from il_rates in units of 100 kbps, and every CCK rate except
 * 1 Mbps gets the short-preamble capability flag.
 */
static void
il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
{
	int idx;

	for (idx = 0; idx < RATE_COUNT_LEGACY; idx++) {
		struct ieee80211_rate *r = &rates[idx];

		r->bitrate = il_rates[idx].ieee * 5;
		r->hw_value = idx;	/* Rate scaling will work on idxes */
		r->hw_value_short = idx;
		r->flags = 0;
		/* CCK rates other than 1M support short preamble */
		if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE &&
		    il_rates[idx].plcp != RATE_1M_PLCP)
			r->flags |= IEEE80211_RATE_SHORT_PREAMBLE;
	}
}
/*
 * Acquire il->lock before calling this function !
 *
 * Point both the driver-side write pointer and the scheduler's read
 * pointer of @txq_id at @idx (byte-range writes are order-sensitive:
 * WRPTR first, then the scheduler register).
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
void
il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
int tx_fifo_id, int scd_retry)
{
int txq_id = txq->q.id;
/* Find out whether to activate Tx queue */
int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
/* Set up and activate */
il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
(active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
(tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
(scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
(scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
IL49_SCD_QUEUE_STTS_REG_MSK);
txq->sched_retry = scd_retry;
D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
/* mac80211 callback table for the 4965: 4965-specific handlers are
 * prefixed il4965_, shared iwlegacy handlers just il_. */
const struct ieee80211_ops il4965_mac_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
};
/*
 * Initialize driver-internal state: locks, defaults, the regulatory
 * channel map and the geo/band tables.  Returns 0 on success; on
 * failure everything allocated so far is unwound via the goto chain.
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = IEEE80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	il_init_scan_params(il);

	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
/* Release everything allocated by il4965_init_drv() plus the scan
 * command buffer. */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
/* Cache the hardware revision registers and the PCI revision ID so
 * later code need not touch CSR space for them. */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
/* Sensitivity calibration bounds for the 4965: allowed ranges for the
 * energy thresholds and auto-correlation values tuned at runtime by
 * il4965_sensitivity_calibration(). */
static struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
/*
 * Populate il->hw_params with 4965-specific sizes, limits and antenna
 * configuration, applying module-parameter overrides (amsdu_size_8K,
 * disable_11n, num_of_queues) where given.
 */
static void
il4965_set_hw_params(struct il_priv *il)
{
	il->hw_params.bcast_id = IL4965_BROADCAST_ID;
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	if (il->cfg->mod_params->amsdu_size_8K)
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
	else
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);

	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;

	if (il->cfg->mod_params->disable_11n)
		il->cfg->sku &= ~IL_SKU_N;

	/* Honor the num_of_queues module parameter only within HW limits */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);

	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* HT40 is supported on the 5 GHz band only */
	il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il->hw_params.ct_kill_threshold =
	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
}
/*
 * PCI probe: allocate the ieee80211_hw/il_priv pair, set up PCI/DMA,
 * map registers, read the EEPROM, initialize driver state and kick off
 * the asynchronous firmware request.  Returns 0 on success, negative
 * errno on failure (all partially-acquired resources are unwound via
 * the goto chain at the bottom).
 *
 * Fixes over the previous version:
 *  - set err = -EIO when the HW never becomes ready: err was still 0
 *    at that point (last set by a successful call), so the probe freed
 *    everything yet reported success to the PCI core.
 *  - removed a duplicated "if (err) goto out_free_eeprom;" after the
 *    EEPROM version check (dead code; err cannot change in between).
 */
static int
il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd;

	/************************
	 * 1. Allocating HW data
	 ************************/

	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	il->hw = hw;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->ops = &il4965_ops;
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il->debugfs_ops = &il4965_debugfs_ops;
#endif
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/**************************
	 * 2. Initializing PCI bus
	 **************************/
	/* ASPM interferes with this hardware; keep the link fully up */
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* Prefer the 36-bit DMA mask this device supports; fall back to
	 * 32-bit if the platform cannot provide it */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err =
			    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			IL_WARN("No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, il);

	/***********************
	 * 3. Read REV register
	 ***********************/
	il->hw_base = pci_ioremap_bar(pdev, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	il4965_hw_detect(il);
	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_WARN("Failed, HW not ready\n");
		/* err is still 0 here (last set by a successful call);
		 * without this the probe would free everything below yet
		 * report success to the PCI core */
		err = -EIO;
		goto out_iounmap;
	}

	/*****************
	 * 4. Read EEPROM
	 *****************/
	/* Read the EEPROM */
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = il4965_eeprom_check_version(il);
	if (err)
		goto out_free_eeprom;

	/* extract MAC Address */
	il4965_eeprom_get_mac(il, il->addresses[0].addr);
	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
	il->hw->wiphy->addresses = il->addresses;
	il->hw->wiphy->n_addresses = 1;

	/************************
	 * 5. Setup HW constants
	 ************************/
	il4965_set_hw_params(il);

	/*******************
	 * 6. Setup il
	 *******************/

	err = il4965_init_drv(il);
	if (err)
		goto out_free_eeprom;
	/* At this point both hw and il are initialized. */

	/********************
	 * 7. Setup services
	 ********************/
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	il4965_setup_deferred_work(il);
	il4965_setup_handlers(il);

	/*********************************************
	 * 8. Enable interrupts and read RFKILL state
	 *********************************************/

	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
	}

	il_enable_rfkill_int(il);

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else
		set_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy,
				  test_bit(S_RFKILL, &il->status));

	il_power_initialize(il);

	init_completion(&il->_4965.firmware_loading_complete);

	/* Asynchronous: the firmware callback finishes registration */
	err = il4965_request_firmware(il, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

out_destroy_workqueue:
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il4965_uninit_drv(il);
out_free_eeprom:
	il_eeprom_free(il);
out_iounmap:
	iounmap(il->hw_base);
out_pci_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	ieee80211_free_hw(il->hw);
out:
	return err;
}
/*
 * PCI remove callback: tear down the 4965 device in (roughly) the reverse
 * order of probe.  Bails out early if probe never attached driver data to
 * the PCI device.
 */
static void __devexit
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* Probe may still be loading firmware asynchronously; wait for it. */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/* ieee80211_unregister_hw() will cause il_mac_stop() and hence
	 * il4965_down() to be called; since we are removing the device we
	 * need to set the S_EXIT_PENDING bit first. */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		/* mac80211 never saw the device; bring it down directly. */
		il4965_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il4965_down(), but there are paths to
	 * run il4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/*netif_stop_queue(dev); */

	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw calls il_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	il4965_uninit_drv(il);

	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
/*
 * Activate/deactivate Tx DMA/FIFO channels according to the tx fifos mask.
 * Must be called with il->lock held and MAC access enabled.
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	/* Each bit of @mask enables (1) or disables (0) the corresponding
	 * channel in the scheduler's TXFACT register. */
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
/*****************************************************************************
*
* driver and module entry point
*
*****************************************************************************/
/* Hardware specific file defines the PCI IDs table for that hardware module */
/* PCI IDs of the 4965 variants this driver binds to; both map to the
 * same il4965_cfg. */
static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}	/* terminator */
};

MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);

/* PCI driver glue: probe/remove callbacks plus legacy PM ops. */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = __devexit_p(il4965_pci_remove),
	.driver.pm = IL_LEGACY_PM_OPS,
};
/*
 * Module entry point: print the banner, register the rate-control
 * algorithm, then register the PCI driver.  Unwinds the rate-control
 * registration if the PCI step fails.
 */
static int __init
il4965_init(void)
{
	int err;

	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
	pr_info(DRV_COPYRIGHT "\n");

	err = il4965_rate_control_register();
	if (err) {
		pr_err("Unable to register rate control algorithm: %d\n", err);
		return err;
	}

	err = pci_register_driver(&il4965_driver);
	if (!err)
		return 0;

	pr_err("Unable to initialize PCI module\n");
	il4965_rate_control_unregister();
	return err;
}
/* Module exit: unregister the PCI driver first (so no device remains
 * bound), then drop the rate-control algorithm. */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}

module_exit(il4965_exit);
module_init(il4965_init);
/* Module parameters. */
#ifdef CONFIG_IWLEGACY_DEBUG
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
		   S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
| gpl-2.0 |
zhaochengw/ef40s_jb_kernel | drivers/i2c/busses/i2c-sis96x.c | 4117 | 9045 | /*
Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
This module must be considered BETA unless and until
the chipset manufacturer releases a datasheet.
The register definitions are based on the SiS630.
This module relies on quirk_sis_96x_smbus (drivers/pci/quirks.c)
for just about every machine for which users have reported.
If this module isn't detecting your 96x south bridge, have a
look there.
We assume there can only be one SiS96x with one SMBus interface.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
/* base address register in PCI config space */
#define SIS96x_BAR 0x04
/* SiS96x SMBus registers */
#define SMB_STS 0x00
#define SMB_EN 0x01
#define SMB_CNT 0x02
#define SMB_HOST_CNT 0x03
#define SMB_ADDR 0x04
#define SMB_CMD 0x05
#define SMB_PCOUNT 0x06
#define SMB_COUNT 0x07
#define SMB_BYTE 0x08
#define SMB_DEV_ADDR 0x10
#define SMB_DB0 0x11
#define SMB_DB1 0x12
#define SMB_SAA 0x13
/* register count for request_region */
#define SMB_IOSIZE 0x20
/* Other settings */
#define MAX_TIMEOUT 500
/* SiS96x SMBus constants */
#define SIS96x_QUICK 0x00
#define SIS96x_BYTE 0x01
#define SIS96x_BYTE_DATA 0x02
#define SIS96x_WORD_DATA 0x03
#define SIS96x_PROC_CALL 0x04
#define SIS96x_BLOCK_DATA 0x05
static struct pci_driver sis96x_driver;
static struct i2c_adapter sis96x_adapter;
static u16 sis96x_smbus_base;
/* Read one SMBus register at offset @reg from the I/O-mapped base. */
static inline u8 sis96x_read(u8 reg)
{
	u8 val = inb(sis96x_smbus_base + reg);

	return val;
}
/* Write @data to the SMBus register at offset @reg. */
static inline void sis96x_write(u8 reg, u8 data)
{
	u16 port = sis96x_smbus_base + reg;

	outb(data, port);
}
/* Execute a SMBus transaction.
   int size is from SIS96x_QUICK to SIS96x_BLOCK_DATA.
   Returns 0 on success or a negative errno: -EBUSY (host stuck busy),
   -ETIMEDOUT (no completion within MAX_TIMEOUT ms), -ENXIO (device
   error, probably missing ACK), -EIO (bus collision). */
static int sis96x_transaction(int size)
{
	int temp;
	int result = 0;
	int timeout = 0;

	dev_dbg(&sis96x_adapter.dev, "SMBus transaction %d\n", size);

	/* Make sure the SMBus host is ready to start transmitting */
	if (((temp = sis96x_read(SMB_CNT)) & 0x03) != 0x00) {

		dev_dbg(&sis96x_adapter.dev, "SMBus busy (0x%02x). "
			"Resetting...\n", temp);

		/* kill the transaction */
		sis96x_write(SMB_HOST_CNT, 0x20);

		/* check it again */
		if (((temp = sis96x_read(SMB_CNT)) & 0x03) != 0x00) {
			dev_dbg(&sis96x_adapter.dev, "Failed (0x%02x)\n", temp);
			return -EBUSY;
		} else {
			dev_dbg(&sis96x_adapter.dev, "Successful\n");
		}
	}

	/* Turn off timeout interrupts, set fast host clock */
	sis96x_write(SMB_CNT, 0x20);

	/* clear all (sticky) status flags */
	temp = sis96x_read(SMB_STS);
	sis96x_write(SMB_STS, temp & 0x1e);

	/* start the transaction by setting bit 4 and size bits */
	sis96x_write(SMB_HOST_CNT, 0x10 | (size & 0x07));

	/* We will always wait for a fraction of a second! */
	do {
		msleep(1);
		temp = sis96x_read(SMB_STS);
	} while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT));

	/* If the SMBus is still busy, we give up */
	if (timeout > MAX_TIMEOUT) {
		dev_dbg(&sis96x_adapter.dev, "SMBus Timeout! (0x%02x)\n", temp);
		result = -ETIMEDOUT;
	}

	/* device error - probably missing ACK */
	if (temp & 0x02) {
		dev_dbg(&sis96x_adapter.dev, "Failed bus transaction!\n");
		result = -ENXIO;
	}

	/* bus collision */
	if (temp & 0x04) {
		dev_dbg(&sis96x_adapter.dev, "Bus collision!\n");
		result = -EIO;
	}

	/* Finish up by resetting the bus */
	sis96x_write(SMB_STS, temp);
	if ((temp = sis96x_read(SMB_STS))) {
		dev_dbg(&sis96x_adapter.dev, "Failed reset at "
			"end of transaction! (0x%02x)\n", temp);
	}

	return result;
}
/* Return negative errno on error. */
/* SMBus transfer entry point: program address/command/data registers
 * according to the requested @size, run the transaction, then read back
 * results for read operations. */
static s32 sis96x_access(struct i2c_adapter * adap, u16 addr,
			 unsigned short flags, char read_write,
			 u8 command, int size, union i2c_smbus_data * data)
{
	int status;

	switch (size) {
	case I2C_SMBUS_QUICK:
		/* 7-bit address shifted left one; low bit carries R/W. */
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		size = SIS96x_QUICK;
		break;

	case I2C_SMBUS_BYTE:
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		if (read_write == I2C_SMBUS_WRITE)
			sis96x_write(SMB_CMD, command);
		size = SIS96x_BYTE;
		break;

	case I2C_SMBUS_BYTE_DATA:
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis96x_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE)
			sis96x_write(SMB_BYTE, data->byte);
		size = SIS96x_BYTE_DATA;
		break;

	case I2C_SMBUS_PROC_CALL:
	case I2C_SMBUS_WORD_DATA:
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis96x_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE) {
			/* Word goes out low byte first. */
			sis96x_write(SMB_BYTE, data->word & 0xff);
			sis96x_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8);
		}
		size = (size == I2C_SMBUS_PROC_CALL ?
			SIS96x_PROC_CALL : SIS96x_WORD_DATA);
		break;

	default:
		dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
		return -EOPNOTSUPP;
	}

	status = sis96x_transaction(size);
	if (status)
		return status;

	/* Nothing to read back for quick transactions or for writes
	 * (except proc_call, which always returns data). */
	if ((size != SIS96x_PROC_CALL) &&
	    ((read_write == I2C_SMBUS_WRITE) || (size == SIS96x_QUICK)))
		return 0;

	switch (size) {
	case SIS96x_BYTE:
	case SIS96x_BYTE_DATA:
		data->byte = sis96x_read(SMB_BYTE);
		break;

	case SIS96x_WORD_DATA:
	case SIS96x_PROC_CALL:
		data->word = sis96x_read(SMB_BYTE) +
			     (sis96x_read(SMB_BYTE + 1) << 8);
		break;
	}

	return 0;
}
/* Advertise the SMBus transfer types this controller implements. */
static u32 sis96x_func(struct i2c_adapter *adapter)
{
	u32 caps = I2C_FUNC_SMBUS_QUICK;

	caps |= I2C_FUNC_SMBUS_BYTE;
	caps |= I2C_FUNC_SMBUS_BYTE_DATA;
	caps |= I2C_FUNC_SMBUS_WORD_DATA;
	caps |= I2C_FUNC_SMBUS_PROC_CALL;

	return caps;
}
/* i2c-core glue: the transfer implementation plus capability query. */
static const struct i2c_algorithm smbus_algorithm = {
	.smbus_xfer = sis96x_access,
	.functionality = sis96x_func,
};

/* The single adapter instance (only one SiS96x is supported). */
static struct i2c_adapter sis96x_adapter = {
	.owner = THIS_MODULE,
	.class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
	.algo = &smbus_algorithm,
};

/* South-bridge SMBus PCI function we bind to. */
static const struct pci_device_id sis96x_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
	{ 0, }
};

MODULE_DEVICE_TABLE (pci, sis96x_ids);
/*
 * Probe: verify the PCI function really is an SMBus controller, claim its
 * I/O region and register the i2c adapter.  Only one instance is
 * supported; global state lives in sis96x_smbus_base.
 */
static int __devinit sis96x_probe(struct pci_dev *dev,
				const struct pci_device_id *id)
{
	u16 ww = 0;
	int retval;

	if (sis96x_smbus_base) {
		dev_err(&dev->dev, "Only one device supported.\n");
		return -EBUSY;
	}

	/* Reject anything whose PCI class is not SMBus. */
	pci_read_config_word(dev, PCI_CLASS_DEVICE, &ww);
	if (PCI_CLASS_SERIAL_SMBUS != ww) {
		dev_err(&dev->dev, "Unsupported device class 0x%04x!\n", ww);
		return -ENODEV;
	}

	sis96x_smbus_base = pci_resource_start(dev, SIS96x_BAR);
	if (!sis96x_smbus_base) {
		dev_err(&dev->dev, "SiS96x SMBus base address "
			"not initialized!\n");
		return -EINVAL;
	}
	dev_info(&dev->dev, "SiS96x SMBus base address: 0x%04x\n",
		 sis96x_smbus_base);

	/* Back off if ACPI has a conflicting claim on the region. */
	retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]);
	if (retval)
		return -ENODEV;

	/* Everything is happy, let's grab the memory and set things up. */
	if (!request_region(sis96x_smbus_base, SMB_IOSIZE,
			    sis96x_driver.name)) {
		dev_err(&dev->dev, "SMBus registers 0x%04x-0x%04x "
			"already in use!\n", sis96x_smbus_base,
			sis96x_smbus_base + SMB_IOSIZE - 1);
		sis96x_smbus_base = 0;
		return -EINVAL;
	}

	/* set up the sysfs linkage to our parent device */
	sis96x_adapter.dev.parent = &dev->dev;

	snprintf(sis96x_adapter.name, sizeof(sis96x_adapter.name),
		 "SiS96x SMBus adapter at 0x%04x", sis96x_smbus_base);

	if ((retval = i2c_add_adapter(&sis96x_adapter))) {
		dev_err(&dev->dev, "Couldn't register adapter!\n");
		release_region(sis96x_smbus_base, SMB_IOSIZE);
		sis96x_smbus_base = 0;	/* allow a later probe to retry */
	}

	return retval;
}
/* Undo probe: drop the adapter, release the I/O region, clear the base so
 * a subsequent probe may bind again. */
static void __devexit sis96x_remove(struct pci_dev *dev)
{
	if (!sis96x_smbus_base)
		return;

	i2c_del_adapter(&sis96x_adapter);
	release_region(sis96x_smbus_base, SMB_IOSIZE);
	sis96x_smbus_base = 0;
}
/* PCI driver glue for the SiS96x SMBus function. */
static struct pci_driver sis96x_driver = {
	.name = "sis96x_smbus",
	.id_table = sis96x_ids,
	.probe = sis96x_probe,
	.remove = __devexit_p(sis96x_remove),
};

/* Module init: hand everything to the PCI core. */
static int __init i2c_sis96x_init(void)
{
	return pci_register_driver(&sis96x_driver);
}

/* Module exit: unregistering triggers sis96x_remove() for a bound device. */
static void __exit i2c_sis96x_exit(void)
{
	pci_unregister_driver(&sis96x_driver);
}
MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
MODULE_DESCRIPTION("SiS96x SMBus driver");
MODULE_LICENSE("GPL");
/* Register initialization functions using helper macros */
module_init(i2c_sis96x_init);
module_exit(i2c_sis96x_exit);
| gpl-2.0 |
manveru0/FeaCore_Phoenix_S3_JellyBean | tools/firewire/nosy-dump.c | 8469 | 25674 | /*
* nosy-dump - Interface to snoop mode driver for TI PCILynx 1394 controllers
* Copyright (C) 2002-2006 Kristian Høgsberg
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <byteswap.h>
#include <endian.h>
#include <fcntl.h>
#include <linux/firewire-constants.h>
#include <poll.h>
#include <popt.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <termios.h>
#include <unistd.h>
#include "list.h"
#include "nosy-dump.h"
#include "nosy-user.h"
enum {
PACKET_FIELD_DETAIL = 0x01,
PACKET_FIELD_DATA_LENGTH = 0x02,
/* Marks the fields we print in transaction view. */
PACKET_FIELD_TRANSACTION = 0x04,
};
static void print_packet(uint32_t *data, size_t length);
static void decode_link_packet(struct link_packet *packet, size_t length,
int include_flags, int exclude_flags);
static int run = 1;
sig_t sys_sigint_handler;
static char *option_nosy_device = "/dev/nosy";
static char *option_view = "packet";
static char *option_output;
static char *option_input;
static int option_hex;
static int option_iso;
static int option_cycle_start;
static int option_version;
static int option_verbose;
enum {
VIEW_TRANSACTION,
VIEW_PACKET,
VIEW_STATS,
};
static const struct poptOption options[] = {
{
.longName = "device",
.shortName = 'd',
.argInfo = POPT_ARG_STRING,
.arg = &option_nosy_device,
.descrip = "Path to nosy device.",
.argDescrip = "DEVICE"
},
{
.longName = "view",
.argInfo = POPT_ARG_STRING,
.arg = &option_view,
.descrip = "Specify view of bus traffic: packet, transaction or stats.",
.argDescrip = "VIEW"
},
{
.longName = "hex",
.shortName = 'x',
.argInfo = POPT_ARG_NONE,
.arg = &option_hex,
.descrip = "Print each packet in hex.",
},
{
.longName = "iso",
.argInfo = POPT_ARG_NONE,
.arg = &option_iso,
.descrip = "Print iso packets.",
},
{
.longName = "cycle-start",
.argInfo = POPT_ARG_NONE,
.arg = &option_cycle_start,
.descrip = "Print cycle start packets.",
},
{
.longName = "verbose",
.shortName = 'v',
.argInfo = POPT_ARG_NONE,
.arg = &option_verbose,
.descrip = "Verbose packet view.",
},
{
.longName = "output",
.shortName = 'o',
.argInfo = POPT_ARG_STRING,
.arg = &option_output,
.descrip = "Log to output file.",
.argDescrip = "FILENAME"
},
{
.longName = "input",
.shortName = 'i',
.argInfo = POPT_ARG_STRING,
.arg = &option_input,
.descrip = "Decode log from file.",
.argDescrip = "FILENAME"
},
{
.longName = "version",
.argInfo = POPT_ARG_NONE,
.arg = &option_version,
.descrip = "Specify print version info.",
},
POPT_AUTOHELP
POPT_TABLEEND
};
/* Allow all ^C except the first to interrupt the program in the usual way. */
static void
sigint_handler(int signal_num)
{
	if (run != 1)
		return;

	run = 0;
	signal(SIGINT, SIG_DFL);
}
/*
 * Allocate a subaction holding @length bytes of raw packet quadlets.
 * The trailing quadlet (the snoop-mode ack) is duplicated into sa->ack
 * for easy access.  NOTE: the malloc result is not checked.
 */
static struct subaction *
subaction_create(uint32_t *data, size_t length)
{
	struct subaction *sa;

	/* Size the allocation so the packet area can hold @length bytes. */
	sa = malloc(sizeof *sa - sizeof sa->packet + length);
	sa->length = length;
	sa->ack = data[length / 4 - 1];
	memcpy(&sa->packet, data, length);

	return sa;
}
/* Release a subaction allocated by subaction_create(). */
static void
subaction_destroy(struct subaction *sa)
{
	free(sa);
}
/* Transactions awaiting completion; statically initialized to an empty
 * circular list (both pointers refer back to the head). */
static struct list pending_transaction_list = {
	&pending_transaction_list, &pending_transaction_list
};
/*
 * Find the pending transaction matching (request_node, response_node,
 * tlabel); if none exists, allocate a fresh one and append it to the
 * pending list.  NOTE: the malloc result is not checked.
 */
static struct link_transaction *
link_transaction_lookup(int request_node, int response_node, int tlabel)
{
	struct link_transaction *t;

	list_for_each_entry(t, &pending_transaction_list, link) {
		if (t->request_node == request_node &&
		    t->response_node == response_node &&
		    t->tlabel == tlabel)
			return t;
	}

	t = malloc(sizeof *t);
	t->request_node = request_node;
	t->response_node = response_node;
	t->tlabel = tlabel;
	list_init(&t->request_list);
	list_init(&t->response_list);
	list_append(&pending_transaction_list, &t->link);

	return t;
}
/* Free every subaction queued on @list. */
static void
drain_subaction_list(struct list *list)
{
	struct subaction *sa;

	while (!list_empty(list)) {
		sa = list_head(list, struct subaction, link);
		list_remove(&sa->link);
		subaction_destroy(sa);
	}
}

/* Free a transaction together with all of its request and response
 * subactions.  Does not unlink @t from the pending list; callers remove
 * it themselves first. */
static void
link_transaction_destroy(struct link_transaction *t)
{
	drain_subaction_list(&t->request_list);
	drain_subaction_list(&t->response_list);
	free(t);
}
struct protocol_decoder {
const char *name;
int (*decode)(struct link_transaction *t);
};
static const struct protocol_decoder protocol_decoders[] = {
{ "FCP", decode_fcp }
};
/*
 * Run the protocol decoders over a finished transaction.  NOTE(review):
 * because of the early return marked HACK below, the generic printout is
 * dead code and link_transaction_destroy() is never reached on this path,
 * so the (already unlinked) transaction appears to be leaked -- confirm
 * against upstream intent.
 */
static void
handle_transaction(struct link_transaction *t)
{
	struct subaction *sa;
	int i;

	if (!t->request) {
		printf("BUG in handle_transaction\n");
		return;
	}

	/* Let each registered protocol decoder try to claim it. */
	for (i = 0; i < array_length(protocol_decoders); i++)
		if (protocol_decoders[i].decode(t))
			break;

	/* HACK: decode only fcp right now. */
	return;

	/* --- dead code below (disabled by the return above) --- */
	decode_link_packet(&t->request->packet, t->request->length,
			   PACKET_FIELD_TRANSACTION, 0);
	if (t->response)
		decode_link_packet(&t->response->packet, t->request->length,
				   PACKET_FIELD_TRANSACTION, 0);
	else
		printf("[no response]");

	if (option_verbose) {
		list_for_each_entry(sa, &t->request_list, link)
			print_packet((uint32_t *) &sa->packet, sa->length);
		list_for_each_entry(sa, &t->response_list, link)
			print_packet((uint32_t *) &sa->packet, sa->length);
	}
	printf("\r\n");

	link_transaction_destroy(t);
}
/* Drop and free every transaction still awaiting completion (called on
 * bus reset). */
static void
clear_pending_transaction_list(void)
{
	struct link_transaction *t;

	while (!list_empty(&pending_transaction_list)) {
		t = list_head(&pending_transaction_list,
			      struct link_transaction, link);
		list_remove(&t->link);
		link_transaction_destroy(t);
		/* print unfinished transactions */
	}
}
static const char * const tcode_names[] = {
[0x0] = "write_quadlet_request", [0x6] = "read_quadlet_response",
[0x1] = "write_block_request", [0x7] = "read_block_response",
[0x2] = "write_response", [0x8] = "cycle_start",
[0x3] = "reserved", [0x9] = "lock_request",
[0x4] = "read_quadlet_request", [0xa] = "iso_data",
[0x5] = "read_block_request", [0xb] = "lock_response",
};
static const char * const ack_names[] = {
[0x0] = "no ack", [0x8] = "reserved (0x08)",
[0x1] = "ack_complete", [0x9] = "reserved (0x09)",
[0x2] = "ack_pending", [0xa] = "reserved (0x0a)",
[0x3] = "reserved (0x03)", [0xb] = "reserved (0x0b)",
[0x4] = "ack_busy_x", [0xc] = "reserved (0x0c)",
[0x5] = "ack_busy_a", [0xd] = "ack_data_error",
[0x6] = "ack_busy_b", [0xe] = "ack_type_error",
[0x7] = "reserved (0x07)", [0xf] = "reserved (0x0f)",
};
static const char * const rcode_names[] = {
[0x0] = "complete", [0x4] = "conflict_error",
[0x1] = "reserved (0x01)", [0x5] = "data_error",
[0x2] = "reserved (0x02)", [0x6] = "type_error",
[0x3] = "reserved (0x03)", [0x7] = "address_error",
};
static const char * const retry_names[] = {
[0x0] = "retry_1",
[0x1] = "retry_x",
[0x2] = "retry_a",
[0x3] = "retry_b",
};
enum {
PACKET_RESERVED,
PACKET_REQUEST,
PACKET_RESPONSE,
PACKET_OTHER,
};
struct packet_info {
const char *name;
int type;
int response_tcode;
const struct packet_field *fields;
int field_count;
};
struct packet_field {
const char *name; /* Short name for field. */
int offset; /* Location of field, specified in bits; */
/* negative means from end of packet. */
int width; /* Width of field, 0 means use data_length. */
int flags; /* Show options. */
const char * const *value_names;
};
#define COMMON_REQUEST_FIELDS \
{ "dest", 0, 16, PACKET_FIELD_TRANSACTION }, \
{ "tl", 16, 6 }, \
{ "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \
{ "tcode", 24, 4, PACKET_FIELD_TRANSACTION, tcode_names }, \
{ "pri", 28, 4, PACKET_FIELD_DETAIL }, \
{ "src", 32, 16, PACKET_FIELD_TRANSACTION }, \
{ "offs", 48, 48, PACKET_FIELD_TRANSACTION }
#define COMMON_RESPONSE_FIELDS \
{ "dest", 0, 16 }, \
{ "tl", 16, 6 }, \
{ "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \
{ "tcode", 24, 4, 0, tcode_names }, \
{ "pri", 28, 4, PACKET_FIELD_DETAIL }, \
{ "src", 32, 16 }, \
{ "rcode", 48, 4, PACKET_FIELD_TRANSACTION, rcode_names }
static const struct packet_field read_quadlet_request_fields[] = {
COMMON_REQUEST_FIELDS,
{ "crc", 96, 32, PACKET_FIELD_DETAIL },
{ "ack", 156, 4, 0, ack_names },
};
static const struct packet_field read_quadlet_response_fields[] = {
COMMON_RESPONSE_FIELDS,
{ "data", 96, 32, PACKET_FIELD_TRANSACTION },
{ "crc", 128, 32, PACKET_FIELD_DETAIL },
{ "ack", 188, 4, 0, ack_names },
};
static const struct packet_field read_block_request_fields[] = {
COMMON_REQUEST_FIELDS,
{ "data_length", 96, 16, PACKET_FIELD_TRANSACTION },
{ "extended_tcode", 112, 16 },
{ "crc", 128, 32, PACKET_FIELD_DETAIL },
{ "ack", 188, 4, 0, ack_names },
};
static const struct packet_field block_response_fields[] = {
COMMON_RESPONSE_FIELDS,
{ "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH },
{ "extended_tcode", 112, 16 },
{ "crc", 128, 32, PACKET_FIELD_DETAIL },
{ "data", 160, 0, PACKET_FIELD_TRANSACTION },
{ "crc", -64, 32, PACKET_FIELD_DETAIL },
{ "ack", -4, 4, 0, ack_names },
};
static const struct packet_field write_quadlet_request_fields[] = {
COMMON_REQUEST_FIELDS,
{ "data", 96, 32, PACKET_FIELD_TRANSACTION },
{ "ack", -4, 4, 0, ack_names },
};
static const struct packet_field block_request_fields[] = {
COMMON_REQUEST_FIELDS,
{ "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH | PACKET_FIELD_TRANSACTION },
{ "extended_tcode", 112, 16, PACKET_FIELD_TRANSACTION },
{ "crc", 128, 32, PACKET_FIELD_DETAIL },
{ "data", 160, 0, PACKET_FIELD_TRANSACTION },
{ "crc", -64, 32, PACKET_FIELD_DETAIL },
{ "ack", -4, 4, 0, ack_names },
};
static const struct packet_field write_response_fields[] = {
COMMON_RESPONSE_FIELDS,
{ "reserved", 64, 32, PACKET_FIELD_DETAIL },
{ "ack", -4, 4, 0, ack_names },
};
static const struct packet_field iso_data_fields[] = {
{ "data_length", 0, 16, PACKET_FIELD_DATA_LENGTH },
{ "tag", 16, 2 },
{ "channel", 18, 6 },
{ "tcode", 24, 4, 0, tcode_names },
{ "sy", 28, 4 },
{ "crc", 32, 32, PACKET_FIELD_DETAIL },
{ "data", 64, 0 },
{ "crc", -64, 32, PACKET_FIELD_DETAIL },
{ "ack", -4, 4, 0, ack_names },
};
static const struct packet_info packet_info[] = {
{
.name = "write_quadlet_request",
.type = PACKET_REQUEST,
.response_tcode = TCODE_WRITE_RESPONSE,
.fields = write_quadlet_request_fields,
.field_count = array_length(write_quadlet_request_fields)
},
{
.name = "write_block_request",
.type = PACKET_REQUEST,
.response_tcode = TCODE_WRITE_RESPONSE,
.fields = block_request_fields,
.field_count = array_length(block_request_fields)
},
{
.name = "write_response",
.type = PACKET_RESPONSE,
.fields = write_response_fields,
.field_count = array_length(write_response_fields)
},
{
.name = "reserved",
.type = PACKET_RESERVED,
},
{
.name = "read_quadlet_request",
.type = PACKET_REQUEST,
.response_tcode = TCODE_READ_QUADLET_RESPONSE,
.fields = read_quadlet_request_fields,
.field_count = array_length(read_quadlet_request_fields)
},
{
.name = "read_block_request",
.type = PACKET_REQUEST,
.response_tcode = TCODE_READ_BLOCK_RESPONSE,
.fields = read_block_request_fields,
.field_count = array_length(read_block_request_fields)
},
{
.name = "read_quadlet_response",
.type = PACKET_RESPONSE,
.fields = read_quadlet_response_fields,
.field_count = array_length(read_quadlet_response_fields)
},
{
.name = "read_block_response",
.type = PACKET_RESPONSE,
.fields = block_response_fields,
.field_count = array_length(block_response_fields)
},
{
.name = "cycle_start",
.type = PACKET_OTHER,
.fields = write_quadlet_request_fields,
.field_count = array_length(write_quadlet_request_fields)
},
{
.name = "lock_request",
.type = PACKET_REQUEST,
.fields = block_request_fields,
.field_count = array_length(block_request_fields)
},
{
.name = "iso_data",
.type = PACKET_OTHER,
.fields = iso_data_fields,
.field_count = array_length(iso_data_fields)
},
{
.name = "lock_response",
.type = PACKET_RESPONSE,
.fields = block_response_fields,
.field_count = array_length(block_response_fields)
},
};
/*
 * Track a request subaction: attach it to its (possibly newly created)
 * transaction, then act on the ack code -- complete the transaction now,
 * wait for a response, or wait for a retry.  Always returns 1.
 */
static int
handle_request_packet(uint32_t *data, size_t length)
{
	struct link_packet *p = (struct link_packet *) data;
	struct subaction *sa, *prev;
	struct link_transaction *t;

	t = link_transaction_lookup(p->common.source, p->common.destination,
				    p->common.tlabel);
	sa = subaction_create(data, length);
	t->request = sa;

	if (!list_empty(&t->request_list)) {
		prev = list_tail(&t->request_list,
				 struct subaction, link);

		if (!ACK_BUSY(prev->ack)) {
			/*
			 * error, we should only see ack_busy_* before the
			 * ack_pending/ack_complete -- this is an ack_pending
			 * instead (ack_complete would have finished the
			 * transaction).
			 */
		}

		if (prev->packet.common.tcode != sa->packet.common.tcode ||
		    prev->packet.common.tlabel != sa->packet.common.tlabel) {
			/* memcmp() ? */
			/* error, these should match for retries. */
		}
	}

	list_append(&t->request_list, &sa->link);

	switch (sa->ack) {
	case ACK_COMPLETE:
		/* Unified transaction: request and response in one go. */
		if (p->common.tcode != TCODE_WRITE_QUADLET_REQUEST &&
		    p->common.tcode != TCODE_WRITE_BLOCK_REQUEST)
			/* error, unified transactions only allowed for write */;
		list_remove(&t->link);
		handle_transaction(t);
		break;

	case ACK_NO_ACK:
	case ACK_DATA_ERROR:
	case ACK_TYPE_ERROR:
		/* Terminal failures: close the transaction immediately. */
		list_remove(&t->link);
		handle_transaction(t);
		break;

	case ACK_PENDING:
		/* request subaction phase over, wait for response. */
		break;

	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		/* ok, wait for retry. */
		/* check that retry protocol is respected. */
		break;
	}

	return 1;
}
/*
 * Track a response subaction: pair it with the pending transaction keyed
 * by (destination, source, tlabel), sanity-check the retry/tcode
 * protocol, then close the transaction unless the ack asks for another
 * retry.  Always returns 1.
 */
static int
handle_response_packet(uint32_t *data, size_t length)
{
	struct link_packet *p = (struct link_packet *) data;
	struct subaction *sa, *prev;
	struct link_transaction *t;

	t = link_transaction_lookup(p->common.destination, p->common.source,
				    p->common.tlabel);
	if (list_empty(&t->request_list)) {
		/* unsolicited response */
	}

	sa = subaction_create(data, length);
	t->response = sa;

	if (!list_empty(&t->response_list)) {
		prev = list_tail(&t->response_list, struct subaction, link);

		if (!ACK_BUSY(prev->ack)) {
			/*
			 * error, we should only see ack_busy_* before the
			 * ack_pending/ack_complete
			 */
		}

		if (prev->packet.common.tcode != sa->packet.common.tcode ||
		    prev->packet.common.tlabel != sa->packet.common.tlabel) {
			/* use memcmp() instead? */
			/* error, these should match for retries. */
		}
	} else {
		prev = list_tail(&t->request_list, struct subaction, link);
		if (prev->ack != ACK_PENDING) {
			/*
			 * error, should not get response unless last request got
			 * ack_pending.
			 */
		}

		if (packet_info[prev->packet.common.tcode].response_tcode !=
		    sa->packet.common.tcode) {
			/* error, tcode mismatch */
		}
	}

	list_append(&t->response_list, &sa->link);

	switch (sa->ack) {
	case ACK_COMPLETE:
	case ACK_NO_ACK:
	case ACK_DATA_ERROR:
	case ACK_TYPE_ERROR:
		list_remove(&t->link);
		handle_transaction(t);
		/* transaction complete, remove t from pending list. */
		break;

	case ACK_PENDING:
		/* error for responses. */
		break;

	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		/* no problem, wait for next retry */
		break;
	}

	return 1;
}
/*
 * Dispatch one snooped packet: zero length marks a bus reset, link
 * packets (longer than a PHY packet) are routed by tcode to the
 * request/response handlers.  Returns 0 for OTHER/RESERVED tcodes,
 * 1 otherwise.
 */
static int
handle_packet(uint32_t *data, size_t length)
{
	if (length == 0) {
		printf("bus reset\r\n");
		clear_pending_transaction_list();
	} else if (length > sizeof(struct phy_packet)) {
		struct link_packet *p = (struct link_packet *) data;

		switch (packet_info[p->common.tcode].type) {
		case PACKET_REQUEST:
			return handle_request_packet(data, length);

		case PACKET_RESPONSE:
			return handle_response_packet(data, length);

		case PACKET_OTHER:
		case PACKET_RESERVED:
			return 0;
		}
	}

	return 1;
}
/*
 * Extract @width bits starting at bit @offset of the packet.  The +1
 * skips the extra leading quadlet the capture prepends (printed as a
 * decimal counter by print_packet -- presumably a timestamp; confirm).
 * Assumes @width <= 32 and the field fits inside one quadlet.
 */
static unsigned int
get_bits(struct link_packet *packet, int offset, int width)
{
	const uint32_t *quadlets = (uint32_t *) packet;
	uint32_t quadlet = quadlets[offset / 32 + 1];
	uint32_t shift = 32 - (offset & 31) - width;
	uint32_t mask = (width == 32) ? ~0 : (1 << width) - 1;

	return (quadlet >> shift) & mask;
}
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define byte_index(i) ((i) ^ 3)
#elif __BYTE_ORDER == __BIG_ENDIAN
#define byte_index(i) (i)
#else
#error unsupported byte order.
#endif
/*
 * Hex-dump at most 128 bytes of @data, inserting a space between each
 * group of four bytes and noting "(N more bytes)" if truncated.  Bytes
 * within a quadlet are addressed through byte_index() to compensate for
 * host byte order.
 */
static void
dump_data(unsigned char *data, int length)
{
	int shown = length > 128 ? 128 : length;
	int i;

	for (i = 0; i < shown; i++) {
		const char *sep = (i != 0 && i % 4 == 0) ? " " : "";

		printf("%s%02hhx", sep, data[byte_index(i)]);
	}

	if (shown < length)
		printf(" (%d more bytes)", length - shown);
}
/*
 * Print the decoded fields of a link packet as "name=value" pairs
 * separated by ", ".  Fields are filtered by their PACKET_FIELD_* flags:
 * anything matching @exclude_flags is hidden; if @include_flags is
 * nonzero only matching fields are shown.
 */
static void
decode_link_packet(struct link_packet *packet, size_t length,
		   int include_flags, int exclude_flags)
{
	const struct packet_info *pi;
	int data_length = 0;
	int i;

	pi = &packet_info[packet->common.tcode];

	for (i = 0; i < pi->field_count; i++) {
		const struct packet_field *f = &pi->fields[i];
		int offset;

		if (f->flags & exclude_flags)
			continue;
		if (include_flags && !(f->flags & include_flags))
			continue;

		/* Negative offsets count back from the end of the packet;
		 * the extra -32 compensates for the one-quadlet skip
		 * built into get_bits(). */
		if (f->offset < 0)
			offset = length * 8 + f->offset - 32;
		else
			offset = f->offset;

		if (f->value_names != NULL) {
			/* Enumerated field: print its symbolic name. */
			uint32_t bits;

			bits = get_bits(packet, offset, f->width);
			printf("%s", f->value_names[bits]);
		} else if (f->width == 0) {
			/* Width 0 marks a variable-length payload whose
			 * size came from an earlier DATA_LENGTH field. */
			printf("%s=[", f->name);
			dump_data((unsigned char *) packet + (offset / 8 + 4), data_length);
			printf("]");
		} else {
			unsigned long long bits;
			int high_width, low_width;

			if ((offset & ~31) != ((offset + f->width - 1) & ~31)) {
				/* Bit field spans quadlet boundary. */
				high_width = ((offset + 31) & ~31) - offset;
				low_width = f->width - high_width;
				bits = get_bits(packet, offset, high_width);
				bits = (bits << low_width) |
					get_bits(packet, offset + high_width, low_width);
			} else {
				bits = get_bits(packet, offset, f->width);
			}

			printf("%s=0x%0*llx", f->name, (f->width + 3) / 4, bits);
			if (f->flags & PACKET_FIELD_DATA_LENGTH)
				data_length = bits;
		}

		if (i < pi->field_count - 1)
			printf(", ");
	}
}
/*
 * Print one captured packet.  data[0] is an extra quadlet prepended by
 * the capture (printed as a decimal counter -- presumably a timestamp;
 * confirm against the nosy driver); the packet proper starts at data[1].
 * Packet class is inferred from length: 4 bytes = bus-reset marker,
 * shorter than a PHY packet = malformed, exactly PHY-packet sized with
 * data[1] == ~data[2] = PHY packet, anything else = link packet.
 *
 * Fix: the raw-quadlet dump loops tested `i == 0` but start at i == 1,
 * so the opening '[' was never printed even though the closing ']' was;
 * test `i == 1` so the bracketing is balanced.
 */
static void
print_packet(uint32_t *data, size_t length)
{
	int i;

	printf("%6u ", data[0]);

	if (length == 4) {
		printf("bus reset");
	} else if (length < sizeof(struct phy_packet)) {
		printf("short packet: ");
		for (i = 1; i < length / 4; i++)
			printf("%s%08x", i == 1 ? "[" : " ", data[i]);
		printf("]");

	} else if (length == sizeof(struct phy_packet) && data[1] == ~data[2]) {
		struct phy_packet *pp = (struct phy_packet *) data;

		/* phy packet are 3 quadlets: the 1 quadlet payload,
		 * the bitwise inverse of the payload and the snoop
		 * mode ack */

		switch (pp->common.identifier) {
		case PHY_PACKET_CONFIGURATION:
			if (!pp->phy_config.set_root && !pp->phy_config.set_gap_count) {
				printf("ext phy config: phy_id=%02x", pp->phy_config.root_id);
			} else {
				printf("phy config:");
				if (pp->phy_config.set_root)
					printf(" set_root_id=%02x", pp->phy_config.root_id);
				if (pp->phy_config.set_gap_count)
					printf(" set_gap_count=%d", pp->phy_config.gap_count);
			}
			break;

		case PHY_PACKET_LINK_ON:
			printf("link-on packet, phy_id=%02x", pp->link_on.phy_id);
			break;

		case PHY_PACKET_SELF_ID:
			if (pp->self_id.extended) {
				printf("extended self id: phy_id=%02x, seq=%d",
				       pp->ext_self_id.phy_id, pp->ext_self_id.sequence);
			} else {
				static const char * const speed_names[] = {
					"S100", "S200", "S400", "BETA"
				};

				printf("self id: phy_id=%02x, link %s, gap_count=%d, speed=%s%s%s",
				       pp->self_id.phy_id,
				       (pp->self_id.link_active ? "active" : "not active"),
				       pp->self_id.gap_count,
				       speed_names[pp->self_id.phy_speed],
				       (pp->self_id.contender ? ", irm contender" : ""),
				       (pp->self_id.initiated_reset ? ", initiator" : ""));
			}
			break;

		default:
			printf("unknown phy packet: ");
			for (i = 1; i < length / 4; i++)
				printf("%s%08x", i == 1 ? "[" : " ", data[i]);
			printf("]");
			break;
		}
	} else {
		struct link_packet *packet = (struct link_packet *) data;

		decode_link_packet(packet, length, 0,
				   option_verbose ? 0 : PACKET_FIELD_DETAIL);
	}

	if (option_hex) {
		printf(" [");
		dump_data((unsigned char *) data + 4, length - 4);
		printf("]");
	}

	printf("\r\n");
}
#define HIDE_CURSOR	"\033[?25l"
#define SHOW_CURSOR	"\033[?25h"
#define CLEAR		"\033[H\033[2J"

/*
 * Accumulate per-tcode packet counters and redraw the statistics
 * screen at most every 500 ms.
 *
 * Fix: the old throttle, "now.tv_sec <= last.tv_sec && now.tv_usec <
 * last.tv_usec + 500000", mishandled tv_sec rollover and could redraw
 * far more often than intended around second boundaries; compute the
 * real elapsed time in microseconds instead.
 */
static void
print_stats(uint32_t *data, size_t length)
{
	static int bus_reset_count, short_packet_count, phy_packet_count;
	static int tcode_count[16];
	static struct timeval last_update;
	struct timeval now;
	long long elapsed;
	int i;

	/* Classify exactly like print_packet() does. */
	if (length == 0)
		bus_reset_count++;
	else if (length < sizeof(struct phy_packet))
		short_packet_count++;
	else if (length == sizeof(struct phy_packet) && data[1] == ~data[2])
		phy_packet_count++;
	else {
		struct link_packet *packet = (struct link_packet *) data;
		tcode_count[packet->common.tcode]++;
	}

	gettimeofday(&now, NULL);
	elapsed = (now.tv_sec - last_update.tv_sec) * 1000000LL +
		  (now.tv_usec - last_update.tv_usec);
	if (elapsed < 500000)
		return;
	last_update = now;

	printf(CLEAR HIDE_CURSOR
	       "  bus resets              : %8d\n"
	       "  short packets           : %8d\n"
	       "  phy packets             : %8d\n",
	       bus_reset_count, short_packet_count, phy_packet_count);

	for (i = 0; i < array_length(packet_info); i++)
		if (packet_info[i].type != PACKET_RESERVED)
			printf("  %-24s: %8d\n", packet_info[i].name, tcode_count[i]);
	printf(SHOW_CURSOR "\n");
}
/* Terminal attributes saved at startup so they can be restored on exit. */
static struct termios saved_attributes;

/* atexit() hook: put the terminal back into its original mode. */
static void
reset_input_mode(void)
{
	tcsetattr(STDIN_FILENO, TCSANOW, &saved_attributes);
}
/*
 * Put stdin into non-canonical, no-echo mode so single keypresses
 * (e.g. 'q' to quit) are delivered immediately.  The original settings
 * are saved in saved_attributes and restored via atexit().
 */
static void
set_input_mode(void)
{
	struct termios raw;

	/* Interactive key handling only makes sense on a real terminal. */
	if (!isatty(STDIN_FILENO)) {
		fprintf(stderr, "Not a terminal.\n");
		exit(EXIT_FAILURE);
	}

	/* Remember the current attributes for restoration at exit. */
	tcgetattr(STDIN_FILENO, &saved_attributes);
	atexit(reset_input_mode);

	/* Derive the raw mode from the saved settings. */
	raw = saved_attributes;
	raw.c_lflag &= ~(ICANON | ECHO);
	raw.c_cc[VMIN] = 1;	/* block until at least one byte arrives */
	raw.c_cc[VTIME] = 0;	/* no inter-byte timeout */
	tcsetattr(STDIN_FILENO, TCSAFLUSH, &raw);
}
/*
 * Entry point: parse options, open either a saved dump file or the
 * nosy device, then loop reading packets and dispatching them to the
 * selected view (packet, transaction or stats).  'q' on stdin quits.
 *
 * Fixes: the packet length read from a dump file is now validated
 * before it is used as an fread() size (a corrupt or hostile file
 * could previously overflow buf or pass a negative size), the payload
 * fread() result is checked, and close(fd) is skipped when no device
 * was opened (fd == -1 in file-input mode).
 */
int main(int argc, const char *argv[])
{
	uint32_t buf[128 * 1024];
	uint32_t filter;
	int length, retval, view;
	int fd = -1;
	FILE *output = NULL, *input = NULL;
	poptContext con;
	char c;
	struct pollfd pollfds[2];

	sys_sigint_handler = signal(SIGINT, sigint_handler);

	con = poptGetContext(NULL, argc, argv, options, 0);
	retval = poptGetNextOpt(con);
	if (retval < -1) {
		poptPrintUsage(con, stdout, 0);
		return -1;
	}

	if (option_version) {
		printf("dump tool for nosy sniffer, version %s\n", VERSION);
		return 0;
	}

	if (__BYTE_ORDER != __LITTLE_ENDIAN)
		fprintf(stderr, "warning: nosy has only been tested on little "
			"endian machines\n");

	if (option_input != NULL) {
		input = fopen(option_input, "r");
		if (input == NULL) {
			fprintf(stderr, "Could not open %s, %m\n", option_input);
			return -1;
		}
	} else {
		fd = open(option_nosy_device, O_RDWR);
		if (fd < 0) {
			fprintf(stderr, "Could not open %s, %m\n", option_nosy_device);
			return -1;
		}
		set_input_mode();
	}

	if (strcmp(option_view, "transaction") == 0)
		view = VIEW_TRANSACTION;
	else if (strcmp(option_view, "stats") == 0)
		view = VIEW_STATS;
	else
		view = VIEW_PACKET;

	if (option_output) {
		output = fopen(option_output, "w");
		if (output == NULL) {
			fprintf(stderr, "Could not open %s, %m\n", option_output);
			return -1;
		}
	}

	/* Line-buffer stdout so the display updates promptly. */
	setvbuf(stdout, NULL, _IOLBF, BUFSIZ);

	filter = ~0;
	if (!option_iso)
		filter &= ~(1 << TCODE_STREAM_DATA);
	if (!option_cycle_start)
		filter &= ~(1 << TCODE_CYCLE_START);
	if (view == VIEW_STATS)
		filter = ~(1 << TCODE_CYCLE_START);

	if (fd >= 0) {
		ioctl(fd, NOSY_IOC_FILTER, filter);
		ioctl(fd, NOSY_IOC_START);
	}

	pollfds[0].fd = fd;
	pollfds[0].events = POLLIN;
	pollfds[1].fd = STDIN_FILENO;
	pollfds[1].events = POLLIN;

	while (run) {
		if (input != NULL) {
			if (fread(&length, sizeof length, 1, input) != 1)
				return 0;
			/* A corrupt dump file must not overflow buf. */
			if (length < 0 || (size_t) length > sizeof buf) {
				fprintf(stderr, "bad packet length %d in input\n",
					length);
				return -1;
			}
			if (fread(buf, 1, length, input) != (size_t) length)
				return 0;
		} else {
			poll(pollfds, 2, -1);
			if (pollfds[1].revents) {
				read(STDIN_FILENO, &c, sizeof c);
				switch (c) {
				case 'q':
					if (output != NULL)
						fclose(output);
					return 0;
				}
			}

			if (pollfds[0].revents)
				length = read(fd, buf, sizeof buf);
			else
				continue;
		}

		if (output != NULL) {
			fwrite(&length, sizeof length, 1, output);
			fwrite(buf, 1, length, output);
		}

		switch (view) {
		case VIEW_TRANSACTION:
			handle_packet(buf, length);
			break;
		case VIEW_PACKET:
			print_packet(buf, length);
			break;
		case VIEW_STATS:
			print_stats(buf, length);
			break;
		}
	}

	if (output != NULL)
		fclose(output);
	if (fd >= 0)
		close(fd);
	poptFreeContext(con);

	return 0;
}
| gpl-2.0 |
MattCrystal/freezing-octo-ironman | lib/raid6/sse1.c | 8469 | 4981 | /* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 53 Temple Place Ste 330,
* Boston MA 02111-1307, USA; either version 2 of the License, or
* (at your option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
/*
* raid6/sse1.c
*
* SSE-1/MMXEXT implementation of RAID-6 syndrome functions
*
* This is really an MMX implementation, but it requires SSE-1 or
* AMD MMXEXT for prefetch support and a few other features. The
* support for nontemporal memory accesses is enough to make this
* worthwhile as a separate implementation.
*/
#if defined(__i386__) && !defined(__arch_um__)
#include <linux/raid/pq.h>
#include "x86.h"
/* Defined in raid6/mmx.c */
extern const struct raid6_mmx_constants {
u64 x1d;
} raid6_mmx_constants;
/*
 * The SSE1/MMX syndrome routines need MMX plus either full SSE ("XMM")
 * or AMD's MMX extensions for prefetchnta/movntq support.
 */
static int raid6_have_sse1_or_mmxext(void)
{
	/* Not really boot_cpu but "all_cpus" */
	if (!boot_cpu_has(X86_FEATURE_MMX))
		return 0;

	return boot_cpu_has(X86_FEATURE_XMM) ||
	       boot_cpu_has(X86_FEATURE_MMXEXT);
}
/*
 * Plain SSE1 implementation
 *
 * Generates the RAID-6 P (XOR parity) and Q (Reed-Solomon syndrome)
 * blocks over disks[0..z0], 8 bytes per loop iteration.  mm2
 * accumulates P, mm4 accumulates Q.  The pcmpgtb/paddb/pand/pxor
 * sequence multiplies the Q accumulator by 2 in GF(2^8): pcmpgtb
 * builds an all-ones mask for bytes whose top bit is set, paddb
 * doubles each byte, and pxor folds in the 0x1d reduction constant
 * (raid6_mmx_constants.x1d, replicated in mm0).
 */
static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
	asm volatile("pxor %mm5,%mm5");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 8 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("movq %mm2,%mm4");	/* Q[0] */
		asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d]));
		for ( z = z0-2 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			/* Q = Q * 2 in GF(2^8), then fold in disk z. */
			asm volatile("pcmpgtb %mm4,%mm5");
			asm volatile("paddb %mm4,%mm4");
			asm volatile("pand %mm0,%mm5");
			asm volatile("pxor %mm5,%mm4");
			asm volatile("pxor %mm5,%mm5");
			asm volatile("pxor %mm6,%mm2");
			asm volatile("pxor %mm6,%mm4");
			asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
		}
		/* Final multiply/fold step for the last loaded disk. */
		asm volatile("pcmpgtb %mm4,%mm5");
		asm volatile("paddb %mm4,%mm4");
		asm volatile("pand %mm0,%mm5");
		asm volatile("pxor %mm5,%mm4");
		asm volatile("pxor %mm5,%mm5");
		asm volatile("pxor %mm6,%mm2");
		asm volatile("pxor %mm6,%mm4");
		/* movntq: non-temporal stores keep P/Q out of the cache. */
		asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
		asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
/* Algorithm descriptor: single-wide SSE1/MMX syndrome generator. */
const struct raid6_calls raid6_sse1x1 = {
	raid6_sse11_gen_syndrome,
	raid6_have_sse1_or_mmxext,
	"sse1x1",
	1			/* Has cache hints */
};
/*
 * Unrolled-by-2 SSE1 implementation
 *
 * Same algorithm as raid6_sse11_gen_syndrome() but processes two
 * 8-byte lanes (16 bytes) per iteration: mm2/mm3 accumulate P,
 * mm4/mm6 accumulate Q, and mm5/mm7 hold the GF(2^8) multiply masks
 * (and are reused to stage each disk's data before being re-zeroed).
 */
static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
	asm volatile("pxor %mm5,%mm5");	/* Zero temp */
	asm volatile("pxor %mm7,%mm7");	/* Zero temp */

	/* We uniformly assume a single prefetch covers at least 16 bytes */
	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */
		asm volatile("movq %mm2,%mm4");	/* Q[0] */
		asm volatile("movq %mm3,%mm6");	/* Q[1] */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			/* Q *= 2 in GF(2^8) for both lanes... */
			asm volatile("pcmpgtb %mm4,%mm5");
			asm volatile("pcmpgtb %mm6,%mm7");
			asm volatile("paddb %mm4,%mm4");
			asm volatile("paddb %mm6,%mm6");
			asm volatile("pand %mm0,%mm5");
			asm volatile("pand %mm0,%mm7");
			asm volatile("pxor %mm5,%mm4");
			asm volatile("pxor %mm7,%mm6");
			/* ...then fold disk z into both P and Q. */
			asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
			asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
			asm volatile("pxor %mm5,%mm2");
			asm volatile("pxor %mm7,%mm3");
			asm volatile("pxor %mm5,%mm4");
			asm volatile("pxor %mm7,%mm6");
			asm volatile("pxor %mm5,%mm5");
			asm volatile("pxor %mm7,%mm7");
		}
		/* Non-temporal stores keep P/Q out of the cache. */
		asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
		asm volatile("movntq %%mm3,%0" : "=m" (p[d+8]));
		asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
		asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
	}

	asm volatile("sfence" : :: "memory");
	kernel_fpu_end();
}
/* Algorithm descriptor: unrolled-by-2 SSE1/MMX syndrome generator. */
const struct raid6_calls raid6_sse1x2 = {
	raid6_sse12_gen_syndrome,
	raid6_have_sse1_or_mmxext,
	"sse1x2",
	1			/* Has cache hints */
};
#endif
| gpl-2.0 |
SomethingExplosive/android_kernel_samsung_manta | arch/sh/kernel/cpu/sh2a/clock-sh7201.c | 9237 | 2017 | /*
* arch/sh/kernel/cpu/sh2a/clock-sh7201.c
*
* SH7201 support for the clock framework
*
* Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk>
*
* Based on clock-sh4.c
* Copyright (C) 2005 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
/* Multiplier/divider tables indexed by FREQCR bit fields. */
static const int pll1rate[]={1,2,3,4,6,8};
static const int pfc_divisors[]={1,2,3,4,6,8,12};
#define ifc_divisors pfc_divisors

/* PLL2 multiplier; selected from the mode pins in arch_init_clk_ops(). */
static unsigned int pll2_mult;

/*
 * Master clock = 10 MHz input * PLL2 * PLL1 (FREQCR bits 10:8).
 * NOTE(review): the 0x0007 mask permits indices 6-7 but pll1rate has
 * only 6 entries -- confirm the hardware never reports those values.
 */
static void master_clk_init(struct clk *clk)
{
	clk->rate = 10000000 * pll2_mult *
		pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}

static struct sh_clk_ops sh7201_master_clk_ops = {
	.init		= master_clk_init,
};

/*
 * Peripheral module clock: parent rate / pfc divisor (FREQCR bits 2:0).
 * NOTE(review): mask 0x0007 permits index 7 but pfc_divisors has 7
 * entries (indices 0-6) -- same caveat as above.
 */
static unsigned long module_clk_recalc(struct clk *clk)
{
	int idx = (__raw_readw(FREQCR) & 0x0007);
	return clk->parent->rate / pfc_divisors[idx];
}

static struct sh_clk_ops sh7201_module_clk_ops = {
	.recalc		= module_clk_recalc,
};

/* Bus clock uses the same divisor field as the module clock. */
static unsigned long bus_clk_recalc(struct clk *clk)
{
	int idx = (__raw_readw(FREQCR) & 0x0007);
	return clk->parent->rate / pfc_divisors[idx];
}

static struct sh_clk_ops sh7201_bus_clk_ops = {
	.recalc		= bus_clk_recalc,
};

/* CPU clock: parent rate / ifc divisor (FREQCR bits 6:4). */
static unsigned long cpu_clk_recalc(struct clk *clk)
{
	int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
	return clk->parent->rate / ifc_divisors[idx];
}

static struct sh_clk_ops sh7201_cpu_clk_ops = {
	.recalc		= cpu_clk_recalc,
};

/* Ops lookup table, indexed by clock id in arch_init_clk_ops(). */
static struct sh_clk_ops *sh7201_clk_ops[] = {
	&sh7201_master_clk_ops,
	&sh7201_module_clk_ops,
	&sh7201_bus_clk_ops,
	&sh7201_cpu_clk_ops,
};
/*
 * Pick the PLL2 multiplier from the boot mode pins, then hand back the
 * ops structure for the requested clock (out-of-range idx leaves *ops
 * untouched).
 */
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
	unsigned int mult;

	if (test_mode_pin(MODE_PIN1 | MODE_PIN0))
		mult = 1;
	else if (test_mode_pin(MODE_PIN1))
		mult = 2;
	else
		mult = 4;
	pll2_mult = mult;

	if (idx < ARRAY_SIZE(sh7201_clk_ops))
		*ops = sh7201_clk_ops[idx];
}
| gpl-2.0 |
poondog/kangaroo-m7-mkv | arch/arm/mach-mmp/devices.c | 9493 | 1542 | /*
* linux/arch/arm/mach-mmp/devices.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <mach/devices.h>
/*
 * pxa_register_device - allocate and register one PXA platform device
 * @desc: static descriptor (driver name, id, MMIO window, IRQ, DMA list)
 * @data: optional platform data copied into the device (may be NULL)
 * @size: size of @data in bytes
 *
 * Builds the resource table from @desc: one MEM resource if a region
 * is described, one IRQ resource if the irq is valid, and one DMA
 * resource per channel until a zero terminator.
 *
 * Returns 0 on success or a negative errno; on failure the reference
 * taken by platform_device_alloc() is dropped.  (Reworked to use a
 * single goto error path instead of duplicating the put/return pair.)
 */
int __init pxa_register_device(struct pxa_device_desc *desc,
			       void *data, size_t size)
{
	struct platform_device *pdev;
	struct resource res[2 + MAX_RESOURCE_DMA];
	int i, ret = 0, nres = 0;

	pdev = platform_device_alloc(desc->drv_name, desc->id);
	if (pdev == NULL)
		return -ENOMEM;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	memset(res, 0, sizeof(res));

	if (desc->start != -1ul && desc->size > 0) {
		res[nres].start = desc->start;
		res[nres].end = desc->start + desc->size - 1;
		res[nres].flags = IORESOURCE_MEM;
		nres++;
	}

	if (desc->irq != NO_IRQ) {
		res[nres].start = desc->irq;
		res[nres].end = desc->irq;
		res[nres].flags = IORESOURCE_IRQ;
		nres++;
	}

	for (i = 0; i < MAX_RESOURCE_DMA; i++, nres++) {
		if (desc->dma[i] == 0)
			break;	/* zero terminates the DMA channel list */
		res[nres].start = desc->dma[i];
		res[nres].end = desc->dma[i];
		res[nres].flags = IORESOURCE_DMA;
	}

	ret = platform_device_add_resources(pdev, res, nres);
	if (ret)
		goto err_put;

	if (data && size) {
		ret = platform_device_add_data(pdev, data, size);
		if (ret)
			goto err_put;
	}

	return platform_device_add(pdev);

err_put:
	platform_device_put(pdev);
	return ret;
}
| gpl-2.0 |
virt2real/linux-davinci-2.6 | fs/nls/nls_cp1251.c | 12565 | 12751 | /*
* linux/fs/nls/nls_cp1251.c
*
* Charset cp1251 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
/*
 * cp1251 byte -> Unicode code point.  0x00-0x7f are ASCII-identical;
 * 0x80-0xff cover Cyrillic plus typographic punctuation.  The single
 * unmapped byte (0x98) translates to 0x0000, which char2uni() rejects.
 */
static const wchar_t charset2uni[256] = {
	/* 0x00*/
	0x0000, 0x0001, 0x0002, 0x0003,
	0x0004, 0x0005, 0x0006, 0x0007,
	0x0008, 0x0009, 0x000a, 0x000b,
	0x000c, 0x000d, 0x000e, 0x000f,
	/* 0x10*/
	0x0010, 0x0011, 0x0012, 0x0013,
	0x0014, 0x0015, 0x0016, 0x0017,
	0x0018, 0x0019, 0x001a, 0x001b,
	0x001c, 0x001d, 0x001e, 0x001f,
	/* 0x20*/
	0x0020, 0x0021, 0x0022, 0x0023,
	0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b,
	0x002c, 0x002d, 0x002e, 0x002f,
	/* 0x30*/
	0x0030, 0x0031, 0x0032, 0x0033,
	0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b,
	0x003c, 0x003d, 0x003e, 0x003f,
	/* 0x40*/
	0x0040, 0x0041, 0x0042, 0x0043,
	0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b,
	0x004c, 0x004d, 0x004e, 0x004f,
	/* 0x50*/
	0x0050, 0x0051, 0x0052, 0x0053,
	0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b,
	0x005c, 0x005d, 0x005e, 0x005f,
	/* 0x60*/
	0x0060, 0x0061, 0x0062, 0x0063,
	0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b,
	0x006c, 0x006d, 0x006e, 0x006f,
	/* 0x70*/
	0x0070, 0x0071, 0x0072, 0x0073,
	0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b,
	0x007c, 0x007d, 0x007e, 0x007f,
	/* 0x80*/
	0x0402, 0x0403, 0x201a, 0x0453,
	0x201e, 0x2026, 0x2020, 0x2021,
	0x20ac, 0x2030, 0x0409, 0x2039,
	0x040a, 0x040c, 0x040b, 0x040f,
	/* 0x90*/
	0x0452, 0x2018, 0x2019, 0x201c,
	0x201d, 0x2022, 0x2013, 0x2014,
	0x0000, 0x2122, 0x0459, 0x203a,
	0x045a, 0x045c, 0x045b, 0x045f,
	/* 0xa0*/
	0x00a0, 0x040e, 0x045e, 0x0408,
	0x00a4, 0x0490, 0x00a6, 0x00a7,
	0x0401, 0x00a9, 0x0404, 0x00ab,
	0x00ac, 0x00ad, 0x00ae, 0x0407,
	/* 0xb0*/
	0x00b0, 0x00b1, 0x0406, 0x0456,
	0x0491, 0x00b5, 0x00b6, 0x00b7,
	0x0451, 0x2116, 0x0454, 0x00bb,
	0x0458, 0x0405, 0x0455, 0x0457,
	/* 0xc0*/
	0x0410, 0x0411, 0x0412, 0x0413,
	0x0414, 0x0415, 0x0416, 0x0417,
	0x0418, 0x0419, 0x041a, 0x041b,
	0x041c, 0x041d, 0x041e, 0x041f,
	/* 0xd0*/
	0x0420, 0x0421, 0x0422, 0x0423,
	0x0424, 0x0425, 0x0426, 0x0427,
	0x0428, 0x0429, 0x042a, 0x042b,
	0x042c, 0x042d, 0x042e, 0x042f,
	/* 0xe0*/
	0x0430, 0x0431, 0x0432, 0x0433,
	0x0434, 0x0435, 0x0436, 0x0437,
	0x0438, 0x0439, 0x043a, 0x043b,
	0x043c, 0x043d, 0x043e, 0x043f,
	/* 0xf0*/
	0x0440, 0x0441, 0x0442, 0x0443,
	0x0444, 0x0445, 0x0446, 0x0447,
	0x0448, 0x0449, 0x044a, 0x044b,
	0x044c, 0x044d, 0x044e, 0x044f,
};
/*
 * Unicode page 0x00 (U+0000-U+00FF) -> cp1251.  ASCII maps to itself;
 * a handful of Latin-1 punctuation marks have cp1251 equivalents; 0x00
 * means "no mapping" (uni2char() returns -EINVAL for those).
 *
 * Fix: the rows for 0xc0-0xff previously duplicated the 0xb8-0xbf row,
 * mapping U+00C3/CB/D3/DB/E3/EB/F3/FB (Latin letters with diacritics,
 * which do not exist in cp1251) to 0xbb (the '>>' guillemet).  Those
 * entries must all be zero.
 */
static const unsigned char page00[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0x00, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */
	0xb0, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/*
 * Unicode page 0x04 (Cyrillic, U+0400-U+04FF) -> cp1251.  U+0410-044F
 * map to the contiguous 0xc0-0xff range; the leading/trailing rows
 * cover the Serbian/Ukrainian/Belarusian extras.  0x00 = no mapping.
 */
static const unsigned char page04[256] = {
	0x00, 0xa8, 0x80, 0x81, 0xaa, 0xbd, 0xb2, 0xaf, /* 0x00-0x07 */
	0xa3, 0x8a, 0x8c, 0x8e, 0x8d, 0x00, 0xa1, 0x8f, /* 0x08-0x0f */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x10-0x17 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x18-0x1f */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x20-0x27 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0x28-0x2f */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0x48-0x4f */
	0x00, 0xb8, 0x90, 0x83, 0xba, 0xbe, 0xb3, 0xbf, /* 0x50-0x57 */
	0xbc, 0x9a, 0x9c, 0x9e, 0x9d, 0x00, 0xa2, 0x9f, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0xa5, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
};
/*
 * Unicode page 0x20 (general punctuation: dashes, quotes, bullets,
 * permille, guillemets) -> cp1251.  0x00 = no mapping.
 */
static const unsigned char page20[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */
	0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
};
/*
 * Unicode page 0x21 -> cp1251: only U+2116 (numero sign) and U+2122
 * (trade mark sign) exist in cp1251.  0x00 = no mapping.
 */
static const unsigned char page21[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x00, /* 0x10-0x17 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
};
/*
 * Unicode high byte -> 256-entry Unicode-to-cp1251 subtable.
 * NULL means no character in that page has a cp1251 equivalent.
 */
static const unsigned char *const page_uni2charset[256] = {
	page00, NULL,   NULL,   NULL,   page04, NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	page20, page21, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
};
/*
 * cp1251 tolower table: maps each byte to its lowercase form, identity
 * for caseless bytes.
 *
 * Fix: the ASCII section was corrupted -- 0x41-0x5a ('A'-'Z') mapped
 * to themselves instead of 0x61-0x7a, while 0x61-0x7a ('a'-'z') mapped
 * to UPPERCASE 0x41-0x5a.  That inverts the table's contract (compare
 * the intact Cyrillic rows below, and charset2upper which has the
 * correct mirror-image ASCII mapping).
 */
static const unsigned char charset2lower[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x90, 0x83, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x88, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x88-0x8f */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
	0xa0, 0xa2, 0xa2, 0xbc, 0xa4, 0xb4, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xb8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb3, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
/*
 * cp1251 toupper table: maps each byte to its uppercase form, identity
 * for caseless bytes ('a'-'z' -> 'A'-'Z', Cyrillic 0xe0-0xff -> 0xc0-
 * 0xdf, plus the extra Cyrillic letters in the 0x80-0xbf range).
 */
static const unsigned char charset2upper[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x81, 0x82, 0x81, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
	0x80, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x98, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa1, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb2, 0xa5, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xa8, 0xb9, 0xaa, 0xbb, 0xa3, 0xbd, 0xbd, 0xaf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xf8-0xff */
};
/*
 * Convert one Unicode character to its cp1251 byte.
 *
 * Returns 1 (bytes written) on success, -EINVAL if the character has
 * no cp1251 representation, -ENAMETOOLONG if the output buffer is full.
 */
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
	unsigned char row = (uni >> 8) & 0xff;	/* Unicode page */
	unsigned char col = uni & 0xff;		/* offset within page */
	const unsigned char *page;

	if (boundlen <= 0)
		return -ENAMETOOLONG;

	page = page_uni2charset[row];
	if (page == NULL || page[col] == 0)
		return -EINVAL;

	out[0] = page[col];
	return 1;
}
/*
 * Convert one cp1251 byte to Unicode.
 *
 * Returns 1 on success; -EINVAL only for the table's single unmapped
 * byte, whose entry is 0x0000.  *uni is written in either case, as in
 * the original implementation.
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	wchar_t ch = charset2uni[rawstring[0]];

	*uni = ch;
	if (ch == 0x0000)
		return -EINVAL;

	return 1;
}
/* NLS operations table registered with the kernel for charset "cp1251". */
static struct nls_table table = {
	.charset	= "cp1251",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
	.owner		= THIS_MODULE,
};

/* Module init: register the cp1251 table with the NLS core. */
static int __init init_nls_cp1251(void)
{
	return register_nls(&table);
}

/* Module exit: unregister the cp1251 table. */
static void __exit exit_nls_cp1251(void)
{
	unregister_nls(&table);
}

module_init(init_nls_cp1251)
module_exit(exit_nls_cp1251)

MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
thewisenerd/kernel_sprout | lib/is_single_threaded.c | 13589 | 1363 | /* Function to determine if a thread group is single threaded or not
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* - Derived from security/selinux/hooks.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sched.h>
/*
 * Returns true if the task does not share ->mm with another thread/process.
 *
 * Fast paths: if our own thread group has more than one live thread we
 * are trivially not single-threaded; if we are the sole user of the mm
 * (mm_users == 1) nobody else can share it.  Otherwise walk every
 * process under RCU looking for another user task that references the
 * same mm.  The walk is lockless and order-sensitive; do not reorder.
 */
bool current_is_single_threaded(void)
{
	struct task_struct *task = current;
	struct mm_struct *mm = task->mm;
	struct task_struct *p, *t;
	bool ret;

	if (atomic_read(&task->signal->live) != 1)
		return false;

	if (atomic_read(&mm->mm_users) == 1)
		return true;

	ret = false;
	rcu_read_lock();
	for_each_process(p) {
		/* Kernel threads only borrow mms; they never share ours. */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		/* Skip our own group (live == 1 ruled out siblings above). */
		if (unlikely(p == task->group_leader))
			continue;

		t = p;
		do {
			if (unlikely(t->mm == mm))
				goto found;
			/* A thread with some other mm ends this group's scan. */
			if (likely(t->mm))
				break;
			/*
			 * t->mm == NULL. Make sure next_thread/next_task
			 * will see other CLONE_VM tasks which might be
			 * forked before exiting.
			 */
			smp_rmb();
		} while_each_thread(p, t);
	}
	ret = true;
found:
	rcu_read_unlock();

	return ret;
}
| gpl-2.0 |
ccrma/sndtools | src/sndpeek/Stk.cpp | 22 | 3136 | /***************************************************/
/*! \class Stk
\brief STK base class
Nearly all STK classes inherit from this class.
The global sample rate can be queried and
modified via Stk. In addition, this class
provides error handling and byte-swapping
functions.
by Perry R. Cook and Gary P. Scavone, 1995 - 2002.
*/
/***************************************************/
#include "Stk.h"
#include <stdio.h>
#include <string.h>
// Global sample rate shared by all STK objects (defaults to SRATE).
MY_FLOAT Stk :: srate = (MY_FLOAT) SRATE;
// Directory searched for raw wave files (defaults to RAWWAVE_PATH).
std::string Stk :: rawwavepath = RAWWAVE_PATH;

// Sample-format identifiers (distinct bit-flag values).
const Stk::STK_FORMAT Stk :: STK_SINT8 = 1;
const Stk::STK_FORMAT Stk :: STK_SINT16 = 2;
const Stk::STK_FORMAT Stk :: STK_SINT32 = 8;
const Stk::STK_FORMAT Stk :: MY_FLOAT32 = 16;
const Stk::STK_FORMAT Stk :: MY_FLOAT64 = 32;

// Stk itself carries no per-instance state; nothing to set up or tear down.
Stk :: Stk(void)
{
}

Stk :: ~Stk(void)
{
}
//! Return the current global STK sample rate (Hz).
MY_FLOAT Stk :: sampleRate(void)
{
  return srate;
}

//! Set the global sample rate; non-positive values are ignored.
void Stk :: setSampleRate(MY_FLOAT newRate)
{
  if (newRate > 0)
    srate = newRate;
}
//! Return the current rawwave file search path.
std::string Stk :: rawwavePath(void)
{
  return rawwavepath;
}

//! Set the rawwave path; an empty argument keeps the previous path.
//! The stored path is guaranteed to end with "/".
void Stk :: setRawwavePath(std::string newPath)
{
  if ( !newPath.empty() )
    rawwavepath = newPath;

  // Make sure the path includes a trailing "/".  Guard the empty-path
  // case: the old code indexed rawwavepath[length()-1] unconditionally,
  // which is undefined behavior when the string is empty.
  if ( rawwavepath.empty() || rawwavepath[rawwavepath.length()-1] != '/' )
    rawwavepath += "/";
}
//! Reverse the two bytes of a 16-bit value in place.
void Stk :: swap16(unsigned char *ptr)
{
  unsigned char tmp = ptr[0];
  ptr[0] = ptr[1];
  ptr[1] = tmp;
}
//! Reverse the four bytes of a 32-bit value in place.
void Stk :: swap32(unsigned char *ptr)
{
  for (int i = 0; i < 2; i++) {
    unsigned char tmp = ptr[i];
    ptr[i] = ptr[3 - i];
    ptr[3 - i] = tmp;
  }
}
//! Reverse the eight bytes of a 64-bit value in place.
void Stk :: swap64(unsigned char *ptr)
{
  for (int i = 0; i < 4; i++) {
    unsigned char tmp = ptr[i];
    ptr[i] = ptr[7 - i];
    ptr[7 - i] = tmp;
  }
}
#if (defined(__OS_IRIX__) || defined(__OS_LINUX__) || defined(__OS_MACOSX__))
  #include <unistd.h>
#elif defined(__OS_WINDOWS__)
  #include <windows.h>
#endif

//! Suspend the calling thread for (approximately) the given number of
//! milliseconds, using the native facility of each supported platform.
void Stk :: sleep(unsigned long milliseconds)
{
#if defined(__OS_WINDOWS__)
  Sleep((DWORD) milliseconds);
#elif (defined(__OS_IRIX__) || defined(__OS_LINUX__) || defined(__OS_MACOSX__))
  usleep( (unsigned long) (milliseconds * 1000.0) );
#endif
}
//! Central error reporting: WARNINGs are printed to stderr,
//! DEBUG_WARNINGs only when _STK_DEBUG_ is defined, and every other
//! type is printed and then thrown as an StkError exception.
void Stk :: handleError( const char *message, StkError::TYPE type )
{
  if (type == StkError::WARNING)
    fprintf(stderr, "\n%s\n\n", message);
  else if (type == StkError::DEBUG_WARNING) {
#if defined(_STK_DEBUG_)
    fprintf(stderr, "\n%s\n\n", message);
#endif
  }
  else {
    // Print error message before throwing.
    fprintf(stderr, "\n%s\n\n", message);
    throw StkError(message, type);
  }
}
//! Construct an error holding a copy of the message (truncated to fit)
//! and its type.  The message buffer is declared in Stk.h; the original
//! bound of 256 implies char message[256] -- confirm there.
StkError :: StkError(const char *p, TYPE tipe)
  : type(tipe)
{
  // strncpy does not NUL-terminate when strlen(p) >= the bound; copy
  // one byte less and terminate explicitly so message is always a
  // valid C string.
  strncpy(message, p, 255);
  message[255] = '\0';
}
StkError :: ~StkError(void)
{
}

//! Print the stored error message to stdout.
void StkError :: printMessage(void)
{
  printf("\n%s\n\n", message);
}
| gpl-2.0 |
kzlin129/tt-gpl | go9/linux-s3c24xx/drivers/input/misc/pcspkr.c | 22 | 2214 | /*
* PC Speaker beeper driver for Linux
*
* Copyright (c) 2002 Vojtech Pavlik
* Copyright (c) 1992 Orest Zborowski
*
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <asm/8253pit.h>
#include <asm/io.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("PC Speaker beeper driver");
MODULE_LICENSE("GPL");
static char pcspkr_name[] = "PC Speaker";
static char pcspkr_phys[] = "isa0061/input0";
static struct input_dev pcspkr_dev;
static DEFINE_SPINLOCK(i8253_beep_lock);
/*
 * Input event handler: program PIT counter 2 to drive the PC speaker.
 * Only EV_SND events are handled; a non-zero SND_BELL value selects a
 * fixed 1000 Hz tone, SND_TONE uses the value as the frequency in Hz.
 * A value outside (20, 32767) leaves count at 0 and silences the beep.
 */
static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
	unsigned int count = 0;
	unsigned long flags;

	if (type != EV_SND)
		return -1;

	switch (code) {
	case SND_BELL: if (value) value = 1000;
		/* fallthrough: a bell is just a 1 kHz tone */
	case SND_TONE: break;
	default: return -1;
	}

	/* Convert frequency (Hz) to a PIT divisor. */
	if (value > 20 && value < 32767)
		count = PIT_TICK_RATE / value;

	/* The i8253 registers are shared hardware state; serialize. */
	spin_lock_irqsave(&i8253_beep_lock, flags);

	if (count) {
		/* enable counter 2 */
		outb_p(inb_p(0x61) | 3, 0x61);
		/* set command for counter 2, 2 byte write */
		outb_p(0xB6, 0x43);
		/* select desired HZ */
		outb_p(count & 0xff, 0x42);
		outb((count >> 8) & 0xff, 0x42);
	} else {
		/* disable counter 2 */
		outb(inb_p(0x61) & 0xFC, 0x61);
	}

	spin_unlock_irqrestore(&i8253_beep_lock, flags);

	return 0;
}
/*
 * Register the PC speaker as an input device that accepts SND_BELL
 * and SND_TONE sound events.
 */
static int __init pcspkr_init(void)
{
	pcspkr_dev.evbit[0] = BIT(EV_SND);
	pcspkr_dev.sndbit[0] = BIT(SND_BELL) | BIT(SND_TONE);
	pcspkr_dev.event = pcspkr_event;

	pcspkr_dev.name = pcspkr_name;
	pcspkr_dev.phys = pcspkr_phys;
	pcspkr_dev.id.bustype = BUS_ISA;
	pcspkr_dev.id.vendor = 0x001f;
	pcspkr_dev.id.product = 0x0001;
	pcspkr_dev.id.version = 0x0100;

	input_register_device(&pcspkr_dev);

	printk(KERN_INFO "input: %s\n", pcspkr_name);

	return 0;
}
/*
 * Module exit: unregister the device, then force the speaker off.
 * The SND_BELL event with value 0 takes the "disable counter 2" path
 * in pcspkr_event(), so no tone keeps playing after removal.  The
 * handler ignores its dev argument, hence the NULL.
 */
static void __exit pcspkr_exit(void)
{
	input_unregister_device(&pcspkr_dev);
	/* turn off the speaker */
	pcspkr_event(NULL, EV_SND, SND_BELL, 0);
}

module_init(pcspkr_init);
module_exit(pcspkr_exit);
| gpl-2.0 |
hajuuk/asuswrt | release/src/router/samba-3.5.8/source4/utils/oLschema2ldif.c | 22 | 12028 | /*
ldb database library
Copyright (C) Simo Sorce 2005
** NOTE! The following LGPL license applies to the ldb
** library. This does NOT imply that all of Samba is released
** under the LGPL
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Name: ldb
*
* Component: oLschema2ldif
*
* Description: utility to convert an OpenLDAP schema into AD LDIF
*
* Author: Simo Sorce
*/
#include "includes.h"
#include "ldb.h"
#include "tools/cmdline.h"
#include "dsdb/samdb/samdb.h"
#define SCHEMA_UNKNOWN 0
#define SCHEMA_NAME 1
#define SCHEMA_SUP 2
#define SCHEMA_STRUCTURAL 3
#define SCHEMA_ABSTRACT 4
#define SCHEMA_AUXILIARY 5
#define SCHEMA_MUST 6
#define SCHEMA_MAY 7
#define SCHEMA_SINGLE_VALUE 8
#define SCHEMA_EQUALITY 9
#define SCHEMA_ORDERING 10
#define SCHEMA_SUBSTR 11
#define SCHEMA_SYNTAX 12
#define SCHEMA_DESC 13
/* Conversion statistics returned by process_file(). */
struct schema_conv {
	int count;	/* schema entries processed (attempted) */
	int failures;	/* entries that could not be converted */
};

/* One keyword (plus optional value) lexed from a schema definition. */
struct schema_token {
	int type;	/* one of the SCHEMA_* codes above */
	char *value;	/* talloc'ed value string, when the keyword takes one */
};

struct ldb_context *ldb_ctx;	/* global ldb handle, used for LDIF output */
struct ldb_dn *basedn;		/* base DN the generated entries hang under */
/*
 * Examine STRING for a parenthesised group.
 *
 * Returns -1 when no opening '(' exists at all, 1 when an opened
 * group is still unbalanced at the end of the string, and 0 as soon
 * as the first group closes (i.e. the definition is complete).
 */
static int check_braces(const char *string)
{
	const char *p = strchr(string, '(');
	int depth;

	if (p == NULL)
		return -1;	/* nothing to balance */

	depth = 1;
	p++;
	while (depth > 0) {
		p = strpbrk(p, "()");
		if (p == NULL)
			return 1;	/* ran out of input, still open */
		if (*p == '(')
			depth++;
		else
			depth--;
		p++;
	}
	return 0;
}
/*
 * Advance past leading whitespace (space, tab, newline) and return a
 * pointer into STRING at the first non-blank character -- possibly
 * the terminating NUL.
 */
static char *skip_spaces(char *string)
{
	char *p = string;

	while (*p == ' ' || *p == '\t' || *p == '\n')
		p++;
	return p;
}
/*
 * Split VALUES on spaces, tabs and '$' separators and add each element
 * to MSG under attribute ATTR (used for e.g. "MUST ( a $ b )" lists).
 *
 * Returns 0 on success, -1 on allocation or ldb failure.
 *
 * Fix: the talloc_strndup() result was previously passed to
 * ldb_msg_add_string() unchecked; an allocation failure now returns -1
 * instead of adding a NULL value to the message.
 */
static int add_multi_string(struct ldb_message *msg, const char *attr, char *values)
{
	char *c;
	char *s;
	int n;

	c = skip_spaces(values);
	while (*c) {
		n = strcspn(c, " \t$");
		s = talloc_strndup(msg, c, n);
		if (s == NULL) {
			return -1;
		}
		if (ldb_msg_add_string(msg, attr, s) != 0) {
			return -1;
		}
		c += n;
		c += strspn(c, " \t$");	/* skip separators to the next element */
	}

	return 0;
}
#define MSG_ADD_STRING(a, v) do { if (ldb_msg_add_string(msg, a, v) != 0) goto failed; } while(0)
#define MSG_ADD_M_STRING(a, v) do { if (add_multi_string(msg, a, v) != 0) goto failed; } while(0)
/*
 * Extract the next value from *string: either a single-quoted string
 * (quotes stripped) or a bare word terminated by whitespace.  The
 * value is talloc'ed on CTX and *string is advanced past it.
 *
 * Fix: for a quoted value the closing quote was skipped
 * unconditionally; on an unterminated quote that stepped *string past
 * the NUL terminator, making later scans read out of bounds.  The
 * skip now only happens when the closing quote is actually present.
 */
static char *get_def_value(TALLOC_CTX *ctx, char **string)
{
	char *c = *string;
	char *value;
	int n;

	if (*c == '\'') {
		c++;
		n = strcspn(c, "\'");
		value = talloc_strndup(ctx, c, n);
		c += n;
		if (*c == '\'') {
			c++; /* skip closing \' */
		}
	} else {
		n = strcspn(c, " \t\n");
		value = talloc_strndup(ctx, c, n);
		c += n;
	}

	*string = c;
	return value;
}
/*
 * Lex the next schema keyword -- and its value, when the keyword takes
 * one -- from *string.  Returns a talloc'ed schema_token (allocated on
 * CTX) and advances *string past everything consumed.  List-valued
 * keywords (SUP/MUST/MAY) return the raw "(...)" interior as a single
 * string for add_multi_string() to split.  Unrecognised keywords come
 * back as SCHEMA_UNKNOWN with the keyword text itself in ->value.
 * Returns NULL only for an unterminated NAME alias list.
 */
static struct schema_token *get_next_schema_token(TALLOC_CTX *ctx, char **string)
{
	char *c = skip_spaces(*string);
	char *type;
	struct schema_token *token;
	int n;

	token = talloc(ctx, struct schema_token);

	/* the keyword is the next whitespace-delimited word */
	n = strcspn(c, " \t\n");
	type = talloc_strndup(token, c, n);
	c += n;
	c = skip_spaces(c);

	if (strcasecmp("NAME", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_NAME;
		/* we do not support aliases so we get only the first name given and skip others */
		if (*c == '(') {
			char *s = strchr(c, ')');
			if (s == NULL) return NULL;
			/* NOTE(review): s points AT the alias list's ')'
			 * (skip_spaces does not move past it), so *string
			 * ends up on that paren and the caller's parse loop
			 * stops there -- verify against upstream samba
			 * before changing. */
			s = skip_spaces(s);
			*string = s;

			c++;
			c = skip_spaces(c);
		}
		token->value = get_def_value(ctx, &c);

		if (*string < c) { /* single name */
			c = skip_spaces(c);
			*string = c;
		}
		return token;
	}
	if (strcasecmp("SUP", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_SUP;

		/* either "( a $ b )" list or a single value */
		if (*c == '(') {
			c++;
			n = strcspn(c, ")");
			token->value = talloc_strndup(ctx, c, n);
			c += n;
			c++;
		} else {
			token->value = get_def_value(ctx, &c);
		}

		c = skip_spaces(c);
		*string = c;
		return token;
	}

	if (strcasecmp("STRUCTURAL", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_STRUCTURAL;
		*string = c;
		return token;
	}

	if (strcasecmp("ABSTRACT", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_ABSTRACT;
		*string = c;
		return token;
	}

	if (strcasecmp("AUXILIARY", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_AUXILIARY;
		*string = c;
		return token;
	}

	if (strcasecmp("MUST", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_MUST;

		/* either "( a $ b )" list or a single value */
		if (*c == '(') {
			c++;
			n = strcspn(c, ")");
			token->value = talloc_strndup(ctx, c, n);
			c += n;
			c++;
		} else {
			token->value = get_def_value(ctx, &c);
		}

		c = skip_spaces(c);
		*string = c;
		return token;
	}

	if (strcasecmp("MAY", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_MAY;

		/* either "( a $ b )" list or a single value */
		if (*c == '(') {
			c++;
			n = strcspn(c, ")");
			token->value = talloc_strndup(ctx, c, n);
			c += n;
			c++;
		} else {
			token->value = get_def_value(ctx, &c);
		}

		c = skip_spaces(c);
		*string = c;
		return token;
	}

	if (strcasecmp("SINGLE-VALUE", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_SINGLE_VALUE;
		*string = c;
		return token;
	}

	if (strcasecmp("EQUALITY", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_EQUALITY;
		token->value = get_def_value(ctx, &c);
		c = skip_spaces(c);
		*string = c;
		return token;
	}

	if (strcasecmp("ORDERING", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_ORDERING;
		token->value = get_def_value(ctx, &c);
		c = skip_spaces(c);
		*string = c;
		return token;
	}

	if (strcasecmp("SUBSTR", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_SUBSTR;
		token->value = get_def_value(ctx, &c);
		c = skip_spaces(c);
		*string = c;
		return token;
	}

	if (strcasecmp("SYNTAX", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_SYNTAX;
		token->value = get_def_value(ctx, &c);
		c = skip_spaces(c);
		*string = c;
		return token;
	}

	if (strcasecmp("DESC", type) == 0) {
		talloc_free(type);
		token->type = SCHEMA_DESC;
		token->value = get_def_value(ctx, &c);
		c = skip_spaces(c);
		*string = c;
		return token;
	}

	/* unknown keyword: hand the word back and skip its value, if any */
	token->type = SCHEMA_UNKNOWN;
	token->value = type;
	if (*c == ')') {
		*string = c;
		return token;
	}
	if (*c == '\'') {
		c = strchr(++c, '\'');
		c++;
	} else {
		c += strcspn(c, " \t\n");
	}
	c = skip_spaces(c);
	*string = c;

	return token;
}
/*
 * Parse one complete "attributetype (...)" or "objectclass (...)"
 * definition into an ldb_message carrying the AD-style schema
 * attributes.  On success the message is reparented onto MEM_CTX and
 * returned; on any parse or allocation failure NULL is returned and
 * the temporary context is freed.
 */
static struct ldb_message *process_entry(TALLOC_CTX *mem_ctx, const char *entry)
{
	TALLOC_CTX *ctx;
	struct ldb_message *msg;
	struct schema_token *token;
	char *c, *s;
	int n;

	ctx = talloc_new(mem_ctx);
	msg = ldb_msg_new(ctx);

	ldb_msg_add_string(msg, "objectClass", "top");

	c = talloc_strdup(ctx, entry);
	if (!c) return NULL;

	c = skip_spaces(c);

	/* the entry kind determines the AD objectClass */
	switch (*c) {
	case 'a':
		if (strncmp(c, "attributetype", 13) == 0) {
			c += 13;
			MSG_ADD_STRING("objectClass", "attributeSchema");
			break;
		}
		goto failed;
	case 'o':
		if (strncmp(c, "objectclass", 11) == 0) {
			c += 11;
			MSG_ADD_STRING("objectClass", "classSchema");
			break;
		}
		goto failed;
	default:
		goto failed;
	}

	c = strchr(c, '(');
	if (c == NULL) goto failed;
	c++;

	c = skip_spaces(c);

	/* get attributeID */
	n = strcspn(c, " \t");
	s = talloc_strndup(msg, c, n);
	MSG_ADD_STRING("attributeID", s);
	c += n;
	c = skip_spaces(c);

	/* consume keyword/value pairs until the closing paren */
	while (*c != ')') {
		token = get_next_schema_token(msg, &c);
		if (!token) goto failed;

		switch (token->type) {
		case SCHEMA_NAME:
			MSG_ADD_STRING("cn", token->value);
			MSG_ADD_STRING("name", token->value);
			MSG_ADD_STRING("lDAPDisplayName", token->value);
			msg->dn = ldb_dn_copy(msg, basedn);
			ldb_dn_add_child_fmt(msg->dn, "CN=%s,CN=Schema,CN=Configuration", token->value);
			break;

		case SCHEMA_SUP:
			MSG_ADD_M_STRING("subClassOf", token->value);
			break;

		case SCHEMA_STRUCTURAL:
			MSG_ADD_STRING("objectClassCategory", "1");
			break;

		case SCHEMA_ABSTRACT:
			MSG_ADD_STRING("objectClassCategory", "2");
			break;

		case SCHEMA_AUXILIARY:
			MSG_ADD_STRING("objectClassCategory", "3");
			break;

		case SCHEMA_MUST:
			MSG_ADD_M_STRING("mustContain", token->value);
			break;

		case SCHEMA_MAY:
			MSG_ADD_M_STRING("mayContain", token->value);
			break;

		case SCHEMA_SINGLE_VALUE:
			MSG_ADD_STRING("isSingleValued", "TRUE");
			break;

		case SCHEMA_EQUALITY:
			/* TODO */
			break;

		case SCHEMA_ORDERING:
			/* TODO */
			break;

		case SCHEMA_SUBSTR:
			/* TODO */
			break;

		case SCHEMA_SYNTAX:
		{
			/* map the OpenLDAP syntax OID onto the AD one */
			const struct dsdb_syntax *map =
				find_syntax_map_by_standard_oid(token->value);
			if (!map) {
				break;
			}
			MSG_ADD_STRING("attributeSyntax", map->attributeSyntax_oid);
			break;
		}
		case SCHEMA_DESC:
			MSG_ADD_STRING("description", token->value);
			break;

		default:
			fprintf(stderr, "Unknown Definition: %s\n", token->value);
		}
	}

	talloc_steal(mem_ctx, msg);
	talloc_free(ctx);
	return msg;

failed:
	talloc_free(ctx);
	return NULL;
}
/*
 * Read schema definitions from IN -- each one a balanced-parenthesis
 * group that may span several lines -- convert each with
 * process_entry() and write the result as LDIF to OUT.  Lines starting
 * with '#' are comments.  Returns entry/failure counts.
 */
static struct schema_conv process_file(FILE *in, FILE *out)
{
	TALLOC_CTX *ctx;
	struct schema_conv ret;
	char *entry;
	int c, t, line;
	struct ldb_ldif ldif;

	ldif.changetype = LDB_CHANGETYPE_NONE;

	ctx = talloc_new(NULL);

	ret.count = 0;
	ret.failures = 0;
	line = 0;

	while ((c = fgetc(in)) != EOF) {
		line++;

		/* fprintf(stderr, "Parsing line %d\n", line); */
		if (c == '#') {
			/* comment line: consume up to the newline */
			do {
				c = fgetc(in);
			} while (c != EOF && c != '\n');
			continue;
		}
		if (c == '\n') {
			continue;
		}

		t = 0;
		entry = talloc_array(ctx, char, 1024);
		if (entry == NULL) exit(-1);

		/* accumulate characters until the braces balance */
		do {
			if (c == '\n') {
				entry[t] = '\0';
				if (check_braces(entry) == 0) {
					ret.count++;
					ldif.msg = process_entry(ctx, entry);
					if (ldif.msg == NULL) {
						ret.failures++;
						fprintf(stderr, "No valid msg from entry \n[%s]\n at line %d\n", entry, line);
						break;
					}
					ldb_ldif_write_file(ldb_ctx, out, &ldif);
					break;
				}
				line++;
			} else {
				entry[t] = c;
				t++;
			}
			if ((t % 1023) == 0) {
				/* grow the buffer before it fills up */
				entry = talloc_realloc(ctx, entry, char, t + 1024);
				if (entry == NULL) exit(-1);
			}
		} while ((c = fgetc(in)) != EOF);

		/* EOF reached before a newline: try the trailing entry */
		if (c != '\n') {
			entry[t] = '\0';
			if (check_braces(entry) == 0) {
				ret.count++;
				ldif.msg = process_entry(ctx, entry);
				if (ldif.msg == NULL) {
					ret.failures++;
					fprintf(stderr, "No valid msg from entry \n[%s]\n at line %d\n", entry, line);
					break;
				}
				ldb_ldif_write_file(ldb_ctx, out, &ldif);
			} else {
				fprintf(stderr, "malformed entry on line %d\n", line);
				ret.failures++;
			}
		}

		if (c == EOF) break;
	}

	/* NOTE(review): ctx (and every accumulated entry) is released only
	 * at process exit -- acceptable for this one-shot tool. */
	return ret;
}
/* Print command-line help to stdout and exit(1).  Never returns; also
 * installed as the ldb_cmdline_process() usage callback. */
static void usage(void)
{
	printf("Usage: oLschema2ldif -H NONE <options>\n");
	printf("\nConvert OpenLDAP schema to AD-like LDIF format\n\n");
	printf("Options:\n");
	printf("  -I inputfile     inputfile of OpenLDAP style schema otherwise STDIN\n");
	printf("  -O outputfile    outputfile otherwise STDOUT\n");
	printf("  -o options       pass options like modules to activate\n");
	printf("              e.g: -o modules:timestamps\n");
	printf("\n");
	printf("Converts records from an openLdap formatted schema to an ldif schema\n\n");
	exit(1);
}
/*
 * Entry point: parse ldb command-line options (the base DN is
 * mandatory), open the optional input/output files and run the
 * conversion, reporting totals on stdout.
 */
int main(int argc, const char **argv)
{
	TALLOC_CTX *ctx;
	struct schema_conv ret;
	struct ldb_cmdline *options;
	FILE *in = stdin;
	FILE *out = stdout;
	ctx = talloc_new(NULL);
	ldb_ctx = ldb_init(ctx, NULL);

	/* no real database is ever opened; LDB_URL is forced to NONE */
	setenv("LDB_URL", "NONE", 1);
	options = ldb_cmdline_process(ldb_ctx, argc, argv, usage);

	if (options->basedn == NULL) {
		/* NOTE(review): perror() prints strerror(errno), which is
		 * unrelated to these validation failures -- fprintf(stderr)
		 * would be more accurate; kept as-is. */
		perror("Base DN not specified");
		exit(1);
	} else {
		basedn = ldb_dn_new(ctx, ldb_ctx, options->basedn);
		if ( ! ldb_dn_validate(basedn)) {
			perror("Malformed Base DN");
			exit(1);
		}
	}

	if (options->input) {
		in = fopen(options->input, "r");
		if (!in) {
			perror(options->input);
			exit(1);
		}
	}
	if (options->output) {
		out = fopen(options->output, "w");
		if (!out) {
			perror(options->output);
			exit(1);
		}
	}

	ret = process_file(in, out);

	/* closes stdin/stdout when no -I/-O was given; harmless at exit */
	fclose(in);
	fclose(out);

	printf("Converted %d records with %d failures\n", ret.count, ret.failures);

	return 0;
}
| gpl-2.0 |
flipz/android-2.6.27-heroc | arch/ia64/kernel/signal.c | 22 | 19031 | /*
* Architecture-specific signal handling support.
*
* Copyright (C) 1999-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Derived from i386 and Alpha versions.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/sigcontext.h>
#include "sigframe.h"
#define DEBUG_SIG 0
#define STACK_ALIGN 16 /* minimal alignment for stack pointer */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
#if _NSIG_WORDS > 1
# define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t))
# define GET_SIGSET(k,u) __copy_from_user((k)->sig, (u)->sig, sizeof(sigset_t))
#else
# define PUT_SIGSET(k,u) __put_user((k)->sig[0], &(u)->sig[0])
# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
#endif
/*
 * ia64 wrapper for sigaltstack(2).  The arg2..arg7 slots absorb the
 * remaining syscall-stub register arguments; only uss/uoss and the
 * user stack pointer (r12 from the passed pt_regs) are used.
 */
asmlinkage long
sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2,
		 long arg3, long arg4, long arg5, long arg6, long arg7,
		 struct pt_regs regs)
{
	return do_sigaltstack(uss, uoss, regs.r12);
}
/*
 * Copy machine state back from the user-level sigcontext SC into the
 * kernel-side scratch area SCR during sigreturn.  Returns the OR of
 * all user-access error codes (0 on success).  Scratch registers and
 * the FP-high partition are only restored when the corresponding
 * SC flags say they were saved.
 */
static long
restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
{
	unsigned long ip, flags, nat, um, cfm, rsc;
	long err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* restore scratch that always needs gets updated during signal delivery: */
	err = __get_user(flags, &sc->sc_flags);
	err |= __get_user(nat, &sc->sc_nat);
	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
	err |= __get_user(cfm, &sc->sc_cfm);
	err |= __get_user(um, &sc->sc_um);			/* user mask */
	err |= __get_user(rsc, &sc->sc_ar_rsc);
	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
	err |= __get_user(scr->pt.pr, &sc->sc_pr);		/* predicates */
	err |= __get_user(scr->pt.b0, &sc->sc_br[0]);		/* b0 (rp) */
	err |= __get_user(scr->pt.b6, &sc->sc_br[6]);		/* b6 */
	err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8);	/* r1 */
	err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8);	/* r8-r11 */
	err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8);	/* r12-r13 */
	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */

	/* cr_ifs valid bit forced on; RSC forced to user privilege level */
	scr->pt.cr_ifs = cfm | (1UL << 63);
	scr->pt.ar_rsc = rsc | (3 << 2);			/* force PL3 */

	/* establish new instruction pointer: */
	scr->pt.cr_iip = ip & ~0x3UL;
	ia64_psr(&scr->pt)->ri = ip & 0x3;
	scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM);

	scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat);

	if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
		/* Restore most scratch-state only when not in syscall. */
		err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);	/* ar.ccv */
		err |= __get_user(scr->pt.b7, &sc->sc_br[7]);		/* b7 */
		err |= __get_user(scr->pt.r14, &sc->sc_gr[14]);		/* r14 */
		err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
		err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8);	/* r2-r3 */
		err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8);	/* r16-r31 */
	}

	if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) {
		struct ia64_psr *psr = ia64_psr(&scr->pt);

		err |= __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
		psr->mfh = 0;	/* drop signal handler's fph contents... */
		preempt_disable();
		if (psr->dfh)
			ia64_drop_fpu(current);
		else {
			/* We already own the local fph, otherwise psr->dfh wouldn't be 0. */
			__ia64_load_fpu(current->thread.fph);
			ia64_set_local_fpu_owner(current);
		}
		preempt_enable();
	}
	return err;
}
/*
 * Copy a kernel siginfo_t to user space, copying only the union member
 * that is valid for the given si_code (never the padding, to avoid
 * leaking kernel stack contents).  Negative si_code means the whole
 * structure was user-supplied and is copied verbatim.
 * Returns 0 on success, -EFAULT on access failure.
 */
int
copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from)
{
	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0) {
		if (__copy_to_user(to, from, sizeof(siginfo_t)))
			return -EFAULT;
		return 0;
	} else {
		int err;

		/*
		 * If you change siginfo_t structure, please be sure this code is fixed
		 * accordingly. It should never copy any pad contained in the structure
		 * to avoid security leaks, but must copy the generic 3 ints plus the
		 * relevant union member.
		 */
		err = __put_user(from->si_signo, &to->si_signo);
		err |= __put_user(from->si_errno, &to->si_errno);
		err |= __put_user((short)from->si_code, &to->si_code);
		switch (from->si_code >> 16) {
		case __SI_FAULT >> 16:
			err |= __put_user(from->si_flags, &to->si_flags);
			err |= __put_user(from->si_isr, &to->si_isr);
			/* fall through: faults also carry addr/imm */
		case __SI_POLL >> 16:
			err |= __put_user(from->si_addr, &to->si_addr);
			err |= __put_user(from->si_imm, &to->si_imm);
			break;
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_ptr, &to->si_ptr);
			break;
		case __SI_RT >> 16: /* Not generated by the kernel as of now. */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_ptr, &to->si_ptr);
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			/* fall through: CHLD also carries uid/pid */
		default:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_pid, &to->si_pid);
			break;
		}
		return err;
	}
}
/*
 * Implement the kernel side of rt_sigreturn: locate the sigframe above
 * the user stack pointer, restore the blocked-signal mask and the
 * machine state saved by setup_frame(), and hand back the address of
 * the kernel-exit path to resume through.  On any fault while reading
 * the frame a SIGSEGV is forced instead.
 *
 * Fix: "&current" had been corrupted to the mojibake "¤t" (HTML
 * entity mangling of "&curren"); the two occurrences below are
 * restored so the file compiles again.
 */
long
ia64_rt_sigreturn (struct sigscratch *scr)
{
	extern char ia64_strace_leave_kernel, ia64_leave_kernel;
	struct sigcontext __user *sc;
	struct siginfo si;
	sigset_t set;
	long retval;

	sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc;

	/*
	 * When we return to the previously executing context, r8 and r10 have already
	 * been setup the way we want them. Indeed, if the signal wasn't delivered while
	 * in a system call, we must not touch r8 or r10 as otherwise user-level state
	 * could be corrupted.
	 */
	retval = (long) &ia64_leave_kernel;
	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    || test_thread_flag(TIF_SYSCALL_AUDIT))
		/*
		 * strace expects to be notified after sigreturn returns even though the
		 * context to which we return may not be in the middle of a syscall.
		 * Thus, the return-value that strace displays for sigreturn is
		 * meaningless.
		 */
		retval = (long) &ia64_strace_leave_kernel;

	if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
		goto give_sigsegv;

	if (GET_SIGSET(&set, &sc->sc_mask))
		goto give_sigsegv;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	{
		current->blocked = set;
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(sc, scr))
		goto give_sigsegv;

#if DEBUG_SIG
	printk("SIG return (%s:%d): sp=%lx ip=%lx\n",
	       current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);
#endif
	/*
	 * It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
	do_sigaltstack(&sc->sc_stack, NULL, scr->pt.r12);

	return retval;

give_sigsegv:
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SI_KERNEL;
	si.si_pid = task_pid_vnr(current);
	si.si_uid = current->uid;
	si.si_addr = sc;
	force_sig_info(SIGSEGV, &si, current);
	return retval;
}
/*
 * This does just the minimum required setup of sigcontext.
 * Specifically, it only installs data that is either not knowable at
 * the user-level or that gets modified before execution in the
 * trampoline starts. Everything else is done at the user-level.
 * Returns the OR of all user-access error codes (0 on success).
 */
static long
setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr)
{
	unsigned long flags = 0, ifs, cfm, nat;
	long err = 0;

	ifs = scr->pt.cr_ifs;

	if (on_sig_stack((unsigned long) sc))
		flags |= IA64_SC_FLAG_ONSTACK;
	if ((ifs & (1UL << 63)) == 0)
		/* if cr_ifs doesn't have the valid bit set, we got here through a syscall */
		flags |= IA64_SC_FLAG_IN_SYSCALL;
	cfm = ifs & ((1UL << 38) - 1);
	ia64_flush_fph(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID)) {
		flags |= IA64_SC_FLAG_FPH_VALID;
		err = __copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16);
	}

	nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat);

	err |= __put_user(flags, &sc->sc_flags);
	err |= __put_user(nat, &sc->sc_nat);
	err |= PUT_SIGSET(mask, &sc->sc_mask);
	err |= __put_user(cfm, &sc->sc_cfm);
	err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um);
	err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
	err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat);		/* ar.unat */
	err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);		/* ar.fpsr */
	err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
	err |= __put_user(scr->pt.pr, &sc->sc_pr);			/* predicates */
	err |= __put_user(scr->pt.b0, &sc->sc_br[0]);			/* b0 (rp) */
	err |= __put_user(scr->pt.b6, &sc->sc_br[6]);			/* b6 */
	err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8);		/* r1 */
	err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8);		/* r8-r11 */
	err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8);	/* r12-r13 */
	err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8);		/* r15 */
	err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);

	if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
		/* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
		err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);		/* ar.ccv */
		err |= __put_user(scr->pt.b7, &sc->sc_br[7]);			/* b7 */
		err |= __put_user(scr->pt.r14, &sc->sc_gr[14]);			/* r14 */
		err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8);	/* ar.csd & ar.ssd */
		err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8);		/* r2-r3 */
		err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8);	/* r16-r31 */
	}
	return err;
}
/*
 * Check whether the register-backing store is already on the signal stack.
 * Relies on unsigned wraparound: when bsp lies below sas_ss_sp the
 * subtraction wraps to a huge value and the comparison is false, so a
 * single compare covers both bounds.
 */
static inline int
rbs_on_sig_stack (unsigned long bsp)
{
	return (bsp - current->sas_ss_sp < current->sas_ss_size);
}
/*
 * Force an unblockable SIGSEGV on the current task, used when signal
 * frame setup fails.  When the failing signal itself was SIGSEGV its
 * handler is reset to SIG_DFL first so the forced signal actually
 * terminates the task.  ADDR is reported as si_addr.  Returns 0 so
 * callers can "return force_sigsegv_info(...)" to report failure.
 *
 * Fix: "&current" had been corrupted to the mojibake "¤t" (HTML
 * entity mangling); the two occurrences below are restored.
 */
static long
force_sigsegv_info (int sig, void __user *addr)
{
	unsigned long flags;
	struct siginfo si;

	if (sig == SIGSEGV) {
		/*
		 * Acquiring siglock around the sa_handler-update is almost
		 * certainly overkill, but this isn't a
		 * performance-critical path and I'd rather play it safe
		 * here than having to debug a nasty race if and when
		 * something changes in kernel/signal.c that would make it
		 * no longer safe to modify sa_handler without holding the
		 * lock.
		 */
		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SI_KERNEL;
	si.si_pid = task_pid_vnr(current);
	si.si_uid = current->uid;
	si.si_addr = addr;
	force_sig_info(SIGSEGV, &si, current);
	return 0;
}
/*
 * Build the user-level signal frame: pick the stack (honouring
 * SA_ONSTACK and the register backing store), write the sigframe with
 * arguments, siginfo and sigcontext, and redirect execution to the
 * kernel signal trampoline.  Returns 1 on success; on any failure a
 * SIGSEGV is forced and 0 is returned via force_sigsegv_info().
 */
static long
setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
	     struct sigscratch *scr)
{
	extern char __kernel_sigtramp[];
	unsigned long tramp_addr, new_rbs = 0, new_sp;
	struct sigframe __user *frame;
	long err;

	new_sp = scr->pt.r12;
	tramp_addr = (unsigned long) __kernel_sigtramp;
	if (ka->sa.sa_flags & SA_ONSTACK) {
		int onstack = sas_ss_flags(new_sp);

		if (onstack == 0) {
			new_sp = current->sas_ss_sp + current->sas_ss_size;
			/*
			 * We need to check for the register stack being on the
			 * signal stack separately, because it's switched
			 * separately (memory stack is switched in the kernel,
			 * register stack is switched in the signal trampoline).
			 */
			if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
				new_rbs = ALIGN(current->sas_ss_sp,
						sizeof(long));
		} else if (onstack == SS_ONSTACK) {
			unsigned long check_sp;

			/*
			 * If we are on the alternate signal stack and would
			 * overflow it, don't. Return an always-bogus address
			 * instead so we will die with SIGSEGV.
			 */
			check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
			if (!likely(on_sig_stack(check_sp)))
				return force_sigsegv_info(sig, (void __user *)
							  check_sp);
		}
	}
	frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return force_sigsegv_info(sig, frame);

	err = __put_user(sig, &frame->arg0);
	err |= __put_user(&frame->info, &frame->arg1);
	err |= __put_user(&frame->sc, &frame->arg2);
	err |= __put_user(new_rbs, &frame->sc.sc_rbs_base);
	err |= __put_user(0, &frame->sc.sc_loadrs);	/* initialize to zero */
	err |= __put_user(ka->sa.sa_handler, &frame->handler);

	err |= copy_siginfo_to_user(&frame->info, info);

	err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp);
	err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size);
	err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags);
	err |= setup_sigcontext(&frame->sc, set, scr);

	if (unlikely(err))
		return force_sigsegv_info(sig, frame);

	scr->pt.r12 = (unsigned long) frame - 16;	/* new stack pointer */
	scr->pt.ar_fpsr = FPSR_DEFAULT;			/* reset fpsr for signal handler */
	scr->pt.cr_iip = tramp_addr;
	ia64_psr(&scr->pt)->ri = 0;			/* start executing in first slot */
	ia64_psr(&scr->pt)->be = 0;			/* force little-endian byte-order */
	/*
	 * Force the interruption function mask to zero. This has no effect when a
	 * system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is
	 * ignored), but it has the desirable effect of making it possible to deliver a
	 * signal with an incomplete register frame (which happens when a mandatory RSE
	 * load faults). Furthermore, it has no negative effect on the getting the user's
	 * dirty partition preserved, because that's governed by scr->pt.loadrs.
	 */
	scr->pt.cr_ifs = (1UL << 63);

	/*
	 * Note: this affects only the NaT bits of the scratch regs (the ones saved in
	 * pt_regs), which is exactly what we want.
	 */
	scr->scratch_unat = 0; /* ensure NaT bits of r12 is clear */

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n",
	       current->comm, current->pid, sig, scr->pt.r12, frame->sc.sc_ip, frame->handler);
#endif
	return 1;
}
/*
 * Deliver one signal: set up the user frame (IA-32 or IA-64 flavour)
 * and, on success, add the handler's sa_mask (plus the signal itself
 * unless SA_NODEFER) to the blocked set.  Returns 1 on success, 0 if
 * frame setup failed (the caller then loops to deliver the forced
 * SIGSEGV).
 *
 * Fix: "&current" had been corrupted to the mojibake "¤t" (HTML
 * entity mangling); the four occurrences below are restored.
 */
static long
handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
	       struct sigscratch *scr)
{
	if (IS_IA32_PROCESS(&scr->pt)) {
		/* send signal to IA-32 process */
		if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt))
			return 0;
	} else
		/* send signal to IA-64 process */
		if (!setup_frame(sig, ka, info, oldset, scr))
			return 0;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 1;
}
/*
* Note that `init' is a special process: it doesn't get signals it doesn't want to
* handle. Thus you cannot kill init even with a SIGKILL even by mistake.
*/
void
ia64_do_signal (struct sigscratch *scr, long in_syscall)
{
struct k_sigaction ka;
sigset_t *oldset;
siginfo_t info;
long restart = in_syscall;
long errno = scr->pt.r8;
# define ERR_CODE(c) (IS_IA32_PROCESS(&scr->pt) ? -(c) : (c))
/*
* In the ia64_leave_kernel code path, we want the common case to go fast, which
* is why we may in certain cases get here from kernel mode. Just return without
* doing anything if so.
*/
if (!user_mode(&scr->pt))
return;
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
oldset = ¤t->saved_sigmask;
else
oldset = ¤t->blocked;
/*
* This only loops in the rare cases of handle_signal() failing, in which case we
* need to push through a forced SIGSEGV.
*/
while (1) {
int signr = get_signal_to_deliver(&info, &ka, &scr->pt, NULL);
/*
* get_signal_to_deliver() may have run a debugger (via notify_parent())
* and the debugger may have modified the state (e.g., to arrange for an
* inferior call), thus it's important to check for restarting _after_
* get_signal_to_deliver().
*/
if (IS_IA32_PROCESS(&scr->pt)) {
if (in_syscall) {
if (errno >= 0)
restart = 0;
else
errno = -errno;
}
} else if ((long) scr->pt.r10 != -1)
/*
* A system calls has to be restarted only if one of the error codes
* ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10
* isn't -1 then r8 doesn't hold an error code and we don't need to
* restart the syscall, so we can clear the "restart" flag here.
*/
restart = 0;
if (signr <= 0)
break;
if (unlikely(restart)) {
switch (errno) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
scr->pt.r8 = ERR_CODE(EINTR);
/* note: scr->pt.r10 is already -1 */
break;
case ERESTARTSYS:
if ((ka.sa.sa_flags & SA_RESTART) == 0) {
scr->pt.r8 = ERR_CODE(EINTR);
/* note: scr->pt.r10 is already -1 */
break;
}
case ERESTARTNOINTR:
if (IS_IA32_PROCESS(&scr->pt)) {
scr->pt.r8 = scr->pt.r1;
scr->pt.cr_iip -= 2;
} else
ia64_decrement_ip(&scr->pt);
restart = 0; /* don't restart twice if handle_signal() fails... */
}
}
/*
* Whee! Actually deliver the signal. If the delivery failed, we need to
* continue to iterate in this loop so we can deliver the SIGSEGV...
*/
if (handle_signal(signr, &ka, &info, oldset, scr)) {
/*
* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
return;
}
}
/* Did we come from a system call? */
if (restart) {
/* Restart the system call - no handlers present */
if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR
|| errno == ERESTART_RESTARTBLOCK)
{
if (IS_IA32_PROCESS(&scr->pt)) {
scr->pt.r8 = scr->pt.r1;
scr->pt.cr_iip -= 2;
if (errno == ERESTART_RESTARTBLOCK)
scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */
} else {
/*
* Note: the syscall number is in r15 which is saved in
* pt_regs so all we need to do here is adjust ip so that
* the "break" instruction gets re-executed.
*/
ia64_decrement_ip(&scr->pt);
if (errno == ERESTART_RESTARTBLOCK)
scr->pt.r15 = __NR_restart_syscall;
}
}
}
/* if there's no signal to deliver, we just put the saved sigmask
* back */
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL);
}
}
| gpl-2.0 |
fbocharov/au-linux-kernel-spring-2016 | linux/drivers/rtc/rtc-davinci.c | 278 | 14980 | /*
* DaVinci Power Management and Real Time Clock Driver for TI platforms
*
* Copyright (C) 2009 Texas Instruments, Inc
*
* Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
/*
 * The DaVinci RTC is a simple RTC with the following
 * Sec: 0 - 59 : BCD count
 * Min: 0 - 59 : BCD count
 * Hour: 0 - 23 : BCD count
 * Day: 0 - 0x7FFF(32767) : Binary count ( Over 89 years )
 */

/* PRTC interface registers (32-bit MMIO window used as a mailbox) */
#define DAVINCI_PRTCIF_PID		0x00
#define PRTCIF_CTLR			0x04
#define PRTCIF_LDATA			0x08
#define PRTCIF_UDATA			0x0C
#define PRTCIF_INTEN			0x10
#define PRTCIF_INTFLG			0x14

/* PRTCIF_CTLR bit fields */
#define PRTCIF_CTLR_BUSY		BIT(31)	/* transfer in progress (see rtcif_wait) */
#define PRTCIF_CTLR_SIZE		BIT(25)
#define PRTCIF_CTLR_DIR			BIT(24)	/* set for reads (see rtcss_read) */
#define PRTCIF_CTLR_BENU_MSB		BIT(23)
#define PRTCIF_CTLR_BENU_3RD_BYTE	BIT(22)
#define PRTCIF_CTLR_BENU_2ND_BYTE	BIT(21)
#define PRTCIF_CTLR_BENU_LSB		BIT(20)
#define PRTCIF_CTLR_BENU_MASK		(0x00F00000)
#define PRTCIF_CTLR_BENL_MSB		BIT(19)
#define PRTCIF_CTLR_BENL_3RD_BYTE	BIT(18)
#define PRTCIF_CTLR_BENL_2ND_BYTE	BIT(17)
#define PRTCIF_CTLR_BENL_LSB		BIT(16)	/* byte enable used for all subsystem accesses */
#define PRTCIF_CTLR_BENL_MASK		(0x000F0000)

/* PRTCIF_INTEN bit fields */
#define PRTCIF_INTEN_RTCSS		BIT(1)
#define PRTCIF_INTEN_RTCIF		BIT(0)
#define PRTCIF_INTEN_MASK		(PRTCIF_INTEN_RTCSS \
					| PRTCIF_INTEN_RTCIF)

/* PRTCIF_INTFLG bit fields */
#define PRTCIF_INTFLG_RTCSS		BIT(1)
#define PRTCIF_INTFLG_RTCIF		BIT(0)
#define PRTCIF_INTFLG_MASK		(PRTCIF_INTFLG_RTCSS \
					| PRTCIF_INTFLG_RTCIF)

/* PRTC subsystem registers (byte-wide, reached through the PRTCIF mailbox) */
#define PRTCSS_RTC_INTC_EXTENA1	(0x0C)
#define PRTCSS_RTC_CTRL		(0x10)
#define PRTCSS_RTC_WDT		(0x11)
#define PRTCSS_RTC_TMR0		(0x12)
#define PRTCSS_RTC_TMR1		(0x13)
#define PRTCSS_RTC_CCTRL	(0x14)
#define PRTCSS_RTC_SEC		(0x15)
#define PRTCSS_RTC_MIN		(0x16)
#define PRTCSS_RTC_HOUR		(0x17)
#define PRTCSS_RTC_DAY0		(0x18)
#define PRTCSS_RTC_DAY1		(0x19)
#define PRTCSS_RTC_AMIN		(0x1A)
#define PRTCSS_RTC_AHOUR	(0x1B)
#define PRTCSS_RTC_ADAY0	(0x1C)
#define PRTCSS_RTC_ADAY1	(0x1D)
#define PRTCSS_RTC_CLKC_CNT	(0x20)

/* PRTCSS_RTC_INTC_EXTENA1 */
#define PRTCSS_RTC_INTC_EXTENA1_MASK	(0x07)

/* PRTCSS_RTC_CTRL bit fields */
#define PRTCSS_RTC_CTRL_WDTBUS		BIT(7)
#define PRTCSS_RTC_CTRL_WEN		BIT(6)
#define PRTCSS_RTC_CTRL_WDRT		BIT(5)
#define PRTCSS_RTC_CTRL_WDTFLG		BIT(4)
#define PRTCSS_RTC_CTRL_TE		BIT(3)
#define PRTCSS_RTC_CTRL_TIEN		BIT(2)
#define PRTCSS_RTC_CTRL_TMRFLG		BIT(1)
#define PRTCSS_RTC_CTRL_TMMD		BIT(0)

/* PRTCSS_RTC_CCTRL bit fields */
#define PRTCSS_RTC_CCTRL_CALBUSY	BIT(7)	/* calendar logic busy */
#define PRTCSS_RTC_CCTRL_DAEN		BIT(5)
#define PRTCSS_RTC_CCTRL_HAEN		BIT(4)
#define PRTCSS_RTC_CCTRL_MAEN		BIT(3)
#define PRTCSS_RTC_CCTRL_ALMFLG		BIT(2)	/* alarm fired */
#define PRTCSS_RTC_CCTRL_AIEN		BIT(1)	/* alarm interrupt enable */
#define PRTCSS_RTC_CCTRL_CAEN		BIT(0)	/* calendar enable */

/* Protects all PRTCIF/PRTCSS register sequences in this driver. */
static DEFINE_SPINLOCK(davinci_rtc_lock);

/* Per-device driver state. */
struct davinci_rtc {
	struct rtc_device *rtc;	/* registered RTC class device */
	void __iomem	  *base;	/* mapped PRTCIF register window */
	int		   irq;		/* combined RTC interrupt line */
};
/* 32-bit write of @val to the PRTC interface register at offset @addr. */
static inline void rtcif_write(struct davinci_rtc *davinci_rtc,
			       u32 val, u32 addr)
{
	void __iomem *reg = davinci_rtc->base + addr;

	writel(val, reg);
}
/* 32-bit read of the PRTC interface register at offset @addr. */
static inline u32 rtcif_read(struct davinci_rtc *davinci_rtc, u32 addr)
{
	void __iomem *reg = davinci_rtc->base + addr;

	return readl(reg);
}
static inline void rtcif_wait(struct davinci_rtc *davinci_rtc)
{
while (rtcif_read(davinci_rtc, PRTCIF_CTLR) & PRTCIF_CTLR_BUSY)
cpu_relax();
}
/*
 * Write one byte to a PRTC subsystem register.  The PRTCIF acts as a
 * mailbox: wait until idle, program CTLR with the target address and the
 * LSB byte-enable, put the data in LDATA, then wait for completion.
 * The statement order is mandated by the hardware handshake.
 */
static inline void rtcss_write(struct davinci_rtc *davinci_rtc,
			       unsigned long val, u8 addr)
{
	rtcif_wait(davinci_rtc);
	rtcif_write(davinci_rtc, PRTCIF_CTLR_BENL_LSB | addr, PRTCIF_CTLR);
	rtcif_write(davinci_rtc, val, PRTCIF_LDATA);
	rtcif_wait(davinci_rtc);
}
/*
 * Read one byte from a PRTC subsystem register.  CTLR_DIR selects a read
 * transfer; after the interface goes idle the result is in LDATA.
 */
static inline u8 rtcss_read(struct davinci_rtc *davinci_rtc, u8 addr)
{
	rtcif_wait(davinci_rtc);
	rtcif_write(davinci_rtc, PRTCIF_CTLR_DIR | PRTCIF_CTLR_BENL_LSB | addr,
		    PRTCIF_CTLR);
	rtcif_wait(davinci_rtc);
	return rtcif_read(davinci_rtc, PRTCIF_LDATA);
}
/* Spin until the calendar logic is idle; required before each time/alarm
 * register access (see read_time/set_time/..). */
static inline void davinci_rtcss_calendar_wait(struct davinci_rtc *davinci_rtc)
{
	while (rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
	       PRTCSS_RTC_CCTRL_CALBUSY)
		cpu_relax();
}
/*
 * Combined RTC interrupt handler.  Distinguishes alarm and periodic-timer
 * events via the ALMFLG/TMRFLG status bits, acknowledges the source by
 * writing the flag back (write-1-to-clear — TODO confirm against the PRTC
 * manual), clears the top-level PRTCIF flag and forwards the event(s) to
 * the RTC core.
 */
static irqreturn_t davinci_rtc_interrupt(int irq, void *class_dev)
{
	struct davinci_rtc *davinci_rtc = class_dev;
	unsigned long events = 0;
	u32 irq_flg;
	u8 alm_irq, tmr_irq;
	u8 rtc_ctrl, rtc_cctrl;
	int ret = IRQ_NONE;

	irq_flg = rtcif_read(davinci_rtc, PRTCIF_INTFLG) &
		  PRTCIF_INTFLG_RTCSS;
	alm_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
		  PRTCSS_RTC_CCTRL_ALMFLG;
	tmr_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL) &
		  PRTCSS_RTC_CTRL_TMRFLG;
	if (irq_flg) {
		if (alm_irq) {
			/* alarm event: ack by writing ALMFLG back */
			events |= RTC_IRQF | RTC_AF;
			rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
			rtc_cctrl |= PRTCSS_RTC_CCTRL_ALMFLG;
			rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
		} else if (tmr_irq) {
			/* periodic timer event: ack by writing TMRFLG back */
			events |= RTC_IRQF | RTC_PF;
			rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
			rtc_ctrl |= PRTCSS_RTC_CTRL_TMRFLG;
			rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
		}
		/* clear the top-level subsystem interrupt flag */
		rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS,
			    PRTCIF_INTFLG);
		rtc_update_irq(davinci_rtc->rtc, 1, events);
		ret = IRQ_HANDLED;
	}
	return ret;
}
/*
 * Watchdog-interrupt ioctls: RTC_WIE_ON enables the watchdog interrupt
 * (and writes WDTFLG — presumably clearing a pending flag; confirm
 * against the PRTC manual), RTC_WIE_OFF disables it.  For unknown
 * commands CTRL is written back unchanged (a harmless redundant write)
 * and -ENOIOCTLCMD is returned.
 */
static int
davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	u8 rtc_ctrl;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&davinci_rtc_lock, flags);
	rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
	switch (cmd) {
	case RTC_WIE_ON:
		rtc_ctrl |= PRTCSS_RTC_CTRL_WEN | PRTCSS_RTC_CTRL_WDTFLG;
		break;
	case RTC_WIE_OFF:
		rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN;
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
	spin_unlock_irqrestore(&davinci_rtc_lock, flags);
	return ret;
}
/*
 * Expand a raw day count (days since 2000-01-01, as kept by the DAY0/DAY1
 * hardware counter) into the year/month/day fields of @tm.
 * Inverse of convert2days().  Always returns 0.
 */
static int convertfromdays(u16 days, struct rtc_time *tm)
{
	int year = 2000;
	int mon = 0;

	/* Peel off whole years first... */
	for (;;) {
		int ydays = rtc_year_days(1, 12, year);

		if (days < ydays)
			break;
		days -= ydays;
		year++;
	}
	/* ...then whole months within the remaining year. */
	for (;;) {
		int mdays = rtc_month_days(mon, year);

		if (days < mdays)
			break;
		days -= mdays;
		mon++;
	}
	tm->tm_year = year - 1900;
	tm->tm_mon = mon;
	tm->tm_mday = days + 1;
	return 0;
}
/*
 * Collapse @tm into a day count since 2000-01-01 (the hardware epoch).
 * Only tm_year 100..199 (calendar years 2000..2099) is representable;
 * anything else yields -EINVAL with *days zeroed.
 */
static int convert2days(u16 *days, struct rtc_time *tm)
{
	int year;
	u16 total = 0;

	*days = 0;
	/* epoch == 1900 */
	if (tm->tm_year < 100 || tm->tm_year > 199)
		return -EINVAL;

	for (year = 2000; year < 1900 + tm->tm_year; year++)
		total += rtc_year_days(1, 12, year);

	total += rtc_year_days(tm->tm_mday, tm->tm_mon, 1900 + tm->tm_year);

	*days = total;
	return 0;
}
/*
 * Read the current time: BCD seconds/minutes/hours plus a 15-bit binary
 * day counter (epoch 2000-01-01) split across DAY0 (low) / DAY1 (high),
 * which is then expanded into year/month/day by convertfromdays().
 */
static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	u16 days = 0;
	u8 day0, day1;
	unsigned long flags;

	spin_lock_irqsave(&davinci_rtc_lock, flags);
	/* the calendar logic must be idle before each register access */
	davinci_rtcss_calendar_wait(davinci_rtc);
	tm->tm_sec = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_SEC));
	davinci_rtcss_calendar_wait(davinci_rtc);
	tm->tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_MIN));
	davinci_rtcss_calendar_wait(davinci_rtc);
	tm->tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_HOUR));
	davinci_rtcss_calendar_wait(davinci_rtc);
	day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY0);
	davinci_rtcss_calendar_wait(davinci_rtc);
	day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY1);
	spin_unlock_irqrestore(&davinci_rtc_lock, flags);

	/* reassemble the 16-bit day counter: DAY1 is the high byte */
	days |= day1;
	days <<= 8;
	days |= day0;

	if (convertfromdays(days, tm) < 0)
		return -EINVAL;

	return 0;
}
/*
 * Set the current time.  @tm is first collapsed into the day counter
 * (fails for years outside 2000-2099), then sec/min/hour are written in
 * BCD and DAY0/DAY1 in binary; finally the calendar is (re)enabled via
 * CCTRL_CAEN.
 */
static int davinci_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	u16 days;
	u8 rtc_cctrl;
	unsigned long flags;

	if (convert2days(&days, tm) < 0)
		return -EINVAL;

	spin_lock_irqsave(&davinci_rtc_lock, flags);
	/* the calendar logic must be idle before each register access */
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, bin2bcd(tm->tm_sec), PRTCSS_RTC_SEC);
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, bin2bcd(tm->tm_min), PRTCSS_RTC_MIN);
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, bin2bcd(tm->tm_hour), PRTCSS_RTC_HOUR);
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_DAY0);
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_DAY1);

	/* enable the calendar counter */
	rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
	rtc_cctrl |= PRTCSS_RTC_CCTRL_CAEN;
	rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
	spin_unlock_irqrestore(&davinci_rtc_lock, flags);

	return 0;
}
/*
 * Enable or disable the alarm interrupt.
 *
 * Enabling turns on day/hour/minute alarm comparison (DAEN/HAEN/MAEN),
 * writes ALMFLG to clear any stale alarm flag, and sets AIEN; disabling
 * only clears AIEN.
 *
 * Fix: the CCTRL read used to happen *before* taking davinci_rtc_lock,
 * so the read-modify-write was not atomic with respect to the interrupt
 * handler and the other ops; the read is now performed under the lock.
 */
static int davinci_rtc_alarm_irq_enable(struct device *dev,
					unsigned int enabled)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	unsigned long flags;
	u8 rtc_cctrl;

	spin_lock_irqsave(&davinci_rtc_lock, flags);
	rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
	if (enabled)
		rtc_cctrl |= PRTCSS_RTC_CCTRL_DAEN |
			     PRTCSS_RTC_CCTRL_HAEN |
			     PRTCSS_RTC_CCTRL_MAEN |
			     PRTCSS_RTC_CCTRL_ALMFLG |
			     PRTCSS_RTC_CCTRL_AIEN;
	else
		rtc_cctrl &= ~PRTCSS_RTC_CCTRL_AIEN;
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
	spin_unlock_irqrestore(&davinci_rtc_lock, flags);

	return 0;
}
/*
 * Read the alarm time (minute/hour in BCD, day as a 15-bit binary counter
 * split across ADAY0/ADAY1) and report enable/pending state from AIEN.
 * NOTE(review): the final CCTRL read happens outside davinci_rtc_lock,
 * and "pending" is derived from the interrupt-*enable* bit rather than a
 * fired-alarm flag — verify against the PRTC manual.
 */
static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	u16 days = 0;
	u8 day0, day1;
	unsigned long flags;

	spin_lock_irqsave(&davinci_rtc_lock, flags);
	davinci_rtcss_calendar_wait(davinci_rtc);
	alm->time.tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AMIN));
	davinci_rtcss_calendar_wait(davinci_rtc);
	alm->time.tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AHOUR));
	davinci_rtcss_calendar_wait(davinci_rtc);
	day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY0);
	davinci_rtcss_calendar_wait(davinci_rtc);
	day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY1);
	spin_unlock_irqrestore(&davinci_rtc_lock, flags);

	/* reassemble the 16-bit alarm day counter: ADAY1 is the high byte */
	days |= day1;
	days <<= 8;
	days |= day0;

	if (convertfromdays(days, &alm->time) < 0)
		return -EINVAL;

	alm->pending = !!(rtcss_read(davinci_rtc,
			  PRTCSS_RTC_CCTRL) &
			PRTCSS_RTC_CCTRL_AIEN);
	alm->enabled = alm->pending && device_may_wakeup(dev);

	return 0;
}
/*
 * Program the alarm time.  If the caller left the date fields unset
 * (mday <= 0, mon/year < 0), fill them in from the current date — or
 * from tomorrow's date if the requested wall-clock time has already
 * passed today.  The date must collapse into the 2000-2099 day counter.
 */
static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	unsigned long flags;
	u16 days;

	if (alm->time.tm_mday <= 0 && alm->time.tm_mon < 0
	    && alm->time.tm_year < 0) {
		struct rtc_time tm;
		unsigned long now, then;

		/* date omitted: alarm today, or tomorrow if already past */
		davinci_rtc_read_time(dev, &tm);
		rtc_tm_to_time(&tm, &now);

		alm->time.tm_mday = tm.tm_mday;
		alm->time.tm_mon = tm.tm_mon;
		alm->time.tm_year = tm.tm_year;
		rtc_tm_to_time(&alm->time, &then);

		if (then < now) {
			rtc_time_to_tm(now + 24 * 60 * 60, &tm);
			alm->time.tm_mday = tm.tm_mday;
			alm->time.tm_mon = tm.tm_mon;
			alm->time.tm_year = tm.tm_year;
		}
	}

	if (convert2days(&days, &alm->time) < 0)
		return -EINVAL;

	spin_lock_irqsave(&davinci_rtc_lock, flags);
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_min), PRTCSS_RTC_AMIN);
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_hour), PRTCSS_RTC_AHOUR);
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_ADAY0);
	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_ADAY1);
	spin_unlock_irqrestore(&davinci_rtc_lock, flags);

	return 0;
}
static struct rtc_class_ops davinci_rtc_ops = {
.ioctl = davinci_rtc_ioctl,
.read_time = davinci_rtc_read_time,
.set_time = davinci_rtc_set_time,
.alarm_irq_enable = davinci_rtc_alarm_irq_enable,
.read_alarm = davinci_rtc_read_alarm,
.set_alarm = davinci_rtc_set_alarm,
};
/*
 * Probe: map the PRTCIF registers, register the RTC class device, quiesce
 * the hardware (clear/mask all interrupts, stop timers/calendar), then
 * hook the IRQ and enable subsystem interrupts with the calendar running.
 *
 * Fix: on rtc_device_register failure the error message used to print a
 * stale `ret` (always 0); it now reports the real PTR_ERR value.
 */
static int __init davinci_rtc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct davinci_rtc *davinci_rtc;
	struct resource *res;
	int ret;

	davinci_rtc = devm_kzalloc(dev, sizeof(struct davinci_rtc), GFP_KERNEL);
	if (!davinci_rtc)
		return -ENOMEM;

	davinci_rtc->irq = platform_get_irq(pdev, 0);
	if (davinci_rtc->irq < 0) {
		dev_err(dev, "no RTC irq\n");
		return davinci_rtc->irq;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	davinci_rtc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(davinci_rtc->base))
		return PTR_ERR(davinci_rtc->base);

	platform_set_drvdata(pdev, davinci_rtc);

	davinci_rtc->rtc = devm_rtc_device_register(dev, pdev->name,
				    &davinci_rtc_ops, THIS_MODULE);
	if (IS_ERR(davinci_rtc->rtc)) {
		ret = PTR_ERR(davinci_rtc->rtc);
		dev_err(dev, "unable to register RTC device, err %d\n",
			ret);
		return ret;
	}

	/* quiesce the hardware before requesting the IRQ */
	rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG);
	rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
	rtcss_write(davinci_rtc, 0, PRTCSS_RTC_INTC_EXTENA1);
	rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CTRL);
	rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CCTRL);

	ret = devm_request_irq(dev, davinci_rtc->irq, davinci_rtc_interrupt,
			       0, "davinci_rtc", davinci_rtc);
	if (ret < 0) {
		dev_err(dev, "unable to register davinci RTC interrupt\n");
		return ret;
	}

	/* Enable interrupts */
	rtcif_write(davinci_rtc, PRTCIF_INTEN_RTCSS, PRTCIF_INTEN);
	rtcss_write(davinci_rtc, PRTCSS_RTC_INTC_EXTENA1_MASK,
		    PRTCSS_RTC_INTC_EXTENA1);

	/* start the calendar counter */
	rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL);

	device_init_wakeup(&pdev->dev, 0);

	return 0;
}
/*
 * Teardown: disable wakeup and mask all PRTCIF interrupts.  Memory, IRQ
 * and the RTC device are devm-managed and released automatically.
 */
static int __exit davinci_rtc_remove(struct platform_device *pdev)
{
	struct davinci_rtc *davinci_rtc = platform_get_drvdata(pdev);

	device_init_wakeup(&pdev->dev, 0);
	rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);

	return 0;
}
static struct platform_driver davinci_rtc_driver = {
	.remove		= __exit_p(davinci_rtc_remove),
	.driver		= {
		.name = "rtc_davinci",
	},
};

/* probe is __init, so register via the probe() helper variant */
module_platform_driver_probe(davinci_rtc_driver, davinci_rtc_probe);

MODULE_AUTHOR("Miguel Aguilar <miguel.aguilar@ridgerun.com>");
MODULE_DESCRIPTION("Texas Instruments DaVinci PRTC Driver");
MODULE_LICENSE("GPL");
/* allow automatic module loading when the platform device is created */
MODULE_ALIAS("platform:rtc_davinci");
| gpl-2.0 |
1N4148/agni | drivers/media/video/exynos/fimc-is/fimc-is-v4l2.c | 278 | 153518 | /*
* Samsung Exynos4 SoC series FIMC-IS slave interface driver
*
* v4l2 subdev driver interface
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* Contact: Younghwan Joo, <yhwan.joo@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/memory.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <linux/videodev2.h>
#include <linux/videodev2_exynos_camera.h>
#include <media/videobuf2-core.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-mediabus.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include "fimc-is-core.h"
#include "fimc-is-regs.h"
#include "fimc-is-param.h"
#include "fimc-is-cmd.h"
#include "fimc-is-err.h"
/* Binary load functions */

/*
 * Load the FIMC-IS (A5) firmware image into the reserved memory region.
 *
 * With SDCARD_FW defined, a firmware file on the SD card is preferred for
 * development; if it cannot be opened, fall back to request_firmware().
 * The image is copied into the IS memory (CMA-phys or ION variant,
 * compile-time selected), a reference copy is optionally kept in
 * fw_ref_base for fast re-load, and the version/info strings at the end
 * of the image are captured.  Returns 0 on success or a negative errno.
 */
static int fimc_is_request_firmware(struct fimc_is_dev *dev)
{
	int ret;
	struct firmware *fw_blob;
	u8 *buf = NULL;
#ifdef SDCARD_FW
	struct file *fp;
	mm_segment_t old_fs;
	long fsize, nread;
	int fw_requested = 1;	/* cleared when the SD-card file is used */

	ret = 0;
	/* kernel-space file I/O needs the addr-limit widened temporarily */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	fp = filp_open(FIMC_IS_FW_SDCARD, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		dbg("failed to open %s\n", FIMC_IS_FW_SDCARD);
		goto request_fw;
	}
	fw_requested = 0;
	fsize = fp->f_path.dentry->d_inode->i_size;
	dbg("start, file path %s, size %ld Bytes\n", FIMC_IS_FW_SDCARD, fsize);
	buf = vmalloc(fsize);
	if (!buf) {
		err("failed to allocate memory\n");
		ret = -ENOMEM;
		goto out;
	}
	nread = vfs_read(fp, (char __user *)buf, fsize, &fp->f_pos);
	if (nread != fsize) {
		err("failed to read firmware file, %ld Bytes\n", nread);
		ret = -EIO;
		goto out;
	}
#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
	/* copy into the IS region and flush so the A5 sees it */
	memcpy((void *)phys_to_virt(dev->mem.base), (void *)buf, fsize);
	fimc_is_mem_cache_clean((void *)phys_to_virt(dev->mem.base),
		fsize + 1);
	if (dev->mem.fw_ref_base > 0) {
		/* keep a reference copy for fast re-load on next open */
		memcpy((void *)phys_to_virt(dev->mem.fw_ref_base),
			(void *)buf, fsize);
		fimc_is_mem_cache_clean(
			(void *)phys_to_virt(dev->mem.fw_ref_base), fsize + 1);
		dev->fw.size = fsize;
	}
#elif defined(CONFIG_VIDEOBUF2_ION)
	if (dev->mem.bitproc_buf == 0) {
		err("failed to load FIMC-IS F/W, FIMC-IS will not working\n");
	} else {
		memcpy(dev->mem.kvaddr, (void *)buf, fsize);
		fimc_is_mem_cache_clean((void *)dev->mem.kvaddr, fsize + 1);
	}
#endif
	/* version/info strings live at the tail of the image */
	vfs_llseek(fp, -FIMC_IS_FW_VERSION_LENGTH, SEEK_END);
	vfs_read(fp, (char __user *)dev->fw.fw_version,
		(FIMC_IS_FW_VERSION_LENGTH - 1), &fp->f_pos);
	dev->fw.fw_version[FIMC_IS_FW_VERSION_LENGTH - 1] = '\0';
	vfs_llseek(fp, -(FIMC_IS_FW_INFO_LENGTH +
		FIMC_IS_FW_VERSION_LENGTH - 1), SEEK_END);
	vfs_read(fp, (char __user *)dev->fw.fw_info,
		(FIMC_IS_FW_INFO_LENGTH-1), &fp->f_pos);
	dev->fw.fw_info[FIMC_IS_FW_INFO_LENGTH - 1] = '\0';
	dev->fw.state = 1;
request_fw:
	if (fw_requested) {
		set_fs(old_fs);
#endif
		/* fall back to the kernel firmware loader */
		ret = request_firmware((const struct firmware **)&fw_blob,
			FIMC_IS_FW, &dev->pdev->dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"could not load firmware (err=%d)\n", ret);
			return -EINVAL;
		}
#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
		memcpy((void *)phys_to_virt(dev->mem.base),
			fw_blob->data, fw_blob->size);
		fimc_is_mem_cache_clean((void *)phys_to_virt(dev->mem.base),
			fw_blob->size + 1);
		if (dev->mem.fw_ref_base > 0) {
			memcpy((void *)phys_to_virt(dev->mem.fw_ref_base),
				fw_blob->data, fw_blob->size);
			fimc_is_mem_cache_clean(
				(void *)phys_to_virt(dev->mem.fw_ref_base),
				fw_blob->size + 1);
			dev->fw.size = fw_blob->size;
		}
#elif defined(CONFIG_VIDEOBUF2_ION)
		if (dev->mem.bitproc_buf == 0) {
			err("failed to load FIMC-IS F/W\n");
			return -EINVAL;
		} else {
			memcpy(dev->mem.kvaddr, fw_blob->data, fw_blob->size);
			fimc_is_mem_cache_clean(
				(void *)dev->mem.kvaddr, fw_blob->size + 1);
			dbg(
			"FIMC_IS F/W loaded successfully - size:%d\n", fw_blob->size);
		}
#endif
		/* capture the info/version strings from the image tail */
		memcpy((void *)dev->fw.fw_info,
			(fw_blob->data + fw_blob->size -
			(FIMC_IS_FW_INFO_LENGTH + FIMC_IS_FW_VERSION_LENGTH-1)),
			(FIMC_IS_FW_INFO_LENGTH - 1));
		dev->fw.fw_info[FIMC_IS_FW_INFO_LENGTH - 1] = '\0';
		memcpy((void *)dev->fw.fw_version,
			(fw_blob->data + fw_blob->size -
			FIMC_IS_FW_VERSION_LENGTH),
			(FIMC_IS_FW_VERSION_LENGTH - 1));
		dev->fw.fw_version[FIMC_IS_FW_VERSION_LENGTH - 1] = '\0';
		dev->fw.state = 1;
		dbg("FIMC_IS F/W loaded successfully - size:%d\n",
			fw_blob->size);
		release_firmware(fw_blob);
#ifdef SDCARD_FW
	}
#endif
out:
#ifdef SDCARD_FW
	if (!fw_requested) {
		/* SD-card path cleanup: buffer, file, addr-limit */
		vfree(buf);
		filp_close(fp, current->files);
		set_fs(old_fs);
	}
#endif
	printk(KERN_INFO "FIMC_IS FW loaded = 0x%08x\n", dev->mem.base);
	return ret;
}
/*
 * Load the sensor "setfile" (tuning parameter blob) into the IS memory at
 * dev->setfile.base.  Mirrors fimc_is_request_firmware(): an SD-card copy
 * is preferred when SDCARD_FW is defined, otherwise request_firmware() is
 * used; a reference copy is optionally kept in setfile_ref_base and the
 * trailing info string is captured.  Returns 0 or a negative errno.
 */
static int fimc_is_load_setfile(struct fimc_is_dev *dev)
{
	int ret;
	struct firmware *fw_blob;
	u8 *buf = NULL;
#ifdef SDCARD_FW
	struct file *fp;
	mm_segment_t old_fs;
	long fsize, nread;
	int fw_requested = 1;	/* cleared when the SD-card file is used */

	ret = 0;
	/* kernel-space file I/O needs the addr-limit widened temporarily */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	fp = filp_open(FIMC_IS_SETFILE_SDCARD, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		dbg("failed to open %s\n", FIMC_IS_SETFILE_SDCARD);
		goto request_fw;
	}
	fw_requested = 0;
	fsize = fp->f_path.dentry->d_inode->i_size;
	dbg("start, file path %s, size %ld Bytes\n",
		FIMC_IS_SETFILE_SDCARD, fsize);
	buf = vmalloc(fsize);
	if (!buf) {
		err("failed to allocate memory\n");
		ret = -ENOMEM;
		goto out;
	}
	nread = vfs_read(fp, (char __user *)buf, fsize, &fp->f_pos);
	if (nread != fsize) {
		err("failed to read firmware file, %ld Bytes\n", nread);
		ret = -EIO;
		goto out;
	}
#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
	/* copy to the setfile slot inside the IS region and flush */
	memcpy((void *)phys_to_virt(dev->mem.base + dev->setfile.base),
		(void *)buf, fsize);
	fimc_is_mem_cache_clean(
		(void *)phys_to_virt(dev->mem.base + dev->setfile.base),
		fsize + 1);
	if (dev->mem.setfile_ref_base > 0) {
		/* keep a reference copy for fast re-load */
		memcpy((void *)phys_to_virt(dev->mem.setfile_ref_base),
			(void *)buf, fsize);
		fimc_is_mem_cache_clean(
			(void *)phys_to_virt(dev->mem.setfile_ref_base),
			fsize + 1);
		dev->setfile.size = fsize;
	}
#elif defined(CONFIG_VIDEOBUF2_ION)
	if (dev->mem.bitproc_buf == 0) {
		err("failed to load FIMC-IS F/W, FIMC-IS will not working\n");
	} else {
		memcpy((dev->mem.kvaddr + dev->setfile.base),
			(void *)buf, fsize);
		fimc_is_mem_cache_clean((void *)dev->mem.kvaddr, fsize + 1);
		dbg("FIMC_IS Setfile loaded successfully - size:%ld\n", fsize);
	}
#endif
	/* info string lives at the tail of the file */
	vfs_llseek(fp, -FIMC_IS_SETFILE_INFO_LENGTH, SEEK_END);
	vfs_read(fp, (char __user *)dev->fw.setfile_info,
		FIMC_IS_SETFILE_INFO_LENGTH, &fp->f_pos);
	dev->setfile.state = 1;
request_fw:
	if (fw_requested) {
		set_fs(old_fs);
#endif
		/* fall back to the kernel firmware loader */
		ret = request_firmware((const struct firmware **)&fw_blob,
			FIMC_IS_SETFILE, &dev->pdev->dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"could not load firmware (err=%d)\n", ret);
			return -EINVAL;
		}
#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
		memcpy((void *)phys_to_virt(dev->mem.base + dev->setfile.base),
			fw_blob->data, fw_blob->size);
		fimc_is_mem_cache_clean(
			(void *)phys_to_virt(dev->mem.base + dev->setfile.base),
			fw_blob->size + 1);
		if (dev->mem.setfile_ref_base > 0) {
			memcpy((void *)phys_to_virt(dev->mem.setfile_ref_base),
				fw_blob->data, fw_blob->size);
			fimc_is_mem_cache_clean(
				(void *)phys_to_virt(dev->mem.setfile_ref_base),
				fw_blob->size + 1);
			dev->setfile.size = fw_blob->size;
		}
#elif defined(CONFIG_VIDEOBUF2_ION)
		if (dev->mem.bitproc_buf == 0) {
			err("failed to load FIMC-IS F/W\n");
			return -EINVAL;
		} else {
			memcpy((dev->mem.kvaddr + dev->setfile.base),
				fw_blob->data, fw_blob->size);
			fimc_is_mem_cache_clean((void *)dev->mem.kvaddr,
				fw_blob->size + 1);
			dbg(
			"FIMC_IS F/W loaded successfully - size:%d\n", fw_blob->size);
		}
#endif
		memcpy((void *)dev->fw.setfile_info,
			(fw_blob->data + fw_blob->size -
			FIMC_IS_SETFILE_INFO_LENGTH),
			(FIMC_IS_SETFILE_INFO_LENGTH - 1));
		dev->fw.setfile_info[FIMC_IS_SETFILE_INFO_LENGTH - 1] = '\0';
		dev->setfile.state = 1;
		dbg("FIMC_IS setfile loaded successfully - size:%d\n",
			fw_blob->size);
		release_firmware(fw_blob);
#ifdef SDCARD_FW
	}
#endif
	dbg("A5 mem base = 0x%08x\n", dev->mem.base);
	dbg("Setfile base = 0x%08x\n", dev->setfile.base);
out:
#ifdef SDCARD_FW
	if (!fw_requested) {
		/* SD-card path cleanup: buffer, file, addr-limit */
		vfree(buf);
		filp_close(fp, current->files);
		set_fs(old_fs);
	}
#endif
	return ret;
}
/* v4l2 subdev core operations */

/*
 * .load_fw subdev op: copy (or re-copy) the A5 firmware into IS memory,
 * set up the UART GPIOs, stamp chip id/revision into the shared region,
 * then power the A5 on and wait for the firmware-loaded interrupt.
 * Returns 0/-EINVAL on failure; on success returns the (positive)
 * remaining-jiffies value from wait_event_timeout() — callers treat
 * non-negative as success.
 */
static int fimc_is_load_fw(struct v4l2_subdev *sd)
{
	int ret = 0;
	struct fimc_is_dev *dev = to_fimc_is_dev(sd);

	dbg("+++ fimc_is_load_fw\n");
	if (!test_bit(IS_ST_IDLE, &dev->state)) {
		err("FW was already loaded!!\n");
		return ret;
	}
	/* 1. Load IS firmware: reuse the cached reference copy if present */
	if (dev->fw.state && (dev->mem.fw_ref_base > 0)) {
		memcpy((void *)phys_to_virt(dev->mem.base),
			(void *)phys_to_virt(dev->mem.fw_ref_base),
			dev->fw.size);
		fimc_is_mem_cache_clean((void *)phys_to_virt(dev->mem.base),
			dev->fw.size + 1);
	} else {
		ret = fimc_is_request_firmware(dev);
		if (ret) {
			err("failed to fimc_is_request_firmware (%d)\n", ret);
			return -EINVAL;
		}
	}
	/* 2. Init GPIO (UART) */
	ret = fimc_is_hw_io_init(dev);
	if (ret) {
		dev_err(&dev->pdev->dev, "failed to init GPIO config\n");
		return -EINVAL;
	}
	/* 3. Chip ID and Revision */
	dev->is_shared_region->chip_id = 0xe4412;
	dev->is_shared_region->chip_rev_no = 1;
	fimc_is_mem_cache_clean((void *)IS_SHARED,
		(unsigned long)(sizeof(struct is_share_region)));
	/* 4. A5 power on, then wait for the FW-loaded notification */
	fimc_is_hw_a5_power(dev, 1);
	ret = wait_event_timeout(dev->irq_queue1,
		test_bit(IS_ST_FW_LOADED, &dev->state),
		FIMC_IS_SHUTDOWN_TIMEOUT);
	if (!ret) {
		dev_err(&dev->pdev->dev,
			"wait timeout A5 power on: %s\n", __func__);
		fimc_is_hw_set_low_poweroff(dev, true);
		return -EINVAL;
	}
	clear_bit(IS_ST_IDLE, &dev->state);
	dbg("--- fimc_is_load_fw end\n");
	printk(KERN_INFO "FIMC-IS FW info = %s\n", dev->fw.fw_info);
	printk(KERN_INFO "FIMC-IS FW ver = %s\n", dev->fw.fw_version);
	return ret;
}
/*
 * .s_power subdev op: power the FIMC-IS up (@on != 0) or down (@on == 0).
 *
 * Power-up: lock bus frequency (if busfreq support is built in), leave
 * low-power-off mode and take a runtime-PM reference.
 * Power-down: close the sensor if a previous power-off was incomplete,
 * power off the sub-IPs and the A5, drop the runtime-PM reference and
 * reset all software state to idle/powered-off.
 */
int fimc_is_s_power(struct v4l2_subdev *sd, int on)
{
	struct fimc_is_dev *is_dev = to_fimc_is_dev(sd);
	struct device *dev = &is_dev->pdev->dev;
	int ret = 0;

	printk(KERN_INFO "%s++ %d\n", __func__, on);
	if (on) {
		if (test_bit(IS_PWR_ST_POWERON, &is_dev->power)) {
			err("FIMC-IS was already power on state!!\n");
			return ret;
		}
#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
		/* lock bus frequency */
		dev_lock(is_dev->bus_dev, dev, BUS_LOCK_FREQ_L1);
#endif
		fimc_is_hw_set_low_poweroff(is_dev, false);
		ret = pm_runtime_get_sync(dev);
		set_bit(IS_ST_A5_PWR_ON, &is_dev->state);
	} else {
		if (test_bit(IS_PWR_ST_POWEROFF, &is_dev->power)) {
			/* already marked off: finish closing the sensor */
			err("FIMC-IS was already power off state!!\n");
			err("Close sensor - %d\n", is_dev->sensor.id);
			fimc_is_hw_close_sensor(is_dev, 0);
			printk(KERN_INFO "%s Wait close sensor interrupt\n", __func__);
			/*
			 * NOTE(review): IS_ST_OPEN_SENSOR is a ->state bit
			 * elsewhere in this file but is tested on ->power
			 * here — looks like a wrong bitmap; verify.
			 */
			ret = wait_event_timeout(is_dev->irq_queue1,
				!test_bit(IS_ST_OPEN_SENSOR,
				&is_dev->power), FIMC_IS_SHUTDOWN_TIMEOUT);
			if (!ret) {
				err("Timeout-close sensor:%s\n", __func__);
				fimc_is_hw_set_low_poweroff(is_dev, true);
			} else {
				is_dev->p_region_index1 = 0;
				is_dev->p_region_index2 = 0;
				atomic_set(&is_dev->p_region_num, 0);
				printk(KERN_INFO "%s already power off return\n", __func__);
				return ret;
			}
		}
		printk(KERN_INFO "%s sub ip power off ++\n", __func__);
		if (!test_bit(IS_PWR_SUB_IP_POWER_OFF, &is_dev->power)) {
			printk(KERN_INFO "%s Sub ip is alive\n", __func__);
			fimc_is_hw_subip_poweroff(is_dev);
			printk(KERN_INFO "%s Wait Sub ip power off\n", __func__);
			ret = wait_event_timeout(is_dev->irq_queue1,
				test_bit(IS_PWR_SUB_IP_POWER_OFF,
				&is_dev->power), FIMC_IS_SHUTDOWN_TIMEOUT);
			if (!ret) {
				err("%s wait timeout\n", __func__);
				fimc_is_hw_set_low_poweroff(is_dev, true);
			}
		} else
			printk(KERN_INFO "%s sub ip was already power off state!!\n", __func__);
		printk(KERN_INFO "%s sub ip power off --\n", __func__);
		fimc_is_hw_a5_power(is_dev, 0);
		printk(KERN_INFO "A5 power off\n");
		ret = pm_runtime_put_sync(dev);
		/* reset all software state back to idle/powered-off */
		is_dev->sensor.id = 0;
		is_dev->sensor.framerate_update = false;
		is_dev->p_region_index1 = 0;
		is_dev->p_region_index2 = 0;
		atomic_set(&is_dev->p_region_num, 0);
		is_dev->state = 0;
		set_bit(IS_ST_IDLE, &is_dev->state);
		is_dev->power = 0;
		is_dev->af.af_state = FIMC_IS_AF_IDLE;
		is_dev->af.mode = IS_FOCUS_MODE_IDLE;
		set_bit(IS_PWR_ST_POWEROFF, &is_dev->power);
	}
	printk(KERN_INFO "%s --\n", __func__);
	return ret;
}
/*
 * Program the default parameter set for @scenario_id and wait for the
 * firmware to acknowledge the blocking command.  @name is only used in
 * the timeout error message.  Returns 0 on success; on timeout the IS is
 * forced into low-power-off and -EINVAL is returned.
 */
static int fimc_is_set_init_mode(struct fimc_is_dev *dev, u32 scenario_id,
				 const char *name)
{
	int ret;

	dev->scenario_id = scenario_id;
	fimc_is_hw_set_init(dev);
	fimc_is_mem_cache_clean((void *)dev->is_p_region, IS_PARAM_SIZE);
	clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state); /* BLOCK I/F Mode*/
	fimc_is_hw_set_param(dev);
	ret = wait_event_timeout(dev->irq_queue1,
		test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
		FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
	if (!ret) {
		err("wait timeout : init values (%s)\n", name);
		fimc_is_hw_set_low_poweroff(dev, true);
		return -EINVAL;
	}
	return 0;
}

/*
 * .init subdev op: bring the IS to a usable state for sensor type @val.
 * Sequence: open the sensor, load and activate the setfile, stream off,
 * then program default parameters for all four scenarios (via
 * fimc_is_set_init_mode(), which replaces four previously duplicated
 * inline copies of the same sequence).
 *
 * Returns 0 on success (the original returned the leftover positive
 * jiffies from the last wait) or -EINVAL on any timeout, after forcing
 * low-power-off.
 */
static int fimc_is_init_set(struct v4l2_subdev *sd, u32 val)
{
	int ret = 0;
	struct fimc_is_dev *dev = to_fimc_is_dev(sd);

	dev->sensor.sensor_type = val;
	dev->sensor.id = 0;
	dbg("fimc_is_init\n");
	if (!test_bit(IS_ST_A5_PWR_ON, &dev->state)) {
		err("A5 is not power on state!!\n");
		return -EINVAL;
	}
	/* Init sequence 1: Open sensor */
	dbg("v4l2 : open sensor : %d\n", val);
	fimc_is_hw_open_sensor(dev, dev->sensor.id, val);
	ret = wait_event_timeout(dev->irq_queue1,
		test_bit(IS_ST_OPEN_SENSOR, &dev->state),
		FIMC_IS_SHUTDOWN_TIMEOUT);
	if (!ret) {
		err("wait timeout - open sensor\n");
		fimc_is_hw_set_low_poweroff(dev, true);
		return -EINVAL;
	}
	/* Init sequence 2: Load setfile */
	/* Get setfile address */
	dbg("v4l2 : setfile address\n");
	fimc_is_hw_get_setfile_addr(dev);
	ret = wait_event_timeout(dev->irq_queue1,
		test_bit(IS_ST_SETFILE_LOADED, &dev->state),
		FIMC_IS_SHUTDOWN_TIMEOUT);
	if (!ret) {
		err("wait timeout - get setfile address\n");
		fimc_is_hw_set_low_poweroff(dev, true);
		return -EINVAL;
	}
	dbg("v4l2 : load setfile\n");
	if (dev->setfile.state && (dev->mem.setfile_ref_base > 0)) {
		/* reuse the cached reference copy instead of re-reading */
		memcpy((void *)phys_to_virt(dev->mem.base + dev->setfile.base),
			(void *)phys_to_virt(dev->mem.setfile_ref_base),
			dev->setfile.size);
		fimc_is_mem_cache_clean(
			(void *)phys_to_virt(dev->mem.base + dev->setfile.base),
			dev->setfile.size + 1);
	} else {
		fimc_is_load_setfile(dev);
	}
	clear_bit(IS_ST_SETFILE_LOADED, &dev->state);
	fimc_is_hw_load_setfile(dev);
	ret = wait_event_timeout(dev->irq_queue1,
		test_bit(IS_ST_SETFILE_LOADED, &dev->state),
		FIMC_IS_SHUTDOWN_TIMEOUT);
	if (!ret) {
		err("wait timeout - get setfile address\n");
		fimc_is_hw_set_low_poweroff(dev, true);
		return -EINVAL;
	}
	printk(KERN_INFO "FIMC-IS Setfile info = %s\n", dev->fw.setfile_info);
	dbg("v4l2 : Load set file end\n");
	/* Check magic number */
	if (dev->is_p_region->shared[MAX_SHARED_COUNT-1] != MAGIC_NUMBER)
		err("!!! MAGIC NUMBER ERROR !!!\n");
	/* Display region information (DEBUG only) */
	dbg("Parameter region addr = 0x%08x\n", virt_to_phys(dev->is_p_region));
	dbg("ISP region addr = 0x%08x\n",
		virt_to_phys(&dev->is_p_region->parameter.isp));
	dbg("Shared region addr = 0x%08x\n",
		virt_to_phys(&dev->is_p_region->shared));
	dev->frame_count = 0;
	dev->setfile.sub_index = 0;
	/* Init sequence 3: Stream off */
	dbg("Stream Off\n");
	clear_bit(IS_ST_STREAM_OFF, &dev->state);
	fimc_is_hw_set_stream(dev, 0); /*stream off */
	ret = wait_event_timeout(dev->irq_queue1,
		test_bit(IS_ST_STREAM_OFF, &dev->state),
		FIMC_IS_SHUTDOWN_TIMEOUT);
	if (!ret) {
		err("wait timeout - stream off\n");
		fimc_is_hw_set_low_poweroff(dev, true);
		return -EINVAL;
	}
	/* Init sequence 4-6: default parameter sets for all scenarios */
	dbg("Default setting : preview_still\n");
	ret = fimc_is_set_init_mode(dev, ISS_PREVIEW_STILL, "PREVIEW_STILL");
	if (ret)
		return ret;
	dbg("Default setting : preview_video\n");
	ret = fimc_is_set_init_mode(dev, ISS_PREVIEW_VIDEO, "PREVIEW_VIDEO");
	if (ret)
		return ret;
	dbg("Default setting : capture_still\n");
	ret = fimc_is_set_init_mode(dev, ISS_CAPTURE_STILL, "CAPTURE_STILL");
	if (ret)
		return ret;
	dbg("Default setting : capture_video\n");
	ret = fimc_is_set_init_mode(dev, ISS_CAPTURE_VIDEO, "CAPTURE_VIDEO");
	if (ret)
		return ret;
	set_bit(IS_ST_INIT_DONE, &dev->state);
	dbg("Init sequence completed!! Ready to use\n");
#ifdef MSG_CONFIG_COTROL
	fimc_is_hw_set_debug_level(dev, FIMC_IS_DEBUG_MSG, FIMC_IS_DEBUG_LEVEL);
#endif
	return 0;
}
/*
 * .reset subdev op: hard-reset the FIMC-IS.  Powers the sub-IPs and the
 * A5 down, drops/retakes the runtime-PM reference, resets the software
 * state, powers back up and re-runs the init sequence for the previously
 * selected sensor type.
 *
 * Fix: the result of fimc_is_init_set() used to be assigned to ret and
 * then discarded (`return 0;`), so a failed re-init was reported as
 * success.  Failures are now propagated; legacy positive "time left"
 * success values are normalized to 0.
 */
static int fimc_is_reset(struct v4l2_subdev *sd, u32 val)
{
	struct fimc_is_dev *is_dev = to_fimc_is_dev(sd);
	struct device *dev = &is_dev->pdev->dev;
	int ret = 0;

	dbg("fimc_is_reset\n");
	if (!val)
		return -EINVAL;
	dbg("hard reset start\n");
	/* Power off */
	fimc_is_hw_subip_poweroff(is_dev);
	ret = wait_event_timeout(is_dev->irq_queue1,
		test_bit(IS_PWR_SUB_IP_POWER_OFF, &is_dev->power), (HZ));
	fimc_is_hw_a5_power(is_dev, 0);
	dbg("A5 power off\n");
	fimc_is_hw_set_low_poweroff(is_dev, true);
	ret = pm_runtime_put_sync(dev);
	/* reset software state to idle/powered-off */
	is_dev->sensor.id = 0;
	is_dev->p_region_index1 = 0;
	is_dev->p_region_index2 = 0;
	atomic_set(&is_dev->p_region_num, 0);
	is_dev->state = 0;
	set_bit(IS_ST_IDLE, &is_dev->state);
	is_dev->power = 0;
	is_dev->af.af_state = FIMC_IS_AF_IDLE;
	is_dev->af.mode = IS_FOCUS_MODE_IDLE;
	set_bit(IS_PWR_ST_POWEROFF, &is_dev->power);
	/* Restart */
#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
	/* lock bus frequency */
	dev_lock(is_dev->bus_dev, dev, BUS_LOCK_FREQ_L1);
#endif
	fimc_is_hw_set_low_poweroff(is_dev, false);
	ret = pm_runtime_get_sync(dev);
	set_bit(IS_ST_A5_PWR_ON, &is_dev->state);
	/* Re- init */
	ret = fimc_is_init_set(sd, is_dev->sensor.sensor_type);
	return (ret < 0) ? ret : 0;
}
/*
 * fimc_is_g_ctrl - v4l2 subdev g_ctrl handler for the FIMC-IS block.
 *
 * Fills ctrl->value for the requested control id and returns 0, or
 * -EINVAL for an unrecognized id.  Controls backed by firmware-written
 * shared memory (EXIF data, frame headers, face-detect results) first
 * invalidate the CPU cache over the relevant region so the values read
 * are the ones the firmware last wrote.
 */
static int fimc_is_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
int ret = 0;
int i, max, tmp = 0;
struct fimc_is_dev *dev = to_fimc_is_dev(sd);
switch (ctrl->id) {
/* EXIF information */
case V4L2_CID_IS_CAMERA_EXIF_EXPTIME:
case V4L2_CID_CAMERA_EXIF_EXPTIME: /* Exposure Time */
/* Invalidate the first four frame headers before reading EXIF. */
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
/* NOTE(review): only the denominator of the exposure-time rational
 * is reported — presumably the firmware fixes the numerator to 1;
 * confirm against the firmware interface spec. */
ctrl->value = dev->is_p_region->header[0].
exif.exposure_time.den;
break;
case V4L2_CID_IS_CAMERA_EXIF_FLASH:
case V4L2_CID_CAMERA_EXIF_FLASH: /* Flash */
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
ctrl->value = dev->is_p_region->header[0].exif.flash;
break;
case V4L2_CID_IS_CAMERA_EXIF_ISO:
case V4L2_CID_CAMERA_EXIF_ISO: /* ISO Speed Rating */
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
ctrl->value = dev->is_p_region->header[0].
exif.iso_speed_rating;
break;
case V4L2_CID_IS_CAMERA_EXIF_SHUTTERSPEED:
case V4L2_CID_CAMERA_EXIF_TV: /* Shutter Speed */
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
/* Exposure time = shutter speed by FW */
ctrl->value = dev->is_p_region->header[0].
exif.exposure_time.den;
break;
case V4L2_CID_IS_CAMERA_EXIF_BRIGHTNESS:
case V4L2_CID_CAMERA_EXIF_BV: /* Brightness */
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
ctrl->value = dev->is_p_region->header[0].exif.brightness.num;
break;
case V4L2_CID_CAMERA_EXIF_EBV: /* exposure bias */
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
/* NOTE(review): exposure bias is read from brightness.den, i.e. the
 * denominator of the brightness rational — looks like a firmware
 * packing convention; verify it is not a copy-paste slip. */
ctrl->value = dev->is_p_region->header[0].exif.brightness.den;
break;
/* Get x and y offset of sensor */
case V4L2_CID_IS_GET_SENSOR_OFFSET_X:
ctrl->value = dev->sensor.offset_x;
break;
case V4L2_CID_IS_GET_SENSOR_OFFSET_Y:
ctrl->value = dev->sensor.offset_y;
break;
/* Get current sensor size */
/* Sensor width/height depend on the active scenario
 * (still/video preview vs still/video capture). */
case V4L2_CID_IS_GET_SENSOR_WIDTH:
switch (dev->scenario_id) {
case ISS_PREVIEW_STILL:
ctrl->value = dev->sensor.width_prev;
break;
case ISS_PREVIEW_VIDEO:
ctrl->value = dev->sensor.width_prev_cam;
break;
case ISS_CAPTURE_STILL:
ctrl->value = dev->sensor.width_cap;
break;
case ISS_CAPTURE_VIDEO:
ctrl->value = dev->sensor.width_cam;
break;
}
break;
case V4L2_CID_IS_GET_SENSOR_HEIGHT:
switch (dev->scenario_id) {
case ISS_PREVIEW_STILL:
ctrl->value = dev->sensor.height_prev;
break;
case ISS_PREVIEW_VIDEO:
ctrl->value = dev->sensor.height_prev_cam;
break;
case ISS_CAPTURE_STILL:
ctrl->value = dev->sensor.height_cap;
break;
case ISS_CAPTURE_VIDEO:
ctrl->value = dev->sensor.height_cam;
break;
}
break;
/* Get information related to frame management */
case V4L2_CID_IS_GET_FRAME_VALID:
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
/* Preview scenarios cycle through MAX_FRAME_COUNT_PREVIEW ring
 * slots indexed by frame_count; capture uses slot 0 only. */
if ((dev->scenario_id == ISS_PREVIEW_STILL) ||
(dev->scenario_id == ISS_PREVIEW_VIDEO)) {
ctrl->value = dev->is_p_region->header
[dev->frame_count%MAX_FRAME_COUNT_PREVIEW].valid;
} else {
ctrl->value = dev->is_p_region->header[0].valid;
}
break;
case V4L2_CID_IS_GET_FRAME_BADMARK:
/* Not implemented; ctrl->value is left untouched. */
break;
case V4L2_CID_IS_GET_FRAME_NUMBER:
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
if ((dev->scenario_id == ISS_PREVIEW_STILL) ||
(dev->scenario_id == ISS_PREVIEW_VIDEO)) {
ctrl->value =
dev->is_p_region->header
[dev->frame_count%MAX_FRAME_COUNT_PREVIEW].
frame_number;
} else {
ctrl->value =
dev->is_p_region->header[0].frame_number;
}
break;
case V4L2_CID_IS_GET_LOSTED_FRAME_NUMBER:
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
if (dev->scenario_id == ISS_CAPTURE_STILL) {
ctrl->value =
dev->is_p_region->header[0].frame_number;
} else if (dev->scenario_id == ISS_CAPTURE_VIDEO) {
ctrl->value =
dev->is_p_region->header[0].frame_number + 1;
} else {
/* Preview: report the highest frame number across all
 * ring slots (the most recently produced frame). */
max = dev->is_p_region->header[0].frame_number;
for (i = 1; i < MAX_FRAME_COUNT_PREVIEW; i++) {
if (max <
dev->is_p_region->header[i].frame_number)
max =
dev->is_p_region->header[i].frame_number;
}
ctrl->value = max;
}
/* Side effect: resynchronize the driver's frame counter. */
dev->frame_count = ctrl->value;
break;
case V4L2_CID_IS_GET_FRAME_CAPTURED:
fimc_is_mem_cache_inv((void *)IS_HEADER,
(unsigned long)(sizeof(struct is_frame_header)*4));
ctrl->value =
dev->is_p_region->header
[dev->frame_count%MAX_FRAME_COUNT_PREVIEW].captured;
break;
case V4L2_CID_IS_FD_GET_DATA:
/* Copy fd_header.count face markers from firmware shared memory
 * to the buffer previously set up in dev->fd_header.target_addr. */
ctrl->value = dev->fd_header.count;
fimc_is_mem_cache_inv((void *)IS_FACE,
(unsigned long)(sizeof(struct is_face_marker)*MAX_FACE_COUNT));
memcpy((void *)dev->fd_header.target_addr,
&dev->is_p_region->face[dev->fd_header.index],
(sizeof(struct is_face_marker)*dev->fd_header.count));
break;
/* AF result */
case V4L2_CID_CAMERA_AUTO_FOCUS_RESULT:
/* 0x02 is reported when AF is not used on this sensor. */
if (!is_af_use(dev))
ctrl->value = 0x02;
else
ctrl->value = dev->af.af_lock_state;
break;
case V4L2_CID_IS_ZOOM_STATE:
/* 1 while a zoom reconfiguration has completed (bit set). */
if (test_bit(IS_ST_SET_ZOOM, &dev->state))
ctrl->value = 1;
else
ctrl->value = 0;
break;
case V4L2_CID_IS_ZOOM_MAX_LEVEL:
switch (dev->scenario_id) {
case ISS_PREVIEW_STILL:
tmp = dev->sensor.width_prev;
break;
case ISS_PREVIEW_VIDEO:
tmp = dev->sensor.width_prev_cam;
break;
case ISS_CAPTURE_STILL:
tmp = dev->sensor.width_cap;
break;
case ISS_CAPTURE_VIDEO:
tmp = dev->sensor.width_cam;
break;
}
/* Each zoom level crops 16 pixels of width; count the levels
 * until the cropped width would fall to 1/4 of the original or
 * below 200 pixels (mirrors fimc_is_v4l2_digital_zoom). */
i = 0;
while ((tmp - (16*i)) > (tmp/4) && (tmp - (16*i)) > 200)
i++;
ctrl->value = i;
break;
/* F/W debug region address */
case V4L2_CID_IS_FW_DEBUG_REGION_ADDR:
ctrl->value = dev->mem.base + FIMC_IS_DEBUG_REGION_ADDR;
break;
#if defined(CONFIG_SLP)
#define FRONT_CAM_STANDARD_REVISION 0x0b
case V4L2_CID_PHYSICAL_ROTATION:
/* Board-revision-specific sensor mounting orientation. */
if (system_rev > FRONT_CAM_STANDARD_REVISION || \
system_rev == 0x04 || system_rev == 0x06)
ctrl->value = IS_ROTATION_270;
else
ctrl->value = IS_ROTATION_90;
break;
#endif
default:
return -EINVAL;
}
return ret;
}
/*
 * fimc_is_v4l2_digital_zoom - apply a digital zoom level by cropping the
 * ISP on-the-fly (OTF) input and resizing the whole ISP/DRC/FD chain.
 *
 * @dev:         FIMC-IS device state
 * @zoom_factor: zoom level; 0 disables cropping, each level > 0 removes
 *               16 pixels of input width (height scaled to keep aspect)
 *
 * Sequence (order is mandated by the firmware protocol):
 *   1. pick the original size for the current scenario,
 *   2. stream off,
 *   3. program new ISP input crop and ISP/DRC/FD input/output sizes,
 *   4. commit the parameters (fimc_is_hw_set_param),
 *   5. re-issue the mode change so the firmware recomputes CAC margins.
 *
 * Returns 0 on success, -EBUSY when any firmware handshake times out.
 * On success IS_ST_SET_ZOOM is set; note that streaming is left OFF —
 * the caller is responsible for restarting the stream.
 */
static int fimc_is_v4l2_digital_zoom(struct fimc_is_dev *dev, int zoom_factor)
{
u32 ori_width = 0, ori_height = 0;
u32 crop_offset_x = 0, crop_offset_y = 0;
u32 crop_width = 0, crop_height = 0;
u32 mode = 0;
int tmp, ret = 0;
clear_bit(IS_ST_SET_ZOOM, &dev->state);
/* 1. Get current width and height */
switch (dev->scenario_id) {
case ISS_PREVIEW_STILL:
mode = IS_MODE_PREVIEW_STILL;
ori_width = dev->sensor.width_prev;
ori_height = dev->sensor.height_prev;
/* Preview-still also re-programs the sensor's max frame rate. */
tmp = fimc_is_hw_get_sensor_max_framerate(dev);
IS_SENSOR_SET_FRAME_RATE(dev, tmp);
IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE);
IS_INC_PARAM_NUM(dev);
break;
case ISS_PREVIEW_VIDEO:
mode = IS_MODE_PREVIEW_VIDEO;
ori_width = dev->sensor.width_prev_cam;
ori_height = dev->sensor.height_prev_cam;
break;
case ISS_CAPTURE_STILL:
mode = IS_MODE_CAPTURE_STILL;
ori_width = dev->sensor.width_cap;
ori_height = dev->sensor.height_cap;
break;
case ISS_CAPTURE_VIDEO:
mode = IS_MODE_CAPTURE_VIDEO;
ori_width = dev->sensor.width_cam;
ori_height = dev->sensor.height_cam;
break;
}
/* calculate the offset and size */
if (!zoom_factor) {
/* Zoom 1x: all-zero crop rectangle disables cropping; the chain
 * keeps running at the original size. */
crop_offset_x = 0;
crop_offset_y = 0;
crop_width = 0;
crop_height = 0;
dev->sensor.zoom_out_width = ori_width;
dev->sensor.zoom_out_height = ori_height;
} else {
/* 16 pixels of width per zoom step; height follows the original
 * aspect ratio. */
crop_width = ori_width - (16 * zoom_factor);
crop_height = (crop_width * ori_height) / ori_width;
/* bayer crop contraint */
/* Round crop_height to a multiple of 4 (down for %4==1, up
 * otherwise) as required by the bayer crop hardware. */
switch (crop_height%4) {
case 1:
crop_height--;
break;
case 2:
crop_height += 2;
break;
case 3:
crop_height++;
break;
}
/* Clamp the crop to no less than 1/4 of the original frame. */
if ((crop_height < (ori_height / 4)) ||
(crop_width < (ori_width / 4))) {
crop_width = ori_width/4;
crop_height = ori_height/4;
}
/* Center the crop window. */
crop_offset_x = (ori_width - crop_width)/2;
crop_offset_y = (ori_height - crop_height)/2;
dev->sensor.zoom_out_width = crop_width;
dev->sensor.zoom_out_height = crop_height;
}
dbg("Zoom out offset = %d, %d\n", crop_offset_x, crop_offset_y);
dbg("Zoom out = %d, %d\n", dev->sensor.zoom_out_width,
dev->sensor.zoom_out_height);
/* 2. stream off */
clear_bit(IS_ST_STREAM_ON, &dev->state);
fimc_is_hw_set_stream(dev, 0);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_STREAM_OFF, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT);
if (!ret) {
dev_err(&dev->pdev->dev, "wait timeout : %s\n", __func__);
return -EBUSY;
}
clear_bit(IS_ST_STREAM_OFF, &dev->state);
/* 3. update input and output size of ISP,DRC and FD */
/* ISP OTF input keeps the full sensor frame; the crop rectangle
 * selects the zoomed region. */
IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, OTF_INPUT_COMMAND_ENABLE);
IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, ori_width);
IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, ori_height);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, crop_offset_x);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, crop_offset_y);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, crop_width);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, crop_height);
IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
/* ISP outputs (OTF and both DMA channels) at the cropped size. */
IS_ISP_SET_PARAM_OTF_OUTPUT_CMD(dev, OTF_OUTPUT_COMMAND_ENABLE);
IS_ISP_SET_PARAM_OTF_OUTPUT_WIDTH(dev, dev->sensor.zoom_out_width);
IS_ISP_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, dev->sensor.zoom_out_height);
IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_OUTPUT);
IS_INC_PARAM_NUM(dev);
IS_ISP_SET_PARAM_DMA_OUTPUT1_WIDTH(dev, dev->sensor.zoom_out_width);
IS_ISP_SET_PARAM_DMA_OUTPUT1_HEIGHT(dev, dev->sensor.zoom_out_height);
IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_OUTPUT);
IS_INC_PARAM_NUM(dev);
IS_ISP_SET_PARAM_DMA_OUTPUT2_WIDTH(dev, dev->sensor.zoom_out_width);
IS_ISP_SET_PARAM_DMA_OUTPUT2_HEIGHT(dev, dev->sensor.zoom_out_height);
IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_OUTPUT);
IS_INC_PARAM_NUM(dev);
/* DRC input / output*/
IS_DRC_SET_PARAM_OTF_INPUT_WIDTH(dev, dev->sensor.zoom_out_width);
IS_DRC_SET_PARAM_OTF_INPUT_HEIGHT(dev, dev->sensor.zoom_out_height);
IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
IS_DRC_SET_PARAM_OTF_OUTPUT_WIDTH(dev, dev->sensor.zoom_out_width);
IS_DRC_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, dev->sensor.zoom_out_height);
IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_OUTPUT);
IS_INC_PARAM_NUM(dev);
/* FD input / output*/
IS_FD_SET_PARAM_OTF_INPUT_WIDTH(dev, dev->sensor.zoom_out_width);
IS_FD_SET_PARAM_OTF_INPUT_HEIGHT(dev, dev->sensor.zoom_out_height);
IS_SET_PARAM_BIT(dev, PARAM_FD_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
/* 4. Set parameter */
/* Flush the parameter region to memory so the firmware sees it,
 * then wait for the BLOCK command acknowledgment. */
fimc_is_mem_cache_clean((void *)dev->is_p_region, IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT);
if (!ret) {
dev_err(&dev->pdev->dev, "wait timeout : %s\n", __func__);
return -EBUSY;
}
/* 5. Mode change for getting CAC margin */
clear_bit(IS_ST_CHANGE_MODE, &dev->state);
fimc_is_hw_change_mode(dev, mode);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_CHANGE_MODE, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT);
if (!ret) {
dev_err(&dev->pdev->dev,
"Mode change timeout:%s\n", __func__);
return -EBUSY;
}
set_bit(IS_ST_SET_ZOOM, &dev->state);
return 0;
}
static int fimc_is_v4l2_isp_scene_mode(struct fimc_is_dev *dev, int mode)
{
int ret = 0;
switch (mode) {
case SCENE_MODE_NONE:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev, ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev,
ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_AUTO);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_ENABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_PORTRAIT:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev, ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, -1);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, -1);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_AUTO);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_ENABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_LANDSCAPE:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_MATRIX);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev, ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 1);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 1);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 1);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_SPORTS:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_MANUAL);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 400);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev, ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_PARTY_INDOOR:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_MANUAL);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 200);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev, ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 1);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_AUTO);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_ENABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_BEACH_SNOW:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_MANUAL);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 50);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev, ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 1);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 1);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_SUNSET:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev, ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev,
ISP_AWB_COMMAND_ILLUMINATION);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_DUSK_DAWN:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev,
ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev,
ISP_AWB_COMMAND_ILLUMINATION);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_FLUORESCENT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_FALL_COLOR:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev,
ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 2);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_NIGHTSHOT:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev,
ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_BACK_LIGHT:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev,
ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
/* FIXME add with SCENE_MODE_BACK_LIGHT (FLASH mode) */
case SCENE_MODE_FIREWORKS:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_MANUAL);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 50);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev,
ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_TEXT:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev,
ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 2);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 2);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case SCENE_MODE_CANDLE_LIGHT:
/* ISO */
IS_ISP_SET_PARAM_ISO_CMD(dev, ISP_ISO_COMMAND_AUTO);
IS_ISP_SET_PARAM_ISO_VALUE(dev, 0);
IS_ISP_SET_PARAM_ISO_ERR(dev, ISP_ISO_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
IS_INC_PARAM_NUM(dev);
/* Metering */
IS_ISP_SET_PARAM_METERING_CMD(dev,
ISP_METERING_COMMAND_AVERAGE);
IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, 0);
IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_METERING_ERR(dev,
ISP_METERING_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* AWB */
IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_ILLUMINATION);
IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev,
ISP_AWB_ILLUMINATION_DAYLIGHT);
IS_ISP_SET_PARAM_AWB_ERR(dev, ISP_AWB_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
IS_INC_PARAM_NUM(dev);
/* Adjust */
IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_ALL);
IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
IS_ISP_SET_PARAM_ADJUST_ERR(dev, ISP_ADJUST_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flash */
IS_ISP_SET_PARAM_FLASH_CMD(dev, ISP_FLASH_COMMAND_DISABLE);
IS_ISP_SET_PARAM_FLASH_REDEYE(dev, ISP_FLASH_REDEYE_DISABLE);
IS_ISP_SET_PARAM_FLASH_ERR(dev, ISP_FLASH_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
IS_INC_PARAM_NUM(dev);
/* AF */
dev->af.af_state = FIMC_IS_AF_SETCONFIG;
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
IS_ISP_SET_PARAM_AA_SCENE(dev, ISP_AF_SCENE_NORMAL);
IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
IS_ISP_SET_PARAM_AA_ERR(dev, ISP_AF_ERROR_NO);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
default:
break;
}
return ret;
}
/*
 * fimc_is_wait_af_done - poll the shared region until firmware flags AF done.
 *
 * Invalidates the shared-region cache each pass so the CPU sees the
 * firmware's latest af_status, sleeping 20 ms between polls.  Gives up
 * after 600 jiffies (comment in the original says ~3 s — true only at
 * HZ == 200, TODO confirm).  NOTE(review): the timeout path also
 * returns 0, so callers cannot distinguish success from timeout.
 */
int fimc_is_wait_af_done(struct fimc_is_dev *dev)
{
	unsigned long deadline = jiffies + 600;

	while (time_before(jiffies, deadline)) {
		/* Make the firmware-written shared region visible to us. */
		fimc_is_mem_cache_inv((void *)IS_SHARED,
			(unsigned long)(sizeof(struct is_share_region)));
		if (dev->is_shared_region->af_status) {
			dbg("AF done : %d ms\n",
				jiffies_to_msecs(jiffies - deadline + 600));
			return 0;
		}
		msleep(20);
	}
	err("AF wait time out: %d ms\n",
		jiffies_to_msecs(jiffies - deadline + 600));
	return 0;
}
/*
 * fimc_is_af_face - retarget touch AF at the most confident detected face.
 *
 * Scans the face-detection results for the current FD window, scales the
 * best face's centre by 1024/sensor-size into the ISP touch coordinate
 * space, and reprograms the AA block in touch-AF mode when the target
 * has moved far enough since the previous call.
 *
 * Returns 0 in all paths (ret is never assigned a non-zero value).
 */
int fimc_is_af_face(struct fimc_is_dev *dev)
{
	int ret = 0, max_confidence = 0, i = 0;
	int width, height;
	u32 touch_x = 0, touch_y = 0;

	/* Find the centre point of the highest-confidence face. */
	for (i = dev->fd_header.index;
		i < (dev->fd_header.index + dev->fd_header.count); i++) {
		if (max_confidence < dev->is_p_region->face[i].confidence) {
			max_confidence = dev->is_p_region->face[i].confidence;
			touch_x = dev->is_p_region->face[i].face.offset_x +
				(dev->is_p_region->face[i].face.width / 2);
			touch_y = dev->is_p_region->face[i].face.offset_y +
				(dev->is_p_region->face[i].face.height / 2);
		}
	}
	width = fimc_is_hw_get_sensor_size_width(dev);
	height = fimc_is_hw_get_sensor_size_height(dev);
	/* Scale pixel coordinates into the ISP's 1024-unit touch space. */
	touch_x = 1024 * touch_x / (u32)width;
	touch_y = 1024 * touch_y / (u32)height;
	/* Bail on degenerate positions or weak detections (< 50). */
	if ((touch_x == 0) || (touch_y == 0) || (max_confidence < 50))
		return ret;
	if (dev->af.prev_pos_x == 0 && dev->af.prev_pos_y == 0) {
		/* First face seen: remember it but do not refocus yet. */
		dev->af.prev_pos_x = touch_x;
		dev->af.prev_pos_y = touch_y;
	} else {
		/* Debounce: skip refocus if movement is under 100 units on
		 * both axes since the last accepted position. */
		if (abs(dev->af.prev_pos_x - touch_x) < 100 &&
			abs(dev->af.prev_pos_y - touch_y) < 100) {
			return ret;
		}
		dbg("AF Face level = %d\n", max_confidence);
		dbg("AF Face = <%d, %d>\n", touch_x, touch_y);
		dbg("AF Face = prev <%d, %d>\n",
			dev->af.prev_pos_x, dev->af.prev_pos_y);
		dev->af.prev_pos_x = touch_x;
		dev->af.prev_pos_y = touch_y;
	}
	/* Program the AA block for touch AF at the new point and kick HW. */
	IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
	IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
	IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_TOUCH);
	IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
	IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
	IS_ISP_SET_PARAM_AA_TOUCH_X(dev, touch_x);
	IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, touch_y);
	IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
	IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
	IS_INC_PARAM_NUM(dev);
	dev->af.af_state = FIMC_IS_AF_SETCONFIG;
	fimc_is_mem_cache_clean((void *)dev->is_p_region, IS_PARAM_SIZE);
	fimc_is_hw_set_param(dev);
	return ret;
}
/*
 * fimc_is_v4l2_af_mode - apply a V4L2 focus mode to the driver/firmware.
 *
 * Records the requested mode in dev->af.mode.  AUTO and MACRO only update
 * driver state (the AA block is programmed later, at AF start).  INFINITY,
 * CONTINOUS and TOUCH immediately program the AA block and kick the
 * hardware; CONTINOUS/TOUCH/FACEDETECT additionally reset lock state.
 *
 * Returns 0 in all paths, including unknown values.
 */
static int fimc_is_v4l2_af_mode(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	switch (value) {
	case FOCUS_MODE_AUTO:
		dev->af.mode = IS_FOCUS_MODE_AUTO;
		break;
	case FOCUS_MODE_MACRO:
		dev->af.mode = IS_FOCUS_MODE_MACRO;
		break;
	case FOCUS_MODE_INFINITY:
		/* Infinity is realised as manual AF at the default position. */
		dev->af.mode = IS_FOCUS_MODE_INFINITY;
		IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
		IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
		IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_MANUAL);
		IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
		IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
		IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
		IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
		IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
		IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
		IS_INC_PARAM_NUM(dev);
		dev->af.af_state = FIMC_IS_AF_SETCONFIG;
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
		break;
	case FOCUS_MODE_CONTINOUS:
		dev->af.mode = IS_FOCUS_MODE_CONTINUOUS;
		IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
		IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
		IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_CONTINUOUS);
		IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
		IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
		IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
		IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
		IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
		IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
		IS_INC_PARAM_NUM(dev);
		dev->af.af_state = FIMC_IS_AF_SETCONFIG;
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
		/* Continuous AF owns the locks: clear them and the face
		 * position history used by fimc_is_af_face(). */
		dev->af.af_lock_state = 0;
		dev->af.ae_lock_state = 0;
		dev->af.awb_lock_state = 0;
		dev->af.prev_pos_x = 0;
		dev->af.prev_pos_y = 0;
		break;
	case FOCUS_MODE_TOUCH:
		/* Touch AF uses the coordinates previously stored in
		 * dev->af.pos_x/pos_y by the touch-position control. */
		dev->af.mode = IS_FOCUS_MODE_TOUCH;
		IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
		IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
		IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_TOUCH);
		IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
		IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
		IS_ISP_SET_PARAM_AA_TOUCH_X(dev, dev->af.pos_x);
		IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, dev->af.pos_y);
		IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
		IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
		IS_INC_PARAM_NUM(dev);
		dev->af.af_state = FIMC_IS_AF_SETCONFIG;
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
		dev->af.af_lock_state = 0;
		dev->af.ae_lock_state = 0;
		dev->af.awb_lock_state = 0;
		break;
	case FOCUS_MODE_FACEDETECT:
		/* Face-detect AF is driven per-frame by fimc_is_af_face();
		 * only reset the bookkeeping here. */
		dev->af.mode = IS_FOCUS_MODE_FACEDETECT;
		dev->af.af_lock_state = 0;
		dev->af.ae_lock_state = 0;
		dev->af.awb_lock_state = 0;
		dev->af.prev_pos_x = 0;
		dev->af.prev_pos_y = 0;
		break;
	default:
		return ret;
	}
	return ret;
}
/*
 * fimc_is_v4l2_af_start_stop - start or abort an autofocus sweep.
 *
 * AUTO_FOCUS_OFF aborts any in-flight AF: it programs ISP_AA_COMMAND_STOP
 * with mode/scene matching the current dev->af.mode, then (for MACRO and
 * CONTINUOUS) waits up to HZ/5 for the ISR to move af_state back to IDLE.
 * AUTO_FOCUS_ON clears lock state and programs a single-shot AF start,
 * waiting up to HZ/5 for af_state to reach RUNNING.
 *
 * On sensors without AF support (!is_af_use) both paths only fake the
 * driver-side state so userspace sequencing still works.
 *
 * Returns 0 on success or when nothing needed doing, -EBUSY when the
 * firmware did not acknowledge the state change in time.
 */
static int fimc_is_v4l2_af_start_stop(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	switch (value) {
	case AUTO_FOCUS_OFF:
		if (!is_af_use(dev)) {
			/* 6A3 can't support AF */
			dev->af.af_state = FIMC_IS_AF_IDLE;
		} else {
			if (dev->af.af_state == FIMC_IS_AF_IDLE)
				return ret;
			/* Abort or lock AF */
			dev->af.af_state = FIMC_IS_AF_ABORT;
			IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_STOP);
			IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
			switch (dev->af.mode) {
			case IS_FOCUS_MODE_AUTO:
				/* NOTE(review): unlike MACRO/CONTINUOUS below,
				 * the AUTO abort does not wait for the IDLE
				 * acknowledgement — confirm intentional. */
				IS_ISP_SET_PARAM_AA_MODE(dev,
					ISP_AF_MODE_SINGLE);
				IS_ISP_SET_PARAM_AA_SCENE(dev,
					ISP_AF_SCENE_NORMAL);
				IS_ISP_SET_PARAM_AA_SLEEP(dev,
					ISP_AF_SLEEP_OFF);
				IS_ISP_SET_PARAM_AA_FACE(dev,
					ISP_AF_FACE_DISABLE);
				IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
				IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
				IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
				IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
				IS_INC_PARAM_NUM(dev);
				fimc_is_mem_cache_clean(
					(void *)dev->is_p_region,
					IS_PARAM_SIZE);
				fimc_is_hw_set_param(dev);
				break;
			case IS_FOCUS_MODE_MACRO:
				IS_ISP_SET_PARAM_AA_MODE(dev,
					ISP_AF_MODE_SINGLE);
				IS_ISP_SET_PARAM_AA_SCENE(dev,
					ISP_AF_SCENE_MACRO);
				IS_ISP_SET_PARAM_AA_SLEEP(dev,
					ISP_AF_SLEEP_OFF);
				IS_ISP_SET_PARAM_AA_FACE(dev,
					ISP_AF_FACE_DISABLE);
				IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
				IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
				IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
				IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
				IS_INC_PARAM_NUM(dev);
				fimc_is_mem_cache_clean(
					(void *)dev->is_p_region,
					IS_PARAM_SIZE);
				fimc_is_hw_set_param(dev);
				/* Wait for the ISR to confirm AF is idle. */
				ret = wait_event_timeout(dev->irq_queue1,
				(dev->af.af_state == FIMC_IS_AF_IDLE), HZ/5);
				if (!ret) {
					dev_err(&dev->pdev->dev,
					"Focus change timeout:%s\n", __func__);
					return -EBUSY;
				}
				break;
			case IS_FOCUS_MODE_CONTINUOUS:
				IS_ISP_SET_PARAM_AA_MODE(dev,
					ISP_AF_MODE_CONTINUOUS);
				IS_ISP_SET_PARAM_AA_SCENE(dev,
					ISP_AF_SCENE_NORMAL);
				IS_ISP_SET_PARAM_AA_SLEEP(dev,
					ISP_AF_SLEEP_OFF);
				IS_ISP_SET_PARAM_AA_FACE(dev,
					ISP_AF_FACE_DISABLE);
				IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
				IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
				IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
				IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
				IS_INC_PARAM_NUM(dev);
				fimc_is_mem_cache_clean(
					(void *)dev->is_p_region,
					IS_PARAM_SIZE);
				fimc_is_hw_set_param(dev);
				/* Wait for the ISR to confirm AF is idle. */
				ret = wait_event_timeout(dev->irq_queue1,
				(dev->af.af_state == FIMC_IS_AF_IDLE), HZ/5);
				if (!ret) {
					dev_err(&dev->pdev->dev,
					"Focus change timeout:%s\n", __func__);
					return -EBUSY;
				}
				break;
			default:
				/* If other AF mode, there is no
				cancelation process*/
				break;
			}
			dev->af.mode = IS_FOCUS_MODE_IDLE;
		}
		break;
	case AUTO_FOCUS_ON:
		if (!is_af_use(dev)) {
			/* 6A3 can't support AF: pretend AF locked instantly
			 * and publish the result via the shared region. */
			dev->af.af_state = FIMC_IS_AF_LOCK;
			dev->af.af_lock_state = FIMC_IS_AF_LOCKED;
			dev->is_shared_region->af_status = 1;
			fimc_is_mem_cache_clean((void *)IS_SHARED,
				(unsigned long)(sizeof(struct is_share_region)));
		} else {
			/* Reset all lock state and clear the shared AF flag
			 * before starting a fresh sweep. */
			dev->af.af_lock_state = 0;
			dev->af.ae_lock_state = 0;
			dev->af.awb_lock_state = 0;
			dev->is_shared_region->af_status = 0;
			fimc_is_mem_cache_clean((void *)IS_SHARED,
				(unsigned long)(sizeof(struct is_share_region)));
			IS_ISP_SET_PARAM_AA_CMD(dev,
				ISP_AA_COMMAND_START);
			IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AF);
			IS_ISP_SET_PARAM_AA_MODE(dev, ISP_AF_MODE_SINGLE);
			IS_ISP_SET_PARAM_AA_SLEEP(dev, ISP_AF_SLEEP_OFF);
			IS_ISP_SET_PARAM_AA_FACE(dev, ISP_AF_FACE_DISABLE);
			IS_ISP_SET_PARAM_AA_TOUCH_X(dev, 0);
			IS_ISP_SET_PARAM_AA_TOUCH_Y(dev, 0);
			IS_ISP_SET_PARAM_AA_MANUAL_AF(dev, 0);
			/* Only AUTO and MACRO trigger single-shot AF here;
			 * they differ solely in the AF scene parameter. */
			switch (dev->af.mode) {
			case IS_FOCUS_MODE_AUTO:
				IS_ISP_SET_PARAM_AA_SCENE(dev,
					ISP_AF_SCENE_NORMAL);
				IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
				IS_INC_PARAM_NUM(dev);
				dev->af.af_state =
					FIMC_IS_AF_SETCONFIG;
				fimc_is_mem_cache_clean(
					(void *)dev->is_p_region,
					IS_PARAM_SIZE);
				fimc_is_hw_set_param(dev);
				/* Wait for the ISR to report AF running. */
				ret = wait_event_timeout(dev->irq_queue1,
				(dev->af.af_state == FIMC_IS_AF_RUNNING), HZ/5);
				if (!ret) {
					dev_err(&dev->pdev->dev,
					"Focus change timeout:%s\n", __func__);
					return -EBUSY;
				}
				break;
			case IS_FOCUS_MODE_MACRO:
				IS_ISP_SET_PARAM_AA_SCENE(dev,
					ISP_AF_SCENE_MACRO);
				IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
				IS_INC_PARAM_NUM(dev);
				dev->af.af_state =
					FIMC_IS_AF_SETCONFIG;
				fimc_is_mem_cache_clean(
					(void *)dev->is_p_region,
					IS_PARAM_SIZE);
				fimc_is_hw_set_param(dev);
				/* Wait for the ISR to report AF running. */
				ret = wait_event_timeout(dev->irq_queue1,
				(dev->af.af_state == FIMC_IS_AF_RUNNING), HZ/5);
				if (!ret) {
					dev_err(&dev->pdev->dev,
					"Focus change timeout:%s\n", __func__);
					return -EBUSY;
				}
				break;
			default:
				break;
			}
		}
		break;
	default:
		break;
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_iso - program the ISP ISO setting.
 *
 * ISO_AUTO selects automatic ISO; the other values select manual ISO at
 * the corresponding sensitivity.  Unknown values are silently ignored.
 * Returns 0 in all paths.
 */
static int fimc_is_v4l2_isp_iso(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int iso;

	/* Map the control value onto the sensor ISO number (0 = auto). */
	switch (value) {
	case ISO_AUTO:
		iso = 0;
		break;
	case ISO_100:
		iso = 100;
		break;
	case ISO_200:
		iso = 200;
		break;
	case ISO_400:
		iso = 400;
		break;
	case ISO_800:
		iso = 800;
		break;
	case ISO_1600:
		iso = 1600;
		break;
	default:
		return ret;
	}
	IS_ISP_SET_PARAM_ISO_CMD(dev, value == ISO_AUTO ?
		ISP_ISO_COMMAND_AUTO : ISP_ISO_COMMAND_MANUAL);
	IS_ISP_SET_PARAM_ISO_VALUE(dev, iso);
	if (value >= ISO_AUTO && value < ISO_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_effect - select an ISP image effect.
 *
 * Maps the driver effect enum onto the firmware effect command and, for
 * in-range values, pushes the updated parameter region to hardware.
 * Returns 0 in all paths.
 */
static int fimc_is_v4l2_isp_effect(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int matched = 1;
	int cmd = ISP_IMAGE_EFFECT_DISABLE;

	switch (value) {
	case IS_IMAGE_EFFECT_DISABLE:
		cmd = ISP_IMAGE_EFFECT_DISABLE;
		break;
	case IS_IMAGE_EFFECT_MONOCHROME:
		cmd = ISP_IMAGE_EFFECT_MONOCHROME;
		break;
	case IS_IMAGE_EFFECT_NEGATIVE_MONO:
		cmd = ISP_IMAGE_EFFECT_NEGATIVE_MONO;
		break;
	case IS_IMAGE_EFFECT_NEGATIVE_COLOR:
		cmd = ISP_IMAGE_EFFECT_NEGATIVE_COLOR;
		break;
	case IS_IMAGE_EFFECT_SEPIA:
		cmd = ISP_IMAGE_EFFECT_SEPIA;
		break;
	default:
		/* Preserve original behaviour: unmatched values skip the
		 * command write but still reach the range check below. */
		matched = 0;
		break;
	}
	if (matched)
		IS_ISP_SET_PARAM_EFFECT_CMD(dev, cmd);
	/* only ISP effect in Pegasus */
	if (value >= 0 && value < 5) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_IMAGE_EFFECT);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_effect_legacy - select an ISP image effect (legacy enum).
 *
 * Same as fimc_is_v4l2_isp_effect() but keyed on the legacy IMAGE_EFFECT_*
 * values, and it synchronously waits for the firmware to acknowledge the
 * block command (IS_ST_BLOCK_CMD_CLEARED).  Returns the (positive)
 * remaining-jiffies value from wait_event_timeout on success, 0 on
 * timeout or when the value was out of range.
 */
static int fimc_is_v4l2_isp_effect_legacy(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int cmd;

	switch (value) {
	case IMAGE_EFFECT_NONE:
		cmd = ISP_IMAGE_EFFECT_DISABLE;
		break;
	case IMAGE_EFFECT_BNW:
		cmd = ISP_IMAGE_EFFECT_MONOCHROME;
		break;
	case IMAGE_EFFECT_NEGATIVE:
		cmd = ISP_IMAGE_EFFECT_NEGATIVE_COLOR;
		break;
	case IMAGE_EFFECT_SEPIA:
		cmd = ISP_IMAGE_EFFECT_SEPIA;
		break;
	default:
		return ret;
	}
	IS_ISP_SET_PARAM_EFFECT_CMD(dev, cmd);
	/* only ISP effect in Pegasus */
	if (value > IMAGE_EFFECT_BASE && value < IMAGE_EFFECT_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_IMAGE_EFFECT);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
		fimc_is_hw_set_param(dev);
		/* Block until the ISR signals the command was consumed. */
		ret = wait_event_timeout(dev->irq_queue1,
			test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
			FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
		if (!ret) {
			err("wait timeout : %s\n", __func__);
			return 0;
		}
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_flash_mode - program the ISP flash command and red-eye
 * reduction.  Red-eye is only enabled in FLASH_MODE_AUTO.  Unknown values
 * are silently ignored.  Returns 0 in all paths.
 */
static int fimc_is_v4l2_isp_flash_mode(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int cmd, redeye;

	switch (value) {
	case FLASH_MODE_OFF:
		cmd = ISP_FLASH_COMMAND_DISABLE;
		redeye = ISP_FLASH_REDEYE_DISABLE;
		break;
	case FLASH_MODE_AUTO:
		cmd = ISP_FLASH_COMMAND_AUTO;
		redeye = ISP_FLASH_REDEYE_ENABLE;
		break;
	case FLASH_MODE_ON:
		cmd = ISP_FLASH_COMMAND_MANUALON;
		redeye = ISP_FLASH_REDEYE_DISABLE;
		break;
	case FLASH_MODE_TORCH:
		cmd = ISP_FLASH_COMMAND_TORCH;
		redeye = ISP_FLASH_REDEYE_DISABLE;
		break;
	default:
		return ret;
	}
	IS_ISP_SET_PARAM_FLASH_CMD(dev, cmd);
	IS_ISP_SET_PARAM_FLASH_REDEYE(dev, redeye);
	if (value > FLASH_MODE_BASE && value < FLASH_MODE_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_awb_mode - program auto white balance / fixed illumination.
 *
 * IS_AWB_AUTO enables automatic white balance; the other values select a
 * fixed illuminant preset.  Returns 0 in all paths.
 */
static int fimc_is_v4l2_awb_mode(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int matched = 1;
	int cmd = ISP_AWB_COMMAND_AUTO;
	int illum = 0;

	switch (value) {
	case IS_AWB_AUTO:
		cmd = ISP_AWB_COMMAND_AUTO;
		illum = 0;
		break;
	case IS_AWB_DAYLIGHT:
		cmd = ISP_AWB_COMMAND_ILLUMINATION;
		illum = ISP_AWB_ILLUMINATION_DAYLIGHT;
		break;
	case IS_AWB_CLOUDY:
		cmd = ISP_AWB_COMMAND_ILLUMINATION;
		illum = ISP_AWB_ILLUMINATION_CLOUDY;
		break;
	case IS_AWB_TUNGSTEN:
		cmd = ISP_AWB_COMMAND_ILLUMINATION;
		illum = ISP_AWB_ILLUMINATION_TUNGSTEN;
		break;
	case IS_AWB_FLUORESCENT:
		cmd = ISP_AWB_COMMAND_ILLUMINATION;
		illum = ISP_AWB_ILLUMINATION_FLUORESCENT;
		break;
	default:
		/* Preserve original behaviour: unmatched values skip the
		 * parameter writes but still reach the range check below. */
		matched = 0;
		break;
	}
	if (matched) {
		IS_ISP_SET_PARAM_AWB_CMD(dev, cmd);
		IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev, illum);
	}
	if (value >= IS_AWB_AUTO && value < IS_AWB_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_awb_mode_legacy - white balance via the legacy
 * WHITE_BALANCE_* enum.  Unlike fimc_is_v4l2_awb_mode(), this variant
 * synchronously waits for the firmware to acknowledge the block command.
 * Returns the remaining-jiffies value from wait_event_timeout on success,
 * 0 on timeout or when the range check was not taken.
 */
static int fimc_is_v4l2_awb_mode_legacy(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int matched = 1;
	int cmd = ISP_AWB_COMMAND_AUTO;
	int illum = 0;

	switch (value) {
	case WHITE_BALANCE_AUTO:
		cmd = ISP_AWB_COMMAND_AUTO;
		illum = 0;
		break;
	case WHITE_BALANCE_SUNNY:
		cmd = ISP_AWB_COMMAND_ILLUMINATION;
		illum = ISP_AWB_ILLUMINATION_DAYLIGHT;
		break;
	case WHITE_BALANCE_CLOUDY:
		cmd = ISP_AWB_COMMAND_ILLUMINATION;
		illum = ISP_AWB_ILLUMINATION_CLOUDY;
		break;
	case WHITE_BALANCE_TUNGSTEN:
		cmd = ISP_AWB_COMMAND_ILLUMINATION;
		illum = ISP_AWB_ILLUMINATION_TUNGSTEN;
		break;
	case WHITE_BALANCE_FLUORESCENT:
		cmd = ISP_AWB_COMMAND_ILLUMINATION;
		illum = ISP_AWB_ILLUMINATION_FLUORESCENT;
		break;
	default:
		/* Preserve original behaviour: unmatched values skip the
		 * parameter writes but still reach the range check below. */
		matched = 0;
		break;
	}
	if (matched) {
		IS_ISP_SET_PARAM_AWB_CMD(dev, cmd);
		IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev, illum);
	}
	if (value > WHITE_BALANCE_BASE && value < WHITE_BALANCE_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
		fimc_is_hw_set_param(dev);
		/* Block until the ISR signals the command was consumed. */
		ret = wait_event_timeout(dev->irq_queue1,
			test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
			FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
		if (!ret) {
			err("wait timeout : %s\n", __func__);
			return 0;
		}
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_contrast - set ISP contrast (with an auto mode).
 *
 * IS_CONTRAST_AUTO hands all adjust parameters back to the firmware;
 * other values select a manual contrast level in [-2, 2].  Unknown
 * values are silently ignored.  Returns 0 in all paths.
 */
static int fimc_is_v4l2_isp_contrast(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int level;

	if (value == IS_CONTRAST_AUTO) {
		/* Auto: zero every manual adjust field alongside the
		 * auto command, matching firmware expectations. */
		IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_AUTO);
		IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, 0);
		IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, 0);
		IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, 0);
		IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
		IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, 0);
		IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
	} else {
		switch (value) {
		case IS_CONTRAST_MINUS_2:
			level = -2;
			break;
		case IS_CONTRAST_MINUS_1:
			level = -1;
			break;
		case IS_CONTRAST_DEFAULT:
			level = 0;
			break;
		case IS_CONTRAST_PLUS_1:
			level = 1;
			break;
		case IS_CONTRAST_PLUS_2:
			level = 2;
			break;
		default:
			return ret;
		}
		IS_ISP_SET_PARAM_ADJUST_CMD(dev,
			ISP_ADJUST_COMMAND_MANUAL_CONTRAST);
		IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, level);
	}
	if (value >= 0 && value < IS_CONTRAST_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_contrast_legacy - set ISP manual contrast from the
 * legacy CONTRAST_* enum (levels -2..+2).  Unknown values are silently
 * ignored.  Returns 0 in all paths.
 */
static int fimc_is_v4l2_isp_contrast_legacy(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int level;

	switch (value) {
	case CONTRAST_MINUS_2:
		level = -2;
		break;
	case CONTRAST_MINUS_1:
		level = -1;
		break;
	case CONTRAST_DEFAULT:
		level = 0;
		break;
	case CONTRAST_PLUS_1:
		level = 1;
		break;
	case CONTRAST_PLUS_2:
		level = 2;
		break;
	default:
		return ret;
	}
	IS_ISP_SET_PARAM_ADJUST_CMD(dev,
		ISP_ADJUST_COMMAND_MANUAL_CONTRAST);
	IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, level);
	if (value >= 0 && value < CONTRAST_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_saturation - set ISP manual saturation (levels -2..+2).
 * Unknown values are silently ignored.  Returns 0 in all paths.
 */
static int fimc_is_v4l2_isp_saturation(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int level;

	switch (value) {
	case SATURATION_MINUS_2:
		level = -2;
		break;
	case SATURATION_MINUS_1:
		level = -1;
		break;
	case SATURATION_DEFAULT:
		level = 0;
		break;
	case SATURATION_PLUS_1:
		level = 1;
		break;
	case SATURATION_PLUS_2:
		level = 2;
		break;
	default:
		return ret;
	}
	IS_ISP_SET_PARAM_ADJUST_CMD(dev,
		ISP_ADJUST_COMMAND_MANUAL_SATURATION);
	IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, level);
	if (value >= 0 && value < SATURATION_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_sharpness - set ISP manual sharpness (levels -2..+2).
 * Unknown values are silently ignored.  Returns 0 in all paths.
 */
static int fimc_is_v4l2_isp_sharpness(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int level;

	switch (value) {
	case SHARPNESS_MINUS_2:
		level = -2;
		break;
	case SHARPNESS_MINUS_1:
		level = -1;
		break;
	case SHARPNESS_DEFAULT:
		level = 0;
		break;
	case SHARPNESS_PLUS_1:
		level = 1;
		break;
	case SHARPNESS_PLUS_2:
		level = 2;
		break;
	default:
		return ret;
	}
	IS_ISP_SET_PARAM_ADJUST_CMD(dev,
		ISP_ADJUST_COMMAND_MANUAL_SHARPNESS);
	IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, level);
	if (value >= 0 && value < SHARPNESS_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_exposure - set ISP manual exposure compensation
 * (EV steps -4..+4).  Unknown values are silently ignored.
 * Returns 0 in all paths.
 */
static int fimc_is_v4l2_isp_exposure(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int ev;

	switch (value) {
	case IS_EXPOSURE_MINUS_4:
		ev = -4;
		break;
	case IS_EXPOSURE_MINUS_3:
		ev = -3;
		break;
	case IS_EXPOSURE_MINUS_2:
		ev = -2;
		break;
	case IS_EXPOSURE_MINUS_1:
		ev = -1;
		break;
	case IS_EXPOSURE_DEFAULT:
		ev = 0;
		break;
	case IS_EXPOSURE_PLUS_1:
		ev = 1;
		break;
	case IS_EXPOSURE_PLUS_2:
		ev = 2;
		break;
	case IS_EXPOSURE_PLUS_3:
		ev = 3;
		break;
	case IS_EXPOSURE_PLUS_4:
		ev = 4;
		break;
	default:
		return ret;
	}
	IS_ISP_SET_PARAM_ADJUST_CMD(dev,
		ISP_ADJUST_COMMAND_MANUAL_EXPOSURE);
	IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, ev);
	if (value >= 0 && value < IS_EXPOSURE_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_exposure_legacy - legacy manual exposure control.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: raw EV step; only [-4, 4] is accepted
 *
 * Values outside the accepted range are silently ignored.  Always
 * returns 0.
 */
static int fimc_is_v4l2_isp_exposure_legacy(struct fimc_is_dev *dev, int value)
{
	int ret = 0;

	/* Guard clause: EV steps outside [-4, 4] are ignored. */
	if (value < -4 || value > 4)
		return ret;

	IS_ISP_SET_PARAM_ADJUST_CMD(dev,
		ISP_ADJUST_COMMAND_MANUAL_EXPOSURE);
	IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, value);
	IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
	IS_INC_PARAM_NUM(dev);
	/* Flush the parameter region and notify the firmware. */
	fimc_is_mem_cache_clean((void *)dev->is_p_region, IS_PARAM_SIZE);
	fimc_is_hw_set_param(dev);

	return ret;
}
/*
 * fimc_is_v4l2_isp_brightness - program a manual brightness adjustment.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: IS_BRIGHTNESS_* control index (maps onto levels -2..+2)
 *
 * Unknown indices are silently ignored.  For a known index the adjust
 * block is staged and, when the index passes the range guard, the
 * parameter region is flushed and pushed to the firmware.  Always
 * returns 0.
 */
static int fimc_is_v4l2_isp_brightness(struct fimc_is_dev *dev, int value)
{
	int ret = 0;
	int brightness;

	/* Translate the control index into a brightness level.  The
	 * repeated ADJUST_CMD/ADJUST_BRIGHTNESS pair has been hoisted
	 * below the switch; behavior is unchanged. */
	switch (value) {
	case IS_BRIGHTNESS_MINUS_2:
		brightness = -2;
		break;
	case IS_BRIGHTNESS_MINUS_1:
		brightness = -1;
		break;
	case IS_BRIGHTNESS_DEFAULT:
		brightness = 0;
		break;
	case IS_BRIGHTNESS_PLUS_1:
		brightness = 1;
		break;
	case IS_BRIGHTNESS_PLUS_2:
		brightness = 2;
		break;
	default:
		return ret;	/* unknown index: nothing staged */
	}

	IS_ISP_SET_PARAM_ADJUST_CMD(dev, ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS);
	IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, brightness);

	if (value >= 0 && value < IS_BRIGHTNESS_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
		IS_INC_PARAM_NUM(dev);
		/* Flush so the IS firmware observes the update, then kick it. */
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_hue - program a manual hue adjustment (-2..+2).
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: IS_HUE_* control index
 *
 * Unknown indices are silently ignored (ret stays 0).  For a known
 * index the adjust block is staged and, when the index passes the
 * range guard, the parameter region is flushed and pushed to the IS
 * firmware.  Always returns 0.
 */
static int fimc_is_v4l2_isp_hue(struct fimc_is_dev *dev, int value)
{
int ret = 0;
switch (value) {
case IS_HUE_MINUS_2:
IS_ISP_SET_PARAM_ADJUST_CMD(dev,
ISP_ADJUST_COMMAND_MANUAL_HUE);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, -2);
break;
case IS_HUE_MINUS_1:
IS_ISP_SET_PARAM_ADJUST_CMD(dev,
ISP_ADJUST_COMMAND_MANUAL_HUE);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, -1);
break;
case IS_HUE_DEFAULT:
IS_ISP_SET_PARAM_ADJUST_CMD(dev,
ISP_ADJUST_COMMAND_MANUAL_HUE);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 0);
break;
case IS_HUE_PLUS_1:
IS_ISP_SET_PARAM_ADJUST_CMD(dev,
ISP_ADJUST_COMMAND_MANUAL_HUE);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 1);
break;
case IS_HUE_PLUS_2:
IS_ISP_SET_PARAM_ADJUST_CMD(dev,
ISP_ADJUST_COMMAND_MANUAL_HUE);
IS_ISP_SET_PARAM_ADJUST_HUE(dev, 2);
break;
default:
/* Unsupported hue index: nothing staged, nothing committed. */
return ret;
}
/* NOTE(review): sibling helpers guard with "value >= 0"; this one uses
 * IS_HUE_MINUS_2 as the lower bound - presumably equivalent if the
 * enum starts at IS_HUE_MINUS_2 == 0, but confirm against the header. */
if (value >= IS_HUE_MINUS_2 && value < IS_HUE_MAX) {
IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
IS_INC_PARAM_NUM(dev);
/* Flush the parameter region so the firmware sees the update. */
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
}
return ret;
}
/*
 * fimc_is_v4l2_isp_metering - select the ISP exposure metering mode.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: IS_METERING_* control index
 *
 * Unknown modes are silently ignored.  Always returns 0.
 */
static int fimc_is_v4l2_isp_metering(struct fimc_is_dev *dev, int value)
{
	int ret = 0;

	/* Map the control index onto the firmware metering command. */
	if (value == IS_METERING_AVERAGE)
		IS_ISP_SET_PARAM_METERING_CMD(dev,
			ISP_METERING_COMMAND_AVERAGE);
	else if (value == IS_METERING_SPOT)
		IS_ISP_SET_PARAM_METERING_CMD(dev, ISP_METERING_COMMAND_SPOT);
	else if (value == IS_METERING_MATRIX)
		IS_ISP_SET_PARAM_METERING_CMD(dev, ISP_METERING_COMMAND_MATRIX);
	else if (value == IS_METERING_CENTER)
		IS_ISP_SET_PARAM_METERING_CMD(dev, ISP_METERING_COMMAND_CENTER);
	else
		return ret;

	if (value >= 0 && value < IS_METERING_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
		IS_INC_PARAM_NUM(dev);
		/* Flush the parameter region and notify the firmware. */
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_metering_legacy - legacy metering mode control.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: legacy METERING_* control index
 *
 * Unknown modes are silently ignored.  Always returns 0.
 */
static int fimc_is_v4l2_isp_metering_legacy(struct fimc_is_dev *dev, int value)
{
int ret = 0;
switch (value) {
case METERING_CENTER:
IS_ISP_SET_PARAM_METERING_CMD(dev, ISP_METERING_COMMAND_CENTER);
break;
case METERING_SPOT:
IS_ISP_SET_PARAM_METERING_CMD(dev, ISP_METERING_COMMAND_SPOT);
break;
case METERING_MATRIX:
IS_ISP_SET_PARAM_METERING_CMD(dev, ISP_METERING_COMMAND_MATRIX);
break;
default:
/* Unsupported legacy mode: nothing staged, nothing committed. */
return ret;
}
/* Commit when the mode lies inside the legacy enum range.
 * NOTE(review): the guard excludes METERING_BASE itself - presumably
 * all three handled modes satisfy it; confirm against the enum. */
if (value > METERING_BASE && value < METERING_MAX) {
IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING);
IS_INC_PARAM_NUM(dev);
/* Flush the parameter region so the firmware sees the update. */
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
}
return ret;
}
/*
 * fimc_is_v4l2_isp_afc - configure anti-flicker compensation (AFC).
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: IS_AFC_* control index (off / auto / manual 50 Hz / 60 Hz)
 *
 * Unknown values are silently ignored.  Always returns 0.
 */
static int fimc_is_v4l2_isp_afc(struct fimc_is_dev *dev, int value)
{
	int ret = 0;

	/* Map the control index onto AFC command + manual frequency. */
	if (value == IS_AFC_DISABLE) {
		IS_ISP_SET_PARAM_AFC_CMD(dev, ISP_AFC_COMMAND_DISABLE);
		IS_ISP_SET_PARAM_AFC_MANUAL(dev, 0);
	} else if (value == IS_AFC_AUTO) {
		IS_ISP_SET_PARAM_AFC_CMD(dev, ISP_AFC_COMMAND_AUTO);
		IS_ISP_SET_PARAM_AFC_MANUAL(dev, 0);
	} else if (value == IS_AFC_MANUAL_50HZ) {
		IS_ISP_SET_PARAM_AFC_CMD(dev, ISP_AFC_COMMAND_MANUAL);
		IS_ISP_SET_PARAM_AFC_MANUAL(dev, ISP_AFC_MANUAL_50HZ);
	} else if (value == IS_AFC_MANUAL_60HZ) {
		IS_ISP_SET_PARAM_AFC_CMD(dev, ISP_AFC_COMMAND_MANUAL);
		IS_ISP_SET_PARAM_AFC_MANUAL(dev, ISP_AFC_MANUAL_60HZ);
	} else {
		return ret;
	}

	if (value >= 0 && value < IS_AFC_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_AFC);
		IS_INC_PARAM_NUM(dev);
		/* Flush the parameter region and notify the firmware. */
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_isp_afc_legacy - legacy anti-banding (AFC) control path.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: legacy ANTI_BANDING_* control index
 *
 * Unknown values are silently ignored.  Always returns 0.
 */
static int fimc_is_v4l2_isp_afc_legacy(struct fimc_is_dev *dev, int value)
{
int ret = 0;
switch (value) {
case ANTI_BANDING_OFF:
IS_ISP_SET_PARAM_AFC_CMD(dev, ISP_AFC_COMMAND_DISABLE);
IS_ISP_SET_PARAM_AFC_MANUAL(dev, 0);
break;
case ANTI_BANDING_AUTO:
IS_ISP_SET_PARAM_AFC_CMD(dev, ISP_AFC_COMMAND_AUTO);
IS_ISP_SET_PARAM_AFC_MANUAL(dev, 0);
break;
case ANTI_BANDING_50HZ:
IS_ISP_SET_PARAM_AFC_CMD(dev, ISP_AFC_COMMAND_MANUAL);
IS_ISP_SET_PARAM_AFC_MANUAL(dev, ISP_AFC_MANUAL_50HZ);
break;
case ANTI_BANDING_60HZ:
IS_ISP_SET_PARAM_AFC_CMD(dev, ISP_AFC_COMMAND_MANUAL);
IS_ISP_SET_PARAM_AFC_MANUAL(dev, ISP_AFC_MANUAL_60HZ);
break;
default:
/* Unsupported anti-banding value: nothing staged or committed. */
return ret;
}
/* NOTE(review): unlike the non-legacy variant this guard is inclusive
 * of ANTI_BANDING_60HZ rather than using a *_MAX sentinel - presumably
 * equivalent for the legacy enum; confirm against the header. */
if (value >= ANTI_BANDING_OFF && value <= ANTI_BANDING_60HZ) {
IS_SET_PARAM_BIT(dev, PARAM_ISP_AFC);
IS_INC_PARAM_NUM(dev);
/* Flush the parameter region so the firmware sees the update. */
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
}
return ret;
}
/*
 * fimc_is_v4l2_fd_angle_mode - configure one face-detect option.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: selector for which FD config field to program
 *
 * Each recognized selector stages the matching FD config command plus
 * its payload; the commit sequence (set bit, bump count, cache clean,
 * hw_set_param) previously duplicated in every case is now hoisted
 * below the switch.  Unknown selectors commit nothing.  Always
 * returns 0.
 *
 * NOTE(review): every case passes @value itself (a selector / V4L2 CID
 * for most cases) as the config payload, exactly as the original code
 * did - looks suspicious but is preserved; confirm against callers.
 */
static int fimc_is_v4l2_fd_angle_mode(struct fimc_is_dev *dev, int value)
{
	int ret = 0;

	switch (value) {
	case IS_FD_ROLL_ANGLE_BASIC:
		IS_FD_SET_PARAM_FD_CONFIG_CMD(dev,
			FD_CONFIG_COMMAND_ROLL_ANGLE);
		IS_FD_SET_PARAM_FD_CONFIG_ROLL_ANGLE(dev, value);
		break;
	case V4L2_CID_IS_FD_SET_YAW_ANGLE:
		IS_FD_SET_PARAM_FD_CONFIG_CMD(dev,
			FD_CONFIG_COMMAND_YAW_ANGLE);
		IS_FD_SET_PARAM_FD_CONFIG_YAW_ANGLE(dev, value);
		break;
	case V4L2_CID_IS_FD_SET_SMILE_MODE:
		IS_FD_SET_PARAM_FD_CONFIG_CMD(dev,
			FD_CONFIG_COMMAND_SMILE_MODE);
		IS_FD_SET_PARAM_FD_CONFIG_SMILE_MODE(dev, value);
		break;
	case V4L2_CID_IS_FD_SET_BLINK_MODE:
		IS_FD_SET_PARAM_FD_CONFIG_CMD(dev,
			FD_CONFIG_COMMAND_BLINK_MODE);
		IS_FD_SET_PARAM_FD_CONFIG_BLINK_MODE(dev, value);
		break;
	case V4L2_CID_IS_FD_SET_EYE_DETECT_MODE:
		IS_FD_SET_PARAM_FD_CONFIG_CMD(dev,
			FD_CONFIG_COMMAND_EYES_DETECT);
		IS_FD_SET_PARAM_FD_CONFIG_EYE_DETECT(dev, value);
		break;
	case V4L2_CID_IS_FD_SET_MOUTH_DETECT_MODE:
		IS_FD_SET_PARAM_FD_CONFIG_CMD(dev,
			FD_CONFIG_COMMAND_MOUTH_DETECT);
		IS_FD_SET_PARAM_FD_CONFIG_MOUTH_DETECT(dev, value);
		break;
	case V4L2_CID_IS_FD_SET_ORIENTATION_MODE:
		IS_FD_SET_PARAM_FD_CONFIG_CMD(dev,
			FD_CONFIG_COMMAND_ORIENTATION);
		IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION(dev, value);
		break;
	case V4L2_CID_IS_FD_SET_ORIENTATION:
		IS_FD_SET_PARAM_FD_CONFIG_CMD(dev,
			FD_CONFIG_COMMAND_ORIENTATION_VALUE);
		IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION_VALUE(dev, value);
		break;
	default:
		return ret;	/* unknown selector: nothing to commit */
	}

	/* Shared commit sequence (was duplicated in every case above). */
	IS_SET_PARAM_BIT(dev, PARAM_FD_CONFIG);
	IS_INC_PARAM_NUM(dev);
	fimc_is_mem_cache_clean((void *)dev->is_p_region, IS_PARAM_SIZE);
	fimc_is_hw_set_param(dev);

	return ret;
}
static int fimc_is_v4l2_frame_rate(struct fimc_is_dev *dev, int value)
{
int i, ret = 0;
int width, height, format;
width = fimc_is_hw_get_sensor_size_width(dev);
height = fimc_is_hw_get_sensor_size_height(dev);
format = fimc_is_hw_get_sensor_format(dev);
dev->sensor.framerate_update = true;
switch (value) {
case FRAME_RATE_AUTO: /* FRAME_RATE_AUTO */
i = fimc_is_hw_get_sensor_max_framerate(dev);
IS_SENSOR_SET_FRAME_RATE(dev, i);
IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, OTF_INPUT_COMMAND_ENABLE);
IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, width);
IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, height);
IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, format);
IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev,
OTF_INPUT_BIT_WIDTH_10BIT);
IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev,
OTF_INPUT_ORDER_BAYER_GR_BG);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MIN(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MAX(dev, 66666);
IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev,
CONTROL_COMMAND_START);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
break;
case FRAME_RATE_7: /* FRAME_RATE_7 */
IS_SENSOR_SET_FRAME_RATE(dev, 7);
IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, OTF_INPUT_COMMAND_ENABLE);
IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, width);
IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, height);
IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, format);
IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev,
OTF_INPUT_BIT_WIDTH_10BIT);
IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev,
OTF_INPUT_ORDER_BAYER_GR_BG);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MIN(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MAX(dev, 124950);
IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev,
CONTROL_COMMAND_START);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
break;
case FRAME_RATE_15: /* FRAME_RATE_15 */
IS_SENSOR_SET_FRAME_RATE(dev, 15);
IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, OTF_INPUT_COMMAND_ENABLE);
IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, width);
IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, height);
IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, format);
IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev,
OTF_INPUT_BIT_WIDTH_10BIT);
IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev,
OTF_INPUT_ORDER_BAYER_GR_BG);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MIN(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MAX(dev, 66666);
IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev,
CONTROL_COMMAND_START);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
break;
case FRAME_RATE_20: /* FRAME_RATE_20 */
IS_SENSOR_SET_FRAME_RATE(dev, 20);
IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, OTF_INPUT_COMMAND_ENABLE);
IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, width);
IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, height);
IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, format);
IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev,
OTF_INPUT_BIT_WIDTH_10BIT);
IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev,
OTF_INPUT_ORDER_BAYER_GR_BG);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MIN(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MAX(dev, 50000);
IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev,
CONTROL_COMMAND_START);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
break;
case FRAME_RATE_30: /* FRAME_RATE_30 */
IS_SENSOR_SET_FRAME_RATE(dev, 30);
IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, OTF_INPUT_COMMAND_ENABLE);
IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, width);
IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, height);
IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, format);
IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev,
OTF_INPUT_BIT_WIDTH_10BIT);
IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev,
OTF_INPUT_ORDER_BAYER_GR_BG);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MIN(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MAX(dev, 33333);
IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev,
CONTROL_COMMAND_START);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
break;
case FRAME_RATE_60: /* FRAME_RATE_60 */
IS_SENSOR_SET_FRAME_RATE(dev, 60);
IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, OTF_INPUT_COMMAND_ENABLE);
IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, width);
IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, height);
IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, format);
IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev,
OTF_INPUT_BIT_WIDTH_10BIT);
IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev,
OTF_INPUT_ORDER_BAYER_GR_BG);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MIN(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MAX(dev, 16666);
IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev,
CONTROL_COMMAND_START);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
break;
default:
IS_SENSOR_SET_FRAME_RATE(dev, value);
IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, OTF_INPUT_COMMAND_ENABLE);
IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, width);
IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, height);
IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, format);
IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev,
OTF_INPUT_BIT_WIDTH_10BIT);
IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev,
OTF_INPUT_ORDER_BAYER_GR_BG);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MIN(dev, 0);
IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MAX(dev,
(u32)(1000000/value));
IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
IS_ISP_SET_PARAM_CONTROL_CMD(dev,
CONTROL_COMMAND_START);
IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
fimc_is_hw_set_param(dev);
ret = wait_event_timeout(dev->irq_queue1,
test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
if (!ret) {
err("wait timeout : %s\n", __func__);
return -EINVAL;
}
}
break;
}
return ret;
}
/*
 * fimc_is_v4l2_ae_awb_lockunlock - lock/unlock auto-exposure and
 * auto-white-balance independently.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: AE_*_AWB_* combination selector
 *
 * Locking maps onto ISP_AA_COMMAND_STOP and unlocking onto
 * ISP_AA_COMMAND_START for the chosen AA targets.  The mixed cases
 * (one locked, one unlocked) issue two back-to-back parameter pushes
 * because the firmware AA block takes a single command per call.
 * Unknown selectors are ignored.  Always returns 0.
 */
static int fimc_is_v4l2_ae_awb_lockunlock(struct fimc_is_dev *dev, int value)
{
int ret = 0;
switch (value) {
case AE_UNLOCK_AWB_UNLOCK:
/* Resume both AE and AWB with a single AA start command. */
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AE |
ISP_AA_TARGET_AWB);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case AE_LOCK_AWB_UNLOCK:
/* First push: stop (lock) AE only. */
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_STOP);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AE);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
/* Second push: start (unlock) AWB. */
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AWB);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case AE_UNLOCK_AWB_LOCK:
/* First push: start (unlock) AE only. */
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_START);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AE);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
/* Second push: stop (lock) AWB. */
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_STOP);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AWB);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
case AE_LOCK_AWB_LOCK:
/* Lock both AE and AWB with a single AA stop command. */
IS_ISP_SET_PARAM_AA_CMD(dev, ISP_AA_COMMAND_STOP);
IS_ISP_SET_PARAM_AA_TARGET(dev, ISP_AA_TARGET_AE |
ISP_AA_TARGET_AWB);
IS_SET_PARAM_BIT(dev, PARAM_ISP_AA);
IS_INC_PARAM_NUM(dev);
fimc_is_mem_cache_clean((void *)dev->is_p_region,
IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
break;
default:
/* Unknown selector: no change. */
break;
}
return ret;
}
/*
 * fimc_is_v4l2_set_isp - enable or disable the ISP bypass.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: IS_ISP_BYPASS_* control index
 *
 * Unknown values are silently ignored.  Always returns 0.
 */
static int fimc_is_v4l2_set_isp(struct fimc_is_dev *dev, int value)
{
	int ret = 0;

	/* Translate the control index into the control-bypass setting. */
	if (value == IS_ISP_BYPASS_DISABLE)
		IS_ISP_SET_PARAM_CONTROL_BYPASS(dev, CONTROL_BYPASS_DISABLE);
	else if (value == IS_ISP_BYPASS_ENABLE)
		IS_ISP_SET_PARAM_CONTROL_BYPASS(dev, CONTROL_BYPASS_ENABLE);
	else
		return ret;

	if (value >= 0 && value < IS_ISP_BYPASS_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
		IS_INC_PARAM_NUM(dev);
		/* Flush the parameter region and notify the firmware. */
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_set_drc - enable or disable the DRC bypass.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: IS_DRC_BYPASS_* control index
 *
 * Both accepted values also (re)issue CONTROL_COMMAND_START to the DRC
 * block.  Unknown values are silently ignored.  Always returns 0.
 */
static int fimc_is_v4l2_set_drc(struct fimc_is_dev *dev, int value)
{
	int ret = 0;

	if (value == IS_DRC_BYPASS_DISABLE) {
		IS_DRC_SET_PARAM_CONTROL_BYPASS(dev, CONTROL_BYPASS_DISABLE);
		IS_DRC_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_START);
	} else if (value == IS_DRC_BYPASS_ENABLE) {
		IS_DRC_SET_PARAM_CONTROL_BYPASS(dev, CONTROL_BYPASS_ENABLE);
		IS_DRC_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_START);
	} else {
		return ret;
	}

	if (value >= 0 && value < IS_DRC_BYPASS_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_DRC_CONTROL);
		IS_INC_PARAM_NUM(dev);
		/* Flush the parameter region and notify the firmware. */
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_cmd_isp - start or stop the ISP block.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: IS_ISP_COMMAND_* control index
 *
 * Unknown values are silently ignored.  Always returns 0.
 */
static int fimc_is_v4l2_cmd_isp(struct fimc_is_dev *dev, int value)
{
	int ret = 0;

	if (value == IS_ISP_COMMAND_STOP)
		IS_ISP_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
	else if (value == IS_ISP_COMMAND_START)
		IS_ISP_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_START);
	else
		return ret;

	if (value >= 0 && value < IS_ISP_COMMAND_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
		IS_INC_PARAM_NUM(dev);
		/* Flush the parameter region and notify the firmware. */
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_cmd_drc - start or stop the DRC block.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: IS_DRC_COMMAND_* control index
 *
 * Unknown values are ignored.  Always returns 0.
 */
static int fimc_is_v4l2_cmd_drc(struct fimc_is_dev *dev, int value)
{
	int ret = 0;

	switch (value) {
	case IS_DRC_COMMAND_STOP:
		IS_DRC_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
		break;
	case IS_DRC_COMMAND_START:
		IS_DRC_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_START);
		break;
	default:
		/* Fix: bail out on unknown commands instead of falling
		 * through and possibly committing an unset control block
		 * (matches fimc_is_v4l2_cmd_isp()). */
		return ret;
	}
	/* NOTE(review): the range guard reuses IS_ISP_COMMAND_MAX - looks
	 * like a copy-paste from the ISP variant; confirm whether a
	 * DRC-specific maximum exists. */
	if (value >= 0 && value < IS_ISP_COMMAND_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_DRC_CONTROL);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_cmd_fd - start or stop the face-detect block.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: IS_FD_COMMAND_* control index
 *
 * Unknown values are ignored.  Always returns 0.
 */
static int fimc_is_v4l2_cmd_fd(struct fimc_is_dev *dev, int value)
{
	int ret = 0;

	switch (value) {
	case IS_FD_COMMAND_STOP:
		dbg("IS_FD_COMMAND_STOP\n");
		IS_FD_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_STOP);
		break;
	case IS_FD_COMMAND_START:
		dbg("IS_FD_COMMAND_START\n");
		IS_FD_SET_PARAM_CONTROL_CMD(dev, CONTROL_COMMAND_START);
		break;
	default:
		/* Fix: bail out on unknown commands instead of falling
		 * through and possibly committing an unset control block
		 * (matches fimc_is_v4l2_cmd_isp()). */
		return ret;
	}
	/* NOTE(review): the range guard reuses IS_ISP_COMMAND_MAX - looks
	 * like a copy-paste from the ISP variant; confirm whether an
	 * FD-specific maximum exists. */
	if (value >= 0 && value < IS_ISP_COMMAND_MAX) {
		IS_SET_PARAM_BIT(dev, PARAM_FD_CONTROL);
		IS_INC_PARAM_NUM(dev);
		fimc_is_mem_cache_clean((void *)dev->is_p_region,
			IS_PARAM_SIZE);
		fimc_is_hw_set_param(dev);
	}
	return ret;
}
/*
 * fimc_is_v4l2_shot_mode - set the global shot-mode parameter.
 * @dev: FIMC-IS device whose shared parameter region is updated
 * @value: shot mode, forwarded verbatim to the firmware
 *
 * Unlike the other control helpers there is no validation here: the
 * value is staged and the parameter region is flushed and pushed to
 * the firmware unconditionally.  Always returns 0.
 */
static int fimc_is_v4l2_shot_mode(struct fimc_is_dev *dev, int value)
{
int ret = 0;
IS_SET_PARAM_GLOBAL_SHOTMODE_CMD(dev, value);
IS_SET_PARAM_BIT(dev, PARAM_GLOBAL_SHOTMODE);
IS_INC_PARAM_NUM(dev);
/* Flush so the IS firmware observes the updated parameter region. */
fimc_is_mem_cache_clean((void *)dev->is_p_region, IS_PARAM_SIZE);
fimc_is_hw_set_param(dev);
return ret;
}
/*
 * fimc_is_v4l2_mode_change - switch the firmware scenario/mode.
 * @dev: FIMC-IS device
 * @value: target mode, forwarded to fimc_is_hw_change_mode()
 *
 * Requires initialization to have completed (IS_ST_INIT_DONE).  Waits
 * for the firmware to signal IS_ST_CHANGE_MODE; on timeout the device
 * is put into low-power-off and -EINVAL is returned.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int fimc_is_v4l2_mode_change(struct fimc_is_dev *dev, int value)
{
	int ret;

	if (!test_bit(IS_ST_INIT_DONE, &dev->state)) {
		err("Not init done state!!\n");
		return -EINVAL;
	}
	clear_bit(IS_ST_CHANGE_MODE, &dev->state);
	fimc_is_hw_change_mode(dev, value);
	ret = wait_event_timeout(dev->irq_queue1,
		test_bit(IS_ST_CHANGE_MODE, &dev->state),
		FIMC_IS_SHUTDOWN_TIMEOUT);
	if (!ret) {
		err("Mode change timeout !!\n");
		fimc_is_hw_set_low_poweroff(dev, true);
		return -EINVAL;
	}
	printk(KERN_INFO "CAC margin - %d, %d\n", dev->sensor.offset_x,
		dev->sensor.offset_y);
	/* Fix: return 0 on success.  The original returned the positive
	 * remaining-jiffies value from wait_event_timeout(), which the
	 * s_ctrl caller would propagate as the ioctl result. */
	return 0;
}
/*
 * fimc_is_s_ctrl() - v4l2 subdev core op: set a single control.
 *
 * Dispatches on ctrl->id to the per-feature helpers (AF, AWB, ISO,
 * effects, metering, FD configuration, frame bookkeeping, ...).
 * Returns 0 or a helper's return value; -EINVAL for unknown controls.
 */
static int fimc_is_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
    int ret = 0;
    int i; /* only used by V4L2_CID_IS_CLEAR_FRAME_NUMBER below */
    struct fimc_is_dev *dev = to_fimc_is_dev(sd);
    switch (ctrl->id) {
    case V4L2_CID_IS_S_SCENARIO_MODE:
        ret = fimc_is_v4l2_mode_change(dev, ctrl->value);
        break;
    case V4L2_CID_IS_S_FORMAT_SCENARIO:
        /* Set default value between still and video mode change */
        /* This is optional part */
        /* NOTE(review): (scenario_id + value) == 1 presumably detects a
         * still<->video preview transition (ids 0 and 1) — confirm the
         * enum values before relying on this. */
        if ((dev->scenario_id + ctrl->value) == 1) {
            /* Reset AWB to auto and exposure to manual/0, then
             * commit the parameters and wait for the firmware ack. */
            IS_ISP_SET_PARAM_AWB_CMD(dev, ISP_AWB_COMMAND_AUTO);
            IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev, 0);
            IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB);
            IS_INC_PARAM_NUM(dev);
            IS_ISP_SET_PARAM_ADJUST_CMD(dev,
                ISP_ADJUST_COMMAND_MANUAL_EXPOSURE);
            IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, 0);
            IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST);
            IS_INC_PARAM_NUM(dev);
            fimc_is_mem_cache_clean((void *)dev->is_p_region,
                IS_PARAM_SIZE);
            clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
            fimc_is_hw_set_param(dev);
            ret = wait_event_timeout(dev->irq_queue1,
                test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
                FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
            if (!ret) {
                err("wait timeout : %s\n", __func__);
                return -EINVAL;
            }
        }
        /* Map the requested mode onto the internal scenario id. */
        switch (ctrl->value) {
        case IS_MODE_PREVIEW_STILL:
            dev->scenario_id = ISS_PREVIEW_STILL;
            break;
        case IS_MODE_PREVIEW_VIDEO:
            dev->scenario_id = ISS_PREVIEW_VIDEO;
            break;
        case IS_MODE_CAPTURE_STILL:
            dev->scenario_id = ISS_CAPTURE_STILL;
            break;
        case IS_MODE_CAPTURE_VIDEO:
            dev->scenario_id = ISS_CAPTURE_VIDEO;
            break;
        default:
            return -EBUSY;
        }
        break;
    case V4L2_CID_IS_CAMERA_SHOT_MODE_NORMAL:
        ret = fimc_is_v4l2_shot_mode(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_FRAME_RATE:
        ret = fimc_is_v4l2_frame_rate(dev, ctrl->value);
        break;
    /* Focus */
    case V4L2_CID_IS_CAMERA_OBJECT_POSITION_X:
    case V4L2_CID_CAMERA_OBJECT_POSITION_X:
        /* Cached only; used by a later AF command. */
        dev->af.pos_x = ctrl->value;
        break;
    case V4L2_CID_IS_CAMERA_OBJECT_POSITION_Y:
    case V4L2_CID_CAMERA_OBJECT_POSITION_Y:
        dev->af.pos_y = ctrl->value;
        break;
    case V4L2_CID_CAMERA_FOCUS_MODE:
        ret = fimc_is_v4l2_af_mode(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_SET_AUTO_FOCUS:
        ret = fimc_is_v4l2_af_start_stop(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_TOUCH_AF_START_STOP:
        /* Intentionally a no-op: touch AF is accepted but ignored. */
        switch (ctrl->value) {
        case TOUCH_AF_STOP:
            break;
        case TOUCH_AF_START:
            break;
        default:
            break;
        }
        break;
    case V4L2_CID_CAMERA_CAF_START_STOP:
        /* Intentionally a no-op: continuous AF is accepted but ignored. */
        switch (ctrl->value) {
        case CAF_STOP:
            break;
        case CAF_START:
            break;
        default:
            break;
        }
        break;
    /* AWB, AE Lock/Unlock */
    case V4L2_CID_CAMERA_AEAWB_LOCK_UNLOCK:
        ret = fimc_is_v4l2_ae_awb_lockunlock(dev, ctrl->value);
        break;
    /* FLASH */
    case V4L2_CID_CAMERA_FLASH_MODE:
        ret = fimc_is_v4l2_isp_flash_mode(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_AWB_MODE:
        ret = fimc_is_v4l2_awb_mode(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_WHITE_BALANCE:
        ret = fimc_is_v4l2_awb_mode_legacy(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_EFFECT:
        ret = fimc_is_v4l2_isp_effect_legacy(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_IMAGE_EFFECT:
        ret = fimc_is_v4l2_isp_effect(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_ISO:
    case V4L2_CID_CAMERA_ISO:
        ret = fimc_is_v4l2_isp_iso(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_CONTRAST:
        ret = fimc_is_v4l2_isp_contrast_legacy(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_CONTRAST:
        ret = fimc_is_v4l2_isp_contrast(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_SATURATION:
    case V4L2_CID_CAMERA_SATURATION:
        ret = fimc_is_v4l2_isp_saturation(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_SHARPNESS:
    case V4L2_CID_CAMERA_SHARPNESS:
        ret = fimc_is_v4l2_isp_sharpness(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_EXPOSURE:
        ret = fimc_is_v4l2_isp_exposure(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_BRIGHTNESS:
        ret = fimc_is_v4l2_isp_exposure_legacy(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_BRIGHTNESS:
        ret = fimc_is_v4l2_isp_brightness(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_HUE:
        ret = fimc_is_v4l2_isp_hue(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_METERING:
        ret = fimc_is_v4l2_isp_metering_legacy(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_METERING:
        ret = fimc_is_v4l2_isp_metering(dev, ctrl->value);
        break;
    /* Ony valid at SPOT Mode */
    /* These four only write the shadow region; no SET_PARAM_BIT /
     * hw_set_param here, so they are presumably committed by a later
     * metering command — confirm against the metering helper. */
    case V4L2_CID_IS_CAMERA_METERING_POSITION_X:
        IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_METERING_POSITION_Y:
        IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_METERING_WINDOW_X:
        IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_METERING_WINDOW_Y:
        IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_ANTI_BANDING:
        ret = fimc_is_v4l2_isp_afc_legacy(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CAMERA_AFC_MODE:
        ret = fimc_is_v4l2_isp_afc(dev, ctrl->value);
        break;
    case V4L2_CID_IS_FD_SET_MAX_FACE_NUMBER:
        /* Negative values are silently ignored. */
        if (ctrl->value >= 0) {
            IS_FD_SET_PARAM_FD_CONFIG_CMD(dev,
                FD_CONFIG_COMMAND_MAXIMUM_NUMBER);
            IS_FD_SET_PARAM_FD_CONFIG_MAX_NUMBER(dev, ctrl->value);
            IS_SET_PARAM_BIT(dev, PARAM_FD_CONFIG);
            IS_INC_PARAM_NUM(dev);
            fimc_is_mem_cache_clean((void *)dev->is_p_region,
                IS_PARAM_SIZE);
            fimc_is_hw_set_param(dev);
        }
        break;
    case V4L2_CID_IS_FD_SET_ROLL_ANGLE:
        ret = fimc_is_v4l2_fd_angle_mode(dev, ctrl->value);
        break;
    case V4L2_CID_IS_FD_SET_DATA_ADDRESS:
        dev->fd_header.target_addr = ctrl->value;
        break;
    case V4L2_CID_IS_SET_ISP:
        ret = fimc_is_v4l2_set_isp(dev, ctrl->value);
        break;
    case V4L2_CID_IS_SET_DRC:
        ret = fimc_is_v4l2_set_drc(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CMD_ISP:
        ret = fimc_is_v4l2_cmd_isp(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CMD_DRC:
        ret = fimc_is_v4l2_cmd_drc(dev, ctrl->value);
        break;
    case V4L2_CID_IS_CMD_FD:
        ret = fimc_is_v4l2_cmd_fd(dev, ctrl->value);
        break;
    case V4L2_CID_IS_SET_FRAME_NUMBER:
        /* Restart frame bookkeeping: invalidate all four shared frame
         * headers and flush them to the firmware. */
        dev->frame_count = ctrl->value + 1;
        dev->is_p_region->header[0].valid = 0;
        dev->is_p_region->header[1].valid = 0;
        dev->is_p_region->header[2].valid = 0;
        dev->is_p_region->header[3].valid = 0;
        fimc_is_mem_cache_clean((void *)IS_HEADER, IS_PARAM_SIZE);
        break;
    case V4L2_CID_IS_SET_FRAME_VALID:
        /* Capture scenarios always use header slot 0; preview rotates
         * through MAX_FRAME_COUNT_PREVIEW slots by frame count. */
        if ((dev->scenario_id == ISS_CAPTURE_STILL)
            || (dev->scenario_id == ISS_CAPTURE_VIDEO)) {
            dev->is_p_region->header[0].valid = ctrl->value;
            dev->is_p_region->header[0].bad_mark = ctrl->value;
            dev->is_p_region->header[0].captured = ctrl->value;
        } else {
            dev->is_p_region->header[dev->frame_count%
                MAX_FRAME_COUNT_PREVIEW].valid = ctrl->value;
            dev->is_p_region->header[dev->frame_count%
                MAX_FRAME_COUNT_PREVIEW].bad_mark = ctrl->value;
            dev->is_p_region->header[dev->frame_count%
                MAX_FRAME_COUNT_PREVIEW].captured = ctrl->value;
        }
        dev->frame_count++;
        fimc_is_mem_cache_clean((void *)IS_HEADER, IS_PARAM_SIZE);
        break;
    case V4L2_CID_IS_SET_FRAME_BADMARK:
        break;
    case V4L2_CID_IS_SET_FRAME_CAPTURED:
        break;
    case V4L2_CID_IS_CLEAR_FRAME_NUMBER:
        if (dev->scenario_id == ISS_CAPTURE_STILL) {
            dev->is_p_region->header[0].valid = 0;
            dev->is_p_region->header[0].bad_mark = 0;
            dev->is_p_region->header[0].captured = 0;
        } else if (dev->scenario_id == ISS_CAPTURE_VIDEO) {
            dev->is_p_region->header[0].valid = 0;
            dev->is_p_region->header[0].bad_mark = 0;
            dev->is_p_region->header[0].captured = 0;
        } else {
            /* Preview: clear only headers older than the current
             * frame counter. */
            for (i = 0; i < MAX_FRAME_COUNT_PREVIEW; i++) {
                if (dev->is_p_region->header[i].frame_number <
                    dev->frame_count) {
                    dev->is_p_region->header[i].valid = 0;
                    dev->is_p_region->header[i].
                        bad_mark = 0;
                    dev->is_p_region->header[i].
                        captured = 0;
                }
            }
        }
        fimc_is_mem_cache_clean((void *)IS_HEADER, IS_PARAM_SIZE);
        break;
    case V4L2_CID_CAMERA_SCENE_MODE:
        ret = fimc_is_v4l2_isp_scene_mode(dev, ctrl->value);
        break;
    case V4L2_CID_IS_ZOOM:
        ret = fimc_is_v4l2_digital_zoom(dev, ctrl->value);
        break;
    case V4L2_CID_CAMERA_VT_MODE:
        /* VT mode selects an alternate setfile sub-index. */
        dev->setfile.sub_index = ctrl->value;
        printk(KERN_INFO "VT mode(%d) is selected\n",
            dev->setfile.sub_index);
        break;
    case V4L2_CID_CAMERA_VGA_BLUR:
        break;
    default:
        dbg("Invalid control\n");
        return -EINVAL;
    }
    return ret;
}
/*
 * fimc_is_g_ext_ctrls_handler() - fill one extended control from the
 * face-detection result region.
 *
 * Coordinates from the firmware are in sensor pixels; they are mapped
 * to the symmetric range [-GED_FD_RANGE, +GED_FD_RANGE] via
 *     out = (pixel * 2 * GED_FD_RANGE) / frame_dim - GED_FD_RANGE
 * using dev->fd_header.width/height cached by the caller.
 *
 * Return conventions (consumed by fimc_is_g_ext_ctrls): 0 = ok,
 * -255 = no more faces (caller stops cleanly), 255 = unknown control
 * (caller records error_idx).
 *
 * @index: position in the ext-control array; currently unused here.
 */
static int fimc_is_g_ext_ctrls_handler(struct fimc_is_dev *dev,
    struct v4l2_ext_control *ctrl, int index)
{
    int ret = 0;
    u32 tmp = 0;
    switch (ctrl->id) {
    /* Face Detection CID handler */
    /* 1. Overall information */
    case V4L2_CID_IS_FD_GET_FACE_COUNT:
        ctrl->value = dev->fd_header.count;
        break;
    case V4L2_CID_IS_FD_GET_FACE_FRAME_NUMBER:
        /* Only this getter bounds-checks the face cursor; the others
         * rely on the caller iterating in the documented order. */
        if (dev->fd_header.offset < dev->fd_header.count) {
            ctrl->value =
                dev->is_p_region->face[dev->fd_header.index
                + dev->fd_header.offset].frame_number;
        } else {
            ctrl->value = 0;
            return -255; /* sentinel: face list exhausted */
        }
        break;
    case V4L2_CID_IS_FD_GET_FACE_CONFIDENCE:
        ctrl->value = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].confidence;
        break;
    case V4L2_CID_IS_FD_GET_FACE_SMILE_LEVEL:
        ctrl->value = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].smile_level;
        break;
    case V4L2_CID_IS_FD_GET_FACE_BLINK_LEVEL:
        ctrl->value = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].blink_level;
        break;
    /* 2. Face information */
    case V4L2_CID_IS_FD_GET_FACE_TOPLEFT_X:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].face.offset_x;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.width;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_FACE_TOPLEFT_Y:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].face.offset_y;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.height;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_FACE_BOTTOMRIGHT_X:
        /* bottom-right = top-left + extent, then normalize */
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].face.offset_x
            + dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].face.width;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.width;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_FACE_BOTTOMRIGHT_Y:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].face.offset_y
            + dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].face.height;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.height;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    /* 3. Left eye information */
    case V4L2_CID_IS_FD_GET_LEFT_EYE_TOPLEFT_X:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].left_eye.offset_x;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.width;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_LEFT_EYE_TOPLEFT_Y:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].left_eye.offset_y;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.height;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_LEFT_EYE_BOTTOMRIGHT_X:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].left_eye.offset_x
            + dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].left_eye.width;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.width;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_LEFT_EYE_BOTTOMRIGHT_Y:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].left_eye.offset_y
            + dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].left_eye.height;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.height;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    /* 4. Right eye information */
    case V4L2_CID_IS_FD_GET_RIGHT_EYE_TOPLEFT_X:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].right_eye.offset_x;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.width;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_RIGHT_EYE_TOPLEFT_Y:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].right_eye.offset_y;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.height;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_RIGHT_EYE_BOTTOMRIGHT_X:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].right_eye.offset_x
            + dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].right_eye.width;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.width;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_RIGHT_EYE_BOTTOMRIGHT_Y:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].right_eye.offset_y
            + dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].right_eye.height;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.height;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    /* 5. Mouth eye information */
    case V4L2_CID_IS_FD_GET_MOUTH_TOPLEFT_X:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].mouth.offset_x;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.width;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_MOUTH_TOPLEFT_Y:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].mouth.offset_y;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.height;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_MOUTH_BOTTOMRIGHT_X:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].mouth.offset_x
            + dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].mouth.width;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.width;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    case V4L2_CID_IS_FD_GET_MOUTH_BOTTOMRIGHT_Y:
        tmp = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].mouth.offset_y
            + dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].mouth.height;
        tmp = (tmp * 2 * GED_FD_RANGE) / dev->fd_header.height;
        ctrl->value = (s32)tmp - GED_FD_RANGE;
        break;
    /* 6. Angle information */
    case V4L2_CID_IS_FD_GET_ANGLE:
        ctrl->value = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].roll_angle;
        break;
    case V4L2_CID_IS_FD_GET_YAW_ANGLE:
        ctrl->value = dev->is_p_region->face[dev->fd_header.index
            + dev->fd_header.offset].yaw_angle;
        break;
    /* 7. Update next face information */
    case V4L2_CID_IS_FD_GET_NEXT:
        dev->fd_header.offset++;
        break;
    case V4L2_CID_CAM_SENSOR_FW_VER:
        /* NOTE(review): unbounded strcpy — safe only if ctrl->string is
         * at least as large as fw.fw_version; confirm buffer sizes. */
        strcpy(ctrl->string, dev->fw.fw_version);
        break;
    default:
        return 255; /* sentinel: unknown control id */
        break;
    }
    return ret;
}
/*
 * fimc_is_g_ext_ctrls() - v4l2 subdev core op: read extended controls
 * (face-detection results) under dev->slock.
 *
 * Caches the active scenario's frame dimensions into dev->fd_header so
 * the per-control handler can normalize face coordinates, invalidates
 * the CPU cache over the firmware-written face array, then walks the
 * control array. Handler return > 0 records error_idx and aborts;
 * return < 0 (face list exhausted) stops cleanly with ret forced to 0.
 *
 * Fix: the original validated ctrls->ctrl_class AFTER taking the
 * spinlock and returned -EINVAL without unlocking, leaving the lock
 * held with IRQs disabled. The check is now done before locking.
 */
static int fimc_is_g_ext_ctrls(struct v4l2_subdev *sd,
    struct v4l2_ext_controls *ctrls)
{
    struct fimc_is_dev *dev = to_fimc_is_dev(sd);
    struct v4l2_ext_control *ctrl;
    int i, ret = 0;
    unsigned long flags;

    /* Validate before locking — no cleanup needed on this path. */
    if (ctrls->ctrl_class != V4L2_CTRL_CLASS_CAMERA)
        return -EINVAL;

    spin_lock_irqsave(&dev->slock, flags);
    /* The firmware wrote the face array; invalidate before reading. */
    fimc_is_mem_cache_inv((void *)IS_FACE,
        (unsigned long)(sizeof(struct is_face_marker)*MAX_FACE_COUNT));
    dev->fd_header.offset = 0;
    /* get width and height at the current scenario */
    switch (dev->scenario_id) {
    case ISS_PREVIEW_STILL:
        dev->fd_header.width = (s32)dev->sensor.width_prev;
        dev->fd_header.height = (s32)dev->sensor.height_prev;
        break;
    case ISS_PREVIEW_VIDEO:
        dev->fd_header.width = (s32)dev->sensor.width_prev_cam;
        dev->fd_header.height = (s32)dev->sensor.height_prev_cam;
        break;
    case ISS_CAPTURE_STILL:
        dev->fd_header.width = (s32)dev->sensor.width_cap;
        dev->fd_header.height = (s32)dev->sensor.height_cap;
        break;
    case ISS_CAPTURE_VIDEO:
        dev->fd_header.width = (s32)dev->sensor.width_cam;
        dev->fd_header.height = (s32)dev->sensor.height_cam;
        break;
    }
    for (i = 0; i < ctrls->count; i++) {
        ctrl = ctrls->controls + i;
        ret = fimc_is_g_ext_ctrls_handler(dev, ctrl, i);
        if (ret > 0) {
            ctrls->error_idx = i;
            break;
        } else if (ret < 0) {
            ret = 0;
            break;
        }
    }
    dev->fd_header.index = 0;
    dev->fd_header.count = 0;
    spin_unlock_irqrestore(&dev->slock, flags);
    return ret;
}
/*
 * fimc_is_s_ext_ctrls_handler() - apply one tuning extended control.
 *
 * Writes sensor-tuning values into the shadow parameter region; the
 * caller commits them to the firmware afterwards. Returns 0 on success,
 * -EINVAL for an unknown control id.
 */
static int fimc_is_s_ext_ctrls_handler(struct fimc_is_dev *dev,
    struct v4l2_ext_control *ctrl)
{
    int val = ctrl->value;

    switch (ctrl->id) {
    case V4L2_CID_IS_TUNE_SEL_ENTRY:
        dev->h2i_cmd.entry_id = (0x1 << val);
        break;
    case V4L2_CID_IS_TUNE_SENSOR_EXPOSURE:
        IS_SENSOR_SET_TUNE_EXPOSURE(dev, val);
        break;
    case V4L2_CID_IS_TUNE_SENSOR_ANALOG_GAIN:
        IS_SENSOR_SET_TUNE_ANALOG_GAIN(dev, val);
        break;
    case V4L2_CID_IS_TUNE_SENSOR_FRAME_RATE:
        IS_SENSOR_SET_TUNE_FRAME_RATE(dev, val);
        break;
    case V4L2_CID_IS_TUNE_SENSOR_ACTUATOR_POS:
        /* Without an AF actuator, force position 0. */
        IS_SENSOR_SET_TUNE_ACTUATOR_POSITION(dev,
            is_af_use(dev) ? val : 0);
        break;
    default:
        dbg("Invalid control\n");
        return -EINVAL;
    }
    return 0;
}
/*
 * fimc_is_s_ext_ctrls() - v4l2 subdev core op: apply tuning controls.
 *
 * Resets the host-to-IS command header, applies each control through
 * fimc_is_s_ext_ctrls_handler(), then flushes the parameter region and
 * sends the tune command. Note the flush/tune runs even when a handler
 * failed part-way (matching the original behavior).
 */
static int fimc_is_s_ext_ctrls(struct v4l2_subdev *sd,
    struct v4l2_ext_controls *ctrls)
{
    struct fimc_is_dev *dev = to_fimc_is_dev(sd);
    int ret = 0;
    int i;

    dbg("S_EXT_CTRLS - %d\n", ctrls->count);
    if (ctrls->ctrl_class != V4L2_CTRL_CLASS_CAMERA)
        return -EINVAL;

    dev->h2i_cmd.cmd_type = 0;
    dev->h2i_cmd.entry_id = 0;

    for (i = 0; i < ctrls->count; i++) {
        ret = fimc_is_s_ext_ctrls_handler(dev, &ctrls->controls[i]);
        if (ret) {
            ctrls->error_idx = i;
            break;
        }
    }

    fimc_is_mem_cache_clean((void *)dev->is_p_region, IS_PARAM_SIZE);
    fimc_is_hw_set_tune(dev);
    return ret;
}
/* v4l2 subdev video operations
*/
static int fimc_is_try_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
struct fimc_is_dev *dev = to_fimc_is_dev(sd);
dbg("fimc_is_try_mbus_fmt - %d, %d\n", mf->width, mf->height);
switch (dev->scenario_id) {
case ISS_PREVIEW_STILL:
dev->sensor.width_prev = mf->width;
dev->sensor.height_prev = mf->height;
break;
case ISS_PREVIEW_VIDEO:
dev->sensor.width_prev_cam = mf->width;
dev->sensor.height_prev_cam = mf->height;
break;
case ISS_CAPTURE_STILL:
dev->sensor.width_cap = mf->width;
dev->sensor.height_cap = mf->height;
break;
case ISS_CAPTURE_VIDEO:
dev->sensor.width_cam = mf->width;
dev->sensor.height_cam = mf->height;
break;
}
/* for otf, only one image format is available */
IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, mf->width);
IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, mf->height);
IS_ISP_SET_PARAM_OTF_OUTPUT_WIDTH(dev, mf->width);
IS_ISP_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, mf->height);
IS_DRC_SET_PARAM_OTF_INPUT_WIDTH(dev, mf->width);
IS_DRC_SET_PARAM_OTF_INPUT_HEIGHT(dev, mf->height);
IS_DRC_SET_PARAM_OTF_OUTPUT_WIDTH(dev, mf->width);
IS_DRC_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, mf->height);
IS_FD_SET_PARAM_OTF_INPUT_WIDTH(dev, mf->width);
IS_FD_SET_PARAM_OTF_INPUT_HEIGHT(dev, mf->height);
return 0;
}
/*
 * fimc_is_g_mbus_fmt() - v4l2 subdev video op: report the current
 * media-bus format.
 *
 * The frame size is read back from the DRC OTF output parameters; the
 * pixel code is fixed (YUYV 8-bit) since the OTF path supports only
 * one format.
 */
static int fimc_is_g_mbus_fmt(struct v4l2_subdev *sd,
    struct v4l2_mbus_framefmt *mf)
{
    struct fimc_is_dev *dev = to_fimc_is_dev(sd);

    dbg("fimc_is_g_mbus_fmt\n");
    mf->code = V4L2_MBUS_FMT_YUYV8_2X8;
    mf->field = 0;
    /* for otf, only one image format is available */
    IS_DRC_GET_PARAM_OTF_OUTPUT_WIDTH(dev, mf->width);
    IS_DRC_GET_PARAM_OTF_OUTPUT_HEIGHT(dev, mf->height);
    return 0;
}
/*
 * fimc_is_s_mbus_fmt() - v4l2 subdev video op: set the media-bus format.
 *
 * mf->field is repurposed by this driver to carry the scenario index
 * (0..3, see the switch below) rather than an interlace field order.
 * Programs the full OTF chain (ISP in/out, ISP DMA1/DMA2 out,
 * DRC in/out, FD in) to the new size, commits the parameters and waits
 * for the firmware's block-command-cleared ack to avoid a size-mismatch
 * hang.
 */
static int fimc_is_s_mbus_fmt(struct v4l2_subdev *sd,
    struct v4l2_mbus_framefmt *mf)
{
    struct fimc_is_dev *dev = to_fimc_is_dev(sd);
    int ret = 0, format;
    u32 frametime_max = 0;
    printk(KERN_INFO "FIMC-IS s_fmt = %d,%d\n", mf->width, mf->height);
    /* scenario ID setting */
    switch (mf->field) {
    case 0:
        dev->scenario_id = ISS_PREVIEW_STILL;
        dev->sensor.width_prev = mf->width;
        dev->sensor.height_prev = mf->height;
        /* Keep the cached per-scenario frame time unless the user
         * explicitly set a frame rate (framerate_update). */
        if (!dev->sensor.framerate_update)
            frametime_max = dev->sensor.frametime_max_prev;
        break;
    case 1:
        dev->scenario_id = ISS_PREVIEW_VIDEO;
        dev->sensor.width_prev_cam = mf->width;
        dev->sensor.height_prev_cam = mf->height;
        if (!dev->sensor.framerate_update)
            frametime_max = dev->sensor.frametime_max_prev_cam;
        break;
    case 2:
        dev->scenario_id = ISS_CAPTURE_STILL;
        dev->sensor.width_cap = mf->width;
        dev->sensor.height_cap = mf->height;
        if (!dev->sensor.framerate_update)
            frametime_max = dev->sensor.frametime_max_cap;
        break;
    case 3:
        dev->scenario_id = ISS_CAPTURE_VIDEO;
        dev->sensor.width_cam = mf->width;
        dev->sensor.height_cam = mf->height;
        if (!dev->sensor.framerate_update)
            frametime_max = dev->sensor.frametime_max_cam;
        break;
    default:
        /* Unknown scenario index: silently succeed (ret == 0). */
        return ret;
    }
    format = fimc_is_hw_get_sensor_format(dev);
    /* 1. ISP input / output*/
    IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, mf->width);
    IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, mf->height);
    IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, format);
    IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev, OTF_INPUT_BIT_WIDTH_10BIT);
    IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev, OTF_INPUT_ORDER_BAYER_GR_BG);
    IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, 0);
    IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, 0);
    IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, 0);
    IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, 0);
    if (!dev->sensor.framerate_update) {
        IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MIN(dev, 0);
        IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MAX(dev, frametime_max);
    }
    IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
    IS_INC_PARAM_NUM(dev);
    IS_ISP_SET_PARAM_OTF_OUTPUT_WIDTH(dev, mf->width);
    IS_ISP_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, mf->height);
    IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_OUTPUT);
    IS_INC_PARAM_NUM(dev);
    IS_ISP_SET_PARAM_DMA_OUTPUT1_WIDTH(dev, mf->width);
    IS_ISP_SET_PARAM_DMA_OUTPUT1_HEIGHT(dev, mf->height);
    IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_OUTPUT);
    IS_INC_PARAM_NUM(dev);
    IS_ISP_SET_PARAM_DMA_OUTPUT2_WIDTH(dev, mf->width);
    IS_ISP_SET_PARAM_DMA_OUTPUT2_HEIGHT(dev, mf->height);
    IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_OUTPUT);
    IS_INC_PARAM_NUM(dev);
    /* 2. DRC input / output*/
    IS_DRC_SET_PARAM_OTF_INPUT_WIDTH(dev, mf->width);
    IS_DRC_SET_PARAM_OTF_INPUT_HEIGHT(dev, mf->height);
    IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_INPUT);
    IS_INC_PARAM_NUM(dev);
    IS_DRC_SET_PARAM_OTF_OUTPUT_WIDTH(dev, mf->width);
    IS_DRC_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, mf->height);
    IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_OUTPUT);
    IS_INC_PARAM_NUM(dev);
    /* 3. FD input / output*/
    IS_FD_SET_PARAM_OTF_INPUT_WIDTH(dev, mf->width);
    IS_FD_SET_PARAM_OTF_INPUT_HEIGHT(dev, mf->height);
    IS_SET_PARAM_BIT(dev, PARAM_FD_OTF_INPUT);
    IS_INC_PARAM_NUM(dev);
    fimc_is_mem_cache_clean((void *)dev->is_p_region, IS_PARAM_SIZE);
    clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
    fimc_is_hw_set_param(dev);
    /* Below sequence is for preventing system hang
       due to size mis-match */
    ret = wait_event_timeout(dev->irq_queue1,
        test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
        FIMC_IS_SHUTDOWN_TIMEOUT);
    if (!ret) {
        err("wait timeout : Set format - %d, %d\n",
            mf->width, mf->height);
        fimc_is_hw_set_low_poweroff(dev, true);
        return -EINVAL;
    }
    /* One-shot flag consumed: the next s_fmt restores the cached
     * per-scenario frame time again. */
    dev->sensor.framerate_update = false;
    return 0;
}
/*
 * fimc_is_s_stream() - v4l2 subdev video op: start/stop streaming.
 *
 * Issues the stream command to the firmware and waits for the matching
 * state bit (set by the IRQ handler). On timeout the hardware is forced
 * into low-power-off and -EINVAL is returned. On success the positive
 * wait_event_timeout() remainder is returned, matching the original.
 */
static int fimc_is_s_stream(struct v4l2_subdev *sd, int enable)
{
    struct fimc_is_dev *dev = to_fimc_is_dev(sd);
    int ret = 0;

    if (enable)
        dbg("IS Stream On\n");
    else
        dbg("IS Stream Off\n");

    /* Both directions require a fully initialized firmware. */
    if (!test_bit(IS_ST_INIT_DONE, &dev->state)) {
        err("Not ready state!!\n");
        return -EBUSY;
    }

    if (enable) {
        clear_bit(IS_ST_STREAM_ON, &dev->state);
        fimc_is_hw_set_stream(dev, enable);
        ret = wait_event_timeout(dev->irq_queue1,
            test_bit(IS_ST_STREAM_ON, &dev->state),
            FIMC_IS_SHUTDOWN_TIMEOUT);
        if (!ret) {
            err("wait timeout : Stream on\n");
            fimc_is_hw_set_low_poweroff(dev, true);
            return -EINVAL;
        }
    } else {
        clear_bit(IS_ST_STREAM_OFF, &dev->state);
        fimc_is_hw_set_stream(dev, enable);
        ret = wait_event_timeout(dev->irq_queue1,
            test_bit(IS_ST_STREAM_OFF, &dev->state),
            FIMC_IS_SHUTDOWN_TIMEOUT);
        if (!ret) {
            err("wait timeout : Stream off\n");
            printk(KERN_ERR "Low power off\n");
            fimc_is_hw_set_low_poweroff(dev, true);
            return -EINVAL;
        }
        /* Streaming stopped: fall back to the default setfile. */
        dev->setfile.sub_index = 0;
    }
    return ret;
}
/*
 * Commit the shadow parameter region to the IS firmware and wait for
 * the block-command-cleared acknowledgement.
 * Returns 0 on success, -EINVAL on timeout.
 */
static int fimc_is_s_parm_commit(struct fimc_is_dev *dev)
{
    int ret;

    fimc_is_mem_cache_clean((void *)dev->is_p_region,
        IS_PARAM_SIZE);
    clear_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state);
    fimc_is_hw_set_param(dev);
    ret = wait_event_timeout(dev->irq_queue1,
        test_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state),
        FIMC_IS_SHUTDOWN_TIMEOUT_SENSOR);
    if (!ret) {
        /* keep the original log text (it printed this function's
         * caller, fimc_is_s_parm, via __func__) */
        err("wait timeout : %s\n", "fimc_is_s_parm");
        return -EINVAL;
    }
    return 0;
}

/* Send an ISP control command (start/stop) and commit it. */
static int fimc_is_s_parm_isp_control(struct fimc_is_dev *dev, int cmd)
{
    IS_ISP_SET_PARAM_CONTROL_CMD(dev, cmd);
    IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL);
    IS_INC_PARAM_NUM(dev);
    return fimc_is_s_parm_commit(dev);
}

/*
 * Program the ISP OTF input block for the current sensor size/format
 * with the given maximum frame time (us), then commit.
 */
static int fimc_is_s_parm_otf_input(struct fimc_is_dev *dev,
    int width, int height, int format, u32 frametime_max)
{
    IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, OTF_INPUT_COMMAND_ENABLE);
    IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, width);
    IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, height);
    IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, format);
    IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev,
        OTF_INPUT_BIT_WIDTH_10BIT);
    IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev,
        OTF_INPUT_ORDER_BAYER_GR_BG);
    IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_X(dev, 0);
    IS_ISP_SET_PARAM_OTF_INPUT_CROP_OFFSET_Y(dev, 0);
    IS_ISP_SET_PARAM_OTF_INPUT_CROP_WIDTH(dev, 0);
    IS_ISP_SET_PARAM_OTF_INPUT_CROP_HEIGHT(dev, 0);
    IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MIN(dev, 0);
    IS_ISP_SET_PARAM_OTF_INPUT_FRAMETIME_MAX(dev, frametime_max);
    IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT);
    IS_INC_PARAM_NUM(dev);
    return fimc_is_s_parm_commit(dev);
}

/*
 * fimc_is_s_parm() - v4l2 subdev video op: set the frame rate.
 *
 * fps == 0 (or numerator == 0) selects the sensor's maximum frame rate
 * ("auto" mode, frame time capped at 66666 us); otherwise the requested
 * fps is programmed with a matching 1000000/fps frame-time cap. If the
 * stream is running, the ISP is stopped before the OTF input is
 * reprogrammed and restarted afterwards.
 *
 * Fixes vs. the original: the duplicated fps>0 / auto sequences are
 * factored into the helpers above, and the function now returns 0 on
 * success instead of leaking wait_event_timeout()'s positive leftover
 * jiffies to the v4l2 core.
 */
static int fimc_is_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
    struct fimc_is_dev *dev = to_fimc_is_dev(sd);
    u32 fps = 0;
    u32 frametime_max;
    int width, height, format;
    int ret;

    if (a->parm.capture.timeperframe.numerator == 0)
        fps = 0; /* prevent divide-by-0 error case */
    else
        fps = a->parm.capture.timeperframe.denominator /
            a->parm.capture.timeperframe.numerator;

    if (!test_bit(IS_ST_INIT_DONE, &dev->state)) {
        printk(KERN_ERR "FIMC_IS ins not ready!!\n");
        return -EBUSY;
    }

    width = fimc_is_hw_get_sensor_size_width(dev);
    height = fimc_is_hw_get_sensor_size_height(dev);
    format = fimc_is_hw_get_sensor_format(dev);
    /* Tell the next s_fmt not to overwrite the frame time we set. */
    dev->sensor.framerate_update = true;

    if (fps > 0) {
        IS_SENSOR_SET_FRAME_RATE(dev, fps);
        frametime_max = (u32)(1000000 / fps);
    } else {
        /* Auto mode */
        int max_fps = fimc_is_hw_get_sensor_max_framerate(dev);

        IS_SENSOR_SET_FRAME_RATE(dev, max_fps);
        frametime_max = 66666;
    }
    IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE);
    IS_INC_PARAM_NUM(dev);
    ret = fimc_is_s_parm_commit(dev);
    if (ret)
        return ret;

    /* Stop the ISP while its OTF input timing is reprogrammed. */
    if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
        ret = fimc_is_s_parm_isp_control(dev, CONTROL_COMMAND_STOP);
        if (ret)
            return ret;
    }

    ret = fimc_is_s_parm_otf_input(dev, width, height, format,
        frametime_max);
    if (ret)
        return ret;

    if (test_bit(IS_ST_STREAM_ON, &dev->state)) {
        ret = fimc_is_s_parm_isp_control(dev, CONTROL_COMMAND_START);
        if (ret)
            return ret;
    }
    return 0;
}
/* v4l2 subdev core operations: firmware load, init, reset, power and
 * the control handlers defined above. */
const struct v4l2_subdev_core_ops fimc_is_core_ops = {
    .load_fw = fimc_is_load_fw,
    .init = fimc_is_init_set,
    .reset = fimc_is_reset,
    .s_power = fimc_is_s_power,
    .g_ctrl = fimc_is_g_ctrl,
    .s_ctrl = fimc_is_s_ctrl,
    .g_ext_ctrls = fimc_is_g_ext_ctrls,
    .s_ext_ctrls = fimc_is_s_ext_ctrls,
};
/* v4l2 subdev video operations: format negotiation, streaming control
 * and frame-rate (s_parm) handling. */
const struct v4l2_subdev_video_ops fimc_is_video_ops = {
    .try_mbus_fmt = fimc_is_try_mbus_fmt,
    .g_mbus_fmt = fimc_is_g_mbus_fmt,
    .s_mbus_fmt = fimc_is_s_mbus_fmt,
    .s_stream = fimc_is_s_stream,
    .s_parm = fimc_is_s_parm,
};
/* Aggregate subdev ops table registered with the v4l2 core. */
const struct v4l2_subdev_ops fimc_is_subdev_ops = {
    .core = &fimc_is_core_ops,
    .video = &fimc_is_video_ops,
};
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.